source: zzuf/trunk/src/lib-mem.c @ 2523

Last change on this file since 2523 was 2523, checked in by Sam Hocevar, 12 years ago
  • Fix a compilation issue on FreeBSD by defining _BSD_SOURCE and including <sys/cdefs.h>.
  • Property svn:keywords set to Id
File size: 10.5 KB
/*
 *  zzuf - general purpose fuzzer
 *  Copyright (c) 2006,2007 Sam Hocevar <sam@zoy.org>
 *                All Rights Reserved
 *
 *  $Id: lib-mem.c 2523 2008-07-15 20:15:47Z sam $
 *
 *  This program is free software. It comes without any warranty, to
 *  the extent permitted by applicable law. You can redistribute it
 *  and/or modify it under the terms of the Do What The Fuck You Want
 *  To Public License, Version 2, as published by Sam Hocevar. See
 *  http://sam.zoy.org/wtfpl/COPYING for more details.
 */

/*
 *  lib-mem.c: loaded memory handling functions
 */

#include "config.h"

/* Need this for RTLD_NEXT */
#define _GNU_SOURCE
/* Need this for MAP_ANON and valloc() on FreeBSD (together with cdefs.h) */
#define _BSD_SOURCE
/* Use this to get mmap64() on glibc systems */
#define _LARGEFILE64_SOURCE
/* Use this to get posix_memalign */
#if defined HAVE_POSIX_MEMALIGN
#   define _XOPEN_SOURCE 600
#endif

#if defined HAVE_STDINT_H
#   include <stdint.h>
#elif defined HAVE_INTTYPES_H
#   include <inttypes.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>

#if defined HAVE_SYS_CDEFS_H
#   include <sys/cdefs.h>
#endif
#if defined HAVE_MALLOC_H
#   include <malloc.h>
#endif
#if defined HAVE_UNISTD_H
#   include <unistd.h>
#endif
#if defined HAVE_SYS_MMAN_H
#   include <sys/mman.h>
#endif
#if defined HAVE_LIBC_H
#   include <libc.h>
#endif

#include "libzzuf.h"
#include "lib-load.h"
#include "debug.h"
#include "fuzz.h"
#include "fd.h"
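
/* NEW(), ORIG() and LOADSYM() are defined in lib-load.h. As a rough,
 * hypothetical sketch of the convention used throughout this file (the real
 * macros are platform-dependent and live in lib-load.h, not here):
 *
 *     ORIG(fn)    - the static pointer, declared below, that holds the real
 *                   libc implementation of fn
 *     NEW(fn)     - the name under which our diverted wrapper is exported
 *     LOADSYM(fn) - resolves the real fn once, presumably with
 *                   dlsym(RTLD_NEXT, "fn"), hence _GNU_SOURCE above
 */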

#if !defined SIGKILL
#   define SIGKILL 9
#endif

#if !defined MAP_ANONYMOUS
#   define MAP_ANONYMOUS MAP_ANON
#endif

/* TODO: mremap, maybe brk/sbrk (haha) */

/* Library functions that we divert */
static void *  (*ORIG(calloc))   (size_t nmemb, size_t size);
static void *  (*ORIG(malloc))   (size_t size);
static void    (*ORIG(free))     (void *ptr);
#if defined HAVE_VALLOC
static void *  (*ORIG(valloc))   (size_t size);
#endif
#if defined HAVE_MEMALIGN
static void *  (*ORIG(memalign)) (size_t boundary, size_t size);
#endif
#if defined HAVE_POSIX_MEMALIGN
static int     (*ORIG(posix_memalign)) (void **memptr, size_t alignment,
                                        size_t size);
#endif
static void *  (*ORIG(realloc))  (void *ptr, size_t size);

#if defined HAVE_MMAP
static void *  (*ORIG(mmap))     (void *start, size_t length, int prot,
                                  int flags, int fd, off_t offset);
#endif
#if defined HAVE_MMAP64
static void *  (*ORIG(mmap64))   (void *start, size_t length, int prot,
                                  int flags, int fd, off64_t offset);
#endif
#if defined HAVE_MUNMAP
static int     (*ORIG(munmap))   (void *start, size_t length);
#endif
#if defined HAVE_MAP_FD
static kern_return_t (*ORIG(map_fd)) (int fd, vm_offset_t offset,
                                      vm_offset_t *addr, boolean_t find_space,
                                      vm_size_t numbytes);
#endif

/* We need a static memory buffer because some functions call memory
 * allocation routines before our library is loaded. Hell, even dlsym()
 * calls calloc(), so we need to do something about it */
#define DUMMY_BYTES 655360 /* 640 kB ought to be enough for anybody */
static uint64_t dummy_buffer[DUMMY_BYTES / 8];
static int64_t dummy_offset = 0;
#define DUMMY_START ((uintptr_t)dummy_buffer)
#define DUMMY_STOP ((uintptr_t)dummy_buffer + DUMMY_BYTES)
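
/* The early-allocation path is a simple bump allocator: dummy_offset counts
 * 64-bit words, so each request below is rounded up to a multiple of eight
 * bytes and carved out of dummy_buffer. Nothing in this buffer is ever
 * reused or released; NEW(free) simply ignores pointers that fall between
 * DUMMY_START and DUMMY_STOP. */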

void _zz_mem_init(void)
{
    LOADSYM(free);
    LOADSYM(calloc);
    LOADSYM(malloc);
    LOADSYM(realloc);
}
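
/* Only the four basic entry points are resolved eagerly; the remaining
 * wrappers call LOADSYM() themselves on first use. Every wrapper below also
 * shares one pattern: when _zz_memory is set (presumably because the zzuf
 * front-end enforces a memory limit), an allocation failure with
 * errno == ENOMEM raises SIGKILL so the fuzzed process dies at once instead
 * of limping on. */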

void *NEW(calloc)(size_t nmemb, size_t size)
{
    void *ret;
    if(!ORIG(calloc))
    {
        ret = dummy_buffer + dummy_offset;
        memset(ret, 0, nmemb * size);
        dummy_offset += (nmemb * size + 7) / 8;
        debug("%s(%li, %li) = %p", __func__,
              (long int)nmemb, (long int)size, ret);
        return ret;
    }
    ret = ORIG(calloc)(nmemb, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

void *NEW(malloc)(size_t size)
{
    void *ret;
    if(!ORIG(malloc))
    {
        ret = dummy_buffer + dummy_offset;
        dummy_offset += (size + 7) / 8;
        debug("%s(%li) = %p", __func__, (long int)size, ret);
        return ret;
    }
    ret = ORIG(malloc)(size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

void NEW(free)(void *ptr)
{
    if((uintptr_t)ptr >= DUMMY_START && (uintptr_t)ptr < DUMMY_STOP)
    {
        debug("%s(%p)", __func__, ptr);
        return;
    }
    if(!ORIG(free))
    {
        /* FIXME: memory leak */
        debug("%s(%p) IGNORED", __func__, ptr);
        return;
    }
    ORIG(free)(ptr);
}

void *NEW(realloc)(void *ptr, size_t size)
{
    void *ret;
    if(!ORIG(realloc)
        || ((uintptr_t)ptr >= DUMMY_START && (uintptr_t)ptr < DUMMY_STOP))
    {
        ret = dummy_buffer + dummy_offset;
        /* XXX: If ptr is NULL, we don't copy anything. If it is non-NULL, we
         * copy everything even if it is too big, we don't have anything to
         * overflow really. */
        if(ptr)
            memcpy(ret, ptr, size);
        dummy_offset += (size + 7) / 8;
        debug("%s(%p, %li) = %p", __func__, ptr, (long int)size, ret);
        return ret;
    }
    LOADSYM(realloc);
    ret = ORIG(realloc)(ptr, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

#if defined HAVE_VALLOC
void *NEW(valloc)(size_t size)
{
    void *ret;
    LOADSYM(valloc);
    ret = ORIG(valloc)(size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}
#endif

#if defined HAVE_MEMALIGN
void *NEW(memalign)(size_t boundary, size_t size)
{
    void *ret;
    LOADSYM(memalign);
    ret = ORIG(memalign)(boundary, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}
#endif

#if defined HAVE_POSIX_MEMALIGN
int NEW(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
    int ret;
    LOADSYM(posix_memalign);
    ret = ORIG(posix_memalign)(memptr, alignment, size);
    if(ret == ENOMEM && _zz_memory)
        raise(SIGKILL);
    return ret;
}
#endif
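
/* Unlike the wrappers above, posix_memalign() reports failure through its
 * return value rather than through errno, which is why the ENOMEM check
 * here looks at the return code directly. */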

/* Table used for mmap() and munmap() */
void **maps = NULL;
int nbmaps = 0;
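/* Entries come in pairs: maps[i] is the anonymous, writable copy that gets
 * fuzzed and handed back to the caller, and maps[i + 1] is the real
 * file-backed mapping it shadows. NEW(munmap) below walks this table so
 * that unmapping the copy also releases the underlying mapping. */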

#define MMAP(fn, off_t) \
    do { \
        char *b = MAP_FAILED; \
        LOADSYM(fn); \
        if(!_zz_ready || !_zz_iswatched(fd) || _zz_islocked(fd) \
             || !_zz_isactive(fd)) \
            return ORIG(fn)(start, length, prot, flags, fd, offset); \
        ret = ORIG(fn)(NULL, length, prot, flags, fd, offset); \
        if(ret != MAP_FAILED && length) \
        { \
            b = ORIG(fn)(start, length, PROT_READ | PROT_WRITE, \
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
            if(b == MAP_FAILED) \
            { \
                munmap(ret, length); \
                ret = MAP_FAILED; \
            } \
        } \
        if(b != MAP_FAILED) \
        { \
            int i, oldpos; \
            for(i = 0; i < nbmaps; i += 2) \
                if(maps[i] == NULL) \
                    break; \
            if(i == nbmaps) \
            { \
                nbmaps += 2; \
                maps = realloc(maps, nbmaps * sizeof(void *)); \
            } \
            maps[i] = b; \
            maps[i + 1] = ret; \
            oldpos = _zz_getpos(fd); \
            _zz_setpos(fd, offset); /* fuzz as if reading from the mapped offset */ \
            memcpy(b, ret, length); /* FIXME: get rid of this */ \
            _zz_fuzz(fd, (uint8_t *)b, length); \
            _zz_setpos(fd, oldpos); \
            ret = b; \
            if(length >= 4) \
                debug("%s(%p, %li, %i, %i, %i, %lli) = %p \"%c%c%c%c...", \
                      __func__, start, (long int)length, prot, flags, fd, \
                      (long long int)offset, ret, b[0], b[1], b[2], b[3]); \
            else \
                debug("%s(%p, %li, %i, %i, %i, %lli) = %p \"%c...", \
                      __func__, start, (long int)length, prot, flags, fd, \
                      (long long int)offset, ret, b[0]); \
        } \
        else \
            debug("%s(%p, %li, %i, %i, %i, %lli) = %p", \
                  __func__, start, (long int)length, prot, flags, fd, \
                  (long long int)offset, ret); \
    } while(0)
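
/* The MMAP macro proceeds in three steps: perform the real mapping, create
 * an anonymous PROT_READ|PROT_WRITE private mapping of the same length,
 * then copy the mapped file contents into it and fuzz that copy with
 * _zz_fuzz() positioned at the requested file offset. The fuzzed copy is
 * what the caller receives; descriptors that are not watched (or are locked
 * or inactive) fall through to the original function untouched. */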

#if defined HAVE_MMAP
void *NEW(mmap)(void *start, size_t length, int prot, int flags,
                int fd, off_t offset)
{
    void *ret; MMAP(mmap, off_t); return ret;
}
#endif

#if defined HAVE_MMAP64
void *NEW(mmap64)(void *start, size_t length, int prot, int flags,
                  int fd, off64_t offset)
{
    void *ret; MMAP(mmap64, off64_t); return ret;
}
#endif

#if defined HAVE_MUNMAP
int NEW(munmap)(void *start, size_t length)
{
    int ret, i;

    LOADSYM(munmap);
    for(i = 0; i < nbmaps; i++)
    {
        if(maps[i] != start)
            continue;

        ORIG(munmap)(start, length);
        ret = ORIG(munmap)(maps[i + 1], length);
        maps[i] = NULL;
        maps[i + 1] = NULL;
        debug("%s(%p, %li) = %i", __func__, start, (long int)length, ret);
        return ret;
    }

    return ORIG(munmap)(start, length);
}
#endif

#if defined HAVE_MAP_FD
kern_return_t NEW(map_fd)(int fd, vm_offset_t offset, vm_offset_t *addr,
                          boolean_t find_space, vm_size_t numbytes)
{
    kern_return_t ret;

    LOADSYM(map_fd);
    ret = ORIG(map_fd)(fd, offset, addr, find_space, numbytes);
    if(!_zz_ready || !_zz_iswatched(fd) || _zz_islocked(fd)
         || !_zz_isactive(fd))
        return ret;

    if(ret == 0 && numbytes)
    {
        /* FIXME: do we also have to rewind the filedescriptor like in mmap? */
        char *b = malloc(numbytes);
        memcpy(b, (void *)*addr, numbytes);
        _zz_fuzz(fd, (void *)b, numbytes);
        *addr = (vm_offset_t)b;
        /* FIXME: the map is never freed; there is no such thing as unmap_fd,
         * but I suppose that kind of map should go when the filedescriptor is
         * closed (unlike mmap, which returns a persistent buffer). */

        if(numbytes >= 4)
           debug("%s(%i, %lli, &%p, %i, %lli) = %i \"%c%c%c%c", __func__,
                 fd, (long long int)offset, (void *)*addr, (int)find_space,
                 (long long int)numbytes, ret, b[0], b[1], b[2], b[3]);
        else
           debug("%s(%i, %lli, &%p, %i, %lli) = %i \"%c", __func__, fd,
                 (long long int)offset, (void *)*addr, (int)find_space,
                 (long long int)numbytes, ret, b[0]);
    }
    else
        debug("%s(%i, %lli, &%p, %i, %lli) = %i", __func__, fd,
              (long long int)offset, (void *)*addr, (int)find_space,
              (long long int)numbytes, ret);

    return ret;
}
#endif
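
/* Usage sketch (an assumption about the surrounding project, not something
 * this file enforces): these wrappers only take effect once libzzuf has been
 * injected into the target process ahead of libc, e.g. with LD_PRELOAD on
 * ELF systems or DYLD_INSERT_LIBRARIES on Mac OS X, which the zzuf
 * command-line front-end arranges before launching the fuzzed program. */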