source: zzuf/trunk/src/lib-mem.c @ 1791

Last change on this file since 1791 was 1791, checked in by Sam Hocevar, 13 years ago
  • Added -p flag to cherry pick file descriptors that get fuzzed.
  • Property svn:keywords set to Id
File size: 10.0 KB
/*
 *  zzuf - general purpose fuzzer
 *  Copyright (c) 2006,2007 Sam Hocevar <sam@zoy.org>
 *                All Rights Reserved
 *
 *  $Id: lib-mem.c 1791 2007-07-06 13:40:55Z sam $
 *
 *  This program is free software. It comes without any warranty, to
 *  the extent permitted by applicable law. You can redistribute it
 *  and/or modify it under the terms of the Do What The Fuck You Want
 *  To Public License, Version 2, as published by Sam Hocevar. See
 *  http://sam.zoy.org/wtfpl/COPYING for more details.
 */

/*
 *  lib-mem.c: loaded memory handling functions
 */

#include "config.h"

/* Need this for RTLD_NEXT */
#define _GNU_SOURCE
/* Use this to get mmap64() on glibc systems */
#define _LARGEFILE64_SOURCE
/* Use this to get posix_memalign */
#if defined HAVE_POSIX_MEMALIGN
#   define _XOPEN_SOURCE 600
#endif

#if defined HAVE_STDINT_H
#   include <stdint.h>
#elif defined HAVE_INTTYPES_H
#   include <inttypes.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>

#if defined HAVE_MALLOC_H
#   include <malloc.h>
#endif
#if defined HAVE_UNISTD_H
#   include <unistd.h>
#endif
#if defined HAVE_SYS_MMAN_H
#   include <sys/mman.h>
#endif
#if defined HAVE_LIBC_H
#   include <libc.h>
#endif

#include "libzzuf.h"
#include "lib-load.h"
#include "debug.h"
#include "fuzz.h"
#include "fd.h"

#if !defined SIGKILL
#   define SIGKILL 9
#endif

#if !defined MAP_ANONYMOUS
#   define MAP_ANONYMOUS MAP_ANON
#endif

/* TODO: mremap, maybe brk/sbrk (haha) */

/* Library functions that we divert */
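/* The ORIG()/NEW()/LOADSYM() macros come from lib-load.h: ORIG(foo) names
 * the pointer holding the real libc foo(), NEW(foo) names our diverted
 * version, and LOADSYM(foo) resolves the real symbol with dlsym() and
 * RTLD_NEXT (which is why _GNU_SOURCE is defined above). The diversions
 * take effect once zzuf preloads this library into the fuzzed process. */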
static void *  (*ORIG(calloc))   (size_t nmemb, size_t size);
static void *  (*ORIG(malloc))   (size_t size);
static void    (*ORIG(free))     (void *ptr);
#if defined HAVE_VALLOC
static void *  (*ORIG(valloc))   (size_t size);
#endif
#if defined HAVE_MEMALIGN
static void *  (*ORIG(memalign)) (size_t boundary, size_t size);
#endif
#if defined HAVE_POSIX_MEMALIGN
static int     (*ORIG(posix_memalign)) (void **memptr, size_t alignment,
                                        size_t size);
#endif
static void *  (*ORIG(realloc))  (void *ptr, size_t size);

#if defined HAVE_MMAP
static void *  (*ORIG(mmap))     (void *start, size_t length, int prot,
                                  int flags, int fd, off_t offset);
#endif
#if defined HAVE_MMAP64
static void *  (*ORIG(mmap64))   (void *start, size_t length, int prot,
                                  int flags, int fd, off64_t offset);
#endif
#if defined HAVE_MUNMAP
static int     (*ORIG(munmap))   (void *start, size_t length);
#endif
#if defined HAVE_MAP_FD
static kern_return_t (*ORIG(map_fd)) (int fd, vm_offset_t offset,
                                      vm_offset_t *addr, boolean_t find_space,
                                      vm_size_t numbytes);
#endif

/* We need a static memory buffer because some functions call memory
 * allocation routines before our library is loaded. Hell, even dlsym()
 * calls calloc(), so we need to do something about it */
#define DUMMY_BYTES 655360 /* 640 kB ought to be enough for anybody */
static uint64_t dummy_buffer[DUMMY_BYTES / 8];
static int64_t dummy_offset = 0;
#define DUMMY_START ((uintptr_t)dummy_buffer)
#define DUMMY_STOP ((uintptr_t)dummy_buffer + DUMMY_BYTES)
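/* The fallback allocator below is a plain bump allocator: dummy_offset
 * counts 64-bit words already handed out, and the DUMMY_START/DUMMY_STOP
 * range lets free() and realloc() recognise pointers that came from this
 * buffer so they are never passed to the real libc. Nothing is ever
 * reclaimed, which is acceptable for the few early allocations we serve. */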

void _zz_mem_init(void)
{
    LOADSYM(calloc);
    LOADSYM(malloc);
    LOADSYM(realloc);
}

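/* All the allocation wrappers below follow the same pattern: serve the
 * request from the static dummy buffer while the real symbol is still
 * unresolved, otherwise call through to libc, and if that call fails with
 * ENOMEM while zzuf's memory limiting is active (_zz_memory), kill the
 * process so a fuzzed input cannot make the target allocate without bound. */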
void *NEW(calloc)(size_t nmemb, size_t size)
{
    void *ret;
    if(!ORIG(calloc))
    {
        ret = dummy_buffer + dummy_offset;
        memset(ret, 0, nmemb * size);
        dummy_offset += (nmemb * size + 7) / 8;
        debug("%s(%li, %li) = %p", __func__,
              (long int)nmemb, (long int)size, ret);
        return ret;
    }
    ret = ORIG(calloc)(nmemb, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

void *NEW(malloc)(size_t size)
{
    void *ret;
    if(!ORIG(malloc))
    {
        ret = dummy_buffer + dummy_offset;
        dummy_offset += (size + 7) / 8;
        debug("%s(%li) = %p", __func__, (long int)size, ret);
        return ret;
    }
    ret = ORIG(malloc)(size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

void NEW(free)(void *ptr)
{
    if((uintptr_t)ptr >= DUMMY_START && (uintptr_t)ptr < DUMMY_STOP)
    {
        debug("%s(%p)", __func__, ptr);
        return;
    }
    LOADSYM(free);
    ORIG(free)(ptr);
}

void *NEW(realloc)(void *ptr, size_t size)
{
    void *ret;
    if(!ORIG(realloc)
        || ((uintptr_t)ptr >= DUMMY_START && (uintptr_t)ptr < DUMMY_STOP))
    {
        ret = dummy_buffer + dummy_offset;
        memcpy(ret, ptr, size);
        dummy_offset += (size + 7) / 8;
        debug("%s(%p, %li) = %p", __func__, ptr, (long int)size, ret);
        return ret;
    }
    LOADSYM(realloc);
    ret = ORIG(realloc)(ptr, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}

#if defined HAVE_VALLOC
void *NEW(valloc)(size_t size)
{
    void *ret;
    LOADSYM(valloc);
    ret = ORIG(valloc)(size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}
#endif

#if defined HAVE_MEMALIGN
void *NEW(memalign)(size_t boundary, size_t size)
{
    void *ret;
    LOADSYM(memalign);
    ret = ORIG(memalign)(boundary, size);
    if(ret == NULL && _zz_memory && errno == ENOMEM)
        raise(SIGKILL);
    return ret;
}
#endif

#if defined HAVE_POSIX_MEMALIGN
int NEW(posix_memalign)(void **memptr, size_t alignment, size_t size)
{
    int ret;
    LOADSYM(posix_memalign);
    ret = ORIG(posix_memalign)(memptr, alignment, size);
    if(ret == ENOMEM && _zz_memory)
        raise(SIGKILL);
    return ret;
}
#endif

/* Table used for mmap() and munmap() */
void **maps = NULL;
int nbmaps = 0;

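/* The MMAP() macro below provides the body shared by the mmap() and
 * mmap64() wrappers, which differ only in their offset type. For a watched
 * descriptor it performs the real mapping, creates an anonymous read/write
 * mapping of the same length, copies the data into it, fuzzes that copy
 * with _zz_fuzz() and returns the copy to the caller. The copy and the
 * real mapping are recorded as a pair in maps[] so that munmap() can later
 * release both. */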
#define MMAP(fn, off_t) \
    do { \
        char *b = MAP_FAILED; \
        LOADSYM(fn); \
        if(!_zz_ready || !_zz_iswatched(fd) || _zz_islocked(fd) \
             || !_zz_isactive(fd)) \
            return ORIG(fn)(start, length, prot, flags, fd, offset); \
        ret = ORIG(fn)(NULL, length, prot, flags, fd, offset); \
        if(ret != MAP_FAILED && length) \
        { \
            b = ORIG(fn)(start, length, PROT_READ | PROT_WRITE, \
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
            if(b == MAP_FAILED) \
            { \
                munmap(ret, length); \
                ret = MAP_FAILED; \
            } \
        } \
        if(b != MAP_FAILED) \
        { \
            int i, oldpos; \
            for(i = 0; i < nbmaps; i += 2) \
                if(maps[i] == NULL) \
                    break; \
            if(i == nbmaps) \
            { \
                nbmaps += 2; \
                maps = realloc(maps, nbmaps * sizeof(void *)); \
            } \
            maps[i] = b; \
            maps[i + 1] = ret; \
            oldpos = _zz_getpos(fd); \
            _zz_setpos(fd, offset); /* mmap() maps the fd at offset 0 */ \
            memcpy(b, ret, length); /* FIXME: get rid of this */ \
            _zz_fuzz(fd, (uint8_t *)b, length); \
            _zz_setpos(fd, oldpos); \
            ret = b; \
            if(length >= 4) \
                debug("%s(%p, %li, %i, %i, %i, %lli) = %p \"%c%c%c%c...", \
                      __func__, start, (long int)length, prot, flags, fd, \
                      (long long int)offset, ret, b[0], b[1], b[2], b[3]); \
            else \
                debug("%s(%p, %li, %i, %i, %i, %lli) = %p \"%c...", \
                      __func__, start, (long int)length, prot, flags, fd, \
                      (long long int)offset, ret, b[0]); \
        } \
        else \
            debug("%s(%p, %li, %i, %i, %i, %lli) = %p", \
                  __func__, start, (long int)length, prot, flags, fd, \
                  (long long int)offset, ret); \
    } while(0)

#if defined HAVE_MMAP
void *NEW(mmap)(void *start, size_t length, int prot, int flags,
                int fd, off_t offset)
{
    void *ret; MMAP(mmap, off_t); return ret;
}
#endif

#if defined HAVE_MMAP64
void *NEW(mmap64)(void *start, size_t length, int prot, int flags,
                  int fd, off64_t offset)
{
    void *ret; MMAP(mmap64, off64_t); return ret;
}
#endif

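/* munmap() has to release both halves of a diverted mapping: the fuzzed
 * anonymous copy that was handed to the caller and the real file-backed
 * mapping kept next to it in maps[]. Mappings we never diverted are passed
 * straight through to the real munmap(). */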
#if defined HAVE_MUNMAP
int NEW(munmap)(void *start, size_t length)
{
    int ret, i;

    LOADSYM(munmap);
    for(i = 0; i < nbmaps; i++)
    {
        if(maps[i] != start)
            continue;

        ORIG(munmap)(start, length);
        ret = ORIG(munmap)(maps[i + 1], length);
        maps[i] = NULL;
        maps[i + 1] = NULL;
        debug("%s(%p, %li) = %i", __func__, start, (long int)length, ret);
        return ret;
    }

    return ORIG(munmap)(start, length);
}
#endif

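/* map_fd() is the old Mach (Darwin / Mac OS X) way of mapping a file into
 * memory. Unlike the mmap() wrappers above, this one lets the real call
 * succeed first, then copies the mapped bytes into a malloc()ed buffer,
 * fuzzes that copy and hands its address back through *addr. */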
#if defined HAVE_MAP_FD
kern_return_t NEW(map_fd)(int fd, vm_offset_t offset, vm_offset_t *addr,
                          boolean_t find_space, vm_size_t numbytes)
{
    kern_return_t ret;

    LOADSYM(map_fd);
    ret = ORIG(map_fd)(fd, offset, addr, find_space, numbytes);
    if(!_zz_ready || !_zz_iswatched(fd) || _zz_islocked(fd)
         || !_zz_isactive(fd))
        return ret;

    if(ret == 0 && numbytes)
    {
        /* FIXME: do we also have to rewind the filedescriptor like in mmap? */
        char *b = malloc(numbytes);
        memcpy(b, (void *)*addr, numbytes);
        _zz_fuzz(fd, (void *)b, numbytes);
        *addr = (vm_offset_t)b;
        /* FIXME: the map is never freed; there is no such thing as unmap_fd,
         * but I suppose that kind of map should go when the filedescriptor is
         * closed (unlike mmap, which returns a persistent buffer). */

        if(numbytes >= 4)
           debug("%s(%i, %lli, &%p, %i, %lli) = %i \"%c%c%c%c", __func__,
                 fd, (long long int)offset, (void *)*addr, (int)find_space,
                 (long long int)numbytes, ret, b[0], b[1], b[2], b[3]);
        else
           debug("%s(%i, %lli, &%p, %i, %lli) = %i \"%c", __func__, fd,
                 (long long int)offset, (void *)*addr, (int)find_space,
                 (long long int)numbytes, ret, b[0]);
    }
    else
        debug("%s(%i, %lli, &%p, %i, %lli) = %i", __func__, fd,
              (long long int)offset, (void *)*addr, (int)find_space,
              (long long int)numbytes, ret);

    return ret;
}
#endif