#ifndef DLMALLOC_VERSION
#define DLMALLOC_VERSION 20806
#ifndef DLMALLOC_EXPORT
#define DLMALLOC_EXPORT extern
#define WIN32_LEAN_AND_MEAN
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION
#if defined(DARWIN) || defined(_DARWIN)
#define HAVE_MORECORE 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t) 16U)
#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#define MAX_SIZE_T (~(size_t) 0)
    ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \
     (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0))
#if ((defined(__GNUC__) && \
      ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \
       defined(__i386__) || defined(__x86_64__))) || \
     (defined(_MSC_VER) && _MSC_VER >= 1310))
#ifndef USE_SPIN_LOCKS
#define USE_SPIN_LOCKS 1
#error "USE_SPIN_LOCKS defined without implementation"
#elif !defined(USE_SPIN_LOCKS)
#define USE_SPIN_LOCKS 0
#define ONLY_MSPACES 0
#ifndef MALLOC_ALIGNMENT
#define MALLOC_ALIGNMENT ((size_t) (2 * sizeof(void *)))
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#ifndef MALLOC_INSPECT_ALL
#define MALLOC_INSPECT_ALL 0
#ifndef MALLOC_FAILURE_ACTION
#define MALLOC_FAILURE_ACTION errno = ENOMEM;
#define HAVE_MORECORE 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
#define MORECORE_DEFAULT sbrk
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#ifndef DEFAULT_GRANULARITY
#if (MORECORE_CONTIGUOUS || defined(WIN32))
#define DEFAULT_GRANULARITY (0)
#define DEFAULT_GRANULARITY ((size_t) 64U * (size_t) 1024U)
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t) 2U * (size_t) 1024U * (size_t) 1024U)
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#ifndef DEFAULT_MMAP_THRESHOLD
#define DEFAULT_MMAP_THRESHOLD ((size_t) 256U * (size_t) 1024U)
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#ifndef MAX_RELEASE_CHECK_RATE
#define MAX_RELEASE_CHECK_RATE 4095
#define MAX_RELEASE_CHECK_RATE MAX_SIZE_T
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#ifndef NO_MALLOC_STATS
#define NO_MALLOC_STATS 0
#ifndef NO_SEGMENT_TRAVERSAL
#define NO_SEGMENT_TRAVERSAL 0
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#define M_MMAP_THRESHOLD (-3)
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#ifndef STRUCT_MALLINFO_DECLARED
#define _STRUCT_MALLINFO
#define STRUCT_MALLINFO_DECLARED 1
#define FORCEINLINE __inline __attribute__((always_inline))
#elif defined(_MSC_VER)
#define FORCEINLINE __forceinline
#define NOINLINE __attribute__((noinline))
#elif defined(_MSC_VER)
#define NOINLINE __declspec(noinline)
#define FORCEINLINE inline
#define dlcalloc calloc
#define dlmalloc malloc
#define dlmemalign memalign
#define dlposix_memalign posix_memalign
#define dlrealloc realloc
#define dlrealloc_in_place realloc_in_place
#define dlvalloc valloc
#define dlpvalloc pvalloc
#define dlmallinfo mallinfo
#define dlmallopt mallopt
#define dlmalloc_trim malloc_trim
#define dlmalloc_stats malloc_stats
#define dlmalloc_usable_size malloc_usable_size
#define dlmalloc_footprint malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all malloc_inspect_all
#define dlindependent_calloc independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free bulk_free
#if MALLOC_INSPECT_ALL
typedef void *mspace;
DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, int locked);
DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void *mem, size_t newsize);
DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                                 size_t elem_size, void *chunks[]);
DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                                   size_t sizes[], void *chunks[]);
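/* Example (ours, not part of the original source): a minimal usage sketch of
 * the mspace API declared above. It assumes the usual companions
 * create_mspace, mspace_malloc, mspace_free and destroy_mspace, which this
 * header also declares; capacity 0 requests the default granularity and
 * locked = 1 makes the arena thread-safe. */
#include <stdio.h>

int demo_mspace_usage(void)
{
    mspace ms = create_mspace(0, 1);            /* private arena */
    void *p = mspace_malloc(ms, 128);
    void *q = mspace_calloc(ms, 16, sizeof(int));

    if (p != 0 && q != 0)
        printf("arena allocations at %p and %p\n", p, q);

    mspace_free(ms, p);
    mspace_free(ms, q);
    return (int) destroy_mspace(ms);            /* releases the whole arena */
}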
#pragma warning( disable : 4146 )
#ifndef LACKS_ERRNO_H
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if (!(x)) ABORT
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#ifndef LACKS_STDLIB_H
#ifndef LACKS_STRING_H
#ifndef LACKS_STRINGS_H
#ifndef LACKS_SYS_MMAN_H
#if (defined(linux) && !defined(__USE_GNU))
#include <sys/mman.h>
#include <sys/mman.h>
#ifndef LACKS_FCNTL_H
#ifndef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#if defined(__SVR4) && defined(__sun)
#elif !defined(LACKS_SCHED_H)
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#elif defined(_MSC_VER)
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#define LOCK_AT_FORK 0
#if defined(_MSC_VER) && _MSC_VER >= 1300
#ifndef BitScanForward
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#ifndef malloc_getpagesize
#ifndef _SC_PAGE_SIZE
#define _SC_PAGE_SIZE _SC_PAGESIZE
#define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
#if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
#define malloc_getpagesize getpagesize()
#define malloc_getpagesize getpagesize()
#ifndef LACKS_SYS_PARAM_H
#include <sys/param.h>
#define malloc_getpagesize EXEC_PAGESIZE
#define malloc_getpagesize NBPG
#define malloc_getpagesize (NBPG * CLSIZE)
#define malloc_getpagesize NBPC
#define malloc_getpagesize PAGESIZE
#define malloc_getpagesize ((size_t) 4096U)
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
#define SIZE_T_ZERO ((size_t) 0)
#define SIZE_T_ONE ((size_t) 1)
#define SIZE_T_TWO ((size_t) 2)
#define SIZE_T_FOUR ((size_t) 4)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
#define is_aligned(A) (((size_t) (A) & (CHUNK_ALIGN_MASK)) == 0)
#define align_offset(A) \
    ((((size_t) (A) & CHUNK_ALIGN_MASK) == 0) ? 0 : \
     ((MALLOC_ALIGNMENT - ((size_t) (A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
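/* Worked example (ours, not from the source): with MALLOC_ALIGNMENT = 16,
 * align_offset yields the bump needed to reach the next aligned address,
 * and is_aligned tests for a zero remainder. The standalone helper below
 * mirrors the macro so it can be exercised directly. */
#include <stdio.h>
#include <stddef.h>

#define DEMO_ALIGNMENT  ((size_t) 16U)
#define DEMO_ALIGN_MASK (DEMO_ALIGNMENT - 1)

static size_t demo_align_offset(char *a)
{
    return (((size_t) a & DEMO_ALIGN_MASK) == 0)
           ? 0
           : ((DEMO_ALIGNMENT - ((size_t) a & DEMO_ALIGN_MASK)) & DEMO_ALIGN_MASK);
}

int main(void)
{
    char buf[32];
    int i;
    for (i = 0; i < 4; ++i)   /* offsets shrink as we approach the boundary */
        printf("%p -> +%zu\n", (void *) (buf + i), demo_align_offset(buf + i));
    return 0;
}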
#define MFAIL ((void *) (MAX_SIZE_T))
#define CMFAIL ((char *) (MFAIL))
#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ | PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS (MAP_PRIVATE)
static int dev_zero_fd = -1;
#define MMAP_DEFAULT(s) \
    ((dev_zero_fd < 0) ? \
     (dev_zero_fd = open("/dev/zero", O_RDWR), \
      mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
     mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
    void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
    return (ptr != 0) ? ptr : MFAIL;
static FORCEINLINE void* win32direct_mmap(size_t size)
    void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
    return (ptr != 0) ? ptr : MFAIL;
static FORCEINLINE int win32munmap(void *ptr, size_t size)
    MEMORY_BASIC_INFORMATION minfo;
    char *cptr = (char *) ptr;
    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
    cptr += minfo.RegionSize;
    size -= minfo.RegionSize;
#define MMAP_DEFAULT(s) win32mmap(s)
#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#define CALL_MORECORE(S) MORECORE(S)
#define CALL_MORECORE(S) MORECORE_DEFAULT(S)
#define CALL_MORECORE(S) MFAIL
#define MMAP(s) arax_mmap(s)
#define DIRECT_MMAP(s) MMAP(s)
#define MUNMAP(a, s) arax_ummap(a, s)
#define USE_MMAP_BIT (SIZE_T_ONE)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MMAP(s) MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s) arax_ummap((a), (s))
#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define MMAP(s) MFAIL
#define MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#define USE_NONCONTIGUOUS_BIT (4U)
#define EXTERN_BIT (8U)
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l) (0)
#define DESTROY_LOCK(l) (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl) __sync_lock_release(sl)
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
    __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                          : "r" (val), "m" (*(sl)), "0" (cmp)
    __asm__ __volatile__ ("lock; xchgl %0, %1"
                          : "m" (*(sl)), "0" (prev)
#define CAS_LOCK(sl) x86_cas_lock(sl)
#define CLEAR_LOCK(sl) x86_clear_lock(sl)
#define CAS_LOCK(sl) interlockedexchange(sl, (LONG) 1)
#define CLEAR_LOCK(sl) interlockedexchange(sl, (LONG) 0)
#define SPINS_PER_YIELD 63
#if defined(_MSC_VER)
#define SLEEP_EX_DURATION 50
#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
#elif defined(__SVR4) && defined(__sun)
#define SPIN_LOCK_YIELD thr_yield();
#elif !defined(LACKS_SCHED_H)
#define SPIN_LOCK_YIELD sched_yield();
#define SPIN_LOCK_YIELD
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
static int spin_acquire_lock(int *sl)
    while (*(volatile int *) sl != 0 || CAS_LOCK(sl)) {
        if ((++spins & SPINS_PER_YIELD) == 0) {
#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl) ? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;
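/* Self-contained sketch (ours) of the spin-lock path above: CAS_LOCK maps
 * to __sync_lock_test_and_set as in the GCC branch, the acquire loop
 * test-and-test-and-sets, and every SPINS_PER_YIELD-th failed spin yields
 * the CPU, matching the sched_yield() flavor of SPIN_LOCK_YIELD.
 * Usage: demo_spin_acquire(&demo_lock); ... demo_spin_release(&demo_lock); */
#include <sched.h>

#define DEMO_SPINS_PER_YIELD 63

static int demo_lock = 0;

static int demo_spin_acquire(int *sl)
{
    int spins = 0;
    /* read first, then attempt the atomic swap only when it looks free */
    while (*(volatile int *) sl != 0 || __sync_lock_test_and_set(sl, 1)) {
        if ((++spins & DEMO_SPINS_PER_YIELD) == 0)
            sched_yield();          /* back off occasionally */
    }
    return 0;
}

static void demo_spin_release(int *sl)
{
    __sync_lock_release(sl);        /* CLEAR_LOCK */
}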
#define THREAD_ID_T DWORD
#define CURRENT_THREAD GetCurrentThreadId()
#define EQ_OWNER(X, Y) ((X) == (Y))
#define THREAD_ID_T pthread_t
#define CURRENT_THREAD pthread_self()
#define EQ_OWNER(X, Y) pthread_equal(X, Y)
struct malloc_recursive_lock
    THREAD_ID_T threadid;
#define MLOCK_T struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T) 0 };
static FORCEINLINE void recursive_release_lock(MLOCK_T *lk)
    CLEAR_LOCK(&lk->sl);
static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk)
    THREAD_ID_T mythreadid = CURRENT_THREAD;
    if (*((volatile int *) (&lk->sl)) == 0) {
        if (!CAS_LOCK(&lk->sl)) {
            lk->threadid = mythreadid;
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
        if ((++spins & SPINS_PER_YIELD) == 0) {
static FORCEINLINE int recursive_try_lock(MLOCK_T *lk)
    THREAD_ID_T mythreadid = CURRENT_THREAD;
    if (*((volatile int *) (&lk->sl)) == 0) {
        if (!CAS_LOCK(&lk->sl)) {
            lk->threadid = mythreadid;
    }
    else if (EQ_OWNER(lk->threadid, mythreadid)) {
#define RELEASE_LOCK(lk) recursive_release_lock(lk)
#define TRY_LOCK(lk) recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T) 0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk) (0)
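/* Sketch (ours) of the recursive-lock discipline above on a complete
 * struct: the sl word is taken with CAS, the winner records its thread id,
 * and re-entry by the owner only bumps the count `c` implied by
 * INITIAL_LOCK; the last release clears the spin word. */
#include <pthread.h>

struct demo_recursive_lock {
    int sl;                /* spin word */
    unsigned int c;        /* recursion count */
    pthread_t threadid;    /* current owner */
};

static int demo_recursive_acquire(struct demo_recursive_lock *lk)
{
    pthread_t me = pthread_self();
    for (;;) {
        if (*(volatile int *) &lk->sl == 0) {
            if (!__sync_lock_test_and_set(&lk->sl, 1)) {
                lk->threadid = me;
                lk->c = 1;
                return 0;
            }
        } else if (pthread_equal(lk->threadid, me)) {
            ++lk->c;       /* owner re-entry, no atomics needed */
            return 0;
        }
    }
}

static void demo_recursive_release(struct demo_recursive_lock *lk)
{
    if (--lk->c == 0)
        __sync_lock_release(&lk->sl);
}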
#define MLOCK_T CRITICAL_SECTION
#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000 | 4000))
#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;
static void init_malloc_global_mutex()
    long stat = malloc_global_mutex_status;
    interlockedcompareexchange(&malloc_global_mutex_status, (LONG) -1, (LONG) 0) == 0)
    InitializeCriticalSection(&malloc_global_mutex);
    interlockedexchange(&malloc_global_mutex_status, (LONG) 1);
#define MLOCK_T pthread_mutex_t
#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk) pthread_init_lock(lk)
#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
extern int pthread_mutexattr_setkind_np __P((pthread_mutexattr_t *__attr,
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x, y) pthread_mutexattr_setkind_np(x, y)
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pthread_init_lock(MLOCK_T *lk)
    pthread_mutexattr_t attr;
    if (pthread_mutexattr_init(&attr)) return 1;
    #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
    if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
    if (pthread_mutex_init(lk, &attr)) return 1;
    if (pthread_mutexattr_destroy(&attr)) return 1;
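/* Standalone version (ours) of the pthread_init_lock pattern above:
 * POSIX requires a mutexattr to request a recursive mutex, and the
 * attribute object can be destroyed once the mutex is initialized. */
#include <pthread.h>

static int demo_pthread_init_lock(pthread_mutex_t *lk)
{
    pthread_mutexattr_t attr;
    if (pthread_mutexattr_init(&attr))
        return 1;
    if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
        return 1;
    if (pthread_mutex_init(lk, &attr))
        return 1;
    return pthread_mutexattr_destroy(&attr) ? 1 : 0;
}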
#define USE_LOCK_BIT (2U)
#ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
#define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
#ifndef RELEASE_MALLOC_GLOBAL_LOCK
#define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
#define MCHUNK_SIZE (sizeof(mchunk))
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
#define MIN_CHUNK_SIZE \
    ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define chunk2mem(p) ((void *) ((char *) (p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr) ((char *) (mem) - TWO_SIZE_T_SIZES))
#define align_as_chunk(A) (mchunkptr) ((A) + align_offset(chunk2mem(A)))
#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
#define pad_request(req) \
    (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
#define request2size(req) \
    (((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req))
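/* Worked example (ours) of the request-padding arithmetic above for a
 * typical 64-bit, non-FOOTERS build: CHUNK_OVERHEAD = 8, MALLOC_ALIGNMENT
 * = 16, so MIN_CHUNK_SIZE = 32 and MIN_REQUEST = 23. The constants are
 * illustrative assumptions, not taken from a particular build. */
#include <stdio.h>
#include <stddef.h>

#define D_OVERHEAD   ((size_t) 8U)
#define D_ALIGN_MASK ((size_t) 15U)
#define D_MIN_CHUNK  ((size_t) 32U)
#define D_MIN_REQ    (D_MIN_CHUNK - D_OVERHEAD - 1)

static size_t demo_request2size(size_t req)
{
    return (req < D_MIN_REQ)
           ? D_MIN_CHUNK
           : ((req + D_OVERHEAD + D_ALIGN_MASK) & ~D_ALIGN_MASK);
}

int main(void)
{
    size_t r;
    for (r = 1; r <= 64; r <<= 1)   /* 1, 2, 4, ..., 64 */
        printf("request %2zu -> chunk %2zu bytes\n", r, demo_request2size(r));
    return 0;
}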
#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define FLAG4_BIT (SIZE_T_FOUR)
#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT)
#define FLAG_BITS (PINUSE_BIT | CINUSE_BIT | FLAG4_BIT)
#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE)
#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define flag4inuse(p) ((p)->head & FLAG4_BIT)
#define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
#define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
#define chunksize(p) ((p)->head & ~(FLAG_BITS))
#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define set_flag4(p) ((p)->head |= FLAG4_BIT)
#define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
#define chunk_plus_offset(p, s) ((mchunkptr) (((char *) (p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr) (((char *) (p)) - (s)))
#define next_chunk(p) ((mchunkptr) (((char *) (p)) + ((p)->head & ~FLAG_BITS)))
#define prev_chunk(p) ((mchunkptr) (((char *) (p)) - ((p)->prev_foot)))
#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
#define get_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr) ((char *) (p) + (s)))->prev_foot = (s))
#define set_size_and_pinuse_of_free_chunk(p, s) \
    ((p)->head = (s | PINUSE_BIT), set_foot(p, s))
#define set_free_with_pinuse(p, s, n) \
    (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
#define overhead_for(p) \
    (is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
#define calloc_must_clear(p) (!is_mmapped(p))
#define calloc_must_clear(p) (1)
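/* Sketch (ours) of the head-word encoding the macros above rely on: the
 * chunk size lives in the high bits and PINUSE/CINUSE occupy the low two,
 * which is why chunksize() masks FLAG_BITS off; both INUSE bits clear
 * signals an mmapped chunk. The minimal struct stands in for malloc_chunk. */
#include <stdio.h>
#include <stddef.h>
#include <assert.h>

#define D_PINUSE ((size_t) 1U)
#define D_CINUSE ((size_t) 2U)
#define D_FLAG4  ((size_t) 4U)
#define D_FLAGS  (D_PINUSE | D_CINUSE | D_FLAG4)

struct demo_chunk { size_t prev_foot; size_t head; };

int main(void)
{
    struct demo_chunk c;
    c.head = 48 | D_PINUSE | D_CINUSE;  /* 48-byte in-use chunk, prev in use */

    assert((c.head & ~D_FLAGS) == 48);              /* chunksize() */
    assert((c.head & (D_PINUSE | D_CINUSE)) != 0);  /* so not mmapped */
    printf("size=%zu cinuse=%d pinuse=%d\n", c.head & ~D_FLAGS,
           !!(c.head & D_CINUSE), !!(c.head & D_PINUSE));
    return 0;
}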
#define leftmost_child(t) ((t)->child[0] != 0 ? (t)->child[0] : (t)->child[1])
#define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
#define ensure_initialization() (void) (mparams.magic != 0 || init_mparams())
#define is_global(M) ((M) == &_gm_)
#define is_initialized(M) ((M)->top != 0)
#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
#define disable_lock(M)
#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
#define disable_mmap(M)
#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
#define set_lock(M, L) \
    ((M)->mflags = (L) ? \
     ((M)->mflags | USE_LOCK_BIT) : \
     ((M)->mflags & ~USE_LOCK_BIT))
#define page_align(S) \
    (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
#define granularity_align(S) \
    (((S) + (mparams.granularity - SIZE_T_ONE)) \
     & ~(mparams.granularity - SIZE_T_ONE))
#define mmap_align(S) granularity_align(S)
#define mmap_align(S) page_align(S)
#define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
#define is_page_aligned(S) \
    (((size_t) (S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S) \
    (((size_t) (S) & (mparams.granularity - SIZE_T_ONE)) == 0)
#define segment_holds(S, A) \
    ((char *) (A) >= S->base && (char *) (A) < S->base + S->size)
    if (addr >= sp->base && addr < sp->base + sp->size)
    if ((sp = sp->next) == 0)
    if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
    if ((sp = sp->next) == 0)
#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M, s) ((s) > (M)->trim_check)
#define should_trim(M, s) (0)
#define TOP_FOOT_SIZE \
    (align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + MIN_CHUNK_SIZE)
#define PREACTION(M) ((use_lock(M)) ? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
#define PREACTION(M) (0)
#define POSTACTION(M)
int malloc_corruption_error_count;
static void reset_on_error(mstate m);
#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m, p) ABORT
#define check_free_chunk(M, P)
#define check_inuse_chunk(M, P)
#define check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P)
#define check_malloc_state(M)
#define check_top_chunk(M, P)
#define check_free_chunk(M, P) do_check_free_chunk(M, P)
#define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P)
#define check_top_chunk(M, P) do_check_top_chunk(M, P)
#define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P)
#define check_malloc_state(M) do_check_malloc_state(M)
static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
static void do_check_malloc_state(mstate m);
static size_t traverse_and_check(mstate m);
#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) (bindex_t) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
#define smallbin_at(M, i) ((sbinptr) ((char *) &((M)->smallbins[(i) << 1])))
#define treebin_at(M, i) (&((M)->treebins[i]))
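/* Worked example (ours) of the small/large split above: with
 * SMALLBIN_SHIFT = 3 a chunk's bin index is its size in 8-byte strides,
 * and anything at or above MIN_LARGE_SIZE (256) falls through to the
 * tree bins. */
#include <stdio.h>
#include <stddef.h>

#define D_SMALLBIN_SHIFT 3U
#define D_NSMALLBINS     32U

int main(void)
{
    size_t s;
    for (s = 32; s <= 512; s <<= 1) {
        size_t idx = s >> D_SMALLBIN_SHIFT;
        if (idx < D_NSMALLBINS)
            printf("chunk %3zu -> smallbin %zu\n", s, idx);
        else
            printf("chunk %3zu -> tree bins\n", s);
    }
    return 0;
}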
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_tree_index(S, I) \
    unsigned int X = S >> TREEBIN_SHIFT; \
    else if (X > 0xFFFF) \
        I = NTREEBINS - 1; \
    unsigned int K = (unsigned) sizeof(X) * __CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
    I = (bindex_t) ((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
#elif defined(__INTEL_COMPILER)
#define compute_tree_index(S, I) \
    size_t X = S >> TREEBIN_SHIFT; \
    else if (X > 0xFFFF) \
        I = NTREEBINS - 1; \
    unsigned int K = _bit_scan_reverse(X); \
    I = (bindex_t) ((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
#elif defined(_MSC_VER) && _MSC_VER >= 1300
#define compute_tree_index(S, I) \
    size_t X = S >> TREEBIN_SHIFT; \
    else if (X > 0xFFFF) \
        I = NTREEBINS - 1; \
    _BitScanReverse((DWORD *) &K, (DWORD) X); \
    I = (bindex_t) ((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
#define compute_tree_index(S, I) \
    size_t X = S >> TREEBIN_SHIFT; \
    else if (X > 0xFFFF) \
        I = NTREEBINS - 1; \
    unsigned int Y = (unsigned int) X; \
    unsigned int N = ((Y - 0x100) >> 16) & 8; \
    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4; \
    N += K = (((Y <<= K) - 0x4000) >> 16) & 2; \
    K = 14 - N + ((Y <<= K) >> 15); \
    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1)); \
#define bit_for_tree_index(i) \
    (i == NTREEBINS - 1) ? (SIZE_T_BITSIZE - 1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
#define leftshift_for_tree_index(i) \
    ((i == NTREEBINS - 1) ? 0 : \
     ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
#define minsize_for_tree_index(i) \
    ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) \
     | (((size_t) ((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
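/* Standalone sketch (ours) of compute_tree_index via the GCC branch above:
 * K is the position of the highest set bit of (S >> TREEBIN_SHIFT), and
 * the bit just below it picks between the two bins that share that power
 * of two, giving two tree bins per size doubling. */
#include <stdio.h>
#include <stddef.h>

#define D_TREEBIN_SHIFT 8U
#define D_NTREEBINS     32U

static unsigned int demo_tree_index(size_t s)
{
    unsigned int x = (unsigned int) (s >> D_TREEBIN_SHIFT);

    if (x == 0)
        return 0;
    if (x > 0xFFFF)
        return D_NTREEBINS - 1;
    {
        unsigned int k = 31U - (unsigned int) __builtin_clz(x);
        return (k << 1) + ((unsigned int) (s >> (k + (D_TREEBIN_SHIFT - 1))) & 1U);
    }
}

int main(void)
{
    size_t sizes[] = { 256, 384, 512, 768, 1024 };
    int i;
    for (i = 0; i < 5; ++i)
        printf("size %4zu -> treebin %u\n", sizes[i], demo_tree_index(sizes[i]));
    return 0;
}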
#define idx2bit(i) ((binmap_t) (1) << (i))
#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i))
#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i))
#define least_bit(x) ((x) & -(x))
#define left_bits(x) ((x << 1) | -(x << 1))
#define same_or_left_bits(x) ((x) | -(x))
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
#define compute_bit2idx(X, I) \
    J = __builtin_ctz(X); \
#elif defined(__INTEL_COMPILER)
#define compute_bit2idx(X, I) \
    J = _bit_scan_forward(X); \
#elif defined(_MSC_VER) && _MSC_VER >= 1300
#define compute_bit2idx(X, I) \
    _BitScanForward((DWORD *) &J, X); \
#elif USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X) - 1
#define compute_bit2idx(X, I) \
    unsigned int Y = X - 1; \
    unsigned int K = Y >> (16 - 4) & 16; \
    unsigned int N = K; Y >>= K; \
    N += K = Y >> (8 - 3) & 8; Y >>= K; \
    N += K = Y >> (4 - 2) & 4; Y >>= K; \
    N += K = Y >> (2 - 1) & 2; Y >>= K; \
    N += K = Y >> (1 - 0) & 1; Y >>= K; \
    I = (bindex_t) (N + Y); \
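/* Sketch (ours) exercising the portable compute_bit2idx fallback above:
 * for a single-bit map X it recovers the bit index with a five-step
 * binary reduction, agreeing with the __builtin_ctz path for every bit. */
#include <stdio.h>
#include <assert.h>

static unsigned int demo_bit2idx(unsigned int x)
{
    unsigned int y = x - 1;                    /* ones below the set bit */
    unsigned int k = (y >> (16 - 4)) & 16;
    unsigned int n = k; y >>= k;
    n += k = (y >> (8 - 3)) & 8; y >>= k;
    n += k = (y >> (4 - 2)) & 4; y >>= k;
    n += k = (y >> (2 - 1)) & 2; y >>= k;
    n += k = (y >> (1 - 0)) & 1; y >>= k;
    return n + y;
}

int main(void)
{
    unsigned int i;
    for (i = 0; i < 32; ++i)
        assert(demo_bit2idx(1U << i) == i);
    printf("fallback matches __builtin_ctz on all 32 one-bit maps\n");
    return 0;
}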
#define ok_address(M, a) ((char *) (a) >= (M)->least_addr)
#define ok_next(p, n) ((char *) (p) < (char *) (n))
#define ok_inuse(p) is_inuse(p)
#define ok_pinuse(p) pinuse(p)
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_inuse(p) (1)
#define ok_pinuse(p) (1)
#if (FOOTERS && !INSECURE)
#define ok_magic(M) ((M)->magic == mparams.magic)
#define ok_magic(M) (1)
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#define RTCHECK(e) (e)
#define RTCHECK(e) (1)
#define mark_inuse_foot(M, p, s)
#define set_inuse(M, p, s) \
    ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
     ((mchunkptr) (((char *) (p)) + (s)))->head |= PINUSE_BIT)
#define set_inuse_and_pinuse(M, p, s) \
    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
     ((mchunkptr) (((char *) (p)) + (s)))->head |= PINUSE_BIT)
#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT))
#define mark_inuse_foot(M, p, s) \
    (((mchunkptr) ((char *) (p) + (s)))->prev_foot = ((size_t) (M) ^ mparams.magic))
#define get_mstate_for(p) \
    ((mstate) (((mchunkptr) ((char *) (p) \
                             + (chunksize(p))))->prev_foot ^ mparams.magic))
#define set_inuse(M, p, s) \
    ((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
     (((mchunkptr) (((char *) (p)) + (s)))->head |= PINUSE_BIT), \
     mark_inuse_foot(M, p, s))
#define set_inuse_and_pinuse(M, p, s) \
    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
     (((mchunkptr) (((char *) (p)) + (s)))->head |= PINUSE_BIT), \
     mark_inuse_foot(M, p, s))
#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
    ((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
     mark_inuse_foot(M, p, s))
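/* Sketch (ours) of the FOOTERS scheme above: mark_inuse_foot stores the
 * owning mstate XORed with mparams.magic in the *next* chunk's prev_foot,
 * and get_mstate_for undoes the XOR, so each chunk can be traced back to
 * (and weakly authenticated against) its arena. Types are stand-ins. */
#include <stdio.h>
#include <stddef.h>
#include <assert.h>

struct demo_state { int unused; };

int main(void)
{
    struct demo_state arena;
    size_t magic = ((size_t) 0x55555555U | 8U) & ~(size_t) 7U;  /* magic-style */

    size_t foot = (size_t) &arena ^ magic;               /* mark_inuse_foot */
    struct demo_state *owner = (struct demo_state *) (foot ^ magic);

    assert(owner == &arena);                             /* get_mstate_for */
    printf("recovered arena at %p\n", (void *) owner);
    return 0;
}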
static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
    #ifdef NEED_GLOBAL_LOCK_INIT
    if (malloc_global_mutex_status <= 0)
        init_malloc_global_mutex();
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    psize = system_info.dwPageSize;
    if ((sizeof(size_t) != sizeof(char *)) ||
        (sizeof(int) < 4) ||
    #if MORECORE_CONTIGUOUS
    pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
    unsigned char buf[sizeof(size_t)];
    if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
        read(fd, buf, sizeof(buf)) == sizeof(buf))
        magic = *((size_t *) buf);
    magic = (size_t) (GetTickCount() ^ (size_t) 0x55555555U);
    #elif defined(LACKS_TIME_H)
    magic = (size_t) &magic ^ (size_t) 0x55555555U;
    magic = (size_t) (time(0) ^ (size_t) 0x55555555U);
    magic |= (size_t) 8U;
    magic &= ~(size_t) 7U;
    val = (value == -1) ? MAX_SIZE_T : (size_t) value;
    switch (param_number) {
    if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
    do_check_any_chunk(m, p);
    do_check_mmapped_chunk(m, p);
    do_check_any_chunk(m, p);
    if (p != m->dv && p != m->top) {
static void do_check_malloced_chunk(mstate m, void *mem, size_t s)
    do_check_inuse_chunk(m, p);
    if (u->child[0] != 0) {
        do_check_tree(m, u->child[0]);
    if (u->child[1] != 0) {
        do_check_tree(m, u->child[1]);
    int empty = (m->treemap & (1U << i)) == 0;
    do_check_tree(m, t);
    unsigned int empty = (m->smallmap & (1U << i)) == 0;
    for (; p != b; p = p->bk) {
        do_check_free_chunk(m, p);
        do_check_inuse_chunk(m, q);
    } while ((p = p->fd) != b);
    while (t != 0 && chunksize(t) != size) {
    } while ((u = u->fd) != t);
static size_t traverse_and_check(mstate m)
    do_check_inuse_chunk(m, q);
    assert(q == m->dv || bin_find(m, q));
    do_check_free_chunk(m, q);
static void do_check_malloc_state(mstate m)
    do_check_smallbin(m, i);
    do_check_treebin(m, i);
    do_check_any_chunk(m, m->dv);
    do_check_top_chunk(m, m->top);
    total = traverse_and_check(m);
    struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long) (maxfp));
    fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
    fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
#define insert_small_chunk(M, P, S) \
    bindex_t I = small_index(S); \
    mchunkptr B = smallbin_at(M, I); \
    assert(S >= MIN_CHUNK_SIZE); \
    if (!smallmap_is_marked(M, I)) \
        mark_smallmap(M, I); \
    else if (RTCHECK(ok_address(M, B->fd))) \
        CORRUPTION_ERROR_ACTION(M); \
#define unlink_small_chunk(M, P, S) \
    mchunkptr F = P->fd; \
    mchunkptr B = P->bk; \
    bindex_t I = small_index(S); \
    assert(chunksize(P) == small_index2size(I)); \
    if (RTCHECK(F == smallbin_at(M, I) || (ok_address(M, F) && F->bk == P))) { \
        clear_smallmap(M, I); \
    else if (RTCHECK(B == smallbin_at(M, I) || \
                     (ok_address(M, B) && B->fd == P))) { \
        CORRUPTION_ERROR_ACTION(M); \
        CORRUPTION_ERROR_ACTION(M); \
#define unlink_first_small_chunk(M, B, P, I) \
    mchunkptr F = P->fd; \
    assert(chunksize(P) == small_index2size(I)); \
        clear_smallmap(M, I); \
    else if (RTCHECK(ok_address(M, F) && F->bk == P)) { \
        CORRUPTION_ERROR_ACTION(M); \
#define replace_dv(M, P, S) \
    size_t DVS = M->dvsize; \
    assert(is_small(DVS)); \
    mchunkptr DV = M->dv; \
    insert_small_chunk(M, DV, DVS); \
#define insert_large_chunk(M, X, S) \
    compute_tree_index(S, I); \
    H = treebin_at(M, I); \
    X->child[0] = X->child[1] = 0; \
    if (!treemap_is_marked(M, I)) { \
        mark_treemap(M, I); \
        X->parent = (tchunkptr) H; \
        X->fd = X->bk = X; \
        size_t K = S << leftshift_for_tree_index(I); \
        if (chunksize(T) != S) { \
            tchunkptr *C = &(T->child[(K >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1]); \
        else if (RTCHECK(ok_address(M, C))) { \
            X->fd = X->bk = X; \
            CORRUPTION_ERROR_ACTION(M); \
        tchunkptr F = T->fd; \
        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) { \
            T->fd = F->bk = X; \
            CORRUPTION_ERROR_ACTION(M); \
#define unlink_large_chunk(M, X) \
    tchunkptr XP = X->parent; \
    tchunkptr F = X->fd; \
    if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) { \
        CORRUPTION_ERROR_ACTION(M); \
    if (((R = *(RP = &(X->child[1]))) != 0) || \
        ((R = *(RP = &(X->child[0]))) != 0)) { \
        while ((*(CP = &(R->child[1])) != 0) || \
               (*(CP = &(R->child[0])) != 0)) { \
        if (RTCHECK(ok_address(M, RP))) \
            CORRUPTION_ERROR_ACTION(M); \
        tbinptr *H = treebin_at(M, X->index); \
        if ((*H = R) == 0) \
            clear_treemap(M, X->index); \
    else if (RTCHECK(ok_address(M, XP))) { \
        if (XP->child[0] == X) \
        CORRUPTION_ERROR_ACTION(M); \
    if (RTCHECK(ok_address(M, R))) { \
        if ((C0 = X->child[0]) != 0) { \
            if (RTCHECK(ok_address(M, C0))) { \
                CORRUPTION_ERROR_ACTION(M); \
        if ((C1 = X->child[1]) != 0) { \
            if (RTCHECK(ok_address(M, C1))) { \
                CORRUPTION_ERROR_ACTION(M); \
        CORRUPTION_ERROR_ACTION(M); \
#define insert_chunk(M, P, S) \
    if (is_small(S)) insert_small_chunk(M, P, S) \
    else { tchunkptr TP = (tchunkptr) (P); insert_large_chunk(M, TP, S); }
#define unlink_chunk(M, P, S) \
    if (is_small(S)) unlink_small_chunk(M, P, S) \
    else { tchunkptr TP = (tchunkptr) (P); unlink_large_chunk(M, TP); }
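/* Minimal sketch (ours) of the circular doubly linked bin discipline behind
 * insert_small_chunk/unlink_small_chunk above, without the bitmap
 * bookkeeping and RTCHECK corruption tests: the bin header is a sentinel
 * node, insertion splices at the front, unlinking re-stitches fd/bk. */
#include <stdio.h>

struct demo_link { struct demo_link *fd, *bk; };

static void bin_init(struct demo_link *bin) { bin->fd = bin->bk = bin; }

static void bin_insert(struct demo_link *bin, struct demo_link *p)
{
    struct demo_link *f = bin->fd;   /* current front */
    p->fd = f;  p->bk = bin;
    f->bk = p;  bin->fd = p;
}

static void bin_unlink(struct demo_link *p)
{
    p->fd->bk = p->bk;
    p->bk->fd = p->fd;
}

int main(void)
{
    struct demo_link bin, a, b;
    bin_init(&bin);
    bin_insert(&bin, &a);
    bin_insert(&bin, &b);
    bin_unlink(&b);
    printf("front is a: %d\n", bin.fd == &a);
    bin_unlink(&a);
    printf("bin empty: %d\n", bin.fd == &bin);
    return 0;
}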
#define internal_malloc(m, b) mspace_malloc(m, b)
#define internal_free(m, mem) mspace_free(m, mem);
#define internal_malloc(m, b) \
    ((m == gm) ? dlmalloc(b) : mspace_malloc(m, b))
#define internal_free(m, mem) \
    if (m == gm) dlfree(mem); else mspace_free(m, mem);
#define internal_malloc(m, b) dlmalloc(b)
#define internal_free(m, mem) dlfree(mem)
    if (m->least_addr == 0 || mm < m->least_addr)
    (oldsize - nb) <= (mparams.granularity << 1))
    char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
                                    oldmmsize, newmmsize, flags);
    if (cp < m->least_addr)
    bin->fd = bin->bk = bin;
static void reset_on_error(mstate m)
    ++malloc_corruption_error_count;
    size_t psize = (char *) oldfirst - (char *) p;
    size_t qsize = psize - nb;
    assert((char *) oldfirst > (char *) q);
    if (oldfirst == m->top) {
        size_t tsize = m->topsize += qsize;
    } else if (oldfirst == m->dv) {
        size_t dsize = m->dvsize += qsize;
    char *old_top = (char *) m->top;
    char *old_end = oldsp->base + oldsp->size;
    char *asp = rawsp + offset;
    if ((char *) (&(nextp->head)) < old_end)
    if (csp != old_top) {
        size_t psize = csp - old_top;
    size_t ssize = asize;
    ssize += (page_align((size_t) base) - (size_t) base);
    (fp > m->footprint && fp <= m->footprint_limit)) &&
    size_t ssize = end - br;
    if (m->least_addr == 0 || tbase < m->least_addr)
    while (sp != 0 && tbase != sp->base + sp->size)
    if (tbase < m->least_addr)
    while (sp != 0 && sp->base != tbase + tsize)
    char *oldbase = sp->base;
    if (nb < m->topsize) {
        size_t rsize = m->topsize -= nb;
    size_t released = 0;
    char *base = sp->base;
    size_t size = sp->size;
    size_t released = 0;
    size_t unit = mparams.granularity;
    sp->size >= extra &&
    size_t newsize = sp->size - extra;
    if (old_br == sp->base + sp->size) {
    if (rel_br != CMFAIL && new_br < old_br)
        released = old_br - new_br;
    if (released != 0) {
        sp->size -= released;
    return (released != 0) ? 1 : 0;
    if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
    if (next == m->top) {
        size_t tsize = m->topsize += psize;
    } else if (next == m->dv) {
        size_t dsize = m->dvsize += psize;
    if ((rsize = trem) == 0)
    if (rt != 0 && rt != t)
    if (t == 0 && v == 0) {
        if (leftbits != 0) {
    if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
    smallbits = gm->smallmap >> idx;
    if ((smallbits & 0x3U) != 0) {
        idx += ~smallbits & 1;
    } else if (nb > gm->dvsize) {
        if (smallbits != 0) {
    if (nb <= gm->dvsize) {
        size_t rsize = gm->dvsize - nb;
        size_t dvs = gm->dvsize;
    } else if (nb < gm->topsize) {
        size_t rsize = gm->topsize -= nb;
    if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
        fm->footprint -= psize;
    if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
    } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
    if (--fm->release_checks == 0)
    if (n_elements != 0) {
        req = n_elements * elem_size;
        if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
            (req / n_elements != elem_size))
    memset(mem, 0, req);
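/* The guarded multiplication above in isolation (ours): dlcalloc skips the
 * division-based overflow test when both operands fit in 16 bits, since
 * their product then cannot wrap even a 32-bit size_t. */
#include <stdio.h>
#include <stddef.h>

static int demo_calloc_overflows(size_t n, size_t size, size_t *req)
{
    *req = n * size;
    return n != 0 &&
           (((n | size) & ~(size_t) 0xffff) != 0) &&
           (*req / n != size);
}

int main(void)
{
    size_t req;
    printf("64 x 64          overflow: %d\n",
           demo_calloc_overflows(64, 64, &req));
    printf("(SIZE_MAX/2) x 4 overflow: %d\n",
           demo_calloc_overflows((~(size_t) 0) / 2, 4, &req));
    return 0;
}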
    } else if (oldsize >= nb) {
        size_t rsize = oldsize - nb;
    } else if (next == m->top) {
        if (oldsize + m->topsize > nb) {
            size_t newsize = oldsize + m->topsize;
            size_t newtopsize = newsize - nb;
    } else if (next == m->dv) {
        if (oldsize + dvs >= nb) {
            size_t dsize = oldsize + dvs - nb;
            size_t newsize = oldsize + dvs;
    } else if (!cinuse(next)) {
        if (oldsize + nextsize >= nb) {
            size_t rsize = oldsize + nextsize - nb;
            size_t newsize = oldsize + nextsize;
    if ((alignment & (alignment - SIZE_T_ONE)) != 0) {
        while (a < alignment) a <<= 1;
    if ((((size_t) (mem)) & (alignment - 1)) != 0) {
        char *br = (char *) mem2chunk((size_t) (((size_t) ((char *) mem + alignment
                   br : br + alignment;
        size_t leadsize = pos - (char *) (p);
        size_t newsize = chunksize(p) - leadsize;
        newp->head = newsize;
        size_t remainder_size = size - nb;
        set_inuse(m, remainder, remainder_size);
    assert(((size_t) mem & (alignment - 1)) == 0);
    size_t element_size;
    size_t contents_size;
    size_t remainder_size;
    if (n_elements == 0)
    if (n_elements == 0)
    array_size = request2size(n_elements * (sizeof(void *)));
    contents_size = n_elements * element_size;
    for (i = 0; i != n_elements; ++i)
    size = contents_size + array_size;
    memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
    size_t array_chunk_size;
    array_chunk_size = remainder_size - contents_size;
    marray = (void **) (chunk2mem(array_chunk));
    remainder_size = contents_size;
    if (i != n_elements - 1) {
        if (element_size != 0)
            size = element_size;
        remainder_size -= size;
    if (marray != chunks) {
        if (element_size != 0) {
            assert(remainder_size == element_size);
    for (i = 0; i != n_elements; ++i)
    void **fence = &(array[nelem]);
    for (a = array; a != fence; ++a) {
        if (get_mstate_for(p) != m) {
        if (b != fence && *b == chunk2mem(next)) {
            size_t newsize = chunksize(next) + psize;
#if MALLOC_INSPECT_ALL
static void internal_inspect_all(mstate m,
                                 void (*handler)(void *start,
                                 void *callback_arg),
    for (s = &m->seg; s != 0; s = s->next) {
        start = (void *) ((char *) q + sizeof(struct malloc_chunk));
        if (start < (void *) next)
            handler(start, next, used, arg);
    #ifdef REALLOC_ZERO_BYTES_FREES
    else if (bytes == 0) {
    mstate m = get_mstate_for(oldp);
    memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
    mstate m = get_mstate_for(oldp);
    size_t d = alignment / sizeof(void *);
    size_t r = alignment % sizeof(void *);
    if (r != 0 || d == 0 || (d & (d - SIZE_T_ONE)) != 0) {
    size_t sz = elem_size;
    return ialloc(gm, n_elements, &sz, 3, chunks);
    return ialloc(gm, n_elements, sizes, 0, chunks);
#if MALLOC_INSPECT_ALL
                                void *callback_arg),
    internal_inspect_all(gm, handler, arg);
    return gm->footprint;
    return gm->max_footprint;
    size_t maf = gm->footprint_limit;
    return gm->footprint_limit = result;
static mstate init_user_mstate(char *tbase, size_t tsize)
    memset(m, 0, msize);
mspace create_mspace(size_t capacity, int locked)
    size_t rs = ((capacity == 0) ? mparams.granularity :
    char *tbase = (char *) (CALL_MMAP(tsize));
    m = init_user_mstate(tbase, tsize);
mspace create_mspace_with_base(void *base, size_t capacity, int locked)
    m = init_user_mstate((char *) base, capacity);
int mspace_track_large_chunks(mspace msp, int enable)
size_t destroy_mspace(mspace msp)
    char *base = sp->base;
    size_t size = sp->size;
void* mspace_malloc(mspace msp, size_t bytes)
    if ((smallbits & 0x3U) != 0) {
        idx += ~smallbits & 1;
    } else if (nb > ms->dvsize) {
        if (smallbits != 0) {
    if (nb <= ms->dvsize) {
        size_t rsize = ms->dvsize - nb;
    } else if (nb < ms->topsize) {
        size_t rsize = ms->topsize -= nb;
void mspace_free(mspace msp, void *mem)
    if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
        fm->footprint -= psize;
    if (next == fm->top) {
        size_t tsize = fm->topsize += psize;
    } else if (next == fm->dv) {
        size_t dsize = fm->dvsize += psize;
    if (--fm->release_checks == 0)
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
    if (n_elements != 0) {
        req = n_elements * elem_size;
        if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
            (req / n_elements != elem_size))
    memset(mem, 0, req);
void* mspace_realloc(mspace msp, void *oldmem, size_t bytes)
    mem = mspace_malloc(msp, bytes);
    #ifdef REALLOC_ZERO_BYTES_FREES
    else if (bytes == 0) {
        mspace_free(msp, oldmem);
    mstate m = get_mstate_for(oldp);
    mem = mspace_malloc(m, bytes);
    memcpy(mem, oldmem, (oc < bytes) ? oc : bytes);
    mspace_free(m, oldmem);
void* mspace_realloc_in_place(mspace msp, void *oldmem, size_t bytes)
    mstate m = get_mstate_for(oldp);
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes)
    return mspace_malloc(msp, bytes);
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void *chunks[])
    size_t sz = elem_size;
    return ialloc(ms, n_elements, &sz, 3, chunks);
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void *chunks[])
    return ialloc(ms, n_elements, sizes, 0, chunks);
size_t mspace_bulk_free(mspace msp, void *array[], size_t nelem)
#if MALLOC_INSPECT_ALL
void mspace_inspect_all(mspace msp,
                        void (*handler)(void *start,
                        void *callback_arg),
    internal_inspect_all(ms, handler, arg);
int mspace_trim(mspace msp, size_t pad)
void mspace_malloc_stats(mspace msp)
size_t mspace_footprint(mspace msp)
size_t mspace_max_footprint(mspace msp)
size_t mspace_footprint_limit(mspace msp)
size_t mspace_set_footprint_limit(mspace msp, size_t bytes)
struct mallinfo mspace_mallinfo(mspace msp)
size_t mspace_usable_size(const void *mem)
int mspace_mallopt(int param_number, int value)
/* Symbol index: configuration and helper macros */
#define compute_tree_index(S, I)
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)
#define is_page_aligned(S)
#define DEFAULT_GRANULARITY
#define treemap_is_marked(M, i)
#define dlmalloc_set_footprint_limit
#define dlindependent_calloc
#define set_size_and_pinuse_of_free_chunk(p, s)
#define disable_contiguous(M)
#define set_inuse(M, p, s)
#define internal_free(m, mem)
#define chunk_plus_offset(p, s)
#define CALL_DIRECT_MMAP(s)
#define chunk_minus_offset(p, s)
#define FOUR_SIZE_T_SIZES
#define unlink_large_chunk(M, X)
#define RELEASE_MALLOC_GLOBAL_LOCK()
#define ensure_initialization()
#define segment_holds(S, A)
#define replace_dv(M, P, S)
#define small_index2size(i)
#define DEFAULT_MMAP_THRESHOLD
#define set_free_with_pinuse(p, s, n)
#define unlink_first_small_chunk(M, B, P, I)
#define granularity_align(S)
#define check_inuse_chunk(M, P)
#define is_extern_segment(S)
#define check_free_chunk(M, P)
#define dlrealloc_in_place
#define internal_malloc(m, b)
#define smallmap_is_marked(M, i)
#define CALL_MUNMAP(a, s)
#define minsize_for_tree_index(i)
#define leftmost_child(t)
#define NO_SEGMENT_TRAVERSAL
#define leftshift_for_tree_index(i)
#define set_inuse_and_pinuse(M, p, s)
#define use_noncontiguous(M)
#define MAX_SMALL_REQUEST
#define dlmalloc_footprint
#define smallbin_at(M, i)
#define align_as_chunk(A)
#define should_trim(M, s)
#define dlmalloc_max_footprint
#define is_mmapped_segment(S)
#define check_top_chunk(M, P)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define dlindependent_comalloc
#define calloc_must_clear(p)
#define USAGE_ERROR_ACTION(m, p)
#define check_mmapped_chunk(M, P)
#define request2size(req)
#define dlmalloc_usable_size
#define dlmalloc_inspect_all
#define insert_large_chunk(M, X, S)
#define SYS_ALLOC_PADDING
#define USE_NONCONTIGUOUS_BIT
#define compute_bit2idx(X, I)
#define CORRUPTION_ERROR_ACTION(m)
#define unlink_chunk(M, P, S)
#define malloc_getpagesize
#define MORECORE_CONTIGUOUS
#define insert_small_chunk(M, P, S)
#define check_malloced_chunk(M, P, N)
#define dlmalloc_footprint_limit
#define mark_inuse_foot(M, p, s)
#define MALLOC_FAILURE_ACTION
#define insert_chunk(M, P, S)
#define DEFAULT_TRIM_THRESHOLD
#define MAX_RELEASE_CHECK_RATE
#define is_initialized(M)
#define check_malloc_state(M)
#define CALL_MREMAP(addr, osz, nsz, mv)
#define MALLINFO_FIELD_TYPE

/* Symbol index: static functions and data */
static void init_top(mstate m, mchunkptr p, size_t psize)
static void * tmalloc_small(mstate m, size_t nb)
static void * mmap_alloc(mstate m, size_t nb)
static void dispose_chunk(mstate m, mchunkptr p, size_t psize)
static void * prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
static void * tmalloc_large(mstate m, size_t nb)
static int sys_trim(mstate m, size_t pad)
static void init_bins(mstate m)
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags)
static void ** ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, void *chunks[])
static struct malloc_state _gm_
static void * sys_alloc(mstate m, size_t nb)
static void * internal_memalign(mstate m, size_t alignment, size_t bytes)
static size_t release_unused_segments(mstate m)
static int change_mparam(int param_number, int value)
static size_t internal_bulk_free(mstate m, void *array[], size_t nelem)
static int init_mparams(void)
static int has_segment_link(mstate m, msegmentptr ss)
static struct mallinfo internal_mallinfo(mstate m)
static struct malloc_params mparams
static msegmentptr segment_holding(mstate m, char *addr)
static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
static void internal_malloc_stats(mstate m)
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move)

/* Symbol index: typedefs */
struct malloc_tree_chunk tchunk
struct malloc_tree_chunk * tbinptr
struct malloc_tree_chunk * tchunkptr
struct malloc_chunk mchunk
struct malloc_chunk * mchunkptr
struct malloc_chunk * sbinptr
struct malloc_state * mstate
struct malloc_segment msegment
struct malloc_segment * msegmentptr

/* Symbol index: struct mallinfo fields */
MALLINFO_FIELD_TYPE arena
MALLINFO_FIELD_TYPE ordblks
MALLINFO_FIELD_TYPE smblks
MALLINFO_FIELD_TYPE hblks
MALLINFO_FIELD_TYPE hblkhd
MALLINFO_FIELD_TYPE usmblks
MALLINFO_FIELD_TYPE fsmblks
MALLINFO_FIELD_TYPE uordblks
MALLINFO_FIELD_TYPE fordblks
MALLINFO_FIELD_TYPE keepcost

/* Symbol index: linkage fields */
struct malloc_tree_chunk * fd
struct malloc_tree_chunk * bk
struct malloc_tree_chunk * child[2]
struct malloc_tree_chunk * parent
struct malloc_segment * next
mchunkptr smallbins[(NSMALLBINS + 1) * 2]
tbinptr treebins[NTREEBINS]