/* Platform feature-detection macros for the memory pool.
 * NOTE(review): this extract is incomplete — the matching #endif/#else
 * lines are missing (the embedded original line numbers jump), so each
 * conditional below is shown without its closing lines. */
/* Pick the sysconf() selector for the page size: prefer _SC_PAGE_SIZE,
 * fall back to the alternate POSIX spelling _SC_PAGESIZE. */
40 #if defined(_SC_PAGE_SIZE)
41 #define MHD_SC_PAGESIZE _SC_PAGE_SIZE
42 #elif defined(_SC_PAGESIZE)
43 #define MHD_SC_PAGESIZE _SC_PAGESIZE
/* Map the older BSD spelling MAP_ANON to POSIX MAP_ANONYMOUS. */
48 #if defined(MAP_ANON) && ! defined(MAP_ANONYMOUS)
49 #define MAP_ANONYMOUS MAP_ANON
/* Provide MAP_FAILED when the headers do not: NULL on the branch whose
 * condition is not visible in this extract, otherwise the conventional
 * ((void*) -1) value returned by mmap() on failure. */
52 #define MAP_FAILED NULL
53 #elif ! defined(MAP_FAILED)
54 #define MAP_FAILED ((void*) -1)
/* Pool allocations are aligned to twice the pointer size. */
60 #define ALIGN_SIZE (2 * sizeof(void*))
/* Round n up to the next multiple of ALIGN_SIZE (integer divide then
 * multiply after adding ALIGN_SIZE-1). */
65 #define ROUND_TO_ALIGN(n) (((n) + (ALIGN_SIZE - 1)) \
66 / (ALIGN_SIZE) *(ALIGN_SIZE))
/* Compile-time default page size: PAGE_SIZE if usable, then PAGESIZE,
 * then a hard-coded 4096 fallback.  The "(X + 0) > 0" trick also
 * rejects macros defined empty or non-positive. */
68 #if defined(PAGE_SIZE) && (0 < (PAGE_SIZE + 0))
69 #define MHD_DEF_PAGE_SIZE_ PAGE_SIZE
70 #elif defined(PAGESIZE) && (0 < (PAGESIZE + 0))
71 #define MHD_DEF_PAGE_SIZE_ PAGESIZE
73 #define MHD_DEF_PAGE_SIZE_ (4096)
/* Fragment of MHD_init_mem_pools_(): when a sysconf() selector was
 * detected above, query the real run-time page size.  NOTE(review):
 * the declaration of 'result', the sysconf failure check, and the
 * non-sysconf fallback are missing from this extract. */
87 #ifdef MHD_SC_PAGESIZE
89 result = sysconf (MHD_SC_PAGESIZE);
/* Fragment of MHD_pool_create (max): allocates the pool control
 * structure, then backs its buffer with anonymous mmap() (POSIX),
 * VirtualAlloc() (Windows) or plain malloc() as a fallback;
 * pool->is_mmap records which path provided the memory.
 * NOTE(review): many interior lines (NULL checks, size rounding,
 * remaining call arguments) are missing from this extract. */
148 struct MemoryPool *pool;
152 pool = malloc (
sizeof (
struct MemoryPool));
/* Small pools (<= 32 KiB) skip the page-mapping path; the rest of
 * this condition is not visible here. */
155 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
156 if ( (max <= 32 * 1024) ||
/* POSIX: private, anonymous, read/write mapping. */
166 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
167 pool->memory = mmap (
NULL,
169 PROT_READ | PROT_WRITE,
170 MAP_PRIVATE | MAP_ANONYMOUS,
/* Windows: reserve and commit the region in one call. */
173 #elif defined(_WIN32)
174 pool->memory = VirtualAlloc (
NULL,
176 MEM_COMMIT | MEM_RESERVE,
/* Fallback (or mmap/VirtualAlloc failure path): plain heap memory. */
186 pool->memory = malloc (alloc_size);
187 if (
NULL == pool->memory)
192 pool->is_mmap =
false;
194 #if defined(MAP_ANONYMOUS) || defined(_WIN32)
197 pool->is_mmap =
true;
/* Record the buffer size; 'end' is the back-allocation cursor and
 * starts at the full size. */
202 pool->end = alloc_size;
203 pool->size = alloc_size;
/* Fragment of MHD_pool_destroy(): releases the buffer with the
 * deallocator matching how it was obtained — munmap() on POSIX,
 * VirtualFree() on Windows; the free()-based branch and the trailing
 * call arguments are missing from this extract. */
220 mhd_assert (pool->size >= pool->end - pool->pos);
224 #if defined(MAP_ANONYMOUS) && ! defined(_WIN32)
225 munmap (pool->memory,
227 #elif defined(_WIN32)
228 VirtualFree (pool->memory,
/* Fragment of MHD_pool_get_free(): the free space is the gap between
 * the forward cursor 'pos' and the back cursor 'end'; the assert
 * checks that this gap never exceeds the buffer size. */
248 mhd_assert (pool->size >= pool->end - pool->pos);
249 return (pool->end - pool->pos);
/* Fragment of MHD_pool_allocate(): 'asize' is presumably 'size'
 * rounded up for alignment (the rounding line is not in this extract).
 * Blocks are carved either from the back ('end') or the front ('pos')
 * of the free gap; the from_end test and cursor updates are missing. */
273 mhd_assert (pool->size >= pool->end - pool->pos);
/* asize == 0 while size != 0 means the alignment rounding overflowed. */
275 if ( (0 == asize) && (0 != size) )
/* Fail when the request does not fit, or pos + asize wrapped around. */
277 if ( (pool->pos + asize > pool->end) ||
278 (pool->pos + asize < pool->pos))
/* Back allocation: take the last 'asize' bytes of the free gap. */
282 ret = &pool->memory[pool->end - asize];
/* Front allocation: take bytes starting at 'pos'. */
287 ret = &pool->memory[pool->pos];
/* Fragment of MHD_pool_reallocate (old, old_size, new_size): resizes
 * 'old' in place when possible, otherwise allocates a fresh block at
 * 'pos', copies the payload, and scrubs the abandoned bytes.
 * NOTE(review): the computation of new_apos/asize and the early-return
 * lines are missing from this extract. */
321 mhd_assert (pool->size >= pool->end - pool->pos);
/* 'old' (when non-NULL) must lie inside this pool's buffer. */
324 mhd_assert (old ==
NULL || pool->memory + pool->size >= (uint8_t*) old
328 pool->memory + pool->pos > (uint8_t*) old);
332 const size_t old_offset = (uint8_t*) old - pool->memory;
333 const bool shrinking = (old_size > new_size);
/* When shrinking, zero the tail being released. */
337 memset ((uint8_t*) old + new_size, 0, old_size - new_size);
/* In-place grow: reject if the new forward cursor would pass 'end'
 * or wrapped around below the current 'pos'. */
344 if ( (new_apos > pool->end) ||
345 (new_apos < pool->pos) )
349 pool->pos = new_apos;
/* Relocation path: rounding overflow (asize == 0) or not enough room
 * in the free gap. */
357 if ( ( (0 == asize) &&
359 (asize > pool->end - pool->pos) )
362 new_blc = pool->memory + pool->pos;
/* Move the payload to the new block and wipe the old location. */
368 memcpy (new_blc, old, old_size);
370 memset (old, 0, old_size);
/* Fragment of MHD_pool_reset (keep, copy_bytes, new_size): moves the
 * 'keep' region to the start of the buffer, zeroes everything past it,
 * and resets the back cursor to the full size.  On Windows, large
 * tails are zeroed by decommitting and recommitting whole pages
 * (VirtualFree + VirtualAlloc) instead of memset — freshly committed
 * pages are zero-filled by the OS.  NOTE(review): the computation of
 * 'to_recommit' and several call arguments are missing here. */
396 mhd_assert (pool->size >= pool->end - pool->pos);
/* 'keep' (when non-NULL) must lie inside this pool's buffer. */
400 mhd_assert (keep ==
NULL || pool->memory + pool->size >= (uint8_t*) keep
/* Slide the retained bytes to the front; memmove because the source
 * and destination regions may overlap. */
402 if ( (
NULL != keep) &&
403 (keep != pool->memory) )
406 memmove (pool->memory,
/* Zero the remainder of the buffer after the kept prefix. */
411 if (pool->size > copy_bytes)
415 to_zero = pool->size - copy_bytes;
420 uint8_t *recommit_addr;
/* Recommit region is the page-aligned tail of the buffer. */
423 recommit_addr = pool->memory + pool->size - to_recommit;
/* Decommit succeeded: those bytes no longer need explicit zeroing. */
427 if (VirtualFree (recommit_addr,
431 to_zero -= to_recommit;
433 if (recommit_addr != VirtualAlloc (recommit_addr,
/* memset whatever the page-recommit path did not cover. */
441 memset (&pool->memory[copy_bytes],
/* All space past the kept block is free again. */
446 pool->end = pool->size;
void MHD_pool_destroy(struct MemoryPool *pool)
void * MHD_pool_reallocate(struct MemoryPool *pool, void *old, size_t old_size, size_t new_size)
size_t MHD_pool_get_free(struct MemoryPool *pool)
struct MemoryPool * MHD_pool_create(size_t max)
void * MHD_pool_reset(struct MemoryPool *pool, void *keep, size_t copy_bytes, size_t new_size)
void * MHD_pool_allocate(struct MemoryPool *pool, size_t size, int from_end)
void MHD_init_mem_pools_(void)
#define MHD_DEF_PAGE_SIZE_
static size_t MHD_sys_page_size_
#define ROUND_TO_ALIGN(n)
memory pool; mostly used for efficient (de)allocation for each connection and bounding memory use for each request