Contiki-NG
heapmem.c
/*
 * Copyright (c) 2005, Nicolas Tsiftes
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of the contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/**
 * \file
 *         Dynamic memory allocation module.
 * \author
 *         Nicolas Tsiftes <nvt@acm.org>
 */

/* Log configuration */
#include "sys/log.h"
#define LOG_MODULE "HeapMem"
#define LOG_LEVEL LOG_LEVEL_WARN

#ifndef HEAPMEM_DEBUG
#define HEAPMEM_DEBUG 0
#endif

#include <stdint.h>
#include <string.h>

#include "lib/heapmem.h"
#include "lib/assert.h"
#include "sys/cc.h"

/* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
   space that will be statically allocated in this module. */
#ifdef HEAPMEM_CONF_ARENA_SIZE
#define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
#else
/* If the heap size is not set, we use a minimal size that will ensure
   that all allocation attempts fail. */
#define HEAPMEM_ARENA_SIZE 1
#endif /* HEAPMEM_CONF_ARENA_SIZE */

/*
 * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
 * chunk allocation and defragmentation. The lower this number is, the
 * faster the operations become. The cost of this speedup, however, is
 * that the space overhead might increase.
 */
#ifdef HEAPMEM_CONF_SEARCH_MAX
#define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
#else
#define CHUNK_SEARCH_MAX 16
#endif /* HEAPMEM_CONF_SEARCH_MAX */

/*
 * The HEAPMEM_CONF_REALLOC parameter determines whether heapmem_realloc() is
 * enabled (non-zero value) or not (zero value).
 */
#ifdef HEAPMEM_CONF_REALLOC
#define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
#else
#define HEAPMEM_REALLOC 1
#endif /* HEAPMEM_CONF_REALLOC */

/*
 * The HEAPMEM_CONF_ALIGNMENT parameter decides what the minimum alignment
 * for allocated data should be.
 */
#ifdef HEAPMEM_CONF_ALIGNMENT
#define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
#else
#define HEAPMEM_ALIGNMENT sizeof(int)
#endif /* HEAPMEM_CONF_ALIGNMENT */
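
/*
 * Configuration sketch (illustrative, not part of this module): in a
 * Contiki-NG application, these parameters would typically be overridden
 * from project-conf.h. The values below are hypothetical examples, not
 * recommendations:
 *
 *   #define HEAPMEM_CONF_ARENA_SIZE (4 * 1024)
 *   #define HEAPMEM_CONF_SEARCH_MAX 32
 *   #define HEAPMEM_CONF_ALIGNMENT  8
 */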

#define ALIGN(size) \
  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))

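/*
 * Worked example (illustrative): with HEAPMEM_ALIGNMENT == 4, ALIGN()
 * rounds a size up to the next multiple of the alignment:
 *
 *   ALIGN(1) == 4, ALIGN(4) == 4, ALIGN(5) == 8
 *
 * The bitmask trick assumes that HEAPMEM_ALIGNMENT is a power of two.
 */
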
/* Macros for chunk iteration. */
#define NEXT_CHUNK(chunk) \
  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
#define IS_LAST_CHUNK(chunk) \
  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])

/* Macros for retrieving the data pointer from a chunk,
   and the other way around. */
#define GET_CHUNK(ptr) \
  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
#define GET_PTR(chunk) \
  (char *)((chunk) + 1)

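/*
 * Memory layout of a single chunk (illustrative):
 *
 *   +----------------+-------------------------------+
 *   | chunk_t header | user data ((chunk)->size B)   |
 *   +----------------+-------------------------------+
 *   ^                ^
 *   chunk            GET_PTR(chunk)
 *
 * GET_CHUNK() and GET_PTR() translate between the two pointers by
 * stepping over the header, and NEXT_CHUNK() steps over the header
 * plus the data to reach the chunk that follows in memory.
 */
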
/* Macros for determining the status of a chunk. */
#define CHUNK_FLAG_ALLOCATED 0x1

#define CHUNK_ALLOCATED(chunk) \
  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
#define CHUNK_FREE(chunk) \
  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)

/*
 * We use a doubly linked list of chunks, with a slight space overhead
 * compared to a singly linked list, but with the advantage of having
 * much faster list removals.
 */
typedef struct chunk {
  struct chunk *prev;
  struct chunk *next;
  size_t size;
  uint8_t flags;
#if HEAPMEM_DEBUG
  const char *file;
  unsigned line;
#endif
} chunk_t;

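/*
 * Size illustration (an assumption: a 32-bit target with
 * HEAPMEM_DEBUG == 0): the header holds two pointers, a size_t, and a
 * flags byte, which pads to 16 bytes. Each allocation thus consumes
 * sizeof(chunk_t) + ALIGN(size) bytes of the arena.
 */
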
/* All allocated space is located within a "heap", which is statically
   allocated with a pre-configured size. */
static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
static size_t heap_usage;

static chunk_t *first_chunk = (chunk_t *)heap_base;
static chunk_t *free_list;

/* The outer parentheses are required so that the macro expands safely
   under operators such as the logical NOT used by its callers. */
#define IN_HEAP(ptr) (((char *)(ptr) >= (char *)heap_base) && \
                      ((char *)(ptr) < (char *)heap_base + heap_usage))

/* extend_space: Increases the current footprint used in the heap, and
   returns a pointer to the old end. */
static void *
extend_space(size_t size)
{
  if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
    return NULL;
  }

  char *old_usage = &heap_base[heap_usage];
  heap_usage += size;

  return old_usage;
}

/* free_chunk: Mark a chunk as being free, and put it on the free list. */
static void
free_chunk(chunk_t * const chunk)
{
  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;

  if(IS_LAST_CHUNK(chunk)) {
    /* Release the chunk back into the wilderness. */
    heap_usage -= sizeof(chunk_t) + chunk->size;
  } else {
    /* Put the chunk on the free list. */
    chunk->prev = NULL;
    chunk->next = free_list;
    if(free_list != NULL) {
      free_list->prev = chunk;
    }
    free_list = chunk;
  }
}

/* remove_chunk_from_free_list: Mark a chunk as being allocated, and remove it
   from the free list. */
static void
remove_chunk_from_free_list(chunk_t * const chunk)
{
  if(chunk == free_list) {
    free_list = chunk->next;
    if(free_list != NULL) {
      free_list->prev = NULL;
    }
  } else {
    chunk->prev->next = chunk->next;
  }

  if(chunk->next != NULL) {
    chunk->next->prev = chunk->prev;
  }
}

/*
 * split_chunk: When allocating a chunk, we may have found one that is
 * larger than needed, so this function is called to keep the rest of
 * the original chunk free.
 */
static void
split_chunk(chunk_t * const chunk, size_t offset)
{
  offset = ALIGN(offset);

  if(offset + sizeof(chunk_t) < chunk->size) {
    chunk_t *new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
    new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
    new_chunk->flags = 0;
    free_chunk(new_chunk);

    chunk->size = offset;
    chunk->next = chunk->prev = NULL;
  }
}

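/*
 * Worked example (illustrative, assuming a 16-byte chunk_t): splitting a
 * 64-byte chunk at offset 16 shrinks it to 16 bytes of data and creates
 * a new free chunk holding 64 - 16 - 16 = 32 bytes. If the remainder
 * could not fit a header plus at least one byte of data, the chunk is
 * left unsplit.
 */
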
/* coalesce_chunks: Coalesce a specific free chunk with as many adjacent
   free chunks as possible. */
static void
coalesce_chunks(chunk_t *chunk)
{
  for(chunk_t *next = NEXT_CHUNK(chunk);
      (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
      next = NEXT_CHUNK(next)) {
    chunk->size += sizeof(chunk_t) + next->size;
    LOG_DBG("Coalesce chunk of %zu bytes\n", next->size);
    remove_chunk_from_free_list(next);
  }
}

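/*
 * Example (illustrative, again assuming a 16-byte chunk_t): three
 * adjacent free chunks of 16, 32, and 64 bytes coalesce into one free
 * chunk of 16 + (16 + 32) + (16 + 64) = 144 bytes, since each absorbed
 * neighbor also contributes its header to the merged chunk's data area.
 */
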
/* defrag_chunks: Scan the free list for chunks that can be coalesced,
   and stop within a bounded time. */
static void
defrag_chunks(void)
{
  /* Limit the time we spend on searching the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }
    coalesce_chunks(chunk);
  }
}

/* get_free_chunk: Search the free list for the most suitable chunk, as
   determined by its size, to satisfy an allocation request. */
static chunk_t *
get_free_chunk(const size_t size)
{
  /* Defragment chunks only right before they are needed for allocation. */
  defrag_chunks();

  chunk_t *best = NULL;
  /* Limit the time we spend on searching the free list. */
  int i = CHUNK_SEARCH_MAX;
  for(chunk_t *chunk = free_list; chunk != NULL; chunk = chunk->next) {
    if(i-- == 0) {
      break;
    }

    /*
     * To avoid fragmenting large chunks, we select the chunk with the
     * smallest size that is larger than or equal to the requested size.
     */
    if(size <= chunk->size) {
      if(best == NULL || chunk->size < best->size) {
        best = chunk;
      }
      if(best->size == size) {
        /* We found a perfect chunk -- stop the search. */
        break;
      }
    }
  }

  if(best != NULL) {
    /* We found a chunk for the allocation. Split it if necessary. */
    remove_chunk_from_free_list(best);
    split_chunk(best, size);
  }

  return best;
}

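/*
 * Example (illustrative, assuming a 16-byte chunk_t): with free chunks
 * of 24, 48, and 96 bytes on the list, a request for 40 bytes selects
 * the 48-byte chunk as the best fit. Its 8-byte remainder is too small
 * to hold a new header, so the chunk is handed out unsplit.
 */
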
/*
 * heapmem_alloc: Allocate an object of the specified size, returning
 * a pointer to it in case of success, and NULL in case of failure.
 *
 * When allocating memory, heapmem_alloc() will first try to find a
 * free chunk of the same size as the requested one. If none can be
 * found, we pick a larger chunk that is as close in size as possible,
 * and possibly split it so that the remaining part becomes a chunk
 * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
 * free list will be examined.
 *
 * As a last resort, heapmem_alloc() will try to extend the heap
 * space, and thereby create a new chunk available for use.
 */
void *
#if HEAPMEM_DEBUG
heapmem_alloc_debug(size_t size, const char *file, const unsigned line)
#else
heapmem_alloc(size_t size)
#endif
{
  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  size = ALIGN(size);

  chunk_t *chunk = get_free_chunk(size);
  if(chunk == NULL) {
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  LOG_DBG("%s ptr %p size %zu\n", __func__, GET_PTR(chunk), size);

  return GET_PTR(chunk);
}

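/*
 * Usage sketch (illustrative, not part of this module): as with
 * malloc(), the return value must be checked before use, since the
 * arena may be exhausted:
 *
 *   int *values = heapmem_alloc(4 * sizeof(int));
 *   if(values == NULL) {
 *     LOG_ERR("out of heap memory\n");
 *   }
 */
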
/*
 * heapmem_free: Deallocate a previously allocated object.
 *
 * The pointer must exactly match one returned from an earlier call
 * to heapmem_alloc or heapmem_realloc, without any call to
 * heapmem_free in between.
 *
 * When performing a deallocation of a chunk, the chunk will be put on
 * a list of free chunks internally. All free chunks that are adjacent
 * in memory will be merged into a single chunk in order to mitigate
 * fragmentation.
 */
bool
#if HEAPMEM_DEBUG
heapmem_free_debug(void *ptr, const char *file, const unsigned line)
#else
heapmem_free(void *ptr)
#endif
{
  if(!IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return false;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p has already been deallocated\n", __func__, ptr);
    return false;
  }

#if HEAPMEM_DEBUG
  LOG_DBG("%s: ptr %p, allocated at %s:%u\n", __func__, ptr,
          chunk->file, chunk->line);
#endif

  free_chunk(chunk);
  return true;
}

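/*
 * Usage sketch (illustrative): unlike free(), heapmem_free() reports
 * misuse through its boolean return value:
 *
 *   if(!heapmem_free(values)) {
 *     LOG_ERR("double free or foreign pointer: %p\n", (void *)values);
 *   }
 */
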
#if HEAPMEM_REALLOC
/*
 * heapmem_realloc: Reallocate an object with a different size,
 * possibly moving it in memory. In case of success, the function
 * returns a pointer to the object's new location. In case of failure,
 * it returns NULL.
 *
 * If the size of the new chunk is larger than that of the allocated
 * chunk, heapmem_realloc() will first attempt to extend the currently
 * allocated chunk. If that memory is not free, heapmem_realloc() will
 * attempt to allocate a completely new chunk, copy the old data to
 * the new chunk, and deallocate the old chunk.
 *
 * If the size of the new chunk is smaller than the allocated one, we
 * split the allocated chunk if the remaining chunk would be large
 * enough to justify the overhead of creating a new chunk.
 */
void *
#if HEAPMEM_DEBUG
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
#else
heapmem_realloc(void *ptr, size_t size)
#endif
{
  /* Handle the special case of a NULL pointer first, because the
     IN_HEAP() check below would otherwise reject it. */
  if(ptr == NULL) {
    return heapmem_alloc(size);
  }

  if(!IN_HEAP(ptr)) {
    LOG_WARN("%s: ptr %p is not in the heap\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  LOG_DBG("%s: ptr %p size %zu at %s:%u\n",
          __func__, ptr, size, file, line);
#endif

  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  /* A request for size zero is equivalent to freeing the object. */
  if(size == 0) {
    heapmem_free(ptr);
    return NULL;
  }

  chunk_t *chunk = GET_CHUNK(ptr);
  if(!CHUNK_ALLOCATED(chunk)) {
    LOG_WARN("%s: ptr %p is not allocated\n", __func__, ptr);
    return NULL;
  }

#if HEAPMEM_DEBUG
  chunk->file = file;
  chunk->line = line;
#endif

  size = ALIGN(size);
  int size_adj = size - chunk->size;

  if(size_adj <= 0) {
    /* Request to make the object smaller or to keep its size.
       In the former case, the chunk will be split if possible. */
    split_chunk(chunk, size);
    return ptr;
  }

  /* Request to make the object larger. (size_adj > 0) */
  if(IS_LAST_CHUNK(chunk)) {
    /*
     * If the object is within the last allocated chunk (i.e., the
     * one before the end of the heap footprint), we just attempt to
     * extend the heap.
     */
    if(extend_space(size_adj) != NULL) {
      chunk->size = size;
      return ptr;
    }
  } else {
    /*
     * Here we attempt to enlarge an allocated object, whose
     * adjacent space may already be allocated. We attempt to
     * coalesce chunks in order to make as much room as possible.
     */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      /* There was enough free adjacent space to extend the chunk in
         its current place. */
      split_chunk(chunk, size);
      return ptr;
    }
  }

  /*
   * Failed to enlarge the object in its current place, since the
   * adjacent chunk is allocated. Hence, we try to place the new
   * object elsewhere in the heap, and remove the old chunk that was
   * holding it.
   */
  void *newptr = heapmem_alloc(size);
  if(newptr == NULL) {
    return NULL;
  }

  memcpy(newptr, ptr, chunk->size);
  free_chunk(chunk);

  return newptr;
}
#endif /* HEAPMEM_REALLOC */
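
/*
 * Usage sketch (illustrative): as with standard realloc(), the old
 * pointer should be kept until the call succeeds, since a failed
 * reallocation leaves the original object allocated:
 *
 *   void *tmp = heapmem_realloc(buf, new_size);
 *   if(tmp != NULL) {
 *     buf = tmp;
 *   }
 */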

/* heapmem_stats: Calculate statistics regarding memory usage. */
void
heapmem_stats(heapmem_stats_t *stats)
{
  memset(stats, 0, sizeof(*stats));

  for(chunk_t *chunk = first_chunk;
      (char *)chunk < &heap_base[heap_usage];
      chunk = NEXT_CHUNK(chunk)) {
    if(CHUNK_ALLOCATED(chunk)) {
      stats->allocated += chunk->size;
      stats->overhead += sizeof(chunk_t);
    } else {
      coalesce_chunks(chunk);
      stats->available += chunk->size;
    }
  }
  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
  stats->footprint = heap_usage;
  stats->chunks = stats->overhead / sizeof(chunk_t);
}
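
/*
 * Usage sketch (illustrative): the statistics can be logged to monitor
 * heap health at runtime. The format specifiers assume that the
 * heapmem_stats_t fields are of type size_t:
 *
 *   heapmem_stats_t stats;
 *   heapmem_stats(&stats);
 *   LOG_INFO("allocated %zu, available %zu, footprint %zu, chunks %zu\n",
 *            stats.allocated, stats.available,
 *            stats.footprint, stats.chunks);
 */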