Contiki-NG
heapmem.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2005, Nicolas Tsiftes
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  * notice, this list of conditions and the following disclaimer in the
12  * documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of the contributors
14  * may be used to endorse or promote products derived from this software
15  * without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
20  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /**
32  * \file
33  * Dynamic memory allocation module.
34  * \author
35  * Nicolas Tsiftes <nvt@acm.org>
36  */
37 
38 #ifndef DEBUG
39 #define DEBUG 0
40 #endif
41 
42 #if DEBUG
43 #include <stdio.h>
44 #define PRINTF(...) printf(__VA_ARGS__)
45 #undef HEAPMEM_DEBUG
46 #define HEAPMEM_DEBUG 1
47 #else
48 #define PRINTF(...)
49 #endif
50 
51 #ifdef PROJECT_CONF_PATH
52 /* Load the heapmem configuration from a project configuration file. */
53 #include PROJECT_CONF_PATH
54 #endif
55 
56 #include <stdint.h>
57 #include <string.h>
58 
59 #include "heapmem.h"
60 
61 /* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
62  space that will be statically allocated in this module. */
63 #ifdef HEAPMEM_CONF_ARENA_SIZE
64 #define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
65 #else
66 /* If the heap size is not set, we use a minimal size that will ensure
67  that all allocation attempts fail. */
68 #define HEAPMEM_ARENA_SIZE 1
69 #endif
70 /* HEAPMEM_CONF_ARENA_SIZE */
71 
72 /*
73  * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
74  * chunk allocation and defragmentation. The lower this number is, the
75  * faster the operations become. The cost of this speedup, however, is
76  * that the space overhead might increase.
77  */
78 #ifdef HEAPMEM_CONF_SEARCH_MAX
79 #define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
80 #else
81 #define CHUNK_SEARCH_MAX 16
82 #endif /* HEAPMEM_CONF_SEARCH_MAX */
83 
84 /*
85  * The HEAPMEM_CONF_REALLOC parameter determines whether heapmem_realloc() is
86  * enabled (non-zero value) or not (zero value).
87  */
88 #ifdef HEAPMEM_CONF_REALLOC
89 #define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
90 #else
91 #define HEAPMEM_REALLOC 1
92 #endif /* HEAPMEM_CONF_REALLOC */
93 
94 /*
95  * The HEAPMEM_CONF_ALIGNMENT parameter decides what the minimum alignment
96  * for allocated data should be.
97  */
98 #ifdef HEAPMEM_CONF_ALIGNMENT
99 #define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
100 #else
101 #define HEAPMEM_ALIGNMENT sizeof(int)
102 #endif /* HEAPMEM_CONF_ALIGNMENT */
103 
104 #define ALIGN(size) \
105  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))
106 
107 /* Macros for chunk iteration. */
108 #define NEXT_CHUNK(chunk) \
109  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
110 #define IS_LAST_CHUNK(chunk) \
111  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])
112 
113 /* Macros for retrieving the data pointer from a chunk,
114  and the other way around. */
115 #define GET_CHUNK(ptr) \
116  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
117 #define GET_PTR(chunk) \
118  (char *)((chunk) + 1)
119 
120 /* Macros for determining the status of a chunk. */
121 #define CHUNK_FLAG_ALLOCATED 0x1
122 
123 #define CHUNK_ALLOCATED(chunk) \
124  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
125 #define CHUNK_FREE(chunk) \
126  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
127 
/*
 * We use a double-linked list of chunks, with a slight space overhead compared
 * to a single-linked list, but with the advantage of having much faster
 * list removals.
 */
typedef struct chunk {
  struct chunk *prev;  /* Previous chunk on the free list, or NULL. */
  struct chunk *next;  /* Next chunk on the free list, or NULL. */
  size_t size;         /* Size of the data area that follows this header. */
  uint8_t flags;       /* Status bits; see CHUNK_FLAG_ALLOCATED. */
#if HEAPMEM_DEBUG
  const char *file;    /* Source file of the last (re)allocation. */
  unsigned line;       /* Source line of the last (re)allocation. */
#endif
} chunk_t;
143 
/* All allocated space is located within a "heap", which is statically
   allocated with a pre-configured size. */
146 static char heap_base[HEAPMEM_ARENA_SIZE];
147 static size_t heap_usage;
148 
149 static chunk_t *first_chunk = (chunk_t *)heap_base;
150 static chunk_t *free_list;
151 
152 /* extend_space: Increases the current footprint used in the heap, and
153  returns a pointer to the old end. */
154 static void *
155 extend_space(size_t size)
156 {
157  char *old_usage;
158 
159  if(heap_usage + size > HEAPMEM_ARENA_SIZE) {
160  return NULL;
161  }
162 
163  old_usage = &heap_base[heap_usage];
164  heap_usage += size;
165 
166  return old_usage;
167 }
168 
169 /* free_chunk: Mark a chunk as being free, and put it on the free list. */
170 static void
171 free_chunk(chunk_t * const chunk)
172 {
173  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;
174 
175  if(IS_LAST_CHUNK(chunk)) {
176  /* Release the chunk back into the wilderness. */
177  heap_usage -= sizeof(chunk_t) + chunk->size;
178  } else {
179  /* Put the chunk on the free list. */
180  chunk->prev = NULL;
181  chunk->next = free_list;
182  if(free_list != NULL) {
183  free_list->prev = chunk;
184  }
185  free_list = chunk;
186  }
187 }
188 
189 /* allocate_chunk: Mark a chunk as being allocated, and remove it
190  from the free list. */
191 static void
192 allocate_chunk(chunk_t * const chunk)
193 {
194  chunk->flags |= CHUNK_FLAG_ALLOCATED;
195 
196  if(chunk == free_list) {
197  free_list = chunk->next;
198  if(free_list != NULL) {
199  free_list->prev = NULL;
200  }
201  } else {
202  chunk->prev->next = chunk->next;
203  }
204 
205  if(chunk->next != NULL) {
206  chunk->next->prev = chunk->prev;
207  }
208 }
209 
210 /*
211  * split_chunk: When allocating a chunk, we may have found one that is
212  * larger than needed, so this function is called to keep the rest of
213  * the original chunk free.
214  */
215 static void
216 split_chunk(chunk_t * const chunk, size_t offset)
217 {
218  chunk_t *new_chunk;
219 
220  offset = ALIGN(offset);
221 
222  if(offset + sizeof(chunk_t) < chunk->size) {
223  new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
224  new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
225  new_chunk->flags = 0;
226  free_chunk(new_chunk);
227 
228  chunk->size = offset;
229  chunk->next = chunk->prev = NULL;
230  }
231 }
232 
233 /* coalesce_chunks: Coalesce a specific free chunk with as many adjacent
234  free chunks as possible. */
235 static void
236 coalesce_chunks(chunk_t *chunk)
237 {
238  chunk_t *next;
239 
240  for(next = NEXT_CHUNK(chunk);
241  (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
242  next = NEXT_CHUNK(next)) {
243  chunk->size += sizeof(chunk_t) + next->size;
244  allocate_chunk(next);
245  }
246 }
247 
248 /* defrag_chunks: Scan the free list for chunks that can be coalesced,
249  and stop within a bounded time. */
250 static void
251 defrag_chunks(void)
252 {
253  int i;
254  chunk_t *chunk;
255 
256  /* Limit the time we spend on searching the free list. */
257  i = CHUNK_SEARCH_MAX;
258  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
259  if(i-- == 0) {
260  break;
261  }
262  coalesce_chunks(chunk);
263  }
264 }
265 
266 /* get_free_chunk: Search the free list for the most suitable chunk, as
267  determined by its size, to satisfy an allocation request. */
268 static chunk_t *
269 get_free_chunk(const size_t size)
270 {
271  int i;
272  chunk_t *chunk, *best;
273 
274  /* Defragment chunks only right before they are needed for allocation. */
275  defrag_chunks();
276 
277  best = NULL;
278  /* Limit the time we spend on searching the free list. */
279  i = CHUNK_SEARCH_MAX;
280  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
281  if(i-- == 0) {
282  break;
283  }
284 
285  /*
286  * To avoid fragmenting large chunks, we select the chunk with the
287  * smallest size that is larger than or equal to the requested size.
288  */
289  if(size <= chunk->size) {
290  if(best == NULL || chunk->size < best->size) {
291  best = chunk;
292  }
293  if(best->size == size) {
294  /* We found a perfect chunk -- stop the search. */
295  break;
296  }
297  }
298  }
299 
300  if(best != NULL) {
301  /* We found a chunk for the allocation. Split it if necessary. */
302  allocate_chunk(best);
303  split_chunk(best, size);
304  }
305 
306  return best;
307 }
308 
309 /*
310  * heapmem_alloc: Allocate an object of the specified size, returning
311  * a pointer to it in case of success, and NULL in case of failure.
312  *
313  * When allocating memory, heapmem_alloc() will first try to find a
314  * free chunk of the same size and the requested one. If none can be
315  * find, we pick a larger chunk that is as close in size as possible,
316  * and possibly split it so that the remaining part becomes a chunk
317  * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
318  * free list will be examined.
319  *
320  * As a last resort, heapmem_alloc() will try to extend the heap
321  * space, and thereby create a new chunk available for use.
322  */
323 void *
324 #if HEAPMEM_DEBUG
325 heapmem_alloc_debug(size_t size, const char *file, const unsigned line)
326 #else
327 heapmem_alloc(size_t size)
328 #endif
329 {
330  chunk_t *chunk;
331 
332  size = ALIGN(size);
333 
334  chunk = get_free_chunk(size);
335  if(chunk == NULL) {
336  chunk = extend_space(sizeof(chunk_t) + size);
337  if(chunk == NULL) {
338  return NULL;
339  }
340  chunk->size = size;
341  }
342 
343  chunk->flags = CHUNK_FLAG_ALLOCATED;
344 
345 #if HEAPMEM_DEBUG
346  chunk->file = file;
347  chunk->line = line;
348 #endif
349 
350  PRINTF("%s ptr %p size %lu\n", __func__, GET_PTR(chunk), (unsigned long)size);
351 
352  return GET_PTR(chunk);
353 }
354 
355 /*
356  * heapmem_free: Deallocate a previously allocated object.
357  *
358  * The pointer must exactly match one returned from an earlier call
359  * from heapmem_alloc or heapmem_realloc, without any call to
360  * heapmem_free in between.
361  *
362  * When performing a deallocation of a chunk, the chunk will be put on
363  * a list of free chunks internally. All free chunks that are adjacent
364  * in memory will be merged into a single chunk in order to mitigate
365  * fragmentation.
366  */
367 void
368 #if HEAPMEM_DEBUG
369 heapmem_free_debug(void *ptr, const char *file, const unsigned line)
370 #else
371 heapmem_free(void *ptr)
372 #endif
373 {
374  chunk_t *chunk;
375 
376  if(ptr) {
377  chunk = GET_CHUNK(ptr);
378 
379  PRINTF("%s ptr %p, allocated at %s:%u\n", __func__, ptr,
380  chunk->file, chunk->line);
381 
382  free_chunk(chunk);
383  }
384 }
385 
386 #if HEAPMEM_REALLOC
387 /*
388  * heapmem_realloc: Reallocate an object with a different size,
389  * possibly moving it in memory. In case of success, the function
390  * returns a pointer to the objects new location. In case of failure,
391  * it returns NULL.
392  *
393  * If the size of the new chunk is larger than that of the allocated
394  * chunk, heapmem_realloc() will first attempt to extend the currently
395  * allocated chunk. If that memory is not free, heapmem_ralloc() will
396  * attempt to allocate a completely new chunk, copy the old data to
397  * the new chunk, and deallocate the old chunk.
398  *
399  * If the size of the new chunk is smaller than the allocated one, we
400  * split the allocated chunk if the remaining chunk would be large
401  * enough to justify the overhead of creating a new chunk.
402  */
403 void *
404 #if HEAPMEM_DEBUG
405 heapmem_realloc_debug(void *ptr, size_t size,
406  const char *file, const unsigned line)
407 #else
408 heapmem_realloc(void *ptr, size_t size)
409 #endif
410 {
411  void *newptr;
412  chunk_t *chunk;
413  int size_adj;
414 
415  PRINTF("%s ptr %p size %u at %s:%u\n",
416  __func__, ptr, (unsigned)size, file, line);
417 
418  /* Special cases in which we can hand off the execution to other functions. */
419  if(ptr == NULL) {
420  return heapmem_alloc(size);
421  } else if(size == 0) {
422  heapmem_free(ptr);
423  return NULL;
424  }
425 
426  chunk = GET_CHUNK(ptr);
427 #if HEAPMEM_DEBUG
428  chunk->file = file;
429  chunk->line = line;
430 #endif
431 
432  size = ALIGN(size);
433  size_adj = size - chunk->size;
434 
435  if(size_adj <= 0) {
436  /* Request to make the object smaller or to keep its size.
437  In the former case, the chunk will be split if possible. */
438  split_chunk(chunk, size);
439  return ptr;
440  }
441 
442  /* Request to make the object larger. (size_adj > 0) */
443  if(IS_LAST_CHUNK(chunk)) {
444  /*
445  * If the object is within the last allocated chunk (i.e., the
446  * one before the end of the heap footprint, we just attempt to
447  * extend the heap.
448  */
449  if(extend_space(size_adj) != NULL) {
450  chunk->size = size;
451  return ptr;
452  }
453  } else {
454  /*
455  * Here we attempt to enlarge an allocated object, whose
456  * adjacent space may already be allocated. We attempt to
457  * coalesce chunks in order to make as much room as possible.
458  */
459  coalesce_chunks(chunk);
460  if(chunk->size >= size) {
461  /* There was enough free adjacent space to extend the chunk in
462  its current place. */
463  split_chunk(chunk, size);
464  return ptr;
465  }
466  }
467 
468  /*
469  * Failed to enlarge the object in its current place, since the
470  * adjacent chunk is allocated. Hence, we try to place the new
471  * object elsewhere in the heap, and remove the old chunk that was
472  * holding it.
473  */
474  newptr = heapmem_alloc(size);
475  if(newptr == NULL) {
476  return NULL;
477  }
478 
479  memcpy(newptr, ptr, chunk->size);
480  free_chunk(chunk);
481 
482  return newptr;
483 }
484 #endif /* HEAPMEM_REALLOC */
485 
486 /* heapmem_stats: Calculate statistics regarding memory usage. */
487 void
488 heapmem_stats(heapmem_stats_t *stats)
489 {
490  chunk_t *chunk;
491 
492  memset(stats, 0, sizeof(*stats));
493 
494  for(chunk = first_chunk;
495  (char *)chunk < &heap_base[heap_usage];
496  chunk = NEXT_CHUNK(chunk)) {
497  if(CHUNK_ALLOCATED(chunk)) {
498  stats->allocated += chunk->size;
499  } else {
500  coalesce_chunks(chunk);
501  stats->available += chunk->size;
502  }
503  stats->overhead += sizeof(chunk_t);
504  }
505  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
506  stats->footprint = heap_usage;
507  stats->chunks = stats->overhead / sizeof(chunk_t);
508 }
void heapmem_free(void *ptr)
Deallocate a chunk of memory.
Definition: heapmem.c:371
void * heapmem_alloc(size_t size)
Allocate a chunk of memory in the heap.
Definition: heapmem.c:327
Header file for the dynamic heap memory allocator.
void heapmem_stats(heapmem_stats_t *stats)
Obtain internal heapmem statistics regarding the allocated chunks.
Definition: heapmem.c:488
void * heapmem_realloc(void *ptr, size_t size)
Reallocate a chunk of memory in the heap.
Definition: heapmem.c:408