Contiki-NG
heapmem.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2005, Nicolas Tsiftes
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  * notice, this list of conditions and the following disclaimer in the
12  * documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of the contributors
14  * may be used to endorse or promote products derived from this software
15  * without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
18  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
20  * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
21  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
24  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
27  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /**
32  * \file
33  * Dynamic memory allocation module.
34  * \author
35  * Nicolas Tsiftes <nvt@acm.org>
36  */
37 
38 #ifndef DEBUG
39 #define DEBUG 0
40 #endif
41 
42 #if DEBUG
43 #include <stdio.h>
44 #define PRINTF(...) printf(__VA_ARGS__)
45 #undef HEAPMEM_DEBUG
46 #define HEAPMEM_DEBUG 1
47 #else
48 #define PRINTF(...)
49 #endif
50 
51 #ifdef PROJECT_CONF_PATH
52 /* Load the heapmem configuration from a project configuration file. */
53 #include PROJECT_CONF_PATH
54 #endif
55 
56 #include <stdint.h>
57 #include <string.h>
58 
59 #include "heapmem.h"
60 
61 #include "sys/cc.h"
62 
63 /* The HEAPMEM_CONF_ARENA_SIZE parameter determines the size of the
64  space that will be statically allocated in this module. */
65 #ifdef HEAPMEM_CONF_ARENA_SIZE
66 #define HEAPMEM_ARENA_SIZE HEAPMEM_CONF_ARENA_SIZE
67 #else
68 /* If the heap size is not set, we use a minimal size that will ensure
69  that all allocation attempts fail. */
70 #define HEAPMEM_ARENA_SIZE 1
71 #endif
72 /* HEAPMEM_CONF_ARENA_SIZE */
73 
74 /*
75  * The HEAPMEM_CONF_SEARCH_MAX parameter limits the time spent on
76  * chunk allocation and defragmentation. The lower this number is, the
77  * faster the operations become. The cost of this speedup, however, is
78  * that the space overhead might increase.
79  */
80 #ifdef HEAPMEM_CONF_SEARCH_MAX
81 #define CHUNK_SEARCH_MAX HEAPMEM_CONF_SEARCH_MAX
82 #else
83 #define CHUNK_SEARCH_MAX 16
84 #endif /* HEAPMEM_CONF_SEARCH_MAX */
85 
86 /*
87  * The HEAPMEM_CONF_REALLOC parameter determines whether heapmem_realloc() is
88  * enabled (non-zero value) or not (zero value).
89  */
90 #ifdef HEAPMEM_CONF_REALLOC
91 #define HEAPMEM_REALLOC HEAPMEM_CONF_REALLOC
92 #else
93 #define HEAPMEM_REALLOC 1
94 #endif /* HEAPMEM_CONF_REALLOC */
95 
96 /*
97  * The HEAPMEM_CONF_ALIGNMENT parameter decides what the minimum alignment
98  * for allocated data should be.
99  */
100 #ifdef HEAPMEM_CONF_ALIGNMENT
101 #define HEAPMEM_ALIGNMENT HEAPMEM_CONF_ALIGNMENT
102 #else
103 #define HEAPMEM_ALIGNMENT sizeof(int)
104 #endif /* HEAPMEM_CONF_ALIGNMENT */
105 
106 #define ALIGN(size) \
107  (((size) + (HEAPMEM_ALIGNMENT - 1)) & ~(HEAPMEM_ALIGNMENT - 1))
108 
109 /* Macros for chunk iteration. */
110 #define NEXT_CHUNK(chunk) \
111  ((chunk_t *)((char *)(chunk) + sizeof(chunk_t) + (chunk)->size))
112 #define IS_LAST_CHUNK(chunk) \
113  ((char *)NEXT_CHUNK(chunk) == &heap_base[heap_usage])
114 
115 /* Macros for retrieving the data pointer from a chunk,
116  and the other way around. */
117 #define GET_CHUNK(ptr) \
118  ((chunk_t *)((char *)(ptr) - sizeof(chunk_t)))
119 #define GET_PTR(chunk) \
120  (char *)((chunk) + 1)
121 
122 /* Macros for determining the status of a chunk. */
123 #define CHUNK_FLAG_ALLOCATED 0x1
124 
125 #define CHUNK_ALLOCATED(chunk) \
126  ((chunk)->flags & CHUNK_FLAG_ALLOCATED)
127 #define CHUNK_FREE(chunk) \
128  (~(chunk)->flags & CHUNK_FLAG_ALLOCATED)
129 
/*
 * We use a double-linked list of chunks, with a slight space overhead compared
 * to a single-linked list, but with the advantage of having much faster
 * list removals.
 */
typedef struct chunk {
  struct chunk *prev;  /* Previous chunk on the free list; meaningful only
                          while the chunk is free. */
  struct chunk *next;  /* Next chunk on the free list; meaningful only
                          while the chunk is free. */
  size_t size;         /* Payload size in bytes, excluding this header
                          (see NEXT_CHUNK, which adds sizeof(chunk_t)). */
  uint8_t flags;       /* Status bits; see CHUNK_FLAG_ALLOCATED. */
#if HEAPMEM_DEBUG
  const char *file;    /* Source file of the most recent (re)allocation. */
  unsigned line;       /* Source line of the most recent (re)allocation. */
#endif
} chunk_t;
145 
/* All allocated space is located within a "heap", which is statically
   allocated with a pre-configured size. */
148 static char heap_base[HEAPMEM_ARENA_SIZE] CC_ALIGN(HEAPMEM_ALIGNMENT);
149 static size_t heap_usage;
150 
151 static chunk_t *first_chunk = (chunk_t *)heap_base;
152 static chunk_t *free_list;
153 
154 /* extend_space: Increases the current footprint used in the heap, and
155  returns a pointer to the old end. */
156 static void *
157 extend_space(size_t size)
158 {
159  char *old_usage;
160 
161  if(size > HEAPMEM_ARENA_SIZE - heap_usage) {
162  return NULL;
163  }
164 
165  old_usage = &heap_base[heap_usage];
166  heap_usage += size;
167 
168  return old_usage;
169 }
170 
171 /* free_chunk: Mark a chunk as being free, and put it on the free list. */
172 static void
173 free_chunk(chunk_t * const chunk)
174 {
175  chunk->flags &= ~CHUNK_FLAG_ALLOCATED;
176 
177  if(IS_LAST_CHUNK(chunk)) {
178  /* Release the chunk back into the wilderness. */
179  heap_usage -= sizeof(chunk_t) + chunk->size;
180  } else {
181  /* Put the chunk on the free list. */
182  chunk->prev = NULL;
183  chunk->next = free_list;
184  if(free_list != NULL) {
185  free_list->prev = chunk;
186  }
187  free_list = chunk;
188  }
189 }
190 
/* allocate_chunk: Mark a chunk as being allocated, and remove it
   from the free list. */
static void
allocate_chunk(chunk_t * const chunk)
{
  chunk->flags |= CHUNK_FLAG_ALLOCATED;

  /* Unlink the chunk from the doubly-linked free list. The list head is
     a special case because the free_list pointer itself must move. */
  if(chunk == free_list) {
    free_list = chunk->next;
    if(free_list != NULL) {
      free_list->prev = NULL;
    }
  } else {
    chunk->prev->next = chunk->next;
  }

  /* Fix the back pointer of the successor, if there is one. */
  if(chunk->next != NULL) {
    chunk->next->prev = chunk->prev;
  }
}
211 
212 /*
213  * split_chunk: When allocating a chunk, we may have found one that is
214  * larger than needed, so this function is called to keep the rest of
215  * the original chunk free.
216  */
217 static void
218 split_chunk(chunk_t * const chunk, size_t offset)
219 {
220  chunk_t *new_chunk;
221 
222  offset = ALIGN(offset);
223 
224  if(offset + sizeof(chunk_t) < chunk->size) {
225  new_chunk = (chunk_t *)(GET_PTR(chunk) + offset);
226  new_chunk->size = chunk->size - sizeof(chunk_t) - offset;
227  new_chunk->flags = 0;
228  free_chunk(new_chunk);
229 
230  chunk->size = offset;
231  chunk->next = chunk->prev = NULL;
232  }
233 }
234 
235 /* coalesce_chunks: Coalesce a specific free chunk with as many adjacent
236  free chunks as possible. */
237 static void
238 coalesce_chunks(chunk_t *chunk)
239 {
240  chunk_t *next;
241 
242  for(next = NEXT_CHUNK(chunk);
243  (char *)next < &heap_base[heap_usage] && CHUNK_FREE(next);
244  next = NEXT_CHUNK(next)) {
245  chunk->size += sizeof(chunk_t) + next->size;
246  allocate_chunk(next);
247  }
248 }
249 
250 /* defrag_chunks: Scan the free list for chunks that can be coalesced,
251  and stop within a bounded time. */
252 static void
253 defrag_chunks(void)
254 {
255  int i;
256  chunk_t *chunk;
257 
258  /* Limit the time we spend on searching the free list. */
259  i = CHUNK_SEARCH_MAX;
260  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
261  if(i-- == 0) {
262  break;
263  }
264  coalesce_chunks(chunk);
265  }
266 }
267 
268 /* get_free_chunk: Search the free list for the most suitable chunk, as
269  determined by its size, to satisfy an allocation request. */
270 static chunk_t *
271 get_free_chunk(const size_t size)
272 {
273  int i;
274  chunk_t *chunk, *best;
275 
276  /* Defragment chunks only right before they are needed for allocation. */
277  defrag_chunks();
278 
279  best = NULL;
280  /* Limit the time we spend on searching the free list. */
281  i = CHUNK_SEARCH_MAX;
282  for(chunk = free_list; chunk != NULL; chunk = chunk->next) {
283  if(i-- == 0) {
284  break;
285  }
286 
287  /*
288  * To avoid fragmenting large chunks, we select the chunk with the
289  * smallest size that is larger than or equal to the requested size.
290  */
291  if(size <= chunk->size) {
292  if(best == NULL || chunk->size < best->size) {
293  best = chunk;
294  }
295  if(best->size == size) {
296  /* We found a perfect chunk -- stop the search. */
297  break;
298  }
299  }
300  }
301 
302  if(best != NULL) {
303  /* We found a chunk for the allocation. Split it if necessary. */
304  allocate_chunk(best);
305  split_chunk(best, size);
306  }
307 
308  return best;
309 }
310 
/*
 * heapmem_alloc: Allocate an object of the specified size, returning
 * a pointer to it in case of success, and NULL in case of failure.
 *
 * When allocating memory, heapmem_alloc() will first try to find a
 * free chunk of the same size as the requested one. If none can be
 * found, we pick a larger chunk that is as close in size as possible,
 * and possibly split it so that the remaining part becomes a chunk
 * available for allocation. At most CHUNK_SEARCH_MAX chunks on the
 * free list will be examined.
 *
 * As a last resort, heapmem_alloc() will try to extend the heap
 * space, and thereby create a new chunk available for use.
 */
void *
#if HEAPMEM_DEBUG
heapmem_alloc_debug(size_t size, const char *file, const unsigned line)
#else
heapmem_alloc(size_t size)
#endif
{
  chunk_t *chunk;

  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  /* Round the requested size up to the configured alignment. */
  size = ALIGN(size);

  chunk = get_free_chunk(size);
  if(chunk == NULL) {
    /* No reusable chunk was found: claim fresh space at the end of the
       heap, including room for the chunk header. */
    chunk = extend_space(sizeof(chunk_t) + size);
    if(chunk == NULL) {
      return NULL;
    }
    chunk->size = size;
  }

  chunk->flags = CHUNK_FLAG_ALLOCATED;

#if HEAPMEM_DEBUG
  /* Record the allocation site for debugging. */
  chunk->file = file;
  chunk->line = line;
#endif

  PRINTF("%s ptr %p size %lu\n", __func__, GET_PTR(chunk), (unsigned long)size);

  return GET_PTR(chunk);
}
361 
362 /*
363  * heapmem_free: Deallocate a previously allocated object.
364  *
365  * The pointer must exactly match one returned from an earlier call
366  * from heapmem_alloc or heapmem_realloc, without any call to
367  * heapmem_free in between.
368  *
369  * When performing a deallocation of a chunk, the chunk will be put on
370  * a list of free chunks internally. All free chunks that are adjacent
371  * in memory will be merged into a single chunk in order to mitigate
372  * fragmentation.
373  */
374 void
375 #if HEAPMEM_DEBUG
376 heapmem_free_debug(void *ptr, const char *file, const unsigned line)
377 #else
378 heapmem_free(void *ptr)
379 #endif
380 {
381  chunk_t *chunk;
382 
383  if(ptr) {
384  chunk = GET_CHUNK(ptr);
385 
386  PRINTF("%s ptr %p, allocated at %s:%u\n", __func__, ptr,
387  chunk->file, chunk->line);
388 
389  free_chunk(chunk);
390  }
391 }
392 
#if HEAPMEM_REALLOC
/*
 * heapmem_realloc: Reallocate an object with a different size,
 * possibly moving it in memory. In case of success, the function
 * returns a pointer to the object's new location. In case of failure,
 * it returns NULL.
 *
 * If the size of the new chunk is larger than that of the allocated
 * chunk, heapmem_realloc() will first attempt to extend the currently
 * allocated chunk. If that memory is not free, heapmem_realloc() will
 * attempt to allocate a completely new chunk, copy the old data to
 * the new chunk, and deallocate the old chunk.
 *
 * If the size of the new chunk is smaller than the allocated one, we
 * split the allocated chunk if the remaining chunk would be large
 * enough to justify the overhead of creating a new chunk.
 */
void *
#if HEAPMEM_DEBUG
heapmem_realloc_debug(void *ptr, size_t size,
                      const char *file, const unsigned line)
#else
heapmem_realloc(void *ptr, size_t size)
#endif
{
  void *newptr;
  chunk_t *chunk;

  PRINTF("%s ptr %p size %u at %s:%u\n",
         __func__, ptr, (unsigned)size, file, line);

  /* Fail early on too large allocation requests to prevent wrapping values. */
  if(size > HEAPMEM_ARENA_SIZE) {
    return NULL;
  }

  /* Special cases in which we can hand off the execution to other functions. */
  if(ptr == NULL) {
    return heapmem_alloc(size);
  } else if(size == 0) {
    heapmem_free(ptr);
    return NULL;
  }

  chunk = GET_CHUNK(ptr);
#if HEAPMEM_DEBUG
  /* Record the reallocation site for debugging. */
  chunk->file = file;
  chunk->line = line;
#endif

  size = ALIGN(size);

  /*
   * Compare the sizes directly instead of storing the difference in a
   * signed int: on platforms with a 16-bit int, converting a size_t
   * difference larger than INT_MAX to int is implementation-defined
   * and could invert the sign test for large arenas.
   */
  if(size <= chunk->size) {
    /* Request to make the object smaller or to keep its size.
       In the former case, the chunk will be split if possible. */
    split_chunk(chunk, size);
    return ptr;
  }

  /* Request to make the object larger. */
  if(IS_LAST_CHUNK(chunk)) {
    /*
     * If the object is within the last allocated chunk (i.e., the
     * one before the end of the heap footprint), we just attempt to
     * extend the heap. The difference is non-negative here because
     * size > chunk->size.
     */
    if(extend_space(size - chunk->size) != NULL) {
      chunk->size = size;
      return ptr;
    }
  } else {
    /*
     * Here we attempt to enlarge an allocated object, whose
     * adjacent space may already be allocated. We attempt to
     * coalesce chunks in order to make as much room as possible.
     */
    coalesce_chunks(chunk);
    if(chunk->size >= size) {
      /* There was enough free adjacent space to extend the chunk in
         its current place. */
      split_chunk(chunk, size);
      return ptr;
    }
  }

  /*
   * Failed to enlarge the object in its current place, since the
   * adjacent chunk is allocated. Hence, we try to place the new
   * object elsewhere in the heap, and remove the old chunk that was
   * holding it.
   */
  newptr = heapmem_alloc(size);
  if(newptr == NULL) {
    return NULL;
  }

  /* chunk->size < size here, so the copy fits in the new object. */
  memcpy(newptr, ptr, chunk->size);
  free_chunk(chunk);

  return newptr;
}
#endif /* HEAPMEM_REALLOC */
497 
/* heapmem_stats: Calculate statistics regarding memory usage. */
void
heapmem_stats(heapmem_stats_t *stats)
{
  chunk_t *chunk;

  memset(stats, 0, sizeof(*stats));

  /* Walk every chunk in address order, from the start of the heap to
     the current end of the footprint. */
  for(chunk = first_chunk;
      (char *)chunk < &heap_base[heap_usage];
      chunk = NEXT_CHUNK(chunk)) {
    if(CHUNK_ALLOCATED(chunk)) {
      stats->allocated += chunk->size;
    } else {
      /* Merging adjacent free chunks here ensures that each contiguous
         free region is counted exactly once. Note that this mutates the
         heap as a side effect of gathering statistics. */
      coalesce_chunks(chunk);
      stats->available += chunk->size;
    }
    /* Each chunk contributes one header of overhead. */
    stats->overhead += sizeof(chunk_t);
  }
  /* The part of the arena that has never been claimed is also available. */
  stats->available += HEAPMEM_ARENA_SIZE - heap_usage;
  stats->footprint = heap_usage;
  stats->chunks = stats->overhead / sizeof(chunk_t);
}
void heapmem_free(void *ptr)
Deallocate a chunk of memory.
Definition: heapmem.c:378
void * heapmem_alloc(size_t size)
Allocate a chunk of memory in the heap.
Definition: heapmem.c:329
Header file for the dynamic heap memory allocator.
Default definitions of C compiler quirk work-arounds.
void heapmem_stats(heapmem_stats_t *stats)
Obtain internal heapmem statistics regarding the allocated chunks.
Definition: heapmem.c:500
void * heapmem_realloc(void *ptr, size_t size)
Reallocate a chunk of memory in the heap.
Definition: heapmem.c:415