You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

malloc.c 18KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650
  1. /*
  2. * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301, USA.
  18. */
  19. FILE_LICENCE ( GPL2_OR_LATER );
  20. #include <stddef.h>
  21. #include <stdint.h>
  22. #include <string.h>
  23. #include <strings.h>
  24. #include <ipxe/io.h>
  25. #include <ipxe/list.h>
  26. #include <ipxe/init.h>
  27. #include <ipxe/refcnt.h>
  28. #include <ipxe/malloc.h>
  29. #include <valgrind/memcheck.h>
  30. /** @file
  31. *
  32. * Dynamic memory allocation
  33. *
  34. */
/** A free block of memory */
struct memory_block {
	/** Size of this block */
	size_t size;
	/** Padding
	 *
	 * This padding exists to cover the "count" field of a
	 * reference counter, in the common case where a reference
	 * counter is the first element of a dynamically-allocated
	 * object.  It avoids clobbering the "count" field as soon as
	 * the memory is freed, and so allows for the possibility of
	 * detecting reference counting errors.
	 */
	char pad[ offsetof ( struct refcnt, count ) +
		  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
	/** List of free blocks */
	struct list_head list;
};

/** Minimum size of a free block
 *
 * The smallest power of two that can hold a struct memory_block;
 * all allocation sizes are rounded up to a multiple of this.
 */
#define MIN_MEMBLOCK_SIZE \
	( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Remaining data */
	char data[0];
};

/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
/**
 * Mark all blocks in free list as defined
 *
 * No-op unless running under Valgrind.
 */
static inline void valgrind_make_blocks_defined ( void ) {
	struct memory_block *block;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * defined.  Some contortions are necessary to avoid errors
	 * from list_check().
	 */

	/* Mark block list itself as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

	/* Mark areas accessed by list_check() as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
				    sizeof ( free_blocks.prev->next ) );
	VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
				    sizeof ( *free_blocks.next ) );
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
				    sizeof ( free_blocks.next->next->prev ) );

	/* Mark each block in list as defined */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark block as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

		/* Mark areas accessed by list_check() as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
					    sizeof ( *block->list.next ) );
		VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
				      sizeof ( block->list.next->next->prev ) );
	}
}
/**
 * Mark all blocks in free list as inaccessible
 *
 * No-op unless running under Valgrind.  Inverse of
 * valgrind_make_blocks_defined().
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
	struct memory_block *block;
	struct memory_block *prev = NULL;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * inaccessible.  Some contortions are necessary to avoid
	 * errors from list_check().
	 */

	/* Mark each block in list as inaccessible */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark previous block (if any) as inaccessible. (Current
		 * block will be accessed by list_check().)
		 */
		if ( prev )
			VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
		prev = block;

		/* At the end of the list, list_check() will end up
		 * accessing the first list item.  Temporarily mark
		 * this area as defined.
		 */
		VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
					    sizeof ( free_blocks.next->prev ) );
	}
	/* Mark last block (if any) as inaccessible */
	if ( prev )
		VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

	/* Mark as inaccessible the area that was temporarily marked
	 * as defined to avoid errors from list_check().
	 */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
				     sizeof ( free_blocks.next->prev ) );

	/* Mark block list itself as inaccessible */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}
  161. /**
  162. * Check integrity of the blocks in the free list
  163. *
  164. */
  165. static inline void check_blocks ( void ) {
  166. struct memory_block *block;
  167. struct memory_block *prev = NULL;
  168. if ( ! ASSERTING )
  169. return;
  170. list_for_each_entry ( block, &free_blocks, list ) {
  171. /* Check that list structure is intact */
  172. list_check ( &block->list );
  173. /* Check that block size is not too small */
  174. assert ( block->size >= sizeof ( *block ) );
  175. assert ( block->size >= MIN_MEMBLOCK_SIZE );
  176. /* Check that block does not wrap beyond end of address space */
  177. assert ( ( ( void * ) block + block->size ) >
  178. ( ( void * ) block ) );
  179. /* Check that blocks remain in ascending order, and
  180. * that adjacent blocks have been merged.
  181. */
  182. if ( prev ) {
  183. assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
  184. assert ( ( ( void * ) block ) >
  185. ( ( ( void * ) prev ) + prev->size ) );
  186. }
  187. prev = block;
  188. }
  189. }
  190. /**
  191. * Discard some cached data
  192. *
  193. * @ret discarded Number of cached items discarded
  194. */
  195. static unsigned int discard_cache ( void ) {
  196. struct cache_discarder *discarder;
  197. unsigned int discarded;
  198. for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
  199. discarded = discarder->discard();
  200. if ( discarded )
  201. return discarded;
  202. }
  203. return 0;
  204. }
  205. /**
  206. * Discard all cached data
  207. *
  208. */
  209. static void discard_all_cache ( void ) {
  210. unsigned int discarded;
  211. do {
  212. discarded = discard_cache();
  213. } while ( discarded );
  214. }
/**
 * Allocate a memory block
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @v offset		Offset from physical alignment
 * @ret ptr		Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested.  No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
	struct memory_block *block;
	size_t align_mask;
	size_t pre_size;
	ssize_t post_size;
	struct memory_block *pre;
	struct memory_block *post;
	struct memory_block *ptr;

	/* Sanity checks */
	assert ( size != 0 );
	assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );

	/* Expose and verify the free list before walking it */
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to multiple of MIN_MEMBLOCK_SIZE and
	 * calculate alignment mask.
	 */
	size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
	align_mask = ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 );

	DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
		size, align, offset );
	while ( 1 ) {
		/* Search through blocks for the first one with enough space */
		list_for_each_entry ( block, &free_blocks, list ) {
			/* pre_size: padding needed within this block to
			 * reach the requested physical alignment+offset.
			 */
			pre_size = ( ( offset - virt_to_phys ( block ) )
				     & align_mask );
			/* post_size is signed: negative means this block
			 * is too small to satisfy the request.
			 */
			post_size = ( block->size - pre_size - size );
			if ( post_size >= 0 ) {
				/* Split block into pre-block, block, and
				 * post-block.  After this split, the "pre"
				 * block is the one currently linked into the
				 * free list.
				 */
				pre = block;
				block = ( ( ( void * ) pre ) + pre_size );
				post = ( ( ( void * ) block ) + size );
				DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n",
					pre, ( ( ( void * ) pre ) + pre->size ),
					pre, block, post,
					( ( ( void * ) pre ) + pre->size ) );
				/* If there is a "post" block, add it in to
				 * the free list.  Leak it if it is too small
				 * (which can happen only at the very end of
				 * the heap).
				 */
				if ( (size_t) post_size >= MIN_MEMBLOCK_SIZE ) {
					VALGRIND_MAKE_MEM_DEFINED ( post,
							     sizeof ( *post ) );
					post->size = post_size;
					list_add ( &post->list, &pre->list );
				}
				/* Shrink "pre" block, leaving the main block
				 * isolated and no longer part of the free
				 * list.
				 */
				pre->size = pre_size;
				/* If there is no "pre" block, remove it from
				 * the list.  Also remove it (i.e. leak it) if
				 * it is too small, which can happen only at
				 * the very start of the heap.
				 */
				if ( pre_size < MIN_MEMBLOCK_SIZE )
					list_del ( &pre->list );
				/* Update total free memory */
				freemem -= size;
				/* Return allocated block */
				DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
					( ( ( void * ) block ) + size ) );
				ptr = block;
				goto done;
			}
		}

		/* Try discarding some cached data to free up memory */
		if ( ! discard_cache() ) {
			/* Nothing available to discard */
			DBGC ( &heap, "Failed to allocate %#zx (aligned "
			       "%#zx)\n", size, align );
			ptr = NULL;
			goto done;
		}
	}

 done:
	/* Re-verify and hide the free list again before returning */
	check_blocks();
	valgrind_make_blocks_noaccess();
	return ptr;
}
/**
 * Free a memory block
 *
 * @v ptr		Memory allocated by alloc_memblock(), or NULL
 * @v size		Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
	struct memory_block *freeing;
	struct memory_block *block;
	struct memory_block *tmp;
	ssize_t gap_before;
	/* -1 ensures the block is appended at the list tail when the
	 * free list is empty (the merge loop below never runs).
	 */
	ssize_t gap_after = -1;

	/* Allow for ptr==NULL */
	if ( ! ptr )
		return;

	/* Expose and verify the free list before modifying it */
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to match actual size that alloc_memblock()
	 * would have used.
	 */
	assert ( size != 0 );
	size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
	freeing = ptr;
	VALGRIND_MAKE_MEM_DEFINED ( freeing, sizeof ( *freeing ) );
	DBGC2 ( &heap, "Freeing [%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + size ) );

	/* Check that this block does not overlap the free list */
	if ( ASSERTING ) {
		list_for_each_entry ( block, &free_blocks, list ) {
			if ( ( ( ( void * ) block ) <
			       ( ( void * ) freeing + size ) ) &&
			     ( ( void * ) freeing <
			       ( ( void * ) block + block->size ) ) ) {
				/* NOTE(review): assert(0) fires before the
				 * DBGC message; the message is reached only
				 * in builds where assert() is a no-op.
				 */
				assert ( 0 );
				DBGC ( &heap, "Double free of [%p,%p) "
				       "overlapping [%p,%p) detected from %p\n",
				       freeing,
				       ( ( ( void * ) freeing ) + size ), block,
				       ( ( void * ) block + block->size ),
				       __builtin_return_address ( 0 ) );
			}
		}
	}

	/* Insert/merge into free list */
	freeing->size = size;
	list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {
		/* Calculate gaps before and after the "freeing" block */
		gap_before = ( ( ( void * ) freeing ) -
			       ( ( ( void * ) block ) + block->size ) );
		gap_after = ( ( ( void * ) block ) -
			      ( ( ( void * ) freeing ) + freeing->size ) );
		/* Merge with immediately preceding block, if possible */
		if ( gap_before == 0 ) {
			DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
				( ( ( void * ) block ) + block->size ), freeing,
				( ( ( void * ) freeing ) + freeing->size ),
				block,
				( ( ( void * ) freeing ) + freeing->size ) );
			block->size += size;
			list_del ( &block->list );
			freeing = block;
		}
		/* Stop processing as soon as we reach a following block */
		if ( gap_after >= 0 )
			break;
	}

	/* Insert before the immediately following block.  If
	 * possible, merge the following block into the "freeing"
	 * block.
	 */
	DBGC2 ( &heap, "[%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + freeing->size ) );
	list_add_tail ( &freeing->list, &block->list );
	if ( gap_after == 0 ) {
		DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
			( ( ( void * ) freeing ) + freeing->size ), block,
			( ( ( void * ) block ) + block->size ), freeing,
			( ( ( void * ) block ) + block->size ) );
		freeing->size += block->size;
		list_del ( &block->list );
	}

	/* Update free memory counter */
	freemem += size;

	/* Re-verify and hide the free list again */
	check_blocks();
	valgrind_make_blocks_noaccess();
}
/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary.  If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		/* Allow room for the hidden size header preceding the
		 * caller-visible data region.
		 */
		new_total_size = ( new_size +
				   offsetof ( struct autosized_block, data ) );
		new_block = alloc_memblock ( new_total_size, 1, 0 );
		if ( ! new_block )
			return NULL;
		/* Briefly expose the header to record the size, then
		 * hide it again from Valgrind.
		 */
		VALGRIND_MAKE_MEM_UNDEFINED ( new_block,
				offsetof ( struct autosized_block, data ) );
		new_block->size = new_total_size;
		VALGRIND_MAKE_MEM_NOACCESS ( new_block,
				offsetof ( struct autosized_block, data ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it.  Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		VALGRIND_MAKE_MEM_DEFINED ( old_block,
				offsetof ( struct autosized_block, data ) );
		old_total_size = old_block->size;
		assert ( old_total_size != 0 );
		old_size = ( old_total_size -
			     offsetof ( struct autosized_block, data ) );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		free_memblock ( old_block, old_total_size );
		VALGRIND_MAKE_MEM_NOACCESS ( old_block,
				offsetof ( struct autosized_block, data ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
	}

	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
	return new_ptr;
}
  468. /**
  469. * Allocate memory
  470. *
  471. * @v size Requested size
  472. * @ret ptr Memory, or NULL
  473. *
  474. * Allocates memory with no particular alignment requirement. @c ptr
  475. * will be aligned to at least a multiple of sizeof(void*).
  476. */
  477. void * malloc ( size_t size ) {
  478. void *ptr;
  479. ptr = realloc ( NULL, size );
  480. if ( ASSERTED ) {
  481. DBGC ( &heap, "Possible memory corruption detected from %p\n",
  482. __builtin_return_address ( 0 ) );
  483. }
  484. return ptr;
  485. }
/**
 * Free memory
 *
 * @v ptr		Memory allocated by malloc(), or NULL
 *
 * Memory allocated with malloc_dma() cannot be freed with free(); it
 * must be freed with free_dma() instead.
 *
 * If @c ptr is NULL, no action is taken.
 */
void free ( void *ptr ) {
	/* Freeing is implemented as a zero-size reallocation */
	realloc ( ptr, 0 );
	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}
}
  503. /**
  504. * Allocate cleared memory
  505. *
  506. * @v size Requested size
  507. * @ret ptr Allocated memory
  508. *
  509. * Allocate memory as per malloc(), and zero it.
  510. *
  511. * This function name is non-standard, but pretty intuitive.
  512. * zalloc(size) is always equivalent to calloc(1,size)
  513. */
  514. void * zalloc ( size_t size ) {
  515. void *data;
  516. data = malloc ( size );
  517. if ( data )
  518. memset ( data, 0, size );
  519. if ( ASSERTED ) {
  520. DBGC ( &heap, "Possible memory corruption detected from %p\n",
  521. __builtin_return_address ( 0 ) );
  522. }
  523. return data;
  524. }
/**
 * Add memory to allocation pool
 *
 * @v start		Start address
 * @v len		Length of memory
 *
 * Adds a block of memory [start,start+len) to the allocation pool.
 * This is a one-way operation; there is no way to reclaim this
 * memory.
 *
 * @c start must be aligned to at least a multiple of sizeof(void*).
 */
void mpopulate ( void *start, size_t len ) {
	/* Prevent free_memblock() from rounding up len beyond the end
	 * of what we were actually given...
	 */
	free_memblock ( start, ( len & ~( MIN_MEMBLOCK_SIZE - 1 ) ) );
}
/**
 * Initialise the heap
 *
 * Hides the static heap from Valgrind, then donates the whole of it
 * to the allocation pool.
 */
static void init_heap ( void ) {
	VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
	mpopulate ( heap, sizeof ( heap ) );
}

/** Memory allocator initialisation function */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.initialise = init_heap,
};
/**
 * Discard all cached data on shutdown
 *
 * @v booting		Unused; present only to match the shutdown
 *			hook signature
 */
static void shutdown_cache ( int booting __unused ) {
	discard_all_cache();
}

/** Memory allocator shutdown function */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.shutdown = shutdown_cache,
};
#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 * Debug aid only; compiled out by default via "#if 0".
 */
void mdumpfree ( void ) {
	struct memory_block *block;

	printf ( "Free block list:\n" );
	list_for_each_entry ( block, &free_blocks, list ) {
		printf ( "[%p,%p] (size %#zx)\n", block,
			 ( ( ( void * ) block ) + block->size ), block->size );
	}
}
#endif