You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706
  1. /*
  2. * Copyright (C) 2006 Michael Brown <mbrown@fensystems.co.uk>.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License as
  6. * published by the Free Software Foundation; either version 2 of the
  7. * License, or any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful, but
  10. * WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  17. * 02110-1301, USA.
  18. *
  19. * You can also choose to distribute this program under the terms of
  20. * the Unmodified Binary Distribution Licence (as given in the file
  21. * COPYING.UBDL), provided that you have satisfied its requirements.
  22. */
  23. FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  24. #include <stddef.h>
  25. #include <stdint.h>
  26. #include <string.h>
  27. #include <strings.h>
  28. #include <ipxe/io.h>
  29. #include <ipxe/list.h>
  30. #include <ipxe/init.h>
  31. #include <ipxe/refcnt.h>
  32. #include <ipxe/malloc.h>
  33. #include <valgrind/memcheck.h>
  34. /** @file
  35. *
  36. * Dynamic memory allocation
  37. *
  38. */
/** A free block of memory */
struct memory_block {
	/** Size of this block (including this header) */
	size_t size;
	/** Padding
	 *
	 * This padding exists to cover the "count" field of a
	 * reference counter, in the common case where a reference
	 * counter is the first element of a dynamically-allocated
	 * object.  It avoids clobbering the "count" field as soon as
	 * the memory is freed, and so allows for the possibility of
	 * detecting reference counting errors.
	 */
	char pad[ offsetof ( struct refcnt, count ) +
		  sizeof ( ( ( struct refcnt * ) NULL )->count ) ];
	/** List of free blocks */
	struct list_head list;
};

/** Minimum allocated block size
 *
 * Rounded up to the nearest power of two that can hold a struct
 * memory_block, so that any allocation is large enough to carry the
 * free-list header once it is freed.
 */
#define MIN_MEMBLOCK_SIZE \
	( ( size_t ) ( 1 << ( fls ( sizeof ( struct memory_block ) - 1 ) ) ) )
/** A block of allocated memory complete with size information */
struct autosized_block {
	/** Size of this block (including this size-tracking header) */
	size_t size;
	/** Remaining data
	 *
	 * Zero-length array used as a flexible array member (GNU
	 * extension); the caller-visible pointer is &data.
	 */
	char data[0];
};
/**
 * Address for zero-length memory blocks
 *
 * @c malloc(0) or @c realloc(ptr,0) will return the special value @c
 * NOWHERE.  Calling @c free(NOWHERE) will have no effect.
 *
 * This is consistent with the ANSI C standards, which state that
 * "either NULL or a pointer suitable to be passed to free()" must be
 * returned in these cases.  Using a special non-NULL value means that
 * the caller can take a NULL return value to indicate failure,
 * without first having to check for a requested size of zero.
 *
 * Code outside of malloc.c does not ever need to refer to the actual
 * value of @c NOWHERE; this is an internal definition.
 */
#define NOWHERE ( ( void * ) ~( ( intptr_t ) 0 ) )

/** List of free memory blocks */
static LIST_HEAD ( free_blocks );

/** Total amount of free memory */
size_t freemem;

/** Total amount of used memory */
size_t usedmem;

/** Maximum amount of used memory (high-water mark) */
size_t maxusedmem;

/**
 * Heap size
 *
 * Currently fixed at 512kB.
 */
#define HEAP_SIZE ( 512 * 1024 )

/** The heap itself (aligned for pointer-sized objects) */
static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
/**
 * Mark all blocks in free list as defined
 *
 * No-op unless running under Valgrind.  Must be called before any
 * code that walks the free list, since blocks are normally left
 * marked as inaccessible.
 */
static inline void valgrind_make_blocks_defined ( void ) {
	struct memory_block *block;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * defined.  Some contortions are necessary to avoid errors
	 * from list_check().
	 */

	/* Mark block list itself as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks, sizeof ( free_blocks ) );

	/* Mark areas accessed by list_check() as defined */
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.prev->next,
				    sizeof ( free_blocks.prev->next ) );
	VALGRIND_MAKE_MEM_DEFINED ( free_blocks.next,
				    sizeof ( *free_blocks.next ) );
	VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->next->prev,
				    sizeof ( free_blocks.next->next->prev ) );

	/* Mark each block in list as defined */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark block as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );

		/* Mark areas accessed by list_check() as defined */
		VALGRIND_MAKE_MEM_DEFINED ( block->list.next,
					    sizeof ( *block->list.next ) );
		VALGRIND_MAKE_MEM_DEFINED ( &block->list.next->next->prev,
					sizeof ( block->list.next->next->prev ));
	}
}
/**
 * Mark all blocks in free list as inaccessible
 *
 * No-op unless running under Valgrind.  Inverse of
 * valgrind_make_blocks_defined(); called after free-list
 * manipulation is complete so that stray accesses are reported.
 */
static inline void valgrind_make_blocks_noaccess ( void ) {
	struct memory_block *block;
	struct memory_block *prev = NULL;

	/* Do nothing unless running under Valgrind */
	if ( RUNNING_ON_VALGRIND <= 0 )
		return;

	/* Traverse free block list, marking each block structure as
	 * inaccessible.  Some contortions are necessary to avoid
	 * errors from list_check().
	 */

	/* Mark each block in list as inaccessible */
	list_for_each_entry ( block, &free_blocks, list ) {

		/* Mark previous block (if any) as inaccessible. (Current
		 * block will be accessed by list_check().)
		 */
		if ( prev )
			VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );
		prev = block;

		/* At the end of the list, list_check() will end up
		 * accessing the first list item.  Temporarily mark
		 * this area as defined.
		 */
		VALGRIND_MAKE_MEM_DEFINED ( &free_blocks.next->prev,
					    sizeof ( free_blocks.next->prev ) );
	}
	/* Mark last block (if any) as inaccessible */
	if ( prev )
		VALGRIND_MAKE_MEM_NOACCESS ( prev, sizeof ( *prev ) );

	/* Mark as inaccessible the area that was temporarily marked
	 * as defined to avoid errors from list_check().
	 */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks.next->prev,
				     sizeof ( free_blocks.next->prev ) );

	/* Mark block list itself as inaccessible */
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
}
  171. /**
  172. * Check integrity of the blocks in the free list
  173. *
  174. */
  175. static inline void check_blocks ( void ) {
  176. struct memory_block *block;
  177. struct memory_block *prev = NULL;
  178. if ( ! ASSERTING )
  179. return;
  180. list_for_each_entry ( block, &free_blocks, list ) {
  181. /* Check that list structure is intact */
  182. list_check ( &block->list );
  183. /* Check that block size is not too small */
  184. assert ( block->size >= sizeof ( *block ) );
  185. assert ( block->size >= MIN_MEMBLOCK_SIZE );
  186. /* Check that block does not wrap beyond end of address space */
  187. assert ( ( ( void * ) block + block->size ) >
  188. ( ( void * ) block ) );
  189. /* Check that blocks remain in ascending order, and
  190. * that adjacent blocks have been merged.
  191. */
  192. if ( prev ) {
  193. assert ( ( ( void * ) block ) > ( ( void * ) prev ) );
  194. assert ( ( ( void * ) block ) >
  195. ( ( ( void * ) prev ) + prev->size ) );
  196. }
  197. prev = block;
  198. }
  199. }
  200. /**
  201. * Discard some cached data
  202. *
  203. * @ret discarded Number of cached items discarded
  204. */
  205. static unsigned int discard_cache ( void ) {
  206. struct cache_discarder *discarder;
  207. unsigned int discarded;
  208. for_each_table_entry ( discarder, CACHE_DISCARDERS ) {
  209. discarded = discarder->discard();
  210. if ( discarded )
  211. return discarded;
  212. }
  213. return 0;
  214. }
  215. /**
  216. * Discard all cached data
  217. *
  218. */
  219. static void discard_all_cache ( void ) {
  220. unsigned int discarded;
  221. do {
  222. discarded = discard_cache();
  223. } while ( discarded );
  224. }
/**
 * Allocate a memory block
 *
 * @v size		Requested size
 * @v align		Physical alignment
 * @v offset		Offset from physical alignment
 * @ret ptr		Memory block, or NULL
 *
 * Allocates a memory block @b physically aligned as requested.  No
 * guarantees are provided for the alignment of the virtual address.
 *
 * @c align must be a power of two.  @c size may not be zero.
 */
void * alloc_memblock ( size_t size, size_t align, size_t offset ) {
	struct memory_block *block;
	size_t align_mask;
	size_t actual_size;
	size_t pre_size;
	size_t post_size;
	struct memory_block *pre;
	struct memory_block *post;
	unsigned int discarded;
	void *ptr;

	/* Sanity checks */
	assert ( size != 0 );
	assert ( ( align == 0 ) || ( ( align & ( align - 1 ) ) == 0 ) );
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to multiple of MIN_MEMBLOCK_SIZE and
	 * calculate alignment mask.
	 */
	actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
			~( MIN_MEMBLOCK_SIZE - 1 ) );
	if ( ! actual_size ) {
		/* The requested size is not permitted to be zero.  A
		 * zero result at this point indicates that either the
		 * original requested size was zero, or that unsigned
		 * integer overflow has occurred.
		 */
		ptr = NULL;
		goto done;
	}
	assert ( actual_size >= size );
	/* Combined mask: the result must satisfy both the caller's
	 * alignment and the MIN_MEMBLOCK_SIZE granularity.
	 */
	align_mask = ( ( align - 1 ) | ( MIN_MEMBLOCK_SIZE - 1 ) );

	DBGC2 ( &heap, "Allocating %#zx (aligned %#zx+%zx)\n",
		size, align, offset );
	while ( 1 ) {
		/* Search through blocks for the first one with enough space */
		list_for_each_entry ( block, &free_blocks, list ) {
			/* Bytes to skip so that (block + pre_size)
			 * lands on the requested physical alignment.
			 */
			pre_size = ( ( offset - virt_to_phys ( block ) )
				     & align_mask );
			if ( ( block->size < pre_size ) ||
			     ( ( block->size - pre_size ) < actual_size ) )
				continue;
			post_size = ( block->size - pre_size - actual_size );
			/* Split block into pre-block, block, and
			 * post-block.  After this split, the "pre"
			 * block is the one currently linked into the
			 * free list.
			 */
			pre   = block;
			block = ( ( ( void * ) pre   ) + pre_size );
			post  = ( ( ( void * ) block ) + actual_size );
			DBGC2 ( &heap, "[%p,%p) -> [%p,%p) + [%p,%p)\n", pre,
				( ( ( void * ) pre ) + pre->size ), pre, block,
				post, ( ( ( void * ) pre ) + pre->size ) );
			/* If there is a "post" block, add it in to
			 * the free list.  Leak it if it is too small
			 * (which can happen only at the very end of
			 * the heap).
			 */
			if ( post_size >= MIN_MEMBLOCK_SIZE ) {
				VALGRIND_MAKE_MEM_UNDEFINED ( post,
							      sizeof ( *post ));
				post->size = post_size;
				list_add ( &post->list, &pre->list );
			}
			/* Shrink "pre" block, leaving the main block
			 * isolated and no longer part of the free
			 * list.
			 */
			pre->size = pre_size;
			/* If there is no "pre" block, remove it from
			 * the list.  Also remove it (i.e. leak it) if
			 * it is too small, which can happen only at
			 * the very start of the heap.
			 */
			if ( pre_size < MIN_MEMBLOCK_SIZE ) {
				list_del ( &pre->list );
				VALGRIND_MAKE_MEM_NOACCESS ( pre,
							     sizeof ( *pre ) );
			}
			/* Update memory usage statistics */
			freemem -= actual_size;
			usedmem += actual_size;
			if ( usedmem > maxusedmem )
				maxusedmem = usedmem;
			/* Return allocated block */
			DBGC2 ( &heap, "Allocated [%p,%p)\n", block,
				( ( ( void * ) block ) + size ) );
			ptr = block;
			VALGRIND_MAKE_MEM_UNDEFINED ( ptr, size );
			goto done;
		}

		/* Try discarding some cached data to free up memory */
		DBGC ( &heap, "Attempting discard for %#zx (aligned %#zx+%zx), "
		       "used %zdkB\n", size, align, offset, ( usedmem >> 10 ) );
		valgrind_make_blocks_noaccess();
		discarded = discard_cache();
		valgrind_make_blocks_defined();
		check_blocks();
		if ( ! discarded ) {
			/* Nothing available to discard */
			DBGC ( &heap, "Failed to allocate %#zx (aligned "
			       "%#zx)\n", size, align );
			ptr = NULL;
			goto done;
		}
	}

 done:
	check_blocks();
	valgrind_make_blocks_noaccess();
	return ptr;
}
/**
 * Free a memory block
 *
 * @v ptr		Memory allocated by alloc_memblock(), or NULL
 * @v size		Size of the memory
 *
 * If @c ptr is NULL, no action is taken.
 */
void free_memblock ( void *ptr, size_t size ) {
	struct memory_block *freeing;
	struct memory_block *block;
	struct memory_block *tmp;
	size_t actual_size;
	ssize_t gap_before;
	ssize_t gap_after = -1;

	/* Allow for ptr==NULL */
	if ( ! ptr )
		return;
	VALGRIND_MAKE_MEM_NOACCESS ( ptr, size );

	/* Sanity checks */
	valgrind_make_blocks_defined();
	check_blocks();

	/* Round up size to match actual size that alloc_memblock()
	 * would have used.
	 */
	assert ( size != 0 );
	actual_size = ( ( size + MIN_MEMBLOCK_SIZE - 1 ) &
			~( MIN_MEMBLOCK_SIZE - 1 ) );
	freeing = ptr;
	VALGRIND_MAKE_MEM_UNDEFINED ( freeing, sizeof ( *freeing ) );
	DBGC2 ( &heap, "Freeing [%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + size ) );

	/* Check that this block does not overlap the free list
	 * (which would indicate a double free).
	 */
	if ( ASSERTING ) {
		list_for_each_entry ( block, &free_blocks, list ) {
			if ( ( ( ( void * ) block ) <
			       ( ( void * ) freeing + actual_size ) ) &&
			     ( ( void * ) freeing <
			       ( ( void * ) block + block->size ) ) ) {
				assert ( 0 );
				DBGC ( &heap, "Double free of [%p,%p) "
				       "overlapping [%p,%p) detected from %p\n",
				       freeing,
				       ( ( ( void * ) freeing ) + size ), block,
				       ( ( void * ) block + block->size ),
				       __builtin_return_address ( 0 ) );
			}
		}
	}

	/* Insert/merge into free list */
	freeing->size = actual_size;
	list_for_each_entry_safe ( block, tmp, &free_blocks, list ) {

		/* Calculate gaps before and after the "freeing" block */
		gap_before = ( ( ( void * ) freeing ) -
			       ( ( ( void * ) block ) + block->size ) );
		gap_after = ( ( ( void * ) block ) -
			      ( ( ( void * ) freeing ) + freeing->size ) );

		/* Merge with immediately preceding block, if possible */
		if ( gap_before == 0 ) {
			DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", block,
				( ( ( void * ) block ) + block->size ), freeing,
				( ( ( void * ) freeing ) + freeing->size ),
				block,
				( ( ( void * ) freeing ) + freeing->size ) );
			block->size += actual_size;
			/* Temporarily unlink; re-added below via
			 * list_add_tail() on the merged block.
			 */
			list_del ( &block->list );
			VALGRIND_MAKE_MEM_NOACCESS ( freeing,
						     sizeof ( *freeing ) );
			freeing = block;
		}

		/* Stop processing as soon as we reach a following block */
		if ( gap_after >= 0 )
			break;
	}

	/* Insert before the immediately following block.  If
	 * possible, merge the following block into the "freeing"
	 * block.
	 */
	DBGC2 ( &heap, "[%p,%p)\n",
		freeing, ( ( ( void * ) freeing ) + freeing->size ) );
	list_add_tail ( &freeing->list, &block->list );
	if ( gap_after == 0 ) {
		DBGC2 ( &heap, "[%p,%p) + [%p,%p) -> [%p,%p)\n", freeing,
			( ( ( void * ) freeing ) + freeing->size ), block,
			( ( ( void * ) block ) + block->size ), freeing,
			( ( ( void * ) block ) + block->size ) );
		freeing->size += block->size;
		list_del ( &block->list );
		VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
	}

	/* Update memory usage statistics */
	freemem += actual_size;
	usedmem -= actual_size;

	check_blocks();
	valgrind_make_blocks_noaccess();
}
/**
 * Reallocate memory
 *
 * @v old_ptr		Memory previously allocated by malloc(), or NULL
 * @v new_size		Requested size
 * @ret new_ptr		Allocated memory, or NULL
 *
 * Allocates memory with no particular alignment requirement.  @c
 * new_ptr will be aligned to at least a multiple of sizeof(void*).
 * If @c old_ptr is non-NULL, then the contents of the newly allocated
 * memory will be the same as the contents of the previously allocated
 * memory, up to the minimum of the old and new sizes.  The old memory
 * will be freed.
 *
 * If allocation fails the previously allocated block is left
 * untouched and NULL is returned.
 *
 * Calling realloc() with a new size of zero is a valid way to free a
 * memory block.
 */
void * realloc ( void *old_ptr, size_t new_size ) {
	struct autosized_block *old_block;
	struct autosized_block *new_block;
	size_t old_total_size;
	size_t new_total_size;
	size_t old_size;
	void *new_ptr = NOWHERE;

	/* Allocate new memory if necessary.  If allocation fails,
	 * return without touching the old block.
	 */
	if ( new_size ) {
		new_total_size = ( new_size +
				   offsetof ( struct autosized_block, data ) );
		/* Guard against unsigned overflow when adding the
		 * size-tracking header.
		 */
		if ( new_total_size < new_size )
			return NULL;
		new_block = alloc_memblock ( new_total_size, 1, 0 );
		if ( ! new_block )
			return NULL;
		new_block->size = new_total_size;
		VALGRIND_MAKE_MEM_NOACCESS ( &new_block->size,
					     sizeof ( new_block->size ) );
		new_ptr = &new_block->data;
		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
	}

	/* Copy across relevant part of the old data region (if any),
	 * then free it.  Note that at this point either (a) new_ptr
	 * is valid, or (b) new_size is 0; either way, the memcpy() is
	 * valid.
	 */
	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
		old_block = container_of ( old_ptr, struct autosized_block,
					   data );
		VALGRIND_MAKE_MEM_DEFINED ( &old_block->size,
					    sizeof ( old_block->size ) );
		old_total_size = old_block->size;
		assert ( old_total_size != 0 );
		old_size = ( old_total_size -
			     offsetof ( struct autosized_block, data ) );
		memcpy ( new_ptr, old_ptr,
			 ( ( old_size < new_size ) ? old_size : new_size ) );
		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
		free_memblock ( old_block, old_total_size );
	}

	/* Report any assertion failures recorded during reallocation */
	if ( ASSERTED ) {
		DBGC ( &heap, "Possible memory corruption detected from %p\n",
		       __builtin_return_address ( 0 ) );
	}

	return new_ptr;
}
  514. /**
  515. * Allocate memory
  516. *
  517. * @v size Requested size
  518. * @ret ptr Memory, or NULL
  519. *
  520. * Allocates memory with no particular alignment requirement. @c ptr
  521. * will be aligned to at least a multiple of sizeof(void*).
  522. */
  523. void * malloc ( size_t size ) {
  524. void *ptr;
  525. ptr = realloc ( NULL, size );
  526. if ( ASSERTED ) {
  527. DBGC ( &heap, "Possible memory corruption detected from %p\n",
  528. __builtin_return_address ( 0 ) );
  529. }
  530. return ptr;
  531. }
  532. /**
  533. * Free memory
  534. *
  535. * @v ptr Memory allocated by malloc(), or NULL
  536. *
  537. * Memory allocated with malloc_dma() cannot be freed with free(); it
  538. * must be freed with free_dma() instead.
  539. *
  540. * If @c ptr is NULL, no action is taken.
  541. */
  542. void free ( void *ptr ) {
  543. realloc ( ptr, 0 );
  544. if ( ASSERTED ) {
  545. DBGC ( &heap, "Possible memory corruption detected from %p\n",
  546. __builtin_return_address ( 0 ) );
  547. }
  548. }
  549. /**
  550. * Allocate cleared memory
  551. *
  552. * @v size Requested size
  553. * @ret ptr Allocated memory
  554. *
  555. * Allocate memory as per malloc(), and zero it.
  556. *
  557. * This function name is non-standard, but pretty intuitive.
  558. * zalloc(size) is always equivalent to calloc(1,size)
  559. */
  560. void * zalloc ( size_t size ) {
  561. void *data;
  562. data = malloc ( size );
  563. if ( data )
  564. memset ( data, 0, size );
  565. if ( ASSERTED ) {
  566. DBGC ( &heap, "Possible memory corruption detected from %p\n",
  567. __builtin_return_address ( 0 ) );
  568. }
  569. return data;
  570. }
  571. /**
  572. * Add memory to allocation pool
  573. *
  574. * @v start Start address
  575. * @v end End address
  576. *
  577. * Adds a block of memory [start,end) to the allocation pool. This is
  578. * a one-way operation; there is no way to reclaim this memory.
  579. *
  580. * @c start must be aligned to at least a multiple of sizeof(void*).
  581. */
  582. void mpopulate ( void *start, size_t len ) {
  583. /* Prevent free_memblock() from rounding up len beyond the end
  584. * of what we were actually given...
  585. */
  586. len &= ~( MIN_MEMBLOCK_SIZE - 1 );
  587. /* Add to allocation pool */
  588. free_memblock ( start, len );
  589. /* Fix up memory usage statistics */
  590. usedmem += len;
  591. }
/**
 * Initialise the heap
 *
 * Marks the static heap and the free-list head as inaccessible under
 * Valgrind, then donates the entire heap to the allocation pool.
 */
static void init_heap ( void ) {
	/* Under Valgrind, start with everything inaccessible; the
	 * allocator marks regions defined/undefined as it manages
	 * them.
	 */
	VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
	VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks, sizeof ( free_blocks ) );
	/* Populate the allocation pool with the whole static heap */
	mpopulate ( heap, sizeof ( heap ) );
}
/** Memory allocator initialisation function (runs at INIT_EARLY) */
struct init_fn heap_init_fn __init_fn ( INIT_EARLY ) = {
	.initialise = init_heap,
};
/**
 * Discard all cached data on shutdown
 *
 * @v booting		Shutting down in order to boot (unused)
 */
static void shutdown_cache ( int booting __unused ) {
	/* Release everything held by cache discarders */
	discard_all_cache();
	/* Report the peak heap usage for debugging */
	DBGC ( &heap, "Maximum heap usage %zdkB\n", ( maxusedmem >> 10 ) );
}
/** Memory allocator shutdown function (runs at STARTUP_EARLY) */
struct startup_fn heap_startup_fn __startup_fn ( STARTUP_EARLY ) = {
	.shutdown = shutdown_cache,
};
#if 0
#include <stdio.h>
/**
 * Dump free block list
 *
 * Debug-only helper (currently compiled out): prints the address
 * range and size of every block on the free list.
 */
void mdumpfree ( void ) {
	struct memory_block *block;

	printf ( "Free block list:\n" );
	list_for_each_entry ( block, &free_blocks, list ) {
		printf ( "[%p,%p] (size %#zx)\n", block,
			 ( ( ( void * ) block ) + block->size ), block->size );
	}
}
#endif