
[linux] Make malloc and linux_umalloc valgrindable

Make the allocators used by malloc and linux_umalloc valgrindable.
Include valgrind headers in the codebase to avoid a build dependency
on valgrind.

Signed-off-by: Piotr Jaroszyński <p.jaroszynski@gmail.com>
Modified-by: Michael Brown <mcb30@ipxe.org>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
Piotr Jaroszyński authored 14 years ago · parent commit b604e8a388 · tags/v1.20.1
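
The technique used throughout this commit is valgrind's client-request API: the allocator tells memcheck which bytes are off-limits, which are allocated but uninitialised, and which are fully defined.  Because valgrind.h compiles every request to a no-op when NVALGRIND is defined, carrying the headers in-tree adds no build dependency.  As a minimal standalone sketch of the pattern (illustrative names, not iPXE code):

	/* Toy pool allocator annotated for memcheck: a minimal sketch,
	 * not part of this commit.  With -DNVALGRIND (the firmware
	 * builds) every macro below compiles to nothing. */
	#include <stddef.h>
	#include <valgrind/memcheck.h>

	static char arena[4096];
	static size_t arena_used;

	/* Hide the whole arena; only blocks handed out become accessible */
	static void pool_init ( void ) {
		arena_used = 0;
		VALGRIND_MAKE_MEM_NOACCESS ( arena, sizeof ( arena ) );
	}

	static void * pool_alloc ( size_t size ) {
		void *ptr;

		if ( ( sizeof ( arena ) - arena_used ) < size )
			return NULL;
		ptr = &arena[arena_used];
		arena_used += size;
		/* Register a heap-style block: addressable but
		 * undefined, and included in leak checking */
		VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0 /* redzone */,
					    0 /* not zeroed */ );
		return ptr;
	}

	static void pool_free ( void *ptr ) {
		/* Block becomes unaddressable again; later accesses
		 * are reported as use-after-free */
		VALGRIND_FREELIKE_BLOCK ( ptr, 0 /* redzone */ );
	}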

src/arch/i386/scripts/linux.lds (+4 -3)

@@ -42,9 +42,12 @@
 	/*
 	 * The data section
 	 *
+	 * Adjust the address for the data segment.  We want to adjust up to
+	 * the same address within the page on the next page up.
 	 */
 
-	. = ALIGN ( _max_align );
+	. = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1));
+	. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
 	.data : {
 		_data = .;
 		*(.data)
@@ -91,8 +94,6 @@
 		*(.comment.*)
 		*(.note)
 		*(.note.*)
-		*(.eh_frame)
-		*(.eh_frame.*)
 		*(.rel)
 		*(.rel.*)
 		*(.discard)
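
For reference, the GNU ld manual documents DATA_SEGMENT_ALIGN(maxpagesize, commonpagesize) as evaluating to either

	(ALIGN(maxpagesize) + (. & (maxpagesize - 1)))

or

	(ALIGN(maxpagesize) + ((. + commonpagesize - 1) & (maxpagesize - commonpagesize)))

whichever saves more runtime memory.  The preceding ALIGN expression is the stock idiom from ld's built-in ELF linker scripts for starting the data segment, so this change presumably gives the linux userspace binary a conventionally laid-out, page-aligned data segment in place of the old _max_align alignment.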

src/arch/x86/Makefile (+3 -0)

@@ -10,3 +10,6 @@
 
 # breaks building some of the linux-related objects
 CFLAGS		+= -Ulinux
+
+# disable valgrind
+CFLAGS		+= -DNVALGRIND

src/arch/x86/Makefile.linux (+3 -0)

@@ -1,5 +1,8 @@
 MEDIA = linux
 
+# enable valgrind
+CFLAGS += -UNVALGRIND
+
 INCDIRS += arch/x86/include/linux
 SRCDIRS += interface/linux
 SRCDIRS += drivers/linux
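
These two Makefile changes are two sides of one switch: when NVALGRIND is defined, valgrind.h compiles every client request to its compile-time default and RUNNING_ON_VALGRIND becomes the constant 0.  The annotations therefore cost nothing in normal firmware builds and only become live code in the linux userspace build, where code can cheaply guard valgrind-only bookkeeping:

	/* Dead code under -DNVALGRIND; a real runtime check under -UNVALGRIND */
	if ( RUNNING_ON_VALGRIND > 0 ) {
		/* extra work done only for memcheck's benefit */
	}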

src/arch/x86/include/valgrind/memcheck.h (new file, +309 -0)

@@ -0,0 +1,309 @@
+
+/*
+   ----------------------------------------------------------------
+
+   Notice that the following BSD-style license applies to this one
+   file (memcheck.h) only.  The rest of Valgrind is licensed under the
+   terms of the GNU General Public License, version 2, unless
+   otherwise indicated.  See the COPYING file in the source
+   distribution for details.
+
+   ----------------------------------------------------------------
+
+   This file is part of MemCheck, a heavyweight Valgrind tool for
+   detecting memory errors.
+
+   Copyright (C) 2000-2010 Julian Seward.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions
+   are met:
+
+   1. Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+
+   2. The origin of this software must not be misrepresented; you must
+      not claim that you wrote the original software.  If you use this
+      software in a product, an acknowledgment in the product
+      documentation would be appreciated but is not required.
+
+   3. Altered source versions must be plainly marked as such, and must
+      not be misrepresented as being the original software.
+
+   4. The name of the author may not be used to endorse or promote
+      products derived from this software without specific prior written
+      permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+   ----------------------------------------------------------------
+
+   Notice that the above BSD-style license applies to this one file
+   (memcheck.h) only.  The entire rest of Valgrind is licensed under
+   the terms of the GNU General Public License, version 2.  See the
+   COPYING file in the source distribution for details.
+
+   ----------------------------------------------------------------
+*/
+
+
+#ifndef __MEMCHECK_H
+#define __MEMCHECK_H
+
+
+/* This file is for inclusion into client (your!) code.
+
+   You can use these macros to manipulate and query memory permissions
+   inside your own programs.
+
+   See comment near the top of valgrind.h on how to use them.
+*/
+
+#include "valgrind.h"
+
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+   This enum comprises an ABI exported by Valgrind to programs
+   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
+   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
+typedef
+   enum {
+      VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
+      VG_USERREQ__MAKE_MEM_UNDEFINED,
+      VG_USERREQ__MAKE_MEM_DEFINED,
+      VG_USERREQ__DISCARD,
+      VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+      VG_USERREQ__CHECK_MEM_IS_DEFINED,
+      VG_USERREQ__DO_LEAK_CHECK,
+      VG_USERREQ__COUNT_LEAKS,
+
+      VG_USERREQ__GET_VBITS,
+      VG_USERREQ__SET_VBITS,
+
+      VG_USERREQ__CREATE_BLOCK,
+
+      VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+
+      /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+      VG_USERREQ__COUNT_LEAK_BLOCKS,
+
+      /* This is just for memcheck's internal use - don't use it */
+      _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
+         = VG_USERREQ_TOOL_BASE('M','C') + 256
+   } Vg_MemCheckClientRequest;
+
+
+
+/* Client-code macros to manipulate the state of memory. */
+
+/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__MAKE_MEM_NOACCESS,       \
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Similarly, mark memory at _qzz_addr as addressable but undefined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__MAKE_MEM_UNDEFINED,      \
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Similarly, mark memory at _qzz_addr as addressable and defined
+   for _qzz_len bytes. */
+#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__MAKE_MEM_DEFINED,        \
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
+   not altered: bytes which are addressable are marked as defined,
+   but those which are not addressable are left unchanged. */
+#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Create a block-description handle.  The description is an ascii
+   string which is included in any messages pertaining to addresses
+   within the specified memory range.  Has no other effect on the
+   properties of the memory range. */
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)	 \
+	(__extension__({unsigned long _qzz_res;			 \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__CREATE_BLOCK,            \
+                            _qzz_addr, _qzz_len, _qzz_desc,      \
+                            0, 0);                               \
+    _qzz_res;							 \
+   }))
+
+/* Discard a block-description-handle. Returns 1 for an
+   invalid handle, 0 for a valid handle. */
+#define VALGRIND_DISCARD(_qzz_blkindex)                          \
+   (__extension__ ({unsigned long _qzz_res;                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+                            VG_USERREQ__DISCARD,                 \
+                            0, _qzz_blkindex, 0, 0, 0);          \
+    _qzz_res;                                                    \
+   }))
+
+
+/* Client-code macros to check the state of memory. */
+
+/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
+   If suitable addressibility is not established, Valgrind prints an
+   error message and returns the address of the first offending byte.
+   Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)    \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,\
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Check that memory at _qzz_addr is addressable and defined for
+   _qzz_len bytes.  If suitable addressibility and definedness are not
+   established, Valgrind prints an error message and returns the
+   address of the first offending byte.  Otherwise it returns zero. */
+#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
+   (__extension__({unsigned long _qzz_res;                       \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
+                            _qzz_addr, _qzz_len, 0, 0, 0);       \
+    _qzz_res;                                                    \
+   }))
+
+/* Use this macro to force the definedness and addressibility of an
+   lvalue to be checked.  If suitable addressibility and definedness
+   are not established, Valgrind prints an error message and returns
+   the address of the first offending byte.  Otherwise it returns
+   zero. */
+#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
+   VALGRIND_CHECK_MEM_IS_DEFINED(                                \
+      (volatile unsigned char *)&(__lvalue),                     \
+                      (unsigned long)(sizeof (__lvalue)))
+
+
+/* Do a full memory leak check (like --leak-check=full) mid-execution. */
+#define VALGRIND_DO_LEAK_CHECK                                   \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            0, 0, 0, 0, 0);                      \
+   }
+
+/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
+#define VALGRIND_DO_QUICK_LEAK_CHECK				 \
+   {unsigned long _qzz_res;                                      \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__DO_LEAK_CHECK,           \
+                            1, 0, 0, 0, 0);                      \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAKS,                  \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+/* Return number of leaked, dubious, reachable and suppressed bytes found by
+   all previous leak checks.  They must be lvalues.  */
+#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
+   /* For safety on 64-bit platforms we assign the results to private
+      unsigned long variables, then assign these to the lvalues the user
+      specified, which works no matter what type 'leaked', 'dubious', etc
+      are.  We also initialise '_qzz_leaked', etc because
+      VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
+      defined. */                                                        \
+   {unsigned long _qzz_res;                                              \
+    unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
+    unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                              \
+                               VG_USERREQ__COUNT_LEAK_BLOCKS,            \
+                               &_qzz_leaked, &_qzz_dubious,              \
+                               &_qzz_reachable, &_qzz_suppressed, 0);    \
+    leaked     = _qzz_leaked;                                            \
+    dubious    = _qzz_dubious;                                           \
+    reachable  = _qzz_reachable;                                         \
+    suppressed = _qzz_suppressed;                                        \
+   }
+
+
+/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
+   into the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zzsrc/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                 \
+   (__extension__({unsigned long _qzz_res;                       \
+    char* czza     = (char*)zza;                                 \
+    char* czzvbits = (char*)zzvbits;                             \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__GET_VBITS,               \
+                            czza, czzvbits, zznbytes, 0, 0 );    \
+    _qzz_res;                                                    \
+   }))
+
+/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
+   from the provided zzvbits array.  Return values:
+      0   if not running on valgrind
+      1   success
+      2   [previously indicated unaligned arrays;  these are now allowed]
+      3   if any parts of zza/zzvbits are not addressable.
+   The metadata is not copied in cases 0, 2 or 3 so it should be
+   impossible to segfault your system by using this call.
+*/
+#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                 \
+   (__extension__({unsigned int _qzz_res;                        \
+    char* czza     = (char*)zza;                                 \
+    char* czzvbits = (char*)zzvbits;                             \
+    VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0,                      \
+                            VG_USERREQ__SET_VBITS,               \
+                            czza, czzvbits, zznbytes, 0, 0 );    \
+    _qzz_res;                                                    \
+   }))
+
+#endif
+
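
To make the three memory states above concrete, a minimal sketch of what each transition means under memcheck (the buffer and function names are illustrative):

	#include <valgrind/memcheck.h>

	static char buf[16];

	static void states_demo ( void ) {
		/* noaccess: any read or write of buf is now reported
		 * as an invalid access */
		VALGRIND_MAKE_MEM_NOACCESS ( buf, sizeof ( buf ) );

		/* undefined: writes are fine, but using a value read
		 * from buf triggers an "uninitialised value" report */
		VALGRIND_MAKE_MEM_UNDEFINED ( buf, sizeof ( buf ) );

		/* defined: reads and writes are both clean */
		VALGRIND_MAKE_MEM_DEFINED ( buf, sizeof ( buf ) );

		/* the CHECK macros return zero on success, otherwise
		 * the address of the first offending byte */
		if ( VALGRIND_CHECK_MEM_IS_DEFINED ( buf, sizeof ( buf ) ) )
			return;
	}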

src/arch/x86/include/valgrind/valgrind.h (new file, +4536 -0): file diff suppressed because it is too large.


src/arch/x86_64/scripts/linux.lds (+4 -3, same change as the i386 script)

@@ -42,9 +42,12 @@
 	/*
 	 * The data section
 	 *
+	 * Adjust the address for the data segment.  We want to adjust up to
+	 * the same address within the page on the next page up.
 	 */
 
-	. = ALIGN ( _max_align );
+	. = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1));
+	. = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
 	.data : {
 		_data = .;
 		*(.data)
@@ -91,8 +94,6 @@
 		*(.comment.*)
 		*(.note)
 		*(.note.*)
-		*(.eh_frame)
-		*(.eh_frame.*)
 		*(.rel)
 		*(.rel.*)
 		*(.discard)

src/core/malloc.c (+57 -2)

@@ -27,6 +27,7 @@
 #include <ipxe/init.h>
 #include <ipxe/refcnt.h>
 #include <ipxe/malloc.h>
+#include <valgrind/memcheck.h>
 
 /** @file
  *
@@ -97,6 +98,37 @@
 /** The heap itself */
 static char heap[HEAP_SIZE] __attribute__ (( aligned ( __alignof__(void *) )));
 
+/**
+ * Mark all blocks in free list as defined
+ *
+ */
+static inline void valgrind_make_blocks_defined ( void ) {
+	struct memory_block *block;
+
+	if ( RUNNING_ON_VALGRIND > 0 ) {
+		VALGRIND_MAKE_MEM_DEFINED ( &free_blocks,
+					    sizeof ( free_blocks ) );
+		list_for_each_entry ( block, &free_blocks, list )
+			VALGRIND_MAKE_MEM_DEFINED ( block, sizeof ( *block ) );
+	}
+}
+
+/**
+ * Mark all blocks in free list as inaccessible
+ *
+ */
+static inline void valgrind_make_blocks_noaccess ( void ) {
+	struct memory_block *block;
+	struct memory_block *tmp;
+
+	if ( RUNNING_ON_VALGRIND > 0 ) {
+		list_for_each_entry_safe ( block, tmp, &free_blocks, list )
+			VALGRIND_MAKE_MEM_NOACCESS ( block, sizeof ( *block ) );
+		VALGRIND_MAKE_MEM_NOACCESS ( &free_blocks,
+					     sizeof ( free_blocks ) );
+	}
+}
+
 /**
  * Discard some cached data
  *
@@ -131,6 +163,9 @@
 	ssize_t post_size;
 	struct memory_block *pre;
 	struct memory_block *post;
+	struct memory_block *ptr;
+
+	valgrind_make_blocks_defined();
 
 	/* Round up size to multiple of MIN_MEMBLOCK_SIZE and
 	 * calculate alignment mask.
@@ -163,6 +198,8 @@
 				 * the heap).
 				 */
 				if ( (size_t) post_size >= MIN_MEMBLOCK_SIZE ) {
+					VALGRIND_MAKE_MEM_DEFINED ( post,
+							     sizeof ( *post ) );
 					post->size = post_size;
 					list_add ( &post->list, &pre->list );
 				}
@@ -183,7 +220,8 @@
 				/* Return allocated block */
 				DBG ( "Allocated [%p,%p)\n", block,
 				      ( ( ( void * ) block ) + size ) );
-				return block;
+				ptr = block;
+				goto done;
 			}
 		}
 
@@ -192,9 +230,14 @@
 			/* Nothing available to discard */
 			DBG ( "Failed to allocate %#zx (aligned %#zx)\n",
 			      size, align );
-			return NULL;
+			ptr = NULL;
+			goto done;
 		}
 	}
+
+ done:
+	valgrind_make_blocks_noaccess();
+	return ptr;
 }
 
 /**
@@ -216,11 +259,14 @@
 	if ( ! ptr )
 		return;
 
+	valgrind_make_blocks_defined();
+
 	/* Round up size to match actual size that alloc_memblock()
 	 * would have used.
 	 */
 	size = ( size + MIN_MEMBLOCK_SIZE - 1 ) & ~( MIN_MEMBLOCK_SIZE - 1 );
 	freeing = ptr;
+	VALGRIND_MAKE_MEM_DEFINED ( freeing, sizeof ( *freeing ) );
 	freeing->size = size;
 	DBG ( "Freeing [%p,%p)\n", freeing, ( ( ( void * ) freeing ) + size ));
 
@@ -263,6 +309,8 @@
 
 	/* Update free memory counter */
 	freemem += size;
+
+	valgrind_make_blocks_noaccess();
 }
 
 /**
@@ -302,8 +350,11 @@
 		new_block = alloc_memblock ( new_total_size, 1 );
 		if ( ! new_block )
 			return NULL;
+		VALGRIND_MAKE_MEM_UNDEFINED ( new_block, offsetof ( struct autosized_block, data ) );
 		new_block->size = new_total_size;
+		VALGRIND_MAKE_MEM_NOACCESS ( new_block, offsetof ( struct autosized_block, data ) );
 		new_ptr = &new_block->data;
+		VALGRIND_MALLOCLIKE_BLOCK ( new_ptr, new_size, 0, 0 );
 	}
 
 	/* Copy across relevant part of the old data region (if any),
@@ -314,12 +365,15 @@
 	if ( old_ptr && ( old_ptr != NOWHERE ) ) {
 		old_block = container_of ( old_ptr, struct autosized_block,
 					   data );
+		VALGRIND_MAKE_MEM_DEFINED ( old_block, offsetof ( struct autosized_block, data ) );
 		old_total_size = old_block->size;
 		old_size = ( old_total_size -
 			     offsetof ( struct autosized_block, data ) );
 		memcpy ( new_ptr, old_ptr,
 			 ( ( old_size < new_size ) ? old_size : new_size ) );
 		free_memblock ( old_block, old_total_size );
+		VALGRIND_MAKE_MEM_NOACCESS ( old_block, offsetof ( struct autosized_block, data ) );
+		VALGRIND_FREELIKE_BLOCK ( old_ptr, 0 );
	}
 
 	return new_ptr;
@@ -395,6 +449,7 @@
  *
  */
 static void init_heap ( void ) {
+	VALGRIND_MAKE_MEM_NOACCESS ( heap, sizeof ( heap ) );
 	mpopulate ( heap, sizeof ( heap ) );
 }
 
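The two helpers added above enforce a simple discipline: free-block headers are unaddressable except while the allocator itself is running, so any other code that touches heap metadata gets reported by memcheck.  A condensed sketch of the same pattern with illustrative types (not the iPXE structs); the traversal order also explains the list_for_each_entry_safe in the real code, since each next pointer must be read before its header is sealed:

	#include <stddef.h>
	#include <valgrind/memcheck.h>

	struct free_block {
		struct free_block *next;
		size_t size;
	};

	static struct free_block *free_list;

	/* On entry to the allocator: open the metadata for inspection */
	static void blocks_defined ( void ) {
		struct free_block *b;

		VALGRIND_MAKE_MEM_DEFINED ( &free_list, sizeof ( free_list ) );
		for ( b = free_list ; b ; b = b->next )
			VALGRIND_MAKE_MEM_DEFINED ( b, sizeof ( *b ) );
	}

	/* On exit: seal it again */
	static void blocks_noaccess ( void ) {
		struct free_block *b;
		struct free_block *next;

		for ( b = free_list ; b ; b = next ) {
			next = b->next;	/* read before the header goes dark */
			VALGRIND_MAKE_MEM_NOACCESS ( b, sizeof ( *b ) );
		}
		VALGRIND_MAKE_MEM_NOACCESS ( &free_list, sizeof ( free_list ) );
	}

The real helpers additionally guard the list walk with RUNNING_ON_VALGRIND, so native runs skip it entirely.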

src/include/ipxe/malloc.h (+6 -1)

@@ -19,6 +19,7 @@
  */
 #include <stdlib.h>
 #include <ipxe/tables.h>
+#include <valgrind/memcheck.h>
 
 extern size_t freemem;
 
@@ -39,7 +40,10 @@
  * @c align must be a power of two.  @c size may not be zero.
  */
 static inline void * __malloc malloc_dma ( size_t size, size_t phys_align ) {
-	return alloc_memblock ( size, phys_align );
+	void * ptr = alloc_memblock ( size, phys_align );
+	if ( ptr && size )
+		VALGRIND_MALLOCLIKE_BLOCK ( ptr, size, 0, 0 );
+	return ptr;
 }
 
 /**
@@ -55,6 +59,7 @@
  */
 static inline void free_dma ( void *ptr, size_t size ) {
 	free_memblock ( ptr, size );
+	VALGRIND_FREELIKE_BLOCK ( ptr, 0 );
 }
 
 /** A cache discarder */
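
The malloc_dma()/free_dma() pair above gives memcheck full block tracking for DMA allocations.  A hypothetical caller, assuming a run under valgrind:

	#include <stdint.h>
	#include <ipxe/malloc.h>

	static void dma_example ( void ) {
		uint8_t *buf = malloc_dma ( 512, 512 );

		if ( ! buf )
			return;
		buf[0] = 0xa5;		/* fine: block is live and tracked */
		free_dma ( buf, 512 );
		/* buf[0] = 0x5a;	   would now be reported as a
		 *			   write to a freed block */
	}

Each VALGRIND_MALLOCLIKE_BLOCK must be matched by one VALGRIND_FREELIKE_BLOCK on the same address, which is why the annotation lives inside the wrapper pair rather than in the callers.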

src/interface/linux/linux_umalloc.c (+31 -3)

@@ -18,6 +18,8 @@
 
 FILE_LICENCE(GPL2_OR_LATER);
 
+#include <valgrind/memcheck.h>
+
 /** @file
  *
  * iPXE user memory allocation API for linux
@@ -56,7 +58,9 @@
 	/* Check whether we have a valid pointer */
 	if (ptr != NULL && ptr != NOWHERE) {
 		mdptr = ptr - SIZE_MD;
+		VALGRIND_MAKE_MEM_DEFINED(mdptr, SIZE_MD);
 		md = *mdptr;
+		VALGRIND_MAKE_MEM_NOACCESS(mdptr, SIZE_MD);
 
 		/* Check for poison in the metadata */
 		if (md.poison != POISON) {
@@ -78,32 +82,56 @@
 		if (mdptr) {
 			if (linux_munmap(mdptr, md.size))
 				DBG("linux_realloc munmap failed: %s\n", linux_strerror(linux_errno));
+			VALGRIND_FREELIKE_BLOCK(ptr, sizeof(*mdptr));
 		}
 		return NOWHERE;
 	}
 
 	if (ptr) {
-		/* ptr is pointing to an already allocated memory, mremap() it with new size */
+		char *vbits = NULL;
+
+		if (RUNNING_ON_VALGRIND > 0)
+			vbits = linux_realloc(NULL, min(size, md.size));
+
+/* prevent an unused variable warning when building w/o valgrind support */
+#ifndef NVALGRIND
+		VALGRIND_GET_VBITS(ptr, vbits, min(size, md.size));
+#endif
+
+		VALGRIND_FREELIKE_BLOCK(ptr, SIZE_MD);
+
 		mdptr = linux_mremap(mdptr, md.size + SIZE_MD, size + SIZE_MD, MREMAP_MAYMOVE);
 		if (mdptr == MAP_FAILED) {
 			DBG("linux_realloc mremap failed: %s\n", linux_strerror(linux_errno));
 			return NULL;
 		}
-
 		ptr = ((void *)mdptr) + SIZE_MD;
+
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, size, SIZE_MD, 0);
+/* prevent an unused variable warning when building w/o valgrind support */
+#ifndef NVALGRIND
+		VALGRIND_SET_VBITS(ptr, vbits, min(size, md.size));
+#endif
+
+		if (RUNNING_ON_VALGRIND > 0)
+			linux_realloc(vbits, 0);
 	} else {
-		/* allocate new memory with mmap() */
 		mdptr = linux_mmap(NULL, size + SIZE_MD, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 		if (mdptr == MAP_FAILED) {
 			DBG("linux_realloc mmap failed: %s\n", linux_strerror(linux_errno));
 			return NULL;
 		}
 		ptr = ((void *)mdptr) + SIZE_MD;
+		VALGRIND_MALLOCLIKE_BLOCK(ptr, size, SIZE_MD, 0);
 	}
 
 	/* Update the metadata */
+	VALGRIND_MAKE_MEM_DEFINED(mdptr, SIZE_MD);
 	mdptr->poison = POISON;
 	mdptr->size = size;
+	VALGRIND_MAKE_MEM_NOACCESS(mdptr, SIZE_MD);
+	// VALGRIND_MALLOCLIKE_BLOCK ignores redzones currently, make our own
+	VALGRIND_MAKE_MEM_NOACCESS(ptr + size, SIZE_MD);
 
 	return ptr;
 }