
[librm] Support ioremap() for addresses above 4GB in a 64-bit build

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown, 8 years ago
commit 99b5216b1c (tags/v1.20.1)
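In a 64-bit build the unsigned long bus addresses returned by pci_bar_start() can exceed 32 bits, and with this commit ioremap() backs such addresses with 2MB page mappings instead of being limited to the identity-mapped low 4GB. A minimal sketch of how a driver might exercise the new path (the probe shape, register offset and error handling are illustrative, not taken from this commit):

#include <errno.h>
#include <stdint.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>

/* Illustrative only: map a (possibly >4GB) memory BAR, read one
 * 32-bit register, then release the mapping.
 */
static int example_probe ( struct pci_device *pci ) {
	unsigned long bar;
	void *regs;
	uint32_t status;

	/* Physical BAR address; may lie above 4GB on a 64-bit build */
	bar = pci_bar_start ( pci, PCI_BASE_ADDRESS_0 );

	/* ioremap() now creates 2MB page mappings above IO_BASE */
	regs = ioremap ( bar, 0x1000 );
	if ( ! regs )
		return -ENOMEM;

	/* 0x04 is a hypothetical status register offset */
	status = readl ( regs + 0x04 );
	DBG ( "status %08x\n", status );

	iounmap ( regs );
	return 0;
}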

src/arch/x86/include/bits/iomap.h (+2 -0)

@@ -9,4 +9,6 @@
 
 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
+#include <ipxe/iomap_pages.h>
+
 #endif /* _BITS_IOMAP_H */

src/arch/x86/include/ipxe/iomap_pages.h (+24 -0)

@@ -0,0 +1,24 @@
+#ifndef _IPXE_IOMAP_PAGES_H
+#define _IPXE_IOMAP_PAGES_H
+
+/** @file
+ *
+ * I/O mapping API using page tables
+ *
+ */
+
+FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
+
+#ifdef IOMAP_PAGES
+#define IOMAP_PREFIX_pages
+#else
+#define IOMAP_PREFIX_pages __pages_
+#endif
+
+static inline __always_inline unsigned long
+IOMAP_INLINE ( pages, io_to_bus ) ( volatile const void *io_addr ) {
+	/* Not easy to do; just return the CPU address for debugging purposes */
+	return ( ( intptr_t ) io_addr );
+}
+
+#endif /* _IPXE_IOMAP_PAGES_H */
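The IOMAP_PREFIX_pages dance is iPXE's usual compile-time API selection: when IOMAP_PAGES is the configured I/O mapping API the prefix is empty, so io_to_bus() is inlined directly under its public name; otherwise the body is compiled under a prefixed private name and is only reachable through the PROVIDE_IOMAP_INLINE() declaration added to librm_mgmt.c below. A simplified, self-contained illustration of the token-pasting idiom (the real macros come from iPXE's ipxe/api.h and ipxe/iomap.h headers; MY_PREFIX_* and my_func are invented for the example):

/* Per-implementation prefixes: empty for the selected API,
 * a private prefix for every other implementation.
 */
#define MY_PREFIX_foo			/* "foo" is the selected API */
#define MY_PREFIX_bar	__bar_		/* "bar" is not selected */

/* Two-level expansion so the prefix macro is expanded before pasting */
#define API_NAME( prefix, func )	_API_NAME ( prefix, func )
#define _API_NAME( prefix, func )	prefix ## func

/* Expands to plain my_func(): callers inline this body directly */
static inline unsigned long API_NAME ( MY_PREFIX_foo, my_func ) ( void ) {
	return 42;
}

/* Expands to __bar_my_func(): compiled but unreachable unless a
 * PROVIDE-style macro exports it under the public name at link time.
 */
static inline unsigned long API_NAME ( MY_PREFIX_bar, my_func ) ( void ) {
	return 0;
}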

src/arch/x86/include/librm.h (+45 -0)

@@ -378,6 +378,51 @@ struct interrupt_vector {
 
 extern void set_interrupt_vector ( unsigned int intr, void *vector );
 
+/** A page table */
+struct page_table {
+	/** Page address and flags */
+	uint64_t page[512];
+};
+
+/** Page flags */
+enum page_flags {
+	/** Page is present */
+	PAGE_P = 0x01,
+	/** Page is writable */
+	PAGE_RW = 0x02,
+	/** Page is accessible by user code */
+	PAGE_US = 0x04,
+	/** Page-level write-through */
+	PAGE_PWT = 0x08,
+	/** Page-level cache disable */
+	PAGE_PCD = 0x10,
+	/** Page is a large page */
+	PAGE_PS = 0x80,
+	/** Page is the last page in an allocation
+	 *
+	 * This bit is ignored by the hardware.  We use it to track
+	 * the size of allocations made by ioremap().
+	 */
+	PAGE_LAST = 0x800,
+};
+
+/** The I/O space page table */
+extern struct page_table io_pages;
+
+/** I/O page size
+ *
+ * We choose to use 2MB pages for I/O space, to minimise the number of
+ * page table entries required.
+ */
+#define IO_PAGE_SIZE 0x200000UL
+
+/** I/O page base address
+ *
+ * We choose to place I/O space immediately above the identity-mapped
+ * 32-bit address space.
+ */
+#define IO_BASE ( ( void * ) 0x100000000ULL )
+
 #endif /* ASSEMBLY */
 
 #endif /* LIBRM_H */
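With 512 entries of 2MB each, io_pages provides a 1GB I/O window running from IO_BASE at 0x100000000 up to 0x13fffffff, which is exactly the PDPTE[0x004] range documented in librm.S below. A compile-time check of that arithmetic (IO_PTES and IO_BASE_ADDR are names invented for the check, not part of the commit):

#include <assert.h>

#define IO_PAGE_SIZE	0x200000UL	/* 2MB pages */
#define IO_PTES		512		/* entries in struct page_table */
#define IO_BASE_ADDR	0x100000000ULL	/* immediately above 32-bit space */

/* 512 x 2MB = 1GB of mappable I/O space */
static_assert ( IO_PTES * IO_PAGE_SIZE == 0x40000000UL, "1GB window" );

/* The window therefore ends at 0x13fffffff, matching the PDPTE comment */
static_assert ( IO_BASE_ADDR + ( IO_PTES * IO_PAGE_SIZE ) - 1 == 0x13fffffffULL,
		"window ends at 0x13fffffff" );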

src/arch/x86/transitions/librm.S (+17 -0)

@@ -1340,11 +1340,19 @@ interrupt_wrapper:
 	 * These point to the PDPT.  This creates some aliased
 	 * addresses within unused portions of the 64-bit address
 	 * space, but allows us to use just a single PDPT.
+	 *
+	 * - PDE[...] covering arbitrary 2MB portions of I/O space
+	 *
+	 * These are 2MB pages created by ioremap() to cover I/O
+	 * device addresses.
 	 */
 pml4e:
 	.space	SIZEOF_PT
 	.size	pml4e, . - pml4e
 
+	.globl	io_pages
+	.equ	io_pages, pml4e
+
 	/* Page directory pointer table entries (PDPTEs)
 	 *
 	 * This comprises:
@@ -1357,6 +1365,11 @@ pml4e:
 	 * These point to the appropriate page directories (in pde_low)
 	 * used to identity-map the whole of the 32-bit address space.
 	 *
+	 * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
+	 *
+	 * This points back to the PML4, allowing the PML4 to be
+	 * (ab)used to hold 2MB pages used for I/O device addresses.
+	 *
 	 * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
 	 *
 	 * This points back to the PDPT itself, allowing the PDPT to be
@@ -1421,6 +1434,10 @@ init_pages:
 	/* Initialise PDPTE for negative 1GB */
 	movl	%eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )
 
+	/* Initialise PDPTE for I/O space */
+	leal	( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
+	movl	%eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )
+
 	/* Initialise PDPTEs for low 4GB */
 	movl	$PDE_LOW_PTS, %ecx
 	leal	( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
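Because four PDPTEs (one per GB) already identity-map the low 4GB, the new entry written at offset PDE_LOW_PTS * SIZEOF_PTE is PDPTE[0x004], covering 0x100000000-0x13fffffff, and it points back at the PML4. A page walk for an I/O address therefore goes PML4[0], then PDPT[4], then back into the PML4, whose remaining slots are now interpreted as 2MB page-directory entries; that is why io_pages can simply alias pml4e, and why ioremap_pages() below skips any slot that already has PAGE_P set (such as PML4E[0], which points at the PDPT). A small standalone sketch of the index arithmetic (walk_indices() is invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Break a canonical 64-bit address into 4-level paging indices
 * (9 bits per level; 2MB pages stop the walk at the page directory).
 */
static void walk_indices ( uint64_t vaddr ) {
	unsigned int pml4_idx = ( ( vaddr >> 39 ) & 0x1ff );
	unsigned int pdpt_idx = ( ( vaddr >> 30 ) & 0x1ff );
	unsigned int pd_idx = ( ( vaddr >> 21 ) & 0x1ff );

	printf ( "%#llx -> PML4[%u] PDPT[%u] PD[%u]\n",
		 ( ( unsigned long long ) vaddr ), pml4_idx, pdpt_idx, pd_idx );
}

int main ( void ) {
	/* Start of the I/O window: PML4[0], PDPT[4], PD[0] */
	walk_indices ( 0x100000000ULL );
	/* Two 2MB pages further in: PML4[0], PDPT[4], PD[2] */
	walk_indices ( 0x100400000ULL );
	return 0;
}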

src/arch/x86/transitions/librm_mgmt.c (+122 -0)

@@ -8,6 +8,8 @@
 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
 #include <stdint.h>
+#include <strings.h>
+#include <assert.h>
 #include <ipxe/profile.h>
 #include <realmode.h>
 #include <pic8259.h>
@@ -176,6 +178,123 @@ void __attribute__ (( regparm ( 1 ) )) interrupt ( int intr ) {
 	profile_exclude ( profiler );
 }
 
+/**
+ * Map pages for I/O
+ *
+ * @v bus_addr		Bus address
+ * @v len		Length of region
+ * @ret io_addr		I/O address
+ */
+static void * ioremap_pages ( unsigned long bus_addr, size_t len ) {
+	unsigned long start;
+	unsigned int count;
+	unsigned int stride;
+	unsigned int first;
+	unsigned int i;
+	size_t offset;
+	void *io_addr;
+
+	DBGC ( &io_pages, "IO mapping %08lx+%zx\n", bus_addr, len );
+
+	/* Sanity check */
+	assert ( len != 0 );
+
+	/* Round down start address to a page boundary */
+	start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
+	offset = ( bus_addr - start );
+	assert ( offset < IO_PAGE_SIZE );
+
+	/* Calculate number of pages required */
+	count = ( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
+	assert ( count != 0 );
+	assert ( count < ( sizeof ( io_pages.page ) /
+			   sizeof ( io_pages.page[0] ) ) );
+
+	/* Round up number of pages to a power of two */
+	stride = ( 1 << fls ( count - 1 ) );
+	assert ( count <= stride );
+
+	/* Allocate pages */
+	for ( first = 0 ; first < ( sizeof ( io_pages.page ) /
+				    sizeof ( io_pages.page[0] ) ) ;
+	      first += stride ) {
+
+		/* Calculate I/O address */
+		io_addr = ( IO_BASE + ( first * IO_PAGE_SIZE ) + offset );
+
+		/* Check that page table entries are available */
+		for ( i = first ; i < ( first + count ) ; i++ ) {
+			if ( io_pages.page[i] & PAGE_P ) {
+				io_addr = NULL;
+				break;
+			}
+		}
+		if ( ! io_addr )
+			continue;
+
+		/* Create page table entries */
+		for ( i = first ; i < ( first + count ) ; i++ ) {
+			io_pages.page[i] = ( start | PAGE_P | PAGE_RW |
+					     PAGE_US | PAGE_PWT | PAGE_PCD |
+					     PAGE_PS );
+			start += IO_PAGE_SIZE;
+		}
+
+		/* Mark last page as being the last in this allocation */
+		io_pages.page[ i - 1 ] |= PAGE_LAST;
+
+		/* Return I/O address */
+		DBGC ( &io_pages, "IO mapped %08lx+%zx to %p using PTEs "
+		       "[%d-%d]\n", bus_addr, len, io_addr, first,
+		       ( first + count - 1 ) );
+		return io_addr;
+	}
+
+	DBGC ( &io_pages, "IO could not map %08lx+%zx\n", bus_addr, len );
+	return NULL;
+}
+
+/**
+ * Unmap pages for I/O
+ *
+ * @v io_addr		I/O address
+ */
+static void iounmap_pages ( volatile const void *io_addr ) {
+	volatile const void *invalidate = io_addr;
+	unsigned int first;
+	unsigned int i;
+	int is_last;
+
+	DBGC ( &io_pages, "IO unmapping %p\n", io_addr );
+
+	/* Calculate first page table entry */
+	first = ( ( io_addr - IO_BASE ) / IO_PAGE_SIZE );
+
+	/* Clear page table entries */
+	for ( i = first ; ; i++ ) {
+
+		/* Sanity check */
+		assert ( io_pages.page[i] & PAGE_P );
+
+		/* Check if this is the last page in this allocation */
+		is_last = ( io_pages.page[i] & PAGE_LAST );
+
+		/* Clear page table entry */
+		io_pages.page[i] = 0;
+
+		/* Invalidate TLB for this page */
+		__asm__ __volatile__ ( "invlpg (%0)" : : "r" ( invalidate ) );
+		invalidate += IO_PAGE_SIZE;
+
+		/* Terminate if this was the last page */
+		if ( is_last )
+			break;
+	}
+
+	DBGC ( &io_pages, "IO unmapped %p using PTEs [%d-%d]\n",
+	       io_addr, first, i );
+}
+
 PROVIDE_UACCESS_INLINE ( librm, phys_to_user );
 PROVIDE_UACCESS_INLINE ( librm, user_to_phys );
 PROVIDE_UACCESS_INLINE ( librm, virt_to_user );
@@ -186,3 +305,6 @@ PROVIDE_UACCESS_INLINE ( librm, memmove_user );
 PROVIDE_UACCESS_INLINE ( librm, memset_user );
 PROVIDE_UACCESS_INLINE ( librm, strlen_user );
 PROVIDE_UACCESS_INLINE ( librm, memchr_user );
+PROVIDE_IOMAP_INLINE ( pages, io_to_bus );
+PROVIDE_IOMAP ( pages, ioremap, ioremap_pages );
+PROVIDE_IOMAP ( pages, iounmap, iounmap_pages );
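To make the allocation arithmetic concrete, here is a standalone calculation (the bus address, length and chosen block index are made up) mirroring the steps in ioremap_pages(): the request is widened to whole 2MB pages, the page count is rounded up to a power of two, and the table is searched in stride-aligned blocks so that every allocation stays naturally aligned and a simple presence check of the candidate block suffices.

#include <stdio.h>

#define IO_PAGE_SIZE	0x200000UL
#define IO_BASE_ADDR	0x100000000ULL

int main ( void ) {
	/* Hypothetical 3MB region that does not start on a 2MB boundary */
	unsigned long bus_addr = 0xe0100000UL;
	unsigned long len = 0x300000UL;

	/* Same rounding as ioremap_pages() */
	unsigned long start = ( bus_addr & ~( IO_PAGE_SIZE - 1 ) );
	unsigned long offset = ( bus_addr - start );
	unsigned int count =
		( ( offset + len + IO_PAGE_SIZE - 1 ) / IO_PAGE_SIZE );
	unsigned int stride;

	/* Round the page count up to a power of two */
	for ( stride = 1 ; stride < count ; stride <<= 1 ) {}

	/* Prints: start 0xe0000000 offset 0x100000 count 2 stride 2 */
	printf ( "start %#lx offset %#lx count %u stride %u\n",
		 start, offset, count, stride );

	/* If the first free stride-aligned block begins at PTE 2, the
	 * caller sees IO_BASE + 2*2MB + offset = 0x100500000
	 */
	printf ( "io_addr %#llx\n",
		 ( IO_BASE_ADDR + ( 2 * IO_PAGE_SIZE ) + offset ) );
	return 0;
}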

src/config/defaults/pcbios.h (+6 -1)

@@ -11,7 +11,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
 #define UACCESS_LIBRM
 #define IOAPI_X86
-#define IOMAP_VIRT
 #define PCIAPI_PCBIOS
 #define TIMER_PCBIOS
 #define CONSOLE_PCBIOS
@@ -23,6 +22,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #define TIME_RTC
 #define REBOOT_PCBIOS
 
+#ifdef __x86_64__
+#define IOMAP_PAGES
+#else
+#define IOMAP_VIRT
+#endif
+
 #define	IMAGE_ELF		/* ELF image support */
 #define	IMAGE_MULTIBOOT		/* MultiBoot image support */
 #define	IMAGE_PXE		/* PXE image support */
