[librm] Prepare for long-mode memory map

The bulk of the iPXE binary (the .textdata section) is physically
relocated at runtime to the top of the 32-bit address space in order
to allow space for an OS to be loaded.  The relocation is achieved
with the assistance of segmentation: we adjust the code and data
segment bases so that the link-time addresses remain valid.

Segmentation is not available (for normal code and data segments) in
long mode.  We choose to compile the C code with -mcmodel=kernel and
use a link-time address of 0xffffffffeb000000.  This choice allows us
to identity-map the entirety of the 32-bit address space, and to alias
our chosen link-time address to the physical location of our .textdata
section.  (This requires the .textdata section to always be aligned to
a page boundary.)

We simultaneously choose to set the 32-bit virtual address segment
bases such that the link-time addresses may simply be truncated to 32
bits in order to generate a valid 32-bit virtual address.  This allows
symbols in .textdata to be trivially accessed by both 32-bit and
64-bit code.
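
For illustration, a minimal C sketch of the intended arithmetic; the
link-time base matches the value chosen above, while the physical load
address is a made-up example:

    #include <assert.h>
    #include <stdint.h>

    #define LINK_BASE 0xffffffffeb000000ULL /* link-time base of .textdata */

    int main ( void ) {
        uint64_t phys_base = 0x7c400000ULL;      /* example runtime location */
        uint64_t link_addr = LINK_BASE + 0x1234; /* a symbol within .textdata */

        /* 64-bit code: the page tables alias LINK_BASE onto phys_base */
        uint64_t phys = ( link_addr - LINK_BASE ) + phys_base;

        /* 32-bit code: the VIRTUAL_CS/VIRTUAL_DS bases are chosen so
         * that the truncated link-time address is already a valid
         * 32-bit virtual address.
         */
        uint32_t seg_base = ( uint32_t ) ( phys_base - ( uint32_t ) LINK_BASE );
        uint32_t virt32 = ( uint32_t ) link_addr;

        assert ( ( uint32_t ) ( seg_base + virt32 ) == ( uint32_t ) phys );
        return 0;
    }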

There is no (sensible) way in 32-bit assembly code to generate the
required R_X86_64_32S relocation records for these truncated symbols.
However, subtracting the fixed constant 0xffffffff00000000 has the
same effect as truncation, and can be represented in a standard
R_X86_64_32 relocation record.  We define the VIRTUAL() macro to
abstract away this truncation operation, and apply it to all
references by 32-bit (or 16-bit) assembly code to any symbols within
the .textdata section.
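
A self-contained C check of that equivalence (nothing iPXE-specific;
the symbol value is just an arbitrary address within the negative 2GB):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
        /* Any .textdata symbol lies within the negative 2GB, i.e. at
         * or above 0xffffffff80000000.
         */
        uint64_t symbol = 0xffffffffeb012345ULL;

        /* Truncating to 32 bits (what the 32-bit code needs)... */
        uint32_t truncated = ( uint32_t ) symbol;

        /* ...gives the same value as subtracting the fixed constant,
         * which can be expressed as the addend of an ordinary
         * R_X86_64_32 relocation record.
         */
        uint64_t via_addend = ( symbol - 0xffffffff00000000ULL );

        assert ( via_addend == truncated );
        return 0;
    }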

We define "virt_offset" for a 64-bit build as "the value to be added
to an address within .textdata in order to obtain its physical
address".  With this definition, the low 32 bits of "virt_offset" can
be treated by 32-bit code as functionally equivalent to "virt_offset"
in a 32-bit build.
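
A rough C sketch of why the low 32 bits suffice (example numbers only;
the 64-bit virt_offset wraps modulo 2^64, but the physical result
always fits in 32 bits):

    #include <assert.h>
    #include <stdint.h>

    #define LINK_BASE 0xffffffffeb000000ULL

    int main ( void ) {
        uint64_t phys_base = 0x7c400000ULL;   /* example load address */
        uint64_t symbol = LINK_BASE + 0x1234; /* a .textdata address */

        /* 64-bit definition: address + virt_offset == physical address */
        uint64_t virt_offset = ( phys_base - LINK_BASE );
        assert ( ( symbol + virt_offset ) == ( phys_base + 0x1234 ) );

        /* 32-bit code sees only the truncated symbol and the low 32
         * bits of virt_offset, yet reaches the same physical address.
         */
        uint32_t virt_offset32 = ( uint32_t ) virt_offset;
        uint32_t symbol32 = ( uint32_t ) symbol;
        assert ( ( symbol32 + virt_offset32 ) ==
                 ( uint32_t ) ( symbol + virt_offset ) );
        return 0;
    }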

We define "text16" and "data16" for a 64-bit build as the physical
addresses of the .text16 and .data16 sections.  Since a physical
address within the 32-bit address space may be used directly as a
64-bit virtual address (thanks to the identity map), this definition
provides the most natural access to variables in .text16 and .data16.
Note that this requires a minor adjustment in prot_to_real(), which
accesses .text16 using 32-bit virtual addresses.
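
A rough C sketch of the two views of .text16 that this implies
(illustrative values only; virt_offset here is a sample value, not
something computed by this code):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
        uint64_t text16 = 0x9f000ULL;         /* physical base of .text16 (example) */
        uint64_t virt_offset = 0x91400000ULL; /* as defined above (example) */

        /* 64-bit code: thanks to the identity map, the physical
         * address is itself a usable virtual address.
         */
        uint64_t addr64 = text16;

        /* 32-bit code (e.g. prot_to_real): addresses are relative to
         * VIRTUAL_DS, so virt_offset must first be subtracted to
         * obtain a 32-bit virtual address.
         */
        uint32_t addr32 = ( uint32_t ) ( text16 - virt_offset );

        /* Both views name the same physical location */
        assert ( ( uint32_t ) ( addr32 + virt_offset ) == ( uint32_t ) addr64 );
        return 0;
    }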

Signed-off-by: Michael Brown <mcb30@ipxe.org>
Michael Brown, 8 years ago
commit d1562c38a6 (tags/v1.20.1)

src/arch/x86/core/virtaddr.S (+5, -5)

@@ -32,13 +32,13 @@ _virt_to_phys:
 	pushl	%ebp
 
 	/* Change return address to a physical address */
-	movl	virt_offset, %ebp
+	movl	VIRTUAL(virt_offset), %ebp
 	addl	%ebp, 12(%esp)
 
 	/* Switch to physical code segment */
 	cli
 	pushl	$PHYSICAL_CS
-	leal	1f(%ebp), %eax
+	leal	VIRTUAL(1f)(%ebp), %eax
 	pushl	%eax
 	lret
 1:
@@ -78,7 +78,7 @@ _phys_to_virt:
 
 	/* Switch to virtual code segment */
 	cli
-	ljmp	$VIRTUAL_CS, $1f
+	ljmp	$VIRTUAL_CS, $VIRTUAL(1f)
 1:
 	/* Reload data segment registers */
 	movl	$VIRTUAL_DS, %eax
@@ -88,7 +88,7 @@ _phys_to_virt:
 	movl	%eax, %gs
 
 	/* Reload stack segment and adjust %esp */
-	movl	virt_offset, %ebp
+	movl	VIRTUAL(virt_offset), %ebp
 	movl	%eax, %ss
 	subl	%ebp, %esp
 
@@ -134,7 +134,7 @@ _intr_to_virt:
 
 	/* Reload stack segment and adjust %esp if necessary */
 	je	1f
-	movl	virt_offset, %ebp
+	movl	VIRTUAL(virt_offset), %ebp
 	movl	%eax, %ss
 	subl	%ebp, %esp
 1:

src/arch/x86/include/librm.h (+36, -3)

@@ -7,7 +7,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  *
  * Don't change these unless you really know what you're doing.
  */
-
 #define VIRTUAL_CS 0x08
 #define VIRTUAL_DS 0x10
 #define PHYSICAL_CS 0x18
@@ -16,6 +15,40 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #define REAL_DS 0x30
 #define P2R_DS 0x38
 
+/* Calculate symbol address within VIRTUAL_CS or VIRTUAL_DS
+ *
+ * In a 64-bit build, we set the bases of VIRTUAL_CS and VIRTUAL_DS
+ * such that truncating a .textdata symbol value to 32 bits gives a
+ * valid 32-bit virtual address.
+ *
+ * The C code is compiled with -mcmodel=kernel and so we must place
+ * all .textdata symbols within the negative 2GB of the 64-bit address
+ * space.  Consequently, all .textdata symbols will have the MSB set
+ * after truncation to 32 bits.  This means that a straightforward
+ * R_X86_64_32 relocation record for the symbol will fail, since the
+ * truncated symbol value will not correctly zero-extend to the
+ * original 64-bit value.
+ *
+ * Using an R_X86_64_32S relocation record would work, but there is no
+ * (sensible) way to generate these relocation records within 32-bit
+ * or 16-bit code.
+ *
+ * The simplest solution is to generate an R_X86_64_32 relocation
+ * record with an addend of (-0xffffffff00000000).  Since all
+ * .textdata symbols are within the negative 2GB of the 64-bit address
+ * space, this addend acts to effectively truncate the symbol to 32
+ * bits, thereby matching the semantics of the R_X86_64_32 relocation
+ * records generated for 32-bit and 16-bit code.
+ *
+ * In a 32-bit build, this problem does not exist, and we can just use
+ * the .textdata symbol values directly.
+ */
+#ifdef __x86_64__
+#define VIRTUAL(address) ( (address) - 0xffffffff00000000 )
+#else
+#define VIRTUAL(address) (address)
+#endif
+
 #ifdef ASSEMBLY
 
 /**
@@ -24,7 +57,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  * @v function		C function
  */
 .macro virtcall function
-	pushl	$\function
+	pushl	$VIRTUAL(\function)
 	call	prot_call
 .endm
 
@@ -42,7 +75,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
  * @v function		C function
  */
 #define VIRT_CALL( function )						\
-	"pushl $( " #function " )\n\t"					\
+	"pushl $( " _S2 ( VIRTUAL ( function ) ) " )\n\t"		\
 	"call prot_call\n\t"
 
 /* Variables in librm.S */

src/arch/x86/transitions/librm.S (+42, -23)

@@ -19,8 +19,22 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
 #define SIZEOF_REAL_MODE_REGS	( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
 #define SIZEOF_I386_FLAGS	4
 #define SIZEOF_I386_ALL_REGS	( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
-	
-	.arch i386
+
+/* Size of an address */
+#ifdef __x86_64__
+#define SIZEOF_ADDR 8
+#else
+#define SIZEOF_ADDR 4
+#endif
+
+/* Selectively assemble code for 32-bit/64-bit builds */
+#ifdef __x86_64__
+#define if32 if 0
+#define if64 if 1
+#else
+#define if32 if 1
+#define if64 if 0
+#endif
 
 /****************************************************************************
  * Global descriptor table
@@ -126,7 +140,7 @@ rm_sp:	.word 0
 rm_ss:	.word 0
 
 	.section ".data.pm_esp", "aw", @progbits
-pm_esp:	.long _estack
+pm_esp:	.long VIRTUAL(_estack)
 
 /****************************************************************************
  * Virtual address offsets
@@ -137,9 +151,9 @@ pm_esp:	.long _estack
 ****************************************************************************
 */
 	.struct 0
-VA_VIRT_OFFSET:	.space	4
-VA_TEXT16:	.space	4
-VA_DATA16:	.space	4
+VA_VIRT_OFFSET:	.space	SIZEOF_ADDR
+VA_TEXT16:	.space	SIZEOF_ADDR
+VA_DATA16:	.space	SIZEOF_ADDR
 VA_SIZE:
 	.previous
 
@@ -168,7 +182,7 @@ virt_addrs:	.space	VA_SIZE
  * Parameters:
  *   %cs : .text16 segment
  *   %ds : .data16 segment
- *   %edi : Physical base of protected-mode code (virt_offset)
+ *   %edi : Physical base of protected-mode code
  ****************************************************************************
  */
 	.section ".text16.init_librm", "ax", @progbits
@@ -181,7 +195,9 @@ init_librm:
 	pushl	%edi
 
 	/* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
+	subl	$VIRTUAL(_textdata), %edi
 	movl	%edi, rm_virt_offset
+.if64 ;	setae	(rm_virt_offset+4) ; .endif
 	movl	%edi, %eax
 	movw	$virtual_cs, %bx
 	call	set_seg_base
@@ -195,7 +211,7 @@ init_librm:
 	shll	$4, %eax
 	movw	$real_cs, %bx
 	call	set_seg_base
-	subl	%edi, %eax
+.if32 ;	subl	%edi, %eax ; .endif
 	movl	%eax, rm_text16
 
 	/* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
@@ -207,7 +223,7 @@
 	call	set_seg_base
 	movl	%eax, gdt_base
 	addl	$gdt, gdt_base
-	subl	%edi, %eax
+.if32 ;	subl	%edi, %eax ; .endif
 	movl	%eax, rm_data16
 
 	/* Switch to protected mode */
@@ -221,7 +237,7 @@ init_librm_pmode:
 	movw	$REAL_DS, %ax
 	movw	%ax, %ds
 	movl	$rm_virt_addrs, %esi
-	movl	$virt_addrs, %edi
+	movl	$VIRTUAL(virt_addrs), %edi
 	movl	$( VA_SIZE / 4 ), %ecx
 	rep movsl
 	popw	%ds
@@ -312,7 +328,7 @@ real_to_prot:
 	movl	%cr0, %eax
 	orb	$CR0_PE, %al
 	movl	%eax, %cr0
-	data32 ljmp	$VIRTUAL_CS, $r2p_pmode
+	data32 ljmp	$VIRTUAL_CS, $VIRTUAL(r2p_pmode)
 	.section ".text.real_to_prot", "ax", @progbits
 	.code32
 r2p_pmode:
@@ -323,15 +339,15 @@ r2p_pmode:
 	movw	%ax, %fs
 	movw	%ax, %gs
 	movw	%ax, %ss
-	movl	pm_esp, %esp
+	movl	VIRTUAL(pm_esp), %esp
 
 	/* Load protected-mode interrupt descriptor table */
-	lidt	idtr
+	lidt	VIRTUAL(idtr)
 
 	/* Record real-mode %ss:sp (after removal of data) */
-	movw	%bp, rm_ss
+	movw	%bp, VIRTUAL(rm_ss)
 	addl	%ecx, %edx
-	movw	%dx, rm_sp
+	movw	%dx, VIRTUAL(rm_sp)
 
 	/* Move data from RM stack to PM stack */
 	subl	%ecx, %esp
@@ -365,7 +381,8 @@ r2p_pmode:
 	.code32
 prot_to_real:
 	/* Copy real-mode global descriptor table register to RM code segment */
-	movl	text16, %edi
+	movl	VIRTUAL(text16), %edi
+.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
 	leal	rm_gdtr(%edi), %edi
 	movsw
 	movsl
@@ -377,20 +394,20 @@ prot_to_real:
 	addl	$4, %ecx
 
 	/* Real-mode %ss:sp => %ebp:edx and virtual address => %edi */
-	movzwl	rm_ss, %ebp
-	movzwl	rm_sp, %edx
+	movzwl	VIRTUAL(rm_ss), %ebp
+	movzwl	VIRTUAL(rm_sp), %edx
 	subl	%ecx, %edx
 	movl	%ebp, %eax
 	shll	$4, %eax
 	leal	(%eax,%edx), %edi
-	subl	virt_offset, %edi
+	subl	VIRTUAL(virt_offset), %edi
 
 	/* Move data from PM stack to RM stack */
 	movl	%esp, %esi
 	rep movsb
 
 	/* Record protected-mode %esp (after removal of data) */
-	movl	%esi, pm_esp
+	movl	%esi, VIRTUAL(pm_esp)
 
 	/* Load real-mode segment limits */
 	movw	$P2R_DS, %ax
@@ -512,7 +529,7 @@ prot_call:
 
 	/* Switch to protected mode and move register dump to PM stack */
 	movl	$PC_OFFSET_END, %ecx
-	pushl	$pc_pmode
+	pushl	$VIRTUAL(pc_pmode)
 	jmp	real_to_prot
 	.section ".text.prot_call", "ax", @progbits
 	.code32
@@ -589,7 +606,7 @@ real_call:
 	/* Switch to real mode and move register dump to RM stack  */
 	movl	$( RC_OFFSET_REGS_END + 4 /* function pointer copy */ ), %ecx
 	pushl	$rc_rmode
-	movl	$rm_default_gdtr_idtr, %esi
+	movl	$VIRTUAL(rm_default_gdtr_idtr), %esi
 	jmp	prot_to_real
 	.section ".text16.real_call", "ax", @progbits
 	.code16
@@ -605,7 +622,7 @@ rc_rmode:
 
 	/* Switch to protected mode and move register dump back to PM stack */
 	movl	$RC_OFFSET_REGS_END, %ecx
-	pushl	$rc_pmode
+	pushl	$VIRTUAL(rc_pmode)
 	jmp	real_to_prot
 	.section ".text.real_call", "ax", @progbits
 	.code32
@@ -665,6 +682,8 @@ flatten_dummy:
  * May be entered with either physical or virtual stack segment.
 ****************************************************************************
 */
+	.section ".text.interrupt_wrapper", "ax", @progbits
+	.code32
 	.globl interrupt_wrapper
 interrupt_wrapper:
 	/* Preserve segment registers and original %esp */

src/arch/x86_64/Makefile (+0, -4)

@@ -7,10 +7,6 @@ CFLAGS		+= -fstrength-reduce -fomit-frame-pointer
 #
 CFLAGS		+= -falign-jumps=1 -falign-loops=1 -falign-functions=1
 
-# Use %rip-relative addressing wherever possible.
-#
-CFLAGS		+= -fpie
-
 # Force 64-bit code
 #
 CFLAGS		+= -m64

src/arch/x86_64/Makefile.efi (+4, -0)

@@ -1,5 +1,9 @@
 # -*- makefile -*- : Force emacs to use Makefile mode
 
+# Use %rip-relative addressing wherever possible.
+#
+CFLAGS		+= -fpie
+
 # EFI probably doesn't guarantee us a red zone, so let's not rely on it.
 #
 CFLAGS		+= -mno-red-zone

src/arch/x86_64/Makefile.pcbios (+9, -0)

@@ -1,5 +1,14 @@
 # -*- makefile -*- : Force emacs to use Makefile mode
 
+# Place .textdata in negative 2GB of address space
+#
+CFLAGS		+= -mcmodel=kernel
+LDFLAGS		+= --section-start=.textdata=0xffffffffeb000000
+
+# Assembly code does not respect a red zone.
+#
+CFLAGS		+= -mno-red-zone
+
 # Include generic BIOS Makefile
 #
 MAKEDEPS	+= arch/x86/Makefile.pcbios
