@@ -19,6 +19,12 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
 /* CR4: physical address extensions */
 #define CR4_PAE ( 1 << 5 )
 
+/* Extended feature enable MSR (EFER) */
+#define MSR_EFER 0xc0000080
+
+/* EFER: long mode enable */
+#define EFER_LME ( 1 << 8 )
+
 /* Page: present */
 #define PG_P 0x01
 
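
The transition code below toggles these EFER bits with the rdmsr/wrmsr instruction pair. A minimal C sketch of the equivalent read-modify-write, assuming GCC inline assembly and ring-0 execution (the helper names are illustrative, not iPXE functions):

    #include <stdint.h>

    #define MSR_EFER 0xc0000080
    #define EFER_LME ( 1 << 8 )

    /* Read a model-specific register (sketch; requires ring 0) */
    static inline uint64_t rdmsr_sketch ( uint32_t msr ) {
    	uint32_t lo, hi;
    	__asm__ __volatile__ ( "rdmsr" : "=a" ( lo ), "=d" ( hi )
    			       : "c" ( msr ) );
    	return ( ( ( uint64_t ) hi << 32 ) | lo );
    }

    /* Write a model-specific register (sketch; requires ring 0) */
    static inline void wrmsr_sketch ( uint32_t msr, uint64_t value ) {
    	__asm__ __volatile__ ( "wrmsr" : : "c" ( msr ),
    			       "a" ( ( uint32_t ) value ),
    			       "d" ( ( uint32_t ) ( value >> 32 ) ) );
    }

    /* Set EFER.LME, as prot_to_long does below */
    static inline void enable_long_mode_sketch ( void ) {
    	wrmsr_sketch ( MSR_EFER, ( rdmsr_sketch ( MSR_EFER ) | EFER_LME ) );
    }
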
@@ -49,6 +55,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
 #define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
 #define SIZEOF_I386_FLAGS 4
 #define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
+#define SIZEOF_X86_64_REGS 128
 
 /* Size of an address */
 #ifdef __x86_64__
@@ -57,6 +64,13 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
 #define SIZEOF_ADDR 4
 #endif
 
+/* Default code size */
+#ifdef __x86_64__
+#define CODE_DEFAULT code64
+#else
+#define CODE_DEFAULT code32
+#endif
+
 /* Selectively assemble code for 32-bit/64-bit builds */
 #ifdef __x86_64__
 #define if32 if 0
@@ -124,6 +138,11 @@ p2r_ds: /* 16 bit real mode data segment for prot_to_real transition */
 	.word	0xffff, ( P2R_DS << 4 )
 	.byte	0, 0x93, 0x00, 0
 
+	.org	gdt + LONG_CS, 0
+long_cs:	/* 64 bit long mode code segment */
+	.word	0, 0
+	.byte	0, 0x9a, 0x20, 0
+
 gdt_end:
 	.equ	gdt_length, gdt_end - gdt
 
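
In the long_cs descriptor added above, the access byte 0x9a encodes a present, DPL-0, executable/readable code segment, and the 0x20 flags byte sets only the L bit (64-bit code); base and limit are ignored for long-mode code segments. A sketch that reassembles the same eight bytes from named fields, for cross-checking (not iPXE code):

    #include <stdint.h>

    /* Rebuild long_cs ( .word 0, 0 ; .byte 0, 0x9a, 0x20, 0 ) as a uint64_t */
    static uint64_t long_cs_sketch ( void ) {
    	uint64_t access = 0x9a;	/* present | ring 0 | code | readable */
    	uint64_t flags = 0x2;	/* L=1 (long mode), D=0, G=0 */
    	/* Base and limit stay zero; the CPU ignores both in long mode */
    	return ( ( access << 40 ) | ( flags << 52 ) );
    }
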
@@ -256,10 +275,9 @@ init_librm:
 .if32 ;	subl	%edi, %eax ; .endif
 	movl	%eax, rm_data16
 
-.if64 ;	/* Reset page tables, if applicable */
-	xorl	%eax, %eax
-	movl	%eax, pml4
-.endif
+	/* Configure virt_call for protected mode, if applicable */
+.if64 ;	movl	$VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif
+
 	/* Switch to protected mode */
 	virtcall init_librm_pmode
 	.section ".text.init_librm", "ax", @progbits
@@ -276,8 +294,10 @@ init_librm_pmode:
 	rep movsl
 	popw	%ds
 
-.if64 ;	/* Initialise page tables, if applicable */
+.if64 ;	/* Initialise long mode, if applicable */
 	movl	VIRTUAL(virt_offset), %edi
+	leal	VIRTUAL(p2l_ljmp_target)(%edi), %eax
+	movl	%eax, VIRTUAL(p2l_ljmp_offset)
 	call	init_pages
 .endif
 	/* Return to real mode */
@@ -286,6 +306,9 @@ init_librm_pmode:
 	.code16
 init_librm_rmode:
 
+	/* Configure virt_call for long mode, if applicable */
+.if64 ;	movl	$VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif
+
 	/* Initialise IDT */
 	virtcall init_idt
 
@@ -361,9 +384,10 @@ real_to_prot:
 	movw	%ax, %gs
 	movw	%ax, %ss
 
-	/* Switch to protected mode */
+	/* Switch to protected mode (with paging disabled if applicable) */
 	cli
 	movl	%cr0, %eax
+.if64 ;	andl	$~CR0_PG, %eax ; .endif
 	orb	$CR0_PE, %al
 	movl	%eax, %cr0
 	data32 ljmp	$VIRTUAL_CS, $VIRTUAL(r2p_pmode)
@@ -380,7 +404,7 @@ r2p_pmode:
 	movl	VIRTUAL(pm_esp), %esp
 
 	/* Load protected-mode interrupt descriptor table */
-	lidt	VIRTUAL(idtr)
+	lidt	VIRTUAL(idtr32)
 
 	/* Record real-mode %ss:sp (after removal of data) */
 	movw	%bp, VIRTUAL(rm_ss)
@@ -639,11 +663,234 @@ intr_to_prot:
 	.globl _intr_to_virt
 	.equ	_intr_to_virt, intr_to_prot
 
+/****************************************************************************
+ * prot_to_long (protected-mode near call, 32-bit virtual return address)
+ *
+ * Switch from 32-bit protected mode with virtual addresses to 64-bit
+ * long mode.  The protected-mode %esp is adjusted to a physical
+ * address.  All other registers are preserved.
+ *
+ * The return address for this function should be a 32-bit (sic)
+ * virtual address.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.prot_to_long", "ax", @progbits
+	.code32
+prot_to_long:
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+
+	/* Set up PML4 */
+	movl	VIRTUAL(pml4), %eax
+	movl	%eax, %cr3
+
+	/* Enable PAE */
+	movl	%cr4, %eax
+	orb	$CR4_PAE, %al
+	movl	%eax, %cr4
+
+	/* Enable long mode */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	orw	$EFER_LME, %ax
+	wrmsr
+
+	/* Enable paging */
+	movl	%cr0, %eax
+	orl	$CR0_PG, %eax
+	movl	%eax, %cr0
+
+	/* Restore registers */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+
+	/* Construct 64-bit return address */
+	pushl	(%esp)
+	movl	$0xffffffff, 4(%esp)
+p2l_ljmp:
+	/* Switch to long mode (using a physical %rip) */
+	ljmp	$LONG_CS, $0
+	.code64
+p2l_lmode:
+	/* Adjust and zero-extend %esp to a physical address */
+	addl	virt_offset, %esp
+
+	/* Use long-mode IDT */
+	lidt	idtr64
+
+	/* Return to virtual address */
+	ret
+
+	/* Long mode jump offset and target.  Required since an ljmp
+	 * in protected mode will zero-extend the offset, and so
+	 * cannot reach an address within the negative 2GB as used by
+	 * -mcmodel=kernel.  Assigned by the call to init_librm.
+	 */
+	.equ	p2l_ljmp_offset, ( p2l_ljmp + 1 )
+	.equ	p2l_ljmp_target, p2l_lmode
+
+	.endif
+
+/****************************************************************************
+ * long_to_prot (long-mode near call, 64-bit virtual return address)
+ *
+ * Switch from 64-bit long mode to 32-bit protected mode with virtual
+ * addresses.  The long-mode %rsp is adjusted to a virtual address.
+ * All other registers are preserved.
+ *
+ * The return address for this function should be a 64-bit (sic)
+ * virtual address.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_to_prot", "ax", @progbits
+	.code64
+long_to_prot:
+	/* Switch to protected mode */
+	ljmp	*l2p_vector
+	.code32
+l2p_pmode:
+	/* Adjust %esp to a virtual address */
+	subl	VIRTUAL(virt_offset), %esp
+
+	/* Preserve registers */
+	pushl	%eax
+	pushl	%ecx
+	pushl	%edx
+
+	/* Disable paging */
+	movl	%cr0, %eax
+	andl	$~CR0_PG, %eax
+	movl	%eax, %cr0
+
+	/* Disable PAE (in case external non-PAE-aware code enables paging) */
+	movl	%cr4, %eax
+	andb	$~CR4_PAE, %al
+	movl	%eax, %cr4
+
+	/* Disable long mode */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	andw	$~EFER_LME, %ax
+	wrmsr
+
+	/* Restore registers */
+	popl	%edx
+	popl	%ecx
+	popl	%eax
+
+	/* Use protected-mode IDT */
+	lidt	VIRTUAL(idtr32)
+
+	/* Return */
+	ret	$4
+
+	/* Long mode jump vector.  Required since there is no "ljmp
+	 * immediate" instruction in long mode.
+	 */
+	.section ".data.l2p_vector", "aw", @progbits
+l2p_vector:
+	.long	VIRTUAL(l2p_pmode), VIRTUAL_CS
+
+	.endif
+
+/****************************************************************************
+ * long_preserve_regs (long-mode near call, 64-bit virtual return address)
+ *
+ * Preserve registers that are accessible only in long mode.  This
+ * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
+ * %rsi, %rdi, and %rbp.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_preserve_regs", "ax", @progbits
+	.code64
+long_preserve_regs:
+	/* Preserve registers */
+	pushq	%rax
+	pushq	%rcx
+	pushq	%rdx
+	pushq	%rbx
+	pushq	%rsp
+	pushq	%rbp
+	pushq	%rsi
+	pushq	%rdi
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+	pushq	%r12
+	pushq	%r13
+	pushq	%r14
+	pushq	%r15
+
+	/* Return */
+	jmp	*SIZEOF_X86_64_REGS(%rsp)
+
+	.endif
+
+/****************************************************************************
+ * long_restore_regs (long-mode near call, 64-bit virtual return address)
+ *
+ * Restore registers that are accessible only in long mode.  This
+ * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
+ * %rsi, %rdi, and %rbp.
+ *
+ ****************************************************************************
+ */
+	.if64
+
+	.section ".text.long_restore_regs", "ax", @progbits
+	.code64
+long_restore_regs:
+	/* Move return address above register dump */
+	popq	SIZEOF_X86_64_REGS(%rsp)
+
+	/* Restore registers */
+	popq	%r15
+	popq	%r14
+	popq	%r13
+	popq	%r12
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+	movl	%edi, (%rsp)
+	popq	%rdi
+	movl	%esi, (%rsp)
+	popq	%rsi
+	movl	%ebp, (%rsp)
+	popq	%rbp
+	leaq	8(%rsp), %rsp /* discard */
+	movl	%ebx, (%rsp)
+	popq	%rbx
+	movl	%edx, (%rsp)
+	popq	%rdx
+	movl	%ecx, (%rsp)
+	popq	%rcx
+	movl	%eax, (%rsp)
+	popq	%rax
+
+	/* Return */
+	ret
+
+	.endif
+
 /****************************************************************************
  * virt_call (real-mode near call, 16-bit real-mode near return address)
  *
- * Call a specific C function in the protected-mode code.  The
- * prototype of the C function must be
+ * Call a specific C function in 32-bit protected mode or 64-bit long
+ * mode (as applicable).  The prototype of the C function must be
  *   void function ( struct i386_all_regs *ix86 );
  * ix86 will point to a struct containing the real-mode registers
  * at entry to virt_call().
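
A subtle step in prot_to_long above is the return address fix-up: pushl (%esp) duplicates the 32-bit return address already on the stack, and the movl writes 0xffffffff into the upper dword, producing the 64-bit address that the final ret pops. This amounts to sign extension, which is correct because -mcmodel=kernel places all code in the negative 2GB, so the top bit of the 32-bit address is always set. A sketch of the same arithmetic under that assumption:

    #include <assert.h>
    #include <stdint.h>

    /* Widen a 32-bit virtual address as prot_to_long's stack fix-up does */
    static uint64_t widen_retaddr_sketch ( uint32_t vaddr32 ) {
    	/* Writing 0xffffffff above the low dword is sign extension
    	 * for any address at or above 0x80000000, i.e. anything in
    	 * the -mcmodel=kernel negative 2GB.
    	 */
    	return ( ( ( uint64_t ) 0xffffffffUL << 32 ) | vaddr32 );
    }

    int main ( void ) {
    	uint32_t vaddr32 = 0xffff1234UL;
    	assert ( widen_retaddr_sketch ( vaddr32 ) ==
    		 ( uint64_t ) ( int64_t ) ( int32_t ) vaddr32 );
    	return 0;
    }
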
@@ -662,7 +909,7 @@ intr_to_prot:
  * critical data to registers before calling main()).
  *
  * Parameters:
- *   function : virtual address of protected-mode function to call
+ *   function : 32-bit virtual address of function to call
  *
  * Example usage:
  *	pushl	$pxe_api_call
@@ -674,6 +921,12 @@ intr_to_prot:
 .struct 0
 VC_OFFSET_GDT:	.space	6
 VC_OFFSET_IDT:	.space	6
+.if64
+VC_OFFSET_PADDING64:	.space	4 /* for alignment */
+VC_OFFSET_CR3:	.space	4
+VC_OFFSET_CR4:	.space	4
+VC_OFFSET_EMER:	.space	8
+.endif
 VC_OFFSET_IX86:	.space	SIZEOF_I386_ALL_REGS
 VC_OFFSET_PADDING:	.space	2 /* for alignment */
 VC_OFFSET_RETADDR:	.space	2
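
For the 64-bit build, the .if64 fields above make room for the control state that virt_call must restore on the way back to real mode. The same layout expressed as an illustrative C struct (not an iPXE type) shows why VC_OFFSET_PADDING64 exists: it aligns the 8-byte EFER slot, named VC_OFFSET_EMER in this file, to an 8-byte boundary:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative mirror of the virt_call stack frame (64-bit build) */
    struct vc_frame_sketch {
    	uint8_t gdt[6];		/* VC_OFFSET_GDT, filled by sgdt */
    	uint8_t idt[6];		/* VC_OFFSET_IDT, filled by sidt */
    	uint32_t padding64;	/* VC_OFFSET_PADDING64 */
    	uint32_t cr3;		/* VC_OFFSET_CR3 */
    	uint32_t cr4;		/* VC_OFFSET_CR4 */
    	uint64_t efer;		/* VC_OFFSET_EMER: saved EFER value */
    	/* ... followed by VC_OFFSET_IX86 (struct i386_all_regs),
    	 * 2 bytes of padding, and the 16-bit return address
    	 */
    };

    _Static_assert ( offsetof ( struct vc_frame_sketch, efer ) == 24,
    		 "EFER slot must be 8-byte aligned" );
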
@@ -701,22 +954,49 @@ virt_call:
 	sidt	VC_OFFSET_IDT(%bp)
 	sgdt	VC_OFFSET_GDT(%bp)
 
+.if64 ;	/* Preserve control registers, if applicable */
+	movl	$MSR_EFER, %ecx
+	rdmsr
+	movl	%eax, (VC_OFFSET_EMER+0)(%bp)
+	movl	%edx, (VC_OFFSET_EMER+4)(%bp)
+	movl	%cr4, %eax
+	movl	%eax, VC_OFFSET_CR4(%bp)
+	movl	%cr3, %eax
+	movl	%eax, VC_OFFSET_CR3(%bp)
+.endif
 	/* For sanity's sake, clear the direction flag as soon as possible */
 	cld
 
 	/* Switch to protected mode and move register dump to PM stack */
 	movl	$VC_OFFSET_END, %ecx
 	pushl	$VIRTUAL(vc_pmode)
-	jmp	real_to_prot
+vc_jmp:	jmp	real_to_prot
 	.section ".text.virt_call", "ax", @progbits
 	.code32
 vc_pmode:
-	/* Call function */
+	/* Call function (in protected mode) */
 	leal	VC_OFFSET_IX86(%esp), %eax
 	pushl	%eax
 	call	*(VC_OFFSET_FUNCTION+4)(%esp)
 	popl	%eax /* discard */
 
+.if64 ;	/* Switch to long mode */
+	jmp	1f
+vc_lmode:
+	call	prot_to_long
+	.code64
+
+	/* Call function (in long mode) */
+	leaq	VC_OFFSET_IX86(%rsp), %rdi
+	pushq	%rdi
+	movslq	(VC_OFFSET_FUNCTION+8)(%rsp), %rax
+	callq	*%rax
+	popq	%rdi /* discard */
+
+	/* Switch to protected mode */
+	call	long_to_prot
+1:	.code32
+.endif
 	/* Switch to real mode and move register dump back to RM stack */
 	movl	$VC_OFFSET_END, %ecx
 	movl	%esp, %esi
@@ -725,6 +1005,17 @@ vc_pmode:
 	.section ".text16.virt_call", "ax", @progbits
 	.code16
 vc_rmode:
+.if64 ;	/* Restore control registers, if applicable */
+	movw	%sp, %bp
+	movl	VC_OFFSET_CR3(%bp), %eax
+	movl	%eax, %cr3
+	movl	VC_OFFSET_CR4(%bp), %eax
+	movl	%eax, %cr4
+	movl	(VC_OFFSET_EMER+0)(%bp), %eax
+	movl	(VC_OFFSET_EMER+4)(%bp), %edx
+	movl	$MSR_EFER, %ecx
+	wrmsr
+.endif
 	/* Restore registers and flags and return */
 	addw	$( VC_OFFSET_IX86 + 4 /* also skip %cs and %ss */ ), %sp
 	popw	%ds
@@ -744,18 +1035,23 @@ vc_rmode:
 	/* Return and discard function parameters */
 	ret	$( VC_OFFSET_END - VC_OFFSET_PARAMS )
 
+
+	/* Protected-mode jump target */
+	.equ	vc_jmp_offset, ( vc_jmp - 4 )
+
 /****************************************************************************
  * real_call (protected-mode near call, 32-bit virtual return address)
+ * real_call (long-mode near call, 64-bit virtual return address)
  *
- * Call a real-mode function from protected-mode code.
+ * Call a real-mode function from protected-mode or long-mode code.
  *
  * The non-segment register values will be passed directly to the
  * real-mode code.  The segment registers will be set as per
  * prot_to_real.  The non-segment register values set by the real-mode
- * function will be passed back to the protected-mode caller.  A
- * result of this is that this routine cannot be called directly from
- * C code, since it clobbers registers that the C ABI expects the
- * callee to preserve.
+ * function will be passed back to the protected-mode or long-mode
+ * caller.  A result of this is that this routine cannot be called
+ * directly from C code, since it clobbers registers that the C ABI
+ * expects the callee to preserve.
 *
 * librm.h defines a convenient macro REAL_CODE() for using real_call.
 * See librm.h and realmode.h for details and examples.
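
The vc_jmp_offset symbol defined above is the patch point used by init_librm: pushl $imm32 assembles to the opcode byte 0x68 followed by a 4-byte immediate, so the immediate of the pushl $VIRTUAL(vc_pmode) immediately preceding vc_jmp occupies the four bytes ending at vc_jmp, i.e. it lives at ( vc_jmp - 4 ). A sketch of that patch in C (an illustrative helper mirroring the movl ... %cs:vc_jmp_offset instructions in init_librm, not iPXE code):

    #include <stdint.h>
    #include <string.h>

    /* Rewrite the immediate of the "pushl $imm32" that ends at vc_jmp */
    static void patch_vc_jmp_sketch ( uint8_t *vc_jmp, uint32_t target ) {
    	/* The 4 bytes below vc_jmp hold the pushed continuation
    	 * address: vc_pmode for 32-bit builds, vc_lmode for 64-bit.
    	 */
    	memcpy ( ( vc_jmp - 4 ), &target, sizeof ( target ) );
    }
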
@@ -769,16 +1065,25 @@ vc_rmode:
 	.struct 0
 RC_OFFSET_REGS:	.space	SIZEOF_I386_REGS
 RC_OFFSET_REGS_END:
-RC_OFFSET_RETADDR:	.space	4
+.if64
+RC_OFFSET_LREGS:	.space	SIZEOF_X86_64_REGS
+RC_OFFSET_LREG_RETADDR:	.space	SIZEOF_ADDR
+.endif
+RC_OFFSET_RETADDR:	.space	SIZEOF_ADDR
 RC_OFFSET_PARAMS:
-RC_OFFSET_FUNCTION:	.space	4
+RC_OFFSET_FUNCTION:	.space	SIZEOF_ADDR
 RC_OFFSET_END:
 	.previous
 
 	.section ".text.real_call", "ax", @progbits
-	.code32
+	.CODE_DEFAULT
 	.globl real_call
 real_call:
+.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
+	call	long_preserve_regs
+	call	long_to_prot
+	.code32
+.endif
 	/* Create register dump and function pointer copy on PM stack */
 	pushal
 	pushl	RC_OFFSET_FUNCTION(%esp)
@@ -810,6 +1115,11 @@ rc_pmode:
 	/* Restore registers */
 	popal
 
+.if64 ;	/* Switch to long mode and restore registers, if applicable */
+	call	prot_to_long
+	.code64
+	call	long_restore_regs
+.endif
 	/* Return and discard function parameters */
 	ret	$( RC_OFFSET_END - RC_OFFSET_PARAMS )
 
@@ -830,6 +1140,7 @@ rm_default_gdtr_idtr:
 
 /****************************************************************************
  * phys_call (protected-mode near call, 32-bit virtual return address)
+ * phys_call (long-mode near call, 64-bit virtual return address)
  *
  * Call a function with flat 32-bit physical addressing
  *
@@ -846,16 +1157,25 @@ rm_default_gdtr_idtr:
  ****************************************************************************
  */
 	.struct 0
-PHC_OFFSET_RETADDR:	.space	4
+.if64
+PHC_OFFSET_LREGS:	.space	SIZEOF_X86_64_REGS
+PHC_OFFSET_LREG_RETADDR:.space	SIZEOF_ADDR
+.endif
+PHC_OFFSET_RETADDR:	.space	SIZEOF_ADDR
 PHC_OFFSET_PARAMS:
-PHC_OFFSET_FUNCTION:	.space	4
+PHC_OFFSET_FUNCTION:	.space	SIZEOF_ADDR
 PHC_OFFSET_END:
 	.previous
 
 	.section ".text.phys_call", "ax", @progbits
-	.code32
+	.CODE_DEFAULT
 	.globl phys_call
 phys_call:
+.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
+	call	long_preserve_regs
+	call	long_to_prot
+	.code32
+.endif
 	/* Adjust function pointer to a physical address */
 	pushl	%ebp
 	movl	VIRTUAL(virt_offset), %ebp
@@ -874,6 +1194,11 @@ phys_call:
 	/* Switch to virtual addresses */
 	call	phys_to_prot
 
+.if64 ;	/* Switch to long mode and restore registers, if applicable */
+	call	prot_to_long
+	.code64
+	call	long_restore_regs
+.endif
 	/* Return and discard function parameters */
 	ret	$( PHC_OFFSET_END - PHC_OFFSET_PARAMS )
 
@@ -900,15 +1225,15 @@ flatten_real_mode:
 	ret
 
 	.section ".text.flatten_dummy", "ax", @progbits
-	.code32
+	.CODE_DEFAULT
 flatten_dummy:
 	ret
 
 /****************************************************************************
  * Interrupt wrapper
  *
- * Used by the protected-mode interrupt vectors to call the
- * interrupt() function.
+ * Used by the protected-mode and long-mode interrupt vectors to call
+ * the interrupt() function.
  *
  * May be entered with either physical or virtual stack segment.
  ****************************************************************************
@@ -917,6 +1242,24 @@ flatten_dummy:
 	.code32
 	.globl interrupt_wrapper
 interrupt_wrapper:
+	/* Preserve registers (excluding already-saved %eax and
+	 * otherwise unused registers which are callee-save for both
+	 * 32-bit and 64-bit ABIs).
+	 */
+	pushl	%ebx
+	pushl	%ecx
+	pushl	%edx
+	pushl	%esi
+	pushl	%edi
+
+	/* Expand IRQ number to whole %eax register */
+	movzbl	%al, %eax
+
+.if64 ;	/* Skip transition to long mode, if applicable */
+	movw	%cs, %bx
+	cmpw	$LONG_CS, %bx
+	je	1f
+.endif
 	/* Preserve segment registers and original %esp */
 	pushl	%ds
 	pushl	%es
@@ -927,14 +1270,39 @@ interrupt_wrapper:
 
 	/* Switch to virtual addressing */
 	call	intr_to_prot
-
-	/* Expand IRQ number to whole %eax register */
-	movzbl	%al, %eax
-
+.if64
+	/* Switch to long mode */
+	call	prot_to_long
+	.code64
+
+1:	/* Preserve long-mode caller-save registers */
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+
+	/* Expand IRQ number to whole %rdi register */
+	movl	%eax, %edi
+.endif
 	/* Call interrupt handler */
 	call	interrupt
+.if64
+	/* Restore long-mode caller-save registers */
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+
+	/* Skip transition back to protected mode, if applicable */
+	cmpw	$LONG_CS, %bx
+	je	1f
 
-	/* Restore original stack and segment registers */
+	/* Switch to protected mode */
+	call	long_to_prot
+	.code32
+	cmpw	$LONG_CS, %bx
+.endif
+	/* Restore segment registers and original %esp */
 	lss	(%esp), %esp
 	popl	%ss
 	popl	%gs
@@ -942,9 +1310,17 @@ interrupt_wrapper:
 	popl	%es
 	popl	%ds
 
-	/* Restore registers and return */
-	popal
-	iret
+1:	/* Restore registers */
+	popl	%edi
+	popl	%esi
+	popl	%edx
+	popl	%ecx
+	popl	%ebx
+	popl	%eax
+
+	/* Return from interrupt (with REX prefix if required) */
+.if64 ;	jne	1f ; .byte 0x48 ; .endif
+1:	iret
 
 /****************************************************************************
  * Page tables
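
The closing .if64 fragment above relies on instruction encoding: 0x48 is a REX.W prefix in long mode, so the byte pair 0x48 0xcf executes as iretq, while 0xcf alone is the 32-bit iret. The jne consumes the flags still set by the preceding cmpw $LONG_CS, %bx (pop, mov, and lss do not modify flags) and hops over the prefix byte when the interrupted code was not running in long mode. A trivial sketch documenting the two encodings:

    #include <stdio.h>

    int main ( void ) {
    	/* Byte sequences produced by the wrapper's epilogue */
    	const unsigned char iretq_bytes[] = { 0x48, 0xcf }; /* long mode */
    	const unsigned char iret_bytes[] = { 0xcf };	    /* protected mode */
    	printf ( "iretq: %02x %02x\n", iretq_bytes[0], iretq_bytes[1] );
    	printf ( "iret:  %02x\n", iret_bytes[0] );
    	return 0;
    }
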
@@ -1022,7 +1398,7 @@ pde_low:
 pte_textdata:
 	/* Allocated by linker script; must be at the end of .textdata */
 
-	.section ".bss16.pml4", "aw", @nobits
+	.section ".bss.pml4", "aw", @nobits
 pml4:	.long	0
 
 /****************************************************************************
@@ -1080,9 +1456,7 @@ init_pages:
 
 	/* Record PML4 physical address */
 	leal	VIRTUAL(pml4e)(%edi), %eax
-	movl	VIRTUAL(data16), %ebx
-	subl	%edi, %ebx
-	movl	%eax, pml4(%ebx)
+	movl	%eax, VIRTUAL(pml4)
 
 	/* Return */
 	ret