
librm.S

/*
 * librm: a library for interfacing to real-mode code
 *
 * Michael Brown <mbrown@fensystems.co.uk>
 *
 */

	FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )

/* Drag in general configuration */
#include <config/general.h>

/* Drag in local definitions */
#include "librm.h"

/* CR0: protection enabled */
#define CR0_PE ( 1 << 0 )

/* CR0: paging */
#define CR0_PG ( 1 << 31 )

/* CR4: physical address extensions */
#define CR4_PAE ( 1 << 5 )

/* Extended feature enable MSR (EFER) */
#define MSR_EFER 0xc0000080

/* EFER: long mode enable */
#define EFER_LME ( 1 << 8 )

/* Page: present */
#define PG_P 0x01

/* Page: read/write */
#define PG_RW 0x02

/* Page: user/supervisor */
#define PG_US 0x04

/* Page: page size */
#define PG_PS 0x80

/* Size of various paging-related data structures */
#define SIZEOF_PTE_LOG2 3
#define SIZEOF_PTE ( 1 << SIZEOF_PTE_LOG2 )
#define SIZEOF_PT_LOG2 12
#define SIZEOF_PT ( 1 << SIZEOF_PT_LOG2 )
#define SIZEOF_4KB_PAGE_LOG2 12
#define SIZEOF_4KB_PAGE ( 1 << SIZEOF_4KB_PAGE_LOG2 )
#define SIZEOF_2MB_PAGE_LOG2 21
#define SIZEOF_2MB_PAGE ( 1 << SIZEOF_2MB_PAGE_LOG2 )
#define SIZEOF_LOW_4GB_LOG2 32
#define SIZEOF_LOW_4GB ( 1 << SIZEOF_LOW_4GB_LOG2 )

/* Size of various C data structures */
#define SIZEOF_I386_SEG_REGS 12
#define SIZEOF_I386_REGS 32
#define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
#define SIZEOF_I386_FLAGS 4
#define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
#define SIZEOF_X86_64_REGS 128

/* Size of an address */
#ifdef __x86_64__
#define SIZEOF_ADDR 8
#else
#define SIZEOF_ADDR 4
#endif

/* Default code size */
#ifdef __x86_64__
#define CODE_DEFAULT code64
#else
#define CODE_DEFAULT code32
#endif

/* Selectively assemble code for 32-bit/64-bit builds */
#ifdef __x86_64__
#define if32 if 0
#define if64 if 1
#else
#define if32 if 1
#define if64 if 0
#endif
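/* Since this file is preprocessed, a directive such as ".if64"
 * expands to ".if 1" on 64-bit builds and ".if 0" on 32-bit builds,
 * so the enclosed lines are assembled only when applicable.
 */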
/****************************************************************************
 * Global descriptor table
 *
 * Call init_librm to set up the GDT before attempting to use any
 * protected-mode code.
 *
 * NOTE: This must be located before prot_to_real, otherwise gas
 * throws a "can't handle non absolute segment in `ljmp'" error due to
 * not knowing the value of REAL_CS when the ljmp is encountered.
 *
 * Note also that putting ".word gdt_end - gdt - 1" directly into
 * gdt_limit, rather than going via gdt_length, will also produce the
 * "non absolute segment" error.  This is most probably a bug in gas.
 ****************************************************************************
 */
	.section ".data16.gdt", "aw", @progbits
	.align 16
gdt:
gdtr:		/* The first GDT entry is unused, the GDTR can fit here. */
gdt_limit:	.word gdt_length - 1
gdt_base:	.long 0
		.word 0 /* padding */

	.org gdt + VIRTUAL_CS, 0
virtual_cs:	/* 32 bit protected mode code segment, virtual addresses */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	.org gdt + VIRTUAL_DS, 0
virtual_ds:	/* 32 bit protected mode data segment, virtual addresses */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0

	.org gdt + PHYSICAL_CS, 0
physical_cs:	/* 32 bit protected mode code segment, physical addresses */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	.org gdt + PHYSICAL_DS, 0
physical_ds:	/* 32 bit protected mode data segment, physical addresses */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0

	.org gdt + REAL_CS, 0
real_cs:	/* 16 bit real mode code segment */
	.word	0xffff, 0
	.byte	0, 0x9b, 0x00, 0

	.org gdt + REAL_DS, 0
real_ds:	/* 16 bit real mode data segment */
	.word	0xffff, 0
	.byte	0, 0x93, 0x00, 0

	.org gdt + P2R_DS, 0
p2r_ds:		/* 16 bit real mode data segment for prot_to_real transition */
	.word	0xffff, ( P2R_DS << 4 )
	.byte	0, 0x93, 0x00, 0

	.org gdt + LONG_CS, 0
long_cs:	/* 64 bit long mode code segment */
	.word	0, 0
	.byte	0, 0x9a, 0x20, 0

gdt_end:
	.equ	gdt_length, gdt_end - gdt
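/* Each descriptor above uses the standard 8-byte segment descriptor
 * layout:
 *
 *	.word	limit[15:0], base[15:0]
 *	.byte	base[23:16], access, ( flags | limit[19:16] ), base[31:24]
 *
 * For example, virtual_cs has access byte 0x9f (present, ring 0,
 * code) and flags byte 0xcf (4kB granularity, 32-bit, limit 0xfffff,
 * i.e. 4GB); its base address fields are filled in later by
 * set_seg_base.
 */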
/****************************************************************************
 * Stored real-mode and protected-mode stack pointers
 *
 * The real-mode stack pointer is stored here whenever real_to_prot
 * is called and restored whenever prot_to_real is called.  The
 * converse happens for the protected-mode stack pointer.
 *
 * Despite initial appearances this scheme is, in fact, re-entrant,
 * because program flow dictates that we always return via the point
 * we left by.  For example:
 *
 *     PXE API call entry
 *   1   real => prot
 *       ...
 *       Print a text string
 *       ...
 *   2   prot => real
 *       INT 10
 *   3   real => prot
 *       ...
 *       ...
 *   4   prot => real
 *     PXE API call exit
 *
 * At point 1, the real-mode stack value, say RPXE, is stored in
 * rm_ss,sp.  We want this value to still be present in rm_ss,sp when
 * we reach point 4.
 *
 * At point 2, the RM stack value is restored from RPXE.  At point 3,
 * the RM stack value is again stored in rm_ss,sp.  This *does*
 * overwrite the RPXE that we have stored there, but it's the same
 * value, since the code between points 2 and 3 has managed to return
 * to us.
 ****************************************************************************
 */
	.section ".bss.rm_ss_sp", "aw", @nobits
	.globl rm_sp
rm_sp:	.word 0
	.globl rm_ss
rm_ss:	.word 0

	.section ".data.pm_esp", "aw", @progbits
pm_esp:	.long VIRTUAL(_estack)
/****************************************************************************
 * Temporary static data buffer
 *
 * This is used to reduce the amount of real-mode stack space consumed
 * during mode transitions, since we are sometimes called with very
 * little real-mode stack space available.
 ****************************************************************************
 */
/* Temporary static buffer usage by virt_call */
	.struct 0
VC_TMP_GDT:		.space 6
VC_TMP_IDT:		.space 6
VC_TMP_PAD:		.space 4 /* for alignment */
.if64
VC_TMP_CR3:		.space 4
VC_TMP_CR4:		.space 4
VC_TMP_EMER:		.space 8
.endif
#ifdef TIVOLI_VMM_WORKAROUND
VC_TMP_FXSAVE:		.space 512
#endif
VC_TMP_END:
	.previous

/* Temporary static buffer usage by real_call */
	.struct 0
RC_TMP_FUNCTION:	.space 4
RC_TMP_END:
	.previous

/* Shared temporary static buffer */
	.section ".bss16.rm_tmpbuf", "aw", @nobits
	.align 16
rm_tmpbuf:
	.space	VC_TMP_END
	.size	rm_tmpbuf, . - rm_tmpbuf

/****************************************************************************
 * Virtual address offsets
 *
 * These are used by the protected-mode code to map between virtual
 * and physical addresses, and to access variables in the .text16 or
 * .data16 segments.
 ****************************************************************************
 */
	.struct 0
VA_VIRT_OFFSET:	.space SIZEOF_ADDR
VA_TEXT16:	.space SIZEOF_ADDR
VA_DATA16:	.space SIZEOF_ADDR
VA_SIZE:
	.previous

/* Internal copies, used only by librm itself */
	.section ".bss16.rm_virt_addrs", "aw", @nobits
rm_virt_addrs:	.space VA_SIZE
	.equ	rm_virt_offset, ( rm_virt_addrs + VA_VIRT_OFFSET )
	.equ	rm_text16, ( rm_virt_addrs + VA_TEXT16 )
	.equ	rm_data16, ( rm_virt_addrs + VA_DATA16 )

/* Externally visible variables, used by C code */
	.section ".bss.virt_addrs", "aw", @nobits
virt_addrs:	.space VA_SIZE
	.globl virt_offset
	.equ	virt_offset, ( virt_addrs + VA_VIRT_OFFSET )
	.globl text16
	.equ	text16, ( virt_addrs + VA_TEXT16 )
	.globl data16
	.equ	data16, ( virt_addrs + VA_DATA16 )
/****************************************************************************
 * init_librm (real-mode far call, 16-bit real-mode far return address)
 *
 * Initialise the GDT ready for transitions to protected mode.
 *
 * Parameters:
 *   %cs : .text16 segment
 *   %ds : .data16 segment
 *   %edi : Physical base of protected-mode code
 ****************************************************************************
 */
	.section ".text16.init_librm", "ax", @progbits
	.code16
	.globl init_librm
init_librm:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebx
	pushl	%edi
	/* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
	subl	$VIRTUAL(_textdata), %edi
	movl	%edi, rm_virt_offset
.if64 ;	setae	(rm_virt_offset+4) ; .endif
	movl	%edi, %eax
	movw	$virtual_cs, %bx
	call	set_seg_base
	movw	$virtual_ds, %bx
	call	set_seg_base
	/* Store rm_cs and rm_text16, set up real_cs segment */
	xorl	%eax, %eax
	movw	%cs, %ax
	movw	%ax, %cs:rm_cs
	shll	$4, %eax
	movw	$real_cs, %bx
	call	set_seg_base
.if32 ;	subl	%edi, %eax ; .endif
	movl	%eax, rm_text16
	/* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
	xorl	%eax, %eax
	movw	%ds, %ax
	movw	%ax, %cs:rm_ds
	shll	$4, %eax
	movw	$real_ds, %bx
	call	set_seg_base
	movl	%eax, gdt_base
	addl	$gdt, gdt_base
.if32 ;	subl	%edi, %eax ; .endif
	movl	%eax, rm_data16
	/* Configure virt_call for protected mode, if applicable */
.if64 ;	movl	$VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif
	/* Switch to protected mode */
	virtcall init_librm_pmode
	.section ".text.init_librm", "ax", @progbits
	.code32
init_librm_pmode:
	/* Store virt_offset, text16, and data16 */
	pushw	%ds
	movw	$REAL_DS, %ax
	movw	%ax, %ds
	movl	$rm_virt_addrs, %esi
	movl	$VIRTUAL(virt_addrs), %edi
	movl	$( VA_SIZE / 4 ), %ecx
	rep movsl
	popw	%ds
.if64 ;	/* Initialise long mode, if applicable */
	movl	VIRTUAL(virt_offset), %edi
	leal	VIRTUAL(p2l_ljmp_target)(%edi), %eax
	movl	%eax, VIRTUAL(p2l_ljmp_offset)
	call	init_pages
.endif
	/* Return to real mode */
	ret
	.section ".text16.init_librm", "ax", @progbits
	.code16
init_librm_rmode:
	/* Configure virt_call for long mode, if applicable */
.if64 ;	movl	$VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif
	/* Initialise IDT */
	virtcall init_idt
	/* Restore registers */
	popl	%edi
	popl	%ebx
	popl	%eax
	lret

	.section ".text16.set_seg_base", "ax", @progbits
	.code16
set_seg_base:
1:	movw	%ax, 2(%bx)
	rorl	$16, %eax
	movb	%al, 4(%bx)
	movb	%ah, 7(%bx)
	roll	$16, %eax
	ret
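/* set_seg_base scatters the 32-bit segment base in %eax into the
 * descriptor at %ds:%bx: bits 15:0 go to offset 2, bits 23:16 to
 * offset 4, and bits 31:24 to offset 7, matching the descriptor
 * layout shown above.  %eax is rotated back to its original value
 * before returning.
 */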
/****************************************************************************
 * real_to_prot (real-mode near call, 32-bit virtual return address)
 *
 * Switch from 16-bit real-mode to 32-bit protected mode with virtual
 * addresses.  The real-mode %ss:sp is stored in rm_ss and rm_sp, and
 * the protected-mode %esp is restored from the saved pm_esp.
 * Interrupts are disabled.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 * Parameters:
 *   %ecx : number of bytes to move from RM stack to PM stack
 *   %edx : number of bytes to copy from RM temporary buffer to PM stack
 *
 ****************************************************************************
 */
	.section ".text16.real_to_prot", "ax", @progbits
	.code16
real_to_prot:
	/* Enable A20 line */
	call	enable_a20
	/* A failure at this point is fatal, and there's nothing we
	 * can do about it other than lock the machine to make the
	 * problem immediately visible.
	 */
1:	jc	1b
	/* Make sure we have our data segment available */
	movw	%cs:rm_ds, %ds
	/* Add protected-mode return address to length of data to be copied */
	addw	$4, %cx /* %ecx must be less than 64kB anyway */
	/* Real-mode %ss:%sp => %ebp and virtual address => %esi */
	xorl	%eax, %eax
	movw	%ss, %ax
	shll	$4, %eax
	movzwl	%sp, %ebp
	addr32 leal (%eax,%ebp), %esi
	subl	rm_virt_offset, %esi
	shll	$12, %eax
	orl	%eax, %ebp
	/* Real-mode data segment virtual address => %ebx */
	movl	rm_data16, %ebx
.if64 ;	subl	rm_virt_offset, %ebx ; .endif
	/* Load protected-mode global descriptor table */
	data32 lgdt gdtr
	/* Zero segment registers.  This wastes around 12 cycles on
	 * real hardware, but saves a substantial number of emulated
	 * instructions under KVM.
	 */
	xorw	%ax, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	/* Switch to protected mode (with paging disabled if applicable) */
	cli
	movl	%cr0, %eax
.if64 ;	andl	$~CR0_PG, %eax ; .endif
	orb	$CR0_PE, %al
	movl	%eax, %cr0
	data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
	.section ".text.real_to_prot", "ax", @progbits
	.code32
r2p_pmode:
	/* Set up protected-mode data segments and stack pointer */
	movw	$VIRTUAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	VIRTUAL(pm_esp), %esp
	/* Load protected-mode interrupt descriptor table */
	lidt	VIRTUAL(idtr32)
	/* Record real-mode %ss:sp (after removal of data) */
	addl	%ecx, %ebp
	movl	%ebp, VIRTUAL(rm_sp)
	/* Move data from RM stack to PM stack */
	subl	%edx, %esp
	subl	%ecx, %esp
	movl	%esp, %edi
	rep movsb
	/* Copy data from RM temporary buffer to PM stack */
	leal	rm_tmpbuf(%ebx), %esi
	movl	%edx, %ecx
	rep movsb
	/* Return to virtual address */
	ret
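/* At the point of the "ret" above, the protected-mode stack holds
 * the %ecx bytes moved from the real-mode stack (beginning with the
 * 32-bit return address, which the "ret" pops), followed at higher
 * addresses by the %edx bytes copied from rm_tmpbuf.
 */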
/****************************************************************************
 * prot_to_real (protected-mode near call, 32-bit real-mode return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 16-bit
 * real mode.  The protected-mode %esp is stored in pm_esp and the
 * real-mode %ss:sp is restored from the saved rm_ss and rm_sp.  The
 * high word of the real-mode %esp is set to zero.  All real-mode data
 * segment registers are loaded from the saved rm_ds.  Interrupts are
 * *not* enabled, since we want to be able to use prot_to_real in an
 * ISR.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit (sic)
 * real-mode offset within .code16.
 *
 * Parameters:
 *   %ecx : number of bytes to move from PM stack to RM stack
 *   %edx : number of bytes to move from PM stack to RM temporary buffer
 *   %esi : real-mode global and interrupt descriptor table registers
 *
 ****************************************************************************
 */
	.section ".text.prot_to_real", "ax", @progbits
	.code32
prot_to_real:
	/* Copy real-mode global descriptor table register to RM code segment */
	movl	VIRTUAL(text16), %edi
.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
	leal	rm_gdtr(%edi), %edi
	movsw
	movsl
	/* Load real-mode interrupt descriptor table register */
	lidt	(%esi)
	/* Add return address to data to be moved to RM stack */
	addl	$4, %ecx
	/* Real-mode %ss:sp => %ebp and virtual address => %edi */
	movl	VIRTUAL(rm_sp), %ebp
	subl	%ecx, %ebp
	movzwl	VIRTUAL(rm_ss), %eax
	shll	$4, %eax
	movzwl	%bp, %edi
	addl	%eax, %edi
	subl	VIRTUAL(virt_offset), %edi
	/* Move data from PM stack to RM stack */
	movl	%esp, %esi
	rep movsb
	/* Move data from PM stack to RM temporary buffer */
	movl	VIRTUAL(data16), %edi
.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
	addl	$rm_tmpbuf, %edi
	movl	%edx, %ecx
	rep movsb
	/* Record protected-mode %esp (after removal of data) */
	movl	%esi, VIRTUAL(pm_esp)
	/* Load real-mode segment limits */
	movw	$P2R_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	ljmp	$REAL_CS, $p2r_rmode
	.section ".text16.prot_to_real", "ax", @progbits
	.code16
p2r_rmode:
	/* Load real-mode GDT */
	data32 lgdt %cs:rm_gdtr
	/* Switch to real mode (gas's infix "!" is the or-not
	 * operator, so $0!CR0_PE is simply $~CR0_PE, clearing the PE
	 * bit)
	 */
	movl	%cr0, %eax
	andb	$0!CR0_PE, %al
	movl	%eax, %cr0
p2r_ljmp_rm_cs:
	ljmp	$0, $1f
1:
	/* Set up real-mode data segments and stack pointer */
	movw	%cs:rm_ds, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	%ebp, %eax
	shrl	$16, %eax
	movw	%ax, %ss
	movzwl	%bp, %esp
	/* Return to real-mode address */
	data32 ret

	/* Real-mode code and data segments.  Assigned by the call to
	 * init_librm.  rm_cs doubles as the segment part of the jump
	 * instruction used by prot_to_real.  Both are located in
	 * .text16 rather than .data16: rm_cs since it forms part of
	 * the jump instruction within the code segment, and rm_ds
	 * since real-mode code needs to be able to locate the data
	 * segment with no other reference available.
	 */
	.globl rm_cs
	.equ	rm_cs, ( p2r_ljmp_rm_cs + 3 )

	.section ".text16.data.rm_ds", "aw", @progbits
	.globl rm_ds
rm_ds:	.word 0

/* Real-mode global and interrupt descriptor table registers */
	.section ".text16.data.rm_gdtr", "aw", @progbits
rm_gdtr:
	.word 0 /* Limit */
	.long 0 /* Base */
/****************************************************************************
 * phys_to_prot (protected-mode near call, 32-bit physical return address)
 *
 * Switch from 32-bit protected mode with physical addresses to 32-bit
 * protected mode with virtual addresses.  %esp is adjusted to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit physical
 * (sic) address.
 *
 ****************************************************************************
 */
	.section ".text.phys_to_prot", "ax", @progbits
	.code32
	.globl phys_to_prot
phys_to_prot:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebp
	/* Switch to virtual code segment */
	cli
	ljmp	$VIRTUAL_CS, $VIRTUAL(1f)
1:
	/* Switch to virtual data segment and adjust %esp */
	movw	$VIRTUAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	VIRTUAL(virt_offset), %ebp
	subl	%ebp, %esp
	/* Adjust return address to a virtual address */
	subl	%ebp, 8(%esp)
	/* Restore registers and return */
	popl	%ebp
	popl	%eax
	ret

.if32 /* Expose as _phys_to_virt for use by COMBOOT, if applicable */
	.globl _phys_to_virt
	.equ	_phys_to_virt, phys_to_prot
.endif

/****************************************************************************
 * prot_to_phys (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 32-bit
 * protected mode with physical addresses.  %esp is adjusted to a
 * physical address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * (sic) address.
 *
 ****************************************************************************
 */
	.section ".text.prot_to_phys", "ax", @progbits
	.code32
prot_to_phys:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebp
	/* Adjust return address to a physical address */
	movl	VIRTUAL(virt_offset), %ebp
	addl	%ebp, 8(%esp)
	/* Switch to physical code segment */
	cli
	pushl	$PHYSICAL_CS
	leal	VIRTUAL(1f)(%ebp), %eax
	pushl	%eax
	lret
1:
	/* Switch to physical data segment and adjust %esp */
	movw	$PHYSICAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	addl	%ebp, %esp
	/* Restore registers and return */
	popl	%ebp
	popl	%eax
	ret

.if32 /* Expose as _virt_to_phys for use by COMBOOT, if applicable */
	.globl _virt_to_phys
	.equ	_virt_to_phys, prot_to_phys
.endif
/****************************************************************************
 * intr_to_prot (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with a virtual code segment and
 * either a physical or virtual stack segment to 32-bit protected mode
 * with normal virtual addresses.  %esp is adjusted if necessary to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 ****************************************************************************
 */
	.section ".text.intr_to_prot", "ax", @progbits
	.code32
	.globl intr_to_prot
intr_to_prot:
	/* Preserve registers */
	pushl	%eax
	/* Check whether stack segment is physical or virtual */
	movw	%ss, %ax
	cmpw	$VIRTUAL_DS, %ax
	movw	$VIRTUAL_DS, %ax
	/* Reload data segment registers */
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	/* Reload stack segment and adjust %esp if necessary */
	je	1f
	movw	%ax, %ss
	subl	VIRTUAL(virt_offset), %esp
1:
	/* Restore registers and return */
	popl	%eax
	ret

/* Expose as _intr_to_virt for use by GDB */
	.globl _intr_to_virt
	.equ	_intr_to_virt, intr_to_prot
/****************************************************************************
 * prot_to_long (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 64-bit
 * long mode.  The protected-mode %esp is adjusted to a physical
 * address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
	.section ".text.prot_to_long", "ax", @progbits
	.code32
prot_to_long:
	/* Preserve registers */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	/* Set up PML4 */
	movl	VIRTUAL(pml4), %eax
	movl	%eax, %cr3
	/* Enable PAE */
	movl	%cr4, %eax
	orb	$CR4_PAE, %al
	movl	%eax, %cr4
	/* Enable long mode */
	movl	$MSR_EFER, %ecx
	rdmsr
	orw	$EFER_LME, %ax
	wrmsr
	/* Enable paging */
	movl	%cr0, %eax
	orl	$CR0_PG, %eax
	movl	%eax, %cr0
	/* Restore registers */
	popl	%edx
	popl	%ecx
	popl	%eax
	/* Construct 64-bit return address */
	pushl	(%esp)
	movl	$0xffffffff, 4(%esp)
p2l_ljmp:
	/* Switch to long mode (using a physical %rip) */
	ljmp	$LONG_CS, $0
	.code64
p2l_lmode:
	/* Adjust and zero-extend %esp to a physical address */
	addl	virt_offset, %esp
	/* Use long-mode IDT */
	lidt	idtr64
	/* Return to virtual address */
	ret

	/* Long mode jump offset and target.  Required since an ljmp
	 * in protected mode will zero-extend the offset, and so
	 * cannot reach an address within the negative 2GB as used by
	 * -mcmodel=kernel.  Assigned by the call to init_librm.
	 */
	.equ	p2l_ljmp_offset, ( p2l_ljmp + 1 )
	.equ	p2l_ljmp_target, p2l_lmode
.endif
/****************************************************************************
 * long_to_prot (long-mode near call, 64-bit virtual return address)
 *
 * Switch from 64-bit long mode to 32-bit protected mode with virtual
 * addresses.  The long-mode %rsp is adjusted to a virtual address.
 * All other registers are preserved.
 *
 * The return address for this function should be a 64-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_to_prot", "ax", @progbits
	.code64
long_to_prot:
	/* Switch to protected mode */
	ljmp	*l2p_vector
	.code32
l2p_pmode:
	/* Adjust %esp to a virtual address */
	subl	VIRTUAL(virt_offset), %esp
	/* Preserve registers */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	/* Disable paging */
	movl	%cr0, %eax
	andl	$~CR0_PG, %eax
	movl	%eax, %cr0
	/* Disable PAE (in case external non-PAE-aware code enables paging) */
	movl	%cr4, %eax
	andb	$~CR4_PAE, %al
	movl	%eax, %cr4
	/* Disable long mode */
	movl	$MSR_EFER, %ecx
	rdmsr
	andw	$~EFER_LME, %ax
	wrmsr
	/* Restore registers */
	popl	%edx
	popl	%ecx
	popl	%eax
	/* Use protected-mode IDT */
	lidt	VIRTUAL(idtr32)
	/* Return */
	ret	$4

	/* Long mode jump vector.  Required since there is no "ljmp
	 * immediate" instruction in long mode.
	 */
	.section ".data.l2p_vector", "aw", @progbits
l2p_vector:
	.long	VIRTUAL(l2p_pmode), VIRTUAL_CS
.endif
/****************************************************************************
 * long_preserve_regs (long-mode near call, 64-bit virtual return address)
 *
 * Preserve registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_preserve_regs", "ax", @progbits
	.code64
long_preserve_regs:
	/* Preserve registers */
	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	pushq	%rbx
	pushq	%rsp
	pushq	%rbp
	pushq	%rsi
	pushq	%rdi
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	/* Return */
	jmp	*SIZEOF_X86_64_REGS(%rsp)
.endif
/****************************************************************************
 * long_restore_regs (long-mode near call, 64-bit virtual return address)
 *
 * Restore registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_restore_regs", "ax", @progbits
	.code64
long_restore_regs:
	/* Move return address above register dump */
	popq	SIZEOF_X86_64_REGS(%rsp)
	/* Restore registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	movl	%edi, (%rsp)
	popq	%rdi
	movl	%esi, (%rsp)
	popq	%rsi
	movl	%ebp, (%rsp)
	popq	%rbp
	leaq	8(%rsp), %rsp /* discard */
	movl	%ebx, (%rsp)
	popq	%rbx
	movl	%edx, (%rsp)
	popq	%rdx
	movl	%ecx, (%rsp)
	popq	%rcx
	movl	%eax, (%rsp)
	popq	%rax
	/* Return */
	ret
.endif
/****************************************************************************
 * virt_call (real-mode near call, 16-bit real-mode near return address)
 *
 * Call a specific C function in 32-bit protected mode or 64-bit long
 * mode (as applicable).  The prototype of the C function must be
 *
 *	void function ( struct i386_all_regs *ix86 );
 *
 * ix86 will point to a struct containing the real-mode registers at
 * entry to virt_call().
 *
 * All registers will be preserved across virt_call(), unless the C
 * function explicitly overwrites values in ix86.  Interrupt status
 * and GDT will also be preserved.  Gate A20 will be enabled.
 *
 * Note that virt_call() does not rely on the real-mode stack
 * remaining intact in order to return, since everything relevant is
 * copied to the protected-mode stack for the duration of the call.
 * In particular, this means that a real-mode prefix can make a call
 * to main() which will return correctly even if the prefix's stack
 * gets vapourised during the Etherboot run.  (The prefix cannot rely
 * on anything else on the stack being preserved, so should move any
 * critical data to registers before calling main()).
 *
 * Parameters:
 *   function : 32-bit virtual address of function to call
 *
 * Example usage:
 *	pushl	$pxe_api_call
 *	call	virt_call
 * to call in to the C function
 *	void pxe_api_call ( struct i386_all_regs *ix86 );
 ****************************************************************************
 */
	.struct 0
VC_OFFSET_IX86:		.space SIZEOF_I386_ALL_REGS
VC_OFFSET_PADDING:	.space 2 /* for alignment */
VC_OFFSET_RETADDR:	.space 2
VC_OFFSET_PARAMS:
VC_OFFSET_FUNCTION:	.space 4
VC_OFFSET_END:
	.previous
	.section ".text16.virt_call", "ax", @progbits
	.code16
	.globl virt_call
virt_call:
	/* Preserve registers and flags on external RM stack */
	pushw	%ss /* padding */
	pushfl
	pushal
	pushw	%gs
	pushw	%fs
	pushw	%es
	pushw	%ds
	pushw	%ss
	pushw	%cs
	/* Claim ownership of temporary static buffer */
	cli
	movw	%cs:rm_ds, %ds
#ifdef TIVOLI_VMM_WORKAROUND
	/* Preserve FPU, MMX and SSE state in temporary static buffer */
	fxsave	( rm_tmpbuf + VC_TMP_FXSAVE )
#endif
	/* Preserve GDT and IDT in temporary static buffer */
	sidt	( rm_tmpbuf + VC_TMP_IDT )
	sgdt	( rm_tmpbuf + VC_TMP_GDT )
.if64 ;	/* Preserve control registers, if applicable */
	movl	$MSR_EFER, %ecx
	rdmsr
	movl	%eax, ( rm_tmpbuf + VC_TMP_EMER + 0 )
	movl	%edx, ( rm_tmpbuf + VC_TMP_EMER + 4 )
	movl	%cr4, %eax
	movl	%eax, ( rm_tmpbuf + VC_TMP_CR4 )
	movl	%cr3, %eax
	movl	%eax, ( rm_tmpbuf + VC_TMP_CR3 )
.endif
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to protected mode and move register dump to PM stack */
	movl	$VC_OFFSET_END, %ecx
	movl	$VC_TMP_END, %edx
	pushl	$VIRTUAL(vc_pmode)
vc_jmp:	jmp	real_to_prot
	.section ".text.virt_call", "ax", @progbits
	.code32
vc_pmode:
	/* Call function (in protected mode) */
	pushl	%esp
	call	*(VC_OFFSET_FUNCTION+4)(%esp)
	popl	%eax /* discard */
.if64 ;	/* Switch to long mode */
	jmp	1f
vc_lmode:
	call	prot_to_long
	.code64
	/* Call function (in long mode) */
	movq	%rsp, %rdi
	movslq	VC_OFFSET_FUNCTION(%rsp), %rax
	callq	*%rax
	/* Switch to protected mode */
	call	long_to_prot
1:	.code32
.endif
	/* Switch to real mode and move register dump back to RM stack */
	movl	$VC_OFFSET_END, %ecx
	movl	$VC_TMP_END, %edx
	leal	VC_TMP_GDT(%esp,%ecx), %esi
	pushl	$vc_rmode
	jmp	prot_to_real
	.section ".text16.virt_call", "ax", @progbits
	.code16
vc_rmode:
.if64 ;	/* Restore control registers, if applicable */
	movw	%sp, %bp
	movl	( rm_tmpbuf + VC_TMP_CR3 ), %eax
	movl	%eax, %cr3
	movl	( rm_tmpbuf + VC_TMP_CR4 ), %eax
	movl	%eax, %cr4
	movl	( rm_tmpbuf + VC_TMP_EMER + 0 ), %eax
	movl	( rm_tmpbuf + VC_TMP_EMER + 4 ), %edx
	movl	$MSR_EFER, %ecx
	wrmsr
.endif
#ifdef TIVOLI_VMM_WORKAROUND
	/* Restore FPU, MMX and SSE state from temporary static buffer */
	fxrstor	( rm_tmpbuf + VC_TMP_FXSAVE )
#endif
	/* Restore registers and flags and return */
	popl	%eax /* skip %cs and %ss */
	popw	%ds
	popw	%es
	popw	%fs
	popw	%gs
	popal
	/* popal skips %esp.  We therefore want to do "movl -20(%sp),
	 * %esp", but -20(%sp) is not a valid 80386 expression.
	 * Fortunately, prot_to_real() zeroes the high word of %esp, so
	 * we can just use -20(%esp) instead.
	 */
	addr32 movl -20(%esp), %esp
	popfl
	popw	%ss /* padding */
	/* Return and discard function parameters */
	ret	$( VC_OFFSET_END - VC_OFFSET_PARAMS )

	/* Protected-mode jump target */
	.equ	vc_jmp_offset, ( vc_jmp - 4 )
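/* vc_jmp_offset addresses the 32-bit immediate of the "pushl
 * $VIRTUAL(vc_pmode)" preceding vc_jmp.  On 64-bit builds,
 * init_librm patches it to vc_pmode before paging is initialised and
 * to vc_lmode afterwards, selecting the entry point that virt_call
 * pushes.
 */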
/****************************************************************************
 * real_call (protected-mode near call, 32-bit virtual return address)
 * real_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a real-mode function from protected-mode or long-mode code.
 *
 * The non-segment register values will be passed directly to the
 * real-mode code.  The segment registers will be set as per
 * prot_to_real.  The non-segment register values set by the real-mode
 * function will be passed back to the protected-mode or long-mode
 * caller.  A result of this is that this routine cannot be called
 * directly from C code, since it clobbers registers that the C ABI
 * expects the callee to preserve.
 *
 * librm.h defines a convenient macro REAL_CODE() for using real_call.
 * See librm.h and realmode.h for details and examples.
 *
 * Parameters:
 *   function : offset within .text16 of real-mode function to call
 *
 * Returns: none
 ****************************************************************************
 */
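/* As a hedged illustration (the authoritative definition lives in
 * realmode.h), a typical REAL_CODE() invocation from C looks
 * something like:
 *
 *	__asm__ __volatile__ ( REAL_CODE ( "int $0x10" )
 *			       : : "a" ( 0x0e00 | character ),
 *				   "b" ( 1 ) );
 *
 * where "character" is a hypothetical C variable; the macro wraps
 * the quoted real-mode fragment so that it is executed via
 * real_call.
 */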
	.struct 0
RC_OFFSET_REGS:		.space SIZEOF_I386_REGS
RC_OFFSET_REGS_END:
RC_OFFSET_FUNCTION_COPY:.space 4
.if64
RC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
RC_OFFSET_LREG_RETADDR:	.space SIZEOF_ADDR
.endif
RC_OFFSET_RETADDR:	.space SIZEOF_ADDR
RC_OFFSET_PARAMS:
RC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
RC_OFFSET_END:
	.previous

	.section ".text.real_call", "ax", @progbits
	.CODE_DEFAULT
	.globl real_call
real_call:
.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
	call	long_preserve_regs
	call	long_to_prot
	.code32
.endif
	/* Create register dump and function pointer copy on PM stack */
	pushl	( RC_OFFSET_FUNCTION - RC_OFFSET_FUNCTION_COPY - 4 )(%esp)
	pushal
	/* Switch to real mode and move register dump to RM stack */
	movl	$RC_OFFSET_REGS_END, %ecx
	movl	$RC_TMP_END, %edx
	pushl	$rc_rmode
	movl	$VIRTUAL(rm_default_gdtr_idtr), %esi
	jmp	prot_to_real
	.section ".text16.real_call", "ax", @progbits
	.code16
rc_rmode:
	/* Call real-mode function */
	popal
	call	*( rm_tmpbuf + RC_TMP_FUNCTION )
	pushal
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to protected mode and move register dump back to PM stack */
	movl	$RC_OFFSET_REGS_END, %ecx
	xorl	%edx, %edx
	pushl	$VIRTUAL(rc_pmode)
	jmp	real_to_prot
	.section ".text.real_call", "ax", @progbits
	.code32
rc_pmode:
	/* Restore registers */
	popal
.if64 ;	/* Switch to long mode and restore registers, if applicable */
	call	prot_to_long
	.code64
	call	long_restore_regs
.endif
	/* Return and discard function parameters */
	ret	$( RC_OFFSET_END - RC_OFFSET_PARAMS )

	/* Default real-mode global and interrupt descriptor table registers */
	.section ".data.rm_default_gdtr_idtr", "aw", @progbits
rm_default_gdtr_idtr:
	.word	0	/* Global descriptor table limit */
	.long	0	/* Global descriptor table base */
	.word	0x03ff	/* Interrupt descriptor table limit */
	.long	0	/* Interrupt descriptor table base */
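/* This 12-byte block is what real_call passes via %esi to
 * prot_to_real: the first six bytes reload the global descriptor
 * table register, and the remaining six reload the interrupt
 * descriptor table register (limit 0x03ff, base 0 being the standard
 * real-mode interrupt vector table).
 */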
/****************************************************************************
 * phys_call (protected-mode near call, 32-bit virtual return address)
 * phys_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a function with flat 32-bit physical addressing
 *
 * The non-segment register values will be passed directly to the
 * function.  The segment registers will be set for flat 32-bit
 * physical addressing.  The non-segment register values set by the
 * function will be passed back to the caller.
 *
 * librm.h defines a convenient macro PHYS_CODE() for using phys_call.
 *
 * Parameters:
 *   function : virtual (sic) address of function to call
 *
 ****************************************************************************
 */
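/* As a hedged illustration (see librm.h for the macro itself), a
 * PHYS_CODE() invocation from C looks something like:
 *
 *	__asm__ __volatile__ ( PHYS_CODE ( "call *%%edi" )
 *			       : : "D" ( entry_point ) );
 *
 * where "entry_point" is a hypothetical physical address; the macro
 * arranges for the quoted fragment to run via phys_call with flat
 * physical addressing.
 */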
	.struct 0
.if64
PHC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
PHC_OFFSET_LREG_RETADDR:.space SIZEOF_ADDR
.endif
PHC_OFFSET_RETADDR:	.space SIZEOF_ADDR
PHC_OFFSET_PARAMS:
PHC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
PHC_OFFSET_END:
	.previous

	.section ".text.phys_call", "ax", @progbits
	.CODE_DEFAULT
	.globl phys_call
phys_call:
.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
	call	long_preserve_regs
	call	long_to_prot
	.code32
.endif
	/* Adjust function pointer to a physical address */
	pushl	%ebp
	movl	VIRTUAL(virt_offset), %ebp
	addl	%ebp, ( PHC_OFFSET_FUNCTION + 4 /* saved %ebp */ )(%esp)
	popl	%ebp
	/* Switch to physical addresses */
	call	prot_to_phys
	/* Call function */
	call	*PHC_OFFSET_FUNCTION(%esp)
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to virtual addresses */
	call	phys_to_prot
.if64 ;	/* Switch to long mode and restore registers, if applicable */
	call	prot_to_long
	.code64
	call	long_restore_regs
.endif
	/* Return and discard function parameters */
	ret	$( PHC_OFFSET_END - PHC_OFFSET_PARAMS )
/****************************************************************************
 * phys_to_long (protected-mode near call, 32-bit physical return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
	.section ".text.phys_to_long", "ax", @progbits
	.code32
phys_to_long:
	/* Switch to virtual addresses */
	call	phys_to_prot
	/* Convert to 32-bit virtual return address */
	pushl	%eax
	movl	VIRTUAL(virt_offset), %eax
	subl	%eax, 4(%esp)
	popl	%eax
	/* Switch to long mode and return */
	jmp	prot_to_long

/* Expose as _phys_to_virt for use by COMBOOT */
	.globl _phys_to_virt
	.equ	_phys_to_virt, phys_to_long
.endif
/****************************************************************************
 * long_to_phys (long-mode near call, 64-bit virtual return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_to_phys", "ax", @progbits
	.code64
long_to_phys:
	/* Switch to protected mode */
	call	long_to_prot
	.code32
	/* Convert to 32-bit virtual return address */
	popl	(%esp)
	/* Switch to physical addresses and return */
	jmp	prot_to_phys

/* Expose as _virt_to_phys for use by COMBOOT */
	.globl _virt_to_phys
	.equ	_virt_to_phys, long_to_phys
.endif
/****************************************************************************
 * flatten_real_mode (real-mode near call)
 *
 * Switch to flat real mode
 *
 ****************************************************************************
 */
	.section ".text16.flatten_real_mode", "ax", @progbits
	.code16
	.globl flatten_real_mode
flatten_real_mode:
	/* Modify GDT to use flat real mode */
	movb	$0x8f, real_cs + 6
	movb	$0x8f, real_ds + 6
	/* Call dummy protected-mode function */
	virtcall flatten_dummy
	/* Restore GDT */
	movb	$0x00, real_cs + 6
	movb	$0x00, real_ds + 6
	/* Return */
	ret

	.section ".text.flatten_dummy", "ax", @progbits
	.CODE_DEFAULT
flatten_dummy:
	ret
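/* Flat real mode works because the 4GB segment limits loaded from
 * the modified descriptors (byte 6 = 0x8f: 4kB granularity, limit
 * 0xfffff) remain cached in the segment registers after the dummy
 * call returns to real mode.
 */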
/****************************************************************************
 * Interrupt wrapper
 *
 * Used by the protected-mode and long-mode interrupt vectors to call
 * the interrupt() function.
 *
 * May be entered with either physical or virtual stack segment.
 ****************************************************************************
 */
	.section ".text.interrupt_wrapper", "ax", @progbits
	.code32
	.globl interrupt_wrapper
interrupt_wrapper:
	/* Preserve registers (excluding already-saved %eax) */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
	/* Expand IRQ number to whole %eax register */
	movzbl	%al, %eax
.if64 ;	/* Skip transition to long mode, if applicable */
	xorl	%edx, %edx
	movw	%cs, %bx
	cmpw	$LONG_CS, %bx
	je	1f
.endif
	/* Preserve segment registers and original %esp */
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss
	pushl	%esp
	/* Switch to virtual addressing */
	call	intr_to_prot
	/* Pass 32-bit interrupt frame pointer in %edx */
	movl	%esp, %edx
	xorl	%ecx, %ecx
.if64
	/* Switch to long mode */
	call	prot_to_long
	.code64
1:	/* Preserve long-mode registers */
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	/* Expand IRQ number to whole %rdi register */
	movl	%eax, %edi
	/* Pass 32-bit interrupt frame pointer (if applicable) in %rsi */
	testl	%edx, %edx
	je	1f
	movl	%edx, %esi
	addl	virt_offset, %esi
1:
	/* Pass 64-bit interrupt frame pointer in %rdx */
	movq	%rsp, %rdx
.endif
	/* Call interrupt handler */
	call	interrupt
.if64
	/* Restore long-mode registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	/* Skip transition back to protected mode, if applicable */
	cmpw	$LONG_CS, %bx
	je	1f
	/* Switch to protected mode */
	call	long_to_prot
	.code32
	cmpw	$LONG_CS, %bx
.endif
	/* Restore segment registers and original %esp */
	lss	(%esp), %esp
	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
1:	/* Restore registers */
	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	/* Return from interrupt (with REX prefix if required) */
.if64 ;	jne	1f ; .byte 0x48 ; .endif
1:	iret
/****************************************************************************
 * Page tables
 *
 ****************************************************************************
 */
	.section ".pages", "aw", @nobits
	.align	SIZEOF_PT

/* Page map level 4 entries (PML4Es)
 *
 * This comprises
 *
 * - PML4E[0x000] covering [0x0000000000000000-0x0000007fffffffff]
 * - PML4E[0x1ff] covering [0xffffff8000000000-0xffffffffffffffff]
 *
 * These point to the PDPT.  This creates some aliased addresses
 * within unused portions of the 64-bit address space, but allows us
 * to use just a single PDPT.
 *
 * - PDE[...] covering arbitrary 2MB portions of I/O space
 *
 * These are 2MB pages created by ioremap() to cover I/O device
 * addresses.
 */
pml4e:
	.space	SIZEOF_PT
	.size	pml4e, . - pml4e

	.globl	io_pages
	.equ	io_pages, pml4e

/* Page directory pointer table entries (PDPTEs)
 *
 * This comprises:
 *
 * - PDPTE[0x000] covering [0x0000000000000000-0x000000003fffffff]
 * - PDPTE[0x001] covering [0x0000000040000000-0x000000007fffffff]
 * - PDPTE[0x002] covering [0x0000000080000000-0x00000000bfffffff]
 * - PDPTE[0x003] covering [0x00000000c0000000-0x00000000ffffffff]
 *
 * These point to the appropriate page directories (in pde_low) used
 * to identity-map the whole of the 32-bit address space.
 *
 * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
 *
 * This points back to the PML4, allowing the PML4 to be (ab)used to
 * hold 2MB pages used for I/O device addresses.
 *
 * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
 *
 * This points back to the PDPT itself, allowing the PDPT to be
 * (ab)used to hold PDEs covering .textdata.
 *
 * - PDE[N-M] covering [_textdata,_end)
 *
 * These are used to point to the page tables (in pte_textdata) used
 * to map our .textdata section.  Note that each PDE covers 2MB, so
 * we are likely to use only a single PDE in practice.
 */
pdpte:
	.space	SIZEOF_PT
	.size	pdpte, . - pdpte
	.equ	pde_textdata, pdpte /* (ab)use */

/* Page directory entries (PDEs) for the low 4GB
 *
 * This comprises 2048 2MB pages to identity-map the whole of the
 * 32-bit address space.
 */
pde_low:
	.equ	PDE_LOW_PTES, ( SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE )
	.equ	PDE_LOW_PTS, ( ( PDE_LOW_PTES * SIZEOF_PTE ) / SIZEOF_PT )
	.space	( PDE_LOW_PTS * SIZEOF_PT )
	.size	pde_low, . - pde_low

/* Page table entries (PTEs) for .textdata
 *
 * This comprises enough 4kB pages to map the whole of .textdata.
 * The required number of PTEs is calculated by the linker script.
 *
 * Note that these mappings do not cover the PTEs themselves.  This
 * does not matter, since code running with paging enabled never
 * needs to access these PTEs.
 */
pte_textdata:
	/* Allocated by linker script; must be at the end of .textdata */

	.section ".bss.pml4", "aw", @nobits
pml4:	.long 0
/****************************************************************************
 * init_pages (protected-mode near call)
 *
 * Initialise the page tables ready for long mode.
 *
 * Parameters:
 *   %edi : virt_offset
 ****************************************************************************
 */
	.section ".text.init_pages", "ax", @progbits
	.code32
init_pages:
	/* Initialise PML4Es for low 4GB and negative 2GB */
	leal	( VIRTUAL(pdpte) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, VIRTUAL(pml4e)
	movl	%eax, ( VIRTUAL(pml4e) + SIZEOF_PT - SIZEOF_PTE )
	/* Initialise PDPTE for negative 1GB */
	movl	%eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )
	/* Initialise PDPTE for I/O space */
	leal	( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )
	/* Initialise PDPTEs for low 4GB */
	movl	$PDE_LOW_PTS, %ecx
	leal	( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
		  ( PG_P | PG_RW | PG_US ) )(%edi), %eax
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, ( VIRTUAL(pdpte) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PDEs for low 4GB */
	movl	$PDE_LOW_PTES, %ecx
	leal	( 0 + ( PG_P | PG_RW | PG_US | PG_PS ) ), %eax
1:	subl	$SIZEOF_2MB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pde_low) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PDEs for .textdata */
	movl	$_textdata_pdes, %ecx
	leal	( VIRTUAL(_etextdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	$VIRTUAL(_textdata), %ebx
	shrl	$( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ), %ebx
	andl	$( SIZEOF_PT - 1 ), %ebx
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, (VIRTUAL(pde_textdata) - SIZEOF_PTE)(%ebx,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PTEs for .textdata */
	movl	$_textdata_ptes, %ecx
	leal	( VIRTUAL(_textdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	addl	$_textdata_paged_len, %eax
1:	subl	$SIZEOF_4KB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pte_textdata) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Record PML4 physical address */
	leal	VIRTUAL(pml4e)(%edi), %eax
	movl	%eax, VIRTUAL(pml4)
	/* Return */
	ret