/*
 * librm: a library for interfacing to real-mode code
 *
 * Michael Brown <mbrown@fensystems.co.uk>
 *
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )

/* Drag in local definitions */
#include "librm.h"

/* CR0: protection enabled */
#define CR0_PE ( 1 << 0 )

/* CR0: paging */
#define CR0_PG ( 1 << 31 )

/* CR4: physical address extensions */
#define CR4_PAE ( 1 << 5 )

/* Extended feature enable MSR (EFER) */
#define MSR_EFER 0xc0000080

/* EFER: long mode enable */
#define EFER_LME ( 1 << 8 )

/* Page: present */
#define PG_P 0x01

/* Page: read/write */
#define PG_RW 0x02

/* Page: user/supervisor */
#define PG_US 0x04

/* Page: page size */
#define PG_PS 0x80

/* Size of various paging-related data structures */
#define SIZEOF_PTE_LOG2 3
#define SIZEOF_PTE ( 1 << SIZEOF_PTE_LOG2 )
#define SIZEOF_PT_LOG2 12
#define SIZEOF_PT ( 1 << SIZEOF_PT_LOG2 )
#define SIZEOF_4KB_PAGE_LOG2 12
#define SIZEOF_4KB_PAGE ( 1 << SIZEOF_4KB_PAGE_LOG2 )
#define SIZEOF_2MB_PAGE_LOG2 21
#define SIZEOF_2MB_PAGE ( 1 << SIZEOF_2MB_PAGE_LOG2 )
#define SIZEOF_LOW_4GB_LOG2 32
#define SIZEOF_LOW_4GB ( 1 << SIZEOF_LOW_4GB_LOG2 )
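
/* Worked example, derived from the constants above: identity-mapping
 * the low 4GB using 2MB pages needs SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE =
 * 2^(32-21) = 2048 page directory entries.  At SIZEOF_PTE = 8 bytes
 * each, that is 16kB of entries, i.e. ( 2048 * 8 ) / SIZEOF_PT = 4
 * page-sized tables.  These figures reappear below as PDE_LOW_PTES
 * and PDE_LOW_PTS.
 */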

/* Size of various C data structures */
#define SIZEOF_I386_SEG_REGS 12
#define SIZEOF_I386_REGS 32
#define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
#define SIZEOF_I386_FLAGS 4
#define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
#define SIZEOF_X86_64_REGS 128

/* Size of an address */
#ifdef __x86_64__
#define SIZEOF_ADDR 8
#else
#define SIZEOF_ADDR 4
#endif

/* Default code size */
#ifdef __x86_64__
#define CODE_DEFAULT code64
#else
#define CODE_DEFAULT code32
#endif

/* Selectively assemble code for 32-bit/64-bit builds */
#ifdef __x86_64__
#define if32 if 0
#define if64 if 1
#else
#define if32 if 1
#define if64 if 0
#endif

/****************************************************************************
 * Global descriptor table
 *
 * Call init_librm to set up the GDT before attempting to use any
 * protected-mode code.
 *
 * NOTE: This must be located before prot_to_real, otherwise gas
 * throws a "can't handle non absolute segment in `ljmp'" error due to
 * not knowing the value of REAL_CS when the ljmp is encountered.
 *
 * Note also that putting ".word gdt_end - gdt - 1" directly into
 * gdt_limit, rather than going via gdt_length, will also produce the
 * "non absolute segment" error.  This is most probably a bug in gas.
 ****************************************************************************
 */
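/* Descriptor layout reminder: each 8-byte GDT entry below is written
 * as ".word limit[15:0], base[15:0]" followed by ".byte base[23:16],
 * access, flags|limit[19:16], base[31:24]".  For example, virtual_cs
 * ( .word 0xffff, 0 / .byte 0, 0x9f, 0xcf, 0 ) decodes as: base 0
 * (patched later by set_seg_base), access byte 0x9f (present, ring 0,
 * code, conforming, readable, accessed), and flag byte 0xcf (G=1,
 * D/B=1, limit[19:16]=0xf), giving a 4GB limit.
 */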
	.section ".data16.gdt", "aw", @progbits
	.align 16
gdt:
gdtr:		/* The first GDT entry is unused, the GDTR can fit here. */
gdt_limit:	.word gdt_length - 1
gdt_base:	.long 0
		.word 0 /* padding */

	.org	gdt + VIRTUAL_CS, 0
virtual_cs:	/* 32 bit protected mode code segment, virtual addresses */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	.org	gdt + VIRTUAL_DS, 0
virtual_ds:	/* 32 bit protected mode data segment, virtual addresses */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0

	.org	gdt + PHYSICAL_CS, 0
physical_cs:	/* 32 bit protected mode code segment, physical addresses */
	.word	0xffff, 0
	.byte	0, 0x9f, 0xcf, 0

	.org	gdt + PHYSICAL_DS, 0
physical_ds:	/* 32 bit protected mode data segment, physical addresses */
	.word	0xffff, 0
	.byte	0, 0x93, 0xcf, 0

	.org	gdt + REAL_CS, 0
real_cs:	/* 16 bit real mode code segment */
	.word	0xffff, 0
	.byte	0, 0x9b, 0x00, 0

	.org	gdt + REAL_DS, 0
real_ds:	/* 16 bit real mode data segment */
	.word	0xffff, 0
	.byte	0, 0x93, 0x00, 0

	.org	gdt + P2R_DS, 0
p2r_ds:		/* 16 bit real mode data segment for prot_to_real transition */
	.word	0xffff, ( P2R_DS << 4 )
	.byte	0, 0x93, 0x00, 0

	.org	gdt + LONG_CS, 0
long_cs:	/* 64 bit long mode code segment */
	.word	0, 0
	.byte	0, 0x9a, 0x20, 0

gdt_end:
	.equ	gdt_length, gdt_end - gdt

/****************************************************************************
 * Stored real-mode and protected-mode stack pointers
 *
 * The real-mode stack pointer is stored here whenever real_to_prot
 * is called and restored whenever prot_to_real is called.  The
 * converse happens for the protected-mode stack pointer.
 *
 * Despite initial appearances this scheme is, in fact re-entrant,
 * because program flow dictates that we always return via the point
 * we left by.  For example:
 *
 *     PXE API call entry
 *   1   real => prot
 *         ...
 *         Print a text string
 *           ...
 *   2       prot => real
 *             INT 10
 *   3       real => prot
 *           ...
 *         ...
 *   4   prot => real
 *     PXE API call exit
 *
 * At point 1, the RM mode stack value, say RPXE, is stored in
 * rm_ss,sp.  We want this value to still be present in rm_ss,sp when
 * we reach point 4.
 *
 * At point 2, the RM stack value is restored from RPXE.  At point 3,
 * the RM stack value is again stored in rm_ss,sp.  This *does*
 * overwrite the RPXE that we have stored there, but it's the same
 * value, since the code between points 2 and 3 has managed to return
 * to us.
 ****************************************************************************
 */
	.section ".bss.rm_ss_sp", "aw", @nobits
	.globl rm_sp
rm_sp:	.word 0
	.globl rm_ss
rm_ss:	.word 0

	.section ".data.pm_esp", "aw", @progbits
pm_esp:	.long VIRTUAL(_estack)

/****************************************************************************
 * Temporary static data buffer
 *
 * This is used to reduce the amount of real-mode stack space consumed
 * during mode transitions, since we are sometimes called with very
 * little real-mode stack space available.
 ****************************************************************************
 */
/* Temporary static buffer usage by virt_call */
	.struct	0
VC_TMP_GDT:	.space 6
VC_TMP_IDT:	.space 6
VC_TMP_PAD:	.space 4 /* for alignment */
.if64
VC_TMP_CR3:	.space 4
VC_TMP_CR4:	.space 4
VC_TMP_EMER:	.space 8
.endif
VC_TMP_FXSAVE:	.space 512
VC_TMP_END:
	.previous

/* Temporary static buffer usage by real_call */
	.struct	0
RC_TMP_FUNCTION: .space 4
RC_TMP_END:
	.previous

/* Shared temporary static buffer */
	.section ".bss16.rm_tmpbuf", "aw", @nobits
	.align 16
rm_tmpbuf:
	.space	VC_TMP_END
	.size	rm_tmpbuf, . - rm_tmpbuf
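
/* For reference, VC_TMP_END (the larger of the two layouts above)
 * works out to 6 + 6 + 4 ( + 4 + 4 + 8 on 64-bit builds ) + 512
 * bytes, i.e. 528 bytes on 32-bit builds and 544 bytes on 64-bit
 * builds, dominated by the 512-byte fxsave area.
 */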

/****************************************************************************
 * Virtual address offsets
 *
 * These are used by the protected-mode code to map between virtual
 * and physical addresses, and to access variables in the .text16 or
 * .data16 segments.
 ****************************************************************************
 */
	.struct	0
VA_VIRT_OFFSET:	.space SIZEOF_ADDR
VA_TEXT16:	.space SIZEOF_ADDR
VA_DATA16:	.space SIZEOF_ADDR
VA_SIZE:
	.previous

/* Internal copies, used only by librm itself */
	.section ".bss16.rm_virt_addrs", "aw", @nobits
rm_virt_addrs:	.space VA_SIZE
	.equ	rm_virt_offset, ( rm_virt_addrs + VA_VIRT_OFFSET )
	.equ	rm_text16, ( rm_virt_addrs + VA_TEXT16 )
	.equ	rm_data16, ( rm_virt_addrs + VA_DATA16 )

/* Externally visible variables, used by C code */
	.section ".bss.virt_addrs", "aw", @nobits
virt_addrs:	.space VA_SIZE
	.globl	virt_offset
	.equ	virt_offset, ( virt_addrs + VA_VIRT_OFFSET )
	.globl	text16
	.equ	text16, ( virt_addrs + VA_TEXT16 )
	.globl	data16
	.equ	data16, ( virt_addrs + VA_DATA16 )
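
/* Example of the mapping these offsets describe: a virtual address V
 * within .textdata corresponds to physical address ( V + virt_offset ),
 * and a physical address P corresponds to virtual address
 * ( P - virt_offset ).  This is why the code below adds or subtracts
 * virt_offset (or rm_virt_offset) whenever it crosses between the two
 * address spaces.
 */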

/****************************************************************************
 * init_librm (real-mode far call, 16-bit real-mode far return address)
 *
 * Initialise the GDT ready for transitions to protected mode.
 *
 * Parameters:
 *   %cs : .text16 segment
 *   %ds : .data16 segment
 *   %edi : Physical base of protected-mode code
 ****************************************************************************
 */
	.section ".text16.init_librm", "ax", @progbits
	.code16
	.globl init_librm
init_librm:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebx
	pushl	%edi
	/* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
	subl	$VIRTUAL(_textdata), %edi
	movl	%edi, rm_virt_offset
.if64 ;	setae	(rm_virt_offset+4) ; .endif
	movl	%edi, %eax
	movw	$virtual_cs, %bx
	call	set_seg_base
	movw	$virtual_ds, %bx
	call	set_seg_base
	/* Store rm_cs and rm_text16, set up real_cs segment */
	xorl	%eax, %eax
	movw	%cs, %ax
	movw	%ax, %cs:rm_cs
	shll	$4, %eax
	movw	$real_cs, %bx
	call	set_seg_base
.if32 ;	subl	%edi, %eax ; .endif
	movl	%eax, rm_text16
	/* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
	xorl	%eax, %eax
	movw	%ds, %ax
	movw	%ax, %cs:rm_ds
	shll	$4, %eax
	movw	$real_ds, %bx
	call	set_seg_base
	movl	%eax, gdt_base
	addl	$gdt, gdt_base
.if32 ;	subl	%edi, %eax ; .endif
	movl	%eax, rm_data16
	/* Configure virt_call for protected mode, if applicable */
.if64 ;	movl	$VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif
	/* Switch to protected mode */
	virtcall init_librm_pmode
	.section ".text.init_librm", "ax", @progbits
	.code32
init_librm_pmode:
	/* Store virt_offset, text16, and data16 */
	pushw	%ds
	movw	$REAL_DS, %ax
	movw	%ax, %ds
	movl	$rm_virt_addrs, %esi
	movl	$VIRTUAL(virt_addrs), %edi
	movl	$( VA_SIZE / 4 ), %ecx
	rep movsl
	popw	%ds
.if64 ;	/* Initialise long mode, if applicable */
	movl	VIRTUAL(virt_offset), %edi
	leal	VIRTUAL(p2l_ljmp_target)(%edi), %eax
	movl	%eax, VIRTUAL(p2l_ljmp_offset)
	call	init_pages
.endif
	/* Return to real mode */
	ret
	.section ".text16.init_librm", "ax", @progbits
	.code16
init_librm_rmode:
	/* Configure virt_call for long mode, if applicable */
.if64 ;	movl	$VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif
	/* Initialise IDT */
	virtcall init_idt
	/* Restore registers */
	popl	%edi
	popl	%ebx
	popl	%eax
	lret

	.section ".text16.set_seg_base", "ax", @progbits
	.code16
set_seg_base:
1:	movw	%ax, 2(%bx)
	rorl	$16, %eax
	movb	%al, 4(%bx)
	movb	%ah, 7(%bx)
	roll	$16, %eax
	ret
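
/* Worked example for set_seg_base above: a 32-bit base 0x12345678 in
 * %eax is scattered across the descriptor at %bx as base[15:0]=0x5678
 * at offset 2, base[23:16]=0x34 at offset 4, and base[31:24]=0x12 at
 * offset 7.  The rorl/roll pair rotates the high word into %al/%ah
 * for the two byte stores and then restores %eax, so the caller's
 * registers are left unchanged apart from the descriptor itself.
 */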

/****************************************************************************
 * real_to_prot (real-mode near call, 32-bit virtual return address)
 *
 * Switch from 16-bit real-mode to 32-bit protected mode with virtual
 * addresses.  The real-mode %ss:sp is stored in rm_ss and rm_sp, and
 * the protected-mode %esp is restored from the saved pm_esp.
 * Interrupts are disabled.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 * Parameters:
 *   %ecx : number of bytes to move from RM stack to PM stack
 *   %edx : number of bytes to copy from RM temporary buffer to PM stack
 *
 ****************************************************************************
 */
	.section ".text16.real_to_prot", "ax", @progbits
	.code16
real_to_prot:
	/* Enable A20 line */
	call	enable_a20
	/* A failure at this point is fatal, and there's nothing we
	 * can do about it other than lock the machine to make the
	 * problem immediately visible.
	 */
1:	jc	1b
	/* Make sure we have our data segment available */
	movw	%cs:rm_ds, %ds
	/* Add protected-mode return address to length of data to be copied */
	addw	$4, %cx /* %ecx must be less than 64kB anyway */
	/* Real-mode %ss:%sp => %ebp and virtual address => %esi */
	xorl	%eax, %eax
	movw	%ss, %ax
	shll	$4, %eax
	movzwl	%sp, %ebp
	addr32 leal (%eax,%ebp), %esi
	subl	rm_virt_offset, %esi
	shll	$12, %eax
	orl	%eax, %ebp
	/* Real-mode data segment virtual address => %ebx */
	movl	rm_data16, %ebx
.if64 ;	subl	rm_virt_offset, %ebx ; .endif
	/* Load protected-mode global descriptor table */
	data32 lgdt gdtr
	/* Zero segment registers.  This wastes around 12 cycles on
	 * real hardware, but saves a substantial number of emulated
	 * instructions under KVM.
	 */
	xorw	%ax, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	/* Switch to protected mode (with paging disabled if applicable) */
	cli
	movl	%cr0, %eax
.if64 ;	andl	$~CR0_PG, %eax ; .endif
	orb	$CR0_PE, %al
	movl	%eax, %cr0
	data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
	.section ".text.real_to_prot", "ax", @progbits
	.code32
r2p_pmode:
	/* Set up protected-mode data segments and stack pointer */
	movw	$VIRTUAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	VIRTUAL(pm_esp), %esp
	/* Load protected-mode interrupt descriptor table */
	lidt	VIRTUAL(idtr32)
	/* Record real-mode %ss:sp (after removal of data) */
	addl	%ecx, %ebp
	movl	%ebp, VIRTUAL(rm_sp)
	/* Move data from RM stack to PM stack */
	subl	%edx, %esp
	subl	%ecx, %esp
	movl	%esp, %edi
	rep movsb
	/* Copy data from RM temporary buffer to PM stack */
	leal	rm_tmpbuf(%ebx), %esi
	movl	%edx, %ecx
	rep movsb
	/* Return to virtual address */
	ret

/****************************************************************************
 * prot_to_real (protected-mode near call, 32-bit real-mode return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 16-bit
 * real mode.  The protected-mode %esp is stored in pm_esp and the
 * real-mode %ss:sp is restored from the saved rm_ss and rm_sp.  The
 * high word of the real-mode %esp is set to zero.  All real-mode data
 * segment registers are loaded from the saved rm_ds.  Interrupts are
 * *not* enabled, since we want to be able to use prot_to_real in an
 * ISR.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit (sic)
 * real-mode offset within .code16.
 *
 * Parameters:
 *   %ecx : number of bytes to move from PM stack to RM stack
 *   %edx : number of bytes to move from PM stack to RM temporary buffer
 *   %esi : real-mode global and interrupt descriptor table registers
 *
 ****************************************************************************
 */
	.section ".text.prot_to_real", "ax", @progbits
	.code32
prot_to_real:
	/* Copy real-mode global descriptor table register to RM code segment */
	movl	VIRTUAL(text16), %edi
.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
	leal	rm_gdtr(%edi), %edi
	movsw
	movsl
	/* Load real-mode interrupt descriptor table register */
	lidt	(%esi)
	/* Add return address to data to be moved to RM stack */
	addl	$4, %ecx
	/* Real-mode %ss:sp => %ebp and virtual address => %edi */
	movl	VIRTUAL(rm_sp), %ebp
	subl	%ecx, %ebp
	movzwl	VIRTUAL(rm_ss), %eax
	shll	$4, %eax
	movzwl	%bp, %edi
	addl	%eax, %edi
	subl	VIRTUAL(virt_offset), %edi
	/* Move data from PM stack to RM stack */
	movl	%esp, %esi
	rep movsb
	/* Move data from PM stack to RM temporary buffer */
	movl	VIRTUAL(data16), %edi
.if64 ;	subl	VIRTUAL(virt_offset), %edi ; .endif
	addl	$rm_tmpbuf, %edi
	movl	%edx, %ecx
	rep movsb
	/* Record protected-mode %esp (after removal of data) */
	movl	%esi, VIRTUAL(pm_esp)
	/* Load real-mode segment limits */
	movw	$P2R_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	ljmp	$REAL_CS, $p2r_rmode
	.section ".text16.prot_to_real", "ax", @progbits
	.code16
p2r_rmode:
	/* Load real-mode GDT */
	data32 lgdt %cs:rm_gdtr
	/* Switch to real mode */
	movl	%cr0, %eax
	andb	$0!CR0_PE, %al
	movl	%eax, %cr0
p2r_ljmp_rm_cs:
	ljmp	$0, $1f
1:
	/* Set up real-mode data segments and stack pointer */
	movw	%cs:rm_ds, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movl	%ebp, %eax
	shrl	$16, %eax
	movw	%ax, %ss
	movzwl	%bp, %esp
	/* Return to real-mode address */
	data32 ret

	/* Real-mode code and data segments.  Assigned by the call to
	 * init_librm.  rm_cs doubles as the segment part of the jump
	 * instruction used by prot_to_real.  Both are located in
	 * .text16 rather than .data16: rm_cs since it forms part of
	 * the jump instruction within the code segment, and rm_ds
	 * since real-mode code needs to be able to locate the data
	 * segment with no other reference available.
	 */
	.globl rm_cs
	.equ	rm_cs, ( p2r_ljmp_rm_cs + 3 )
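
	/* Encoding note: "ljmp $segment, $offset16" assembles to the
	 * byte 0xea followed by the two-byte offset and then the
	 * two-byte segment, so the segment word of the ljmp at
	 * p2r_ljmp_rm_cs lives at ( p2r_ljmp_rm_cs + 3 ).  This is
	 * what allows rm_cs to double as the jump target segment.
	 */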

	.section ".text16.data.rm_ds", "aw", @progbits
	.globl rm_ds
rm_ds:	.word 0

/* Real-mode global and interrupt descriptor table registers */
	.section ".text16.data.rm_gdtr", "aw", @progbits
rm_gdtr:
	.word 0 /* Limit */
	.long 0 /* Base */

/****************************************************************************
 * phys_to_prot (protected-mode near call, 32-bit physical return address)
 *
 * Switch from 32-bit protected mode with physical addresses to 32-bit
 * protected mode with virtual addresses.  %esp is adjusted to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit physical
 * (sic) address.
 *
 ****************************************************************************
 */
	.section ".text.phys_to_prot", "ax", @progbits
	.code32
	.globl phys_to_prot
phys_to_prot:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebp
	/* Switch to virtual code segment */
	cli
	ljmp	$VIRTUAL_CS, $VIRTUAL(1f)
1:
	/* Switch to virtual data segment and adjust %esp */
	movw	$VIRTUAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	movl	VIRTUAL(virt_offset), %ebp
	subl	%ebp, %esp
	/* Adjust return address to a virtual address */
	subl	%ebp, 8(%esp)
	/* Restore registers and return */
	popl	%ebp
	popl	%eax
	ret

.if32	/* Expose as _phys_to_virt for use by COMBOOT, if applicable */
	.globl _phys_to_virt
	.equ	_phys_to_virt, phys_to_prot
.endif

/****************************************************************************
 * prot_to_phys (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 32-bit
 * protected mode with physical addresses.  %esp is adjusted to a
 * physical address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * (sic) address.
 *
 ****************************************************************************
 */
	.section ".text.prot_to_phys", "ax", @progbits
	.code32
prot_to_phys:
	/* Preserve registers */
	pushl	%eax
	pushl	%ebp
	/* Adjust return address to a physical address */
	movl	VIRTUAL(virt_offset), %ebp
	addl	%ebp, 8(%esp)
	/* Switch to physical code segment */
	cli
	pushl	$PHYSICAL_CS
	leal	VIRTUAL(1f)(%ebp), %eax
	pushl	%eax
	lret
1:
	/* Switch to physical data segment and adjust %esp */
	movw	$PHYSICAL_DS, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	movw	%ax, %ss
	addl	%ebp, %esp
	/* Restore registers and return */
	popl	%ebp
	popl	%eax
	ret

.if32	/* Expose as _virt_to_phys for use by COMBOOT, if applicable */
	.globl _virt_to_phys
	.equ	_virt_to_phys, prot_to_phys
.endif

/****************************************************************************
 * intr_to_prot (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with a virtual code segment and
 * either a physical or virtual stack segment to 32-bit protected mode
 * with normal virtual addresses.  %esp is adjusted if necessary to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 ****************************************************************************
 */
	.section ".text.intr_to_prot", "ax", @progbits
	.code32
	.globl intr_to_prot
intr_to_prot:
	/* Preserve registers */
	pushl	%eax
	/* Check whether stack segment is physical or virtual */
	movw	%ss, %ax
	cmpw	$VIRTUAL_DS, %ax
	movw	$VIRTUAL_DS, %ax
	/* Reload data segment registers */
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %fs
	movw	%ax, %gs
	/* Reload stack segment and adjust %esp if necessary */
	je	1f
	movw	%ax, %ss
	subl	VIRTUAL(virt_offset), %esp
1:
	/* Restore registers and return */
	popl	%eax
	ret

	/* Expose as _intr_to_virt for use by GDB */
	.globl _intr_to_virt
	.equ	_intr_to_virt, intr_to_prot

/****************************************************************************
 * prot_to_long (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 64-bit
 * long mode.  The protected-mode %esp is adjusted to a physical
 * address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
	.section ".text.prot_to_long", "ax", @progbits
	.code32
prot_to_long:
	/* Preserve registers */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	/* Set up PML4 */
	movl	VIRTUAL(pml4), %eax
	movl	%eax, %cr3
	/* Enable PAE */
	movl	%cr4, %eax
	orb	$CR4_PAE, %al
	movl	%eax, %cr4
	/* Enable long mode */
	movl	$MSR_EFER, %ecx
	rdmsr
	orw	$EFER_LME, %ax
	wrmsr
	/* Enable paging */
	movl	%cr0, %eax
	orl	$CR0_PG, %eax
	movl	%eax, %cr0
	/* Restore registers */
	popl	%edx
	popl	%ecx
	popl	%eax
	/* Construct 64-bit return address */
	pushl	(%esp)
	movl	$0xffffffff, 4(%esp)
p2l_ljmp:
	/* Switch to long mode (using a physical %rip) */
	ljmp	$LONG_CS, $0
	.code64
p2l_lmode:
	/* Adjust and zero-extend %esp to a physical address */
	addl	virt_offset, %esp
	/* Use long-mode IDT */
	lidt	idtr64
	/* Return to virtual address */
	ret

	/* Long mode jump offset and target.  Required since an ljmp
	 * in protected mode will zero-extend the offset, and so
	 * cannot reach an address within the negative 2GB as used by
	 * -mcmodel=kernel.  Assigned by the call to init_librm.
	 */
	.equ	p2l_ljmp_offset, ( p2l_ljmp + 1 )
	.equ	p2l_ljmp_target, p2l_lmode
.endif
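
/* Encoding note: in 32-bit code "ljmp $LONG_CS, $0" assembles to 0xea
 * followed by a four-byte offset and a two-byte segment, so the offset
 * patched via p2l_ljmp_offset lives at ( p2l_ljmp + 1 ).  init_librm
 * stores the physical address of p2l_lmode there, since the CPU is
 * still executing from physical addresses at the moment the jump is
 * taken.
 */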

/****************************************************************************
 * long_to_prot (long-mode near call, 64-bit virtual return address)
 *
 * Switch from 64-bit long mode to 32-bit protected mode with virtual
 * addresses.  The long-mode %rsp is adjusted to a virtual address.
 * All other registers are preserved.
 *
 * The return address for this function should be a 64-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_to_prot", "ax", @progbits
	.code64
long_to_prot:
	/* Switch to protected mode */
	ljmp	*l2p_vector
	.code32
l2p_pmode:
	/* Adjust %esp to a virtual address */
	subl	VIRTUAL(virt_offset), %esp
	/* Preserve registers */
	pushl	%eax
	pushl	%ecx
	pushl	%edx
	/* Disable paging */
	movl	%cr0, %eax
	andl	$~CR0_PG, %eax
	movl	%eax, %cr0
	/* Disable PAE (in case external non-PAE-aware code enables paging) */
	movl	%cr4, %eax
	andb	$~CR4_PAE, %al
	movl	%eax, %cr4
	/* Disable long mode */
	movl	$MSR_EFER, %ecx
	rdmsr
	andw	$~EFER_LME, %ax
	wrmsr
	/* Restore registers */
	popl	%edx
	popl	%ecx
	popl	%eax
	/* Use protected-mode IDT */
	lidt	VIRTUAL(idtr32)
	/* Return */
	ret	$4

	/* Long mode jump vector.  Required since there is no "ljmp
	 * immediate" instruction in long mode.
	 */
	.section ".data.l2p_vector", "aw", @progbits
l2p_vector:
	.long	VIRTUAL(l2p_pmode), VIRTUAL_CS
.endif

/****************************************************************************
 * long_preserve_regs (long-mode near call, 64-bit virtual return address)
 *
 * Preserve registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_preserve_regs", "ax", @progbits
	.code64
long_preserve_regs:
	/* Preserve registers */
	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	pushq	%rbx
	pushq	%rsp
	pushq	%rbp
	pushq	%rsi
	pushq	%rdi
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	/* Return */
	jmp	*SIZEOF_X86_64_REGS(%rsp)
.endif

/****************************************************************************
 * long_restore_regs (long-mode near call, 64-bit virtual return address)
 *
 * Restore registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_restore_regs", "ax", @progbits
	.code64
long_restore_regs:
	/* Move return address above register dump */
	popq	SIZEOF_X86_64_REGS(%rsp)
	/* Restore registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	movl	%edi, (%rsp)
	popq	%rdi
	movl	%esi, (%rsp)
	popq	%rsi
	movl	%ebp, (%rsp)
	popq	%rbp
	leaq	8(%rsp), %rsp /* discard */
	movl	%ebx, (%rsp)
	popq	%rbx
	movl	%edx, (%rsp)
	popq	%rdx
	movl	%ecx, (%rsp)
	popq	%rcx
	movl	%eax, (%rsp)
	popq	%rax
	/* Return */
	ret
.endif

/****************************************************************************
 * virt_call (real-mode near call, 16-bit real-mode near return address)
 *
 * Call a specific C function in 32-bit protected mode or 64-bit long
 * mode (as applicable).  The prototype of the C function must be
 *   void function ( struct i386_all_regs *ix86 );
 * ix86 will point to a struct containing the real-mode registers
 * at entry to virt_call().
 *
 * All registers will be preserved across virt_call(), unless the C
 * function explicitly overwrites values in ix86.  Interrupt status
 * and GDT will also be preserved.  Gate A20 will be enabled.
 *
 * Note that virt_call() does not rely on the real-mode stack
 * remaining intact in order to return, since everything relevant is
 * copied to the protected-mode stack for the duration of the call.
 * In particular, this means that a real-mode prefix can make a call
 * to main() which will return correctly even if the prefix's stack
 * gets vapourised during the Etherboot run.  (The prefix cannot rely
 * on anything else on the stack being preserved, so should move any
 * critical data to registers before calling main()).
 *
 * Parameters:
 *   function : 32-bit virtual address of function to call
 *
 * Example usage:
 *	pushl	$pxe_api_call
 *	call	virt_call
 * to call in to the C function
 *	void pxe_api_call ( struct i386_all_regs *ix86 );
 ****************************************************************************
 */
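/* The register dump pushed by virt_call below is laid out to match
 * struct i386_all_regs: %cs and %ss are pushed last so that they
 * appear first in memory, followed by %ds, %es, %fs, %gs, the pushal
 * block, and finally the flags.  The C function therefore sees the
 * real-mode register values simply through its stack pointer
 * argument.
 */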
	.struct	0
VC_OFFSET_IX86:		.space SIZEOF_I386_ALL_REGS
VC_OFFSET_PADDING:	.space 2 /* for alignment */
VC_OFFSET_RETADDR:	.space 2
VC_OFFSET_PARAMS:
VC_OFFSET_FUNCTION:	.space 4
VC_OFFSET_END:
	.previous

	.section ".text16.virt_call", "ax", @progbits
	.code16
	.globl virt_call
virt_call:
	/* Preserve registers and flags on external RM stack */
	pushw	%ss /* padding */
	pushfl
	pushal
	pushw	%gs
	pushw	%fs
	pushw	%es
	pushw	%ds
	pushw	%ss
	pushw	%cs
	/* Claim ownership of temporary static buffer */
	cli
	/* Preserve FPU, MMX and SSE state in temporary static buffer */
	movw	%cs:rm_ds, %ds
	fxsave	( rm_tmpbuf + VC_TMP_FXSAVE )
	/* Preserve GDT and IDT in temporary static buffer */
	sidt	( rm_tmpbuf + VC_TMP_IDT )
	sgdt	( rm_tmpbuf + VC_TMP_GDT )
.if64 ;	/* Preserve control registers, if applicable */
	movl	$MSR_EFER, %ecx
	rdmsr
	movl	%eax, ( rm_tmpbuf + VC_TMP_EMER + 0 )
	movl	%edx, ( rm_tmpbuf + VC_TMP_EMER + 4 )
	movl	%cr4, %eax
	movl	%eax, ( rm_tmpbuf + VC_TMP_CR4 )
	movl	%cr3, %eax
	movl	%eax, ( rm_tmpbuf + VC_TMP_CR3 )
.endif
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to protected mode and move register dump to PM stack */
	movl	$VC_OFFSET_END, %ecx
	movl	$VC_TMP_END, %edx
	pushl	$VIRTUAL(vc_pmode)
vc_jmp:	jmp	real_to_prot
	.section ".text.virt_call", "ax", @progbits
	.code32
vc_pmode:
	/* Call function (in protected mode) */
	pushl	%esp
	call	*(VC_OFFSET_FUNCTION+4)(%esp)
	popl	%eax /* discard */
.if64 ;	/* Switch to long mode */
	jmp	1f
vc_lmode:
	call	prot_to_long
	.code64
	/* Call function (in long mode) */
	movq	%rsp, %rdi
	movslq	VC_OFFSET_FUNCTION(%rsp), %rax
	callq	*%rax
	/* Switch to protected mode */
	call	long_to_prot
1:	.code32
.endif
	/* Switch to real mode and move register dump back to RM stack */
	movl	$VC_OFFSET_END, %ecx
	movl	$VC_TMP_END, %edx
	leal	VC_TMP_GDT(%esp,%ecx), %esi
	pushl	$vc_rmode
	jmp	prot_to_real
	.section ".text16.virt_call", "ax", @progbits
	.code16
vc_rmode:
.if64 ;	/* Restore control registers, if applicable */
	movw	%sp, %bp
	movl	( rm_tmpbuf + VC_TMP_CR3 ), %eax
	movl	%eax, %cr3
	movl	( rm_tmpbuf + VC_TMP_CR4 ), %eax
	movl	%eax, %cr4
	movl	( rm_tmpbuf + VC_TMP_EMER + 0 ), %eax
	movl	( rm_tmpbuf + VC_TMP_EMER + 4 ), %edx
	movl	$MSR_EFER, %ecx
	wrmsr
.endif
	/* Restore FPU, MMX and SSE state from temporary static buffer */
	fxrstor	( rm_tmpbuf + VC_TMP_FXSAVE )
	/* Restore registers and flags and return */
	popl	%eax /* skip %cs and %ss */
	popw	%ds
	popw	%es
	popw	%fs
	popw	%gs
	popal
	/* popal skips %esp.  We therefore want to do "movl -20(%sp),
	 * %esp", but -20(%sp) is not a valid 80386 expression.
	 * Fortunately, prot_to_real() zeroes the high word of %esp, so
	 * we can just use -20(%esp) instead.
	 */
	addr32 movl -20(%esp), %esp
	popfl
	popw	%ss /* padding */
	/* Return and discard function parameters */
	ret	$( VC_OFFSET_END - VC_OFFSET_PARAMS )

	/* Protected-mode jump target */
	.equ	vc_jmp_offset, ( vc_jmp - 4 )

/****************************************************************************
 * real_call (protected-mode near call, 32-bit virtual return address)
 * real_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a real-mode function from protected-mode or long-mode code.
 *
 * The non-segment register values will be passed directly to the
 * real-mode code.  The segment registers will be set as per
 * prot_to_real.  The non-segment register values set by the real-mode
 * function will be passed back to the protected-mode or long-mode
 * caller.  A result of this is that this routine cannot be called
 * directly from C code, since it clobbers registers that the C ABI
 * expects the callee to preserve.
 *
 * librm.h defines a convenient macro REAL_CODE() for using real_call.
 * See librm.h and realmode.h for details and examples.
 *
 * Parameters:
 *   function : offset within .text16 of real-mode function to call
 *
 * Returns: none
 ****************************************************************************
 */
	.struct	0
RC_OFFSET_REGS:		.space SIZEOF_I386_REGS
RC_OFFSET_REGS_END:
RC_OFFSET_FUNCTION_COPY: .space 4
.if64
RC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
RC_OFFSET_LREG_RETADDR:	.space SIZEOF_ADDR
.endif
RC_OFFSET_RETADDR:	.space SIZEOF_ADDR
RC_OFFSET_PARAMS:
RC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
RC_OFFSET_END:
	.previous

	.section ".text.real_call", "ax", @progbits
	.CODE_DEFAULT
	.globl real_call
real_call:
.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
	call	long_preserve_regs
	call	long_to_prot
	.code32
.endif
	/* Create register dump and function pointer copy on PM stack */
	pushl	( RC_OFFSET_FUNCTION - RC_OFFSET_FUNCTION_COPY - 4 )(%esp)
	pushal
	/* Switch to real mode and move register dump to RM stack */
	movl	$RC_OFFSET_REGS_END, %ecx
	movl	$RC_TMP_END, %edx
	pushl	$rc_rmode
	movl	$VIRTUAL(rm_default_gdtr_idtr), %esi
	jmp	prot_to_real
	.section ".text16.real_call", "ax", @progbits
	.code16
rc_rmode:
	/* Call real-mode function */
	popal
	call	*( rm_tmpbuf + RC_TMP_FUNCTION )
	pushal
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to protected mode and move register dump back to PM stack */
	movl	$RC_OFFSET_REGS_END, %ecx
	xorl	%edx, %edx
	pushl	$VIRTUAL(rc_pmode)
	jmp	real_to_prot
	.section ".text.real_call", "ax", @progbits
	.code32
rc_pmode:
	/* Restore registers */
	popal
.if64 ;	/* Switch to long mode and restore registers, if applicable */
	call	prot_to_long
	.code64
	call	long_restore_regs
.endif
	/* Return and discard function parameters */
	ret	$( RC_OFFSET_END - RC_OFFSET_PARAMS )

	/* Default real-mode global and interrupt descriptor table registers */
	.section ".data.rm_default_gdtr_idtr", "aw", @progbits
rm_default_gdtr_idtr:
	.word 0		/* Global descriptor table limit */
	.long 0		/* Global descriptor table base */
	.word 0x03ff	/* Interrupt descriptor table limit */
	.long 0		/* Interrupt descriptor table base */
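
	/* The IDT limit of 0x03ff restores the default real-mode
	 * interrupt vector table: 256 four-byte vectors at physical
	 * address 0 occupy exactly 0x400 bytes, giving a limit of
	 * ( 256 * 4 - 1 ) = 0x3ff.
	 */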

/****************************************************************************
 * phys_call (protected-mode near call, 32-bit virtual return address)
 * phys_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a function with flat 32-bit physical addressing
 *
 * The non-segment register values will be passed directly to the
 * function.  The segment registers will be set for flat 32-bit
 * physical addressing.  The non-segment register values set by the
 * function will be passed back to the caller.
 *
 * librm.h defines a convenient macro PHYS_CODE() for using phys_call.
 *
 * Parameters:
 *   function : virtual (sic) address of function to call
 *
 ****************************************************************************
 */
	.struct	0
.if64
PHC_OFFSET_LREGS:	.space SIZEOF_X86_64_REGS
PHC_OFFSET_LREG_RETADDR: .space SIZEOF_ADDR
.endif
PHC_OFFSET_RETADDR:	.space SIZEOF_ADDR
PHC_OFFSET_PARAMS:
PHC_OFFSET_FUNCTION:	.space SIZEOF_ADDR
PHC_OFFSET_END:
	.previous

	.section ".text.phys_call", "ax", @progbits
	.CODE_DEFAULT
	.globl phys_call
phys_call:
.if64 ;	/* Preserve registers and switch to protected mode, if applicable */
	call	long_preserve_regs
	call	long_to_prot
	.code32
.endif
	/* Adjust function pointer to a physical address */
	pushl	%ebp
	movl	VIRTUAL(virt_offset), %ebp
	addl	%ebp, ( PHC_OFFSET_FUNCTION + 4 /* saved %ebp */ )(%esp)
	popl	%ebp
	/* Switch to physical addresses */
	call	prot_to_phys
	/* Call function */
	call	*PHC_OFFSET_FUNCTION(%esp)
	/* For sanity's sake, clear the direction flag as soon as possible */
	cld
	/* Switch to virtual addresses */
	call	phys_to_prot
.if64 ;	/* Switch to long mode and restore registers, if applicable */
	call	prot_to_long
	.code64
	call	long_restore_regs
.endif
	/* Return and discard function parameters */
	ret	$( PHC_OFFSET_END - PHC_OFFSET_PARAMS )

/****************************************************************************
 * phys_to_long (protected-mode near call, 32-bit physical return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
	.section ".text.phys_to_long", "ax", @progbits
	.code32
phys_to_long:
	/* Switch to virtual addresses */
	call	phys_to_prot
	/* Convert to 32-bit virtual return address */
	pushl	%eax
	movl	VIRTUAL(virt_offset), %eax
	subl	%eax, 4(%esp)
	popl	%eax
	/* Switch to long mode and return */
	jmp	prot_to_long

	/* Expose as _phys_to_virt for use by COMBOOT */
	.globl _phys_to_virt
	.equ	_phys_to_virt, phys_to_long
.endif

/****************************************************************************
 * long_to_phys (long-mode near call, 64-bit virtual return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
	.section ".text.long_to_phys", "ax", @progbits
	.code64
long_to_phys:
	/* Switch to protected mode */
	call	long_to_prot
	.code32
	/* Convert to 32-bit virtual return address */
	popl	(%esp)
	/* Switch to physical addresses and return */
	jmp	prot_to_phys

	/* Expose as _virt_to_phys for use by COMBOOT */
	.globl _virt_to_phys
	.equ	_virt_to_phys, long_to_phys
.endif

/****************************************************************************
 * flatten_real_mode (real-mode near call)
 *
 * Switch to flat real mode
 *
 ****************************************************************************
 */
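/* Note on the flag byte used below: writing 0x8f to byte 6 of a
 * descriptor sets limit[19:16] = 0xf and the granularity bit (G=1),
 * turning the 64kB real-mode segments into 4GB "flat real mode"
 * segments; restoring 0x00 returns them to a 64kB limit.  The dummy
 * protected-mode call is what causes the segment registers (and hence
 * the hidden descriptor caches) to be reloaded from the modified GDT.
 */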
	.section ".text16.flatten_real_mode", "ax", @progbits
	.code16
	.globl flatten_real_mode
flatten_real_mode:
	/* Modify GDT to use flat real mode */
	movb	$0x8f, real_cs + 6
	movb	$0x8f, real_ds + 6
	/* Call dummy protected-mode function */
	virtcall flatten_dummy
	/* Restore GDT */
	movb	$0x00, real_cs + 6
	movb	$0x00, real_ds + 6
	/* Return */
	ret

	.section ".text.flatten_dummy", "ax", @progbits
	.CODE_DEFAULT
flatten_dummy:
	ret

/****************************************************************************
 * Interrupt wrapper
 *
 * Used by the protected-mode and long-mode interrupt vectors to call
 * the interrupt() function.
 *
 * May be entered with either physical or virtual stack segment.
 ****************************************************************************
 */
	.section ".text.interrupt_wrapper", "ax", @progbits
	.code32
	.globl interrupt_wrapper
interrupt_wrapper:
	/* Preserve registers (excluding already-saved %eax and
	 * otherwise unused registers which are callee-save for both
	 * 32-bit and 64-bit ABIs).
	 */
	pushl	%ebx
	pushl	%ecx
	pushl	%edx
	pushl	%esi
	pushl	%edi
	/* Expand IRQ number to whole %eax register */
	movzbl	%al, %eax
.if64 ;	/* Skip transition to long mode, if applicable */
	movw	%cs, %bx
	cmpw	$LONG_CS, %bx
	je	1f
.endif
	/* Preserve segment registers and original %esp */
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss
	pushl	%esp
	/* Switch to virtual addressing */
	call	intr_to_prot
.if64
	/* Switch to long mode */
	call	prot_to_long
	.code64
1:	/* Preserve long-mode caller-save registers */
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	/* Expand IRQ number to whole %rdi register */
	movl	%eax, %edi
.endif
	/* Call interrupt handler */
	call	interrupt
.if64
	/* Restore long-mode caller-save registers */
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	/* Skip transition back to protected mode, if applicable */
	cmpw	$LONG_CS, %bx
	je	1f
	/* Switch to protected mode */
	call	long_to_prot
	.code32
	cmpw	$LONG_CS, %bx
.endif
	/* Restore segment registers and original %esp */
	lss	(%esp), %esp
	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
1:	/* Restore registers */
	popl	%edi
	popl	%esi
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
	/* Return from interrupt (with REX prefix if required) */
.if64 ;	jne	1f ; .byte 0x48 ; .endif
1:	iret
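
/* Encoding note for the return sequence above: 0x48 is the REX.W
 * prefix, and prefixing "iret" with it yields "iretq", which is
 * required to pop the 64-bit interrupt frame when the interrupt
 * arrived in long mode.  The "jne 1f" (testing the earlier comparison
 * of %bx against LONG_CS) skips the prefix for protected-mode frames.
 */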

/****************************************************************************
 * Page tables
 *
 ****************************************************************************
 */
	.section ".pages", "aw", @nobits
	.align	SIZEOF_PT

/* Page map level 4 entries (PML4Es)
 *
 * This comprises
 *
 * - PML4E[0x000] covering [0x0000000000000000-0x0000007fffffffff]
 * - PML4E[0x1ff] covering [0xffffff8000000000-0xffffffffffffffff]
 *
 * These point to the PDPT.  This creates some aliased
 * addresses within unused portions of the 64-bit address
 * space, but allows us to use just a single PDPT.
 *
 * - PDE[...] covering arbitrary 2MB portions of I/O space
 *
 * These are 2MB pages created by ioremap() to cover I/O
 * device addresses.
 */
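/* Worked example of the resulting translation: with 4-level paging a
 * virtual address is split as bits [47:39] (PML4 index), [38:30]
 * (PDPT index), [29:21] (PD index) and [20:0] (offset within a 2MB
 * page).  Since both PML4E[0x000] and PML4E[0x1ff] point to the same
 * single PDPT, addresses such as 0x0000000012345678 and
 * 0xffffff8012345678 resolve to the same physical address.
 */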
pml4e:
	.space	SIZEOF_PT
	.size	pml4e, . - pml4e

	.globl	io_pages
	.equ	io_pages, pml4e

/* Page directory pointer table entries (PDPTEs)
 *
 * This comprises:
 *
 * - PDPTE[0x000] covering [0x0000000000000000-0x000000003fffffff]
 * - PDPTE[0x001] covering [0x0000000040000000-0x000000007fffffff]
 * - PDPTE[0x002] covering [0x0000000080000000-0x00000000bfffffff]
 * - PDPTE[0x003] covering [0x00000000c0000000-0x00000000ffffffff]
 *
 * These point to the appropriate page directories (in pde_low)
 * used to identity-map the whole of the 32-bit address space.
 *
 * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
 *
 * This points back to the PML4, allowing the PML4 to be
 * (ab)used to hold 2MB pages used for I/O device addresses.
 *
 * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
 *
 * This points back to the PDPT itself, allowing the PDPT to be
 * (ab)used to hold PDEs covering .textdata.
 *
 * - PDE[N-M] covering [_textdata,_end)
 *
 * These are used to point to the page tables (in pte_textdata)
 * used to map our .textdata section.  Note that each PDE
 * covers 2MB, so we are likely to use only a single PDE in
 * practice.
 */
pdpte:
	.space	SIZEOF_PT
	.size	pdpte, . - pdpte
	.equ	pde_textdata, pdpte /* (ab)use */

/* Page directory entries (PDEs) for the low 4GB
 *
 * This comprises 2048 2MB pages to identity-map the whole of
 * the 32-bit address space.
 */
pde_low:
	.equ	PDE_LOW_PTES, ( SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE )
	.equ	PDE_LOW_PTS, ( ( PDE_LOW_PTES * SIZEOF_PTE ) / SIZEOF_PT )
	.space	( PDE_LOW_PTS * SIZEOF_PT )
	.size	pde_low, . - pde_low

/* Page table entries (PTEs) for .textdata
 *
 * This comprises enough 4kB pages to map the whole of
 * .textdata.  The required number of PTEs is calculated by
 * the linker script.
 *
 * Note that these mappings do not cover the PTEs themselves.
 * This does not matter, since code running with paging
 * enabled never needs to access these PTEs.
 */
pte_textdata:
	/* Allocated by linker script; must be at the end of .textdata */

	.section ".bss.pml4", "aw", @nobits
pml4:	.long 0

/****************************************************************************
 * init_pages (protected-mode near call)
 *
 * Initialise the page tables ready for long mode.
 *
 * Parameters:
 *   %edi : virt_offset
 ****************************************************************************
 */
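/* Each entry written below has the form ( physical address | flags ):
 * for example a PDPTE pointing at the first page directory is
 * ( physical address of pde_low | PG_P | PG_RW | PG_US ), which the
 * code constructs in a single leal by adding the flag bits and the
 * virt_offset held in %edi to the table's virtual address.
 */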
	.section ".text.init_pages", "ax", @progbits
	.code32
init_pages:
	/* Initialise PML4Es for low 4GB and negative 2GB */
	leal	( VIRTUAL(pdpte) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, VIRTUAL(pml4e)
	movl	%eax, ( VIRTUAL(pml4e) + SIZEOF_PT - SIZEOF_PTE )
	/* Initialise PDPTE for negative 1GB */
	movl	%eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )
	/* Initialise PDPTE for I/O space */
	leal	( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )
	/* Initialise PDPTEs for low 4GB */
	movl	$PDE_LOW_PTS, %ecx
	leal	( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
		  ( PG_P | PG_RW | PG_US ) )(%edi), %eax
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, ( VIRTUAL(pdpte) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PDEs for low 4GB */
	movl	$PDE_LOW_PTES, %ecx
	leal	( 0 + ( PG_P | PG_RW | PG_US | PG_PS ) ), %eax
1:	subl	$SIZEOF_2MB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pde_low) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PDEs for .textdata */
	movl	$_textdata_pdes, %ecx
	leal	( VIRTUAL(_etextdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	$VIRTUAL(_textdata), %ebx
	shrl	$( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ), %ebx
	andl	$( SIZEOF_PT - 1 ), %ebx
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, ( VIRTUAL(pde_textdata) - SIZEOF_PTE )(%ebx,%ecx,SIZEOF_PTE)
	loop	1b
	/* Initialise PTEs for .textdata */
	movl	$_textdata_ptes, %ecx
	leal	( VIRTUAL(_textdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	addl	$_textdata_paged_len, %eax
1:	subl	$SIZEOF_4KB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pte_textdata) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
	loop	1b
	/* Record PML4 physical address */
	leal	VIRTUAL(pml4e)(%edi), %eax
	movl	%eax, VIRTUAL(pml4)
	/* Return */
	ret