
librm.S 40KB

/*
 * librm: a library for interfacing to real-mode code
 *
 * Michael Brown <mbrown@fensystems.co.uk>
 *
 */

        FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )

/* Drag in local definitions */
#include "librm.h"

/* CR0: protection enabled */
#define CR0_PE ( 1 << 0 )

/* CR0: paging */
#define CR0_PG ( 1 << 31 )

/* CR4: physical address extensions */
#define CR4_PAE ( 1 << 5 )

/* Extended feature enable MSR (EFER) */
#define MSR_EFER 0xc0000080

/* EFER: long mode enable */
#define EFER_LME ( 1 << 8 )

/* Page: present */
#define PG_P 0x01

/* Page: read/write */
#define PG_RW 0x02

/* Page: user/supervisor */
#define PG_US 0x04

/* Page: page size */
#define PG_PS 0x80

/* Size of various paging-related data structures */
#define SIZEOF_PTE_LOG2 3
#define SIZEOF_PTE ( 1 << SIZEOF_PTE_LOG2 )
#define SIZEOF_PT_LOG2 12
#define SIZEOF_PT ( 1 << SIZEOF_PT_LOG2 )
#define SIZEOF_4KB_PAGE_LOG2 12
#define SIZEOF_4KB_PAGE ( 1 << SIZEOF_4KB_PAGE_LOG2 )
#define SIZEOF_2MB_PAGE_LOG2 21
#define SIZEOF_2MB_PAGE ( 1 << SIZEOF_2MB_PAGE_LOG2 )
#define SIZEOF_LOW_4GB_LOG2 32
#define SIZEOF_LOW_4GB ( 1 << SIZEOF_LOW_4GB_LOG2 )
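/* A quick sanity check on the arithmetic above (illustrative note,
 * not part of the original): each page table is SIZEOF_PT = 4096
 * bytes of SIZEOF_PTE = 8-byte entries, i.e. 512 entries per table; a
 * 2MB page covers 1 << 21 bytes; and SIZEOF_LOW_4GB = 1 << 32 relies
 * on gas evaluating expressions with more than 32 bits of precision.
 */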
/* Size of various C data structures */
#define SIZEOF_I386_SEG_REGS 12
#define SIZEOF_I386_REGS 32
#define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
#define SIZEOF_I386_FLAGS 4
#define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
#define SIZEOF_X86_64_REGS 128

/* Size of an address */
#ifdef __x86_64__
#define SIZEOF_ADDR 8
#else
#define SIZEOF_ADDR 4
#endif

/* Default code size */
#ifdef __x86_64__
#define CODE_DEFAULT code64
#else
#define CODE_DEFAULT code32
#endif

/* Selectively assemble code for 32-bit/64-bit builds */
#ifdef __x86_64__
#define if32 if 0
#define if64 if 1
#else
#define if32 if 1
#define if64 if 0
#endif

/****************************************************************************
 * Global descriptor table
 *
 * Call init_librm to set up the GDT before attempting to use any
 * protected-mode code.
 *
 * NOTE: This must be located before prot_to_real, otherwise gas
 * throws a "can't handle non absolute segment in `ljmp'" error due to
 * not knowing the value of REAL_CS when the ljmp is encountered.
 *
 * Note also that putting ".word gdt_end - gdt - 1" directly into
 * gdt_limit, rather than going via gdt_length, will also produce the
 * "non absolute segment" error.  This is most probably a bug in gas.
 ****************************************************************************
 */
        .section ".data16.gdt", "aw", @progbits
        .align 16
gdt:
gdtr:           /* The first GDT entry is unused, the GDTR can fit here. */
gdt_limit:      .word gdt_length - 1
gdt_base:       .long 0
                .word 0 /* padding */

        .org gdt + VIRTUAL_CS, 0
virtual_cs:     /* 32 bit protected mode code segment, virtual addresses */
        .word 0xffff, 0
        .byte 0, 0x9f, 0xcf, 0

        .org gdt + VIRTUAL_DS, 0
virtual_ds:     /* 32 bit protected mode data segment, virtual addresses */
        .word 0xffff, 0
        .byte 0, 0x93, 0xcf, 0

        .org gdt + PHYSICAL_CS, 0
physical_cs:    /* 32 bit protected mode code segment, physical addresses */
        .word 0xffff, 0
        .byte 0, 0x9f, 0xcf, 0

        .org gdt + PHYSICAL_DS, 0
physical_ds:    /* 32 bit protected mode data segment, physical addresses */
        .word 0xffff, 0
        .byte 0, 0x93, 0xcf, 0

        .org gdt + REAL_CS, 0
real_cs:        /* 16 bit real mode code segment */
        .word 0xffff, 0
        .byte 0, 0x9b, 0x00, 0

        .org gdt + REAL_DS, 0
real_ds:        /* 16 bit real mode data segment */
        .word 0xffff, 0
        .byte 0, 0x93, 0x00, 0

        .org gdt + P2R_DS, 0
p2r_ds:         /* 16 bit real mode data segment for prot_to_real transition */
        .word 0xffff, ( P2R_DS << 4 )
        .byte 0, 0x93, 0x00, 0

        .org gdt + LONG_CS, 0
long_cs:        /* 64 bit long mode code segment */
        .word 0, 0
        .byte 0, 0x9a, 0x20, 0

gdt_end:
        .equ gdt_length, gdt_end - gdt
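/* As a worked example (descriptive note only), the eight descriptor
 * bytes of virtual_cs decode as:
 *	.word 0xffff	limit[15:0]  = 0xffff
 *	.word 0		base[15:0]   = 0
 *	.byte 0		base[23:16]  = 0
 *	.byte 0x9f	access: present, ring-0 code segment
 *	.byte 0xcf	G=1, D=1, limit[19:16] = 0xf (i.e. a 4GB limit)
 *	.byte 0		base[31:24]  = 0
 * The zero base shown here is a placeholder: init_librm rewrites the
 * base fields (via set_seg_base) before the descriptor is ever used.
 */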
/****************************************************************************
 * Stored real-mode and protected-mode stack pointers
 *
 * The real-mode stack pointer is stored here whenever real_to_prot
 * is called and restored whenever prot_to_real is called.  The
 * converse happens for the protected-mode stack pointer.
 *
 * Despite initial appearances this scheme is, in fact, re-entrant,
 * because program flow dictates that we always return via the point
 * we left by.  For example:
 *
 *     PXE API call entry
 *   1   real => prot
 *       ...
 *       Print a text string
 *       ...
 *   2     prot => real
 *         INT 10
 *   3     real => prot
 *       ...
 *       ...
 *   4   prot => real
 *     PXE API call exit
 *
 * At point 1, the RM mode stack value, say RPXE, is stored in
 * rm_ss,sp.  We want this value to still be present in rm_ss,sp when
 * we reach point 4.
 *
 * At point 2, the RM stack value is restored from RPXE.  At point 3,
 * the RM stack value is again stored in rm_ss,sp.  This *does*
 * overwrite the RPXE that we have stored there, but it's the same
 * value, since the code between points 2 and 3 has managed to return
 * to us.
 ****************************************************************************
 */
        .section ".bss.rm_sp", "aw", @nobits
        .globl rm_sp
rm_sp:  .word 0

        .section ".bss.rm_ss", "aw", @nobits
        .globl rm_ss
rm_ss:  .word 0

        .section ".data.pm_esp", "aw", @progbits
pm_esp: .long VIRTUAL(_estack)

/****************************************************************************
 * Virtual address offsets
 *
 * These are used by the protected-mode code to map between virtual
 * and physical addresses, and to access variables in the .text16 or
 * .data16 segments.
 ****************************************************************************
 */
        .struct 0
VA_VIRT_OFFSET: .space SIZEOF_ADDR
VA_TEXT16:      .space SIZEOF_ADDR
VA_DATA16:      .space SIZEOF_ADDR
VA_SIZE:
        .previous

/* Internal copies, used only by librm itself */
        .section ".bss16.rm_virt_addrs", "aw", @nobits
rm_virt_addrs:  .space VA_SIZE
        .equ rm_virt_offset, ( rm_virt_addrs + VA_VIRT_OFFSET )
        .equ rm_text16, ( rm_virt_addrs + VA_TEXT16 )
        .equ rm_data16, ( rm_virt_addrs + VA_DATA16 )

/* Externally visible variables, used by C code */
        .section ".bss.virt_addrs", "aw", @nobits
virt_addrs:     .space VA_SIZE
        .globl virt_offset
        .equ virt_offset, ( virt_addrs + VA_VIRT_OFFSET )
        .globl text16
        .equ text16, ( virt_addrs + VA_TEXT16 )
        .globl data16
        .equ data16, ( virt_addrs + VA_DATA16 )
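/* For illustration only (with a hypothetical symbol "foo"):
 * protected-mode code converts a virtual address to a physical
 * address by adding virt_offset, e.g.
 *	movl	$VIRTUAL(foo), %eax
 *	addl	VIRTUAL(virt_offset), %eax
 * after which %eax holds the physical address of foo.  On 64-bit
 * builds virt_offset is an eight-byte field, so long-mode code uses
 * the full quadword.
 */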
/****************************************************************************
 * init_librm (real-mode far call, 16-bit real-mode far return address)
 *
 * Initialise the GDT ready for transitions to protected mode.
 *
 * Parameters:
 *   %cs : .text16 segment
 *   %ds : .data16 segment
 *   %edi : Physical base of protected-mode code
 ****************************************************************************
 */
        .section ".text16.init_librm", "ax", @progbits
        .code16
        .globl init_librm
init_librm:
        /* Preserve registers */
        pushl %eax
        pushl %ebx
        pushl %edi

        /* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
        subl $VIRTUAL(_textdata), %edi
        movl %edi, rm_virt_offset
.if64 ; setae (rm_virt_offset+4) ; .endif
        movl %edi, %eax
        movw $virtual_cs, %bx
        call set_seg_base
        movw $virtual_ds, %bx
        call set_seg_base

        /* Store rm_cs and rm_text16, set up real_cs segment */
        xorl %eax, %eax
        movw %cs, %ax
        movw %ax, %cs:rm_cs
        shll $4, %eax
        movw $real_cs, %bx
        call set_seg_base
.if32 ; subl %edi, %eax ; .endif
        movl %eax, rm_text16

        /* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
        xorl %eax, %eax
        movw %ds, %ax
        movw %ax, %cs:rm_ds
        shll $4, %eax
        movw $real_ds, %bx
        call set_seg_base
        movl %eax, gdt_base
        addl $gdt, gdt_base
.if32 ; subl %edi, %eax ; .endif
        movl %eax, rm_data16

        /* Configure virt_call for protected mode, if applicable */
.if64 ; movl $VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif

        /* Switch to protected mode */
        virtcall init_librm_pmode
        .section ".text.init_librm", "ax", @progbits
        .code32
init_librm_pmode:
        /* Store virt_offset, text16, and data16 */
        pushw %ds
        movw $REAL_DS, %ax
        movw %ax, %ds
        movl $rm_virt_addrs, %esi
        movl $VIRTUAL(virt_addrs), %edi
        movl $( VA_SIZE / 4 ), %ecx
        rep movsl
        popw %ds

.if64 ; /* Initialise long mode, if applicable */
        movl VIRTUAL(virt_offset), %edi
        leal VIRTUAL(p2l_ljmp_target)(%edi), %eax
        movl %eax, VIRTUAL(p2l_ljmp_offset)
        call init_pages
.endif
        /* Return to real mode */
        ret
        .section ".text16.init_librm", "ax", @progbits
        .code16
init_librm_rmode:
        /* Configure virt_call for long mode, if applicable */
.if64 ; movl $VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif

        /* Initialise IDT */
        virtcall init_idt

        /* Restore registers */
        popl %edi
        popl %ebx
        popl %eax
        lret

        .section ".text16.set_seg_base", "ax", @progbits
        .code16
set_seg_base:
1:      movw %ax, 2(%bx)
        rorl $16, %eax
        movb %al, 4(%bx)
        movb %ah, 7(%bx)
        roll $16, %eax
        ret
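/* To make the byte offsets above concrete (descriptive note only):
 * for a base of, say, 0x00012340, set_seg_base stores 0x2340 into
 * descriptor bytes 2-3 (base[15:0]), 0x01 into byte 4 (base[23:16])
 * and 0x00 into byte 7 (base[31:24]), leaving the access and flags
 * bytes untouched.
 */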
/****************************************************************************
 * real_to_prot (real-mode near call, 32-bit virtual return address)
 *
 * Switch from 16-bit real-mode to 32-bit protected mode with virtual
 * addresses.  The real-mode %ss:sp is stored in rm_ss and rm_sp, and
 * the protected-mode %esp is restored from the saved pm_esp.
 * Interrupts are disabled.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 * Parameters:
 *   %ecx : number of bytes to move from RM stack to PM stack
 *
 ****************************************************************************
 */
        .section ".text16.real_to_prot", "ax", @progbits
        .code16
real_to_prot:
        /* Enable A20 line */
        call enable_a20
        /* A failure at this point is fatal, and there's nothing we
         * can do about it other than lock the machine to make the
         * problem immediately visible.
         */
1:      jc 1b

        /* Make sure we have our data segment available */
        movw %cs:rm_ds, %ds

        /* Add protected-mode return address to length of data to be copied */
        addw $4, %cx /* %ecx must be less than 64kB anyway */

        /* Real-mode %ss:%sp => %ebp:%edx and virtual address => %esi */
        xorl %ebp, %ebp
        movw %ss, %bp
        movzwl %sp, %edx
        movl %ebp, %eax
        shll $4, %eax
        addr32 leal (%eax,%edx), %esi
        subl rm_virt_offset, %esi

        /* Load protected-mode global descriptor table */
        data32 lgdt gdtr

        /* Zero segment registers.  This wastes around 12 cycles on
         * real hardware, but saves a substantial number of emulated
         * instructions under KVM.
         */
        xorw %ax, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss

        /* Switch to protected mode (with paging disabled if applicable) */
        cli
        movl %cr0, %eax
.if64 ; andl $~CR0_PG, %eax ; .endif
        orb $CR0_PE, %al
        movl %eax, %cr0
        data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
        .section ".text.real_to_prot", "ax", @progbits
        .code32
r2p_pmode:
        /* Set up protected-mode data segments and stack pointer */
        movw $VIRTUAL_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss
        movl VIRTUAL(pm_esp), %esp

        /* Load protected-mode interrupt descriptor table */
        lidt VIRTUAL(idtr32)

        /* Record real-mode %ss:sp (after removal of data) */
        movw %bp, VIRTUAL(rm_ss)
        addl %ecx, %edx
        movw %dx, VIRTUAL(rm_sp)

        /* Move data from RM stack to PM stack */
        subl %ecx, %esp
        movl %esp, %edi
        rep movsb

        /* Return to virtual address */
        ret
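/* A worked example of the segment arithmetic above (illustrative
 * note only): with %ss = 0x9000 and %sp = 0xfff0, the flat address of
 * the real-mode stack is ( 0x9000 << 4 ) + 0xfff0 = 0x9fff0, from
 * which rm_virt_offset is subtracted to obtain the virtual address
 * used for the copy.
 */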
/****************************************************************************
 * prot_to_real (protected-mode near call, 32-bit real-mode return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 16-bit
 * real mode.  The protected-mode %esp is stored in pm_esp and the
 * real-mode %ss:sp is restored from the saved rm_ss and rm_sp.  The
 * high word of the real-mode %esp is set to zero.  All real-mode data
 * segment registers are loaded from the saved rm_ds.  Interrupts are
 * *not* enabled, since we want to be able to use prot_to_real in an
 * ISR.  All other registers may be destroyed.
 *
 * The return address for this function should be a 32-bit (sic)
 * real-mode offset within .code16.
 *
 * Parameters:
 *   %ecx : number of bytes to move from PM stack to RM stack
 *   %esi : real-mode global and interrupt descriptor table registers
 *
 ****************************************************************************
 */
        .section ".text.prot_to_real", "ax", @progbits
        .code32
prot_to_real:
        /* Copy real-mode global descriptor table register to RM code segment */
        movl VIRTUAL(text16), %edi
.if64 ; subl VIRTUAL(virt_offset), %edi ; .endif
        leal rm_gdtr(%edi), %edi
        movsw
        movsl

        /* Load real-mode interrupt descriptor table register */
        lidt (%esi)

        /* Add return address to data to be moved to RM stack */
        addl $4, %ecx

        /* Real-mode %ss:%sp => %ebp:%edx and virtual address => %edi */
        movzwl VIRTUAL(rm_ss), %ebp
        movzwl VIRTUAL(rm_sp), %edx
        subl %ecx, %edx
        movl %ebp, %eax
        shll $4, %eax
        leal (%eax,%edx), %edi
        subl VIRTUAL(virt_offset), %edi

        /* Move data from PM stack to RM stack */
        movl %esp, %esi
        rep movsb

        /* Record protected-mode %esp (after removal of data) */
        movl %esi, VIRTUAL(pm_esp)

        /* Load real-mode segment limits */
        movw $P2R_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss
        ljmp $REAL_CS, $p2r_rmode
        .section ".text16.prot_to_real", "ax", @progbits
        .code16
p2r_rmode:
        /* Load real-mode GDT */
        data32 lgdt %cs:rm_gdtr
        /* Switch to real mode.  Note that "!" is the gas infix
         * "bitwise or not" operator, so "$0!CR0_PE" is simply ~CR0_PE.
         */
        movl %cr0, %eax
        andb $0!CR0_PE, %al
        movl %eax, %cr0
p2r_ljmp_rm_cs:
        ljmp $0, $1f
1:
        /* Set up real-mode data segments and stack pointer */
        movw %cs:rm_ds, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %bp, %ss
        movl %edx, %esp

        /* Return to real-mode address */
        data32 ret

        /* Real-mode code and data segments.  Assigned by the call to
         * init_librm.  rm_cs doubles as the segment part of the jump
         * instruction used by prot_to_real.  Both are located in
         * .text16 rather than .data16: rm_cs since it forms part of
         * the jump instruction within the code segment, and rm_ds
         * since real-mode code needs to be able to locate the data
         * segment with no other reference available.
         */
        .globl rm_cs
        .equ rm_cs, ( p2r_ljmp_rm_cs + 3 )

        .section ".text16.data.rm_ds", "aw", @progbits
        .globl rm_ds
rm_ds:  .word 0

        /* Real-mode global and interrupt descriptor table registers */
        .section ".text16.data.rm_gdtr", "aw", @progbits
rm_gdtr:
        .word 0 /* Limit */
        .long 0 /* Base */
/****************************************************************************
 * phys_to_prot (protected-mode near call, 32-bit physical return address)
 *
 * Switch from 32-bit protected mode with physical addresses to 32-bit
 * protected mode with virtual addresses.  %esp is adjusted to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit physical
 * (sic) address.
 *
 ****************************************************************************
 */
        .section ".text.phys_to_prot", "ax", @progbits
        .code32
        .globl phys_to_prot
phys_to_prot:
        /* Preserve registers */
        pushl %eax
        pushl %ebp

        /* Switch to virtual code segment */
        cli
        ljmp $VIRTUAL_CS, $VIRTUAL(1f)
1:
        /* Switch to virtual data segment and adjust %esp */
        movw $VIRTUAL_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss
        movl VIRTUAL(virt_offset), %ebp
        subl %ebp, %esp

        /* Adjust return address to a virtual address */
        subl %ebp, 8(%esp)

        /* Restore registers and return */
        popl %ebp
        popl %eax
        ret

.if32 /* Expose as _phys_to_virt for use by COMBOOT, if applicable */
        .globl _phys_to_virt
        .equ _phys_to_virt, phys_to_prot
.endif

/****************************************************************************
 * prot_to_phys (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 32-bit
 * protected mode with physical addresses.  %esp is adjusted to a
 * physical address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * (sic) address.
 *
 ****************************************************************************
 */
        .section ".text.prot_to_phys", "ax", @progbits
        .code32
prot_to_phys:
        /* Preserve registers */
        pushl %eax
        pushl %ebp

        /* Adjust return address to a physical address */
        movl VIRTUAL(virt_offset), %ebp
        addl %ebp, 8(%esp)

        /* Switch to physical code segment */
        cli
        pushl $PHYSICAL_CS
        leal VIRTUAL(1f)(%ebp), %eax
        pushl %eax
        lret
1:
        /* Switch to physical data segment and adjust %esp */
        movw $PHYSICAL_DS, %ax
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs
        movw %ax, %ss
        addl %ebp, %esp

        /* Restore registers and return */
        popl %ebp
        popl %eax
        ret

.if32 /* Expose as _virt_to_phys for use by COMBOOT, if applicable */
        .globl _virt_to_phys
        .equ _virt_to_phys, prot_to_phys
.endif
/****************************************************************************
 * intr_to_prot (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with a virtual code segment and
 * either a physical or virtual stack segment to 32-bit protected mode
 * with normal virtual addresses.  %esp is adjusted if necessary to a
 * virtual address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit virtual
 * address.
 *
 ****************************************************************************
 */
        .section ".text.intr_to_prot", "ax", @progbits
        .code32
        .globl intr_to_prot
intr_to_prot:
        /* Preserve registers */
        pushl %eax

        /* Check whether stack segment is physical or virtual */
        movw %ss, %ax
        cmpw $VIRTUAL_DS, %ax
        movw $VIRTUAL_DS, %ax

        /* Reload data segment registers */
        movw %ax, %ds
        movw %ax, %es
        movw %ax, %fs
        movw %ax, %gs

        /* Reload stack segment and adjust %esp if necessary */
        je 1f
        movw %ax, %ss
        subl VIRTUAL(virt_offset), %esp
1:
        /* Restore registers and return */
        popl %eax
        ret

        /* Expose as _intr_to_virt for use by GDB */
        .globl _intr_to_virt
        .equ _intr_to_virt, intr_to_prot
/****************************************************************************
 * prot_to_long (protected-mode near call, 32-bit virtual return address)
 *
 * Switch from 32-bit protected mode with virtual addresses to 64-bit
 * long mode.  The protected-mode %esp is adjusted to a physical
 * address.  All other registers are preserved.
 *
 * The return address for this function should be a 32-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
        .section ".text.prot_to_long", "ax", @progbits
        .code32
prot_to_long:
        /* Preserve registers */
        pushl %eax
        pushl %ecx
        pushl %edx

        /* Set up PML4 */
        movl VIRTUAL(pml4), %eax
        movl %eax, %cr3

        /* Enable PAE */
        movl %cr4, %eax
        orb $CR4_PAE, %al
        movl %eax, %cr4

        /* Enable long mode */
        movl $MSR_EFER, %ecx
        rdmsr
        orw $EFER_LME, %ax
        wrmsr

        /* Enable paging */
        movl %cr0, %eax
        orl $CR0_PG, %eax
        movl %eax, %cr0

        /* Restore registers */
        popl %edx
        popl %ecx
        popl %eax

        /* Construct 64-bit return address by duplicating the 32-bit
         * return address and filling the high dword with ones, i.e.
         * sign-extending it into the negative 2GB used by
         * -mcmodel=kernel.
         */
        pushl (%esp)
        movl $0xffffffff, 4(%esp)
p2l_ljmp:
        /* Switch to long mode (using a physical %rip) */
        ljmp $LONG_CS, $0
        .code64
p2l_lmode:
        /* Adjust and zero-extend %esp to a physical address */
        addl virt_offset, %esp

        /* Use long-mode IDT */
        lidt idtr64

        /* Return to virtual address */
        ret

        /* Long mode jump offset and target.  Required since an ljmp
         * in protected mode will zero-extend the offset, and so
         * cannot reach an address within the negative 2GB as used by
         * -mcmodel=kernel.  Assigned by the call to init_librm.
         */
        .equ p2l_ljmp_offset, ( p2l_ljmp + 1 )
        .equ p2l_ljmp_target, p2l_lmode
.endif
/****************************************************************************
 * long_to_prot (long-mode near call, 64-bit virtual return address)
 *
 * Switch from 64-bit long mode to 32-bit protected mode with virtual
 * addresses.  The long-mode %rsp is adjusted to a virtual address.
 * All other registers are preserved.
 *
 * The return address for this function should be a 64-bit (sic)
 * virtual address.
 *
 ****************************************************************************
 */
.if64
        .section ".text.long_to_prot", "ax", @progbits
        .code64
long_to_prot:
        /* Switch to protected mode */
        ljmp *l2p_vector
        .code32
l2p_pmode:
        /* Adjust %esp to a virtual address */
        subl VIRTUAL(virt_offset), %esp

        /* Preserve registers */
        pushl %eax
        pushl %ecx
        pushl %edx

        /* Disable paging */
        movl %cr0, %eax
        andl $~CR0_PG, %eax
        movl %eax, %cr0

        /* Disable PAE (in case external non-PAE-aware code enables paging) */
        movl %cr4, %eax
        andb $~CR4_PAE, %al
        movl %eax, %cr4

        /* Disable long mode */
        movl $MSR_EFER, %ecx
        rdmsr
        andw $~EFER_LME, %ax
        wrmsr

        /* Restore registers */
        popl %edx
        popl %ecx
        popl %eax

        /* Use protected-mode IDT */
        lidt VIRTUAL(idtr32)

        /* Return, discarding the high dword of the 64-bit return address */
        ret $4

        /* Long mode jump vector.  Required since there is no "ljmp
         * immediate" instruction in long mode.
         */
        .section ".data.l2p_vector", "aw", @progbits
l2p_vector:
        .long VIRTUAL(l2p_pmode), VIRTUAL_CS
.endif
/****************************************************************************
 * long_preserve_regs (long-mode near call, 64-bit virtual return address)
 *
 * Preserve registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
        .section ".text.long_preserve_regs", "ax", @progbits
        .code64
long_preserve_regs:
        /* Preserve registers */
        pushq %rax
        pushq %rcx
        pushq %rdx
        pushq %rbx
        pushq %rsp
        pushq %rbp
        pushq %rsi
        pushq %rdi
        pushq %r8
        pushq %r9
        pushq %r10
        pushq %r11
        pushq %r12
        pushq %r13
        pushq %r14
        pushq %r15

        /* Return */
        jmp *SIZEOF_X86_64_REGS(%rsp)
.endif

/****************************************************************************
 * long_restore_regs (long-mode near call, 64-bit virtual return address)
 *
 * Restore registers that are accessible only in long mode.  This
 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
 * %rsi, %rdi, and %rbp.
 *
 ****************************************************************************
 */
.if64
        .section ".text.long_restore_regs", "ax", @progbits
        .code64
long_restore_regs:
        /* Move return address above register dump */
        popq SIZEOF_X86_64_REGS(%rsp)

        /* Restore registers */
        popq %r15
        popq %r14
        popq %r13
        popq %r12
        popq %r11
        popq %r10
        popq %r9
        popq %r8
        movl %edi, (%rsp)
        popq %rdi
        movl %esi, (%rsp)
        popq %rsi
        movl %ebp, (%rsp)
        popq %rbp
        leaq 8(%rsp), %rsp /* discard */
        movl %ebx, (%rsp)
        popq %rbx
        movl %edx, (%rsp)
        popq %rdx
        movl %ecx, (%rsp)
        popq %rcx
        movl %eax, (%rsp)
        popq %rax

        /* Return */
        ret
.endif
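/* The movl/popq pairs above implement a merge (descriptive note
 * only): each movl overwrites the low dword of the saved qword with
 * the register's current value, so the subsequent popq restores only
 * the upper half from the dump.  For example, if %rax was
 * 0x1111111122222222 at long_preserve_regs and %eax is now
 * 0x33333333, the popq leaves %rax = 0x1111111133333333.
 */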
/****************************************************************************
 * virt_call (real-mode near call, 16-bit real-mode near return address)
 *
 * Call a specific C function in 32-bit protected mode or 64-bit long
 * mode (as applicable).  The prototype of the C function must be
 *   void function ( struct i386_all_regs *ix86 );
 * ix86 will point to a struct containing the real-mode registers
 * at entry to virt_call().
 *
 * All registers will be preserved across virt_call(), unless the C
 * function explicitly overwrites values in ix86.  Interrupt status
 * and GDT will also be preserved.  Gate A20 will be enabled.
 *
 * Note that virt_call() does not rely on the real-mode stack
 * remaining intact in order to return, since everything relevant is
 * copied to the protected-mode stack for the duration of the call.
 * In particular, this means that a real-mode prefix can make a call
 * to main() which will return correctly even if the prefix's stack
 * gets vapourised during the Etherboot run.  (The prefix cannot rely
 * on anything else on the stack being preserved, so should move any
 * critical data to registers before calling main()).
 *
 * Parameters:
 *   function : 32-bit virtual address of function to call
 *
 * Example usage:
 *	pushl	$pxe_api_call
 *	call	virt_call
 * to call in to the C function
 *   void pxe_api_call ( struct i386_all_regs *ix86 );
 ****************************************************************************
 */
        .struct 0
VC_OFFSET_GDT:          .space 6
VC_OFFSET_IDT:          .space 6
.if64
VC_OFFSET_PADDING64:    .space 4 /* for alignment */
VC_OFFSET_CR3:          .space 4
VC_OFFSET_CR4:          .space 4
VC_OFFSET_EMER:         .space 8
.endif
VC_OFFSET_IX86:         .space SIZEOF_I386_ALL_REGS
VC_OFFSET_PADDING:      .space 2 /* for alignment */
VC_OFFSET_RETADDR:      .space 2
VC_OFFSET_PARAMS:
VC_OFFSET_FUNCTION:     .space 4
VC_OFFSET_END:
        .previous
        .section ".text16.virt_call", "ax", @progbits
        .code16
        .globl virt_call
virt_call:
        /* Preserve registers, flags and GDT on external RM stack */
        pushw %ss /* padding */
        pushfl
        pushal
        pushw %gs
        pushw %fs
        pushw %es
        pushw %ds
        pushw %ss
        pushw %cs
        subw $VC_OFFSET_IX86, %sp
        movw %sp, %bp
        sidt VC_OFFSET_IDT(%bp)
        sgdt VC_OFFSET_GDT(%bp)

.if64 ; /* Preserve control registers, if applicable */
        movl $MSR_EFER, %ecx
        rdmsr
        movl %eax, (VC_OFFSET_EMER+0)(%bp)
        movl %edx, (VC_OFFSET_EMER+4)(%bp)
        movl %cr4, %eax
        movl %eax, VC_OFFSET_CR4(%bp)
        movl %cr3, %eax
        movl %eax, VC_OFFSET_CR3(%bp)
.endif
        /* For sanity's sake, clear the direction flag as soon as possible */
        cld

        /* Switch to protected mode and move register dump to PM stack */
        movl $VC_OFFSET_END, %ecx
        pushl $VIRTUAL(vc_pmode)
vc_jmp: jmp real_to_prot
        .section ".text.virt_call", "ax", @progbits
        .code32
vc_pmode:
        /* Call function (in protected mode) */
        leal VC_OFFSET_IX86(%esp), %eax
        pushl %eax
        call *(VC_OFFSET_FUNCTION+4)(%esp)
        popl %eax /* discard */

.if64 ; /* Switch to long mode */
        jmp 1f
vc_lmode:
        call prot_to_long
        .code64

        /* Call function (in long mode) */
        leaq VC_OFFSET_IX86(%rsp), %rdi
        pushq %rdi
        movslq (VC_OFFSET_FUNCTION+8)(%rsp), %rax
        callq *%rax
        popq %rdi /* discard */

        /* Switch to protected mode */
        call long_to_prot
1:      .code32
.endif
        /* Switch to real mode and move register dump back to RM stack */
        movl $VC_OFFSET_END, %ecx
        movl %esp, %esi
        pushl $vc_rmode
        jmp prot_to_real
        .section ".text16.virt_call", "ax", @progbits
        .code16
vc_rmode:
.if64 ; /* Restore control registers, if applicable */
        movw %sp, %bp
        movl VC_OFFSET_CR3(%bp), %eax
        movl %eax, %cr3
        movl VC_OFFSET_CR4(%bp), %eax
        movl %eax, %cr4
        movl (VC_OFFSET_EMER+0)(%bp), %eax
        movl (VC_OFFSET_EMER+4)(%bp), %edx
        movl $MSR_EFER, %ecx
        wrmsr
.endif
        /* Restore registers and flags and return */
        addw $( VC_OFFSET_IX86 + 4 /* also skip %cs and %ss */ ), %sp
        popw %ds
        popw %es
        popw %fs
        popw %gs
        popal

        /* popal skips %esp.  We therefore want to do "movl -20(%sp),
         * %esp", but -20(%sp) is not a valid 80386 expression.
         * Fortunately, prot_to_real() zeroes the high word of %esp, so
         * we can just use -20(%esp) instead.
         */
        addr32 movl -20(%esp), %esp
        popfl
        popw %ss /* padding */

        /* Return and discard function parameters */
        ret $( VC_OFFSET_END - VC_OFFSET_PARAMS )

        /* Protected-mode jump target */
        .equ vc_jmp_offset, ( vc_jmp - 4 )
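/* Why vc_jmp - 4 (descriptive note only): the pushl immediately
 * preceding vc_jmp assembles (with an operand-size prefix, since this
 * is 16-bit code) to 0x66 0x68 followed by a four-byte immediate, so
 * ( vc_jmp - 4 ) addresses that immediate.  init_librm patches it to
 * vc_pmode or vc_lmode as appropriate, retargeting virt_call without
 * modifying the jump instruction itself.
 */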
/****************************************************************************
 * real_call (protected-mode near call, 32-bit virtual return address)
 * real_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a real-mode function from protected-mode or long-mode code.
 *
 * The non-segment register values will be passed directly to the
 * real-mode code.  The segment registers will be set as per
 * prot_to_real.  The non-segment register values set by the real-mode
 * function will be passed back to the protected-mode or long-mode
 * caller.  A result of this is that this routine cannot be called
 * directly from C code, since it clobbers registers that the C ABI
 * expects the callee to preserve.
 *
 * librm.h defines a convenient macro REAL_CODE() for using real_call.
 * See librm.h and realmode.h for details and examples.
 *
 * Parameters:
 *   function : offset within .text16 of real-mode function to call
 *
 * Returns: none
 ****************************************************************************
 */
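/* Example usage (illustrative only; 32-bit build, with a hypothetical
 * .text16 routine rm_beep):
 *	pushl	$rm_beep
 *	call	real_call
 * C code normally uses the REAL_CODE() macro from librm.h rather than
 * calling real_call directly.
 */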
        .struct 0
RC_OFFSET_REGS:         .space SIZEOF_I386_REGS
RC_OFFSET_REGS_END:
.if64
RC_OFFSET_LREGS:        .space SIZEOF_X86_64_REGS
RC_OFFSET_LREG_RETADDR: .space SIZEOF_ADDR
.endif
RC_OFFSET_RETADDR:      .space SIZEOF_ADDR
RC_OFFSET_PARAMS:
RC_OFFSET_FUNCTION:     .space SIZEOF_ADDR
RC_OFFSET_END:
        .previous

        .section ".text.real_call", "ax", @progbits
        .CODE_DEFAULT
        .globl real_call
real_call:
.if64 ; /* Preserve registers and switch to protected mode, if applicable */
        call long_preserve_regs
        call long_to_prot
        .code32
.endif
        /* Create register dump and function pointer copy on PM stack */
        pushal
        pushl RC_OFFSET_FUNCTION(%esp)

        /* Switch to real mode and move register dump to RM stack */
        movl $( RC_OFFSET_REGS_END + 4 /* function pointer copy */ ), %ecx
        pushl $rc_rmode
        movl $VIRTUAL(rm_default_gdtr_idtr), %esi
        jmp prot_to_real
        .section ".text16.real_call", "ax", @progbits
        .code16
rc_rmode:
        /* Call real-mode function */
        popl rc_function
        popal
        call *rc_function
        pushal

        /* For sanity's sake, clear the direction flag as soon as possible */
        cld

        /* Switch to protected mode and move register dump back to PM stack */
        movl $RC_OFFSET_REGS_END, %ecx
        pushl $VIRTUAL(rc_pmode)
        jmp real_to_prot
        .section ".text.real_call", "ax", @progbits
        .code32
rc_pmode:
        /* Restore registers */
        popal

.if64 ; /* Switch to long mode and restore registers, if applicable */
        call prot_to_long
        .code64
        call long_restore_regs
.endif
        /* Return and discard function parameters */
        ret $( RC_OFFSET_END - RC_OFFSET_PARAMS )

        /* Function vector, used because "call xx(%sp)" is not a valid
         * 16-bit expression.
         */
        .section ".bss16.rc_function", "aw", @nobits
rc_function:    .word 0, 0

        /* Default real-mode global and interrupt descriptor table registers */
        .section ".data.rm_default_gdtr_idtr", "aw", @progbits
rm_default_gdtr_idtr:
        .word 0         /* Global descriptor table limit */
        .long 0         /* Global descriptor table base */
        .word 0x03ff    /* Interrupt descriptor table limit (256 * 4 - 1) */
        .long 0         /* Interrupt descriptor table base (real-mode IVT) */
/****************************************************************************
 * phys_call (protected-mode near call, 32-bit virtual return address)
 * phys_call (long-mode near call, 64-bit virtual return address)
 *
 * Call a function with flat 32-bit physical addressing
 *
 * The non-segment register values will be passed directly to the
 * function.  The segment registers will be set for flat 32-bit
 * physical addressing.  The non-segment register values set by the
 * function will be passed back to the caller.
 *
 * librm.h defines a convenient macro PHYS_CODE() for using phys_call.
 *
 * Parameters:
 *   function : virtual (sic) address of function to call
 *
 ****************************************************************************
 */
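/* Example usage (illustrative only; 32-bit build, with a hypothetical
 * function phys_poke):
 *	pushl	$phys_poke
 *	call	phys_call
 * C code normally uses the PHYS_CODE() macro from librm.h instead.
 */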
        .struct 0
.if64
PHC_OFFSET_LREGS:       .space SIZEOF_X86_64_REGS
PHC_OFFSET_LREG_RETADDR:.space SIZEOF_ADDR
.endif
PHC_OFFSET_RETADDR:     .space SIZEOF_ADDR
PHC_OFFSET_PARAMS:
PHC_OFFSET_FUNCTION:    .space SIZEOF_ADDR
PHC_OFFSET_END:
        .previous

        .section ".text.phys_call", "ax", @progbits
        .CODE_DEFAULT
        .globl phys_call
phys_call:
.if64 ; /* Preserve registers and switch to protected mode, if applicable */
        call long_preserve_regs
        call long_to_prot
        .code32
.endif
        /* Adjust function pointer to a physical address */
        pushl %ebp
        movl VIRTUAL(virt_offset), %ebp
        addl %ebp, ( PHC_OFFSET_FUNCTION + 4 /* saved %ebp */ )(%esp)
        popl %ebp

        /* Switch to physical addresses */
        call prot_to_phys

        /* Call function */
        call *PHC_OFFSET_FUNCTION(%esp)

        /* For sanity's sake, clear the direction flag as soon as possible */
        cld

        /* Switch to virtual addresses */
        call phys_to_prot

.if64 ; /* Switch to long mode and restore registers, if applicable */
        call prot_to_long
        .code64
        call long_restore_regs
.endif
        /* Return and discard function parameters */
        ret $( PHC_OFFSET_END - PHC_OFFSET_PARAMS )
/****************************************************************************
 * phys_to_long (protected-mode near call, 32-bit physical return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
        .section ".text.phys_to_long", "ax", @progbits
        .code32
phys_to_long:
        /* Switch to virtual addresses */
        call phys_to_prot

        /* Convert to 32-bit virtual return address */
        pushl %eax
        movl VIRTUAL(virt_offset), %eax
        subl %eax, 4(%esp)
        popl %eax

        /* Switch to long mode and return */
        jmp prot_to_long

        /* Expose as _phys_to_virt for use by COMBOOT */
        .globl _phys_to_virt
        .equ _phys_to_virt, phys_to_long
.endif

/****************************************************************************
 * long_to_phys (long-mode near call, 64-bit virtual return address)
 *
 * Used by COMBOOT.
 *
 ****************************************************************************
 */
.if64
        .section ".text.long_to_phys", "ax", @progbits
        .code64
long_to_phys:
        /* Switch to protected mode */
        call long_to_prot
        .code32

        /* Convert to 32-bit virtual return address */
        popl (%esp)

        /* Switch to physical addresses and return */
        jmp prot_to_phys

        /* Expose as _virt_to_phys for use by COMBOOT */
        .globl _virt_to_phys
        .equ _virt_to_phys, long_to_phys
.endif
/****************************************************************************
 * flatten_real_mode (real-mode near call)
 *
 * Switch to flat real mode
 *
 ****************************************************************************
 */
        .section ".text16.flatten_real_mode", "ax", @progbits
        .code16
        .globl flatten_real_mode
flatten_real_mode:
        /* Modify GDT to use flat real mode */
        movb $0x8f, real_cs + 6
        movb $0x8f, real_ds + 6

        /* Call dummy protected-mode function */
        virtcall flatten_dummy

        /* Restore GDT */
        movb $0x00, real_cs + 6
        movb $0x00, real_ds + 6

        /* Return */
        ret

        .section ".text.flatten_dummy", "ax", @progbits
        .CODE_DEFAULT
flatten_dummy:
        ret
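/* How this works (descriptive note only): writing 0x8f into byte 6 of
 * real_cs and real_ds sets G=1 and limit[19:16]=0xf, giving a 4GB
 * limit.  The dummy protected-mode call forces the CPU to reload and
 * cache these descriptors; the cached 4GB limits then survive the
 * switch back to real mode, yielding "flat" (unreal) real mode.
 */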
/****************************************************************************
 * Interrupt wrapper
 *
 * Used by the protected-mode and long-mode interrupt vectors to call
 * the interrupt() function.
 *
 * May be entered with either physical or virtual stack segment.
 ****************************************************************************
 */
        .section ".text.interrupt_wrapper", "ax", @progbits
        .code32
        .globl interrupt_wrapper
interrupt_wrapper:
        /* Preserve registers (excluding already-saved %eax and
         * otherwise unused registers which are callee-save for both
         * 32-bit and 64-bit ABIs).
         */
        pushl %ebx
        pushl %ecx
        pushl %edx
        pushl %esi
        pushl %edi

        /* Expand IRQ number to whole %eax register */
        movzbl %al, %eax

.if64 ; /* Skip transition to long mode, if applicable */
        movw %cs, %bx
        cmpw $LONG_CS, %bx
        je 1f
.endif
        /* Preserve segment registers and original %esp */
        pushl %ds
        pushl %es
        pushl %fs
        pushl %gs
        pushl %ss
        pushl %esp

        /* Switch to virtual addressing */
        call intr_to_prot
.if64
        /* Switch to long mode */
        call prot_to_long
        .code64

1:      /* Preserve long-mode caller-save registers */
        pushq %r8
        pushq %r9
        pushq %r10
        pushq %r11

        /* Expand IRQ number to whole %rdi register */
        movl %eax, %edi
.endif
        /* Call interrupt handler */
        call interrupt
.if64
        /* Restore long-mode caller-save registers */
        popq %r11
        popq %r10
        popq %r9
        popq %r8

        /* Skip transition back to protected mode, if applicable */
        cmpw $LONG_CS, %bx
        je 1f

        /* Switch to protected mode */
        call long_to_prot
        .code32
        /* Repeat the comparison: long_to_prot clobbers the flags, and
         * the "jne" before the final iret relies on them.
         */
        cmpw $LONG_CS, %bx
.endif
        /* Restore segment registers and original %esp */
        lss (%esp), %esp
        popl %ss
        popl %gs
        popl %fs
        popl %es
        popl %ds

1:      /* Restore registers */
        popl %edi
        popl %esi
        popl %edx
        popl %ecx
        popl %ebx
        popl %eax

        /* Return from interrupt (with REX prefix if required) */
.if64 ; jne 1f ; .byte 0x48 ; .endif
1:      iret
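/* A note on the REX trick above (descriptive only): 0x48 is a REX.W
 * prefix, so when the wrapper was entered from long mode the flags
 * from the preceding cmpw make the jne fall through and the iret
 * decodes as iretq, popping the 64-bit interrupt frame; otherwise the
 * prefix byte is skipped and a plain 32-bit iret is executed.
 */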
/****************************************************************************
 * Page tables
 *
 ****************************************************************************
 */
        .section ".pages", "aw", @nobits
        .align SIZEOF_PT

        /* Page map level 4 entries (PML4Es)
         *
         * This comprises
         *
         * - PML4E[0x000] covering [0x0000000000000000-0x0000007fffffffff]
         * - PML4E[0x1ff] covering [0xffffff8000000000-0xffffffffffffffff]
         *
         * These point to the PDPT.  This creates some aliased
         * addresses within unused portions of the 64-bit address
         * space, but allows us to use just a single PDPT.
         *
         * - PDE[...] covering arbitrary 2MB portions of I/O space
         *
         * These are 2MB pages created by ioremap() to cover I/O
         * device addresses.
         */
pml4e:
        .space SIZEOF_PT
        .size pml4e, . - pml4e

        .globl io_pages
        .equ io_pages, pml4e

        /* Page directory pointer table entries (PDPTEs)
         *
         * This comprises:
         *
         * - PDPTE[0x000] covering [0x0000000000000000-0x000000003fffffff]
         * - PDPTE[0x001] covering [0x0000000040000000-0x000000007fffffff]
         * - PDPTE[0x002] covering [0x0000000080000000-0x00000000bfffffff]
         * - PDPTE[0x003] covering [0x00000000c0000000-0x00000000ffffffff]
         *
         * These point to the appropriate page directories (in
         * pde_low) used to identity-map the whole of the 32-bit
         * address space.
         *
         * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
         *
         * This points back to the PML4, allowing the PML4 to be
         * (ab)used to hold 2MB pages used for I/O device addresses.
         *
         * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
         *
         * This points back to the PDPT itself, allowing the PDPT to
         * be (ab)used to hold PDEs covering .textdata.
         *
         * - PDE[N-M] covering [_textdata,_end)
         *
         * These are used to point to the page tables (in
         * pte_textdata) used to map our .textdata section.  Note
         * that each PDE covers 2MB, so we are likely to use only a
         * single PDE in practice.
         */
pdpte:
        .space SIZEOF_PT
        .size pdpte, . - pdpte
        .equ pde_textdata, pdpte /* (ab)use */

        /* Page directory entries (PDEs) for the low 4GB
         *
         * This comprises 2048 2MB pages to identity-map the whole of
         * the 32-bit address space.
         */
pde_low:
        .equ PDE_LOW_PTES, ( SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE )
        .equ PDE_LOW_PTS, ( ( PDE_LOW_PTES * SIZEOF_PTE ) / SIZEOF_PT )
        .space ( PDE_LOW_PTS * SIZEOF_PT )
        .size pde_low, . - pde_low
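        /* The arithmetic here (illustrative note only): PDE_LOW_PTES
         * = 2^32 / 2^21 = 2048 two-megabyte pages, and 2048 entries
         * of 8 bytes each occupy 16kB, i.e. PDE_LOW_PTS = 4
         * page-table-sized blocks.
         */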
        /* Page table entries (PTEs) for .textdata
         *
         * This comprises enough 4kB pages to map the whole of
         * .textdata.  The required number of PTEs is calculated by
         * the linker script.
         *
         * Note that these mappings do not cover the PTEs themselves.
         * This does not matter, since code running with paging
         * enabled never needs to access these PTEs.
         */
pte_textdata:
        /* Allocated by linker script; must be at the end of .textdata */

        .section ".bss.pml4", "aw", @nobits
pml4:   .long 0

/****************************************************************************
 * init_pages (protected-mode near call)
 *
 * Initialise the page tables ready for long mode.
 *
 * Parameters:
 *   %edi : virt_offset
 ****************************************************************************
 */
        .section ".text.init_pages", "ax", @progbits
        .code32
init_pages:
        /* Initialise PML4Es for low 4GB and negative 2GB */
        leal ( VIRTUAL(pdpte) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
        movl %eax, VIRTUAL(pml4e)
        movl %eax, ( VIRTUAL(pml4e) + SIZEOF_PT - SIZEOF_PTE )

        /* Initialise PDPTE for negative 1GB */
        movl %eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )

        /* Initialise PDPTE for I/O space */
        leal ( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
        movl %eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )

        /* Initialise PDPTEs for low 4GB */
        movl $PDE_LOW_PTS, %ecx
        leal ( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
               ( PG_P | PG_RW | PG_US ) )(%edi), %eax
1:      subl $SIZEOF_PT, %eax
        movl %eax, ( VIRTUAL(pdpte) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
        loop 1b

        /* Initialise PDEs for low 4GB */
        movl $PDE_LOW_PTES, %ecx
        leal ( 0 + ( PG_P | PG_RW | PG_US | PG_PS ) ), %eax
1:      subl $SIZEOF_2MB_PAGE, %eax
        movl %eax, ( VIRTUAL(pde_low) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
        loop 1b

        /* Initialise PDEs for .textdata */
        movl $_textdata_pdes, %ecx
        leal ( VIRTUAL(_etextdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
        movl $VIRTUAL(_textdata), %ebx
        shrl $( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ), %ebx
        andl $( SIZEOF_PT - 1 ), %ebx
1:      subl $SIZEOF_PT, %eax
        movl %eax, (VIRTUAL(pde_textdata) - SIZEOF_PTE)(%ebx,%ecx,SIZEOF_PTE)
        loop 1b

        /* Initialise PTEs for .textdata */
        movl $_textdata_ptes, %ecx
        leal ( VIRTUAL(_textdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
        addl $_textdata_paged_len, %eax
1:      subl $SIZEOF_4KB_PAGE, %eax
        movl %eax, ( VIRTUAL(pte_textdata) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)
        loop 1b

        /* Record PML4 physical address */
        leal VIRTUAL(pml4e)(%edi), %eax
        movl %eax, VIRTUAL(pml4)

        /* Return */
        ret
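/* End-to-end sketch of a .textdata translation under -mcmodel=kernel
 * (illustrative note only): a virtual address in the negative 1GB,
 * e.g. 0xffffffffc0000000 + x, selects PML4E[0x1ff] (which points to
 * the PDPT), then PDPTE[0x1ff] (which points back into the PDPT,
 * abused as pde_textdata), then the PDE and PTE set up above, finally
 * yielding the physical address of the corresponding .textdata byte.
 */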