You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

start32.S 18KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767
  1. /* #defines because ljmp wants a number, probably gas bug */
  2. /* .equ KERN_CODE_SEG,_pmcs-_gdt */
  3. #define KERN_CODE_SEG 0x08
  4. .equ KERN_DATA_SEG,_pmds-_gdt
  5. /* .equ REAL_CODE_SEG,_rmcs-_gdt */
  6. #define REAL_CODE_SEG 0x18
  7. .equ REAL_DATA_SEG,_rmds-_gdt
  8. .equ FLAT_CODE_SEG,_pmcs2-_gdt
  9. .equ FLAT_DATA_SEG,_pmds2-_gdt
  10. .equ CR0_PE,1
  11. #ifdef CONFIG_X86_64
  12. .equ LM_CODE_SEG, _lmcs-_gdt
  13. .equ LM_DATA_SEG, _lmds-_gdt
  14. #endif
  15. .equ MSR_K6_EFER, 0xC0000080
  16. .equ EFER_LME, 0x00000100
  17. .equ X86_CR4_PAE, 0x00000020
  18. .equ CR0_PG, 0x80000000
  19. #ifdef GAS291
  20. #define DATA32 data32;
  21. #define ADDR32 addr32;
  22. #define LJMPI(x) ljmp x
  23. #else
  24. #define DATA32 data32
  25. #define ADDR32 addr32
  26. /* newer GAS295 require #define LJMPI(x) ljmp *x */
  27. #define LJMPI(x) ljmp x
  28. #endif
  29. #define BOCHSBP xchgw %bx, %bx
  30. #include "callbacks.h"
  31. #define NUM_PUSHA_REGS (8)
  32. #define NUM_SEG_REGS (6)
  33. /*
  34. * NOTE: if you write a subroutine that is called from C code (gcc/egcs),
  35. * then you only have to take care of %ebx, %esi, %edi and %ebp. These
  36. * registers must not be altered under any circumstance. All other registers
  37. * may be clobbered without any negative side effects. If you don't follow
  38. * this rule then you'll run into strange effects that only occur on some
  39. * gcc versions (because the register allocator may use different registers).
  40. *
  41. * All the data32 prefixes for the ljmp instructions are necessary, because
  42. * the assembler emits code with a relocation address of 0. This means that
  43. * all destinations are initially negative, which the assembler doesn't grok,
  44. * because for some reason negative numbers don't fit into 16 bits. The addr32
  45. * prefixes are there for the same reasons, because otherwise the memory
  46. * references are only 16 bit wide. Theoretically they are all superfluous.
  47. * One last note about prefixes: the data32 prefixes on all call _real_to_prot
  48. * instructions could be removed if the _real_to_prot function is changed to
  49. * deal correctly with 16 bit return addresses. I tried it, but failed.
  50. */
  51. /**************************************************************************
  52. * START
  53. *
  54. * This file is no longer enterered from the top. init.S will jump to
  55. * either _in_call or _rm_in_call, depending on the processor mode
  56. * when init.S was entered.
  57. **************************************************************************/
  58. .text
  59. .arch i386
  60. .code32
  61. /**************************************************************************
  62. _IN_CALL - make a call in to Etherboot.
  63. **************************************************************************/
  64. /* There are two 32-bit entry points: _in_call and _in_call_far, for
  65. * near calls and far calls respectively. Both should be called with
  66. * flat physical addresses. They will result in a call to the C
  67. * routine in_call(); see there for API details.
  68. *
  69. * Note that this routine makes fairly heavy use of the stack and no
  70. * use of fixed data areas. This is because it must be re-entrant;
  71. * there may be more than one concurrent call in to Etherboot.
  72. */
  73. #define IC_OFFSET_VA_LIST_PTR ( 0 )
  74. #define IC_OFFSET_VA_LIST_PTR_E ( IC_OFFSET_VA_LIST_PTR + 4 )
  75. #define IC_OFFSET_REGISTERS ( IC_OFFSET_VA_LIST_PTR_E )
  76. #define IC_OFFSET_REGISTERS_E ( IC_OFFSET_REGISTERS + ( NUM_PUSHA_REGS * 4 ) )
  77. #define IC_OFFSET_SEG_REGS ( IC_OFFSET_REGISTERS_E )
  78. #define IC_OFFSET_SEG_REGS_E ( IC_OFFSET_SEG_REGS + ( NUM_SEG_REGS * 2 ) )
  79. #define IC_OFFSET_GDT ( IC_OFFSET_SEG_REGS_E )
  80. #define IC_OFFSET_GDT_E ( IC_OFFSET_GDT + 8 )
  81. #define IC_OFFSET_FLAGS ( IC_OFFSET_GDT_E )
  82. #define IC_OFFSET_FLAGS_E ( IC_OFFSET_FLAGS + 4 )
  83. #define IC_OFFSET_RETADDR ( IC_OFFSET_FLAGS_E )
  84. #define IC_OFFSET_RETADDR_E ( IC_OFFSET_RETADDR + 8 )
  85. #define IC_OFFSET_ORIG_STACK ( IC_OFFSET_RETADDR )
  86. #define IC_OFFSET_OPCODE ( IC_OFFSET_ORIG_STACK + 8 )
  87. #define IC_OFFSET_OPCODE_E ( IC_OFFSET_OPCODE + 4 )
  88. #define IC_OFFSET_VA_LIST ( IC_OFFSET_OPCODE_E )
  89. .code32
  90. .globl _in_call
  91. .globl _in_call_far
  92. _in_call:
  93. /* Expand to far return address */
  94. pushl %eax /* Store %eax */
  95. xorl %eax, %eax
  96. movw %cs, %ax
  97. xchgl %eax, 4(%esp) /* 4(%esp) = %cs, %eax = ret addr */
  98. xchgl %eax, 0(%esp) /* 0(%esp) = ret addr, restore %eax */
  99. _in_call_far:
  100. /* Store flags */
  101. pushfl
  102. /* Store the GDT */
  103. subl $8, %esp
  104. sgdt 0(%esp)
  105. /* Store segment register values */
  106. pushw %gs
  107. pushw %fs
  108. pushw %es
  109. pushw %ds
  110. pushw %ss
  111. pushw %cs
  112. /* Store general-purpose register values */
  113. pushal
  114. /* Replace %esp in store with physical %esp value on entry */
  115. leal (IC_OFFSET_ORIG_STACK - IC_OFFSET_REGISTERS)(%esp), %eax
  116. movl %eax, (IC_OFFSET_REGISTERS - IC_OFFSET_REGISTERS + 12)(%esp)
  117. /* Store va_list pointer (physical address) */
  118. leal (IC_OFFSET_VA_LIST - IC_OFFSET_VA_LIST_PTR_E)(%esp), %eax
  119. pushl %eax
  120. /* IC_OFFSET_*(%esp) are now valid */
  121. /* Switch to virtual addresses */
  122. call _phys_to_virt
  123. /* Fixup the va_list pointer */
  124. movl virt_offset, %ebp
  125. subl %ebp, IC_OFFSET_VA_LIST_PTR(%esp)
  126. /* Check opcode for EB_USE_INTERNAL_STACK flag */
  127. movl IC_OFFSET_OPCODE(%esp), %eax
  128. testl $EB_USE_INTERNAL_STACK, %eax
  129. je 2f
  130. /* Use internal stack flag set */
  131. /* Check %esp is not already in internal stack range */
  132. leal _stack, %esi /* %esi = bottom of internal stack */
  133. leal _estack, %edi /* %edi = top of internal stack */
  134. cmpl %esi, %esp
  135. jb 1f
  136. cmpl %edi, %esp
  137. jbe 2f
  138. 1: /* %esp not currently in internal stack range */
  139. movl %esp, %esi /* %esi = original stack */
  140. movl $IC_OFFSET_OPCODE_E, %ecx /* %ecx = length to transfer */
  141. subl %ecx, %edi /* %edi = internal stack pos */
  142. movl %edi, %esp /* = new %esp */
  143. rep movsb /* Copy data to internal stack */
  144. 2:
  145. /* Call to C code */
  146. call i386_in_call
  147. /* Set %eax (return code from C) in registers structure on
  148. * stack, so that we return it to the caller.
  149. */
  150. movl %eax, (IC_OFFSET_REGISTERS + 28)(%esp)
  151. /* Calculate physical continuation address */
  152. movl virt_offset, %ebp
  153. movzwl (IC_OFFSET_SEG_REGS + 0)(%esp), %eax /* %cs */
  154. movzwl (IC_OFFSET_SEG_REGS + 2)(%esp), %ebx /* %ss */
  155. pushl %eax /* Continuation segment */
  156. leal 1f(%ebp), %eax
  157. pushl %eax /* Continuation offset */
  158. /* Restore caller's GDT */
  159. cli /* Temporarily disable interrupts */
  160. lgdt (8+IC_OFFSET_GDT)(%esp)
  161. /* Reset %ss and adjust %esp */
  162. movw %bx, %ss
  163. addl %ebp, %esp
  164. lret /* Reload %cs:eip, flush prefetch */
  165. 1:
  166. /* Skip va_list ptr */
  167. popl %eax
  168. /* Reload general-purpose registers to be returned */
  169. popal
  170. /* Reload segment registers as passed in from caller */
  171. popw %gs
  172. popw %fs
  173. popw %es
  174. popw %ds
  175. addl $(4+8), %esp /* Skip %cs, %ss and GDT (already reloaded) */
  176. /* Restore flags (including revert of interrupt status) */
  177. popfl
  178. /* Restore physical %esp from entry. It will only be
  179. * different if EB_USE_INTERNAL_STACK was specified.
  180. */
  181. movl ( 12 + IC_OFFSET_REGISTERS - IC_OFFSET_RETADDR )(%esp), %esp
  182. /* Check for EB_SKIP_OPCODE */
  183. pushfl
  184. testl $EB_SKIP_OPCODE, 12(%esp)
  185. jnz 1f
  186. /* Normal return */
  187. popfl
  188. lret
  189. 1: /* Return and skip opcode */
  190. popfl
  191. lret $4
  192. /**************************************************************************
  193. RELOCATE_TO - relocate etherboot to the specified address
  194. **************************************************************************/
  195. .globl relocate_to
  196. relocate_to:
  197. /* Save the callee save registers */
  198. pushl %ebp
  199. pushl %esi
  200. pushl %edi
  201. /* Compute the virtual destination address */
  202. movl 16(%esp), %edi # dest
  203. subl virt_offset, %edi
  204. /* Compute the new value of virt_offset */
  205. movl 16(%esp), %ebp # virt_offset
  206. subl $_text, %ebp
  207. /* Fixup the gdt */
  208. pushl $_pmcs
  209. pushl %ebp # virt_offset
  210. call set_seg_base
  211. addl $8, %esp
  212. /* Fixup gdtarg */
  213. leal _gdt(%ebp), %eax
  214. movl %eax, gdtarg +2
  215. /* Fixup virt_offset */
  216. movl %ebp, virt_offset
  217. /* Load the move parameters */
  218. movl $_text, %esi
  219. movl $_end, %ecx
  220. subl %esi, %ecx
  221. /* Move etherboot uses %esi, %edi, %ecx */
  222. rep
  223. movsb
  224. /* Reload the gdt */
  225. cs
  226. lgdt gdtarg
  227. /* Reload %cs */
  228. ljmp $KERN_CODE_SEG, $1f
  229. 1:
  230. /* reload other segment registers */
  231. movl $KERN_DATA_SEG, %eax
  232. movl %eax,%ds
  233. movl %eax,%es
  234. movl %eax,%ss
  235. movl %eax,%fs
  236. movl %eax,%gs
  237. /* Restore the callee save registers */
  238. popl %edi
  239. popl %esi
  240. popl %ebp
  241. /* return */
  242. ret
  243. /**************************************************************************
  244. XSTART32 - Transfer control to the kernel just loaded
  245. **************************************************************************/
  246. .globl xstart32
  247. xstart32:
  248. /* Save the callee save registers */
  249. movl %ebp, os_regs + 32
  250. movl %esi, os_regs + 36
  251. movl %edi, os_regs + 40
  252. movl %ebx, os_regs + 44
  253. /* save the return address */
  254. popl %eax
  255. movl %eax, os_regs + 48
  256. /* save the stack pointer */
  257. movl %esp, os_regs + 52
  258. /* Get the new destination address */
  259. popl %ecx
  260. /* Store the physical address of xend on the stack */
  261. movl $xend32, %ebx
  262. addl virt_offset, %ebx
  263. pushl %ebx
  264. /* Store the destination address on the stack */
  265. pushl $FLAT_CODE_SEG
  266. pushl %ecx
  267. /* Cache virt_offset */
  268. movl virt_offset, %ebp
  269. /* Switch to using physical addresses */
  270. call _virt_to_phys
  271. /* Save the target stack pointer */
  272. movl %esp, os_regs + 12(%ebp)
  273. leal os_regs(%ebp), %esp
  274. /* Store the pointer to os_regs */
  275. movl %esp, os_regs_ptr(%ebp)
  276. /* Load my new registers */
  277. popal
  278. movl (-32 + 12)(%esp), %esp
  279. /* Jump to the new kernel
  280. * The lret switches to a flat code segment
  281. */
  282. lret
  283. .balign 4
  284. .globl xend32
  285. xend32:
  286. /* Fixup %eflags */
  287. nop
  288. cli
  289. cld
  290. /* Load %esp with &os_regs + virt_offset */
  291. .byte 0xbc /* movl $0, %esp */
  292. os_regs_ptr:
  293. .long 0
  294. /* Save the result registers */
  295. addl $32, %esp
  296. pushal
  297. /* Compute virt_offset */
  298. movl %esp, %ebp
  299. subl $os_regs, %ebp
  300. /* Load the stack pointer */
  301. movl 52(%esp), %esp
  302. /* Enable the virtual addresses */
  303. leal _phys_to_virt(%ebp), %eax
  304. call *%eax
  305. /* Restore the callee save registers */
  306. movl os_regs + 32, %ebp
  307. movl os_regs + 36, %esi
  308. movl os_regs + 40, %edi
  309. movl os_regs + 44, %ebx
  310. movl os_regs + 48, %edx
  311. movl os_regs + 52, %esp
  312. /* Get the C return value */
  313. movl os_regs + 28, %eax
  314. jmpl *%edx
  315. #ifdef CONFIG_X86_64
  316. .arch sledgehammer
  317. /**************************************************************************
  318. XSTART_lm - Transfer control to the kernel just loaded in long mode
  319. **************************************************************************/
  320. .globl xstart_lm
  321. xstart_lm:
  322. /* Save the callee save registers */
  323. pushl %ebp
  324. pushl %esi
  325. pushl %edi
  326. pushl %ebx
  327. /* Cache virt_offset && (virt_offset & 0xfffff000) */
  328. movl virt_offset, %ebp
  329. movl %ebp, %ebx
  330. andl $0xfffff000, %ebx
  331. /* Switch to using physical addresses */
  332. call _virt_to_phys
  333. /* Initialize the page tables */
  334. /* Level 4 */
  335. leal 0x23 + pgt_level3(%ebx), %eax
  336. leal pgt_level4(%ebx), %edi
  337. movl %eax, (%edi)
  338. /* Level 3 */
  339. leal 0x23 + pgt_level2(%ebx), %eax
  340. leal pgt_level3(%ebx), %edi
  341. movl %eax, 0x00(%edi)
  342. addl $4096, %eax
  343. movl %eax, 0x08(%edi)
  344. addl $4096, %eax
  345. movl %eax, 0x10(%edi)
  346. addl $4096, %eax
  347. movl %eax, 0x18(%edi)
  348. /* Level 2 */
  349. movl $0xe3, %eax
  350. leal pgt_level2(%ebx), %edi
  351. leal 16384(%edi), %esi
  352. pgt_level2_loop:
  353. movl %eax, (%edi)
  354. addl $8, %edi
  355. addl $0x200000, %eax
  356. cmp %esi, %edi
  357. jne pgt_level2_loop
  358. /* Point at the x86_64 page tables */
  359. leal pgt_level4(%ebx), %edi
  360. movl %edi, %cr3
  361. /* Setup for the return from 64bit mode */
  362. /* 64bit align the stack */
  363. movl %esp, %ebx /* original stack pointer + 16 */
  364. andl $0xfffffff8, %esp
  365. /* Save original stack pointer + 16 */
  366. pushl %ebx
  367. /* Save virt_offset */
  368. pushl %ebp
  369. /* Setup for the jmp to 64bit long mode */
  370. leal start_lm(%ebp), %eax
  371. movl %eax, 0x00 + start_lm_addr(%ebp)
  372. movl $LM_CODE_SEG, %eax
  373. movl %eax, 0x04 + start_lm_addr(%ebp)
  374. /* Setup for the jump out of 64bit long mode */
  375. leal end_lm(%ebp), %eax
  376. movl %eax, 0x00 + end_lm_addr(%ebp)
  377. movl $FLAT_CODE_SEG, %eax
  378. movl %eax, 0x04 + end_lm_addr(%ebp)
  379. /* Enable PAE mode */
  380. movl %cr4, %eax
  381. orl $X86_CR4_PAE, %eax
  382. movl %eax, %cr4
  383. /* Enable long mode */
  384. movl $MSR_K6_EFER, %ecx
  385. rdmsr
  386. orl $EFER_LME, %eax
  387. wrmsr
  388. /* Start paging, entering 32bit compatiblity mode */
  389. movl %cr0, %eax
  390. orl $CR0_PG, %eax
  391. movl %eax, %cr0
  392. /* Enter 64bit long mode */
  393. ljmp *start_lm_addr(%ebp)
  394. .code64
  395. start_lm:
  396. /* Load 64bit data segments */
  397. movl $LM_DATA_SEG, %eax
  398. movl %eax, %ds
  399. movl %eax, %es
  400. movl %eax, %ss
  401. andq $0xffffffff, %rbx
  402. /* Get the address to jump to */
  403. movl 20(%rbx), %edx
  404. andq $0xffffffff, %rdx
  405. /* Get the argument pointer */
  406. movl 24(%rbx), %ebx
  407. andq $0xffffffff, %rbx
  408. /* Jump to the 64bit code */
  409. call *%rdx
  410. /* Preserve the result */
  411. movl %eax, %edx
  412. /* Fixup %eflags */
  413. cli
  414. cld
  415. /* Switch to 32bit compatibility mode */
  416. ljmp *end_lm_addr(%rip)
  417. .code32
  418. end_lm:
  419. /* Disable paging */
  420. movl %cr0, %eax
  421. andl $~CR0_PG, %eax
  422. movl %eax, %cr0
  423. /* Disable long mode */
  424. movl $MSR_K6_EFER, %ecx
  425. rdmsr
  426. andl $~EFER_LME, %eax
  427. wrmsr
  428. /* Disable PAE */
  429. movl %cr4, %eax
  430. andl $~X86_CR4_PAE, %eax
  431. movl %eax, %cr4
  432. /* Compute virt_offset */
  433. popl %ebp
  434. /* Compute the original stack pointer + 16 */
  435. popl %ebx
  436. movl %ebx, %esp
  437. /* Enable the virtual addresses */
  438. leal _phys_to_virt(%ebp), %eax
  439. call *%eax
  440. /* Restore the callee save registers */
  441. popl %ebx
  442. popl %esi
  443. popl %edi
  444. popl %ebp
  445. /* Get the C return value */
  446. movl %edx, %eax
  447. /* Return */
  448. ret
  449. .arch i386
  450. #endif /* CONFIG_X86_64 */
  451. /**************************************************************************
  452. SETJMP - Save stack context for non-local goto
  453. **************************************************************************/
  454. .globl setjmp
  455. setjmp:
  456. movl 4(%esp),%ecx /* jmpbuf */
  457. movl 0(%esp),%edx /* return address */
  458. movl %edx,0(%ecx)
  459. movl %ebx,4(%ecx)
  460. movl %esp,8(%ecx)
  461. movl %ebp,12(%ecx)
  462. movl %esi,16(%ecx)
  463. movl %edi,20(%ecx)
  464. movl $0,%eax
  465. ret
  466. /**************************************************************************
  467. LONGJMP - Non-local jump to a saved stack context
  468. **************************************************************************/
  469. .globl longjmp
  470. longjmp:
  471. movl 4(%esp),%edx /* jumpbuf */
  472. movl 8(%esp),%eax /* result */
  473. movl 0(%edx),%ecx
  474. movl 4(%edx),%ebx
  475. movl 8(%edx),%esp
  476. movl 12(%edx),%ebp
  477. movl 16(%edx),%esi
  478. movl 20(%edx),%edi
  479. cmpl $0,%eax
  480. jne 1f
  481. movl $1,%eax
  482. 1: movl %ecx,0(%esp)
  483. ret
  484. /**************************************************************************
  485. _VIRT_TO_PHYS - Transition from virtual to physical addresses
  486. Preserves all preservable registers and flags
  487. **************************************************************************/
  488. .globl _virt_to_phys
  489. _virt_to_phys:
  490. pushfl
  491. pushl %ebp
  492. pushl %eax
  493. movl virt_offset, %ebp /* Load virt_offset */
  494. addl %ebp, 12(%esp) /* Adjust the return address */
  495. /* reload the code segment */
  496. pushl $FLAT_CODE_SEG
  497. leal 1f(%ebp), %eax
  498. pushl %eax
  499. lret
  500. 1:
  501. /* reload other segment registers */
  502. movl $FLAT_DATA_SEG, %eax
  503. movl %eax, %ds
  504. movl %eax, %es
  505. movl %eax, %ss
  506. addl %ebp, %esp /* Adjust the stack pointer */
  507. movl %eax, %fs
  508. movl %eax, %gs
  509. popl %eax
  510. popl %ebp
  511. popfl
  512. ret
  513. /**************************************************************************
  514. _PHYS_TO_VIRT - Transition from using physical to virtual addresses
  515. Preserves all preservable registers and flags
  516. **************************************************************************/
  517. .globl _phys_to_virt
  518. _phys_to_virt:
  519. pushfl
  520. pushl %ebp
  521. pushl %eax
  522. call 1f
  523. 1: popl %ebp
  524. subl $1b, %ebp
  525. movl %ebp, virt_offset(%ebp)
  526. /* Fixup the gdt */
  527. leal _pmcs(%ebp), %eax
  528. pushl %eax
  529. pushl %ebp
  530. call set_seg_base
  531. addl $8, %esp
  532. /* Fixup gdtarg */
  533. leal _gdt(%ebp), %eax
  534. movl %eax, (gdtarg+2)(%ebp)
  535. /* Load the global descriptor table */
  536. cli
  537. lgdt %cs:gdtarg(%ebp)
  538. ljmp $KERN_CODE_SEG, $1f
  539. 1:
  540. /* reload other segment regsters */
  541. movl $KERN_DATA_SEG, %eax
  542. movl %eax, %ds
  543. movl %eax, %es
  544. movl %eax, %ss
  545. subl %ebp, %esp /* Adjust the stack pointer */
  546. movl %eax, %fs
  547. movl %eax, %gs
  548. subl %ebp, 12(%esp) /* Adjust the return address */
  549. popl %eax
  550. popl %ebp
  551. popfl
  552. ret
  553. /**************************************************************************
  554. SET_SEG_BASE - Set the base address of a segment register
  555. **************************************************************************/
  556. .globl set_seg_base
  557. set_seg_base:
  558. pushl %eax
  559. pushl %ebx
  560. movl 12(%esp), %eax /* %eax = base address */
  561. movl 16(%esp), %ebx /* %ebx = &code_descriptor */
  562. movw %ax, (0+2)(%ebx) /* CS base bits 0-15 */
  563. movw %ax, (8+2)(%ebx) /* DS base bits 0-15 */
  564. shrl $16, %eax
  565. movb %al, (0+4)(%ebx) /* CS base bits 16-23 */
  566. movb %al, (8+4)(%ebx) /* DS base bits 16-23 */
  567. movb %ah, (0+7)(%ebx) /* CS base bits 24-31 */
  568. movb %ah, (8+7)(%ebx) /* DS base bits 24-31 */
  569. popl %ebx
  570. popl %eax
  571. ret
  572. /**************************************************************************
  573. GLOBAL DESCRIPTOR TABLE
  574. **************************************************************************/
  575. .data
  576. .align 4
  577. .globl _gdt
  578. .globl gdtarg
  579. _gdt:
  580. gdtarg:
  581. .word _gdt_end - _gdt - 1 /* limit */
  582. .long _gdt /* addr */
  583. .word 0
  584. .globl _pmcs
  585. _pmcs:
  586. /* 32 bit protected mode code segment */
  587. .word 0xffff,0
  588. .byte 0,0x9f,0xcf,0
  589. _pmds:
  590. /* 32 bit protected mode data segment */
  591. .word 0xffff,0
  592. .byte 0,0x93,0xcf,0
  593. _rmcs:
  594. /* 16 bit real mode code segment */
  595. .word 0xffff,(0&0xffff)
  596. .byte (0>>16),0x9b,0x00,(0>>24)
  597. _rmds:
  598. /* 16 bit real mode data segment */
  599. .word 0xffff,(0&0xffff)
  600. .byte (0>>16),0x93,0x00,(0>>24)
  601. _pmcs2:
  602. /* 32 bit protected mode code segment, base 0 */
  603. .word 0xffff,0
  604. .byte 0,0x9f,0xcf,0
  605. _pmds2:
  606. /* 32 bit protected mode data segment, base 0 */
  607. .word 0xffff,0
  608. .byte 0,0x93,0xcf,0
  609. #ifdef CONFIG_X86_64
  610. _lmcs:
  611. /* 64bit long mode code segment, base 0 */
  612. .word 0xffff, 0
  613. .byte 0x00, 0x9f, 0xaf , 0x00
  614. _lmds:
  615. /* 64bit long mode data segment, base 0 */
  616. .word 0xffff, 0
  617. .byte 0x00, 0x93, 0xcf, 0x00
  618. #endif
  619. _gdt_end:
  620. /* The initial register contents */
  621. .balign 4
  622. .globl initial_regs
  623. initial_regs:
  624. .fill 8, 4, 0
  625. /* The virtual address offset */
  626. .globl virt_offset
  627. virt_offset:
  628. .long 0
  629. .section ".stack"
  630. .p2align 3
  631. /* allocate a 4K stack in the stack segment */
  632. .globl _stack
  633. _stack:
  634. .space 4096
  635. .globl _estack
  636. _estack:
  637. #ifdef CONFIG_X86_64
  638. .section ".bss"
  639. .p2align 12
  640. /* Include a dummy space in case we are loaded badly aligned */
  641. .space 4096
  642. /* Reserve enough space for a page table convering 4GB with 2MB pages */
  643. pgt_level4:
  644. .space 4096
  645. pgt_level3:
  646. .space 4096
  647. pgt_level2:
  648. .space 16384
  649. start_lm_addr:
  650. .space 8
  651. end_lm_addr:
  652. .space 8
  653. #endif