
start32.S

#include "virtaddr.h"

.equ MSR_K6_EFER,  0xC0000080
.equ EFER_LME,     0x00000100
.equ X86_CR4_PAE,  0x00000020
.equ CR0_PG,       0x80000000
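
/* MSR_K6_EFER is the Extended Feature Enable Register MSR (0xC0000080):
 * EFER_LME (bit 8) enables long mode.  X86_CR4_PAE (CR4 bit 5) enables
 * Physical Address Extension, and CR0_PG (CR0 bit 31) turns on paging.
 */
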
#ifdef GAS291
#define DATA32 data32;
#define ADDR32 addr32;
#define LJMPI(x) ljmp x
#else
#define DATA32 data32
#define ADDR32 addr32
/* newer GAS 2.95 requires #define LJMPI(x) ljmp *x */
#define LJMPI(x) ljmp x
#endif
/*
 * NOTE: if you write a subroutine that is called from C code (gcc/egcs),
 * then you only have to take care of %ebx, %esi, %edi and %ebp.  These
 * registers must not be altered under any circumstance.  All other registers
 * may be clobbered without any negative side effects.  If you don't follow
 * this rule then you'll run into strange effects that only occur on some
 * gcc versions (because the register allocator may use different registers).
 *
 * All the data32 prefixes for the ljmp instructions are necessary, because
 * the assembler emits code with a relocation address of 0.  This means that
 * all destinations are initially negative, which the assembler doesn't grok,
 * because for some reason negative numbers don't fit into 16 bits.  The addr32
 * prefixes are there for the same reasons, because otherwise the memory
 * references are only 16 bits wide.  Theoretically they are all superfluous.
 * One last note about prefixes: the data32 prefixes on all call _real_to_prot
 * instructions could be removed if the _real_to_prot function is changed to
 * deal correctly with 16-bit return addresses.  I tried it, but failed.
 */
.text
.arch i386
.code32

/**************************************************************************
XSTART32 - Transfer control to the kernel just loaded
**************************************************************************/
.globl xstart32
xstart32:
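        /* os_regs layout, as used below: bytes 0-31 hold the pushal/popal
         * register image (%edi, %esi, %ebp, %esp, %ebx, %edx, %ecx, %eax
         * at offsets 0, 4, ..., 28), offsets 32-44 the callee-saved
         * registers, offset 48 the return address and offset 52 the
         * caller's stack pointer.
         */
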
        /* Save the callee-saved registers */
        movl    %ebp, os_regs + 32
        movl    %esi, os_regs + 36
        movl    %edi, os_regs + 40
        movl    %ebx, os_regs + 44

        /* Save the return address */
        popl    %eax
        movl    %eax, os_regs + 48

        /* Save the stack pointer */
        movl    %esp, os_regs + 52

        /* Get the new destination address */
        popl    %ecx

        /* Store the physical address of xend32 on the stack */
        movl    $xend32, %ebx
        addl    virt_offset, %ebx
        pushl   %ebx

        /* Store the destination address on the stack */
        pushl   $PHYSICAL_CS
        pushl   %ecx
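
        /* The stack now holds, top down, the %eip:%cs far pointer that
         * the lret below consumes, followed by the physical address of
         * xend32 for the loaded kernel to return to.
         */
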
        /* Cache virt_offset */
        movl    virt_offset, %ebp

        /* Switch to using physical addresses */
        call    _virt_to_phys

        /* Save the target stack pointer */
        movl    %esp, os_regs + 12(%ebp)
        leal    os_regs(%ebp), %esp

        /* Store the pointer to os_regs */
        movl    %esp, os_regs_ptr(%ebp)

        /* Load my new registers */
        popal
        movl    (-32 + 12)(%esp), %esp
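
        /* popal discards the %esp slot of the register image, so the
         * target stack pointer saved at os_regs + 12 is reloaded by hand;
         * (-32 + 12)(%esp) reaches back into the frame popal just consumed.
         */
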
        /* Jump to the new kernel
         * The lret switches to a flat code segment
         */
        lret

.balign 4
.globl xend32
xend32:
        /* Fixup %eflags */
        nop
        cli
        cld

        /* Load %esp with &os_regs + virt_offset */
        .byte 0xbc /* movl $0, %esp */
os_regs_ptr:
        .long 0
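
        /* 0xbc is the opcode of "movl $imm32, %esp"; the .long at
         * os_regs_ptr is its immediate operand, patched at run time by
         * xstart32 with the physical address of os_regs.  This lets
         * xend32 find os_regs before virt_offset has been recomputed.
         */
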
        /* Save the result registers */
        addl    $32, %esp
        pushal

        /* Compute virt_offset */
        movl    %esp, %ebp
        subl    $os_regs, %ebp
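
        /* After pushal, %esp is back at the physical address of os_regs;
         * subtracting the link-time address of os_regs leaves
         * virt_offset = physical - virtual in %ebp.
         */
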
        /* Load the stack pointer */
        movl    52(%esp), %esp
        /* Enable the virtual addresses */
        leal    _phys_to_virt(%ebp), %eax
        call    *%eax

        /* Restore the callee-saved registers */
        movl    os_regs + 32, %ebp
        movl    os_regs + 36, %esi
        movl    os_regs + 40, %edi
        movl    os_regs + 44, %ebx
        movl    os_regs + 48, %edx
        movl    os_regs + 52, %esp

        /* Get the C return value */
        movl    os_regs + 28, %eax
        jmpl    *%edx
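
        /* %edx holds the return address saved on entry to xstart32, so
         * this indirect jump acts as the "ret" of the original call;
         * os_regs + 28 is the %eax slot of the pushal frame, i.e. the
         * value the loaded kernel returned.
         */
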
#ifdef CONFIG_X86_64
.arch sledgehammer

/**************************************************************************
XSTART_lm - Transfer control to the kernel just loaded in long mode
**************************************************************************/
.globl xstart_lm
xstart_lm:
        /* Save the callee-saved registers */
        pushl   %ebp
        pushl   %esi
        pushl   %edi
        pushl   %ebx
        /* Cache virt_offset and its page-aligned value
         * (virt_offset & 0xfffff000) */
        movl    virt_offset, %ebp
        movl    %ebp, %ebx
        andl    $0xfffff000, %ebx

        /* Switch to using physical addresses */
        call    _virt_to_phys
        /* Initialize the page tables */
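        /* Page-table entry flags used below: 0x23 = Present | Writable |
         * Accessed.  0xe3 additionally sets Dirty (0x40) and PS (0x80);
         * PS in a level-2 entry selects a 2MB page.
         */
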
        /* Level 4 */
        leal    0x23 + pgt_level3(%ebx), %eax
        leal    pgt_level4(%ebx), %edi
        movl    %eax, (%edi)

        /* Level 3 */
        leal    0x23 + pgt_level2(%ebx), %eax
        leal    pgt_level3(%ebx), %edi
        movl    %eax, 0x00(%edi)
        addl    $4096, %eax
        movl    %eax, 0x08(%edi)
        addl    $4096, %eax
        movl    %eax, 0x10(%edi)
        addl    $4096, %eax
        movl    %eax, 0x18(%edi)
        /* Level 2 */
        movl    $0xe3, %eax
        leal    pgt_level2(%ebx), %edi
        leal    16384(%edi), %esi
pgt_level2_loop:
        movl    %eax, (%edi)
        addl    $8, %edi
        addl    $0x200000, %eax
        cmp     %esi, %edi
        jne     pgt_level2_loop
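
        /* 16384 bytes / 8 bytes per entry = 2048 entries of 2MB each,
         * i.e. an identity map of the first 4GB of physical memory.
         */
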
        /* Point at the x86_64 page tables */
        leal    pgt_level4(%ebx), %edi
        movl    %edi, %cr3
        /* Setup for the return from 64bit mode */
        /* 64bit align the stack */
        movl    %esp, %ebx          /* original stack pointer + 16 */
        andl    $0xfffffff8, %esp

        /* Save original stack pointer + 16 */
        pushl   %ebx

        /* Save virt_offset */
        pushl   %ebp
        /* Setup for the jmp to 64bit long mode */
        leal    start_lm(%ebp), %eax
        movl    %eax, 0x00 + start_lm_addr(%ebp)
        movl    $LM_CODE_SEG, %eax
        movl    %eax, 0x04 + start_lm_addr(%ebp)

        /* Setup for the jump out of 64bit long mode */
        leal    end_lm(%ebp), %eax
        movl    %eax, 0x00 + end_lm_addr(%ebp)
        movl    $FLAT_CODE_SEG, %eax
        movl    %eax, 0x04 + end_lm_addr(%ebp)
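
        /* start_lm_addr and end_lm_addr are far pointers as read by an
         * indirect ljmp: a 32-bit offset at +0 followed by a 16-bit
         * segment selector at +4.
         */
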
        /* Enable PAE mode */
        movl    %cr4, %eax
        orl     $X86_CR4_PAE, %eax
        movl    %eax, %cr4

        /* Enable long mode */
        movl    $MSR_K6_EFER, %ecx
        rdmsr
        orl     $EFER_LME, %eax
        wrmsr

        /* Start paging, entering 32bit compatibility mode */
        movl    %cr0, %eax
        orl     $CR0_PG, %eax
        movl    %eax, %cr0

        /* Enter 64bit long mode */
        ljmp    *start_lm_addr(%ebp)
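
        /* This is the architected long-mode entry sequence: set CR4.PAE,
         * set EFER.LME, then set CR0.PG.  The CPU is then in compatibility
         * mode until the far jump loads a code segment whose L bit is set
         * (LM_CODE_SEG here), which activates 64-bit mode proper.
         */
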
.code64
start_lm:
        /* Load 64bit data segments */
        movl    $LM_DATA_SEG, %eax
        movl    %eax, %ds
        movl    %eax, %es
        movl    %eax, %ss
        andq    $0xffffffff, %rbx

        /* Get the address to jump to */
        movl    20(%rbx), %edx
        andq    $0xffffffff, %rdx

        /* Get the argument pointer */
        movl    24(%rbx), %ebx
        andq    $0xffffffff, %rbx
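
        /* %rbx + 16 is the stack pointer at entry to xstart_lm, so
         * 20(%rbx) and 24(%rbx) pick up the two 32-bit arguments above
         * the return address: the function to call and its argument
         * pointer.  The andq instructions are intended to clear the
         * upper register halves; note that a movl to a 32-bit register
         * already zero-extends into the full 64-bit register.
         */
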
        /* Jump to the 64bit code */
        call    *%rdx
        /* Preserve the result */
        movl    %eax, %edx

        /* Fixup %eflags */
        cli
        cld

        /* Switch to 32bit compatibility mode */
        ljmp    *end_lm_addr(%rip)
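
        /* RIP-relative addressing finds end_lm_addr wherever this code
         * has been relocated, with no virt_offset fixup needed; the
         * FLAT_CODE_SEG selector stored there drops the CPU back into
         * 32-bit compatibility mode.
         */
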
.code32
end_lm:
        /* Disable paging */
        movl    %cr0, %eax
        andl    $~CR0_PG, %eax
        movl    %eax, %cr0

        /* Disable long mode */
        movl    $MSR_K6_EFER, %ecx
        rdmsr
        andl    $~EFER_LME, %eax
        wrmsr

        /* Disable PAE */
        movl    %cr4, %eax
        andl    $~X86_CR4_PAE, %eax
        movl    %eax, %cr4
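
        /* Leaving long mode mirrors entry in reverse order: clear CR0.PG
         * first (dropping back to protected mode), then EFER.LME, then
         * CR4.PAE.
         */
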
        /* Restore virt_offset */
        popl    %ebp

        /* Restore the original stack pointer + 16 */
        popl    %ebx
        movl    %ebx, %esp

        /* Enable the virtual addresses */
        leal    _phys_to_virt(%ebp), %eax
        call    *%eax

        /* Restore the callee-saved registers */
        popl    %ebx
        popl    %esi
        popl    %edi
        popl    %ebp

        /* Get the C return value */
        movl    %edx, %eax

        /* Return */
        ret

.arch i386
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_64
.section ".bss"
.p2align 12

/* Include a dummy space in case we are loaded badly aligned */
.space 4096

/* Reserve enough space for page tables covering 4GB with 2MB pages */
pgt_level4:
        .space 4096
pgt_level3:
        .space 4096
pgt_level2:
        .space 16384
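
/* Sizing: one level-4 page with a single entry in use, one level-3 page
 * with four entries, and four level-2 pages (4 * 512 entries * 2MB = 4GB).
 */
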
start_lm_addr:
        .space 8
end_lm_addr:
        .space 8
#endif /* CONFIG_X86_64 */