hvm.c

/*
 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/cpuid.h>
#include <ipxe/msr.h>
#include <ipxe/xen.h>
#include <ipxe/xenver.h>
#include <ipxe/xenmem.h>
#include <ipxe/xenstore.h>
#include <ipxe/xenbus.h>
#include <ipxe/xengrant.h>
#include "hvm.h"

/** @file
 *
 * Xen HVM driver
 *
 */

/**
 * Get CPUID base
 *
 * @v hvm           HVM device
 * @ret rc          Return status code
 */
static int hvm_cpuid_base ( struct hvm_device *hvm ) {
        struct {
                uint32_t ebx;
                uint32_t ecx;
                uint32_t edx;
        } __attribute__ (( packed )) signature;
        uint32_t base;
        uint32_t version;
        uint32_t discard_eax;
        uint32_t discard_ebx;
        uint32_t discard_ecx;
        uint32_t discard_edx;
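
        /* Xen advertises itself through a synthetic CPUID leaf range
         * (conventionally 0x40000000 upwards).  The 12-byte vendor
         * signature is returned in EBX, ECX and EDX of the base leaf,
         * and the leaf at ( base + HVM_CPUID_VERSION ) reports the Xen
         * version as major.minor packed into EAX.
         */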
        /* Scan for magic signature */
        for ( base = HVM_CPUID_MIN ; base <= HVM_CPUID_MAX ;
              base += HVM_CPUID_STEP ) {
                cpuid ( base, &discard_eax, &signature.ebx, &signature.ecx,
                        &signature.edx );
                if ( memcmp ( &signature, HVM_CPUID_MAGIC,
                              sizeof ( signature ) ) == 0 ) {
                        hvm->cpuid_base = base;
                        cpuid ( ( base + HVM_CPUID_VERSION ), &version,
                                &discard_ebx, &discard_ecx, &discard_edx );
                        DBGC2 ( hvm, "HVM using CPUID base %#08x (v%d.%d)\n",
                                base, ( version >> 16 ), ( version & 0xffff ) );
                        return 0;
                }
        }

        DBGC ( hvm, "HVM could not find hypervisor\n" );
        return -ENODEV;
}

/**
 * Map hypercall page(s)
 *
 * @v hvm           HVM device
 * @ret rc          Return status code
 */
static int hvm_map_hypercall ( struct hvm_device *hvm ) {
        uint32_t pages;
        uint32_t msr;
        uint32_t discard_ecx;
        uint32_t discard_edx;
        physaddr_t hypercall_phys;
        uint32_t version;
        static xen_extraversion_t extraversion;
        int xenrc;
        int rc;

        /* Get number of hypercall pages and MSR to use */
        cpuid ( ( hvm->cpuid_base + HVM_CPUID_PAGES ), &pages, &msr,
                &discard_ecx, &discard_edx );

        /* Allocate pages */
        hvm->hypercall_len = ( pages * PAGE_SIZE );
        hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
        if ( ! hvm->xen.hypercall ) {
                DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
                       pages );
                return -ENOMEM;
        }
        hypercall_phys = virt_to_phys ( hvm->xen.hypercall );
        DBGC2 ( hvm, "HVM hypercall page(s) at [%#08lx,%#08lx) via MSR %#08x\n",
                hypercall_phys, ( hypercall_phys + hvm->hypercall_len ), msr );
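
        /* Writing the page's physical address to the hypervisor-supplied
         * MSR causes Xen to fill the page with hypercall trampoline
         * code; all subsequent hypercalls are made by calling into this
         * page.
         */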
        /* Write to MSR */
        wrmsr ( msr, hypercall_phys );

        /* Check that hypercall mechanism is working */
        version = xenver_version ( &hvm->xen );
        if ( ( xenrc = xenver_extraversion ( &hvm->xen, &extraversion ) ) != 0){
                rc = -EXEN ( xenrc );
                DBGC ( hvm, "HVM could not get extraversion: %s\n",
                       strerror ( rc ) );
                return rc;
        }
        DBGC2 ( hvm, "HVM found Xen version %d.%d%s\n",
                ( version >> 16 ), ( version & 0xffff ), extraversion );

        return 0;
}

/**
 * Unmap hypercall page(s)
 *
 * @v hvm           HVM device
 */
static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {

        /* Free pages */
        free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
}

/**
 * Allocate and map MMIO space
 *
 * @v hvm           HVM device
 * @v space         Source mapping space
 * @v len           Length (must be a multiple of PAGE_SIZE)
 * @ret mmio        MMIO space address, or NULL on error
 */
static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space,
                            size_t len ) {
        struct xen_add_to_physmap add;
        struct xen_remove_from_physmap remove;
        unsigned int pages = ( len / PAGE_SIZE );
        physaddr_t mmio_phys;
        unsigned int i;
        void *mmio;
        int xenrc;
        int rc;

        /* Sanity check */
        assert ( ( len % PAGE_SIZE ) == 0 );

        /* Check for available space */
        if ( ( hvm->mmio_offset + len ) > hvm->mmio_len ) {
                DBGC ( hvm, "HVM could not allocate %zd bytes of MMIO space "
                       "(%zd of %zd remaining)\n", len,
                       ( hvm->mmio_len - hvm->mmio_offset ), hvm->mmio_len );
                goto err_no_space;
        }

        /* Map this space */
        mmio = ioremap ( ( hvm->mmio + hvm->mmio_offset ), len );
        if ( ! mmio ) {
                DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n",
                       ( hvm->mmio + hvm->mmio_offset ),
                       ( hvm->mmio + hvm->mmio_offset + len ) );
                goto err_ioremap;
        }
        mmio_phys = virt_to_phys ( mmio );
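
        /* The MMIO BAR is simply a region of otherwise-unused
         * guest-physical address space; XENMEM_add_to_physmap asks Xen
         * to back each page of it with the special frame (shared info,
         * grant table, etc.) identified by the mapping space and index.
         */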
        /* Add to physical address space */
        for ( i = 0 ; i < pages ; i++ ) {
                add.domid = DOMID_SELF;
                add.idx = i;
                add.space = space;
                add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
                if ( ( xenrc = xenmem_add_to_physmap ( &hvm->xen, &add ) ) !=0){
                        rc = -EXEN ( xenrc );
                        DBGC ( hvm, "HVM could not add space %d idx %d at "
                               "[%08lx,%08lx): %s\n", space, i,
                               ( mmio_phys + ( i * PAGE_SIZE ) ),
                               ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
                               strerror ( rc ) );
                        goto err_add_to_physmap;
                }
        }

        /* Update offset */
        hvm->mmio_offset += len;

        return mmio;
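
        /* Error unwind path (not reached on success) */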
        i = pages;
 err_add_to_physmap:
        for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
                remove.domid = DOMID_SELF;
                remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
                xenmem_remove_from_physmap ( &hvm->xen, &remove );
        }
        iounmap ( mmio );
 err_ioremap:
 err_no_space:
        return NULL;
}

/**
 * Unmap MMIO space
 *
 * @v hvm           HVM device
 * @v mmio          MMIO space address
 * @v len           Length (must be a multiple of PAGE_SIZE)
 */
static void hvm_iounmap ( struct hvm_device *hvm, void *mmio, size_t len ) {
        struct xen_remove_from_physmap remove;
        physaddr_t mmio_phys = virt_to_phys ( mmio );
        unsigned int pages = ( len / PAGE_SIZE );
        unsigned int i;
        int xenrc;
        int rc;

        /* Unmap this space */
        iounmap ( mmio );

        /* Remove from physical address space */
        for ( i = 0 ; i < pages ; i++ ) {
                remove.domid = DOMID_SELF;
                remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
                if ( ( xenrc = xenmem_remove_from_physmap ( &hvm->xen,
                                                            &remove ) ) != 0 ) {
                        rc = -EXEN ( xenrc );
                        DBGC ( hvm, "HVM could not remove space [%08lx,%08lx): "
                               "%s\n", ( mmio_phys + ( i * PAGE_SIZE ) ),
                               ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
                               strerror ( rc ) );
                        /* Nothing we can do about this */
                }
        }
}

/**
 * Map shared info page
 *
 * @v hvm           HVM device
 * @ret rc          Return status code
 */
static int hvm_map_shared_info ( struct hvm_device *hvm ) {
        physaddr_t shared_info_phys;
        int rc;

        /* Map shared info page */
        hvm->xen.shared = hvm_ioremap ( hvm, XENMAPSPACE_shared_info,
                                        PAGE_SIZE );
        if ( ! hvm->xen.shared ) {
                rc = -ENOMEM;
                goto err_alloc;
        }
        shared_info_phys = virt_to_phys ( hvm->xen.shared );
        DBGC2 ( hvm, "HVM shared info page at [%#08lx,%#08lx)\n",
                shared_info_phys, ( shared_info_phys + PAGE_SIZE ) );
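
        /* Reading the wallclock seconds field exercises the new mapping
         * and gives a quick indication that the shared info page really
         * is backed by the hypervisor's frame.
         */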
        /* Sanity check */
        DBGC2 ( hvm, "HVM wallclock time is %d\n",
                readl ( &hvm->xen.shared->wc_sec ) );

        return 0;

        hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
 err_alloc:
        return rc;
}

/**
 * Unmap shared info page
 *
 * @v hvm           HVM device
 */
static void hvm_unmap_shared_info ( struct hvm_device *hvm ) {

        /* Unmap shared info page */
        hvm_iounmap ( hvm, hvm->xen.shared, PAGE_SIZE );
}

/**
 * Map grant table
 *
 * @v hvm           HVM device
 * @ret rc          Return status code
 */
static int hvm_map_grant ( struct hvm_device *hvm ) {
        physaddr_t grant_phys;
        int rc;

        /* Initialise grant table */
        if ( ( rc = xengrant_init ( &hvm->xen ) ) != 0 ) {
                DBGC ( hvm, "HVM could not initialise grant table: %s\n",
                       strerror ( rc ) );
                return rc;
        }
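
        /* xengrant_init() is expected to have determined the grant
         * table size, recorded in hvm->xen.grant.len, before the table
         * frames are mapped below.
         */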
        /* Map grant table */
        hvm->xen.grant.table = hvm_ioremap ( hvm, XENMAPSPACE_grant_table,
                                             hvm->xen.grant.len );
        if ( ! hvm->xen.grant.table )
                return -ENODEV;
        grant_phys = virt_to_phys ( hvm->xen.grant.table );
        DBGC2 ( hvm, "HVM mapped grant table at [%08lx,%08lx)\n",
                grant_phys, ( grant_phys + hvm->xen.grant.len ) );

        return 0;
}

/**
 * Unmap grant table
 *
 * @v hvm           HVM device
 */
static void hvm_unmap_grant ( struct hvm_device *hvm ) {

        /* Unmap grant table */
        hvm_iounmap ( hvm, hvm->xen.grant.table, hvm->xen.grant.len );
}

/**
 * Map XenStore
 *
 * @v hvm           HVM device
 * @ret rc          Return status code
 */
static int hvm_map_xenstore ( struct hvm_device *hvm ) {
        uint64_t xenstore_evtchn;
        uint64_t xenstore_pfn;
        physaddr_t xenstore_phys;
        char *name;
        int xenrc;
        int rc;
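
        /* For HVM guests the XenStore ring location and its event
         * channel are advertised via HVM parameters (rather than the
         * start info page used by PV guests), so both are retrieved
         * with HVMOP_get_param.
         */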
        /* Get XenStore event channel */
        if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_EVTCHN,
                                           &xenstore_evtchn ) ) != 0 ) {
                rc = -EXEN ( xenrc );
                DBGC ( hvm, "HVM could not get XenStore event channel: %s\n",
                       strerror ( rc ) );
                return rc;
        }
        hvm->xen.store.port = xenstore_evtchn;

        /* Get XenStore PFN */
        if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_PFN,
                                           &xenstore_pfn ) ) != 0 ) {
                rc = -EXEN ( xenrc );
                DBGC ( hvm, "HVM could not get XenStore PFN: %s\n",
                       strerror ( rc ) );
                return rc;
        }
        xenstore_phys = ( xenstore_pfn * PAGE_SIZE );

        /* Map XenStore */
        hvm->xen.store.intf = ioremap ( xenstore_phys, PAGE_SIZE );
        if ( ! hvm->xen.store.intf ) {
                DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n",
                       xenstore_phys, ( xenstore_phys + PAGE_SIZE ) );
                return -ENODEV;
        }
        DBGC2 ( hvm, "HVM mapped XenStore at [%08lx,%08lx) with event port "
                "%d\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ),
                hvm->xen.store.port );

        /* Check that XenStore is working */
        if ( ( rc = xenstore_read ( &hvm->xen, &name, "name", NULL ) ) != 0 ) {
                DBGC ( hvm, "HVM could not read domain name: %s\n",
                       strerror ( rc ) );
                return rc;
        }
        DBGC2 ( hvm, "HVM running in domain \"%s\"\n", name );
        free ( name );

        return 0;
}

/**
 * Unmap XenStore
 *
 * @v hvm           HVM device
 */
static void hvm_unmap_xenstore ( struct hvm_device *hvm ) {

        /* Unmap XenStore */
        iounmap ( hvm->xen.store.intf );
}

/**
 * Probe PCI device
 *
 * @v pci           PCI device
 * @ret rc          Return status code
 */
static int hvm_probe ( struct pci_device *pci ) {
        struct hvm_device *hvm;
        int rc;

        /* Allocate and initialise structure */
        hvm = zalloc ( sizeof ( *hvm ) );
        if ( ! hvm ) {
                rc = -ENOMEM;
                goto err_alloc;
        }
        hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR );
        hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR );
        DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n",
                hvm->mmio, ( hvm->mmio + hvm->mmio_len ) );

        /* Fix up PCI device */
        adjust_pci_device ( pci );
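
        /* The ordering below matters: the hypercall page must be in
         * place before any other Xen interface can be reached, and the
         * shared info page, grant table and XenStore are needed by the
         * Xen bus devices probed afterwards.
         */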
        /* Attach to hypervisor */
        if ( ( rc = hvm_cpuid_base ( hvm ) ) != 0 )
                goto err_cpuid_base;
        if ( ( rc = hvm_map_hypercall ( hvm ) ) != 0 )
                goto err_map_hypercall;
        if ( ( rc = hvm_map_shared_info ( hvm ) ) != 0 )
                goto err_map_shared_info;
        if ( ( rc = hvm_map_grant ( hvm ) ) != 0 )
                goto err_map_grant;
        if ( ( rc = hvm_map_xenstore ( hvm ) ) != 0 )
                goto err_map_xenstore;

        /* Probe Xen devices */
        if ( ( rc = xenbus_probe ( &hvm->xen, &pci->dev ) ) != 0 ) {
                DBGC ( hvm, "HVM could not probe Xen bus: %s\n",
                       strerror ( rc ) );
                goto err_xenbus_probe;
        }

        pci_set_drvdata ( pci, hvm );
        return 0;

        xenbus_remove ( &hvm->xen, &pci->dev );
 err_xenbus_probe:
        hvm_unmap_xenstore ( hvm );
 err_map_xenstore:
        hvm_unmap_grant ( hvm );
 err_map_grant:
        hvm_unmap_shared_info ( hvm );
 err_map_shared_info:
        hvm_unmap_hypercall ( hvm );
 err_map_hypercall:
 err_cpuid_base:
        free ( hvm );
 err_alloc:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci           PCI device
 */
static void hvm_remove ( struct pci_device *pci ) {
        struct hvm_device *hvm = pci_get_drvdata ( pci );

        xenbus_remove ( &hvm->xen, &pci->dev );
        hvm_unmap_xenstore ( hvm );
        hvm_unmap_grant ( hvm );
        hvm_unmap_shared_info ( hvm );
        hvm_unmap_hypercall ( hvm );
        free ( hvm );
}
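
/* Both entries below use the XenSource PCI vendor ID 0x5853: device
 * 0x0001 is the standard Xen platform device, and 0x0002 appears to be
 * an alternative platform device ID used by some XenServer
 * configurations.
 */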
/** PCI device IDs */
static struct pci_device_id hvm_ids[] = {
        PCI_ROM ( 0x5853, 0x0001, "hvm", "hvm", 0 ),
        PCI_ROM ( 0x5853, 0x0002, "hvm2", "hvm2", 0 ),
};

/** PCI driver */
struct pci_driver hvm_driver __pci_driver = {
        .ids = hvm_ids,
        .id_count = ( sizeof ( hvm_ids ) / sizeof ( hvm_ids[0] ) ),
        .probe = hvm_probe,
        .remove = hvm_remove,
};

/* Drag in objects via hvm_driver */
REQUIRING_SYMBOL ( hvm_driver );

/* Drag in netfront driver */
REQUIRE_OBJECT ( netfront );