hvm.c

/*
 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

FILE_LICENCE ( GPL2_OR_LATER );

#include <stdint.h>
#include <stdio.h>
#include <errno.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/cpuid.h>
#include <ipxe/msr.h>
#include <ipxe/xen.h>
#include <ipxe/xenver.h>
#include <ipxe/xenmem.h>
#include <ipxe/xenstore.h>
#include <ipxe/xenbus.h>
#include <ipxe/xengrant.h>
#include "hvm.h"

/** @file
 *
 * Xen HVM driver
 *
 */

/**
 * Get CPUID base
 *
 * @v hvm		HVM device
 * @ret rc		Return status code
 */
static int hvm_cpuid_base ( struct hvm_device *hvm ) {
	struct {
		uint32_t ebx;
		uint32_t ecx;
		uint32_t edx;
	} __attribute__ (( packed )) signature;
	uint32_t base;
	uint32_t version;
	uint32_t discard_eax;
	uint32_t discard_ebx;
	uint32_t discard_ecx;
	uint32_t discard_edx;

	/* Scan for magic signature */
	for ( base = HVM_CPUID_MIN ; base <= HVM_CPUID_MAX ;
	      base += HVM_CPUID_STEP ) {
		cpuid ( base, &discard_eax, &signature.ebx, &signature.ecx,
			&signature.edx );
		if ( memcmp ( &signature, HVM_CPUID_MAGIC,
			      sizeof ( signature ) ) == 0 ) {
			hvm->cpuid_base = base;
			cpuid ( ( base + HVM_CPUID_VERSION ), &version,
				&discard_ebx, &discard_ecx, &discard_edx );
			DBGC2 ( hvm, "HVM using CPUID base %#08x (v%d.%d)\n",
				base, ( version >> 16 ), ( version & 0xffff ) );
			return 0;
		}
	}

	DBGC ( hvm, "HVM could not find hypervisor\n" );
	return -ENODEV;
}
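
/* Note: the scan above walks the hypervisor CPUID leaves (for Xen these
 * conventionally start at 0x40000000) looking for the "XenVMMXenVMM"
 * signature returned in EBX:ECX:EDX; HVM_CPUID_MIN, HVM_CPUID_MAX,
 * HVM_CPUID_STEP and HVM_CPUID_MAGIC are defined in hvm.h.  Leaf
 * ( base + HVM_CPUID_VERSION ) then reports the Xen version, with the
 * major number in the upper 16 bits and the minor number in the lower
 * 16 bits, as decoded in the debug message above.
 */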

/**
 * Map hypercall page(s)
 *
 * @v hvm		HVM device
 * @ret rc		Return status code
 */
static int hvm_map_hypercall ( struct hvm_device *hvm ) {
	uint32_t pages;
	uint32_t msr;
	uint32_t discard_ecx;
	uint32_t discard_edx;
	physaddr_t hypercall_phys;
	uint32_t version;
	static xen_extraversion_t extraversion;
	int xenrc;
	int rc;

	/* Get number of hypercall pages and MSR to use */
	cpuid ( ( hvm->cpuid_base + HVM_CPUID_PAGES ), &pages, &msr,
		&discard_ecx, &discard_edx );

	/* Allocate pages */
	hvm->hypercall_len = ( pages * PAGE_SIZE );
	hvm->xen.hypercall = malloc_dma ( hvm->hypercall_len, PAGE_SIZE );
	if ( ! hvm->xen.hypercall ) {
		DBGC ( hvm, "HVM could not allocate %d hypercall page(s)\n",
		       pages );
		return -ENOMEM;
	}
	hypercall_phys = virt_to_phys ( hvm->xen.hypercall );
	DBGC2 ( hvm, "HVM hypercall page(s) at [%#08lx,%#08lx) via MSR %#08x\n",
		hypercall_phys, ( hypercall_phys + hvm->hypercall_len ), msr );

	/* Write to MSR */
	wrmsr ( msr, hypercall_phys );

	/* Check that hypercall mechanism is working */
	version = xenver_version ( &hvm->xen );
	if ( ( xenrc = xenver_extraversion ( &hvm->xen, &extraversion ) ) != 0){
		rc = -EXEN ( xenrc );
		DBGC ( hvm, "HVM could not get extraversion: %s\n",
		       strerror ( rc ) );
		return rc;
	}
	DBGC2 ( hvm, "HVM found Xen version %d.%d%s\n",
		( version >> 16 ), ( version & 0xffff ), extraversion );

	return 0;
}
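
/* Note: writing the physical address of the freshly allocated, page-aligned
 * buffer to the MSR reported by CPUID leaf ( base + HVM_CPUID_PAGES ) asks
 * the hypervisor to fill the page(s) with hypercall trampolines.  As a rough
 * sketch (the real wrappers live in ipxe/xen.h), a hypercall is then issued
 * by calling into the page at a fixed 32-byte stride:
 *
 *	entry = ( xen->hypercall + ( hypercall_number * 32 ) );
 *	...call entry with arguments in the usual registers...
 *
 * The xenver_version()/xenver_extraversion() calls above are simply the
 * cheapest way to confirm that this mechanism is actually working.
 */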

/**
 * Unmap hypercall page(s)
 *
 * @v hvm		HVM device
 */
static void hvm_unmap_hypercall ( struct hvm_device *hvm ) {

	/* Free pages */
	free_dma ( hvm->xen.hypercall, hvm->hypercall_len );
}

/**
 * Allocate and map MMIO space
 *
 * @v hvm		HVM device
 * @v space		Source mapping space
 * @v pages		Number of pages
 * @ret mmio		MMIO space address, or NULL on error
 */
static void * hvm_ioremap ( struct hvm_device *hvm, unsigned int space,
			    unsigned int pages ) {
	struct xen_add_to_physmap add;
	struct xen_remove_from_physmap remove;
	physaddr_t mmio_phys;
	unsigned int i;
	size_t len;
	void *mmio;
	int xenrc;
	int rc;

	/* Check for available space */
	len = ( pages * PAGE_SIZE );
	if ( ( hvm->mmio_offset + len ) > hvm->mmio_len ) {
		DBGC ( hvm, "HVM could not allocate %zd bytes of MMIO space "
		       "(%zd of %zd remaining)\n", len,
		       ( hvm->mmio_len - hvm->mmio_offset ), hvm->mmio_len );
		goto err_no_space;
	}

	/* Map this space */
	mmio = ioremap ( ( hvm->mmio + hvm->mmio_offset ), len );
	if ( ! mmio ) {
		DBGC ( hvm, "HVM could not map MMIO space [%08lx,%08lx)\n",
		       ( hvm->mmio + hvm->mmio_offset ),
		       ( hvm->mmio + hvm->mmio_offset + len ) );
		goto err_ioremap;
	}
	mmio_phys = virt_to_phys ( mmio );

	/* Add to physical address space */
	for ( i = 0 ; i < pages ; i++ ) {
		add.domid = DOMID_SELF;
		add.idx = i;
		add.space = space;
		add.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
		if ( ( xenrc = xenmem_add_to_physmap ( &hvm->xen, &add ) ) !=0){
			rc = -EXEN ( xenrc );
			DBGC ( hvm, "HVM could not add space %d idx %d at "
			       "[%08lx,%08lx): %s\n", space, i,
			       ( mmio_phys + ( i * PAGE_SIZE ) ),
			       ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
			       strerror ( rc ) );
			goto err_add_to_physmap;
		}
	}

	/* Update offset */
	hvm->mmio_offset += len;

	return mmio;

	i = pages;
 err_add_to_physmap:
	for ( i-- ; ( signed int ) i >= 0 ; i-- ) {
		remove.domid = DOMID_SELF;
		remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
		xenmem_remove_from_physmap ( &hvm->xen, &remove );
	}
	iounmap ( mmio );
 err_ioremap:
 err_no_space:
	return NULL;
}
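
/* Note: an XENMAPSPACE add-to-physmap request asks the hypervisor to place a
 * special frame (shared info page, grant table frame, ...) at a guest frame
 * number of our choosing.  This driver carves those frames out of the Xen
 * platform PCI device's MMIO BAR, which is otherwise unused guest-physical
 * address space, so that no real RAM ends up shadowed by the mapping;
 * hvm->mmio_offset simply tracks how much of that window has been handed
 * out so far.
 */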

/**
 * Unmap MMIO space
 *
 * @v hvm		HVM device
 * @v mmio		MMIO space address
 * @v pages		Number of pages
 */
static void hvm_iounmap ( struct hvm_device *hvm, void *mmio,
			  unsigned int pages ) {
	struct xen_remove_from_physmap remove;
	physaddr_t mmio_phys = virt_to_phys ( mmio );
	unsigned int i;
	int xenrc;
	int rc;

	/* Unmap this space */
	iounmap ( mmio );

	/* Remove from physical address space */
	for ( i = 0 ; i < pages ; i++ ) {
		remove.domid = DOMID_SELF;
		remove.gpfn = ( ( mmio_phys / PAGE_SIZE ) + i );
		if ( ( xenrc = xenmem_remove_from_physmap ( &hvm->xen,
							    &remove ) ) != 0 ) {
			rc = -EXEN ( xenrc );
			DBGC ( hvm, "HVM could not remove space [%08lx,%08lx): "
			       "%s\n", ( mmio_phys + ( i * PAGE_SIZE ) ),
			       ( mmio_phys + ( ( i + 1 ) * PAGE_SIZE ) ),
			       strerror ( rc ) );
			/* Nothing we can do about this */
		}
	}
}

/**
 * Map shared info page
 *
 * @v hvm		HVM device
 * @ret rc		Return status code
 */
static int hvm_map_shared_info ( struct hvm_device *hvm ) {
	physaddr_t shared_info_phys;
	int rc;

	/* Map shared info page */
	hvm->xen.shared = hvm_ioremap ( hvm, XENMAPSPACE_shared_info, 1 );
	if ( ! hvm->xen.shared ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	shared_info_phys = virt_to_phys ( hvm->xen.shared );
	DBGC2 ( hvm, "HVM shared info page at [%#08lx,%#08lx)\n",
		shared_info_phys, ( shared_info_phys + PAGE_SIZE ) );

	/* Sanity check */
	DBGC2 ( hvm, "HVM wallclock time is %d\n",
		readl ( &hvm->xen.shared->wc_sec ) );

	return 0;

	hvm_iounmap ( hvm, hvm->xen.shared, 1 );
 err_alloc:
	return rc;
}

/**
 * Unmap shared info page
 *
 * @v hvm		HVM device
 */
static void hvm_unmap_shared_info ( struct hvm_device *hvm ) {

	/* Unmap shared info page */
	hvm_iounmap ( hvm, hvm->xen.shared, 1 );
}

/**
 * Map grant table
 *
 * @v hvm		HVM device
 * @ret rc		Return status code
 */
static int hvm_map_grant ( struct hvm_device *hvm ) {
	struct gnttab_query_size size;
	struct gnttab_set_version version;
	physaddr_t grant_phys;
	size_t len;
	int xenrc;
	int rc;

	/* Get grant table size */
	size.dom = DOMID_SELF;
	if ( ( xenrc = xengrant_query_size ( &hvm->xen, &size ) ) != 0 ) {
		rc = -EXEN ( xenrc );
		DBGC ( hvm, "HVM could not get grant table size: %s\n",
		       strerror ( rc ) );
		goto err_query_size;
	}
	len = ( size.nr_frames * PAGE_SIZE );

	/* Configure to use version 2 tables */
	version.version = 2;
	if ( ( xenrc = xengrant_set_version ( &hvm->xen, &version ) ) != 0 ) {
		rc = -EXEN ( xenrc );
		DBGC ( hvm, "HVM could not set version 2 grant table: %s\n",
		       strerror ( rc ) );
		goto err_set_version;
	}
	if ( version.version != 2 ) {
		DBGC ( hvm, "HVM could not set version 2 grant table\n" );
		rc = -ENOTTY;
		goto err_set_version;
	}

	/* Map grant table */
	hvm->xen.grant.table = hvm_ioremap ( hvm, XENMAPSPACE_grant_table,
					     size.nr_frames );
	if ( ! hvm->xen.grant.table ) {
		rc = -ENODEV;
		goto err_ioremap;
	}
	grant_phys = virt_to_phys ( hvm->xen.grant.table );
	DBGC2 ( hvm, "HVM mapped grant table at [%08lx,%08lx)\n",
		grant_phys, ( grant_phys + len ) );
	hvm->xen.grant.count = ( len / sizeof ( hvm->xen.grant.table[0] ) );

	return 0;

	hvm_iounmap ( hvm, hvm->xen.grant.table, size.nr_frames );
 err_ioremap:
 err_set_version:
 err_query_size:
	return rc;
}
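
/* Note: with version 2 selected, each mapped grant table frame holds an
 * array of fixed-size version 2 grant entries, which is why grant.count is
 * derived above from the mapped length divided by sizeof ( table[0] ).  If
 * the hypervisor leaves the table at version 1 despite the request, the
 * probe fails cleanly with -ENOTTY rather than misinterpreting the table
 * layout later.
 */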

/**
 * Unmap grant table
 *
 * @v hvm		HVM device
 */
static void hvm_unmap_grant ( struct hvm_device *hvm ) {
	size_t len;

	/* Unmap grant table */
	len = ( hvm->xen.grant.count * sizeof ( hvm->xen.grant.table[0] ) );
	hvm_iounmap ( hvm, hvm->xen.grant.table, ( len / PAGE_SIZE ) );
}

/**
 * Map XenStore
 *
 * @v hvm		HVM device
 * @ret rc		Return status code
 */
static int hvm_map_xenstore ( struct hvm_device *hvm ) {
	uint64_t xenstore_evtchn;
	uint64_t xenstore_pfn;
	physaddr_t xenstore_phys;
	char *name;
	int xenrc;
	int rc;

	/* Get XenStore event channel */
	if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_EVTCHN,
					   &xenstore_evtchn ) ) != 0 ) {
		rc = -EXEN ( xenrc );
		DBGC ( hvm, "HVM could not get XenStore event channel: %s\n",
		       strerror ( rc ) );
		return rc;
	}
	hvm->xen.store.port = xenstore_evtchn;

	/* Get XenStore PFN */
	if ( ( xenrc = xen_hvm_get_param ( &hvm->xen, HVM_PARAM_STORE_PFN,
					   &xenstore_pfn ) ) != 0 ) {
		rc = -EXEN ( xenrc );
		DBGC ( hvm, "HVM could not get XenStore PFN: %s\n",
		       strerror ( rc ) );
		return rc;
	}
	xenstore_phys = ( xenstore_pfn * PAGE_SIZE );

	/* Map XenStore */
	hvm->xen.store.intf = ioremap ( xenstore_phys, PAGE_SIZE );
	if ( ! hvm->xen.store.intf ) {
		DBGC ( hvm, "HVM could not map XenStore at [%08lx,%08lx)\n",
		       xenstore_phys, ( xenstore_phys + PAGE_SIZE ) );
		return -ENODEV;
	}
	DBGC2 ( hvm, "HVM mapped XenStore at [%08lx,%08lx) with event port "
		"%d\n", xenstore_phys, ( xenstore_phys + PAGE_SIZE ),
		hvm->xen.store.port );

	/* Check that XenStore is working */
	if ( ( rc = xenstore_read ( &hvm->xen, &name, "name", NULL ) ) != 0 ) {
		DBGC ( hvm, "HVM could not read domain name: %s\n",
		       strerror ( rc ) );
		return rc;
	}
	DBGC2 ( hvm, "HVM running in domain \"%s\"\n", name );
	free ( name );

	return 0;
}
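
/* Note: an HVM guest learns where XenStore lives via two HVM parameters:
 * HVM_PARAM_STORE_PFN gives the guest frame holding the xenstore ring
 * interface, and HVM_PARAM_STORE_EVTCHN gives the event channel used to
 * signal it.  The trial xenstore_read() of the "name" key is purely a
 * self-test; the returned domain name is printed and then freed.
 */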

/**
 * Unmap XenStore
 *
 * @v hvm		HVM device
 */
static void hvm_unmap_xenstore ( struct hvm_device *hvm ) {

	/* Unmap XenStore */
	iounmap ( hvm->xen.store.intf );
}

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int hvm_probe ( struct pci_device *pci ) {
	struct hvm_device *hvm;
	int rc;

	/* Allocate and initialise structure */
	hvm = zalloc ( sizeof ( *hvm ) );
	if ( ! hvm ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	hvm->mmio = pci_bar_start ( pci, HVM_MMIO_BAR );
	hvm->mmio_len = pci_bar_size ( pci, HVM_MMIO_BAR );
	DBGC2 ( hvm, "HVM has MMIO space [%08lx,%08lx)\n",
		hvm->mmio, ( hvm->mmio + hvm->mmio_len ) );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Attach to hypervisor */
	if ( ( rc = hvm_cpuid_base ( hvm ) ) != 0 )
		goto err_cpuid_base;
	if ( ( rc = hvm_map_hypercall ( hvm ) ) != 0 )
		goto err_map_hypercall;
	if ( ( rc = hvm_map_shared_info ( hvm ) ) != 0 )
		goto err_map_shared_info;
	if ( ( rc = hvm_map_grant ( hvm ) ) != 0 )
		goto err_map_grant;
	if ( ( rc = hvm_map_xenstore ( hvm ) ) != 0 )
		goto err_map_xenstore;

	/* Probe Xen devices */
	if ( ( rc = xenbus_probe ( &hvm->xen, &pci->dev ) ) != 0 ) {
		DBGC ( hvm, "HVM could not probe Xen bus: %s\n",
		       strerror ( rc ) );
		goto err_xenbus_probe;
	}

	pci_set_drvdata ( pci, hvm );
	return 0;

	xenbus_remove ( &hvm->xen, &pci->dev );
 err_xenbus_probe:
	hvm_unmap_xenstore ( hvm );
 err_map_xenstore:
	hvm_unmap_grant ( hvm );
 err_map_grant:
	hvm_unmap_shared_info ( hvm );
 err_map_shared_info:
	hvm_unmap_hypercall ( hvm );
 err_map_hypercall:
 err_cpuid_base:
	free ( hvm );
 err_alloc:
	return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void hvm_remove ( struct pci_device *pci ) {
	struct hvm_device *hvm = pci_get_drvdata ( pci );

	xenbus_remove ( &hvm->xen, &pci->dev );
	hvm_unmap_xenstore ( hvm );
	hvm_unmap_grant ( hvm );
	hvm_unmap_shared_info ( hvm );
	hvm_unmap_hypercall ( hvm );
	free ( hvm );
}

/** PCI device IDs */
static struct pci_device_id hvm_ids[] = {
	PCI_ROM ( 0x5853, 0x0001, "hvm", "hvm", 0 ),
};
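
/* Note: vendor 0x5853, device 0x0001 is the Xen platform PCI device exposed
 * to HVM guests; its presence is what triggers this driver's probe routine.
 */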

/** PCI driver */
struct pci_driver hvm_driver __pci_driver = {
	.ids = hvm_ids,
	.id_count = ( sizeof ( hvm_ids ) / sizeof ( hvm_ids[0] ) ),
	.probe = hvm_probe,
	.remove = hvm_remove,
};

/* Drag in netfront driver */
REQUIRE_OBJECT ( netfront );