
virtio-pci.c

/* virtio-pci.c - PCI interface for virtio devices
 *
 * (c) Copyright 2008 Bull S.A.S.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * some parts from Linux Virtio PCI driver
 *
 * Copyright IBM Corp. 2007
 * Authors: Anthony Liguori <aliguori@us.ibm.com>
 *
 */
#include "errno.h"
#include "byteswap.h"
#include "etherboot.h"
#include "ipxe/io.h"
#include "ipxe/iomap.h"
#include "ipxe/pci.h"
#include "ipxe/reboot.h"
#include "ipxe/virtio-pci.h"
#include "ipxe/virtio-ring.h"
int vp_find_vq(unsigned int ioaddr, int queue_index,
               struct vring_virtqueue *vq)
{
    struct vring *vr = &vq->vring;
    u16 num;

    /* select the queue */
    outw(queue_index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

    /* check if the queue is available */
    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
        DBG("VIRTIO-PCI ERROR: queue size is 0\n");
        return -1;
    }
    if (num > MAX_QUEUE_NUM) {
        DBG("VIRTIO-PCI ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
        return -1;
    }

    /* check if the queue is already active */
    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
        DBG("VIRTIO-PCI ERROR: queue already active\n");
        return -1;
    }

    vq->queue_index = queue_index;

    /* initialize the queue */
    vring_init(vr, num, (unsigned char *)&vq->queue);

    /* activate the queue
     *
     * NOTE: vr->desc is initialized by vring_init()
     */
    outl((unsigned long)virt_to_phys(vr->desc) >> PAGE_SHIFT,
         ioaddr + VIRTIO_PCI_QUEUE_PFN);

    return num;
}
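/* Usage sketch (illustrative, not part of this file): a legacy virtio
 * driver would typically reset the device and negotiate features first,
 * then set up each virtqueue with vp_find_vq(). The ioaddr below is
 * assumed to be the device's legacy I/O BAR:
 *
 *     struct vring_virtqueue vq;
 *     int num = vp_find_vq(ioaddr, 0, &vq);
 *     if (num < 0)
 *         return -ENODEV;  // queue missing, oversized or already active
 *
 * The helpers below implement the modern (virtio 1.0) transport. A
 * capability region may live in memory space, in port I/O space, or be
 * reachable only through the PCI configuration access window; CFG_POS()
 * locates fields of that window within the device's vendor capability.
 */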
#define CFG_POS(vdev, field) \
    (vdev->cfg_cap_pos + offsetof(struct virtio_pci_cfg_cap, field))

static void prep_pci_cfg_cap(struct virtio_pci_modern_device *vdev,
                             struct virtio_pci_region *region,
                             size_t offset, u32 length)
{
    pci_write_config_byte(vdev->pci, CFG_POS(vdev, cap.bar), region->bar);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.length), length);
    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.offset),
                           (intptr_t)(region->base + offset));
}
void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
                  struct virtio_pci_region *region, u8 data, size_t offset)
{
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writeb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outb(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_write_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}

void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u16 data, size_t offset)
{
    data = cpu_to_le16(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writew(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outw(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_write_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}

void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
                   struct virtio_pci_region *region, u32 data, size_t offset)
{
    data = cpu_to_le32(data);
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        writel(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        outl(data, region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_write_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
        break;
    default:
        assert(0);
        break;
    }
}
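/* The vpm_ioread*() helpers below mirror the writes above. Virtio 1.0
 * register contents are little-endian on the wire, so 16- and 32-bit
 * values pass through cpu_to_le*()/le*_to_cpu() for big-endian hosts.
 * Illustrative use (device_status is a standard field of struct
 * virtio_pci_common_cfg; COMMON_OFFSET() is assumed to cover it):
 *
 *     vpm_iowrite8(vdev, &vdev->common, VIRTIO_CONFIG_S_ACKNOWLEDGE,
 *                  COMMON_OFFSET(device_status));
 */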
u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
               struct virtio_pci_region *region, size_t offset)
{
    uint8_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inb(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 1);
        pci_read_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return data;
}

u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    uint16_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inw(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 2);
        pci_read_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return le16_to_cpu(data);
}
u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
                 struct virtio_pci_region *region, size_t offset)
{
    uint32_t data;
    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
    case VIRTIO_PCI_REGION_MEMORY:
        data = readl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PORT:
        data = inl(region->base + offset);
        break;
    case VIRTIO_PCI_REGION_PCI_CONFIG:
        prep_pci_cfg_cap(vdev, region, offset, 4);
        pci_read_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
        break;
    default:
        assert(0);
        data = 0;
        break;
    }
    return le32_to_cpu(data);
}
int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type)
{
    int pos;
    uint8_t type, bar;

    for (pos = pci_find_capability(pci, PCI_CAP_ID_VNDR);
         pos > 0;
         pos = pci_find_next_capability(pci, pos, PCI_CAP_ID_VNDR)) {
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 cfg_type), &type);
        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
                                                 bar), &bar);

        /* Ignore structures with reserved BAR values */
        if (bar > 0x5) {
            continue;
        }

        if (type == cfg_type) {
            return pos;
        }
    }
    return 0;
}
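/* Usage sketch (illustrative): a modern driver probes for each config
 * structure by type before deciding whether the device is usable.
 * VIRTIO_PCI_CAP_COMMON_CFG is the standard virtio 1.0 capability type
 * for the common configuration structure:
 *
 *     int common = virtio_pci_find_capability(pci, VIRTIO_PCI_CAP_COMMON_CFG);
 *     if (!common)
 *         return -ENODEV;  // not a modern virtio device
 */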
int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
                              u32 align, u32 start, u32 size,
                              struct virtio_pci_region *region)
{
    u8 bar;
    u32 offset, length, base_raw;
    unsigned long base;

    pci_read_config_byte(pci, cap + offsetof(struct virtio_pci_cap, bar), &bar);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, offset),
                          &offset);
    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, length),
                          &length);

    if (length <= start) {
        DBG("VIRTIO-PCI bad capability len %u (>%u expected)\n", length, start);
        return -EINVAL;
    }
    if (length - start < minlen) {
        DBG("VIRTIO-PCI bad capability len %u (>=%zu expected)\n", length, minlen);
        return -EINVAL;
    }
    length -= start;
    if (start + offset < offset) {
        DBG("VIRTIO-PCI map wrap-around %u+%u\n", start, offset);
        return -EINVAL;
    }
    offset += start;
    if (offset & (align - 1)) {
        DBG("VIRTIO-PCI offset %u not aligned to %u\n", offset, align);
        return -EINVAL;
    }
    if (length > size) {
        length = size;
    }
    if (minlen + offset < minlen ||
        minlen + offset > pci_bar_size(pci, PCI_BASE_ADDRESS(bar))) {
        DBG("VIRTIO-PCI map virtio %zu@%u out of range on bar %i length %lu\n",
            minlen, offset,
            bar, (unsigned long)pci_bar_size(pci, PCI_BASE_ADDRESS(bar)));
        return -EINVAL;
    }

    region->base = NULL;
    region->length = length;
    region->bar = bar;

    base = pci_bar_start(pci, PCI_BASE_ADDRESS(bar));
    if (base) {
        pci_read_config_dword(pci, PCI_BASE_ADDRESS(bar), &base_raw);
        if (base_raw & PCI_BASE_ADDRESS_SPACE_IO) {
            /* Region accessed using port I/O */
            region->base = (void *)(base + offset);
            region->flags = VIRTIO_PCI_REGION_PORT;
        } else {
            /* Region mapped into memory space */
            region->base = ioremap(base + offset, length);
            region->flags = VIRTIO_PCI_REGION_MEMORY;
        }
    }
    if (!region->base) {
        /* Region accessed via PCI config space window */
        region->base = (void *)(intptr_t)offset;
        region->flags = VIRTIO_PCI_REGION_PCI_CONFIG;
    }

    return 0;
}
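/* Usage sketch (illustrative): mapping the common configuration region
 * found by virtio_pci_find_capability() above. The argument pattern
 * (minimum length, 4-byte alignment, no start offset) is an assumption
 * modeled on the notify-region mapping in vpm_find_vqs() below:
 *
 *     err = virtio_pci_map_capability(pci, common,
 *                                     sizeof(struct virtio_pci_common_cfg), 4,
 *                                     0, sizeof(struct virtio_pci_common_cfg),
 *                                     &vdev->common);
 *     if (err)
 *         return err;
 */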
void virtio_pci_unmap_capability(struct virtio_pci_region *region)
{
    unsigned region_type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
    if (region_type == VIRTIO_PCI_REGION_MEMORY) {
        iounmap(region->base);
    }
}
void vpm_notify(struct virtio_pci_modern_device *vdev,
                struct vring_virtqueue *vq)
{
    vpm_iowrite16(vdev, &vq->notification, (u16)vq->queue_index, 0);
}
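/* The per-queue notification region written above is set up by
 * vpm_find_vqs() below: each queue's notification address is the notify
 * capability base plus queue_notify_off * notify_off_multiplier, both
 * read from the device, so writing the queue index at offset 0 of that
 * region kicks exactly that queue.
 */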
int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
                 unsigned nvqs, struct vring_virtqueue *vqs)
{
    unsigned i;
    struct vring_virtqueue *vq;
    u16 size, off;
    u32 notify_offset_multiplier;
    int err;

    if (nvqs > vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(num_queues))) {
        return -ENOENT;
    }

    /* Read notify_off_multiplier from config space. */
    pci_read_config_dword(vdev->pci,
                          vdev->notify_cap_pos +
                          offsetof(struct virtio_pci_notify_cap,
                                   notify_off_multiplier),
                          &notify_offset_multiplier);

    for (i = 0; i < nvqs; i++) {
        /* Select the queue we're interested in */
        vpm_iowrite16(vdev, &vdev->common, (u16)i, COMMON_OFFSET(queue_select));

        /* Check if queue is either not available or already active. */
        size = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_size));
        /* QEMU has a bug where queues don't revert to inactive on device
         * reset. Skip checking the queue_enable field until it is fixed.
         */
        if (!size /*|| vpm_ioread16(vdev, &vdev->common.queue_enable)*/)
            return -ENOENT;

        if (size & (size - 1)) {
            DBG("VIRTIO-PCI %p: bad queue size %u\n", vdev, size);
            return -EINVAL;
        }

        vq = &vqs[i];
        vq->queue_index = i;

        /* get offset of notification word for this vq */
        off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));

        vq->vring.num = size;
        vring_init(&vq->vring, size, (unsigned char *)vq->queue);

        /* activate the queue */
        vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
                      COMMON_OFFSET(queue_desc_lo),
                      COMMON_OFFSET(queue_desc_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
                      COMMON_OFFSET(queue_avail_lo),
                      COMMON_OFFSET(queue_avail_hi));
        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
                      COMMON_OFFSET(queue_used_lo),
                      COMMON_OFFSET(queue_used_hi));

        err = virtio_pci_map_capability(vdev->pci,
                                        vdev->notify_cap_pos, 2, 2,
                                        off * notify_offset_multiplier, 2,
                                        &vq->notification);
        if (err) {
            goto err_map_notify;
        }
    }

    /* Select and activate all queues. Has to be done last: once we do
     * this, there's no way to go back except reset.
     */
    for (i = 0; i < nvqs; i++) {
        vq = &vqs[i];
        vpm_iowrite16(vdev, &vdev->common, (u16)vq->queue_index,
                      COMMON_OFFSET(queue_select));
        vpm_iowrite16(vdev, &vdev->common, 1, COMMON_OFFSET(queue_enable));
    }

    return 0;

err_map_notify:
    /* Undo the virtio_pci_map_capability calls. */
    while (i-- > 0) {
        virtio_pci_unmap_capability(&vqs[i].notification);
    }
    return err;
}
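/* Usage sketch (illustrative): a modern driver needing two queues
 * (e.g. receive and transmit) would call vpm_find_vqs() once after
 * feature negotiation, then kick a queue with vpm_notify() whenever it
 * adds buffers to the ring:
 *
 *     struct vring_virtqueue vqs[2];
 *     if (vpm_find_vqs(vdev, 2, vqs) != 0)
 *         return -ENODEV;
 *     ...
 *     vpm_notify(vdev, &vqs[0]);  // tell the device about new buffers
 */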