[virtio] Add virtio 1.0 PCI support

This commit adds support for driving virtio 1.0 PCI devices.  In
addition to various helpers, a number of vpm_ functions are introduced
to be used instead of their legacy vp_ counterparts when accessing
virtio 1.0 (aka modern) devices.

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Modified-by: Michael Brown <mcb30@ipxe.org>
Signed-off-by: Michael Brown <mcb30@ipxe.org>
Ladi Prosek · 8 years ago · tag v1.20.1
Parent revision: 8a055a2a70
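The naming split is easiest to see side by side. A minimal illustrative sketch (the wrapper function is hypothetical; ioaddr is a legacy I/O port base and vdev the modern device state, both as used in the diffs below):

/* Illustrative only: legacy vp_ helpers take a bare I/O port base,
 * while the new vpm_ helpers take the modern device state and go
 * through a typed virtio_pci_region. */
static void example_feature_read(unsigned int ioaddr,
                                 struct virtio_pci_modern_device *vdev)
{
    u32 legacy = vp_get_features(ioaddr);   /* virtio 0.9.x: 32 feature bits */
    u64 modern = vpm_get_features(vdev);    /* virtio 1.0: 64 feature bits */
    (void) legacy;
    (void) modern;
}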

src/drivers/bus/virtio-pci.c (+349 −4)

@@ -11,10 +11,15 @@
  *
  */
 
+#include "errno.h"
+#include "byteswap.h"
 #include "etherboot.h"
 #include "ipxe/io.h"
-#include "ipxe/virtio-ring.h"
+#include "ipxe/iomap.h"
+#include "ipxe/pci.h"
+#include "ipxe/reboot.h"
 #include "ipxe/virtio-pci.h"
+#include "ipxe/virtio-ring.h"
 
 int vp_find_vq(unsigned int ioaddr, int queue_index,
                struct vring_virtqueue *vq)
@@ -30,19 +35,19 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
 
    num = inw(ioaddr + VIRTIO_PCI_QUEUE_NUM);
    if (!num) {
-           printf("ERROR: queue size is 0\n");
+           DBG("VIRTIO-PCI ERROR: queue size is 0\n");
            return -1;
    }
 
    if (num > MAX_QUEUE_NUM) {
-           printf("ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
+           DBG("VIRTIO-PCI ERROR: queue size %d > %d\n", num, MAX_QUEUE_NUM);
            return -1;
    }
 
    /* check if the queue is already active */
 
    if (inl(ioaddr + VIRTIO_PCI_QUEUE_PFN)) {
-           printf("ERROR: queue already active\n");
+           DBG("VIRTIO-PCI ERROR: queue already active\n");
            return -1;
    }
 
@@ -62,3 +67,343 @@ int vp_find_vq(unsigned int ioaddr, int queue_index,
 
    return num;
 }
+
+#define CFG_POS(vdev, field) \
+    (vdev->cfg_cap_pos + offsetof(struct virtio_pci_cfg_cap, field))
+
+static void prep_pci_cfg_cap(struct virtio_pci_modern_device *vdev,
+                             struct virtio_pci_region *region,
+                             size_t offset, u32 length)
+{
+    pci_write_config_byte(vdev->pci, CFG_POS(vdev, cap.bar), region->bar);
+    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.length), length);
+    pci_write_config_dword(vdev->pci, CFG_POS(vdev, cap.offset),
+        (intptr_t)(region->base + offset));
+}
+
+void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
+                  struct virtio_pci_region *region, u8 data, size_t offset)
+{
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        writeb(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        outb(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 1);
+        pci_write_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
+        break;
+    default:
+        assert(0);
+        break;
+    }
+}
+
+void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
+                   struct virtio_pci_region *region, u16 data, size_t offset)
+{
+    data = cpu_to_le16(data);
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        writew(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        outw(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 2);
+        pci_write_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
+        break;
+    default:
+        assert(0);
+        break;
+    }
+}
+
+void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
+                   struct virtio_pci_region *region, u32 data, size_t offset)
+{
+    data = cpu_to_le32(data);
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        writel(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        outl(data, region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 4);
+        pci_write_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), data);
+        break;
+    default:
+        assert(0);
+        break;
+    }
+}
+
+u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
+               struct virtio_pci_region *region, size_t offset)
+{
+    uint8_t data;
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        data = readb(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        data = inb(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 1);
+        pci_read_config_byte(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
+        break;
+    default:
+        assert(0);
+        data = 0;
+        break;
+    }
+    return data;
+}
+
+u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
+                 struct virtio_pci_region *region, size_t offset)
+{
+    uint16_t data;
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        data = readw(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        data = inw(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 2);
+        pci_read_config_word(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
+        break;
+    default:
+        assert(0);
+        data = 0;
+        break;
+    }
+    return le16_to_cpu(data);
+}
+
+u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
+                 struct virtio_pci_region *region, size_t offset)
+{
+    uint32_t data;
+    switch (region->flags & VIRTIO_PCI_REGION_TYPE_MASK) {
+    case VIRTIO_PCI_REGION_MEMORY:
+        data = readl(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PORT:
+        data = inl(region->base + offset);
+        break;
+    case VIRTIO_PCI_REGION_PCI_CONFIG:
+        prep_pci_cfg_cap(vdev, region, offset, 4);
+        pci_read_config_dword(vdev->pci, CFG_POS(vdev, pci_cfg_data), &data);
+        break;
+    default:
+        assert(0);
+        data = 0;
+        break;
+    }
+    return le32_to_cpu(data);
+}
+
+int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type)
+{
+    int pos;
+    uint8_t type, bar;
+
+    for (pos = pci_find_capability(pci, PCI_CAP_ID_VNDR);
+         pos > 0;
+         pos = pci_find_next_capability(pci, pos, PCI_CAP_ID_VNDR)) {
+
+        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
+            cfg_type), &type);
+        pci_read_config_byte(pci, pos + offsetof(struct virtio_pci_cap,
+            bar), &bar);
+
+        /* Ignore structures with reserved BAR values */
+        if (bar > 0x5) {
+            continue;
+        }
+
+        if (type == cfg_type) {
+            return pos;
+        }
+    }
+    return 0;
+}
+
+int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
+                              u32 align, u32 start, u32 size,
+                              struct virtio_pci_region *region)
+{
+    u8 bar;
+    u32 offset, length, base_raw;
+    unsigned long base;
+
+    pci_read_config_byte(pci, cap + offsetof(struct virtio_pci_cap, bar), &bar);
+    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, offset),
+                          &offset);
+    pci_read_config_dword(pci, cap + offsetof(struct virtio_pci_cap, length),
+                          &length);
+
+    if (length <= start) {
+        DBG("VIRTIO-PCI bad capability len %u (>%u expected)\n", length, start);
+        return -EINVAL;
+    }
+    if (length - start < minlen) {
+        DBG("VIRTIO-PCI bad capability len %u (>=%zu expected)\n", length, minlen);
+        return -EINVAL;
+    }
+    length -= start;
+    if (start + offset < offset) {
+        DBG("VIRTIO-PCI map wrap-around %u+%u\n", start, offset);
+        return -EINVAL;
+    }
+    offset += start;
+    if (offset & (align - 1)) {
+        DBG("VIRTIO-PCI offset %u not aligned to %u\n", offset, align);
+        return -EINVAL;
+    }
+    if (length > size) {
+        length = size;
+    }
+
+    if (minlen + offset < minlen ||
+        minlen + offset > pci_bar_size(pci, PCI_BASE_ADDRESS(bar))) {
+        DBG("VIRTIO-PCI map virtio %zu@%u out of range on bar %i length %lu\n",
+            minlen, offset,
+            bar, (unsigned long)pci_bar_size(pci, PCI_BASE_ADDRESS(bar)));
+        return -EINVAL;
+    }
+
+    region->base = NULL;
+    region->length = length;
+    region->bar = bar;
+
+    base = pci_bar_start(pci, PCI_BASE_ADDRESS(bar));
+    if (base) {
+        pci_read_config_dword(pci, PCI_BASE_ADDRESS(bar), &base_raw);
+
+        if (base_raw & PCI_BASE_ADDRESS_SPACE_IO) {
+            /* Region accessed using port I/O */
+            region->base = (void *)(base + offset);
+            region->flags = VIRTIO_PCI_REGION_PORT;
+        } else {
+            /* Region mapped into memory space */
+            region->base = ioremap(base + offset, length);
+            region->flags = VIRTIO_PCI_REGION_MEMORY;
+        }
+    }
+    if (!region->base) {
+        /* Region accessed via PCI config space window */
+        region->base = (void *)(intptr_t)offset;
+        region->flags = VIRTIO_PCI_REGION_PCI_CONFIG;
+    }
+    return 0;
+}
+
+void virtio_pci_unmap_capability(struct virtio_pci_region *region)
+{
+    unsigned region_type = region->flags & VIRTIO_PCI_REGION_TYPE_MASK;
+    if (region_type == VIRTIO_PCI_REGION_MEMORY) {
+        iounmap(region->base);
+    }
+}
+
+void vpm_notify(struct virtio_pci_modern_device *vdev,
+                struct vring_virtqueue *vq)
+{
+    vpm_iowrite16(vdev, &vq->notification, (u16)vq->queue_index, 0);
+}
+
+int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
+                 unsigned nvqs, struct vring_virtqueue *vqs)
+{
+    unsigned i;
+    struct vring_virtqueue *vq;
+    u16 size, off;
+    u32 notify_offset_multiplier;
+    int err;
+
+    if (nvqs > vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(num_queues))) {
+        return -ENOENT;
+    }
+
+    /* Read notify_off_multiplier from config space. */
+    pci_read_config_dword(vdev->pci,
+        vdev->notify_cap_pos + offsetof(struct virtio_pci_notify_cap,
+        notify_off_multiplier),
+        &notify_offset_multiplier);
+
+    for (i = 0; i < nvqs; i++) {
+        /* Select the queue we're interested in */
+        vpm_iowrite16(vdev, &vdev->common, (u16)i, COMMON_OFFSET(queue_select));
+
+        /* Check if queue is either not available or already active. */
+        size = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_size));
+        /* QEMU has a bug where queues don't revert to inactive on device
+         * reset. Skip checking the queue_enable field until it is fixed.
+         */
+        if (!size /*|| vpm_ioread16(vdev, &vdev->common.queue_enable)*/)
+            return -ENOENT;
+
+        if (size & (size - 1)) {
+            DBG("VIRTIO-PCI %p: bad queue size %u", vdev, size);
+            return -EINVAL;
+        }
+
+        vq = &vqs[i];
+        vq->queue_index = i;
+
+        /* get offset of notification word for this vq */
+        off = vpm_ioread16(vdev, &vdev->common, COMMON_OFFSET(queue_notify_off));
+        vq->vring.num = size;
+
+        vring_init(&vq->vring, size, (unsigned char *)vq->queue);
+
+        /* activate the queue */
+        vpm_iowrite16(vdev, &vdev->common, size, COMMON_OFFSET(queue_size));
+
+        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.desc),
+                      COMMON_OFFSET(queue_desc_lo),
+                      COMMON_OFFSET(queue_desc_hi));
+        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.avail),
+                      COMMON_OFFSET(queue_avail_lo),
+                      COMMON_OFFSET(queue_avail_hi));
+        vpm_iowrite64(vdev, &vdev->common, virt_to_phys(vq->vring.used),
+                      COMMON_OFFSET(queue_used_lo),
+                      COMMON_OFFSET(queue_used_hi));
+
+        err = virtio_pci_map_capability(vdev->pci,
+            vdev->notify_cap_pos, 2, 2,
+            off * notify_offset_multiplier, 2,
+            &vq->notification);
+        if (err) {
+            goto err_map_notify;
+        }
+    }
+
+    /* Select and activate all queues. Has to be done last: once we do
+     * this, there's no way to go back except reset.
+     */
+    for (i = 0; i < nvqs; i++) {
+        vq = &vqs[i];
+        vpm_iowrite16(vdev, &vdev->common, (u16)vq->queue_index,
+                      COMMON_OFFSET(queue_select));
+        vpm_iowrite16(vdev, &vdev->common, 1, COMMON_OFFSET(queue_enable));
+    }
+    return 0;
+
+err_map_notify:
+    /* Undo the virtio_pci_map_capability calls. */
+    while (i-- > 0) {
+        virtio_pci_unmap_capability(&vqs[i].notification);
+    }
+    return err;
+}

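Callers of the vpm_ accessors above never branch on the access mechanism themselves; virtio_pci_map_capability() decides once, and every later read or write dispatches on region->flags. A minimal sketch of reading the ISR byte through whatever mechanism was mapped (the function name is hypothetical, and VIRTIO_PCI_CAP_ISR_CFG is assumed to be defined alongside the other VIRTIO_PCI_CAP_* constants from the virtio 1.0 spec):

/* Sketch: locate the ISR capability, map a 1-byte region, read it.
 * If the BAR is unmapped, the region silently degrades to the PCI
 * config space window (prep_pci_cfg_cap + pci_cfg_data above). */
static int example_read_isr(struct virtio_pci_modern_device *vdev)
{
    int pos = virtio_pci_find_capability(vdev->pci, VIRTIO_PCI_CAP_ISR_CFG);
    if (!pos)
        return -ENOENT;
    /* minlen 1, align 1, start 0, size 1 */
    if (virtio_pci_map_capability(vdev->pci, pos, 1, 1, 0, 1, &vdev->isr))
        return -EINVAL;
    return vpm_ioread8(vdev, &vdev->isr, 0);
}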
src/drivers/bus/virtio-ring.c (+12 −5)

@@ -18,8 +18,8 @@ FILE_LICENCE ( GPL2_OR_LATER );
 
 #include "etherboot.h"
 #include "ipxe/io.h"
-#include "ipxe/virtio-ring.h"
 #include "ipxe/virtio-pci.h"
+#include "ipxe/virtio-ring.h"
 
 #define BUG() do { \
    printf("BUG: failure at %s:%d/%s()!\n", \
@@ -122,7 +122,8 @@ void vring_add_buf(struct vring_virtqueue *vq,
    wmb();
 }
 
-void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added)
+void vring_kick(struct virtio_pci_modern_device *vdev, unsigned int ioaddr,
+                struct vring_virtqueue *vq, int num_added)
 {
    struct vring *vr = &vq->vring;
 
@@ -130,7 +131,13 @@ void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added)
    vr->avail->idx += num_added;
 
    mb();
-   if (!(vr->used->flags & VRING_USED_F_NO_NOTIFY))
-           vp_notify(ioaddr, vq->queue_index);
+   if (!(vr->used->flags & VRING_USED_F_NO_NOTIFY)) {
+           if (vdev) {
+                   /* virtio 1.0 */
+                   vpm_notify(vdev, vq);
+           } else {
+                   /* legacy virtio */
+                   vp_notify(ioaddr, vq->queue_index);
+           }
+   }
 }
-

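The extra vring_kick() argument selects the notification path at each call site. A hedged sketch of the two styles (the wrapper function is illustrative only; on the 1.0 path the ioaddr value is unused, so 0 is passed purely as a placeholder):

/* Illustrative only: how legacy and modern callers differ. */
static void example_kick(struct virtio_pci_modern_device *vdev,
                         unsigned int ioaddr, struct vring_virtqueue *vq)
{
    vring_kick(NULL, ioaddr, vq, 1);  /* legacy: vp_notify() on the port base */
    vring_kick(vdev, 0, vq, 1);       /* virtio 1.0: vpm_notify() on the region */
}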
src/drivers/net/virtio-net.c (+3 −2)

@@ -24,14 +24,15 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 
 #include <errno.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <ipxe/list.h>
 #include <ipxe/iobuf.h>
 #include <ipxe/netdevice.h>
 #include <ipxe/pci.h>
 #include <ipxe/if_ether.h>
 #include <ipxe/ethernet.h>
-#include <ipxe/virtio-ring.h>
 #include <ipxe/virtio-pci.h>
+#include <ipxe/virtio-ring.h>
 #include "virtio-net.h"
 
 /*
@@ -135,7 +136,7 @@ static void virtnet_enqueue_iob ( struct net_device *netdev,
 		virtnet, iobuf, vq_idx );
 
 	vring_add_buf ( vq, list, out, in, iobuf, 0 );
-	vring_kick ( virtnet->ioaddr, vq, 1 );
+	vring_kick ( NULL, virtnet->ioaddr, vq, 1 );
 }
 
 /** Try to keep rx virtqueue filled with iobufs

src/include/ipxe/errfile.h (+1 −0)

@@ -188,6 +188,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #define ERRFILE_eoib		     ( ERRFILE_DRIVER | 0x007c0000 )
 #define ERRFILE_golan		     ( ERRFILE_DRIVER | 0x007d0000 )
 #define ERRFILE_flexboot_nodnic	     ( ERRFILE_DRIVER | 0x007e0000 )
+#define ERRFILE_virtio_pci	     ( ERRFILE_DRIVER | 0x007f0000 )
 
 #define ERRFILE_aoe			( ERRFILE_NET | 0x00000000 )
 #define ERRFILE_arp			( ERRFILE_NET | 0x00010000 )

src/include/ipxe/virtio-pci.h (+147 −0)

@@ -97,6 +97,44 @@ struct virtio_pci_common_cfg {
     __le32 queue_used_hi;         /* read-write */
 };
 
+/* Virtio 1.0 PCI region descriptor. We support memory mapped I/O, port I/O,
+ * and PCI config space access via the cfg PCI capability as a fallback. */
+struct virtio_pci_region {
+    void *base;
+    size_t length;
+    u8 bar;
+
+/* How to interpret the base field */
+#define VIRTIO_PCI_REGION_TYPE_MASK  0x00000003
+/* The base field is a memory address */
+#define VIRTIO_PCI_REGION_MEMORY     0x00000000
+/* The base field is a port address */
+#define VIRTIO_PCI_REGION_PORT       0x00000001
+/* The base field is an offset within the PCI bar */
+#define VIRTIO_PCI_REGION_PCI_CONFIG 0x00000002
+    unsigned flags;
+};
+
+/* Virtio 1.0 device state */
+struct virtio_pci_modern_device {
+    struct pci_device *pci;
+
+    /* VIRTIO_PCI_CAP_PCI_CFG position */
+    int cfg_cap_pos;
+
+    /* VIRTIO_PCI_CAP_COMMON_CFG data */
+    struct virtio_pci_region common;
+
+    /* VIRTIO_PCI_CAP_DEVICE_CFG data */
+    struct virtio_pci_region device;
+
+    /* VIRTIO_PCI_CAP_ISR_CFG data */
+    struct virtio_pci_region isr;
+
+    /* VIRTIO_PCI_CAP_NOTIFY_CFG data */
+    int notify_cap_pos;
+};
+
 static inline u32 vp_get_features(unsigned int ioaddr)
 {
    return inl(ioaddr + VIRTIO_PCI_HOST_FEATURES);
@@ -156,6 +194,115 @@ static inline void vp_del_vq(unsigned int ioaddr, int queue_index)
    outl(0, ioaddr + VIRTIO_PCI_QUEUE_PFN);
 }
 
+struct vring_virtqueue;
+
 int vp_find_vq(unsigned int ioaddr, int queue_index,
                struct vring_virtqueue *vq);
+
+/* Virtio 1.0 I/O routines abstract away the three possible HW access
+ * mechanisms - memory, port I/O, and PCI cfg space access. Also built-in
+ * are endianness conversions - to LE on write and from LE on read. */
+
+void vpm_iowrite8(struct virtio_pci_modern_device *vdev,
+                  struct virtio_pci_region *region, u8 data, size_t offset);
+
+void vpm_iowrite16(struct virtio_pci_modern_device *vdev,
+                   struct virtio_pci_region *region, u16 data, size_t offset);
+
+void vpm_iowrite32(struct virtio_pci_modern_device *vdev,
+                   struct virtio_pci_region *region, u32 data, size_t offset);
+
+static inline void vpm_iowrite64(struct virtio_pci_modern_device *vdev,
+                                 struct virtio_pci_region *region,
+                                 u64 data, size_t offset_lo, size_t offset_hi)
+{
+    vpm_iowrite32(vdev, region, (u32)data, offset_lo);
+    vpm_iowrite32(vdev, region, data >> 32, offset_hi);
+}
+
+u8 vpm_ioread8(struct virtio_pci_modern_device *vdev,
+               struct virtio_pci_region *region, size_t offset);
+
+u16 vpm_ioread16(struct virtio_pci_modern_device *vdev,
+                 struct virtio_pci_region *region, size_t offset);
+
+u32 vpm_ioread32(struct virtio_pci_modern_device *vdev,
+                 struct virtio_pci_region *region, size_t offset);
+
+/* Virtio 1.0 device manipulation routines */
+
+#define COMMON_OFFSET(field) offsetof(struct virtio_pci_common_cfg, field)
+
+static inline void vpm_reset(struct virtio_pci_modern_device *vdev)
+{
+    vpm_iowrite8(vdev, &vdev->common, 0, COMMON_OFFSET(device_status));
+    while (vpm_ioread8(vdev, &vdev->common, COMMON_OFFSET(device_status)))
+        mdelay(1);
+}
+
+static inline u8 vpm_get_status(struct virtio_pci_modern_device *vdev)
+{
+    return vpm_ioread8(vdev, &vdev->common, COMMON_OFFSET(device_status));
+}
+
+static inline void vpm_add_status(struct virtio_pci_modern_device *vdev,
+                                  u8 status)
+{
+    u8 curr_status = vpm_ioread8(vdev, &vdev->common, COMMON_OFFSET(device_status));
+    vpm_iowrite8(vdev, &vdev->common,
+                 curr_status | status, COMMON_OFFSET(device_status));
+}
+
+static inline u64 vpm_get_features(struct virtio_pci_modern_device *vdev)
+{
+    u32 features_lo, features_hi;
+
+    vpm_iowrite32(vdev, &vdev->common, 0, COMMON_OFFSET(device_feature_select));
+    features_lo = vpm_ioread32(vdev, &vdev->common, COMMON_OFFSET(device_feature));
+    vpm_iowrite32(vdev, &vdev->common, 1, COMMON_OFFSET(device_feature_select));
+    features_hi = vpm_ioread32(vdev, &vdev->common, COMMON_OFFSET(device_feature));
+
+    return ((u64)features_hi << 32) | features_lo;
+}
+
+static inline void vpm_set_features(struct virtio_pci_modern_device *vdev,
+                                    u64 features)
+{
+    u32 features_lo = (u32)features;
+    u32 features_hi = features >> 32;
+
+    vpm_iowrite32(vdev, &vdev->common, 0, COMMON_OFFSET(guest_feature_select));
+    vpm_iowrite32(vdev, &vdev->common, features_lo, COMMON_OFFSET(guest_feature));
+    vpm_iowrite32(vdev, &vdev->common, 1, COMMON_OFFSET(guest_feature_select));
+    vpm_iowrite32(vdev, &vdev->common, features_hi, COMMON_OFFSET(guest_feature));
+}
+
+static inline void vpm_get(struct virtio_pci_modern_device *vdev,
+                           unsigned offset, void *buf, unsigned len)
+{
+    u8 *ptr = buf;
+    unsigned i;
+
+    for (i = 0; i < len; i++)
+        ptr[i] = vpm_ioread8(vdev, &vdev->device, offset + i);
+}
+
+static inline u8 vpm_get_isr(struct virtio_pci_modern_device *vdev)
+{
+    return vpm_ioread8(vdev, &vdev->isr, 0);
+}
+
+void vpm_notify(struct virtio_pci_modern_device *vdev,
+                struct vring_virtqueue *vq);
+
+int vpm_find_vqs(struct virtio_pci_modern_device *vdev,
+                 unsigned nvqs, struct vring_virtqueue *vqs);
+
+int virtio_pci_find_capability(struct pci_device *pci, uint8_t cfg_type);
+
+int virtio_pci_map_capability(struct pci_device *pci, int cap, size_t minlen,
+                              u32 align, u32 start, u32 size,
+                              struct virtio_pci_region *region);
+
+void virtio_pci_unmap_capability(struct virtio_pci_region *region);
 #endif /* _VIRTIO_PCI_H_ */

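Taken together these helpers form a complete 1.0 bring-up path. A minimal sketch of the intended call order (the function is hypothetical; it assumes VIRTIO_PCI_CAP_COMMON_CFG is defined per the virtio 1.0 spec, uses the VIRTIO_CONFIG_S_ACKNOWLEDGE status bit from virtio-ring.h, and elides mapping of the device/isr/notify regions plus most error handling):

/* Sketch: modern-device initialization with the new API.  Assumes
 * vdev->pci is set and vdev->notify_cap_pos / vdev->cfg_cap_pos
 * were located earlier via virtio_pci_find_capability(). */
static int example_bringup(struct virtio_pci_modern_device *vdev,
                           struct vring_virtqueue *vqs, unsigned nvqs)
{
    int common = virtio_pci_find_capability(vdev->pci,
                                            VIRTIO_PCI_CAP_COMMON_CFG);
    if (!common)
        return -ENODEV;                     /* not a virtio 1.0 device */
    if (virtio_pci_map_capability(vdev->pci, common,
                                  sizeof(struct virtio_pci_common_cfg), 4, 0,
                                  sizeof(struct virtio_pci_common_cfg),
                                  &vdev->common))
        return -EINVAL;

    vpm_reset(vdev);                        /* write 0, spin until it reads 0 */
    vpm_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);

    /* 64-bit feature negotiation via the feature_select windows */
    vpm_set_features(vdev, vpm_get_features(vdev));

    return vpm_find_vqs(vdev, nvqs, vqs);   /* map notify regions, enable vqs */
}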
src/include/ipxe/virtio-ring.h (+5 −1)

@@ -1,6 +1,8 @@
 #ifndef _VIRTIO_RING_H_
 # define _VIRTIO_RING_H_
 
+#include <ipxe/virtio-pci.h>
+
 /* Status byte for guest to report progress, and synchronize features. */
 /* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
 #define VIRTIO_CONFIG_S_ACKNOWLEDGE     1
@@ -79,6 +81,7 @@ struct vring_virtqueue {
    void *vdata[MAX_QUEUE_NUM];
    /* PCI */
    int queue_index;
+   struct virtio_pci_region notification;
 };
 
 struct vring_list {
@@ -142,6 +145,7 @@ void *vring_get_buf(struct vring_virtqueue *vq, unsigned int *len);
 void vring_add_buf(struct vring_virtqueue *vq, struct vring_list list[],
                    unsigned int out, unsigned int in,
                    void *index, int num_added);
-void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added);
+void vring_kick(struct virtio_pci_modern_device *vdev, unsigned int ioaddr,
+                struct vring_virtqueue *vq, int num_added);
 
 #endif /* _VIRTIO_RING_H_ */
