[SLOF] [PATCH v1 23/27] virtio-net: simplify and modularize driver
Nikunj A Dadhania
nikunj at linux.vnet.ibm.com
Wed Jan 13 22:17:10 AEDT 2016
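Replace the two-element vq[] array with dedicated vq_rx and vq_tx
structures, set up the virtqueues through the common
virtio_queue_init_vq() helper instead of open-coding the ring
initialization, and factor descriptor setup into a fill_desc() helper
that can also produce the little-endian field layout used by modern
(virtio 1.0) devices (all callers still pass is_modern = 0 for now).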
Signed-off-by: Nikunj A Dadhania <nikunj at linux.vnet.ibm.com>
---
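Note: virtio_queue_init_vq() comes from libvirtio and is not part of
this patch. For reference only, a sketch of what that helper presumably
does for a legacy device, reconstructed from the loop removed below
(the exact signature, return convention and the availability of the
VIRTIOHDR_* constants inside libvirtio are assumptions, not taken from
this patch):

/* Sketch only: legacy-device virtqueue setup, folded into one helper */
static bool virtio_queue_init_vq(struct virtio_device *dev,
                                 struct vqs *vq, unsigned int id)
{
	vq->id = id;
	/* Select the ring and read back its size from the device */
	ci_write_16(dev->base + VIRTIOHDR_QUEUE_SELECT, cpu_to_le16(id));
	vq->size = le16_to_cpu(ci_read_16(dev->base + VIRTIOHDR_QUEUE_SIZE));

	/* Allocate and clear the vring, then hand its address to the device */
	vq->desc = SLOF_alloc_mem_aligned(virtio_vring_size(vq->size), 4096);
	if (!vq->desc) {
		printf("memory allocation failed!\n");
		return false;
	}
	memset(vq->desc, 0, virtio_vring_size(vq->size));
	ci_write_32(dev->base + VIRTIOHDR_QUEUE_ADDRESS,
		    cpu_to_le32((long)vq->desc / 4096));

	/* The avail and used rings follow the descriptor table */
	vq->avail = (void *)vq->desc + vq->size * sizeof(struct vring_desc);
	vq->used = (void *)VQ_ALIGN((long)vq->avail
				    + vq->size * sizeof(struct vring_avail));
	return true;
}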
lib/libvirtio/virtio-net.c | 137 ++++++++++++++++++---------------------------
1 file changed, 54 insertions(+), 83 deletions(-)
diff --git a/lib/libvirtio/virtio-net.c b/lib/libvirtio/virtio-net.c
index 99c19d9..18ad2f7 100644
--- a/lib/libvirtio/virtio-net.c
+++ b/lib/libvirtio/virtio-net.c
@@ -21,6 +21,7 @@
#include <stdint.h>
#include <stdio.h>
#include <string.h>
+#include <stdbool.h>
#include <helpers.h>
#include <cache.h>
#include <byteorder.h>
@@ -37,20 +38,9 @@
#define sync() asm volatile (" sync \n" ::: "memory")
-/* PCI virtio header offsets */
-#define VIRTIOHDR_DEVICE_FEATURES 0
-#define VIRTIOHDR_GUEST_FEATURES 4
-#define VIRTIOHDR_QUEUE_ADDRESS 8
-#define VIRTIOHDR_QUEUE_SIZE 12
-#define VIRTIOHDR_QUEUE_SELECT 14
-#define VIRTIOHDR_QUEUE_NOTIFY 16
-#define VIRTIOHDR_DEVICE_STATUS 18
-#define VIRTIOHDR_ISR_STATUS 19
-#define VIRTIOHDR_DEVICE_CONFIG 20
-#define VIRTIOHDR_MAC_ADDRESS 20
-
struct virtio_device virtiodev;
-struct vqs vq[2]; /* Information about virtqueues */
+struct vqs vq_rx; /* Information about virtqueues */
+struct vqs vq_tx;
/* See Virtio Spec, appendix C, "Device Operation" */
struct virtio_net_hdr {
@@ -72,8 +62,6 @@ static uint16_t last_rx_idx; /* Last index in RX "used" ring */
*/
static int virtionet_init_pci(struct virtio_device *dev)
{
- int i;
-
dprintf("virtionet: doing virtionet_init_pci!\n");
if (!dev)
@@ -90,30 +78,9 @@ static int virtionet_init_pci(struct virtio_device *dev)
* second the transmit queue, and the fourth is the control queue for
* networking options.
* We are only interested in the receive and transmit queue here. */
-
- for (i=VQ_RX; i<=VQ_TX; i++) {
- /* Select ring (0=RX, 1=TX): */
- vq[i].id = i-VQ_RX;
- ci_write_16(virtiodev.base+VIRTIOHDR_QUEUE_SELECT,
- cpu_to_le16(vq[i].id));
-
- vq[i].size = le16_to_cpu(ci_read_16(virtiodev.base+VIRTIOHDR_QUEUE_SIZE));
- vq[i].desc = SLOF_alloc_mem_aligned(virtio_vring_size(vq[i].size), 4096);
- if (!vq[i].desc) {
- printf("memory allocation failed!\n");
- return -1;
- }
- memset(vq[i].desc, 0, virtio_vring_size(vq[i].size));
- ci_write_32(virtiodev.base+VIRTIOHDR_QUEUE_ADDRESS,
- cpu_to_le32((long)vq[i].desc / 4096));
- vq[i].avail = (void*)vq[i].desc
- + vq[i].size * sizeof(struct vring_desc);
- vq[i].used = (void*)VQ_ALIGN((long)vq[i].avail
- + vq[i].size * sizeof(struct vring_avail));
-
- dprintf("%i: vq.id = %llx\nvq.size =%x\n vq.avail =%p\nvq.used=%p\n",
- i, vq[i].id, vq[i].size, vq[i].avail, vq[i].used);
- }
+ if (!virtio_queue_init_vq(dev, &vq_rx, VQ_RX) ||
+ !virtio_queue_init_vq(dev, &vq_tx, VQ_TX))
+ return -1;
/* Acknowledge device. */
virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE);
@@ -121,6 +88,23 @@ static int virtionet_init_pci(struct virtio_device *dev)
return 0;
}
+static void fill_desc(struct vring_desc *desc, uint32_t is_modern,
+ uint64_t addr, uint32_t len,
+ uint16_t flags, uint16_t next)
+{
+ if (is_modern) {
+ desc->addr = cpu_to_le64(addr);
+ desc->len = cpu_to_le32(len);
+ desc->flags = cpu_to_le16(flags);
+ desc->next = cpu_to_le16(next);
+ } else {
+ desc->addr = addr;
+ desc->len = len;
+ desc->flags = flags;
+ desc->next = next;
+ }
+}
+
/**
* Initialize the virtio-net device.
* See the Virtio Spec, chapter 2.2.1 and Appendix C "Device Initialization"
@@ -145,9 +129,9 @@ static int virtionet_init(net_driver_t *driver)
virtio_set_guest_features(&virtiodev, 0);
/* Allocate memory for one transmit and multiple receive buffers */
- vq[VQ_RX].buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+sizeof(struct virtio_net_hdr))
+ vq_rx.buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+sizeof(struct virtio_net_hdr))
* RX_QUEUE_SIZE);
- if (!vq[VQ_RX].buf_mem) {
+ if (!vq_rx.buf_mem) {
printf("virtionet: Failed to allocate buffers!\n");
virtio_set_status(&virtiodev, VIRTIO_STAT_FAILED);
return -1;
@@ -155,32 +139,27 @@ static int virtionet_init(net_driver_t *driver)
/* Prepare receive buffer queue */
for (i = 0; i < RX_QUEUE_SIZE; i++) {
- struct vring_desc *desc;
+ uint64_t addr = (uint64_t)vq_rx.buf_mem
+ + i * (BUFFER_ENTRY_SIZE+sizeof(struct virtio_net_hdr));
+ uint32_t id = i*2;
/* Descriptor for net_hdr: */
- desc = &vq[VQ_RX].desc[i*2];
- desc->addr = (uint64_t)vq[VQ_RX].buf_mem
- + i * (BUFFER_ENTRY_SIZE+sizeof(struct virtio_net_hdr));
- desc->len = sizeof(struct virtio_net_hdr);
- desc->flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
- desc->next = i*2+1;
+ fill_desc(&vq_rx.desc[id], 0, addr, sizeof(struct virtio_net_hdr),
+ VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);
/* Descriptor for data: */
- desc = &vq[VQ_RX].desc[i*2+1];
- desc->addr = vq[VQ_RX].desc[i*2].addr + sizeof(struct virtio_net_hdr);
- desc->len = BUFFER_ENTRY_SIZE;
- desc->flags = VRING_DESC_F_WRITE;
- desc->next = 0;
+ fill_desc(&vq_rx.desc[id+1], 0, addr + sizeof(struct virtio_net_hdr),
+ BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);
- vq[VQ_RX].avail->ring[i] = i*2;
+ vq_rx.avail->ring[i] = id;
}
sync();
- vq[VQ_RX].avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq[VQ_RX].avail->idx = RX_QUEUE_SIZE;
+ vq_rx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_rx.avail->idx = RX_QUEUE_SIZE;
- last_rx_idx = vq[VQ_RX].used->idx;
+ last_rx_idx = vq_rx.used->idx;
- vq[VQ_TX].avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq[VQ_TX].avail->idx = 0;
+ vq_tx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_tx.avail->idx = 0;
/* Tell HV that setup succeeded */
virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE
@@ -225,7 +204,6 @@ static int virtionet_term(net_driver_t *driver)
*/
static int virtionet_xmit(char *buf, int len)
{
- struct vring_desc *desc;
int id;
static struct virtio_net_hdr nethdr;
@@ -239,25 +217,18 @@ static int virtionet_xmit(char *buf, int len)
memset(&nethdr, 0, sizeof(nethdr));
/* Determine descriptor index */
- id = (vq[VQ_TX].avail->idx * 2) % vq[VQ_TX].size;
+ id = (vq_tx.avail->idx * 2) % vq_tx.size;
/* Set up virtqueue descriptor for header */
- desc = &vq[VQ_TX].desc[id];
- desc->addr = (uint64_t)&nethdr;
- desc->len = sizeof(struct virtio_net_hdr);
- desc->flags = VRING_DESC_F_NEXT;
- desc->next = id + 1;
+ fill_desc(&vq_tx.desc[id], 0, (uint64_t)&nethdr,
+ sizeof(struct virtio_net_hdr), VRING_DESC_F_NEXT, id + 1);
/* Set up virtqueue descriptor for data */
- desc = &vq[VQ_TX].desc[id+1];
- desc->addr = (uint64_t)buf;
- desc->len = len;
- desc->flags = 0;
- desc->next = 0;
+ fill_desc(&vq_tx.desc[id+1], 0, (uint64_t)buf, len, 0, 0);
- vq[VQ_TX].avail->ring[vq[VQ_TX].avail->idx % vq[VQ_TX].size] = id;
+ vq_tx.avail->ring[vq_tx.avail->idx % vq_tx.size] = id;
sync();
- vq[VQ_TX].avail->idx += 1;
+ vq_tx.avail->idx += 1;
sync();
/* Tell HV that TX queue is ready */
@@ -275,18 +246,18 @@ static int virtionet_receive(char *buf, int maxlen)
int len = 0;
int id;
- if (last_rx_idx == vq[VQ_RX].used->idx) {
+ if (last_rx_idx == vq_rx.used->idx) {
/* Nothing received yet */
return 0;
}
- id = (vq[VQ_RX].used->ring[last_rx_idx % vq[VQ_RX].size].id + 1)
- % vq[VQ_RX].size;
- len = vq[VQ_RX].used->ring[last_rx_idx % vq[VQ_RX].size].len
+ id = (vq_rx.used->ring[last_rx_idx % vq_rx.size].id + 1)
+ % vq_rx.size;
+ len = vq_rx.used->ring[last_rx_idx % vq_rx.size].len
- sizeof(struct virtio_net_hdr);
- dprintf("virtionet_receive() last_rx_idx=%i, vq[VQ_RX].used->idx=%i,"
- " id=%i len=%i\n", last_rx_idx, vq[VQ_RX].used->idx, id, len);
+ dprintf("virtionet_receive() last_rx_idx=%i, vq_rx.used->idx=%i,"
+ " id=%i len=%i\n", last_rx_idx, vq_rx.used->idx, id, len);
if (len > maxlen) {
printf("virtio-net: Receive buffer not big enough!\n");
@@ -298,7 +269,7 @@ static int virtionet_receive(char *buf, int maxlen)
printf("\n");
int i;
for (i=0; i<64; i++) {
- printf(" %02x", *(uint8_t*)(vq[VQ_RX].desc[id].addr+i));
+ printf(" %02x", *(uint8_t*)(vq_rx.desc[id].addr+i));
if ((i%16)==15)
printf("\n");
}
@@ -306,14 +277,14 @@ static int virtionet_receive(char *buf, int maxlen)
#endif
/* Copy data to destination buffer */
- memcpy(buf, (void*)vq[VQ_RX].desc[id].addr, len);
+ memcpy(buf, (void*)vq_rx.desc[id].addr, len);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
- vq[VQ_RX].avail->ring[vq[VQ_RX].avail->idx % vq[VQ_RX].size] = id - 1;
+ vq_rx.avail->ring[vq_rx.avail->idx % vq_rx.size] = id - 1;
sync();
- vq[VQ_RX].avail->idx += 1;
+ vq_rx.avail->idx += 1;
/* Tell HV that RX queue entry is ready */
virtio_queue_notify(&virtiodev, VQ_RX);
--
2.5.0