[SLOF] [PATCH v1 26/27] virtio-net: make driver virtio 1.0 compatible
Nikunj A Dadhania
nikunj at linux.vnet.ibm.com
Wed Jan 13 22:17:13 AEDT 2016
When the device is modern, negotiate VIRTIO_F_VERSION_1, switch to the
virtio 1.0 network header (which carries num_buffers), acknowledge the
FEATURES_OK status step, and access the virtqueue rings in little-endian
byte order. The MAC address is now read from the device config space,
and the local fill_desc() helper is replaced by the shared
virtio_fill_desc().

Signed-off-by: Nikunj A Dadhania <nikunj at linux.vnet.ibm.com>
---
lib/libvirtio/virtio-net.c | 170 +++++++++++++++++++++++++++++----------------
1 file changed, 111 insertions(+), 59 deletions(-)
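Note for reviewers: virtio_fill_desc() is not defined in this patch; it is
presumably the shared libvirtio replacement for the local fill_desc() removed
below, introduced earlier in this series. Going by the removed helper and the
call sites here, a minimal sketch of that function would be:

static void virtio_fill_desc(struct vring_desc *desc, uint32_t is_modern,
                             uint64_t addr, uint32_t len,
                             uint16_t flags, uint16_t next)
{
        if (is_modern) {
                /* Virtio 1.0 devices expect little-endian descriptor fields */
                desc->addr = cpu_to_le64(addr);
                desc->len = cpu_to_le32(len);
                desc->flags = cpu_to_le16(flags);
                desc->next = cpu_to_le16(next);
        } else {
                /* Legacy devices use guest-native byte order */
                desc->addr = addr;
                desc->len = len;
                desc->flags = flags;
                desc->next = next;
        }
}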
diff --git a/lib/libvirtio/virtio-net.c b/lib/libvirtio/virtio-net.c
index 9c51a39..d789c38 100644
--- a/lib/libvirtio/virtio-net.c
+++ b/lib/libvirtio/virtio-net.c
@@ -42,7 +42,7 @@ struct virtio_device virtiodev;
struct vqs vq_rx; /* Information about virtqueues */
struct vqs vq_tx;
-/* See Virtio Spec, appendix C, "Device Operation" */
+/* See Virtio Spec, appendix C, "Device Operation" */
struct virtio_net_hdr {
uint8_t flags;
uint8_t gso_type;
@@ -55,6 +55,16 @@ struct virtio_net_hdr {
static unsigned int net_hdr_size;
+struct virtio_net_hdr_v1 {
+ uint8_t flags;
+ uint8_t gso_type;
+ uint16_t hdr_len;
+ uint16_t gso_size;
+ uint16_t csum_start;
+ uint16_t csum_offset;
+ uint16_t num_buffers;
+};
+
static uint16_t last_rx_idx; /* Last index in RX "used" ring */
/**
@@ -69,8 +79,7 @@ static int virtionet_init_pci(struct virtio_device *dev)
if (!dev)
return -1;
- virtiodev.base = dev->base;
- virtiodev.type = dev->type;
+ memcpy(&virtiodev, dev, sizeof(struct virtio_device));
/* Reset device */
virtio_reset_device(&virtiodev);
@@ -90,23 +99,6 @@ static int virtionet_init_pci(struct virtio_device *dev)
return 0;
}
-static void fill_desc(struct vring_desc *desc, uint32_t is_modern,
- uint64_t addr, uint32_t len,
- uint16_t flags, uint16_t next)
-{
- if (is_modern) {
- desc->addr = cpu_to_le64(addr);
- desc->len = cpu_to_le32(len);
- desc->flags = cpu_to_le16(flags);
- desc->next = cpu_to_le16(next);
- } else {
- desc->addr = addr;
- desc->len = len;
- desc->flags = flags;
- desc->next = next;
- }
-}
-
/**
* Initialize the virtio-net device.
* See the Virtio Spec, chapter 2.2.1 and Appendix C "Device Initialization"
@@ -115,6 +107,7 @@ static void fill_desc(struct vring_desc *desc, uint32_t is_modern,
static int virtionet_init(net_driver_t *driver)
{
int i;
+ uint32_t status;
dprintf("virtionet_init(%02x:%02x:%02x:%02x:%02x:%02x)\n",
driver->mac_addr[0], driver->mac_addr[1],
@@ -125,19 +118,27 @@ static int virtionet_init(net_driver_t *driver)
return 0;
/* Tell HV that we know how to drive the device. */
- virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE|VIRTIO_STAT_DRIVER);
+ status = VIRTIO_STAT_ACKNOWLEDGE | VIRTIO_STAT_DRIVER;
+ virtio_set_status(&virtiodev, status);
/* Device specific setup - we do not support special features right now */
- virtio_set_guest_features(&virtiodev, 0);
+ if (virtiodev.is_modern) {
+ if (!virtio_negotiate_guest_features(&virtiodev, VIRTIO_F_VERSION_1 | (BIT(5))))
+ goto dev_error;
+ net_hdr_size = sizeof(struct virtio_net_hdr_v1);
+ status |= VIRTIO_STAT_FEATURES_OK;
+ virtio_set_status(&virtiodev, status);
+ } else {
+ net_hdr_size = sizeof(struct virtio_net_hdr);
+ virtio_set_guest_features(&virtiodev, 0);
+ }
- net_hdr_size = sizeof(struct virtio_net_hdr);
/* Allocate memory for one transmit and multiple receive buffers */
vq_rx.buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+net_hdr_size)
* RX_QUEUE_SIZE);
if (!vq_rx.buf_mem) {
printf("virtionet: Failed to allocate buffers!\n");
- virtio_set_status(&virtiodev, VIRTIO_STAT_FAILED);
- return -1;
+ goto dev_error;
}
/* Prepare receive buffer queue */
@@ -146,35 +147,53 @@ static int virtionet_init(net_driver_t *driver)
+ i * (BUFFER_ENTRY_SIZE+net_hdr_size);
uint32_t id = i*2;
/* Descriptor for net_hdr: */
- fill_desc(&vq_rx.desc[id], 0, addr, net_hdr_size,
+ virtio_fill_desc(&vq_rx.desc[id], virtiodev.is_modern, addr, net_hdr_size,
VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);
/* Descriptor for data: */
- fill_desc(&vq_rx.desc[id+1], 0, addr + net_hdr_size,
+ virtio_fill_desc(&vq_rx.desc[id+1], virtiodev.is_modern, addr + net_hdr_size,
BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);
- vq_rx.avail->ring[i] = id;
+ if (virtiodev.is_modern)
+ vq_rx.avail->ring[i] = cpu_to_le16(id);
+ else
+ vq_rx.avail->ring[i] = id;
}
sync();
- vq_rx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq_rx.avail->idx = RX_QUEUE_SIZE;
+ if (virtiodev.is_modern) {
+ vq_rx.avail->flags = cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+ vq_rx.avail->idx = cpu_to_le16(RX_QUEUE_SIZE);
- last_rx_idx = vq_rx.used->idx;
+ last_rx_idx = le16_to_cpu(vq_rx.used->idx);
+
+ vq_tx.avail->flags = cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+ vq_tx.avail->idx = 0;
+ } else {
+ vq_rx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_rx.avail->idx = RX_QUEUE_SIZE;
- vq_tx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq_tx.avail->idx = 0;
+ last_rx_idx = vq_rx.used->idx;
+ vq_tx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_tx.avail->idx = 0;
+ }
/* Tell HV that setup succeeded */
- virtio_set_status(&virtiodev, VIRTIO_STAT_ACKNOWLEDGE
- |VIRTIO_STAT_DRIVER
- |VIRTIO_STAT_DRIVER_OK);
+ status |= VIRTIO_STAT_DRIVER_OK;
+ virtio_set_status(&virtiodev, status);
/* Tell HV that RX queues are ready */
virtio_queue_notify(&virtiodev, VQ_RX);
driver->running = 1;
-
+ for (i = 0; i < 6; i++) {
+ driver->mac_addr[i] = virtio_get_config(&virtiodev, i, 1);
+ }
return 0;
+dev_error:
+ driver->running = 0;
+ status |= VIRTIO_STAT_FAILED;
+ virtio_set_status(&virtiodev, status);
+ return -1;
}
@@ -207,7 +226,7 @@ static int virtionet_term(net_driver_t *driver)
*/
static int virtionet_xmit(char *buf, int len)
{
- int id;
+ int id, idx;
static struct virtio_net_hdr nethdr;
if (len > BUFFER_ENTRY_SIZE) {
@@ -220,20 +239,32 @@ static int virtionet_xmit(char *buf, int len)
memset(&nethdr, 0, sizeof(nethdr));
/* Determine descriptor index */
- id = (vq_tx.avail->idx * 2) % vq_tx.size;
+ if (virtiodev.is_modern)
+ idx = le16_to_cpu(vq_tx.avail->idx);
+ else
+ idx = vq_tx.avail->idx;
+ id = (idx * 2) % vq_tx.size;
/* Set up virtqueue descriptor for header */
- fill_desc(&vq_tx.desc[id], 0, (uint64_t)&nethdr,
+ virtio_fill_desc(&vq_tx.desc[id], virtiodev.is_modern, (uint64_t)&nethdr,
net_hdr_size, VRING_DESC_F_NEXT, id + 1);
/* Set up virtqueue descriptor for data */
- fill_desc(&vq_tx.desc[id+1], 0, (uint64_t)buf, len, 0, 0);
+ virtio_fill_desc(&vq_tx.desc[id+1], virtiodev.is_modern, (uint64_t)buf, len, 0, 0);
- vq_tx.avail->ring[vq_tx.avail->idx % vq_tx.size] = id;
- sync();
- vq_tx.avail->idx += 1;
- sync();
+ if (virtiodev.is_modern) {
+ vq_tx.avail->ring[idx % vq_tx.size] = cpu_to_le16(id);
+ sync();
+ vq_tx.avail->idx = cpu_to_le16(idx + 1);
+ sync();
+ } else {
+ vq_tx.avail->ring[idx % vq_tx.size] = id;
+ sync();
+ vq_tx.avail->idx += 1;
+ sync();
+ }
+ dprintf("virtionet_xmit: id %d, idx %d\n", id, idx);
/* Tell HV that TX queue is ready */
virtio_queue_notify(&virtiodev, VQ_TX);
@@ -246,19 +277,31 @@ static int virtionet_xmit(char *buf, int len)
*/
static int virtionet_receive(char *buf, int maxlen)
{
- int len = 0;
- int id;
+ uint32_t len = 0;
+ uint32_t id, idx;
- if (last_rx_idx == vq_rx.used->idx) {
+ if (virtiodev.is_modern)
+ idx = le16_to_cpu(vq_rx.used->idx);
+ else
+ idx = vq_rx.used->idx;
+
+ if (last_rx_idx == idx) {
/* Nothing received yet */
return 0;
}
- id = (vq_rx.used->ring[last_rx_idx % vq_rx.size].id + 1)
- % vq_rx.size;
- len = vq_rx.used->ring[last_rx_idx % vq_rx.size].len
- - net_hdr_size;
-
+ if (virtiodev.is_modern) {
+ id = (le32_to_cpu(vq_rx.used->ring[last_rx_idx % vq_rx.size].id) + 1)
+ % vq_rx.size;
+ len = le32_to_cpu(vq_rx.used->ring[last_rx_idx % vq_rx.size].len)
+ - net_hdr_size;
+ dprintf("%p id %x len %x\n", vq_rx.used, vq_rx.used->ring[last_rx_idx % vq_rx.size].id, vq_rx.used->ring[last_rx_idx % vq_rx.size].len);
+ } else {
+ id = (vq_rx.used->ring[last_rx_idx % vq_rx.size].id + 1)
+ % vq_rx.size;
+ len = vq_rx.used->ring[last_rx_idx % vq_rx.size].len
+ - net_hdr_size;
+ }
dprintf("virtionet_receive() last_rx_idx=%i, vq_rx.used->idx=%i,"
" id=%i len=%i\n", last_rx_idx, vq_rx.used->idx, id, len);
@@ -276,19 +319,28 @@ static int virtionet_receive(char *buf, int maxlen)
if ((i%16)==15)
printf("\n");
}
- prinfk("\n");
+ printf("\n");
#endif
/* Copy data to destination buffer */
- memcpy(buf, (void*)vq_rx.desc[id].addr, len);
+ if (virtiodev.is_modern)
+ memcpy(buf, (void*)le64_to_cpu(vq_rx.desc[id].addr), len);
+ else
+ memcpy(buf, (void*)vq_rx.desc[id].addr, len);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
- vq_rx.avail->ring[vq_rx.avail->idx % vq_rx.size] = id - 1;
- sync();
- vq_rx.avail->idx += 1;
-
+ if (virtiodev.is_modern) {
+ vq_rx.avail->ring[idx % vq_rx.size] = cpu_to_le16(id - 1);
+ sync();
+ vq_rx.avail->idx = cpu_to_le16(idx + 1);
+ }
+ else {
+ vq_rx.avail->ring[idx % vq_rx.size] = id - 1;
+ sync();
+ vq_rx.avail->idx += 1;
+ }
/* Tell HV that RX queue entry is ready */
virtio_queue_notify(&virtiodev, VQ_RX);
--
2.5.0
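For reference, SLOF runs on big-endian POWER, so the cpu_to_le16/32/64 and
le16/32/64_to_cpu accessors used above have to byte-swap when talking to a
virtio 1.0 (little-endian) device. Their actual definitions are not part of
this patch; a rough sketch, assuming plain byte-swap helpers, would be:

static inline uint16_t cpu_to_le16(uint16_t x)
{
        /* swap the two bytes; the swap is its own inverse, so
         * le16_to_cpu() can simply reuse it */
        return (uint16_t)((x >> 8) | (x << 8));
}

static inline uint32_t cpu_to_le32(uint32_t x)
{
        return ((uint32_t)cpu_to_le16(x & 0xffff) << 16) | cpu_to_le16(x >> 16);
}

static inline uint64_t cpu_to_le64(uint64_t x)
{
        return ((uint64_t)cpu_to_le32(x & 0xffffffff) << 32) | cpu_to_le32(x >> 32);
}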