[SLOF] [PATCH v2 18/19] virtio-net: enable virtio 1.0
Nikunj A Dadhania
nikunj at linux.vnet.ibm.com
Wed Jan 20 23:10:32 AEDT 2016
Signed-off-by: Nikunj A Dadhania <nikunj at linux.vnet.ibm.com>
---
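A note on the feature bits used on the modern path: VIRTIO_F_VERSION_1 is
feature bit 32 and must be accepted for the device to operate as a virtio 1.0
device, and BIT(5) corresponds to VIRTIO_NET_F_MAC, the bit that makes the
MAC-address field in config space valid for the read at the end of
virtionet_init(). A minimal, stand-alone sketch of that acceptance check
(constant values follow the virtio 1.0 spec; the helper name is illustrative,
the real code goes through SLOF's virtio_negotiate_guest_features()):

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdbool.h>

#define VIRTIO_F_VERSION_1  (1ULL << 32)  /* device/driver operate as virtio 1.0 */
#define VIRTIO_NET_F_MAC    (1ULL << 5)   /* MAC address readable from config space */

static bool driver_features_ok(uint64_t host_features)
{
	uint64_t wanted = VIRTIO_F_VERSION_1 | VIRTIO_NET_F_MAC;

	/* Modern operation is only possible if the device offers
	 * every feature this driver depends on. */
	return (host_features & wanted) == wanted;
}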
lib/libvirtio/virtio-net.c | 145 ++++++++++++++++++++++++++++++++-------------
1 file changed, 105 insertions(+), 40 deletions(-)
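The struct virtio_net_hdr_v1 introduced below is the virtio 1.0 packet header:
the same fields as the legacy header plus a trailing num_buffers, with every
multi-byte field little-endian. It is two bytes larger, which is why
net_hdr_size is now selected at run time instead of being hard-coded to
sizeof(struct virtio_net_hdr). A stand-alone illustration of the two layouts
(struct names here are made up to avoid clashing with the driver's; the legacy
layout is taken from the virtio spec, it is not visible in this diff):

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>

struct net_hdr_legacy {			/* legacy, guest-endian fields */
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

struct net_hdr_v1 {			/* virtio 1.0, little-endian fields */
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
	uint16_t num_buffers;		/* used when VIRTIO_NET_F_MRG_RXBUF is negotiated */
};

/* 10 bytes vs. 12 bytes -- the reason for choosing net_hdr_size at run time. */
_Static_assert(sizeof(struct net_hdr_legacy) == 10, "legacy header is 10 bytes");
_Static_assert(sizeof(struct net_hdr_v1) == 12, "virtio 1.0 header is 12 bytes");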
diff --git a/lib/libvirtio/virtio-net.c b/lib/libvirtio/virtio-net.c
index aad7f9b..da9d224 100644
--- a/lib/libvirtio/virtio-net.c
+++ b/lib/libvirtio/virtio-net.c
@@ -54,6 +54,16 @@ struct virtio_net_hdr {
static unsigned int net_hdr_size;
+struct virtio_net_hdr_v1 {
+ uint8_t flags;
+ uint8_t gso_type;
+ le16 hdr_len;
+ le16 gso_size;
+ le16 csum_start;
+ le16 csum_offset;
+ le16 num_buffers;
+};
+
static uint16_t last_rx_idx; /* Last index in RX "used" ring */
/**
@@ -68,11 +78,7 @@ static int virtionet_init_pci(struct virtio_device *dev)
if (!dev)
return -1;
- virtiodev.base = dev->base;
- virtiodev.type = dev->type;
-
- /* Keep it disabled until the driver is 1.0 capable */
- virtiodev.is_modern = false;
+ memcpy(&virtiodev, dev, sizeof(struct virtio_device));
/* Reset device */
virtio_reset_device(&virtiodev);
@@ -117,9 +123,16 @@ static int virtionet_init(net_driver_t *driver)
virtio_set_status(&virtiodev, status);
/* Device specific setup - we do not support special features right now */
- virtio_set_guest_features(&virtiodev, 0);
+ if (virtiodev.is_modern) {
+ if (!virtio_negotiate_guest_features(&virtiodev, VIRTIO_F_VERSION_1 | (BIT(5))))
+ goto dev_error;
+ net_hdr_size = sizeof(struct virtio_net_hdr_v1);
+ virtio_get_status(&virtiodev, &status);
+ } else {
+ net_hdr_size = sizeof(struct virtio_net_hdr);
+ virtio_set_guest_features(&virtiodev, 0);
+ }
- net_hdr_size = sizeof(struct virtio_net_hdr);
/* Allocate memory for one transmit and multiple receive buffers */
vq_rx.buf_mem = SLOF_alloc_mem((BUFFER_ENTRY_SIZE+net_hdr_size)
* RX_QUEUE_SIZE);
@@ -134,24 +147,36 @@ static int virtionet_init(net_driver_t *driver)
+ i * (BUFFER_ENTRY_SIZE+net_hdr_size);
uint32_t id = i*2;
/* Descriptor for net_hdr: */
- virtio_fill_desc(&vq_rx.desc[id], false, addr, net_hdr_size,
+ virtio_fill_desc(&vq_rx.desc[id], virtiodev.is_modern, addr, net_hdr_size,
VRING_DESC_F_NEXT | VRING_DESC_F_WRITE, id + 1);
/* Descriptor for data: */
- virtio_fill_desc(&vq_rx.desc[id+1], false, addr + net_hdr_size,
+ virtio_fill_desc(&vq_rx.desc[id+1], virtiodev.is_modern, addr + net_hdr_size,
BUFFER_ENTRY_SIZE, VRING_DESC_F_WRITE, 0);
- vq_rx.avail->ring[i] = id;
+ if (virtiodev.is_modern)
+ vq_rx.avail->ring[i] = cpu_to_le16(id);
+ else
+ vq_rx.avail->ring[i] = id;
}
sync();
- vq_rx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq_rx.avail->idx = RX_QUEUE_SIZE;
+ if (virtiodev.is_modern) {
+ vq_rx.avail->flags = cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+ vq_rx.avail->idx = cpu_to_le16(RX_QUEUE_SIZE);
- last_rx_idx = vq_rx.used->idx;
+ last_rx_idx = le16_to_cpu(vq_rx.used->idx);
- vq_tx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- vq_tx.avail->idx = 0;
+ vq_tx.avail->flags = cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+ vq_tx.avail->idx = 0;
+ } else {
+ vq_rx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_rx.avail->idx = RX_QUEUE_SIZE;
+ last_rx_idx = vq_rx.used->idx;
+
+ vq_tx.avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
+ vq_tx.avail->idx = 0;
+ }
/* Tell HV that setup succeeded */
status |= VIRTIO_STAT_DRIVER_OK;
virtio_set_status(&virtiodev, status);
@@ -160,7 +185,9 @@ static int virtionet_init(net_driver_t *driver)
virtio_queue_notify(&virtiodev, VQ_RX);
driver->running = 1;
-
+ for(i = 0; i < sizeof(driver->mac_addr); i++) {
+ driver->mac_addr[i] = virtio_get_config(&virtiodev, i, 1);
+ }
return 0;
dev_error:
@@ -200,8 +227,10 @@ static int virtionet_term(net_driver_t *driver)
*/
static int virtionet_xmit(char *buf, int len)
{
- int id;
- static struct virtio_net_hdr nethdr;
+ int id, idx;
+ static struct virtio_net_hdr_v1 nethdr_v1;
+ static struct virtio_net_hdr nethdr_legacy;
+ void *nethdr = &nethdr_legacy;
if (len > BUFFER_ENTRY_SIZE) {
printf("virtionet: Packet too big!\n");
@@ -210,22 +239,36 @@ static int virtionet_xmit(char *buf, int len)
dprintf("\nvirtionet_xmit(packet at %p, %d bytes)\n", buf, len);
- memset(&nethdr, 0, net_hdr_size);
+ if (virtiodev.is_modern)
+ nethdr = &nethdr_v1;
+
+ memset(nethdr, 0, net_hdr_size);
/* Determine descriptor index */
- id = (vq_tx.avail->idx * 2) % vq_tx.size;
+ if (virtiodev.is_modern)
+ idx = le16_to_cpu(vq_tx.avail->idx);
+ else
+ idx = vq_tx.avail->idx;
+ id = (idx * 2) % vq_tx.size;
/* Set up virtqueue descriptor for header */
- virtio_fill_desc(&vq_tx.desc[id], false, (uint64_t)&nethdr,
- net_hdr_size, VRING_DESC_F_NEXT, id + 1);
+ virtio_fill_desc(&vq_tx.desc[id], virtiodev.is_modern, (uint64_t)nethdr,
+ net_hdr_size, VRING_DESC_F_NEXT, id + 1);
/* Set up virtqueue descriptor for data */
- virtio_fill_desc(&vq_tx.desc[id+1], false, (uint64_t)buf, len, 0, 0);
-
- vq_tx.avail->ring[vq_tx.avail->idx % vq_tx.size] = id;
- sync();
- vq_tx.avail->idx += 1;
- sync();
+ virtio_fill_desc(&vq_tx.desc[id+1], virtiodev.is_modern, (uint64_t)buf, len, 0, 0);
+
+ if (virtiodev.is_modern) {
+ vq_tx.avail->ring[idx % vq_tx.size] = cpu_to_le16(id);
+ sync();
+ vq_tx.avail->idx = cpu_to_le16(idx + 1);
+ sync();
+ } else {
+ vq_tx.avail->ring[idx % vq_tx.size] = id;
+ sync();
+ vq_tx.avail->idx += 1;
+ sync();
+ }
/* Tell HV that TX queue is ready */
virtio_queue_notify(&virtiodev, VQ_TX);
@@ -239,19 +282,31 @@ static int virtionet_xmit(char *buf, int len)
*/
static int virtionet_receive(char *buf, int maxlen)
{
- int len = 0;
- int id;
+ uint32_t len = 0;
+ uint32_t id, idx;
+
+ if (virtiodev.is_modern)
+ idx = le16_to_cpu(vq_rx.used->idx);
+ else
+ idx = vq_rx.used->idx;
- if (last_rx_idx == vq_rx.used->idx) {
+ if (last_rx_idx == idx) {
/* Nothing received yet */
return 0;
}
- id = (vq_rx.used->ring[last_rx_idx % vq_rx.size].id + 1)
- % vq_rx.size;
- len = vq_rx.used->ring[last_rx_idx % vq_rx.size].len
- - net_hdr_size;
-
+ if (virtiodev.is_modern) {
+ id = (le32_to_cpu(vq_rx.used->ring[last_rx_idx % vq_rx.size].id) + 1)
+ % vq_rx.size;
+ len = le32_to_cpu(vq_rx.used->ring[last_rx_idx % vq_rx.size].len)
+ - net_hdr_size;
+ dprintf("%p id %x len %x\n", vq_rx.used, vq_rx.used->ring[last_rx_idx % vq_rx.size].id, vq_rx.used->ring[last_rx_idx % vq_rx.size].len);
+ } else {
+ id = (vq_rx.used->ring[last_rx_idx % vq_rx.size].id + 1)
+ % vq_rx.size;
+ len = vq_rx.used->ring[last_rx_idx % vq_rx.size].len
+ - net_hdr_size;
+ }
dprintf("virtionet_receive() last_rx_idx=%i, vq_rx.used->idx=%i,"
" id=%i len=%i\n", last_rx_idx, vq_rx.used->idx, id, len);
@@ -273,14 +328,24 @@ static int virtionet_receive(char *buf, int maxlen)
#endif
/* Copy data to destination buffer */
- memcpy(buf, (void*)vq_rx.desc[id].addr, len);
+ if (virtiodev.is_modern)
+ memcpy(buf, (void*)le64_to_cpu(vq_rx.desc[id].addr), len);
+ else
+ memcpy(buf, (void*)vq_rx.desc[id].addr, len);
/* Move indices to next entries */
last_rx_idx = last_rx_idx + 1;
- vq_rx.avail->ring[vq_rx.avail->idx % vq_rx.size] = id - 1;
- sync();
- vq_rx.avail->idx += 1;
+ if (virtiodev.is_modern) {
+ vq_rx.avail->ring[idx % vq_rx.size] = cpu_to_le16(id - 1);
+ sync();
+ vq_rx.avail->idx = cpu_to_le16(idx + 1);
+ }
+ else {
+ vq_rx.avail->ring[idx % vq_rx.size] = id - 1;
+ sync();
+ vq_rx.avail->idx += 1;
+ }
/* Tell HV that RX queue entry is ready */
virtio_queue_notify(&virtiodev, VQ_RX);
--
2.5.0
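Background on the byte swapping added throughout the patch: SLOF runs
big-endian on ppc64, legacy virtio rings use guest-native byte order, and
virtio 1.0 mandates little-endian ring fields. That is why every avail/used
access on the is_modern path above gains a cpu_to_le16()/le16_to_cpu() wrapper
(le32/le64 for descriptor fields). A hypothetical helper pair showing the same
pattern with the standard endian.h conversions (names are illustrative; SLOF
uses its own byteswap helpers):

/* Illustrative sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdbool.h>
#include <endian.h>		/* htole16()/le16toh() */

/* Value to store into an avail-ring field that the device will read. */
static inline uint16_t ring_store16(bool is_modern, uint16_t v)
{
	return is_modern ? htole16(v) : v;	/* legacy: keep guest-native order */
}

/* Value loaded back from a used-ring field that the device has written. */
static inline uint16_t ring_load16(bool is_modern, uint16_t v)
{
	return is_modern ? le16toh(v) : v;
}

On a little-endian guest both branches are byte-for-byte identical; the
conversions only change behaviour on big-endian machines such as the ppc64
systems SLOF targets.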