This is the Ethernet over PCI-E driver. With it you get reasonable performance:

~ $ netperf -H 10.0.0.1 -f M -t UDP_STREAM
UDP UNIDIRECTIONAL SEND TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 (10.0.0.1) port 0 AF_INET
Socket  Message  Elapsed      Messages
Size    Size     Time         Okay Errors   Throughput
bytes   bytes    secs            #      #   MBytes/sec

122880   65507   10.00       62886      0     392.81
129024           10.00       62871            392.72

~ $ netperf -H 10.0.0.1 -f M -t TCP_STREAM
TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.0.0.1 (10.0.0.1) port 0 AF_INET
Recv   Send    Send
Socket Socket  Message  Elapsed
Size   Size    Size     Time     Throughput
bytes  bytes   bytes    secs.    MBytes/sec

 87380  16384  16384    10.00     482.46
~ $

Signed-off-by: Jean-Christophe DUBOIS <jdubois@mc.com>

--
Index: linux-2.6.21/drivers/axon/nic/axon_nic.c
===================================================================
--- /dev/null
+++ linux-2.6.21/drivers/axon/nic/axon_nic.c
@@ -0,0 +1,1167 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+
+
+MODULE_DESCRIPTION("Ethernet-o-PCIe CAB Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jean-Christophe DUBOIS (jdubois@mc.com)");
+
+#include
+
+
+
+
+#define AXON_NIC_SMS_INVALID 0
+#define AXON_NIC_SMS_SKB_AVAIL 1
+#define AXON_NIC_SMS_SKB_XFERD 2
+#define AXON_NIC_SMS_SKB_CANCEL 3
+
+#define AXON_NIC_SMS_MAX AXON_NIC_SMS_SKB_CANCEL
+
+
+#define AXON_NIC_SMS_BYTE_TYPE 0x0
+#define AXON_NIC_SMS_BYTE_NODE 0x1
+#define AXON_NIC_SMS_BYTE_SKB_TX_ID 0x2
+#define AXON_NIC_SMS_BYTE_SKB_SZ 0x3
+#define AXON_NIC_SMS_BYTE_SKB_PLB 0x7
+
+
+#define AXON_NIC_TX_QUEUE_LEN 255
+
+#ifdef __powerpc__
+#define AXON_NIC_MAC_ADDR "\2AX0N0"
+#else
+#define AXON_NIC_MAC_ADDR "\2AX1N0"
+#endif
+
+typedef struct axon_nic_stat_t {
+ u64 tx_max_time;
+ u64 tx_min_time;
+ u32 rx_cnt;
+} axon_nic_stat_t;
+
+typedef struct axon_nic_skb_t {
+ struct sk_buff *skb;
+ dma_addr_t bus_addr;
+ u64 timestamp;
+ size_t size;
+ u32 id;
+ struct axon_nic_skb_t *next;
+} axon_nic_skb_t;
+
+
+typedef struct axon_nic_t {
+
+ struct list_head list;
+
+
+ u8 id;
+
+ enum {
+ AXON_NIC_UP,
+ AXON_NIC_DOWN,
+ } state;
+
+
+ axon_t *axon;
+
+
+ axon_sms_t *sms;
+
+
+ struct axon_dmax_t *dma;
+
+
+ axon_mbox_t *peer_mbox;
+
+
+ addr_xltr_t *xltr;
+
+
+ axon_nic_skb_t skb_tx_queue[AXON_NIC_TX_QUEUE_LEN];
+ axon_nic_skb_t *free_slot;
+ atomic_t tx_cnt;
+ spinlock_t tx_lock;
+
+
+ struct net_device *dev;
+
+
+ struct net_device_stats net_stats;
+
+
+ axon_nic_stat_t axon_stats;
+
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ struct delayed_work wipe_out_task;
+#else
+ struct work_struct wipe_out_task;
+#endif
+
+#ifdef CONFIG_AXON_NAPI
+
+ struct tasklet_struct reschedule_task;
+
+ atomic_t loop;
+#endif
+} axon_nic_t;
+
+static struct list_head axon_nic_list;
+
+
+static int axon_nic_timeout = 30;
+
+module_param(axon_nic_timeout, int, 0644);
+MODULE_PARM_DESC(axon_nic_timeout,
+ "Timeout (in seconds) after which an unprocessed TX SKB is discarded. The default is 30 seconds");
+
+
+
+#if defined(AXON_DEBUG_NIC)
+static void axon_nic_skb_print(axon_nic_t * axon_nic, struct sk_buff *skb)
+{
+ /* The hex dump can approach 3x the 60000 byte MTU, far too
+  * large for the kernel stack, so allocate the buffer instead. */
+ char *store = kmalloc(skb->len * 3 + 10, GFP_ATOMIC);
+ char *buffer = store;
+ int i_byte;
+
+ if (store == NULL)
+ return;
+
+ buffer += sprintf(buffer, "skb=");
+ for (i_byte = 0; i_byte < skb->len; i_byte++) {
+ buffer +=
+ sprintf(buffer, "%02x:", (unsigned char)skb->data[i_byte]);
+ }
+ buffer += sprintf(buffer, "\n");
+ dbg_log("%s", store);
+ kfree(store);
+}
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+static void axon_skb_wipe_out_wq(struct work_struct *work)
+{
+ axon_nic_t *axon_nic =
+ container_of(work, axon_nic_t, wipe_out_task.work);
+#else
+static void axon_skb_wipe_out_wq(void *ptr)
+{
+ axon_nic_t *axon_nic = (axon_nic_t *) ptr;
+#endif
+ int i_skb = 0;
+
+
+
+ spin_lock_bh(&axon_nic->tx_lock);
+
+
+
+ if ((axon_nic->state == AXON_NIC_DOWN)
+ || (atomic_read(&axon_nic->tx_cnt) < AXON_NIC_TX_QUEUE_LEN / 2)) {
+
+ int count = 0;
+
+ while (i_skb < AXON_NIC_TX_QUEUE_LEN) {
+
+ if ((axon_nic->skb_tx_queue[i_skb].skb != NULL) &&
+ ((axon_nic->skb_tx_queue[i_skb].timestamp <
+ get_jiffies_64())
+ || (axon_nic->state == AXON_NIC_DOWN))) {
+
+ struct sk_buff *skb;
+
+
+ dbg_log
+ ("timeout on TX SKB %d, releasing it\n",
+ i_skb);
+
+ skb = axon_nic->skb_tx_queue[i_skb].skb;
+
+
+ dma_unmap_single(axon_nic->axon->get_device
+ (axon_nic->axon),
+ axon_nic->
+ skb_tx_queue[i_skb].
+ bus_addr,
+ axon_nic->
+ skb_tx_queue[i_skb].size,
+ DMA_TO_DEVICE);
+
+ axon_nic->skb_tx_queue[i_skb].timestamp = 0;
+ axon_nic->skb_tx_queue[i_skb].skb = NULL;
+ axon_nic->skb_tx_queue[i_skb].next =
+ axon_nic->free_slot;
+ axon_nic->skb_tx_queue[i_skb].bus_addr = 0;
+ axon_nic->skb_tx_queue[i_skb].size = 0;
+ axon_nic->free_slot =
+ &axon_nic->skb_tx_queue[i_skb];
+
+ dev_kfree_skb_any(skb);
+
+ if (atomic_inc_return(&axon_nic->tx_cnt) == 1) {
+ if (netif_queue_stopped(axon_nic->dev)) {
+ dbg_inf
+ ("restarting the device\n");
+ netif_wake_queue(axon_nic->dev);
+ }
+ }
+
+ axon_nic->net_stats.tx_errors++;
+
+ count++;
+ }
+
+ i_skb++;
+ }
+
+ dbg_inf("End of TX ring cleanup. 
%d SKBs released\n", count); + } + + spin_unlock_bh(&axon_nic->tx_lock); + + + if (axon_nic->state == AXON_NIC_UP) + schedule_delayed_work(&axon_nic->wipe_out_task, HZ); +} + + +static int axon_nic_skb_cancel(axon_nic_t * axon_nic, axon_sms_msg_t * msg) +{ + int ret = 0; + + u8 tx_skb_id = msg->payload[AXON_NIC_SMS_BYTE_SKB_TX_ID]; + + struct sk_buff *skb; + + spin_lock_bh(&axon_nic->tx_lock); + + skb = axon_nic->skb_tx_queue[tx_skb_id].skb; + + if (skb) { + + + + axon_nic->skb_tx_queue[tx_skb_id].timestamp = 0; + axon_nic->skb_tx_queue[tx_skb_id].skb = NULL; + axon_nic->skb_tx_queue[tx_skb_id].next = axon_nic->free_slot; + axon_nic->free_slot = &axon_nic->skb_tx_queue[tx_skb_id]; + axon_nic->net_stats.tx_dropped++; + + + if (atomic_inc_return(&axon_nic->tx_cnt) == 1) { + if (netif_queue_stopped(axon_nic->dev)) { + dbg_inf("restarting the device\n"); + netif_wake_queue(axon_nic->dev); + } + } + } else { + dbg_err("Received cancel for an empty slot: TX = %d\n", + tx_skb_id); + } + + spin_unlock_bh(&axon_nic->tx_lock); + + + if (skb) + dev_kfree_skb_any(skb); + + return ret; +} + + +static void axon_nic_dma_completion_handler(struct axon_dmax_t + *p_axon_dmax, struct axon_dma_req_t + *p_dma_req, void *context) +{ + axon_nic_skb_t *p_nic_skb = (axon_nic_skb_t *) context; + struct sk_buff *skb = p_nic_skb->skb; + + axon_nic_t *axon_nic = netdev_priv(skb->dev); + +#if defined(AXON_DEBUG_NIC) + + dbg_log("Skb 0x%p after DMA completion \n", skb); + + axon_nic_skb_print(axon_nic, skb); +#endif + + + skb->protocol = eth_type_trans(skb, skb->dev); + + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + axon_nic->net_stats.rx_packets++; + axon_nic->net_stats.rx_bytes += skb_headlen(skb); + + axon_nic->axon_stats.rx_cnt--; + + skb->dev->last_rx = jiffies; + + dma_unmap_single(axon_nic->axon->get_device(axon_nic->axon), + p_nic_skb->bus_addr, p_nic_skb->size, DMA_FROM_DEVICE); + + kfree(p_nic_skb); + + dbg_log("Skb 0x%p is passed to the network stack\n", skb); + +#ifdef CONFIG_AXON_NAPI + skb->dev->quota--; + + netif_receive_skb(skb); +#else + netif_rx(skb); +#endif +} + + +static int axon_nic_skb_avail(axon_nic_t * axon_nic, axon_sms_msg_t * msg) +{ + int ret = 0; + + u8 tx_skb_id = msg->payload[AXON_NIC_SMS_BYTE_SKB_TX_ID]; + + struct sk_buff *skb = NULL; + + + struct axon_dma_req_t *dma_req = NULL; + + u32 skb_size; + + plb_addr_t skb_dst_plb_addr; + plb_addr_t skb_src_plb_addr; + + axon_sms_msg_t msg_ans; + + u8 msg_encoded[AXON_SMS_SIZE]; + + + axon_dma_req_xfer_t dma_req_xfer = AXON_DMA_REQ_XFER_INIT; + axon_dma_req_mbox_t dma_req_mbox = AXON_DMA_REQ_MBOX_INIT; + + axon_nic_skb_t *p_nic_skb = NULL; + + dbg_log("Receiving SKB avail request \n"); + + + if (!netif_running(axon_nic->dev)) { + + + dbg_inf + ("Receiving avail req while network interface is stopped\n"); + + ret = -EBUSY; + + goto out; + } + + memcpy(&skb_size, msg->payload + AXON_NIC_SMS_BYTE_SKB_SZ, + sizeof(skb_size)); + skb_size = __be32_to_cpu(skb_size); + + dbg_log("SKB is %d byte long\n", skb_size); + + if ((p_nic_skb = kzalloc(sizeof(axon_nic_skb_t), GFP_ATOMIC)) == NULL) { + dbg_err("Failed to allocate SKB wrapper\n"); + + ret = -ENOMEM; + + goto out; + } + + if ((skb = dev_alloc_skb(skb_size + NET_IP_ALIGN)) == NULL) { + dbg_err("Failed to allocate SKB\n"); + + ret = -ENOMEM; + + goto out; + } + + + skb->dev = axon_nic->dev; + + p_nic_skb->skb = skb; + p_nic_skb->size = skb_size; + + + dma_req = axon_dma_request_create(axon_nic->dma, 10); + + if (dma_req == NULL) { + dbg_err("Failed to create a DMA req\n"); + ret = -ENOMEM; + goto 
out; + } + + dbg_log("Preparing DMA Req \n"); + + skb_reserve(skb, NET_IP_ALIGN); + + + p_nic_skb->bus_addr = + dma_map_single(axon_nic->axon->get_device(axon_nic->axon), + skb_put(skb, skb_size), skb_size, DMA_FROM_DEVICE); + + skb_dst_plb_addr = + axon_addr_xltr_to_plb(axon_nic->xltr, p_nic_skb->bus_addr); + + dbg_log("Destination buffer 0x%" AXON_DMA_ADDR_FMT_T + "(PLB=0x%016" AXON_PLB_ADDR_FMT_T ")\n", + p_nic_skb->bus_addr, __be64_to_cpu(skb_dst_plb_addr)); + + skb_src_plb_addr = 0; + *(u8 *) (&skb_src_plb_addr) = msg->payload[AXON_NIC_SMS_BYTE_SKB_PLB]; + memcpy(((u8 *) (&skb_src_plb_addr)) + 3, + msg->payload + AXON_NIC_SMS_BYTE_SKB_PLB, + sizeof(skb_src_plb_addr) - 3); + + dbg_log("Source buffer (PLB=0x%016" AXON_PLB_ADDR_FMT_T + ")\n", __be64_to_cpu(skb_src_plb_addr)); + + dma_req_xfer.src = skb_src_plb_addr; + + dma_req_xfer.dst = skb_dst_plb_addr; + + dma_req_xfer.size = skb_size; + + dma_req_xfer.intr = DMA_NO_INTR; + + ret = axon_dma_request_push_xfer(dma_req, &dma_req_xfer); + if (ret < 0) { + dbg_err("Error while stacking DMA request\n"); + goto out; + } + + + msg_ans.payload[AXON_NIC_SMS_BYTE_TYPE] = AXON_NIC_SMS_SKB_XFERD; + + msg_ans.payload[AXON_NIC_SMS_BYTE_SKB_TX_ID] = tx_skb_id; + + msg_ans.channel = AXON_SMS_CHANNEL_NIC; + + dbg_log("Encoding xfer completed SMS\n"); + ret = + axon_sms_encode(axon_nic->sms, &msg_ans, msg_encoded, 1, + AXON_SMS_SIZE); + if (ret < 0) { + dbg_err("Error while encoding message \n"); + goto out; + } + + + dma_req_mbox.dst_id = AXON_DMA_TARGET_PEER; + dma_req_mbox.msg = msg_encoded; + dma_req_mbox.msg_size = AXON_SMS_SIZE; + + ret = axon_dma_request_push_mbox(dma_req, &dma_req_mbox); + if (ret < 0) { + dbg_err("Error while stacking mbox \n"); + goto out; + } + + + dbg_log("Queueing xfer DMA request\n"); + ret = + axon_dma_request_queue(dma_req, + axon_nic_dma_completion_handler, p_nic_skb); + if (ret < 0) { + dbg_err("Unable to queue message \n"); + } + +out: + if (ret < 0) { + + + if (dma_req) { + axon_dma_request_destroy(dma_req); + } + + if (p_nic_skb) { + if (p_nic_skb->bus_addr) + dma_unmap_single(axon_nic->axon->get_device + (axon_nic->axon), + p_nic_skb->bus_addr, + p_nic_skb->size, + DMA_FROM_DEVICE); + kfree(p_nic_skb); + } + + + if (skb) { + dev_kfree_skb_any(skb); + } + + + msg_ans.payload[AXON_NIC_SMS_BYTE_TYPE] = + AXON_NIC_SMS_SKB_CANCEL; + msg_ans.payload[AXON_NIC_SMS_BYTE_SKB_TX_ID] = tx_skb_id; + msg_ans.channel = AXON_SMS_CHANNEL_NIC; + + + if (axon_sms_send + (axon_nic->sms, axon_nic->peer_mbox, &msg_ans, 3) < 0) { + dbg_err + ("Unable to send SMS for canceling skb buffer for %d.\n", + tx_skb_id); + } + + axon_nic->net_stats.rx_dropped++; + } else { + axon_nic->axon_stats.rx_cnt++; + } + + return ret; +} + + +static int axon_nic_skb_xferd(axon_nic_t * axon_nic, axon_sms_msg_t * msg) +{ + int ret = 0; + u64 time; + + u8 skb_tx_id = msg->payload[AXON_NIC_SMS_BYTE_SKB_TX_ID]; + + struct sk_buff *skb; + + dma_addr_t dma_addr; + size_t size; + + spin_lock_bh(&axon_nic->tx_lock); + + skb = axon_nic->skb_tx_queue[skb_tx_id].skb; + + dbg_log("skb=0x%p id=%d has been received\n", skb, skb_tx_id); + + if (skb == NULL) { + spin_unlock_bh(&axon_nic->tx_lock); + dbg_err("TX Skb slot %d is empty\n", skb_tx_id); + return -1; + } + + time = axon_nic->skb_tx_queue[skb_tx_id].timestamp; + + dma_addr = axon_nic->skb_tx_queue[skb_tx_id].bus_addr; + size = axon_nic->skb_tx_queue[skb_tx_id].size; + + + axon_nic->skb_tx_queue[skb_tx_id].timestamp = 0; + axon_nic->skb_tx_queue[skb_tx_id].skb = NULL; + axon_nic->skb_tx_queue[skb_tx_id].bus_addr = 
0; + axon_nic->skb_tx_queue[skb_tx_id].size = 0; + axon_nic->skb_tx_queue[skb_tx_id].next = axon_nic->free_slot; + axon_nic->free_slot = &axon_nic->skb_tx_queue[skb_tx_id]; + + spin_unlock_bh(&axon_nic->tx_lock); + + + dma_unmap_single(axon_nic->axon->get_device(axon_nic->axon), + dma_addr, size, DMA_TO_DEVICE); + + + time = get_jiffies_64() + (HZ * axon_nic_timeout) - time; + +#if defined(AXON_DEBUG_NIC) + + dbg_log("Skb 0x%p after XFER completion \n", skb); + axon_nic_skb_print(axon_nic, skb); +#endif + + axon_nic->net_stats.tx_packets++; + axon_nic->net_stats.tx_bytes += skb_headlen(skb); + + if (atomic_inc_return(&axon_nic->tx_cnt) == 1) { + if (netif_queue_stopped(axon_nic->dev)) { + dbg_inf("restarting the device\n"); + netif_wake_queue(axon_nic->dev); + } + } + + if (time > axon_nic->axon_stats.tx_max_time) + axon_nic->axon_stats.tx_max_time = time; + + if (time < axon_nic->axon_stats.tx_min_time) + axon_nic->axon_stats.tx_min_time = time; + + dev_kfree_skb_any(skb); + + return ret; +} + +static int axon_nic_invalid(axon_nic_t * axon_nic, axon_sms_msg_t * msg) +{ + dbg_err("invalid message received\n"); + return -EINVAL; +} + +typedef int (*axon_nic_msg_function_t) (axon_nic_t * + axon_nic, axon_sms_msg_t * msg); + +static axon_nic_msg_function_t function_array[] = { + axon_nic_invalid, + axon_nic_skb_avail, + axon_nic_skb_xferd, + axon_nic_skb_cancel, + axon_nic_invalid, +}; + + +static int axon_nic_sms_handler(void *context, axon_sms_msg_t * msg) +{ + axon_nic_t *axon_nic = context; + int msg_id = msg->payload[AXON_NIC_SMS_BYTE_TYPE]; + dbg_log("Receiving SMS \n"); + msg_id = msg_id > AXON_NIC_SMS_MAX ? 0 : msg_id; + return function_array[msg_id] (axon_nic, msg); +} + + +static int axon_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int ret = NETDEV_TX_OK; + axon_nic_t *axon_nic = netdev_priv(dev); + u8 i_skb = 0; + dbg_log("Starting transmission of skb 0x%p \n", skb); +#if defined(AXON_DEBUG_NIC) + + dbg_log("Skb 0x%p at xmit time \n", skb); + axon_nic_skb_print(axon_nic, skb); +#endif + + spin_lock_bh(&axon_nic->tx_lock); + + if (atomic_read(&axon_nic->tx_cnt) == 0) { + dbg_err("TX queue is full but still running (???)...\n"); + netif_stop_queue(dev); + dbg_err("TX queue stopped now\n"); + + ret = NETDEV_TX_BUSY; + } else { + + BUG_ON(axon_nic->free_slot == NULL); + + i_skb = axon_nic->free_slot->id; + axon_nic->free_slot = axon_nic->free_slot->next; + axon_nic->skb_tx_queue[i_skb].next = NULL; + + if (atomic_dec_return(&axon_nic->tx_cnt) == 0) { + dbg_inf("Stopping TX queue because it is now full\n"); + netif_stop_queue(dev); + } + } + + spin_unlock_bh(&axon_nic->tx_lock); + + if (ret == NETDEV_TX_OK) { + axon_sms_msg_t msg; + int size = __cpu_to_be32(skb->len); + plb_addr_t skb_plb_addr; + dbg_log("Skb 0x%p is stored in slot %d \n", skb, i_skb); + axon_nic->skb_tx_queue[i_skb].timestamp = + get_jiffies_64() + (HZ * axon_nic_timeout); + axon_nic->skb_tx_queue[i_skb].skb = skb; + axon_nic->skb_tx_queue[i_skb].size = skb->len; + + axon_nic->skb_tx_queue[i_skb].bus_addr = + dma_map_single(axon_nic->axon-> + get_device(axon_nic->axon), skb->data, + skb->len, DMA_TO_DEVICE); + + skb_plb_addr = + axon_addr_xltr_to_plb(axon_nic->xltr, + axon_nic->skb_tx_queue[i_skb]. 
+ bus_addr);
+
+ msg.payload[AXON_NIC_SMS_BYTE_TYPE] = AXON_NIC_SMS_SKB_AVAIL;
+
+ msg.payload[AXON_NIC_SMS_BYTE_SKB_TX_ID] = i_skb;
+
+ memcpy(msg.payload + AXON_NIC_SMS_BYTE_SKB_SZ, &size,
+ sizeof(int));
+
+ memcpy(msg.payload + AXON_NIC_SMS_BYTE_SKB_PLB,
+ ((u8 *) (&skb_plb_addr)) + 3, sizeof(skb_plb_addr) - 3);
+
+ msg.channel = AXON_SMS_CHANNEL_NIC;
+
+ dbg_log
+ ("Sending Skb Avail request for %d bytes, id %d, at PLB=0x%016"
+ AXON_PLB_ADDR_FMT_T "\n", skb->len, i_skb,
+ __be64_to_cpu(skb_plb_addr));
+
+
+ ret =
+ axon_sms_send(axon_nic->sms, axon_nic->peer_mbox, &msg, 12);
+
+ if (ret < 0) {
+ dbg_err
+ ("Unable to send SMS for requesting skb allocation.\n");
+ dma_unmap_single(axon_nic->axon->
+ get_device(axon_nic->axon),
+ axon_nic->skb_tx_queue[i_skb].
+ bus_addr, skb->len, DMA_TO_DEVICE);
+
+ spin_lock_bh(&axon_nic->tx_lock);
+
+
+ axon_nic->skb_tx_queue[i_skb].timestamp = 0;
+ axon_nic->skb_tx_queue[i_skb].skb = NULL;
+ axon_nic->skb_tx_queue[i_skb].bus_addr = 0;
+ axon_nic->skb_tx_queue[i_skb].size = 0;
+ axon_nic->skb_tx_queue[i_skb].next =
+ axon_nic->free_slot;
+ axon_nic->free_slot = &axon_nic->skb_tx_queue[i_skb];
+
+ spin_unlock_bh(&axon_nic->tx_lock);
+
+
+ if (atomic_inc_return(&axon_nic->tx_cnt) == 1) {
+ if (netif_queue_stopped(axon_nic->dev)) {
+ dbg_inf("restarting the device\n");
+ netif_wake_queue(axon_nic->dev);
+ }
+ }
+
+
+ dev_kfree_skb_any(skb);
+
+
+ ret = NETDEV_TX_OK;
+ } else
+ axon_nic->dev->trans_start = jiffies;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_AXON_NAPI
+static void axon_nic_reschedule_task_entry(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ axon_nic_t *axon_nic = netdev_priv(dev);
+ dbg_log("in reschedule tasklet\n");
+ if (netif_rx_schedule_prep(dev)) {
+
+ axon_sms_disable_hw_irqs(axon_nic->sms);
+ __netif_rx_schedule(dev);
+ } else {
+ dbg_log("Hmm, is it already polling?\n");
+ }
+}
+
+static irqreturn_t axon_nic_mbox_interrupt_handler(void
+ *data, int irq, void
+ *dev_id)
+{
+ dbg_log("in Eth interrupt\n");
+ axon_nic_reschedule_task_entry((unsigned long)data);
+ return IRQ_HANDLED;
+}
+
+#define AXON_NUMBER_OF_POOL 100
+
+static int axon_nic_poll(struct net_device *dev, int *budget)
+{
+ axon_nic_t *axon_nic = netdev_priv(dev);
+ int to_be_received = dev->quota;
+ int ret;
+ dbg_log("in Eth poll\n");
+
+ atomic_set(&axon_nic->loop, AXON_NUMBER_OF_POOL);
+
+
+ while (((ret = axon_sms_process_next_msg(axon_nic->sms)) == 0)
+ || (atomic_dec_return(&axon_nic->loop))) {
+
+
+ if (ret == 0)
+ atomic_set(&axon_nic->loop, AXON_NUMBER_OF_POOL);
+
+ if (dev->quota <= 0) {
+ goto not_done;
+ }
+ }
+
+ *budget -= (to_be_received - dev->quota);
+
+ netif_rx_complete(dev);
+ axon_sms_update_hw(axon_nic->sms);
+ axon_sms_enable_hw_irqs(axon_nic->sms);
+
+ if (axon_sms_has_message(axon_nic->sms)) {
+ tasklet_schedule(&axon_nic->reschedule_task);
+ dbg_log("There were MBX remaining\n");
+ }
+
+ dbg_log("all done\n");
+ return 0;
+
+not_done:
+
+ *budget -= (to_be_received - dev->quota);
+ dbg_log("ran out of quota\n");
+ return 1;
+}
+#endif
+
+
+static int axon_nic_open(struct net_device *dev)
+{
+ int ret = 0;
+ axon_nic_t *axon_nic = netdev_priv(dev);
+ dbg_inf("Opening NIC driver \n");
+
+ ret =
+ axon_sms_subscribe(axon_nic->sms, AXON_SMS_CHANNEL_NIC,
+ axon_nic_sms_handler, axon_nic);
+ if (ret < 0) {
+
+ dbg_err("Unable to subscribe to channel %d \n",
+ AXON_SMS_CHANNEL_NIC);
+ }
+
+ axon_nic->state = AXON_NIC_UP;
+ dbg_log("Starting device queue \n");
+ netif_start_queue(dev);
+
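+ /*
+  * Arm the periodic TX ring cleanup: once a second,
+  * axon_skb_wipe_out_wq() reclaims TX SKBs whose transfer
+  * acknowledgment from the peer never arrived before
+  * axon_nic_timeout expired.
+  */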
+ schedule_delayed_work(&axon_nic->wipe_out_task, HZ);
+
+#ifdef CONFIG_AXON_NAPI
+ tasklet_init(&axon_nic->reschedule_task,
+ axon_nic_reschedule_task_entry, (unsigned long)dev);
+ axon_sms_add_intercept_handler(axon_nic->sms,
+ axon_nic_mbox_interrupt_handler, dev);
+#endif
+
+ return 0;
+}
+
+static int axon_nic_stop(struct net_device *dev)
+{
+ axon_nic_t *axon_nic = netdev_priv(dev);
+ dbg_inf("Stopping NIC driver \n");
+
+
+ netif_stop_queue(dev);
+
+#ifdef CONFIG_AXON_NAPI
+ dbg_log("removing intercept handler\n");
+ axon_sms_remove_intercept_handler(axon_nic->sms,
+ axon_nic_mbox_interrupt_handler, dev);
+ tasklet_kill(&axon_nic->reschedule_task);
+#endif
+
+ dbg_log("Unsubscribing from SMS service \n");
+ axon_sms_unsubscribe(axon_nic->sms, AXON_SMS_CHANNEL_NIC,
+ axon_nic_sms_handler);
+ axon_nic->state = AXON_NIC_DOWN;
+
+ cancel_delayed_work(&axon_nic->wipe_out_task);
+
+ if ((AXON_NIC_TX_QUEUE_LEN - atomic_read(&axon_nic->tx_cnt))) {
+ dbg_inf
+ ("There are still buffers tied to this interface: TX = %d\n",
+ AXON_NIC_TX_QUEUE_LEN - atomic_read(&axon_nic->tx_cnt));
+
+
+ /* A bare schedule_timeout() returns immediately unless the
+  * task state is set first; sleep uninterruptibly to give
+  * in-flight transfers a chance to complete. */
+ schedule_timeout_uninterruptible(axon_nic_timeout * HZ);
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+ axon_skb_wipe_out_wq((void *)&axon_nic->wipe_out_task.work);
+#else
+ axon_skb_wipe_out_wq((void *)axon_nic);
+#endif
+
+ if ((AXON_NIC_TX_QUEUE_LEN - atomic_read(&axon_nic->tx_cnt))) {
+ int i_skb = 0;
+ dbg_err
+ ("There are still buffers tied to this interface after cleanup: "
+ "TX = %d\n",
+ AXON_NIC_TX_QUEUE_LEN -
+ atomic_read(&axon_nic->tx_cnt));
+
+ spin_lock_bh(&axon_nic->tx_lock);
+
+ while (i_skb < AXON_NIC_TX_QUEUE_LEN) {
+ if (axon_nic->skb_tx_queue[i_skb].skb != NULL) {
+ dbg_err
+ ("Buffer TX %d is still there after %"
+ AXON_U64_FMT_T " jiffies\n",
+ i_skb,
+ get_jiffies_64() +
+ (HZ * axon_nic_timeout) -
+ axon_nic->skb_tx_queue[i_skb]. 
+ timestamp); + } + i_skb++; + } + + spin_unlock_bh(&axon_nic->tx_lock); + } + + } + + return 0; +} + +static void axon_nic_tx_timeout(struct net_device *dev) +{ + axon_nic_t *axon_nic = netdev_priv(dev); + dbg_err("TX timeout has been called \n"); + + if (atomic_read(&axon_nic->tx_cnt)) { + if (netif_queue_stopped(axon_nic->dev)) { + netif_wake_queue(axon_nic->dev); + } + } else { + + dbg_err("The TX queue is full \n"); + dbg_err("We should clean it\n"); + + + cancel_delayed_work(&axon_nic->wipe_out_task); + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19) + schedule_work(&axon_nic->wipe_out_task.work); +#else + schedule_work(&axon_nic->wipe_out_task); +#endif + } +} + +static struct net_device_stats *axon_nic_get_stats(struct + net_device + *dev) +{ + axon_nic_t *axon_nic = netdev_priv(dev); + return &axon_nic->net_stats; +} + +static int axon_nic_read_proc(char *page, char **start, off_t offset, + int count, int *eof, void *data) +{ + axon_nic_t *axon_nic = (axon_nic_t *) data; + + int len = 0; + + len += sprintf(page + len, "NIC %d:\n", axon_nic->id); + len += + sprintf(page + len, "\ttx_max_time = %llu\n", + (long long unsigned int)axon_nic->axon_stats.tx_max_time); + len += + sprintf(page + len, "\ttx_min_time = %llu\n", + (long long unsigned int)axon_nic->axon_stats.tx_min_time); + len += + sprintf(page + len, "\tpending tx = %d\n", + AXON_NIC_TX_QUEUE_LEN - atomic_read(&axon_nic->tx_cnt)); + len += + sprintf(page + len, "\tpending rx = %d\n", + axon_nic->axon_stats.rx_cnt); + *eof = 1; + return len; +} + + +static void axon_nic_init(struct net_device *dev) +{ + axon_nic_t *axon_nic; + int i; + + dbg_log("Init of the NIC driver \n"); + ether_setup(dev); + dev->open = axon_nic_open; + dev->stop = axon_nic_stop; + dev->hard_start_xmit = axon_nic_hard_start_xmit; + dev->tx_timeout = axon_nic_tx_timeout; + dev->get_stats = axon_nic_get_stats; + +#ifdef CONFIG_AXON_NAPI + dev->poll = axon_nic_poll; + dev->weight = 256; + dbg_inf("We are in NAPI mode\n"); +#endif + + dev->features |= (NETIF_F_HIGHDMA); + dev->tx_queue_len = AXON_NIC_TX_QUEUE_LEN; + dev->mtu = 60000; + + axon_nic = netdev_priv(dev); + memset(axon_nic, 0, sizeof(axon_nic_t)); + + + axon_nic->id = 0; + + axon_nic->state = AXON_NIC_DOWN; + axon_nic->dev = dev; + + + memcpy(dev->dev_addr, AXON_NIC_MAC_ADDR, ETH_ALEN); + dev->dev_addr[ETH_ALEN - 1] = axon_nic->id; + spin_lock_init(&axon_nic->tx_lock); + atomic_set(&axon_nic->tx_cnt, AXON_NIC_TX_QUEUE_LEN); + + axon_nic->axon_stats.rx_cnt = 0; + axon_nic->axon_stats.tx_max_time = 0; + axon_nic->axon_stats.tx_min_time = 0xffffffffffffffffUL; + + + for (i = 0; i < AXON_NIC_TX_QUEUE_LEN; i++) + axon_nic->skb_tx_queue[i].id = i; + + + for (i = 0; i < (AXON_NIC_TX_QUEUE_LEN - 1); i++) + axon_nic->skb_tx_queue[i].next = &axon_nic->skb_tx_queue[i + 1]; + + axon_nic->free_slot = &axon_nic->skb_tx_queue[0]; + + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19) + INIT_DELAYED_WORK(&axon_nic->wipe_out_task, axon_skb_wipe_out_wq); +#else + INIT_WORK(&axon_nic->wipe_out_task, axon_skb_wipe_out_wq, axon_nic); +#endif +} + +static int axon_nic_probe(axon_t * p_axon) +{ + int ret = 0; + struct net_device *ptr = + alloc_netdev(sizeof(axon_nic_t), "axon_nic%d", axon_nic_init); + axon_nic_t *axon_nic; + + dbg_inf("creating NIC device for Axon %d \n", p_axon->id); + + if (ptr == NULL) { + dbg_err + ("Unable to allocate netdev structure for board %d \n", + p_axon->id); + return -ENOMEM; + } + + axon_nic = netdev_priv(ptr); + + axon_nic->axon = p_axon; + axon_nic->sms = p_axon->sms_get(p_axon); + 
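+ /*
+  * Cache the remaining per-board services used on the data path:
+  * the DMA engine that moves SKB payloads, the peer mailbox used
+  * for SMS signalling, and the PLB address translator.
+  */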
+ axon_nic->dma = p_axon->dmax_get(p_axon);
+ axon_nic->peer_mbox = axon_peer_mbox_get(p_axon);
+ axon_nic->xltr = p_axon->addr_xltr_get(p_axon);
+
+ create_proc_read_entry("nic", 0,
+ axon_get_device_dir(p_axon),
+ axon_nic_read_proc, axon_nic);
+
+ dbg_log("Registering NIC driver for board %d \n", p_axon->id);
+
+ ret = register_netdev(ptr);
+
+ if (ret != 0) {
+ dbg_err
+ ("Unable to register netdev structure for board %d \n",
+ p_axon->id);
+ /* Don't leak the proc entry or the netdev on failure. */
+ remove_proc_entry("nic", axon_get_device_dir(p_axon));
+ free_netdev(ptr);
+ return ret;
+ }
+
+ list_add_tail(&axon_nic->list, &axon_nic_list);
+
+ return 0;
+}
+
+static int axon_nic_remove(axon_t * p_axon)
+{
+ axon_nic_t *axon_nic;
+ struct list_head *p_cursor;
+ struct list_head *p_next;
+
+ list_for_each_safe(p_cursor, p_next, &axon_nic_list) {
+ axon_nic = list_entry(p_cursor, axon_nic_t, list);
+
+ if (axon_nic->axon == p_axon) {
+
+ unregister_netdev(axon_nic->dev);
+
+ remove_proc_entry("nic",
+ axon_get_device_dir(axon_nic->axon));
+
+ list_del(p_cursor);
+
+ free_netdev(axon_nic->dev);
+
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static axon_driver_t axon_nic_driver = {
+ .name = "Ethernet0PCI",
+ .probe = axon_nic_probe,
+ .remove = axon_nic_remove,
+};
+
+
+static __exit void axon_nic_module_cleanup(void)
+{
+ axon_driver_unregister(&axon_nic_driver);
+}
+
+
+static __init int axon_nic_module_init(void)
+{
+ INIT_LIST_HEAD(&axon_nic_list);
+
+ axon_driver_register(&axon_nic_driver);
+
+ return 0;
+}
+
+module_init(axon_nic_module_init);
+module_exit(axon_nic_module_cleanup);
--