This driver allows two applications (one running on the Cell and the other
running on the host) to exchange user-allocated memory (that is, malloc or
hugetlbfs memory) using simple read() or write() system calls.

Here are some typical performance numbers (GB/s) measured on an HP93000 host.
These results may vary depending on the performance of the PCI-E root complex
chip on the host.

transfer   CAB write   CAB read    host write   host read
  size      to host    from host     to XDR     from XDR
    1K        0.03        0.03        0.06         0.06
    2K        0.06        0.06        0.11         0.11
    4K        0.11        0.11        0.21         0.22
    8K        0.21        0.21        0.39         0.40
   16K        0.36        0.36        0.62         0.65
   32K        0.55        0.55        0.89         0.94
   64K        0.76        0.75        1.13         1.22
  128K        0.94        0.91        1.30         1.44
  256K        1.06        1.00        1.41         1.56
  512K        1.17        1.10        1.53         1.65
 1024K        1.57        1.46        1.59         1.41
 2048K        1.91        1.75        1.62         1.38
 4096K        2.09        1.89        1.71         1.57
 8192K        2.18        1.96        1.73         1.63
16384K        2.25        2.02        1.74         1.61

Signed-off-by: Jean-Christophe DUBOIS

--

Index: linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer.c
===================================================================
--- /dev/null
+++ linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer.c
@@ -0,0 +1,2328 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html + ******************************************************************/ + + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("user space buffer driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jean-Christophe Dubois (jdubois@mc.com)"); + +#include + +static struct list_head axon_buffer_list; + + +static void axon_buffer_get_uniq_key(axon_usr_buffer_t * p_axon_usr, + axon_buffer_key_t * p_axon_map_key) +{ + axon_local_buffer_map_list_t *p_cur_map_list; + struct list_head *pos; + + dbg_log("begin\n"); + + *p_axon_map_key = (axon_buffer_key_t) + __cpu_to_be32(atomic_inc_return(&p_axon_usr->key)); + + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + + if (p_cur_map_list->key == *p_axon_map_key) { + dbg_log("key 0x%08x is already in use\n", + __be32_to_cpu(*p_axon_map_key)); + + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + axon_buffer_get_uniq_key(p_axon_usr, p_axon_map_key); + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); +} + + +static axon_buffer_key_t axon_buffer_extract_key(struct axon_sms_msg_t *p_msg) +{ + dbg_log("begin\n"); + + return *(axon_buffer_key_t *) (p_msg->payload + + AXON_BUFFER_SMS_ID_OFFSET); +} + + + + +static int axon_buffer_send_state_up(axon_usr_buffer_t * p_axon_usr, u8 flags) +{ + struct axon_sms_msg_t msg; + int ret; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_UP; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + msg.payload[AXON_BUFFER_SMS_UP_FLAG_OFFSET] = flags; + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send UP message\n"); + } + + return ret; +} + + +static int axon_buffer_send_state_down(axon_usr_buffer_t * p_axon_usr) +{ + struct axon_sms_msg_t msg; + int ret; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_DOWN; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send DOWN message\n"); + } + + return ret; +} + +static int axon_buffer_send_message_available(axon_usr_buffer_t * + p_axon_usr, + axon_local_buffer_map_list_t + * p_cur_map_list) +{ + int ret = 0; + struct axon_sms_msg_t msg; + __u64 addr = 0; + __u32 desc_size; + + dbg_log("begin\n"); + + desc_size = + __cpu_to_be32(sizeof(axon_buffer_desc_header_t) + + (__cpu_to_be32 + (p_cur_map_list->p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t))); + + + addr = + axon_addr_xltr_to_plb(p_axon_usr->xltr, + p_cur_map_list->phy_buffer_desc); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_AVAILABLE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, + &p_cur_map_list->key, sizeof(p_cur_map_list->key)); + + memcpy(msg.payload + AXON_BUFFER_SMS_ADDR_OFFSET, + ((u8 *) (&addr)) + 3, sizeof(addr) - 3); + + memcpy(msg.payload + 
AXON_BUFFER_SMS_SIZE_OFFSET, &desc_size, + sizeof(desc_size)); + + dbg_log("sending bufferID 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T ", desc_size = %d (0x%08x)\n", + __be32_to_cpu(p_cur_map_list->key), __be64_to_cpu(addr), + __be32_to_cpu(desc_size), desc_size); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send AVAILABLE message\n"); + } + + return ret; +} + +static int axon_buffer_send_message_unavailable(axon_usr_buffer_t * + p_axon_usr, + axon_buffer_key_t key) +{ + int ret = 0; + struct axon_sms_msg_t msg; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = + AXON_SYSTEM_BUFFER_UNAVAILABLE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send UNAVAILABLE message\n"); + } + + return ret; +} + + +static int axon_buffer_send_message_not_in_use(axon_usr_buffer_t * + p_axon_usr, + axon_buffer_key_t key) +{ + int ret = 0; + struct axon_sms_msg_t msg; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_NOT_IN_USE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send NOT_IN_USE message\n"); + } + + return ret; +} + + +static int axon_buffer_free_remote_buffer(axon_remote_buffer_map_list_t * + p_cur_map_list, + axon_usr_buffer_t * p_axon_usr) +{ + dbg_log("begin\n"); + + if (atomic_read(&p_cur_map_list->handle_count)) { + dbg_log + ("Buffer 0x%08x is still in use, count = %d\n", + __be32_to_cpu(p_cur_map_list->key), + atomic_read(&p_cur_map_list->handle_count)); + return 0; + } + + + + + + if (p_cur_map_list->to_be_deleted == 0) { + dbg_err + ("Ref count is 0 for Buffer 0x%08x is not marked for deletion\n", + __be32_to_cpu(p_cur_map_list->key)); + } + + + if (atomic_read(&p_cur_map_list->use_count) != 0) { + dbg_err + ("Ref count is 0, buffer 0x%08x is marked for deletion, but usecount is not 0\n", + __be32_to_cpu(p_cur_map_list->key)); + } + + if (p_cur_map_list->p_buffer_desc) { + int size = + sizeof(axon_buffer_desc_header_t) + + (__be32_to_cpu + (p_cur_map_list->p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t)); + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr->p_axon), size, + p_cur_map_list->p_buffer_desc, + p_cur_map_list->phy_buffer_desc); + p_cur_map_list->p_buffer_desc = NULL; + } + + if (!p_cur_map_list->is_locally_created) { + + list_del(&p_cur_map_list->list); + + + axon_buffer_send_message_not_in_use(p_axon_usr, + p_cur_map_list->key); + } + + dbg_log("Buffer 0x%08x is released\n", + __be32_to_cpu(p_cur_map_list->key)); + + kfree(p_cur_map_list); + + + return 1; + +} + + +static int +axon_buffer_dissociate_remote_buffer_from_file(axon_file_usr_buffer_t * + p_usr_file) +{ + int ret = 0; + dbg_log("begin\n"); + + if (p_usr_file->p_remote_buffer) { + + + if (atomic_read(&p_usr_file->p_remote_buffer->use_count) > 0) { + dbg_err("buffer is still used\n"); + ret = -EINVAL; + } else { + + write_lock_bh(& + (p_usr_file->p_axon_usr-> + remote_list_lock)); + + + + if ((atomic_dec_return + (&p_usr_file->p_remote_buffer->handle_count) + == 0) + && (p_usr_file->p_remote_buffer-> + 
to_be_deleted == 1)) { + + + + if (axon_buffer_free_remote_buffer + (p_usr_file->p_remote_buffer, + p_usr_file->p_axon_usr)) { + + + dbg_log + ("remote buffer has been released\n"); + } else { + + + dbg_log + ("remote buffer is still used\n"); + } + } + + + p_usr_file->p_remote_buffer = NULL; + + write_unlock_bh(& + (p_usr_file->p_axon_usr-> + remote_list_lock)); + } + } else { + dbg_inf("remote buff pointer is NULL !!!\n"); + } + + return ret; +} + + +static int axon_buffer_use_remote_buffer(axon_file_usr_buffer_t * p_usr_file) +{ + dbg_log("begin\n"); + + if ((p_usr_file->p_remote_buffer) + && (p_usr_file->p_remote_buffer->to_be_deleted == 0)) { + atomic_inc(&p_usr_file->p_remote_buffer->use_count); + return 0; + } else { + return -1; + } +} + + +static int axon_buffer_release_remote_buffer(axon_file_usr_buffer_t * + p_usr_file) +{ + dbg_log("begin\n"); + + if ((p_usr_file->p_remote_buffer->to_be_deleted == 1) && + (atomic_dec_return(&p_usr_file->p_remote_buffer->use_count) == 0)) { + + return + axon_buffer_dissociate_remote_buffer_from_file(p_usr_file); + } else { + atomic_dec(&p_usr_file->p_remote_buffer->use_count); + } + + return 0; +} + + +static int axon_buffer_handle_up_message(axon_usr_buffer_t * p_axon_usr, + struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + struct list_head *pos; + + dbg_log("begin\n"); + + + + + if ((p_msg->payload[AXON_BUFFER_SMS_UP_FLAG_OFFSET] & + AXON_BUFFER_FLAG_RECEIVED_UP) != AXON_BUFFER_FLAG_RECEIVED_UP) { + ret = axon_buffer_send_state_up(p_axon_usr, + AXON_BUFFER_FLAG_RECEIVED_UP); + } + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + if ((ret == 0) && (atomic_xchg(&p_axon_usr->remote_is_up, 1) == 0)) +#else + if ((ret == 0) + && (atomic_inc_return(&p_axon_usr->remote_is_up) == 1)) +#endif + { + read_lock_bh(&(p_axon_usr->local_list_lock)); + + + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + axon_local_buffer_map_list_t *p_cur_map_list; + + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->is_local_only == 0) + axon_buffer_send_message_available + (p_axon_usr, p_cur_map_list); + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + } + + return ret; +} + + +static int axon_buffer_handle_down_message(axon_usr_buffer_t * p_axon_usr) +{ + int ret = 0; + struct list_head *pos, *q; + + dbg_log("begin\n"); + + atomic_set(&p_axon_usr->remote_is_up, 0); + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_for_each_safe(pos, q, &(p_axon_usr->remote_map_list_head.list)) { + axon_remote_buffer_map_list_t *p_cur_map_list; + + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + + + p_cur_map_list->to_be_deleted = 1; + + + if (atomic_read(&p_cur_map_list->handle_count) == 0) { + + if (axon_buffer_free_remote_buffer + (p_cur_map_list, p_axon_usr)) { + dbg_log + ("One remote buffer has been released \n"); + } else { + dbg_log + ("remote buffer still has handles opened \n"); + } + + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + return ret; +} + +static void axon_buffer_desc_dma_completion_handler(struct axon_dmax_t + *p_axon_dmax, struct axon_dma_req_t + *p_dma_req, void *context) +{ + axon_remote_buffer_map_list_t *p_cur_map_list = + (axon_remote_buffer_map_list_t *) context; + axon_usr_buffer_t *p_axon_usr = p_cur_map_list->p_axon_usr; + + dbg_log("begin\n"); + + + if (p_cur_map_list->key != p_cur_map_list->p_buffer_desc->key) { + dbg_err + ("The descriptor does not contain the correct ID 0x%08x != 0x%08x\n", + 
__be32_to_cpu(p_cur_map_list->p_buffer_desc->key), + __be32_to_cpu(p_cur_map_list->key)); + + p_cur_map_list->to_be_deleted = 1; + } + + wake_up(&p_axon_usr->waitq); + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + + if ((atomic_dec_return(&p_cur_map_list->handle_count) == + 0) && (p_cur_map_list->to_be_deleted == 1)) { + + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log("remote buffer was released\n"); + } else { + dbg_log("remote buffer was not released yet\n"); + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); +} + + +static int axon_buffer_handle_available_message(axon_usr_buffer_t * p_axon_usr, struct axon_sms_msg_t + *p_msg) +{ + int ret = 0; + axon_remote_buffer_map_list_t *p_cur_map_list = NULL; + struct list_head *pos; + axon_buffer_key_t key; + u32 desc_size; + struct axon_dma_req_t *p_dma_req; + axon_dma_req_xfer_t dma_req_xfer = AXON_DMA_REQ_XFER_INIT; + axon_dma_req_mbox_t dma_req_mbox = AXON_DMA_REQ_MBOX_INIT; + axon_sms_msg_t msg_ans; + u8 msg_encoded[AXON_SMS_SIZE]; + + dbg_log("begin\n"); + + key = axon_buffer_extract_key(p_msg); + + read_lock_bh(&(p_axon_usr->remote_list_lock)); + + + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + + + if (p_cur_map_list->key == key) { + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + + + if (p_cur_map_list) { + + dbg_err + ("Buffer 0x%08x is already in the list of remote buffers\n", + __be32_to_cpu(__be32_to_cpu(key))); + return -EINVAL; + } + + + p_cur_map_list = + kzalloc(sizeof(axon_remote_buffer_map_list_t), GFP_ATOMIC); + + if (p_cur_map_list == NULL) { + dbg_err("Failed to allocate list element for remote buffer\n"); + return -ENOMEM; + } + + p_cur_map_list->key = key; + + + *((u8 *) (&p_cur_map_list->plb_addr)) = + p_msg->payload[AXON_BUFFER_SMS_ADDR_OFFSET]; + memcpy(((u8 *) (&p_cur_map_list->plb_addr)) + 3, + p_msg->payload + AXON_BUFFER_SMS_ADDR_OFFSET, + sizeof(p_cur_map_list->plb_addr) - 3); + + memcpy(&desc_size, p_msg->payload + AXON_BUFFER_SMS_SIZE_OFFSET, + sizeof(desc_size)); + + dbg_log("Buffer 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T ", desc_size = %d (0x%08x)\n", + __be32_to_cpu(p_cur_map_list->key), + __be64_to_cpu(p_cur_map_list->plb_addr), + __be32_to_cpu(desc_size), desc_size); + + desc_size = __be32_to_cpu(desc_size); + + p_cur_map_list->p_buffer_desc = + dma_alloc_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr->p_axon), + desc_size, + &p_cur_map_list->phy_buffer_desc, GFP_ATOMIC); + + if (p_cur_map_list->p_buffer_desc == NULL) { + dbg_err("Failed to allocate buffer desc\n"); + ret = -ENOMEM; + goto free_list_element; + } + + memset(p_cur_map_list->p_buffer_desc, 0, desc_size); + + p_dma_req = axon_dma_request_create(p_axon_usr->p_dma, 128); + + if (p_dma_req == NULL) { + dbg_err("failed to create DMA req\n"); + ret = -ENOMEM; + goto free_buffer_desc; + } + + + msg_ans.channel = AXON_SMS_CHANNEL_USR_BUFFER; + msg_ans.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_IN_USE; + msg_ans.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg_ans.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + axon_sms_encode(p_axon_usr->sms, &msg_ans, + msg_encoded, 1, AXON_SMS_SIZE); + + dma_req_mbox.dst_id = AXON_DMA_TARGET_PEER; + dma_req_mbox.msg = msg_encoded; + dma_req_mbox.msg_size = AXON_SMS_SIZE; + + ret = axon_dma_request_push_mbox(p_dma_req, &dma_req_mbox); + + if (ret < 0) { + dbg_err("Failed to add the MBX 
command packet\n"); + goto free_dma_req; + } + + + dma_req_xfer.size = desc_size; + dma_req_xfer.intr = DMA_NO_INTR; + dma_req_xfer.src = p_cur_map_list->plb_addr; + dma_req_xfer.dst = axon_addr_xltr_to_plb(p_axon_usr->xltr, + p_cur_map_list-> + phy_buffer_desc); + + + ret = axon_dma_request_push_xfer(p_dma_req, &dma_req_xfer); + + if (ret < 0) { + dbg_err("Failed to add the data command packet\n"); + goto free_dma_req; + } + + + + atomic_set(&p_cur_map_list->handle_count, 1); + + atomic_set(&p_cur_map_list->use_count, 0); + + p_cur_map_list->p_axon_usr = p_axon_usr; + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_add_tail(&(p_cur_map_list->list), + &(p_axon_usr->remote_map_list_head.list)); + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + + ret = + axon_dma_request_queue(p_dma_req, + axon_buffer_desc_dma_completion_handler, + p_cur_map_list); + + if (ret < 0) { + dbg_err("Failed to queue the DMA req\n"); + goto remove_from_list; + } + + return ret; + +remove_from_list: + write_lock_bh(&(p_axon_usr->remote_list_lock)); + list_del(&(p_cur_map_list->list)); + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + +free_dma_req: + axon_dma_request_destroy(p_dma_req); + +free_buffer_desc: + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), + desc_size, + p_cur_map_list-> + p_buffer_desc, p_cur_map_list->phy_buffer_desc); +free_list_element: + kfree(p_cur_map_list); + + return ret; +} + + +static int axon_buffer_handle_unavailable_message(axon_usr_buffer_t * + p_axon_usr, + struct axon_sms_msg_t + *p_msg) +{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_remote_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + + key = axon_buffer_extract_key(p_msg); + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + + if (p_cur_map_list->key == key) { + + p_cur_map_list->to_be_deleted = 1; + + break; + } + p_cur_map_list = NULL; + } + + if (p_cur_map_list) { + + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log + ("Remote buffer 0x%08x was sucessfully released\n", + __be32_to_cpu(key)); + ret = 0; + } else { + dbg_log + ("remote buffer 0x%08x can't be released yet\n", + __be32_to_cpu(key)); + ret = -EINVAL; + } + } else { + + dbg_inf("Buffer 0x%08x is unknown\n", __be32_to_cpu(key)); + ret = -EINVAL; + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + return ret; +} + + +static int axon_buffer_handle_in_use_message(axon_usr_buffer_t * + p_axon_usr, + struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + + key = axon_buffer_extract_key(p_msg); + read_lock_bh(&(p_axon_usr->local_list_lock)); + + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->key == key) { + + p_cur_map_list->is_released = 0; + atomic_inc(&p_cur_map_list->ref_count); + + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + if (p_cur_map_list) { + dbg_log("Found buffer 0x%08x\n", __be32_to_cpu(key)); + } else { + dbg_inf("Could not find buffer 0x%08x\n", __be32_to_cpu(key)); + } + + return ret; +} + + +static int axon_buffer_handle_not_in_use_message(axon_usr_buffer_t * p_axon_usr, struct axon_sms_msg_t + *p_msg) 
+{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + + key = axon_buffer_extract_key(p_msg); + read_lock_bh(&(p_axon_usr->local_list_lock)); + + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->key == key) { + + break; + } + + p_cur_map_list = NULL; + } + + if (p_cur_map_list == NULL) { + + dbg_inf("Could not find buffer 0x%08x\n", __be32_to_cpu(key)); + + } else { + int ref_count = atomic_dec_return(&p_cur_map_list->ref_count); + if (ref_count == 0) { + dbg_log + ("Found buffer 0x%08x, waking up task \n", + __be32_to_cpu(key)); + + p_cur_map_list->is_released = 1; + + wake_up(&p_cur_map_list->p_usr_file->waitq); + } else { + dbg_log + ("Found buffer 0x%08x, but ref_count is %d\n", + __be32_to_cpu(key), ref_count); + } + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + return ret; +} + + +static int axon_buffer_sms_handler(void *context, struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + axon_usr_buffer_t *p_axon_usr = (axon_usr_buffer_t *) context; + dbg_log("begin\n"); + switch (p_msg->payload[AXON_BUFFER_SMS_REQ_OFFSET]) { + + case AXON_SYSTEM_BUFFER_UP: + ret = axon_buffer_handle_up_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_DOWN: + ret = axon_buffer_handle_down_message(p_axon_usr); + break; + case AXON_SYSTEM_BUFFER_AVAILABLE: + ret = axon_buffer_handle_available_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_UNAVAILABLE: + ret = axon_buffer_handle_unavailable_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_IN_USE: + ret = axon_buffer_handle_in_use_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_NOT_IN_USE: + ret = axon_buffer_handle_not_in_use_message(p_axon_usr, p_msg); + break; + default: + dbg_err + ("Unexpected message received by Buffer service %d \n", + p_msg->payload[AXON_BUFFER_SMS_REQ_OFFSET]); + ret = -EINVAL; + break; + } + + return ret; +} + +static int axon_buffer_map_user_memory(axon_t * p_axon, + unsigned long uaddr, + unsigned long len, int writable, + axon_buffer_map_info_t * + p_user_map_info, int dma_direction) +{ + int ret = 0; + unsigned long start = uaddr >> PAGE_SHIFT; + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + const int nr_pages = end - start; + struct page **pp_pages; + struct scatterlist *p_sg; + int i; + + dbg_log("begin\n"); + dbg_log("trying to map vaddr=0x%016lx len=%lu \n", uaddr, len); + + pp_pages = + (struct page **)__get_free_pages(GFP_KERNEL, + get_order(nr_pages * + sizeof(struct page *))); + + if (pp_pages == NULL) { + dbg_err("failed to allocate the page table \n"); + return -ENOMEM; + } + + p_sg = + (struct scatterlist *)__get_free_pages(GFP_KERNEL, + get_order(nr_pages * + sizeof + (struct + scatterlist))); + + if (p_sg == NULL) { + dbg_err("failed to allocate the page table \n"); + ret = -ENOMEM; + goto free_pp_pages; + } + + + down_read(¤t->mm->mmap_sem); + + p_user_map_info->nr_pages = + get_user_pages(current, current->mm, uaddr, + nr_pages, writable, 0, pp_pages, NULL); + up_read(¤t->mm->mmap_sem); + + if (p_user_map_info->nr_pages != nr_pages) { + dbg_err + ("get_user_pages has failed with ret=%d, %d was expected \n", + p_user_map_info->nr_pages, nr_pages); + ret = -ENOMEM; + goto release_user_pages; + } + + dbg_log("The vaddr=0x%016lx is mapped by %d pages \n", uaddr, nr_pages); + + p_user_map_info->len = len; + p_user_map_info->offset = 
(uaddr & ~PAGE_MASK); + p_user_map_info->virt_user_addr = uaddr; + p_user_map_info->pp_pages = pp_pages; + + for (i = 0; i < nr_pages; i++) { + + p_sg[i].page = pp_pages[i]; + + if (i == 0) { + + + p_sg[i].length = PAGE_SIZE - p_user_map_info->offset; + p_sg[i].offset = p_user_map_info->offset; + + + if (p_sg[i].length > p_user_map_info->len) + p_sg[i].length = p_user_map_info->len; + + } else if (i == (nr_pages - 1)) { + + + p_sg[i].length = + p_user_map_info->len - + ((nr_pages - 1) * PAGE_SIZE) + + p_user_map_info->offset; + p_sg[i].offset = 0; + } else { + + p_sg[i].length = PAGE_SIZE; + p_sg[i].offset = 0; + } + } + + p_user_map_info->nr_sg = + dma_map_sg(p_axon->get_device(p_axon), p_sg, nr_pages, + dma_direction); + + p_user_map_info->p_sg = p_sg; + p_user_map_info->dma_direction = dma_direction; + + return ret; + +release_user_pages: + if (p_user_map_info->nr_pages > 0) { + int i_page; + for (i_page = 0; i_page < p_user_map_info->nr_pages; i_page++) { + page_cache_release(pp_pages[i_page]); + } + } + + free_pages((unsigned long)p_sg, + get_order(nr_pages * sizeof(struct scatterlist))); + +free_pp_pages: + free_pages((unsigned long)pp_pages, + get_order(nr_pages * sizeof(struct page *))); + + return ret; +} + +static int axon_buffer_unmap_user_memory(axon_t * p_axon, + axon_buffer_map_info_t * + p_user_map_info) +{ + int i_page; + + dbg_log("begin\n"); + + dma_unmap_sg(p_axon->get_device(p_axon), p_user_map_info->p_sg, + p_user_map_info->nr_pages, p_user_map_info->dma_direction); + + for (i_page = 0; i_page < p_user_map_info->nr_pages; i_page++) { + + + if (!PageReserved(p_user_map_info->pp_pages[i_page])) + SetPageDirty(p_user_map_info->pp_pages[i_page]); + + page_cache_release(p_user_map_info->pp_pages[i_page]); + } + + free_pages((unsigned + long)(p_user_map_info-> + p_sg), get_order(p_user_map_info-> + nr_pages * + sizeof(struct scatterlist))); + + free_pages((unsigned + long)(p_user_map_info-> + pp_pages), + get_order(p_user_map_info-> + nr_pages * sizeof(struct page *))); + + p_user_map_info->pp_pages = NULL; + p_user_map_info->p_sg = NULL; + p_user_map_info->nr_pages = 0; + p_user_map_info->nr_sg = 0; + p_user_map_info->offset = 0; + p_user_map_info->virt_user_addr = 0; + p_user_map_info->len = 0; + + return 0; +} + +static int axon_buffer_get_user_pages(axon_file_usr_buffer_t * p_usr_file, + axon_buffer_ioctl_map_t * + p_ioctl_map, __u8 is_local, + axon_local_buffer_map_list_t ** ptr, + int direction) +{ + int ret = 0; + axon_local_buffer_map_list_t *p_new_map_list; + axon_buffer_segment_t *seg_ptr; + struct scatterlist *sg; + int i; + int size; + + dbg_log("begin\n"); + + if (!access_ok + (VERIFY_WRITE, (void *)p_ioctl_map->addr.vaddr, p_ioctl_map->len)) { + dbg_err("access_ok failed for registered memory\n"); + return -EFAULT; + } + + p_new_map_list = + kzalloc(sizeof(axon_local_buffer_map_list_t), GFP_KERNEL); + + if (p_new_map_list == 0) { + dbg_err("kmalloc failed\n"); + return -ENOMEM; + } + + ret = + axon_buffer_map_user_memory(p_usr_file->p_axon_usr->p_axon, + p_ioctl_map->addr.vaddr, + p_ioctl_map->len, 1, + &(p_new_map_list->user_map_info), + direction); + + if (ret) { + dbg_err("Unable to map user memory\n"); + kfree(p_new_map_list); + return ret; + } + + size = + sizeof(axon_buffer_desc_header_t) + + p_new_map_list->user_map_info.nr_sg * sizeof(axon_buffer_segment_t); + + p_new_map_list->p_buffer_desc = + dma_alloc_coherent(p_usr_file->p_axon_usr->p_axon-> + get_device(p_usr_file->p_axon_usr->p_axon), + size, &p_new_map_list->phy_buffer_desc, + GFP_KERNEL); + + 
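+	/*
+	 * Descriptive note (added for clarity, based on the surrounding code):
+	 * the descriptor allocated above is a header followed by one
+	 * axon_buffer_segment_t per scatterlist entry. It lives in
+	 * DMA-coherent memory because the remote side fetches it over DMA
+	 * once the buffer is advertised as available. If the allocation
+	 * failed, undo the user-page mapping and give up.
+	 */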
if (p_new_map_list->p_buffer_desc == NULL) { + dbg_err("Unable to get memory for shared buffer desc\n"); + axon_buffer_unmap_user_memory(p_usr_file->p_axon_usr-> + p_axon, + &(p_new_map_list->user_map_info)); + kfree(p_new_map_list); + p_new_map_list = NULL; + return -ENOMEM; + } + + memset(p_new_map_list->p_buffer_desc, 0, size); + + axon_buffer_get_uniq_key(p_usr_file->p_axon_usr, &p_new_map_list->key); + + p_ioctl_map->key = p_new_map_list->key; + dbg_log + ("Adding key=0x%08x in 0x%p into Ox%p \n", + __be32_to_cpu(p_ioctl_map->key), + &(p_new_map_list->list), + &(p_usr_file->p_axon_usr->local_map_list_head.list)); + dbg_log + ("Allocating key=0x%08x for virtual 0x%016" + AXON_VADDR_FMT_T "\n", + __be32_to_cpu(p_ioctl_map->key), p_ioctl_map->addr.vaddr); + p_new_map_list->p_usr_file = p_usr_file; + + p_new_map_list->p_buffer_desc->key = p_new_map_list->key; + + p_new_map_list->p_buffer_desc-> + nr_segments = __cpu_to_be32(p_new_map_list->user_map_info.nr_sg); + p_new_map_list->p_buffer_desc->len = + __cpu_to_be64(p_new_map_list->user_map_info.len); + strncpy(p_new_map_list->p_buffer_desc->name, + p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + dbg_log + ("New desc: key = 0x%08x, nr_seg = %d, len = 0x%" + AXON_U64_FMT_T "\n", + __be32_to_cpu(p_new_map_list-> + p_buffer_desc->key), + __be32_to_cpu(p_new_map_list-> + p_buffer_desc->nr_segments), + __be64_to_cpu(p_new_map_list->p_buffer_desc->len)); + + seg_ptr = (axon_buffer_segment_t *) (p_new_map_list->p_buffer_desc + 1); + sg = p_new_map_list->user_map_info.p_sg; + + for (i = 0; i < p_new_map_list->user_map_info.nr_sg; + i++, seg_ptr++, sg++) { + + + seg_ptr->phys_addr = + axon_addr_xltr_to_plb(p_usr_file->p_axon_usr->xltr, + sg_dma_address(sg)); + seg_ptr->len = __cpu_to_be64(sg_dma_len(sg)); + } + + atomic_set(&p_new_map_list->ref_count, 0); + atomic_set(&p_new_map_list->is_being_deleted, 0); + p_new_map_list->is_local_only = is_local; + + + write_lock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + list_add_tail(&(p_new_map_list->list), + &(p_usr_file->p_axon_usr->local_map_list_head.list)); + write_unlock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + + + if ((p_new_map_list->is_local_only == 0) + && (atomic_read(&p_usr_file->p_axon_usr->remote_is_up) + != 0)) + axon_buffer_send_message_available + (p_usr_file->p_axon_usr, p_new_map_list); + if (ptr) + *ptr = p_new_map_list; + return ret; +} + +static int axon_buffer_release_user_pages(axon_file_usr_buffer_t * + p_usr_file, + axon_local_buffer_map_list_t * + p_cur_map_list) +{ + int ret = -EFAULT; axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + dbg_log("begin\n"); + if (p_cur_map_list->is_local_only == 1) { + + p_cur_map_list->is_released = 1; + } else if (atomic_read(&p_axon_usr->remote_is_up) + == 0) { + + p_cur_map_list->is_released = 1; + } else { + + ret = + axon_buffer_send_message_unavailable + (p_axon_usr, p_cur_map_list->key); + if (ret < 0) { + + dbg_inf + ("We can't communicate with the remote so we free up buffer 0x%08x\n", + __be32_to_cpu(p_cur_map_list->key)); + p_cur_map_list->is_released = 1; + } else { + + ret = wait_event_timeout(p_usr_file->waitq, + p_cur_map_list-> + is_released == 1, HZ * 5); + if (p_cur_map_list->is_released == 0) { + + dbg_inf + ("We got the timeout so we are going to free up buffer 0x%08x anyway\n", + __be32_to_cpu(p_cur_map_list->key)); + p_cur_map_list->is_released = 1; + } + } + } + + + if (p_cur_map_list->is_released == 0) { + + dbg_inf + ("The buffer 0x%08x is still in use by the remote\n", + 
__be32_to_cpu(p_cur_map_list->key)); + } else { + dbg_log("buffer 0x%08x is not in use\n", + __be32_to_cpu(p_cur_map_list->key)); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + if ((atomic_xchg(&p_cur_map_list->is_being_deleted, 1) == 0)) +#else + if ((atomic_inc_return(&p_cur_map_list->is_being_deleted) + == 1)) +#endif + { + write_lock_bh(&(p_axon_usr->local_list_lock)); + + list_del(&(p_cur_map_list->list)); + write_unlock_bh(&(p_axon_usr->local_list_lock)); + + ret = + axon_buffer_unmap_user_memory(p_axon_usr-> + p_axon, + & + (p_cur_map_list-> + user_map_info)); + + if (p_cur_map_list->p_buffer_desc) { + int size = + sizeof + (axon_buffer_desc_header_t) + + (__be32_to_cpu + (p_cur_map_list-> + p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t)); + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), size, + p_cur_map_list-> + p_buffer_desc, + p_cur_map_list-> + phy_buffer_desc); + p_cur_map_list->p_buffer_desc = NULL; + } + + + kfree(p_cur_map_list); + p_cur_map_list = NULL; + } else { + dbg_inf + ("buffer 0x%08x is being deleted by another thread!\n", + __be32_to_cpu(p_cur_map_list->key)); + } + } + + return ret; +} + +static int axon_buffer_release_user_pages_by_key(axon_file_usr_buffer_t * + p_usr_file, + axon_buffer_key_t key) +{ + int ret = 0; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + + dbg_log("begin\n"); + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->key == key) { + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + if (p_cur_map_list) + ret = + axon_buffer_release_user_pages(p_usr_file, p_cur_map_list); + else { + dbg_err("Can't find buffer 0x%08x\n", __be32_to_cpu(key)); + ret = -EFAULT; + } + + return ret; +} + +static int axon_buffer_associate_remote_file(axon_file_usr_buffer_t * + p_usr_file, + axon_buffer_ioctl_map_t * + p_ioctl_map, unsigned int cmd) +{ + int ret = -EFAULT; + struct list_head *pos; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + axon_remote_buffer_map_list_t *p_cur_map_list; + + dbg_log("begin\n"); + if (p_usr_file->p_remote_buffer) { + dbg_err("The file is already associated to a remote file\n"); + return -EFAULT; + } + + if ((cmd != MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) + && (cmd != MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY) + && (cmd != MC_AXON_REMOTE_BUFFER_ACCESS_RAW)) { + dbg_err("Unknown remote buffer access cmd: 0x%08X\n", cmd); + return -EFAULT; + } + + if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_RAW) { + + int size; + axon_buffer_segment_t *ptr; + p_cur_map_list = + kzalloc(sizeof(axon_remote_buffer_map_list_t), GFP_KERNEL); + + if (p_cur_map_list == NULL) { + dbg_err + ("Failed to allocate buffer for remote buffer\n"); + return -ENOMEM; + } + + + p_cur_map_list->key = 0; + + p_cur_map_list->plb_addr = 0xffffffffffffffff; + p_cur_map_list->is_locally_created = 1; + dbg_log("Buffer 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be32_to_cpu(p_cur_map_list->key), + __be64_to_cpu(p_cur_map_list->plb_addr)); + size = + sizeof(axon_buffer_desc_header_t) + + sizeof(axon_buffer_segment_t); + p_cur_map_list->p_buffer_desc = + dma_alloc_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), size, + &p_cur_map_list-> + phy_buffer_desc, GFP_KERNEL); + + if (p_cur_map_list->p_buffer_desc == 
NULL) { + dbg_err + ("Failed to allocate buffer desc for remote buffer\n"); + kfree(p_cur_map_list); + return -ENOMEM; + } + + ptr = + (axon_buffer_segment_t *) (p_cur_map_list-> + p_buffer_desc + 1); + memset(p_cur_map_list->p_buffer_desc, 0, size); + p_cur_map_list->p_buffer_desc->key = p_cur_map_list->key; + p_cur_map_list->p_buffer_desc->nr_segments = __cpu_to_be32(1); + p_cur_map_list->p_buffer_desc-> + len = __cpu_to_be64(p_ioctl_map->len); + ptr->phys_addr = __cpu_to_be64(p_ioctl_map->addr.plb_addr); + ptr->len = __cpu_to_be64(p_ioctl_map->len); + + atomic_set(&p_cur_map_list->handle_count, 1); + + atomic_set(&p_cur_map_list->use_count, 0); + + p_cur_map_list->p_axon_usr = p_axon_usr; + p_ioctl_map->key = p_cur_map_list->key; + strncpy(p_cur_map_list-> + p_buffer_desc->name, + p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + + p_usr_file->p_remote_buffer = p_cur_map_list; + return 0; + } else { + + read_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, + axon_remote_buffer_map_list_t, list); + + + + + if ((cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) + && + (strncmp + (p_ioctl_map->name, + p_cur_map_list->p_buffer_desc->name, + AXON_BUFFER_NAME_LEN) == 0)) + break; + else if (p_cur_map_list->key == p_ioctl_map->key) + break; + + p_cur_map_list = NULL; + } + + + if (p_cur_map_list) { + + atomic_inc(&p_cur_map_list->handle_count); + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + } else { + char tmp[AXON_BUFFER_NAME_LEN + 1]; + + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + + strncpy(tmp, p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + tmp[AXON_BUFFER_NAME_LEN] = 0; + + if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) { + dbg_err + ("Buffer name not found in list: %s\n", + tmp); + } else if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY) { + dbg_err + ("Buffer key not found in list: 0x%08X\n", + __be32_to_cpu(p_ioctl_map->key)); + } + return -EINVAL; + } + + + if (!p_cur_map_list->p_buffer_desc->len) { + dbg_log + ("the desc for buffer 0x%08x is not complete yet, we wait\n", + __be32_to_cpu(p_cur_map_list->key)); + + if (wait_event_interruptible + (p_axon_usr->waitq, + p_cur_map_list->p_buffer_desc->len != 0)) { + dbg_err + ("interrupted while waiting for desc of buffer 0x%08x\n", + __be32_to_cpu(p_cur_map_list->key)); + ret = -ERESTARTSYS; + goto out; + } + } + + + if (p_cur_map_list->p_buffer_desc->key != p_cur_map_list->key) { + dbg_err + ("The descriptor does not contain the correct ID 0x%08x != 0x%08x\n", + __be32_to_cpu(p_cur_map_list-> + p_buffer_desc-> + key), + __be32_to_cpu(p_cur_map_list->key)); + ret = -EFAULT; + goto out; + } + + + if (p_cur_map_list->to_be_deleted == 1) { + dbg_err + ("Buffer is marked for deletion: (0x%08X)\n", + __be32_to_cpu(p_cur_map_list->key)); + ret = -EFAULT; + goto out; + } + + p_ioctl_map->len = + be64_to_cpu(p_cur_map_list->p_buffer_desc->len); + p_ioctl_map->key = p_cur_map_list->key; + strncpy(p_ioctl_map->name, + p_cur_map_list->p_buffer_desc-> + name, AXON_BUFFER_NAME_LEN); + + p_usr_file->p_remote_buffer = p_cur_map_list; + + return 0; + } + +out: + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + + if ((atomic_dec_return(&p_cur_map_list->handle_count) == + 0) && (p_cur_map_list->to_be_deleted == 1)) { + + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log("remote buffer was released\n"); + } else { + dbg_log("remote buffer was not released yet\n"); + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + 
return ret; +} + + +static int axon_buffer_compute_buffer_location(axon_buffer_desc_header_t * + p_buffer_desc, + loff_t offset, + __u64 * p_segment_offset) +{ + int i, nr_segment = __be32_to_cpu(p_buffer_desc->nr_segments); + __u64 segment_len = 0, total_len = 0; + axon_buffer_segment_t *p_segment = + (axon_buffer_segment_t *) (p_buffer_desc + 1); + dbg_log("begin\n"); + for (i = 0; i < nr_segment; i++, p_segment++, total_len += segment_len) { + segment_len = __be64_to_cpu(p_segment->len); + if ((total_len + segment_len) > offset) { + *p_segment_offset = offset - total_len; + return i; + } + } + + return -1; +} + +static void axon_buffer_dma_completion_handler(struct axon_dmax_t + *p_axon_dmax, struct axon_dma_req_t + *p_dma_req, void *context) +{ + axon_file_usr_buffer_t *p_usr_file = (axon_file_usr_buffer_t *) context; + + dbg_log("begin\n"); + + if (p_usr_file == NULL) { + dbg_err("p_usr_file is NULL \n"); + return; + } + + if (atomic_dec_return(&p_usr_file->req_count) == 0) { + wake_up(&p_usr_file->waitq); + } + + up(&p_usr_file->sem_dma_reqs); +} + +static int axon_buffer_do_transfer(axon_file_usr_buffer_t * p_usr_file, + axon_buffer_desc_header_t * + local_buffer, loff_t local_offset, + axon_buffer_desc_header_t * + remote_buffer, loff_t remote_offset, + size_t len, int direction) +{ + int ret = len; + size_t count = 0; + __u64 local_segment_offset, remote_segment_offset, transfer_size; + axon_buffer_segment_t *local_segment, *remote_segment; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + struct axon_dma_req_t *p_dma_req; + int local_segment_id, remote_segment_id; + int local_segment_nr, remote_segment_nr; + + dbg_log("begin\n"); + + if (!local_buffer || !remote_buffer) { + dbg_err("Can't fetch one of the 2 descriptors\n"); + return -ENOSYS; + } + + local_segment_id = + axon_buffer_compute_buffer_location + (local_buffer, local_offset, &local_segment_offset); + remote_segment_id = + axon_buffer_compute_buffer_location + (remote_buffer, remote_offset, &remote_segment_offset); + + if ((local_segment_id == -1) + || (remote_segment_id == -1)) { + dbg_err("Bad value\n"); + return -EINVAL; + } + + local_segment = (axon_buffer_segment_t *) (local_buffer + 1); + remote_segment = (axon_buffer_segment_t *) (remote_buffer + 1); + local_segment_nr = __be32_to_cpu(local_buffer->nr_segments); + remote_segment_nr = __be32_to_cpu(remote_buffer->nr_segments); + + while (count < len) { + struct axon_dma_req_xfer_t dma_req_xfer = + AXON_DMA_REQ_XFER_INIT; + int i = 0; + size_t count2 = count; + p_dma_req = axon_dma_request_create(p_axon_usr->p_dma, 128); + + if (p_dma_req == NULL) { + dbg_err("Unable to create a DMA request\n"); + ret = -ENOMEM; + goto out; + } + + dbg_log("DMA descriptor\n"); + + while ((i < 120) && (count < len)) { + i++; + + transfer_size = + min((__be64_to_cpu + (local_segment[local_segment_id]. + len) - local_segment_offset), + (__be64_to_cpu + (remote_segment + [remote_segment_id].len) - + remote_segment_offset)); + transfer_size = min(transfer_size, len - count); + dma_req_xfer.size = transfer_size; + dma_req_xfer.intr = DMA_NO_INTR; + + if (direction == DMA_FROM_DEVICE) { + dma_req_xfer.src = + __cpu_to_be64(__be64_to_cpu + (remote_segment + [remote_segment_id]. + phys_addr) + + remote_segment_offset); + dma_req_xfer.dst = + __cpu_to_be64(__be64_to_cpu + (local_segment + [local_segment_id]. + phys_addr) + + local_segment_offset); + } else { + dma_req_xfer.dst = + __cpu_to_be64(__be64_to_cpu + (remote_segment + [remote_segment_id]. 
+ phys_addr) + + remote_segment_offset); + dma_req_xfer.src = + __cpu_to_be64(__be64_to_cpu + (local_segment + [local_segment_id]. + phys_addr) + + local_segment_offset); + } + + dbg_log("Command packet %d\n", i); + dbg_log("PLB src address = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be64_to_cpu(dma_req_xfer.src)); + dbg_log("PLB dst address = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be64_to_cpu(dma_req_xfer.dst)); + dbg_log("size = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", dma_req_xfer.size); + + ret = + axon_dma_request_push_xfer(p_dma_req, + &dma_req_xfer); + + if (ret != 0) { + dbg_err("Unable to build the DMA request \n"); + goto free_dma_req; + } + + count += transfer_size; + remote_segment_offset += transfer_size; + local_segment_offset += transfer_size; + + if (remote_segment_offset >= + __be64_to_cpu(remote_segment + [remote_segment_id].len)) { + remote_segment_id++; + remote_segment_offset = 0; + if (remote_segment_id == remote_segment_nr) + break; + } else { + dbg_log + ("%llu bytes left in remote_segment %d\n", + __be64_to_cpu(remote_segment + [remote_segment_id]. + len) - + remote_segment_offset, remote_segment_id); + } + + if (local_segment_offset >= + __be64_to_cpu(local_segment + [local_segment_id].len)) { + local_segment_id++; + local_segment_offset = 0; + if (local_segment_id == local_segment_nr) + break; + } else { + dbg_log + ("%llu bytes left in local_segment %d\n", + __be64_to_cpu(local_segment + [local_segment_id]. + len) - + local_segment_offset, local_segment_id); + } + + } + + dbg_log + ("dma_req is made of %d command packet for a total of %d bytes\n", + i, (int)(count - count2)); + + + if (down_interruptible(&p_usr_file->sem_dma_reqs)) { + dbg_err + ("we have been interrupted on semaphore: len = %d, count = %d, prending req = %d \n", + (int)len, (int)count, + atomic_read(&p_usr_file->req_count)); + ret = -ERESTARTSYS; + goto free_dma_req; + } + + atomic_inc(&p_usr_file->req_count); + + ret = axon_dma_request_queue(p_dma_req, + axon_buffer_dma_completion_handler, + p_usr_file); + + if (ret < 0) { + dbg_err("Unable to queue DMA request\n"); + up(&p_usr_file->sem_dma_reqs); + atomic_dec(&p_usr_file->req_count); + goto free_dma_req; + } + + dbg_log("dma_req is posted\n"); + } + + if (wait_event_interruptible + (p_usr_file->waitq, atomic_read(&p_usr_file->req_count) == 0)) { + dbg_err + ("we have been interrupted on wait queue: len = %d, count = %d, prending req = %d \n", + (int)len, (int)count, atomic_read(&p_usr_file->req_count)); + ret = -ERESTARTSYS; + goto out; + } + + dbg_log(" the all transfer is done\n"); + + return count; + +free_dma_req: + axon_dma_request_destroy(p_dma_req); + +out: + return ret; +} + + +static ssize_t axon_buffer_readwrite(struct file *file, char __user * buff, + size_t len, loff_t * offset, int direction) +{ + int ret = 0; + axon_file_usr_buffer_t *p_usr_file = + (axon_file_usr_buffer_t *) (file->private_data); + axon_buffer_ioctl_map_t ioctl_map; + axon_local_buffer_map_list_t *ptr; + dbg_log("begin\n"); + + + if (p_usr_file->p_remote_buffer == NULL) { + dbg_err("no remote buffer associated\n"); + return -ENOSYS; + } + + + if (axon_buffer_use_remote_buffer(p_usr_file) == -1) { + dbg_err("can't set the remote buff to busy\n"); + return -ENOSYS; + } + + if (*offset >= + __be64_to_cpu(p_usr_file->p_remote_buffer->p_buffer_desc->len)) { + dbg_inf("offset is out of limits\n"); + + goto release_remote_buff; + } + + + if ((*offset + len) > + __be64_to_cpu(p_usr_file->p_remote_buffer->p_buffer_desc->len)) { + len = + 
__be64_to_cpu(p_usr_file->p_remote_buffer-> + p_buffer_desc->len) - *offset; + } + + + ioctl_map.addr.vaddr = (axon_addr_ptr_t) buff; + ioctl_map.len = (axon_size_t) len; + + memset(ioctl_map.name, 0, AXON_BUFFER_NAME_LEN); + + if ((ret = + axon_buffer_get_user_pages(p_usr_file, + &ioctl_map, 1, &ptr, direction)) != 0) { + dbg_err("Failed to lock the local buffer in memory\n"); + goto release_remote_buff; + } + + + if ((ret = + axon_buffer_do_transfer(p_usr_file, + ptr-> + p_buffer_desc, + 0, + p_usr_file-> + p_remote_buffer-> + p_buffer_desc, + *offset, len, direction)) < 0) { + dbg_err("Failed to do the transfer\n"); + goto release_local_buff; + } + + *offset += ret; + +release_local_buff: + if (axon_buffer_release_user_pages(p_usr_file, ptr)) + dbg_err("Failed to release local user pages\n"); + ptr = NULL; + +release_remote_buff: + if (axon_buffer_release_remote_buffer(p_usr_file)) + dbg_err("Failed to release remote buff\n"); + + return ret; +} + + +static ssize_t axon_buffer_read(struct file *file, char __user * buff, + size_t len, loff_t * offset) +{ + dbg_log("begin\n"); + return axon_buffer_readwrite(file, buff, len, offset, DMA_FROM_DEVICE); +} + + +static ssize_t axon_buffer_write(struct file *file, + const char __user * buff, size_t len, + loff_t * offset) +{ + dbg_log("begin\n"); + return axon_buffer_readwrite(file, (char __user *) + buff, len, offset, DMA_TO_DEVICE); +} + +static int axon_buffer_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + axon_file_usr_buffer_t *p_usr_file = + (axon_file_usr_buffer_t *) (file->private_data); + axon_buffer_ioctl_map_t ioctl_map; + dbg_log("begin\n"); + + if ((_IOC_TYPE(cmd) != MC_AXON_BUFFER_MAGIC) + || (_IOC_NR(cmd) > MC_AXON_BUFFER_MAXNR)) { + dbg_log("Ioctl wrong command\n"); + return -ENOTTY; + } + + switch (cmd) { + + case MC_AXON_LOCAL_BUFFER_REGISTER: + dbg_log("Ioctl MC_AXON_LOCAL_BUFFER_REGISTER:\n"); + if (!access_ok + (VERIFY_WRITE, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not writable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_get_user_pages(p_usr_file, + &ioctl_map, 0, NULL, + DMA_BIDIRECTIONAL); + if (ret != 0) + break; + ret = + copy_to_user((axon_buffer_ioctl_map_t *) arg, + &ioctl_map, sizeof(axon_buffer_ioctl_map_t)); + break; + case MC_AXON_LOCAL_BUFFER_UNREGISTER: + dbg_log("Ioctl MC_AXON_LOCAL_BUFFER_UNREGISTER:\n"); + if (!access_ok + (VERIFY_READ, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not readable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_release_user_pages_by_key(p_usr_file, + ioctl_map.key); + break; + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY: + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME: + case MC_AXON_REMOTE_BUFFER_ACCESS_RAW: + dbg_log("Ioctl case MC_AXON_REMOTE_BUFFER_ACCESS:\n"); + if (!access_ok + (VERIFY_WRITE, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not writable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_associate_remote_file + (p_usr_file, 
&ioctl_map, cmd); + if (ret == 0) { + ret = copy_to_user((axon_buffer_ioctl_map_t *) + arg, &ioctl_map, + sizeof(axon_buffer_ioctl_map_t)); + } + break; + default: + dbg_log("Ioctl unknown ioctl command \n"); + ret = -ENOTTY; + }; + return ret; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) +#ifdef CONFIG_COMPAT + + +typedef struct { + + + char name[AXON_BUFFER_NAME_LEN]; + + axon_buffer_key_t key; + + u32 len; + union { + + u32 vaddr; + + plb_addr_t plb_addr; + } addr; +} axon_buffer_ioctl_map_32_t; + +static long axon_buffer_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + axon_buffer_ioctl_map_32_t __user *p_ioctl32_u = compat_ptr(arg); + axon_buffer_ioctl_map_32_t ioctl32_k; + axon_buffer_ioctl_map_t __user *p_ioctl64; + long rval = -ENOIOCTLCMD; + axon_size_t len; + axon_buffer_key_t key; + + if (!access_ok + (VERIFY_WRITE, p_ioctl32_u, sizeof(axon_buffer_ioctl_map_32_t))) { + dbg_err("axon_buffer_ioctl_map_32_t struct is not writable\n"); + return -EFAULT; + } + + p_ioctl64 = compat_alloc_user_space(sizeof(axon_buffer_ioctl_map_t)); + if (!access_ok + (VERIFY_WRITE, p_ioctl64, sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("internal axon_buffer_ioctl_map_t struct is not writable\n"); + return -EFAULT; + } + if (copy_from_user + (&ioctl32_k, p_ioctl32_u, sizeof(axon_buffer_ioctl_map_32_t))) + return -EFAULT; + if (copy_to_user(p_ioctl64->name, ioctl32_k.name, AXON_BUFFER_NAME_LEN)) + return -EFAULT; + if (__put_user(ioctl32_k.key, &p_ioctl64->key) + || __put_user((axon_size_t) + (ioctl32_k.len), &p_ioctl64->len) + || __put_user(ioctl32_k.addr.plb_addr, &p_ioctl64->addr.plb_addr)) + return -EFAULT; + switch (cmd) { + case MC_AXON_LOCAL_BUFFER_REGISTER: + + if (__put_user + (compat_ptr(ioctl32_k.addr.vaddr), &p_ioctl64->addr.vaddr)) + return -EFAULT; + + case MC_AXON_LOCAL_BUFFER_UNREGISTER: + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY: + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME: + case MC_AXON_REMOTE_BUFFER_ACCESS_RAW: + + lock_kernel(); + rval = + axon_buffer_ioctl(file->f_dentry->d_inode, + file, cmd, (unsigned long) + p_ioctl64); + unlock_kernel(); + if (__get_user(len, &p_ioctl64->len) + || __put_user((u32) len, &p_ioctl32_u->len) + || __get_user(key, &p_ioctl64->key) + || __put_user(key, &p_ioctl32_u->key)) + return -EFAULT; + break; + default: + break; + } + + return rval; +} + +#endif +#endif + + +static int axon_buffer_release(struct inode *inode, struct file *file) +{ + int ret = 0; + struct list_head *pos, *q; + axon_file_usr_buffer_t *p_usr_file = + (axon_file_usr_buffer_t *) (file->private_data); + dbg_log("begin\n"); + if (p_usr_file == NULL) { + dbg_err + ("No private struct associated to the file descriptor \n"); + module_put(THIS_MODULE); + return 0; + } + + + if (p_usr_file->p_remote_buffer) { + ret = + axon_buffer_dissociate_remote_buffer_from_file(p_usr_file); + if (ret) { + dbg_err("failed to release the remote buffer\n"); + + } + + p_usr_file->p_remote_buffer = NULL; + } + + + + write_lock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + + list_for_each_safe(pos, q, + &(p_usr_file-> + p_axon_usr->local_map_list_head.list)) { + axon_local_buffer_map_list_t *p_cur_map_list; + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->p_usr_file == p_usr_file) { + + dbg_log("Freeing buffer 0x%08x\n", + __be32_to_cpu(p_cur_map_list->key)); + write_unlock_bh(& + (p_usr_file->p_axon_usr-> + local_list_lock)); + if (axon_buffer_release_user_pages + (p_usr_file, p_cur_map_list)) { + dbg_err("Failed to 
release local user pages\n"); + } + + write_lock_bh(& + (p_usr_file->p_axon_usr-> + local_list_lock)); + } + } + + write_unlock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + + kfree(p_usr_file); + file->private_data = NULL; + module_put(THIS_MODULE); + return ret; +} + + +static int axon_buffer_open(struct inode *inode, struct file *file) +{ + int ret = 0; + axon_file_usr_buffer_t *p_usr_file = NULL; + dbg_log("begin\n"); + file->private_data = NULL; + try_module_get(THIS_MODULE); + + p_usr_file = kzalloc(sizeof(axon_file_usr_buffer_t), GFP_KERNEL); + if (p_usr_file == NULL) { + dbg_err("Failed to allocate axon_file_usr_buffer_t\n"); + module_put(THIS_MODULE); + return -ENOMEM; + } + + + p_usr_file->p_axon_usr = + container_of(inode->i_cdev, axon_usr_buffer_t, cdev); + file->private_data = p_usr_file; + init_waitqueue_head(&p_usr_file->waitq); + sema_init(&p_usr_file->sem_dma_reqs, 2); + atomic_set(&p_usr_file->req_count, 0); + return ret; +} + +static struct file_operations axon_buffer_fops = { + .read = axon_buffer_read,.write = + axon_buffer_write,.open = + axon_buffer_open,.release = + axon_buffer_release,.ioctl = axon_buffer_ioctl, +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) +#ifdef CONFIG_COMPAT + .compat_ioctl = axon_buffer_compat_ioctl, +#endif +#endif +}; +static dev_t buf_dev; + + +static int axon_buffer_remove(axon_t * p_axon) +{ + axon_usr_buffer_t *p_axon_usr; + struct list_head *p_cursor; + struct list_head *p_next; + dbg_log("begin\n"); + list_for_each_safe(p_cursor, p_next, &axon_buffer_list) { + p_axon_usr = list_entry(p_cursor, axon_usr_buffer_t, list); + if (p_axon_usr->p_axon == p_axon) { + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + if (atomic_xchg(&p_axon_usr->remote_is_up, 0) == 1) +#else + if (atomic_dec_return(&p_axon_usr->remote_is_up) == 0) +#endif + { + axon_local_buffer_map_list_t *p_cur_map_list; + struct list_head *pos; + + + + + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + + list_for_each(pos, + &(p_axon_usr->local_map_list_head. 
+ list)) { + p_cur_map_list = + list_entry(pos, + axon_local_buffer_map_list_t, + list); + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + + axon_buffer_handle_down_message(p_axon_usr); + + + axon_buffer_send_state_down(p_axon_usr); + } + + + axon_sms_unsubscribe(p_axon_usr->sms, + AXON_SMS_CHANNEL_USR_BUFFER, + axon_buffer_sms_handler); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + class_device_destroy(axon_get_class(), + p_axon_usr->cdev_num); +#else + class_simple_device_remove(p_axon_usr->cdev_num); +#endif + + cdev_del(&p_axon_usr->cdev); + + list_del(p_cursor); + + kfree(p_axon_usr); + + break; + } + } + + return 0; +} + + +static int axon_buffer_probe(axon_t * p_axon) +{ + int ret = 0; + axon_usr_buffer_t *p_axon_usr; + dbg_log("begin\n"); + + p_axon_usr = kzalloc(sizeof(axon_usr_buffer_t), GFP_KERNEL); + if (p_axon_usr == NULL) { + dbg_err("failed to allocate memory\n"); + return -ENOMEM; + } + + + p_axon_usr->cdev_num = MKDEV(MAJOR(buf_dev), p_axon->id); + cdev_init(&p_axon_usr->cdev, &axon_buffer_fops); + ret = cdev_add(&p_axon_usr->cdev, p_axon_usr->cdev_num, 1); + if (ret < 0) { + dbg_err + ("Unable to add user space driver for board 0x%p, on minor %d\n", + p_axon_usr->p_axon, p_axon->id); + return ret; + } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + class_device_create(axon_get_class(), NULL, + p_axon_usr->cdev_num, NULL, "buffer%d", p_axon->id); +#else + class_simple_device_add(axon_get_class(), + p_axon_usr-> + cdev_num, NULL, "buffer%d", p_axon->id); +#endif + + p_axon_usr->id = p_axon->id; + + p_axon_usr->p_axon = p_axon; + + p_axon_usr->sms = p_axon->sms_get(p_axon); + + p_axon_usr->peer_mbox = axon_peer_mbox_get(p_axon); + + p_axon_usr->p_pio = p_axon->pio_get(p_axon); + + p_axon_usr->xltr = p_axon->addr_xltr_get(p_axon); + + p_axon_usr->p_dma = p_axon->dmax_get(p_axon); + atomic_set(&p_axon_usr->remote_is_up, 0); + INIT_LIST_HEAD(&(p_axon_usr->local_map_list_head.list)); + rwlock_init(&(p_axon_usr->local_list_lock)); + INIT_LIST_HEAD(&(p_axon_usr->remote_map_list_head.list)); + rwlock_init(&(p_axon_usr->remote_list_lock)); + init_waitqueue_head(&p_axon_usr->waitq); + + ret = axon_sms_subscribe(p_axon_usr->sms, + AXON_SMS_CHANNEL_USR_BUFFER, + axon_buffer_sms_handler, p_axon_usr); + if (ret < 0) { + dbg_err + ("Unable to subscribe to channel %d \n", + AXON_SMS_CHANNEL_USR_BUFFER); + return ret; + } + + + axon_buffer_send_state_up(p_axon_usr, 0); + list_add_tail(&p_axon_usr->list, &axon_buffer_list); + return ret; +} + +static axon_driver_t axon_buffer_driver = { + .name = "buffer", + .probe = axon_buffer_probe, + .remove = axon_buffer_remove, +}; + + +static __init int axon_buffer_module_init(void) +{ + dbg_log("begin\n"); + INIT_LIST_HEAD(&axon_buffer_list); + alloc_chrdev_region(&buf_dev, 0, 256, "buffer"); + axon_driver_register(&axon_buffer_driver); + return 0; +} + + +static __exit void axon_buffer_module_cleanup(void) +{ + dbg_log("begin\n"); + axon_driver_unregister(&axon_buffer_driver); + unregister_chrdev_region(buf_dev, 256); +} + +module_init(axon_buffer_module_init); +module_exit(axon_buffer_module_cleanup); Index: linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer.c~ =================================================================== --- /dev/null +++ linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer.c~ @@ -0,0 +1,2778 @@ +/****************************************************************** + * Copyright (C) 2006 Mercury Computer Systems, Inc. 
+ * 199 Riverneck Road + * Chelmsford, MA 01824-2820 + * (978) 256-1300 + * webinfo@mc.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA + * 02111-1307, USA. + * http://www.gnu.org/copyleft/gpl.html + ******************************************************************/ +/*************************************************************************** + * + * This driver implement local/remote buffer management service + * + ***************************************************************************/ + +//#define AXON_DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_DESCRIPTION("user space buffer driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Jean-Christophe Dubois (jdubois@mc.com)"); + +//#define AXON_DEBUG +#include + +static struct list_head axon_buffer_list; + +/** + * generate a unique buffer ID + * The bufferID is always BE + * FIXME: we need to find a better implementation. + */ +static void axon_buffer_get_uniq_key(axon_usr_buffer_t * p_axon_usr, + axon_buffer_key_t * p_axon_map_key) +{ + axon_local_buffer_map_list_t *p_cur_map_list; + struct list_head *pos; + + dbg_log("begin\n"); + + *p_axon_map_key = (axon_buffer_key_t) + __cpu_to_be32(atomic_inc_return(&p_axon_usr->key)); + + /* + * We need to check if this key is not already in use. + * For now don't be clever and use the slow algorithm. + */ + read_lock_bh(&(p_axon_usr->local_list_lock)); + + /* + * We go through the buffer list associated to this file descriptor + */ + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + /* + * If this key is already in use + */ + if (p_cur_map_list->key == *p_axon_map_key) { + dbg_log("key 0x%08x is already in use\n", + __be32_to_cpu(*p_axon_map_key)); + + /* + * try to get another one + * FIXME: we should set a limit in the number of time + * we call it recursively + */ + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + axon_buffer_get_uniq_key(p_axon_usr, p_axon_map_key); + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); +} + +/** + * extract the buffer ID from the SMS + */ +static axon_buffer_key_t axon_buffer_extract_key(struct axon_sms_msg_t *p_msg) +{ + dbg_log("begin\n"); + + return *(axon_buffer_key_t *) (p_msg->payload + AXON_BUFFER_SMS_ID_OFFSET); +} + +/* + * The payload provide by the SMS service is 15 bytes + channel + * The easiest way to have its size is to do: sizeof( msg . payload). 
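+ *
+ * As an illustration, the send helpers below fill that payload with a
+ * plain memcpy at the documented offsets; for the buffer key (which is
+ * always kept big-endian) this is roughly:
+ *
+ *	axon_buffer_key_t key = __cpu_to_be32(42);	(42 is a made-up key)
+ *	memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key));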
+ * + * In the case of the buffer driver we are using a single channel ( defined + * into axon_sms_channels.h), and message type are encoded within the payload + * + * A buffer SMS is made of: + * + * 1 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 + * T|N|F|.|.|.|.|.|.|.|.|.|.|.|.| When T = AXON_SYSTEM_BUFFER_UP + * T|N|.|.|.|.|.|.|.|.|.|.|.|.|.| When T = AXON_SYSTEM_BUFFER_DOWN + * T|N| I D |PLB-ADDR |S I Z E| When T = AXON_SYSTEM_BUFFER_AVAILABLE + * T|N| I D |.|.|.|.|.|.|.|.|.| When T = AXON_SYSTEM_BUFFER_UNAVAILABLE + * T|N| I D |.|.|.|.|.|.|.|.|.| When T = AXON_SYSTEM_BUFFER_IN_USE + * T|N| I D |.|.|.|.|.|.|.|.|.| When T = AXON_SYSTEM_BUFFER_NOT_IN_USE + * + * Where: + * - T = Type of the request. One of + * AXON_SYSTEM_BUFFER_UP, AXON_SYSTEM_BUFFER_DOWN, + * AXON_SYSTEM_BUFFER_AVAILABLE, AXON_SYSTEM_BUFFER_UNAVAILABLE, + * AXON_SYSTEM_BUFFER_IN_USE, AXON_SYSTEM_BUFFER_NOT_IN_USE + * N = Node ID (to know where this message comes from) + * F = Flag specific to init time + * ID = identifier of the buffer on the related node + * PLB-ADDR = PLB address of the desciptor on 40 bits (Big Endian) + * SIZE = Size of the descriptor (Big Endian) + * + */ + +/** + * Send a UP service message to remote + */ +static int axon_buffer_send_state_up(axon_usr_buffer_t * p_axon_usr, u8 flags) +{ + struct axon_sms_msg_t msg; + int ret; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_UP; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + msg.payload[AXON_BUFFER_SMS_UP_FLAG_OFFSET] = flags; + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send UP message\n"); + } + + return ret; +} + +/** + * Send a DOWN service message to remote + */ +static int axon_buffer_send_state_down(axon_usr_buffer_t * p_axon_usr) +{ + struct axon_sms_msg_t msg; + int ret; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_DOWN; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send DOWN message\n"); + } + + return ret; +} + +static int axon_buffer_send_message_available(axon_usr_buffer_t * + p_axon_usr, + axon_local_buffer_map_list_t + * p_cur_map_list) +{ + int ret = 0; + struct axon_sms_msg_t msg; + __u64 addr = 0; + __u32 desc_size; + + dbg_log("begin\n"); + + desc_size = + __cpu_to_be32(sizeof(axon_buffer_desc_header_t) + + (__cpu_to_be32 + (p_cur_map_list->p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t))); + + /* + * tranlate to PLB BE + */ + addr = + axon_addr_xltr_to_plb(p_axon_usr->xltr, + p_cur_map_list->phy_buffer_desc); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_AVAILABLE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, + &p_cur_map_list->key, sizeof(p_cur_map_list->key)); + + memcpy(msg.payload + AXON_BUFFER_SMS_ADDR_OFFSET, + ((u8 *) (&addr)) + 3, sizeof(addr) - 3); + + memcpy(msg.payload + AXON_BUFFER_SMS_SIZE_OFFSET, &desc_size, + sizeof(desc_size)); + + dbg_log("sending bufferID 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T ", desc_size = %d (0x%08x)\n", + __be32_to_cpu(p_cur_map_list->key), __be64_to_cpu(addr), + __be32_to_cpu(desc_size), desc_size); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + 
dbg_err("Unable to send AVAILABLE message\n"); + } + + return ret; +} + +static int axon_buffer_send_message_unavailable(axon_usr_buffer_t * + p_axon_usr, axon_buffer_key_t key) +{ + int ret = 0; + struct axon_sms_msg_t msg; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = + AXON_SYSTEM_BUFFER_UNAVAILABLE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send UNAVAILABLE message\n"); + } + + return ret; +} + +/** + * send the NOT_IN_USE message to the remote + */ +static int axon_buffer_send_message_not_in_use(axon_usr_buffer_t * + p_axon_usr, axon_buffer_key_t key) +{ + int ret = 0; + struct axon_sms_msg_t msg; + + dbg_log("begin\n"); + + msg.channel = AXON_SMS_CHANNEL_USR_BUFFER; + + msg.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_NOT_IN_USE; + msg.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + ret = axon_sms_send(p_axon_usr->sms, p_axon_usr->peer_mbox, &msg, 15); + + if (ret < 0) { + dbg_err("Unable to send NOT_IN_USE message\n"); + } + + return ret; +} + +/** + * + */ +static int axon_buffer_free_remote_buffer(axon_remote_buffer_map_list_t * + p_cur_map_list, + axon_usr_buffer_t * p_axon_usr) +{ + dbg_log("begin\n"); + + if (atomic_read(&p_cur_map_list->handle_count)) { + dbg_log + ("Buffer 0x%08x is still in use, count = %d\n", + __be32_to_cpu(p_cur_map_list->key), + atomic_read(&p_cur_map_list->handle_count)); + return 0; + } + + /* + * We were the last one to hold a reference to the buffer. + * Even the driver has given up on it. + */ + + /* + * So at this point the to_be_deleted should be set + * let's do a sanity check + */ + + if (p_cur_map_list->to_be_deleted == 0) { + dbg_err + ("Ref count is 0 for Buffer 0x%08x is not marked for deletion\n", + __be32_to_cpu(p_cur_map_list->key)); + } + + /* + * Second sanity check. At this point the buffer should not + * be in use anymore + */ + if (atomic_read(&p_cur_map_list->use_count) != 0) { + dbg_err + ("Ref count is 0, buffer 0x%08x is marked for deletion, but usecount is not 0\n", + __be32_to_cpu(p_cur_map_list->key)); + } + + if (p_cur_map_list->p_buffer_desc) { + int size = + sizeof(axon_buffer_desc_header_t) + + (__be32_to_cpu + (p_cur_map_list->p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t)); + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr->p_axon), size, + p_cur_map_list->p_buffer_desc, + p_cur_map_list->phy_buffer_desc); + p_cur_map_list->p_buffer_desc = NULL; + } + + if (!p_cur_map_list->is_locally_created) { + + list_del(&p_cur_map_list->list); + + /* + * If we were the last one to hold a reference to the + * buffer we need to free it and notify the remote + * system + */ + axon_buffer_send_message_not_in_use(p_axon_usr, + p_cur_map_list->key); + } + + dbg_log("Buffer 0x%08x is released\n", + __be32_to_cpu(p_cur_map_list->key)); + + kfree(p_cur_map_list); + + /* + * we return 1 to indicate the buffer is now released + */ + return 1; + +} + +/** + * dissociate the buffer desc from the file descriptor + * we send the not_in_use message if the buffer is marked for + * deletion and we are the last one to use it. 
+ */ +static int +axon_buffer_dissociate_remote_buffer_from_file(axon_file_usr_buffer_t * + p_usr_file) +{ + int ret = 0; + dbg_log("begin\n"); + + if (p_usr_file->p_remote_buffer) { + + /* Sanity check, the buffer should not be in use any more */ + if (atomic_read(&p_usr_file->p_remote_buffer->use_count) > 0) { + dbg_err("buffer is still used\n"); + ret = -EINVAL; + } else { + + write_lock_bh(& + (p_usr_file->p_axon_usr-> + remote_list_lock)); + /* Are we the only local user of the remote buffer ? */ + /* The other user could/should be the wq if it is not over yet */ + /* also is the buffer to be deleted */ + if ((atomic_dec_return + (&p_usr_file->p_remote_buffer->handle_count) + == 0) + && (p_usr_file->p_remote_buffer-> + to_be_deleted == 1)) { + + /* so we are the only user, the wq is over */ + /* it needs to be deleted, try to do it */ + if (axon_buffer_free_remote_buffer + (p_usr_file->p_remote_buffer, + p_usr_file->p_axon_usr)) { + + /* the buffer is gone */ + dbg_log + ("remote buffer has been released\n"); + } else { + + /* hum ... the buffer is still there */ + dbg_log + ("remote buffer is still used\n"); + } + } + + /* clear the pointer in any case */ + p_usr_file->p_remote_buffer = NULL; + + write_unlock_bh(& + (p_usr_file->p_axon_usr-> + remote_list_lock)); + } + } else { + dbg_inf("remote buff pointer is NULL !!!\n"); + } + + return ret; +} + +/** + * increment the use count before read/write the file + */ +static int axon_buffer_use_remote_buffer(axon_file_usr_buffer_t * p_usr_file) +{ + dbg_log("begin\n"); + + if ((p_usr_file->p_remote_buffer) + && (p_usr_file->p_remote_buffer->to_be_deleted == 0)) { + atomic_inc(&p_usr_file->p_remote_buffer->use_count); + return 0; + } else { + return -1; + } +} + +/** + * decrement the use count after read/write the file + * We may have to release the buffer if it has been deleted by the remote + */ +static int axon_buffer_release_remote_buffer(axon_file_usr_buffer_t * + p_usr_file) +{ + dbg_log("begin\n"); + + if ((p_usr_file->p_remote_buffer->to_be_deleted == 1) && + (atomic_dec_return(&p_usr_file->p_remote_buffer->use_count) == 0)) { + /* + * We are the last one to use this buffer and it is + * going away. + * So we should try to free it. + */ + return + axon_buffer_dissociate_remote_buffer_from_file(p_usr_file); + } else { + atomic_dec(&p_usr_file->p_remote_buffer->use_count); + } + + return 0; +} + +/** + * handle a remote UP message + */ +static int axon_buffer_handle_up_message(axon_usr_buffer_t * p_axon_usr, + struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + struct list_head *pos; + + dbg_log("begin\n"); + + /* + * If we hadn't received an UP message from the remote side before or + * if we haven't successfullly sent an UP message (could happen if + * remote maibox wasn't ready when we tried to send UP), send it now. + * Sending it when we get AVAILABLE is no good. What if the remote + * side uses our buffers but doesn't make buffers avaialble to us? + * There will then be no up message. We need to send it as soon as + * we konw that the remote can receive messages + */ + /* + * If we never heard of the remote so far, we sent it the UP message + * followed by the list of locally registered buffers. + * + * NOTE: With the actual scheme it is probable we will get the UP + * message twice. However we want to avoid to advertise our buffers + * twice. 
+ */ + /* + * Send the UP message if the remote side hasn't already received + * one regardless of whether we received one before + */ + if ((p_msg->payload[AXON_BUFFER_SMS_UP_FLAG_OFFSET] & + AXON_BUFFER_FLAG_RECEIVED_UP) != AXON_BUFFER_FLAG_RECEIVED_UP) { + ret = axon_buffer_send_state_up(p_axon_usr, + AXON_BUFFER_FLAG_RECEIVED_UP); + } + + /* + * If we were not able to successfully send the UP message don't + * attempt to send the buffer information. Wait until the next UP + * message is received + */ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + if ((ret == 0) && (atomic_xchg(&p_axon_usr->remote_is_up, 1) == 0)) +#else + if ((ret == 0) + && (atomic_inc_return(&p_axon_usr->remote_is_up) == 1)) +#endif + { + read_lock_bh(&(p_axon_usr->local_list_lock)); + + /* + * we now need to send an available message for all "global" + * buffers registered locally + */ + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + axon_local_buffer_map_list_t *p_cur_map_list; + + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + + if (p_cur_map_list->is_local_only == 0) + axon_buffer_send_message_available + (p_axon_usr, p_cur_map_list); + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + } + + return ret; +} + +/** + * handle a remote DOWN message + */ +static int axon_buffer_handle_down_message(axon_usr_buffer_t * p_axon_usr) +{ + int ret = 0; + struct list_head *pos, *q; + + dbg_log("begin\n"); + + atomic_set(&p_axon_usr->remote_is_up, 0); + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_for_each_safe(pos, q, &(p_axon_usr->remote_map_list_head.list)) { + axon_remote_buffer_map_list_t *p_cur_map_list; + + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + + /* + * mark it for deletion + */ + p_cur_map_list->to_be_deleted = 1; + + /* + * Free the buffer + */ + if (atomic_read(&p_cur_map_list->handle_count) == 0) { + + if (axon_buffer_free_remote_buffer + (p_cur_map_list, p_axon_usr)) { + dbg_log + ("One remote buffer has been released \n"); + } else { + dbg_log + ("remote buffer still has handles opened \n"); + } + + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + return ret; +} + +static void axon_buffer_desc_dma_completion_handler(struct axon_dmax_t + *p_axon_dmax, struct axon_dma_req_t + *p_dma_req, void *context) +{ + axon_remote_buffer_map_list_t *p_cur_map_list = + (axon_remote_buffer_map_list_t *) context; + axon_usr_buffer_t *p_axon_usr = p_cur_map_list->p_axon_usr; + + dbg_log("begin\n"); + + /* The DMA is over, so the remote should know we are using its buffer + * and the buffer desc should be complete in our memory + */ + if (p_cur_map_list->key != p_cur_map_list->p_buffer_desc->key) { + dbg_err + ("The descriptor does not contain the correct ID 0x%08x != 0x%08x\n", + __be32_to_cpu(p_cur_map_list->p_buffer_desc->key), + __be32_to_cpu(p_cur_map_list->key)); + /* + * We are going to delete this remote buffer as there + * is nothing we can do with it + */ + p_cur_map_list->to_be_deleted = 1; + } + + wake_up(&p_axon_usr->waitq); + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + /* + * The work queue is done for this buffer so we need to dec + * the handle_count + */ + if ((atomic_dec_return(&p_cur_map_list->handle_count) == + 0) && (p_cur_map_list->to_be_deleted == 1)) { + /* + * If the buffer was deleted by the remote before we finished + * mapping it, we need to free it up + */ + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log("remote buffer was released\n"); + } else 
{ + dbg_log("remote buffer was not released yet\n"); + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); +} + +/** + * handle a remote AVAILABLE message + */ +static int axon_buffer_handle_available_message(axon_usr_buffer_t * p_axon_usr, struct axon_sms_msg_t + *p_msg) +{ + int ret = 0; + axon_remote_buffer_map_list_t *p_cur_map_list = NULL; + struct list_head *pos; + axon_buffer_key_t key; + u32 desc_size; + struct axon_dma_req_t *p_dma_req; + axon_dma_req_xfer_t dma_req_xfer = AXON_DMA_REQ_XFER_INIT; + axon_dma_req_mbox_t dma_req_mbox = AXON_DMA_REQ_MBOX_INIT; + axon_sms_msg_t msg_ans; + u8 msg_encoded[AXON_SMS_SIZE]; + + dbg_log("begin\n"); + + key = axon_buffer_extract_key(p_msg); + + read_lock_bh(&(p_axon_usr->remote_list_lock)); + + /* + * We check if this ID is not already known, it should not + */ + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + + /* + * Is it the requested buffer + */ + if (p_cur_map_list->key == key) { + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + + /* + * if the pointer is NULL, this is an unknown buffer + */ + if (p_cur_map_list) { + /* + * This is bad. the buffer should not be known. + */ + dbg_err + ("Buffer 0x%08x is already in the list of remote buffers\n", + __be32_to_cpu(__be32_to_cpu(key))); + return -EINVAL; + } + + /* + * we store the buffer identity for later use + */ + p_cur_map_list = + kzalloc(sizeof(axon_remote_buffer_map_list_t), GFP_ATOMIC); + + if (p_cur_map_list == NULL) { + dbg_err("Failed to allocate list element for remote buffer\n"); + return -ENOMEM; + } + + p_cur_map_list->key = key; + + /* we need to regenerate the 64 bits PLB address from 40 bits */ + *((u8 *) (&p_cur_map_list->plb_addr)) = + p_msg->payload[AXON_BUFFER_SMS_ADDR_OFFSET]; + memcpy(((u8 *) (&p_cur_map_list->plb_addr)) + 3, + p_msg->payload + AXON_BUFFER_SMS_ADDR_OFFSET, + sizeof(p_cur_map_list->plb_addr) - 3); + + memcpy(&desc_size, p_msg->payload + AXON_BUFFER_SMS_SIZE_OFFSET, + sizeof(desc_size)); + + dbg_log("Buffer 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T ", desc_size = %d (0x%08x)\n", + __be32_to_cpu(p_cur_map_list->key), + __be64_to_cpu(p_cur_map_list->plb_addr), + __be32_to_cpu(desc_size), desc_size); + + desc_size = __be32_to_cpu(desc_size); + + p_cur_map_list->p_buffer_desc = + dma_alloc_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr->p_axon), + desc_size, + &p_cur_map_list->phy_buffer_desc, GFP_ATOMIC); + + if (p_cur_map_list->p_buffer_desc == NULL) { + dbg_err("Failed to allocate buffer desc\n"); + ret = -ENOMEM; + goto free_list_element; + } + + memset(p_cur_map_list->p_buffer_desc, 0, desc_size); + + p_dma_req = axon_dma_request_create(p_axon_usr->p_dma, 128); + + if (p_dma_req == NULL) { + dbg_err("failed to create DMA req\n"); + ret = -ENOMEM; + goto free_buffer_desc; + } + + /* + * first put a DMA command packet to tell the remote we are + * using the buffer + */ + msg_ans.channel = AXON_SMS_CHANNEL_USR_BUFFER; + msg_ans.payload[AXON_BUFFER_SMS_REQ_OFFSET] = AXON_SYSTEM_BUFFER_IN_USE; + msg_ans.payload[AXON_BUFFER_SMS_NODE_OFFSET] = 0; + + memcpy(msg_ans.payload + AXON_BUFFER_SMS_ID_OFFSET, &key, sizeof(key)); + + axon_sms_encode(p_axon_usr->sms, &msg_ans, + msg_encoded, 1, AXON_SMS_SIZE); + + dma_req_mbox.dst_id = AXON_DMA_TARGET_PEER; + dma_req_mbox.msg = msg_encoded; + dma_req_mbox.msg_size = AXON_SMS_SIZE; + + ret = axon_dma_request_push_mbox(p_dma_req, &dma_req_mbox); + + if (ret < 0) { + 
dbg_err("Failed to add the MBX command packet\n"); + goto free_dma_req; + } + + /* Now get the remote descriptor */ + dma_req_xfer.size = desc_size; + dma_req_xfer.intr = DMA_NO_INTR; + dma_req_xfer.src = p_cur_map_list->plb_addr; + dma_req_xfer.dst = axon_addr_xltr_to_plb(p_axon_usr->xltr, + p_cur_map_list-> + phy_buffer_desc); + + /* add this command packet to the DMA req */ + ret = axon_dma_request_push_xfer(p_dma_req, &dma_req_xfer); + + if (ret < 0) { + dbg_err("Failed to add the data command packet\n"); + goto free_dma_req; + } + + /* + * set the various counts + */ + /* + * We set the handle_count to 1 to prevent any + * deletion before the work queue is done. The work + * queue will decrement it by one when it is done + * retrieving the remote desc. + */ + atomic_set(&p_cur_map_list->handle_count, 1); + /* + * It is not yet associated to any local file + * descriptor. + */ + atomic_set(&p_cur_map_list->use_count, 0); + /* + * Remember the board providing services for this + * remote buffer + */ + p_cur_map_list->p_axon_usr = p_axon_usr; + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + /* + * add it to the remote list + */ + list_add_tail(&(p_cur_map_list->list), + &(p_axon_usr->remote_map_list_head.list)); + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + /* queue the DMA req */ + ret = + axon_dma_request_queue(p_dma_req, + axon_buffer_desc_dma_completion_handler, + p_cur_map_list); + + if (ret < 0) { + dbg_err("Failed to queue the DMA req\n"); + goto remove_from_list; + } + + return ret; + +remove_from_list: + write_lock_bh(&(p_axon_usr->remote_list_lock)); + list_del(&(p_cur_map_list->list)); + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + +free_dma_req: + axon_dma_request_destroy(p_dma_req); + +free_buffer_desc: + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), + desc_size, + p_cur_map_list-> + p_buffer_desc, p_cur_map_list->phy_buffer_desc); +free_list_element: + kfree(p_cur_map_list); + + return ret; +} + +/** + * handle a remote UNAVAILABLE message + */ +static int axon_buffer_handle_unavailable_message(axon_usr_buffer_t * + p_axon_usr, + struct axon_sms_msg_t + *p_msg) +{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_remote_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + /* + * We need to determine if this remote buffer is in use locally. + * if no, we can send the NOT_IN_USE message. + * if yes, we need to wait for the buffer to be released + */ + key = axon_buffer_extract_key(p_msg); + /* + * first check if this is a buffer we know about + */ + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_remote_buffer_map_list_t, list); + /* + * Is it the requested buffer + */ + if (p_cur_map_list->key == key) { + /* + * mark it for deletion + */ + p_cur_map_list->to_be_deleted = 1; + /* + * OK we handled our buffer, now exit from the loop + */ + break; + } + p_cur_map_list = NULL; + } + + if (p_cur_map_list) { + /* + * Free the buffer + */ + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log + ("Remote buffer 0x%08x was sucessfully released\n", + __be32_to_cpu(key)); + ret = 0; + } else { + dbg_log + ("remote buffer 0x%08x can't be released yet\n", + __be32_to_cpu(key)); + ret = -EINVAL; + } + } else { + /* + * we didn't find the buffer !!! + * log it, but don't send the not_in_use message + * + * TODO: revisit this. 
Why is it bad to send the not_in_use + * message when it should be expected by the remote. + */ + dbg_inf("Buffer 0x%08x is unknown\n", __be32_to_cpu(key)); + ret = -EINVAL; + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + return ret; +} + +/** + * handle a remote IN_USE message + */ +static int axon_buffer_handle_in_use_message(axon_usr_buffer_t * + p_axon_usr, + struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + /* + * We can now free up the buffer locally. + */ + key = axon_buffer_extract_key(p_msg); + read_lock_bh(&(p_axon_usr->local_list_lock)); + /* + * We go through the buffer list + */ + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + /* + * Is it the requested buffer + */ + if (p_cur_map_list->key == key) { + /* + * mark the buffer as not released + */ + p_cur_map_list->is_released = 0; + atomic_inc(&p_cur_map_list->ref_count); + /* + * we are done + */ + break; + } + + p_cur_map_list = NULL; + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + if (p_cur_map_list) { + dbg_log("Found buffer 0x%08x\n", __be32_to_cpu(key)); + } else { + dbg_inf("Could not find buffer 0x%08x\n", __be32_to_cpu(key)); + } + + return ret; +} + +/** + * handle a remote NOT_IN_USE message + */ +static int axon_buffer_handle_not_in_use_message(axon_usr_buffer_t * p_axon_usr, struct axon_sms_msg_t + *p_msg) +{ + int ret = 0; + axon_buffer_key_t key; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + dbg_log("begin\n"); + /* + * We can now free up the buffer locally. + */ + key = axon_buffer_extract_key(p_msg); + read_lock_bh(&(p_axon_usr->local_list_lock)); + /* + * We go through the buffer list + */ + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + /* + * Is it the requested buffer + */ + if (p_cur_map_list->key == key) { + /* + * we are done + */ + break; + } + + p_cur_map_list = NULL; + } + + if (p_cur_map_list == NULL) { + + dbg_inf("Could not find buffer 0x%08x\n", __be32_to_cpu(key)); + + } else { + int ref_count = atomic_dec_return(&p_cur_map_list->ref_count); + if (ref_count == 0) { + dbg_log + ("Found buffer 0x%08x, waking up task \n", + __be32_to_cpu(key)); + /* + * mark the buffer as released + */ + p_cur_map_list->is_released = 1; + /* + * wake up the user task + */ + wake_up(&p_cur_map_list->p_usr_file->waitq); + } else { + dbg_log + ("Found buffer 0x%08x, but ref_count is %d\n", + __be32_to_cpu(key), ref_count); + } + } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + return ret; +} + +/** + * Handling function responsible for dispatching any SMS received + */ +static int axon_buffer_sms_handler(void *context, struct axon_sms_msg_t *p_msg) +{ + int ret = 0; + axon_usr_buffer_t *p_axon_usr = (axon_usr_buffer_t *) context; + dbg_log("begin\n"); + switch (p_msg->payload[AXON_BUFFER_SMS_REQ_OFFSET]) { + + case AXON_SYSTEM_BUFFER_UP: + ret = axon_buffer_handle_up_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_DOWN: + ret = axon_buffer_handle_down_message(p_axon_usr); + break; + case AXON_SYSTEM_BUFFER_AVAILABLE: + ret = axon_buffer_handle_available_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_UNAVAILABLE: + ret = axon_buffer_handle_unavailable_message(p_axon_usr, p_msg); + break; + case 
AXON_SYSTEM_BUFFER_IN_USE: + ret = axon_buffer_handle_in_use_message(p_axon_usr, p_msg); + break; + case AXON_SYSTEM_BUFFER_NOT_IN_USE: + ret = axon_buffer_handle_not_in_use_message(p_axon_usr, p_msg); + break; + default: + dbg_err + ("Unexpected message received by Buffer service %d \n", + p_msg->payload[AXON_BUFFER_SMS_REQ_OFFSET]); + ret = -EINVAL; + break; + } + + return ret; +} + +static int axon_buffer_map_user_memory(axon_t * p_axon, + unsigned long uaddr, + unsigned long len, int writable, + axon_buffer_map_info_t * + p_user_map_info, int dma_direction) +{ + int ret = 0; + // Computation of the Page address containing the start ... + unsigned long start = uaddr >> PAGE_SHIFT; + // ... and the end of the user memory + unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; + // Number of pages containing the user memory + const int nr_pages = end - start; + struct page **pp_pages; + struct scatterlist *p_sg; + int i; + + dbg_log("begin\n"); + dbg_log("trying to map vaddr=0x%016lx len=%lu \n", uaddr, len); + + pp_pages = + (struct page **)__get_free_pages(GFP_KERNEL, + get_order(nr_pages * + sizeof(struct page *))); + + if (pp_pages == NULL) { + dbg_err("failed to allocate the page table \n"); + return -ENOMEM; + } + + p_sg = + (struct scatterlist *)__get_free_pages(GFP_KERNEL, + get_order(nr_pages * + sizeof + (struct + scatterlist))); + + if (p_sg == NULL) { + dbg_err("failed to allocate the page table \n"); + ret = -ENOMEM; + goto free_pp_pages; + } + + /* + * Before accessing the memory unit, we nee to lock it + */ + down_read(¤t->mm->mmap_sem); + /* + * Let's go to map and lock the user memory + */ + p_user_map_info->nr_pages = + get_user_pages(current, current->mm, uaddr, + nr_pages, writable, 0, pp_pages, NULL); + up_read(¤t->mm->mmap_sem); + + if (p_user_map_info->nr_pages != nr_pages) { + dbg_err + ("get_user_pages has failed with ret=%d, %d was expected \n", + p_user_map_info->nr_pages, nr_pages); + ret = -ENOMEM; + goto release_user_pages; + } + + dbg_log("The vaddr=0x%016lx is mapped by %d pages \n", uaddr, nr_pages); + + p_user_map_info->len = len; + p_user_map_info->offset = (uaddr & ~PAGE_MASK); + p_user_map_info->virt_user_addr = uaddr; + p_user_map_info->pp_pages = pp_pages; + + for (i = 0; i < nr_pages; i++) { + + p_sg[i].page = pp_pages[i]; + + if (i == 0) { + + /* + * If first segment we need to add the offset + */ + p_sg[i].length = PAGE_SIZE - p_user_map_info->offset; + p_sg[i].offset = p_user_map_info->offset; + + /* + * If only one page, then we may have to adjust + * the size + */ + if (p_sg[i].length > p_user_map_info->len) + p_sg[i].length = p_user_map_info->len; + + } else if (i == (nr_pages - 1)) { + + /* + * if last segment, we have to adjust the size + */ + p_sg[i].length = + p_user_map_info->len - + ((nr_pages - 1) * PAGE_SIZE) + + p_user_map_info->offset; + p_sg[i].offset = 0; + } else { + + p_sg[i].length = PAGE_SIZE; + p_sg[i].offset = 0; + } + } + + p_user_map_info->nr_sg = + dma_map_sg(p_axon->get_device(p_axon), p_sg, nr_pages, + dma_direction); + + p_user_map_info->p_sg = p_sg; + p_user_map_info->dma_direction = dma_direction; + + return ret; + +release_user_pages: + if (p_user_map_info->nr_pages > 0) { + int i_page; + for (i_page = 0; i_page < p_user_map_info->nr_pages; i_page++) { + page_cache_release(pp_pages[i_page]); + } + } + + free_pages((unsigned long)p_sg, + get_order(nr_pages * sizeof(struct scatterlist))); + +free_pp_pages: + free_pages((unsigned long)pp_pages, + get_order(nr_pages * sizeof(struct page *))); + + return ret; 
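+
+	/*
+	 * Worked example of the segment math above (assuming 4KB pages):
+	 * mapping uaddr = 0x10001200, len = 10000 pins 3 pages and yields
+	 * the (offset, length) pairs (0x200, 3584), (0, 4096), (0, 2320),
+	 * where 3584 + 4096 + 2320 == 10000, i.e. only the first and last
+	 * scatterlist entries describe partial pages.
+	 */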
+} + +static int axon_buffer_unmap_user_memory(axon_t * p_axon, + axon_buffer_map_info_t * + p_user_map_info) +{ + int i_page; + + dbg_log("begin\n"); + + dma_unmap_sg(p_axon->get_device(p_axon), p_user_map_info->p_sg, + p_user_map_info->nr_pages, p_user_map_info->dma_direction); + + for (i_page = 0; i_page < p_user_map_info->nr_pages; i_page++) { + + /* Mark the released page as dirty by default */ + if (!PageReserved(p_user_map_info->pp_pages[i_page])) + SetPageDirty(p_user_map_info->pp_pages[i_page]); + + page_cache_release(p_user_map_info->pp_pages[i_page]); + } + + free_pages((unsigned + long)(p_user_map_info-> + p_sg), get_order(p_user_map_info-> + nr_pages * + sizeof(struct scatterlist))); + + free_pages((unsigned + long)(p_user_map_info-> + pp_pages), + get_order(p_user_map_info-> + nr_pages * sizeof(struct page *))); + + p_user_map_info->pp_pages = NULL; + p_user_map_info->p_sg = NULL; + p_user_map_info->nr_pages = 0; + p_user_map_info->nr_sg = 0; + p_user_map_info->offset = 0; + p_user_map_info->virt_user_addr = 0; + p_user_map_info->len = 0; + + return 0; +} + +static int axon_buffer_get_user_pages(axon_file_usr_buffer_t * p_usr_file, + axon_buffer_ioctl_map_t * + p_ioctl_map, __u8 is_local, + axon_local_buffer_map_list_t ** ptr, + int direction) +{ + int ret = 0; + axon_local_buffer_map_list_t *p_new_map_list; + axon_buffer_segment_t *seg_ptr; + struct scatterlist *sg; + int i; + int size; + + dbg_log("begin\n"); + + if (!access_ok + (VERIFY_WRITE, (void *)p_ioctl_map->addr.vaddr, p_ioctl_map->len)) { + dbg_err("access_ok failed for registered memory\n"); + return -EFAULT; + } + + p_new_map_list = + kzalloc(sizeof(axon_local_buffer_map_list_t), GFP_KERNEL); + + if (p_new_map_list == 0) { + dbg_err("kmalloc failed\n"); + return -ENOMEM; + } + + ret = + axon_buffer_map_user_memory(p_usr_file->p_axon_usr->p_axon, + p_ioctl_map->addr.vaddr, + p_ioctl_map->len, 1, + &(p_new_map_list->user_map_info), + direction); + + if (ret) { + dbg_err("Unable to map user memory\n"); + kfree(p_new_map_list); + return ret; + } + + size = + sizeof(axon_buffer_desc_header_t) + + p_new_map_list->user_map_info.nr_sg * sizeof(axon_buffer_segment_t); + /* + * allocate the buffer desc for remote access. 
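+	 *
+	 * The descriptor is one axon_buffer_desc_header_t (key, nr_segments,
+	 * len, name) immediately followed by nr_segments axon_buffer_segment_t
+	 * entries, each holding the big-endian PLB address and length of one
+	 * pinned scatter/gather element; the remote fetches the whole block
+	 * with a single DMA read when it handles the AVAILABLE message.
+	 *
+	 * The MC_AXON_LOCAL_BUFFER_REGISTER ioctl lands here; from user
+	 * space that is typically (illustrative sketch, map.name optional):
+	 *
+	 *	map.addr.vaddr = (axon_addr_ptr_t)ptr;
+	 *	map.len = size;
+	 *	ioctl(fd, MC_AXON_LOCAL_BUFFER_REGISTER, &map);
+	 *
+	 * The key handed back in map.key is what the peer then uses to
+	 * address this buffer.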
+ */ + p_new_map_list->p_buffer_desc = + dma_alloc_coherent(p_usr_file->p_axon_usr->p_axon-> + get_device(p_usr_file->p_axon_usr->p_axon), + size, &p_new_map_list->phy_buffer_desc, + GFP_KERNEL); + + if (p_new_map_list->p_buffer_desc == NULL) { + dbg_err("Unable to get memory for shared buffer desc\n"); + axon_buffer_unmap_user_memory(p_usr_file->p_axon_usr-> + p_axon, + &(p_new_map_list->user_map_info)); + kfree(p_new_map_list); + p_new_map_list = NULL; + return -ENOMEM; + } + + memset(p_new_map_list->p_buffer_desc, 0, size); + + axon_buffer_get_uniq_key(p_usr_file->p_axon_usr, &p_new_map_list->key); + /* + * We return the value of the allocated key for the user + */ + p_ioctl_map->key = p_new_map_list->key; + dbg_log + ("Adding key=0x%08x in 0x%p into Ox%p \n", + __be32_to_cpu(p_ioctl_map->key), + &(p_new_map_list->list), + &(p_usr_file->p_axon_usr->local_map_list_head.list)); + dbg_log + ("Allocating key=0x%08x for virtual 0x%016" + AXON_VADDR_FMT_T "\n", + __be32_to_cpu(p_ioctl_map->key), p_ioctl_map->addr.vaddr); + p_new_map_list->p_usr_file = p_usr_file; + /* + * This is already in BE form + */ + p_new_map_list->p_buffer_desc->key = p_new_map_list->key; + /* + * This is not + */ + p_new_map_list->p_buffer_desc-> + nr_segments = __cpu_to_be32(p_new_map_list->user_map_info.nr_sg); + p_new_map_list->p_buffer_desc->len = + __cpu_to_be64(p_new_map_list->user_map_info.len); + strncpy(p_new_map_list->p_buffer_desc->name, + p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + dbg_log + ("New desc: key = 0x%08x, nr_seg = %d, len = 0x%" + AXON_U64_FMT_T "\n", + __be32_to_cpu(p_new_map_list-> + p_buffer_desc->key), + __be32_to_cpu(p_new_map_list-> + p_buffer_desc->nr_segments), + __be64_to_cpu(p_new_map_list->p_buffer_desc->len)); + + seg_ptr = (axon_buffer_segment_t *) (p_new_map_list->p_buffer_desc + 1); + sg = p_new_map_list->user_map_info.p_sg; + + for (i = 0; i < p_new_map_list->user_map_info.nr_sg; + i++, seg_ptr++, sg++) { + + /* Everything should be BE */ + seg_ptr->phys_addr = + axon_addr_xltr_to_plb(p_usr_file->p_axon_usr->xltr, + sg_dma_address(sg)); + seg_ptr->len = __cpu_to_be64(sg_dma_len(sg)); + } + + atomic_set(&p_new_map_list->ref_count, 0); + atomic_set(&p_new_map_list->is_being_deleted, 0); + p_new_map_list->is_local_only = is_local; + + /* add it to the remote list */ + write_lock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + list_add_tail(&(p_new_map_list->list), + &(p_usr_file->p_axon_usr->local_map_list_head.list)); + write_unlock_bh(&(p_usr_file->p_axon_usr->local_list_lock)); + + /* + * We should now notify the remote about this buffer + * We do this only if this is a "global" buffer and the remote + * is up + */ + if ((p_new_map_list->is_local_only == 0) + && (atomic_read(&p_usr_file->p_axon_usr->remote_is_up) + != 0)) + axon_buffer_send_message_available + (p_usr_file->p_axon_usr, p_new_map_list); + if (ptr) + *ptr = p_new_map_list; + return ret; +} + +static int axon_buffer_release_user_pages(axon_file_usr_buffer_t * + p_usr_file, + axon_local_buffer_map_list_t * + p_cur_map_list) +{ + int ret = -EFAULT; // By default we suppose the key invalid + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + dbg_log("begin\n"); + if (p_cur_map_list->is_local_only == 1) { + /* + * if this is a local only buffer we don't need to notify the + * remote + */ + p_cur_map_list->is_released = 1; + } else if (atomic_read(&p_axon_usr->remote_is_up) + == 0) { + /* + * if the remote is down we don't need to notify + */ + p_cur_map_list->is_released = 1; + } else { + /* + * We need to send 
an UNAVAILABLE message to the other side + */ + ret = + axon_buffer_send_message_unavailable + (p_axon_usr, p_cur_map_list->key); + if (ret < 0) { + /* + * We failed to send the SMS but we are going to + * release the buffer anyway as there is no hope to + * ever get the remote message to free it + * TODO: If we failed to send the SMS what should we + * do ??? Maybe freeing the buffer is still the best + * guess as our app is going away and we should not + * keep these pages forever to avoid some memory + * leak. + */ + dbg_inf + ("We can't communicate with the remote so we free up buffer 0x%08x\n", + __be32_to_cpu(p_cur_map_list->key)); + p_cur_map_list->is_released = 1; + } else { + /* + * Then we block until the NOT_IN_USE message comes + * back. + * timeout is 5 sec + * TODO: we could weight the timeout based on the + * buffer size. + * It seems legitimate for a big buffer to have a + * bigger timeout. + */ + ret = wait_event_timeout(p_usr_file->waitq, + p_cur_map_list-> + is_released == 1, HZ * 5); + if (p_cur_map_list->is_released == 0) { + /* + * this is the timeout case. 5 sec is long. We + * assume something is wrong with the remote. + */ + dbg_inf + ("We got the timeout so we are going to free up buffer 0x%08x anyway\n", + __be32_to_cpu(p_cur_map_list->key)); + p_cur_map_list->is_released = 1; + } + } + } + + /* + * Sanity check, this should not happen + */ + if (p_cur_map_list->is_released == 0) { + /* + * We should never get there + */ + dbg_inf + ("The buffer 0x%08x is still in use by the remote\n", + __be32_to_cpu(p_cur_map_list->key)); + } else { + dbg_log("buffer 0x%08x is not in use\n", + __be32_to_cpu(p_cur_map_list->key)); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) + if ((atomic_xchg(&p_cur_map_list->is_being_deleted, 1) == 0)) +#else + if ((atomic_inc_return(&p_cur_map_list->is_being_deleted) + == 1)) +#endif + { + write_lock_bh(&(p_axon_usr->local_list_lock)); + /* + * Remove the entry from the list + */ + list_del(&(p_cur_map_list->list)); + write_unlock_bh(&(p_axon_usr->local_list_lock)); + /* + * now unlock the memory + */ + ret = + axon_buffer_unmap_user_memory(p_axon_usr-> + p_axon, + & + (p_cur_map_list-> + user_map_info)); + /* + * freeing the shared desc + */ + if (p_cur_map_list->p_buffer_desc) { + int size = + sizeof + (axon_buffer_desc_header_t) + + (__be32_to_cpu + (p_cur_map_list-> + p_buffer_desc->nr_segments) * + sizeof(axon_buffer_segment_t)); + dma_free_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), size, + p_cur_map_list-> + p_buffer_desc, + p_cur_map_list-> + phy_buffer_desc); + p_cur_map_list->p_buffer_desc = NULL; + } + + /* + * freeing the buffer main node + */ + kfree(p_cur_map_list); + p_cur_map_list = NULL; + } else { + dbg_inf + ("buffer 0x%08x is being deleted by another thread!\n", + __be32_to_cpu(p_cur_map_list->key)); + } + } + + return ret; +} + +static int axon_buffer_release_user_pages_by_key(axon_file_usr_buffer_t * + p_usr_file, axon_buffer_key_t key) +{ + int ret = 0; + struct list_head *pos; + axon_local_buffer_map_list_t *p_cur_map_list = NULL; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + + dbg_log("begin\n"); + + read_lock_bh(&(p_axon_usr->local_list_lock)); + + /* + * We go through the buffer list associated to this file descriptor + */ + list_for_each(pos, &(p_axon_usr->local_map_list_head.list)) { + + p_cur_map_list = + list_entry(pos, axon_local_buffer_map_list_t, list); + /* + * Is it the requested buffer + */ + if (p_cur_map_list->key == key) { + break; + } + + p_cur_map_list = NULL; 
+ } + + read_unlock_bh(&(p_axon_usr->local_list_lock)); + + if (p_cur_map_list) + ret = + axon_buffer_release_user_pages(p_usr_file, p_cur_map_list); + else { + dbg_err("Can't find buffer 0x%08x\n", __be32_to_cpu(key)); + ret = -EFAULT; + } + + return ret; +} + +static int axon_buffer_associate_remote_file(axon_file_usr_buffer_t * + p_usr_file, + axon_buffer_ioctl_map_t * + p_ioctl_map, unsigned int cmd) +{ + // By default we suppose the key invalid + int ret = -EFAULT; + struct list_head *pos; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + axon_remote_buffer_map_list_t *p_cur_map_list; + + dbg_log("begin\n"); + if (p_usr_file->p_remote_buffer) { + dbg_err("The file is already associated to a remote file\n"); + return -EFAULT; + } + + if ((cmd != MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) + && (cmd != MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY) + && (cmd != MC_AXON_REMOTE_BUFFER_ACCESS_RAW)) { + dbg_err("Unknown remote buffer access cmd: 0x%08X\n", cmd); + return -EFAULT; + } + + if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_RAW) { + + int size; + axon_buffer_segment_t *ptr; + p_cur_map_list = + kzalloc(sizeof(axon_remote_buffer_map_list_t), GFP_KERNEL); + + if (p_cur_map_list == NULL) { + dbg_err + ("Failed to allocate buffer for remote buffer\n"); + return -ENOMEM; + } + + /* + * no legal remote key + */ + p_cur_map_list->key = 0; + /* + * no legal remote address + */ + p_cur_map_list->plb_addr = 0xffffffffffffffff; + p_cur_map_list->is_locally_created = 1; + dbg_log("Buffer 0x%08x at 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be32_to_cpu(p_cur_map_list->key), + __be64_to_cpu(p_cur_map_list->plb_addr)); + size = + sizeof(axon_buffer_desc_header_t) + + sizeof(axon_buffer_segment_t); + p_cur_map_list->p_buffer_desc = + dma_alloc_coherent(p_axon_usr->p_axon-> + get_device(p_axon_usr-> + p_axon), size, + &p_cur_map_list-> + phy_buffer_desc, GFP_KERNEL); + + if (p_cur_map_list->p_buffer_desc == NULL) { + dbg_err + ("Failed to allocate buffer desc for remote buffer\n"); + kfree(p_cur_map_list); + return -ENOMEM; + } + + ptr = + (axon_buffer_segment_t *) (p_cur_map_list-> + p_buffer_desc + 1); + memset(p_cur_map_list->p_buffer_desc, 0, size); + p_cur_map_list->p_buffer_desc->key = p_cur_map_list->key; + p_cur_map_list->p_buffer_desc->nr_segments = __cpu_to_be32(1); + p_cur_map_list->p_buffer_desc-> + len = __cpu_to_be64(p_ioctl_map->len); + ptr->phys_addr = __cpu_to_be64(p_ioctl_map->addr.plb_addr); + ptr->len = __cpu_to_be64(p_ioctl_map->len); + /* + * We set the handle_count to 1 as it will be + * associated to this file descriptor + * immediately + */ + atomic_set(&p_cur_map_list->handle_count, 1); + /* + * It is not yet associated to any local file + * descriptor. + */ + atomic_set(&p_cur_map_list->use_count, 0); + /* + * Remember the board providing services for + * this remote buffer + */ + p_cur_map_list->p_axon_usr = p_axon_usr; + p_ioctl_map->key = p_cur_map_list->key; + strncpy(p_cur_map_list-> + p_buffer_desc->name, + p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + /* + * OK we got the good descriptor. + */ + p_usr_file->p_remote_buffer = p_cur_map_list; + return 0; + } else { + + read_lock_bh(&(p_axon_usr->remote_list_lock)); + /* + * We go through the buffer list associated to this file + * descriptor + */ + list_for_each(pos, &(p_axon_usr->remote_map_list_head.list)) { + p_cur_map_list = + list_entry(pos, + axon_remote_buffer_map_list_t, list); + + /* + * Is it the requested buffer + */ + + /* + * If the user specified a name, search the list by + * name. 
otherwise, check the ID + */ + if ((cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) + && + (strncmp + (p_ioctl_map->name, + p_cur_map_list->p_buffer_desc->name, + AXON_BUFFER_NAME_LEN) == 0)) + break; + else if (p_cur_map_list->key == p_ioctl_map->key) + break; + + p_cur_map_list = NULL; + } + + /* if we found it, prevent deletion รน */ + if (p_cur_map_list) { + /* + * increment the handle_count + */ + atomic_inc(&p_cur_map_list->handle_count); + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + } else { + char tmp[AXON_BUFFER_NAME_LEN + 1]; + + read_unlock_bh(&(p_axon_usr->remote_list_lock)); + + strncpy(tmp, p_ioctl_map->name, AXON_BUFFER_NAME_LEN); + tmp[AXON_BUFFER_NAME_LEN] = 0; + + if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME) { + dbg_err + ("Buffer name not found in list: %s\n", + tmp); + } else if (cmd == MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY) { + dbg_err + ("Buffer key not found in list: 0x%08X\n", + __be32_to_cpu(p_ioctl_map->key)); + } + return -EINVAL; + } + + /* We need to check if the buffer desc is complete yet */ + if (!p_cur_map_list->p_buffer_desc->len) { + dbg_log + ("the desc for buffer 0x%08x is not complete yet, we wait\n", + __be32_to_cpu(p_cur_map_list->key)); + + if (wait_event_interruptible + (p_axon_usr->waitq, + p_cur_map_list->p_buffer_desc->len != 0)) { + dbg_err + ("interrupted while waiting for desc of buffer 0x%08x\n", + __be32_to_cpu(p_cur_map_list->key)); + ret = -ERESTARTSYS; + goto out; + } + } + + /* + * Sanity check. Make sure the descriptor and the map + * list entry agree about the buffer ID. Do not + * compare the key in the ioctl_map tructure. + * If registering by name, that field is not set. + */ + if (p_cur_map_list->p_buffer_desc->key != p_cur_map_list->key) { + dbg_err + ("The descriptor does not contain the correct ID 0x%08x != 0x%08x\n", + __be32_to_cpu(p_cur_map_list-> + p_buffer_desc-> + key), + __be32_to_cpu(p_cur_map_list->key)); + ret = -EFAULT; + goto out; + } + + /* + * if it is marked for deletion return an error + */ + if (p_cur_map_list->to_be_deleted == 1) { + dbg_err + ("Buffer is marked for deletion: (0x%08X)\n", + __be32_to_cpu(p_cur_map_list->key)); + ret = -EFAULT; + goto out; + } + + p_ioctl_map->len = + be64_to_cpu(p_cur_map_list->p_buffer_desc->len); + p_ioctl_map->key = p_cur_map_list->key; + strncpy(p_ioctl_map->name, + p_cur_map_list->p_buffer_desc-> + name, AXON_BUFFER_NAME_LEN); + /* + * OK we got the good descriptor. 
+ */ + p_usr_file->p_remote_buffer = p_cur_map_list; + + return 0; + } + +out: + + write_lock_bh(&(p_axon_usr->remote_list_lock)); + + /* + * The work queue is done for this buffer so we need to dec + * the handle_count + */ + if ((atomic_dec_return(&p_cur_map_list->handle_count) == + 0) && (p_cur_map_list->to_be_deleted == 1)) { + /* + * If the buffer was deleted by the remote before we finished + * mapping it, we need to free it up + */ + if (axon_buffer_free_remote_buffer(p_cur_map_list, p_axon_usr)) { + dbg_log("remote buffer was released\n"); + } else { + dbg_log("remote buffer was not released yet\n"); + } + } + + write_unlock_bh(&(p_axon_usr->remote_list_lock)); + + return ret; +} + +/** + * + */ +static int axon_buffer_compute_buffer_location(axon_buffer_desc_header_t * + p_buffer_desc, + loff_t offset, + __u64 * p_segment_offset) +{ + int i, nr_segment = __be32_to_cpu(p_buffer_desc->nr_segments); + __u64 segment_len = 0, total_len = 0; + axon_buffer_segment_t *p_segment = + (axon_buffer_segment_t *) (p_buffer_desc + 1); + dbg_log("begin\n"); + for (i = 0; i < nr_segment; i++, p_segment++, total_len += segment_len) { + segment_len = __be64_to_cpu(p_segment->len); + if ((total_len + segment_len) > offset) { + *p_segment_offset = offset - total_len; + return i; + } + } + + return -1; +} + +static void axon_buffer_dma_completion_handler(struct axon_dmax_t + *p_axon_dmax, struct axon_dma_req_t + *p_dma_req, void *context) +{ + axon_file_usr_buffer_t *p_usr_file = (axon_file_usr_buffer_t *) context; + + dbg_log("begin\n"); + + if (p_usr_file == NULL) { + dbg_err("p_usr_file is NULL \n"); + return; + } + + if (atomic_dec_return(&p_usr_file->req_count) == 0) { + wake_up(&p_usr_file->waitq); + } + /* + * decrement the req count + */ + up(&p_usr_file->sem_dma_reqs); +} + +static int axon_buffer_do_transfer(axon_file_usr_buffer_t * p_usr_file, + axon_buffer_desc_header_t * + local_buffer, loff_t local_offset, + axon_buffer_desc_header_t * + remote_buffer, loff_t remote_offset, + size_t len, int direction) +{ + int ret = len; + size_t count = 0; + __u64 local_segment_offset, remote_segment_offset, transfer_size; + axon_buffer_segment_t *local_segment, *remote_segment; + axon_usr_buffer_t *p_axon_usr = p_usr_file->p_axon_usr; + struct axon_dma_req_t *p_dma_req; + int local_segment_id, remote_segment_id; + int local_segment_nr, remote_segment_nr; + + dbg_log("begin\n"); + + if (!local_buffer || !remote_buffer) { + dbg_err("Can't fetch one of the 2 descriptors\n"); + return -ENOSYS; + } + + local_segment_id = + axon_buffer_compute_buffer_location + (local_buffer, local_offset, &local_segment_offset); + remote_segment_id = + axon_buffer_compute_buffer_location + (remote_buffer, remote_offset, &remote_segment_offset); + + if ((local_segment_id == -1) + || (remote_segment_id == -1)) { + dbg_err("Bad value\n"); + return -EINVAL; + } + + local_segment = (axon_buffer_segment_t *) (local_buffer + 1); + remote_segment = (axon_buffer_segment_t *) (remote_buffer + 1); + local_segment_nr = __be32_to_cpu(local_buffer->nr_segments); + remote_segment_nr = __be32_to_cpu(remote_buffer->nr_segments); + + while (count < len) { + struct axon_dma_req_xfer_t dma_req_xfer = + AXON_DMA_REQ_XFER_INIT; + int i = 0; + size_t count2 = count; + p_dma_req = axon_dma_request_create(p_axon_usr->p_dma, 128); + + if (p_dma_req == NULL) { + dbg_err("Unable to create a DMA request\n"); + ret = -ENOMEM; + goto out; + } + + dbg_log("DMA descriptor\n"); + + while ((i < 120) && (count < len)) { + i++; + + transfer_size = + 
min((__be64_to_cpu + (local_segment[local_segment_id]. + len) - local_segment_offset), + (__be64_to_cpu + (remote_segment + [remote_segment_id].len) - + remote_segment_offset)); + transfer_size = min(transfer_size, len - count); + dma_req_xfer.size = transfer_size; + dma_req_xfer.intr = DMA_NO_INTR; + + if (direction == DMA_FROM_DEVICE) { + dma_req_xfer.src = + __cpu_to_be64(__be64_to_cpu + (remote_segment + [remote_segment_id]. + phys_addr) + + remote_segment_offset); + dma_req_xfer.dst = + __cpu_to_be64(__be64_to_cpu + (local_segment + [local_segment_id]. + phys_addr) + + local_segment_offset); + } else { + dma_req_xfer.dst = + __cpu_to_be64(__be64_to_cpu + (remote_segment + [remote_segment_id]. + phys_addr) + + remote_segment_offset); + dma_req_xfer.src = + __cpu_to_be64(__be64_to_cpu + (local_segment + [local_segment_id]. + phys_addr) + + local_segment_offset); + } + + dbg_log("Command packet %d\n", i); + dbg_log("PLB src address = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be64_to_cpu(dma_req_xfer.src)); + dbg_log("PLB dst address = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", + __be64_to_cpu(dma_req_xfer.dst)); + dbg_log("size = 0x%016" + AXON_PLB_ADDR_FMT_T "\n", dma_req_xfer.size); + + ret = + axon_dma_request_push_xfer(p_dma_req, + &dma_req_xfer); + + if (ret != 0) { + dbg_err("Unable to build the DMA request \n"); + goto free_dma_req; + } + + count += transfer_size; + remote_segment_offset += transfer_size; + local_segment_offset += transfer_size; + + if (remote_segment_offset >= + __be64_to_cpu(remote_segment + [remote_segment_id].len)) { + remote_segment_id++; + remote_segment_offset = 0; + if (remote_segment_id == remote_segment_nr) + break; + } else { + dbg_log + ("%llu bytes left in remote_segment %d\n", + __be64_to_cpu(remote_segment + [remote_segment_id]. + len) - + remote_segment_offset, remote_segment_id); + } + + if (local_segment_offset >= + __be64_to_cpu(local_segment + [local_segment_id].len)) { + local_segment_id++; + local_segment_offset = 0; + if (local_segment_id == local_segment_nr) + break; + } else { + dbg_log + ("%llu bytes left in local_segment %d\n", + __be64_to_cpu(local_segment + [local_segment_id]. + len) - + local_segment_offset, local_segment_id); + } + + } + + dbg_log + ("dma_req is made of %d command packet for a total of %d bytes\n", + i, (int)(count - count2)); + + /* + * at most 2 req pending + */ + if (down_interruptible(&p_usr_file->sem_dma_reqs)) { + dbg_err + ("we have been interrupted on semaphore: len = %d, count = %d, prending req = %d \n", + (int)len, (int)count, + atomic_read(&p_usr_file->req_count)); + ret = -ERESTARTSYS; + goto free_dma_req; + } + + atomic_inc(&p_usr_file->req_count); + + ret = axon_dma_request_queue(p_dma_req, + axon_buffer_dma_completion_handler, + p_usr_file); + + if (ret < 0) { + dbg_err("Unable to queue DMA request\n"); + up(&p_usr_file->sem_dma_reqs); + atomic_dec(&p_usr_file->req_count); + goto free_dma_req; + } + + dbg_log("dma_req is posted\n"); + } + + if (wait_event_interruptible + (p_usr_file->waitq, atomic_read(&p_usr_file->req_count) == 0)) { + dbg_err + ("we have been interrupted on wait queue: len = %d, count = %d, prending req = %d \n", + (int)len, (int)count, atomic_read(&p_usr_file->req_count)); + ret = -ERESTARTSYS; + goto out; + } + + dbg_log(" the all transfer is done\n"); + + return count; + +free_dma_req: + axon_dma_request_destroy(p_dma_req); + +out: + return ret; +} + +/** + * genric read/write entry. 
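+ *
+ * From user space the whole path reduces to something like the sketch
+ * below (illustrative only; it assumes udev exposes the character device
+ * created in axon_buffer_probe() as /dev/buffer0, and that the ioctl
+ * numbers and axon_buffer_ioctl_map_t come from the axon buffer header):
+ *
+ *	axon_buffer_ioctl_map_t map = { .name = "frame_pool" };
+ *	char *buf = malloc(65536);
+ *	int fd = open("/dev/buffer0", O_RDWR);
+ *
+ *	if (ioctl(fd, MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME, &map) == 0)
+ *		pread(fd, buf, 65536, 0);
+ *
+ * pread() here DMAs the first 64KB of the remote buffer into buf; error
+ * handling and cleanup are omitted, and "frame_pool" is just a made-up
+ * name registered by the peer.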
+ */ +static ssize_t axon_buffer_readwrite(struct file *file, char __user * buff, + size_t len, loff_t * offset, int direction) +{ + int ret = 0; + axon_file_usr_buffer_t *p_usr_file = + (axon_file_usr_buffer_t *) (file->private_data); + axon_buffer_ioctl_map_t ioctl_map; + axon_local_buffer_map_list_t *ptr; + dbg_log("begin\n"); + + /* + * If we are not associated to a remote buffer there is nothing to do + */ + if (p_usr_file->p_remote_buffer == NULL) { + dbg_err("no remote buffer associated\n"); + return -ENOSYS; + } + + /* + * if we can't increment the use count, it means this file + * cannot be used anymore (buffer deleted by the remote?) + */ + if (axon_buffer_use_remote_buffer(p_usr_file) == -1) { + dbg_err("can't set the remote buff to busy\n"); + return -ENOSYS; + } + + if (*offset >= + __be64_to_cpu(p_usr_file->p_remote_buffer->p_buffer_desc->len)) { + dbg_inf("offset is out of limits\n"); + /* + * We have reach the end of buffer + */ + goto release_remote_buff; + } + + /* + * should we reduce the lenght + */ + if ((*offset + len) > + __be64_to_cpu(p_usr_file->p_remote_buffer->p_buffer_desc->len)) { + len = + __be64_to_cpu(p_usr_file->p_remote_buffer-> + p_buffer_desc->len) - *offset; + } + + /* + * OK, lock the user buffer in memory + */ + ioctl_map.addr.vaddr = (axon_addr_ptr_t) buff; + ioctl_map.len = (axon_size_t) len; + /* + * This is anonmous, so no name needed + */ + memset(ioctl_map.name, 0, AXON_BUFFER_NAME_LEN); + /* + * We register the buffer locally but we don't advertise it to the + * remote + */ + if ((ret = + axon_buffer_get_user_pages(p_usr_file, + &ioctl_map, 1, &ptr, direction)) != 0) { + dbg_err("Failed to lock the local buffer in memory\n"); + goto release_remote_buff; + } + + /* + * OK now we have both the local and the remote buffer descriptors + * We can start the transfer. + */ + if ((ret = + axon_buffer_do_transfer(p_usr_file, + ptr-> + p_buffer_desc, + 0, + p_usr_file-> + p_remote_buffer-> + p_buffer_desc, + *offset, len, direction)) < 0) { + dbg_err("Failed to do the transfer\n"); + goto release_local_buff; + } + + *offset += ret; + +release_local_buff: + if (axon_buffer_release_user_pages(p_usr_file, ptr)) + dbg_err("Failed to release local user pages\n"); + ptr = NULL; + +release_remote_buff: + if (axon_buffer_release_remote_buffer(p_usr_file)) + dbg_err("Failed to release remote buff\n"); + + return ret; +} + +/** + * read entry. + */ +static ssize_t axon_buffer_read(struct file *file, char __user * buff, + size_t len, loff_t * offset) +{ + dbg_log("begin\n"); + return axon_buffer_readwrite(file, buff, len, offset, DMA_FROM_DEVICE); +} + +/** + * write entry. 
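+ *
+ * Symmetric to read(): the caller's pages are pinned for the duration of
+ * the call and the data is DMAed to the associated remote buffer
+ * (DMA_TO_DEVICE), starting at the current file offset.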
+ */ +static ssize_t axon_buffer_write(struct file *file, + const char __user * buff, size_t len, + loff_t * offset) +{ + dbg_log("begin\n"); + return axon_buffer_readwrite(file, (char __user *) + buff, len, offset, DMA_TO_DEVICE); +} + +static int axon_buffer_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = 0; + axon_file_usr_buffer_t *p_usr_file = + (axon_file_usr_buffer_t *) (file->private_data); + axon_buffer_ioctl_map_t ioctl_map; + dbg_log("begin\n"); + /* + * extract the type and number bitfields, and don't decode + * wrong cmds: return ENOTTY (inappropriate ioctl) + */ + if ((_IOC_TYPE(cmd) != MC_AXON_BUFFER_MAGIC) + || (_IOC_NR(cmd) > MC_AXON_BUFFER_MAXNR)) { + dbg_log("Ioctl wrong command\n"); + return -ENOTTY; + } + + switch (cmd) { + + case MC_AXON_LOCAL_BUFFER_REGISTER: + dbg_log("Ioctl MC_AXON_LOCAL_BUFFER_REGISTER:\n"); + if (!access_ok + (VERIFY_WRITE, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not writable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_get_user_pages(p_usr_file, + &ioctl_map, 0, NULL, + DMA_BIDIRECTIONAL); + if (ret != 0) + break; + ret = + copy_to_user((axon_buffer_ioctl_map_t *) arg, + &ioctl_map, sizeof(axon_buffer_ioctl_map_t)); + break; + case MC_AXON_LOCAL_BUFFER_UNREGISTER: + dbg_log("Ioctl MC_AXON_LOCAL_BUFFER_UNREGISTER:\n"); + if (!access_ok + (VERIFY_READ, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not readable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_release_user_pages_by_key(p_usr_file, + ioctl_map.key); + break; + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY: + case MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME: + case MC_AXON_REMOTE_BUFFER_ACCESS_RAW: + dbg_log("Ioctl case MC_AXON_REMOTE_BUFFER_ACCESS:\n"); + if (!access_ok + (VERIFY_WRITE, (void *)arg, + sizeof(axon_buffer_ioctl_map_t))) { + dbg_err + ("axon_buffer_ioctl_map_t struct is not writable\n"); + ret = -EFAULT; + break; + } + + ret = copy_from_user(&ioctl_map, (axon_buffer_ioctl_map_t *) + arg, sizeof(axon_buffer_ioctl_map_t)); + if (ret != 0) + break; + ret = + axon_buffer_associate_remote_file + (p_usr_file, &ioctl_map, cmd); + if (ret == 0) { + ret = copy_to_user((axon_buffer_ioctl_map_t *) + arg, &ioctl_map, + sizeof(axon_buffer_ioctl_map_t)); + } + break; + default: + dbg_log("Ioctl unknown ioctl command \n"); + ret = -ENOTTY; + }; + return ret; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) +#ifdef CONFIG_COMPAT + +/** + * this is a 32 bits version of the IOCTL structure + */ +typedef struct { + + /** + * We can also have a literal name + */ + char name[AXON_BUFFER_NAME_LEN]; + /** + * Unique key returned by the driver representing the + * established mapping + */ + axon_buffer_key_t key; + /** + * Size of the space to be locked + */ + u32 len; + union { + /** + * User virtual address to be locked and mapped into the kernel + * For MC_AXON_LOCAL_BUFFER_REGISTER + */ + u32 vaddr; + /** + * This is for the MC_AXON_REMOTE_BUFFER_ACCESS_RAW + * It allows to access the remote side at a raw physical address + * even if there is no driver to talk to. 
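+	 *
+	 * For example (illustrative, address made up), a diagnostic tool
+	 * could do:
+	 *
+	 *	map.len = 4096;
+	 *	map.addr.plb_addr = 0x0000000800000000ULL;
+	 *	ioctl(fd, MC_AXON_REMOTE_BUFFER_ACCESS_RAW, &map);
+	 *
+	 * and then read()/write() the file descriptor to DMA directly
+	 * to/from that PLB range.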
+
+static long axon_buffer_compat_ioctl(struct file *file, unsigned int cmd,
+				     unsigned long arg)
+{
+	axon_buffer_ioctl_map_32_t __user *p_ioctl32_u = compat_ptr(arg);
+	axon_buffer_ioctl_map_32_t ioctl32_k;
+	axon_buffer_ioctl_map_t __user *p_ioctl64;
+	long rval = -ENOIOCTLCMD;
+	axon_size_t len;
+	axon_buffer_key_t key;
+
+	if (!access_ok
+	    (VERIFY_WRITE, p_ioctl32_u, sizeof(axon_buffer_ioctl_map_32_t))) {
+		dbg_err("axon_buffer_ioctl_map_32_t struct is not writable\n");
+		return -EFAULT;
+	}
+
+	p_ioctl64 = compat_alloc_user_space(sizeof(axon_buffer_ioctl_map_t));
+	if (!access_ok
+	    (VERIFY_WRITE, p_ioctl64, sizeof(axon_buffer_ioctl_map_t))) {
+		dbg_err
+		    ("internal axon_buffer_ioctl_map_t struct is not writable\n");
+		return -EFAULT;
+	}
+	if (copy_from_user
+	    (&ioctl32_k, p_ioctl32_u, sizeof(axon_buffer_ioctl_map_32_t)))
+		return -EFAULT;
+	if (copy_to_user(p_ioctl64->name, ioctl32_k.name, AXON_BUFFER_NAME_LEN))
+		return -EFAULT;
+	if (__put_user(ioctl32_k.key, &p_ioctl64->key)
+	    || __put_user((axon_size_t)
+			  (ioctl32_k.len), &p_ioctl64->len)
+	    || __put_user(ioctl32_k.addr.plb_addr, &p_ioctl64->addr.plb_addr))
+		return -EFAULT;
+	switch (cmd) {
+	case MC_AXON_LOCAL_BUFFER_REGISTER:
+
+		if (__put_user
+		    (compat_ptr(ioctl32_k.addr.vaddr), &p_ioctl64->addr.vaddr))
+			return -EFAULT;
+		/* fall through */
+	case MC_AXON_LOCAL_BUFFER_UNREGISTER:
+	case MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY:
+	case MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME:
+	case MC_AXON_REMOTE_BUFFER_ACCESS_RAW:
+
+		lock_kernel();
+		rval =
+		    axon_buffer_ioctl(file->f_dentry->d_inode,
+				      file, cmd, (unsigned long)
+				      p_ioctl64);
+		unlock_kernel();
+		if (__get_user(len, &p_ioctl64->len)
+		    || __put_user((u32) len, &p_ioctl32_u->len)
+		    || __get_user(key, &p_ioctl64->key)
+		    || __put_user(key, &p_ioctl32_u->key))
+			return -EFAULT;
+		break;
+	default:
+		break;
+	}
+
+	return rval;
+}
+
+#endif
+#endif
+
+/**
+ * close the file descriptor and release all buffers associated with it.
+ */
+static int axon_buffer_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct list_head *pos, *q;
+	axon_file_usr_buffer_t *p_usr_file =
+	    (axon_file_usr_buffer_t *) (file->private_data);
+
+	dbg_log("begin\n");
+
+	if (p_usr_file == NULL) {
+		dbg_err("No private struct associated with the file descriptor\n");
+		module_put(THIS_MODULE);
+		return 0;
+	}
+
+	/*
+	 * Should we release a remote buffer?
+	 */
+	if (p_usr_file->p_remote_buffer) {
+		ret =
+		    axon_buffer_dissociate_remote_buffer_from_file(p_usr_file);
+		if (ret) {
+			dbg_err("failed to release the remote buffer\n");
+			/* we continue anyway as there are local buffers to release */
+		}
+
+		p_usr_file->p_remote_buffer = NULL;
+	}
+
+	/*
+	 * Now we should release all local buffers registered through
+	 * this file descriptor
+	 */
+
+	write_lock_bh(&(p_usr_file->p_axon_usr->local_list_lock));
+	/*
+	 * We go through the buffer list
+	 */
+	list_for_each_safe(pos, q,
+			   &(p_usr_file->
+			     p_axon_usr->local_map_list_head.list)) {
+		axon_local_buffer_map_list_t *p_cur_map_list;
+
+		p_cur_map_list =
+		    list_entry(pos, axon_local_buffer_map_list_t, list);
+		/*
+		 * Is the buffer associated with this file?
+		 */
+		if (p_cur_map_list->p_usr_file == p_usr_file) {
+
+			dbg_log("Freeing buffer 0x%08x\n",
+				__be32_to_cpu(p_cur_map_list->key));
+			write_unlock_bh(&(p_usr_file->p_axon_usr->
+					  local_list_lock));
+			if (axon_buffer_release_user_pages
+			    (p_usr_file, p_cur_map_list)) {
+				dbg_err("Failed to release local user pages\n");
+			}
+
+			write_lock_bh(&(p_usr_file->p_axon_usr->
+					local_list_lock));
+		}
+	}
+
+	write_unlock_bh(&(p_usr_file->p_axon_usr->local_list_lock));
+	/*
+	 * Now free the private data
+	 */
+	kfree(p_usr_file);
+	file->private_data = NULL;
+	module_put(THIS_MODULE);
+	return ret;
+}
+
+/**
+ * open entry
+ */
+static int axon_buffer_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	axon_file_usr_buffer_t *p_usr_file = NULL;
+
+	dbg_log("begin\n");
+
+	file->private_data = NULL;
+	try_module_get(THIS_MODULE);
+	/*
+	 * get a private data area
+	 */
+	p_usr_file = kzalloc(sizeof(axon_file_usr_buffer_t), GFP_KERNEL);
+	if (p_usr_file == NULL) {
+		dbg_err("Failed to allocate axon_file_usr_buffer_t\n");
+		module_put(THIS_MODULE);
+		return -ENOMEM;
+	}
+
+	/*
+	 * We retrieve the handle to the parent driver instance
+	 */
+	p_usr_file->p_axon_usr =
+	    container_of(inode->i_cdev, axon_usr_buffer_t, cdev);
+	file->private_data = p_usr_file;
+	init_waitqueue_head(&p_usr_file->waitq);
+	sema_init(&p_usr_file->sem_dma_reqs, 2);
+	atomic_set(&p_usr_file->req_count, 0);
+	return ret;
+}
+
+static struct file_operations axon_buffer_fops = {
+	.read = axon_buffer_read,
+	.write = axon_buffer_write,
+	.open = axon_buffer_open,
+	.release = axon_buffer_release,
+	.ioctl = axon_buffer_ioctl,
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13)
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = axon_buffer_compat_ioctl,
+#endif
+#endif
+};
+
+static dev_t buf_dev;
+
+/**
+ * per-board cleanup, called when an Axon board is removed
+ */
+static int axon_buffer_remove(axon_t * p_axon)
+{
+	axon_usr_buffer_t *p_axon_usr;
+	struct list_head *p_cursor;
+	struct list_head *p_next;
+
+	dbg_log("begin\n");
+
+	list_for_each_safe(p_cursor, p_next, &axon_buffer_list) {
+		p_axon_usr = list_entry(p_cursor, axon_usr_buffer_t, list);
+		if (p_axon_usr->p_axon == p_axon) {
+			/*
+			 * send the DOWN message to the remote.
+			 * From there no more traffic is expected on the
+			 * AXON_SMS_CHANNEL_USR_BUFFER channel.
+			 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+			if (atomic_xchg(&p_axon_usr->remote_is_up, 0) == 1)
+#else
+			if (atomic_dec_return(&p_axon_usr->remote_is_up) == 0)
+#endif
+			{
+				axon_local_buffer_map_list_t *p_cur_map_list;
+				struct list_head *pos;
+
+				/*
+				 * TODO: we should release/free up all buffers
+				 * still present, if any.
+				 */
+
+				/*
+				 * TODO: We should free up all local buffers
+				 * associated with this axon.
+				 * We also need to make sure that open
+				 * file handles will no longer be valid.
+				 */
+
+				read_lock_bh(&(p_axon_usr->local_list_lock));
+
+				/*
+				 * We go through the list of local buffers
+				 * registered for this board
+				 */
+				list_for_each(pos,
+					      &(p_axon_usr->
+						local_map_list_head.list)) {
+					p_cur_map_list =
+					    list_entry(pos,
+						       axon_local_buffer_map_list_t,
+						       list);
+				}
+
+				read_unlock_bh(&(p_axon_usr->local_list_lock));
+
+				/*
+				 * We need to free up all "remote" buffers.
+				 * So let's pretend we received a "down"
+				 * message. This should free up all remote
+				 * buffers.
+				 */
+				axon_buffer_handle_down_message(p_axon_usr);
+
+				/*
+				 * send the "down" message to the remote.
+				 */
+				axon_buffer_send_state_down(p_axon_usr);
+			}
+
+			/*
+			 * Now we can unregister the channel
+			 */
+			axon_sms_unsubscribe(p_axon_usr->sms,
+					     AXON_SMS_CHANNEL_USR_BUFFER,
+					     axon_buffer_sms_handler);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+			class_device_destroy(axon_get_class(),
+					     p_axon_usr->cdev_num);
+#else
+			class_simple_device_remove(p_axon_usr->cdev_num);
+#endif
+			/*
+			 * remove the device
+			 */
+			cdev_del(&p_axon_usr->cdev);
+
+			list_del(p_cursor);
+			/*
+			 * free the board structure
+			 */
+			kfree(p_axon_usr);
+
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * per-board initialization, called when an Axon board is found
+ */
+static int axon_buffer_probe(axon_t * p_axon)
+{
+	int ret = 0;
+	axon_usr_buffer_t *p_axon_usr;
+
+	dbg_log("begin\n");
+
+	/*
+	 * allocate a board structure
+	 */
+	p_axon_usr = kzalloc(sizeof(axon_usr_buffer_t), GFP_KERNEL);
+	if (p_axon_usr == NULL) {
+		dbg_err("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * All the CAB boards share the same major number; they are
+	 * accessible from user space through their minor number
+	 */
+	p_axon_usr->cdev_num = MKDEV(MAJOR(buf_dev), p_axon->id);
+	cdev_init(&p_axon_usr->cdev, &axon_buffer_fops);
+	ret = cdev_add(&p_axon_usr->cdev, p_axon_usr->cdev_num, 1);
+	if (ret < 0) {
+		dbg_err
+		    ("Unable to add user space driver for board 0x%p, on minor %d\n",
+		     p_axon, p_axon->id);
+		kfree(p_axon_usr);
+		return ret;
+	}
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+	class_device_create(axon_get_class(), NULL,
+			    p_axon_usr->cdev_num, NULL, "buffer%d", p_axon->id);
+#else
+	class_simple_device_add(axon_get_class(),
+				p_axon_usr->cdev_num, NULL, "buffer%d",
+				p_axon->id);
+#endif
+	/*
+	 * this is our board ID
+	 */
+	p_axon_usr->id = p_axon->id;
+	/*
+	 * this is our board struct
+	 */
+	p_axon_usr->p_axon = p_axon;
+	/*
+	 * a reference to our local SMS service
+	 */
+	p_axon_usr->sms = p_axon->sms_get(p_axon);
+	/*
+	 * a reference to the remote mbox object
+	 */
+	p_axon_usr->peer_mbox = axon_peer_mbox_get(p_axon);
+	/*
+	 * the PIO service object
+	 */
+	p_axon_usr->p_pio = p_axon->pio_get(p_axon);
+	/*
+	 * a reference to the address translation service
+	 */
+	p_axon_usr->xltr = p_axon->addr_xltr_get(p_axon);
+	/*
+	 * a reference to the dma service
+	 */
+	p_axon_usr->p_dma = p_axon->dmax_get(p_axon);
+
+	atomic_set(&p_axon_usr->remote_is_up, 0);
+
+	INIT_LIST_HEAD(&(p_axon_usr->local_map_list_head.list));
+	rwlock_init(&(p_axon_usr->local_list_lock));
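+
+	/*
+	 * list of buffers advertised by the remote side
+	 */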
+	INIT_LIST_HEAD(&(p_axon_usr->remote_map_list_head.list));
+	rwlock_init(&(p_axon_usr->remote_list_lock));
+
+	init_waitqueue_head(&p_axon_usr->waitq);
+
+	/*
+	 * register for the BUFFER mailbox channel
+	 */
+	ret = axon_sms_subscribe(p_axon_usr->sms,
+				 AXON_SMS_CHANNEL_USR_BUFFER,
+				 axon_buffer_sms_handler, p_axon_usr);
+	if (ret < 0) {
+		dbg_err("Unable to subscribe to channel %d\n",
+			AXON_SMS_CHANNEL_USR_BUFFER);
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+		class_device_destroy(axon_get_class(), p_axon_usr->cdev_num);
+#else
+		class_simple_device_remove(p_axon_usr->cdev_num);
+#endif
+		cdev_del(&p_axon_usr->cdev);
+		kfree(p_axon_usr);
+		return ret;
+	}
+
+	/*
+	 * Send an UP message to the remote.
+	 * This will trigger the remote to send us all the already
+	 * registered buffers
+	 */
+	axon_buffer_send_state_up(p_axon_usr, 0);
+
+	list_add_tail(&p_axon_usr->list, &axon_buffer_list);
+
+	return ret;
+}
+
+static axon_driver_t axon_buffer_driver = {
+	.name = "buffer",
+	.probe = axon_buffer_probe,
+	.remove = axon_buffer_remove,
+};
+
+/**
+ * module init
+ */
+static __init int axon_buffer_module_init(void)
+{
+	int ret;
+
+	dbg_log("begin\n");
+
+	INIT_LIST_HEAD(&axon_buffer_list);
+
+	ret = alloc_chrdev_region(&buf_dev, 0, 256, "buffer");
+	if (ret < 0)
+		return ret;
+
+	axon_driver_register(&axon_buffer_driver);
+
+	return 0;
+}
+
+/**
+ * module clean up
+ */
+static __exit void axon_buffer_module_cleanup(void)
+{
+	dbg_log("begin\n");
+	axon_driver_unregister(&axon_buffer_driver);
+	unregister_chrdev_region(buf_dev, 256);
+}
+
+module_init(axon_buffer_module_init);
+module_exit(axon_buffer_module_cleanup);
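
For illustration, here is a minimal, untested user-space sketch of the
producer side of this interface. The device node name (/dev/buffer0, derived
from the "buffer%d" class device above), the availability of the MC_AXON_*
ioctls and of axon_buffer_ioctl_map_t through a user-visible axon_usr_buffer.h
header are assumptions, and error handling is omitted:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include "axon_usr_buffer.h"	/* assumed: MC_AXON_* ioctls, axon_buffer_ioctl_map_t */

	int main(void)
	{
		axon_buffer_ioctl_map_t map;
		void *buf = malloc(1024 * 1024);
		int fd = open("/dev/buffer0", O_RDWR);	/* assumed device node */

		/* describe the user allocation we want to expose */
		memset(&map, 0, sizeof(map));
		strncpy(map.name, "my_buffer", AXON_BUFFER_NAME_LEN - 1);
		map.addr.vaddr = (axon_addr_ptr_t) buf;
		map.len = (axon_size_t) (1024 * 1024);

		/* pin the pages and advertise the buffer to the peer */
		ioctl(fd, MC_AXON_LOCAL_BUFFER_REGISTER, &map);
		printf("buffer registered, key = 0x%08x\n", (unsigned int) map.key);

		/* ... peer reads/writes the buffer ... */

		ioctl(fd, MC_AXON_LOCAL_BUFFER_UNREGISTER, &map);
		free(buf);
		close(fd);
		return 0;
	}

The key returned by the driver is what the peer uses to attach; a named
buffer could instead be looked up with MC_AXON_REMOTE_BUFFER_ACCESS_BY_NAME.
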
Index: linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer_P.h
===================================================================
--- /dev/null
+++ linux-2.6.21/drivers/axon/usr/buffer/axon_usr_buffer_P.h
@@ -0,0 +1,398 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+#ifndef AXON_USR_BUFFER_P_H
+#define AXON_USR_BUFFER_P_H
+
+#include
+
+#include
+#include
+
+#include "axon_usr_buffer.h"
+
+/**
+ * The various request types on the BUFFER SMS channel
+ */
+#define AXON_SYSTEM_BUFFER_UP		(u8)(0x1)
+#define AXON_SYSTEM_BUFFER_DOWN		(u8)(0x2)
+#define AXON_SYSTEM_BUFFER_AVAILABLE	(u8)(0x3)
+#define AXON_SYSTEM_BUFFER_UNAVAILABLE	(u8)(0x4)
+#define AXON_SYSTEM_BUFFER_IN_USE	(u8)(0x5)
+#define AXON_SYSTEM_BUFFER_NOT_IN_USE	(u8)(0x6)
+
+/**
+ * Offsets of the various fields in an SMS message
+ */
+#define AXON_BUFFER_SMS_REQ_OFFSET	0
+#define AXON_BUFFER_SMS_NODE_OFFSET	1
+#define AXON_BUFFER_SMS_ID_OFFSET	2
+#define AXON_BUFFER_SMS_ADDR_OFFSET	6
+#define AXON_BUFFER_SMS_SIZE_OFFSET	11
+#define AXON_BUFFER_SMS_UP_FLAG_OFFSET	2
+
+/**
+ * Flag used in an UP message to indicate whether an UP message has
+ * already been received from the remote side
+ */
+#define AXON_BUFFER_FLAG_RECEIVED_UP	(u8)(0x01)
+
+/**
+ * shared header structure exposing the layout of a buffer
+ */
+typedef struct {
+	/**
+	 * the name of the buffer (if any)
+	 */
+	char name[AXON_BUFFER_NAME_LEN];
+
+	/**
+	 * The unique key for the buffer
+	 */
+	axon_buffer_key_t key;
+
+	/**
+	 * The number of segments that make up the user buffer
+	 */
+	__u32 nr_segments;
+
+	/**
+	 * The length of the user buffer
+	 */
+	__u64 len;
+
+} axon_buffer_desc_header_t;
+
+/**
+ * shared structure describing a single segment
+ */
+typedef struct {
+	/**
+	 * the physical address of the segment
+	 */
+	__u64 phys_addr;
+
+	/**
+	 * The size of this segment
+	 */
+	__u64 len;
+
+} axon_buffer_segment_t;
+
+/************************************************************
+ * Everything below is Linux/implementation specific
+ ************************************************************/
+
+/**
+ * Linux specific structure describing a pinned user buffer
+ */
+typedef struct {
+
+	/**
+	 * Pages containing the user data
+	 */
+	struct page **pp_pages;
+
+	/**
+	 * Number of pages containing the user data
+	 */
+	__u32 nr_pages;
+
+	/**
+	 * Scatter/gather descriptor list
+	 */
+	struct scatterlist *p_sg;
+
+	/**
+	 * # of valid sg descriptors
+	 */
+	__u32 nr_sg;
+
+	/**
+	 * The DMA direction
+	 */
+	int dma_direction;
+
+	/**
+	 * How many bytes are used by the user in this set of pages
+	 */
+	__u64 len;
+
+	/**
+	 * Offset of the start of the user data within the first page
+	 */
+	__u32 offset;
+
+	/**
+	 * Virtual address of the buffer as seen by user space
+	 */
+	unsigned long virt_user_addr;
+
+} axon_buffer_map_info_t;
+
+/*
+ * Forward declaration
+ */
+struct axon_file_usr_buffer_t;
+
+/**
+ * structure holding everything about a local buffer
+ */
+typedef struct {
+	/**
+	 * a unique key describing the buffer
+	 */
+	axon_buffer_key_t key;
+
+	/**
+	 * maintains a reference count of how many times a local buffer is
+	 * referenced by a remote system (an in_use message was sent). Should
+	 * only be once per remote node
+	 */
+	atomic_t ref_count;
+
+	/**
+	 * set to 1 when the local buffer is actively being deleted from the
+	 * list, to prevent multiple entry points from attempting to delete
+	 * the same buffer.
+	 */
+	atomic_t is_being_deleted;
+
+	/**
+	 * set to 1 when the local buffer can be physically released
+	 */
+	__u8 is_released;
+
+	/**
+	 * set to 1 if this local buffer should not be advertised to remote
+	 * nodes
+	 */
+	__u8 is_local_only;
+
+	/**
+	 * The internal information about the mapping
+	 */
+	axon_buffer_map_info_t user_map_info;
+
+	/**
+	 * Pointer to the memory area where we will put the buffer
+	 * descriptor for the remote
+	 */
+	axon_buffer_desc_header_t *p_buffer_desc;
+
+	dma_addr_t phy_buffer_desc;
+
+	/**
+	 * a pointer to the file object owning this buffer
+	 */
+	struct axon_file_usr_buffer_t *p_usr_file;
+
+	/**
+	 * The list part for list chaining
+	 */
+	struct list_head list;
+
+} axon_local_buffer_map_list_t;
+
+/*
+ * Forward declaration
+ */
+struct axon_usr_buffer_t;
+
+/**
+ * Structure holding everything about a remote buffer
+ */
+typedef struct {
+	/**
+	 * a unique key describing the buffer
+	 */
+	axon_buffer_key_t key;
+
+	/**
+	 * The PLB address where we can retrieve the buffer descriptor
+	 */
+	__u64 plb_addr;
+
+	/**
+	 * count of open handles associated with the buffer
+	 */
+	atomic_t handle_count;
+
+	/**
+	 * reference count for use
+	 */
+	atomic_t use_count;
+
+	/**
+	 * was deletion requested
+	 */
+	__u8 to_be_deleted;
+
+	/**
+	 * was locally created (for raw buffers)
+	 */
+	__u8 is_locally_created;
+
+	/**
+	 * Pointer to the memory area where we cache
+	 * the remote buffer descriptor.
+	 */
+	axon_buffer_desc_header_t *p_buffer_desc;
+
+	dma_addr_t phy_buffer_desc;
+
+	/*
+	 * a pointer to the board object
+	 */
+	struct axon_usr_buffer_t *p_axon_usr;
+
+	/**
+	 * The list part for list chaining
+	 */
+	struct list_head list;
+
+} axon_remote_buffer_map_list_t;
+
+/**
+ * This is a per Axon board structure. It holds references to various
+ * board specific objects
+ */
+typedef struct axon_usr_buffer_t {
+
+	struct list_head list;
+
+	/**
+	 * Char device providing the driver entry points
+	 */
+	struct cdev cdev;
+
+	/**
+	 * Associated major/minor
+	 */
+	dev_t cdev_num;
+
+	/**
+	 * Handle to the platform specific instance
+	 */
+	struct axon_t *p_axon;
+
+	/**
+	 * Major number used to access this driver from user space
+	 */
+	int major;
+
+	/**
+	 * The board id in the board list
+	 */
+	__u8 id;
+
+	/**
+	 * is the remote buffer service up
+	 */
+	atomic_t remote_is_up;
+
+	/**
+	 * The peer mailbox object. For now this is OK as we don't have yet
+	 * any PCI-E switched fabric. Should be revisited when we get there.
+	 */
+	struct axon_mbox_t *peer_mbox;
+
+	/**
+	 * The local SMS service reference.
+	 */
+	struct axon_sms_t *sms;
+
+	/**
+	 * pointer to the PIO service
+	 */
+	struct axon_pio_t *p_pio;
+
+	/**
+	 * Handle to the address translator used to compute the PLB address
+	 * from a local bus address, when the mapping is possible
+	 */
+	struct addr_xltr_t *xltr;
+
+	/**
+	 * The DMA service
+	 */
+	struct axon_dmax_t *p_dma;
+
+	/**
+	 * The reference key used to generate unique keys for local buffers
+	 */
+	atomic_t key;
+
+	/**
+	 * The list of buffers registered locally.
+	 */
+	axon_local_buffer_map_list_t local_map_list_head;
+
+	rwlock_t local_list_lock;
+
+	/**
+	 * A list of remote buffers we know about
+	 */
+	axon_remote_buffer_map_list_t remote_map_list_head;
+
+	rwlock_t remote_list_lock;
+
+	wait_queue_head_t waitq;
+
+} axon_usr_buffer_t;
+
+/**
+ * This is a per file descriptor structure.
+ */
+typedef struct axon_file_usr_buffer_t {
+	/**
+	 * a wait queue used to wait for incoming MBX messages
+	 */
+	wait_queue_head_t waitq;	/* wait queue for read/write */
+
+	/**
+	 * a reference to the board object
+	 */
+	axon_usr_buffer_t *p_axon_usr;
+
+	/**
+	 * pointer to the remote buffer descriptor associated with
+	 * this file descriptor, if any
+	 */
+	axon_remote_buffer_map_list_t *p_remote_buffer;
+
+	/**
+	 * a counter of ongoing requests
+	 */
+	atomic_t req_count;
+
+	/**
+	 * A semaphore guaranteeing that DMA requests are serialized on this
+	 * file descriptor
+	 */
+	struct semaphore sem_dma_reqs;
+
+} axon_file_usr_buffer_t;
+
+#endif /* AXON_USR_BUFFER_P_H */
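
And the matching consumer side, again only a hedged, untested sketch under the
same assumptions (device node name, user-visible header); the key would have
been communicated out of band, for example over a socket:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include "axon_usr_buffer.h"	/* assumed user-visible header */

	int read_peer_buffer(axon_buffer_key_t key)
	{
		axon_buffer_ioctl_map_t map;
		char data[4096];
		ssize_t n;
		int fd = open("/dev/buffer0", O_RDWR);	/* assumed device node */

		/* associate this file descriptor with the peer's buffer */
		memset(&map, 0, sizeof(map));
		map.key = key;
		if (ioctl(fd, MC_AXON_REMOTE_BUFFER_ACCESS_BY_KEY, &map) < 0)
			return -1;

		/* DMA the first bytes of the remote buffer into 'data' */
		n = read(fd, data, sizeof(data));
		printf("read %zd bytes from remote buffer\n", n);

		close(fd);
		return 0;
	}
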
--