From: Jean-Christophe DUBOIS

This is an example driver that allows a user application to program and use
the Axon DMA engine explicitly. A short user-space usage sketch is appended
at the end of this mail.

Signed-off-by: Jean-Christophe DUBOIS
---

Index: linux/drivers/axon/usr/dma/axon_usr.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/axon/usr/dma/axon_usr.c	2006-12-19 18:12:58.000000000 +0100
@@ -0,0 +1,1115 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#ifndef AXON_USR_LOOPBACK
+#include <axon_dmax.h>
+#include <axon_addr_xltr.h>
+#endif
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+#include <axon_usr_P.h>
+
+
+MODULE_DESCRIPTION("User space CAB driver interface");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Arnaud Samama (asamama@mc.com)");
+
+
+#if defined(AXON_DEBUG_USR)
+#define dbg_usr_log printk(KERN_DEBUG "AXON_USR:%s=>", __FUNCTION__);printk
+#else
+#define dbg_usr_log if (0) printk
+#endif
+
+#define dbg_usr_err printk(KERN_EMERG "AXON_USR:%s=>", __FUNCTION__);printk
+#define dbg_usr_inf printk(KERN_INFO "AXON_USR:%s=>", __FUNCTION__);printk
+
+
+static int axon_usr_devs_count = 0;
+static struct axon_usr_t **pp_axon_usr;
+
+
+static int
+axon_map_user_memory(unsigned long uaddr, unsigned long len, int writable,
+		     axon_user_map_info_t *p_user_map_info)
+{
+	int ret = 0;
+	unsigned long start = uaddr >> PAGE_SHIFT;
+	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	const int nr_pages = end - start;
+	struct page **pp_pages;
+
+	dbg_usr_log("trying to map vaddr=0x%08lx len=%lu\n", uaddr, len);
+
+	pp_pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (pp_pages != NULL) {
+
+		down_read(&current->mm->mmap_sem);
+		ret = get_user_pages(current, current->mm, uaddr, nr_pages,
+				     writable, 0, pp_pages, NULL);
+		up_read(&current->mm->mmap_sem);
+
+		if (ret == nr_pages) {
+
+			dbg_usr_log("The vaddr=0x%08lx is mapped by %d pages\n",
+				    uaddr, nr_pages);
+
+			p_user_map_info->pp_pages = pp_pages;
+			p_user_map_info->nr_pages = nr_pages;
+			p_user_map_info->len = len;
+			p_user_map_info->offset = (uaddr & ~PAGE_MASK);
+			p_user_map_info->virt_user_addr = uaddr;
+			ret = 0;
+
+		} else {
+			dbg_usr_log("get_user_pages has failed with ret=%d\n",
+				    ret);
+
+			kfree(pp_pages);
+			ret = -EFAULT;
+		}
+
+	} else {
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+static int
+axon_unmap_user_memory(axon_user_map_info_t *p_user_map_info)
+{
+	int ret = 0;
+	int i_page;
+
+	for (i_page = 0; i_page < p_user_map_info->nr_pages; i_page++) {
+		page_cache_release(p_user_map_info->pp_pages[i_page]);
+	}
+
+	kfree(p_user_map_info->pp_pages);
+	p_user_map_info->nr_pages = 0;
+
+	return ret;
+}
+
+static void
+axon_get_uniq_key(struct axon_usr_t *p_axon_usr,
+		  axon_map_key_t *p_axon_map_key)
+{
+	*p_axon_map_key =
+		(axon_map_key_t) atomic_inc_return(&p_axon_usr->key);
+}
+
+
+static int
+axon_release_user_page(struct axon_usr_file_t *p_usr_file,
+		       struct list_head *map_list_entry,
+		       axon_map_key_t key)
+{
+	int ret = -EINVAL;
+	axon_user_map_list_t *p_cur_map_list;
+
+	dbg_usr_log("Trying to release pages whose key is 0x%08x\n", key);
+
+	p_cur_map_list = list_entry(map_list_entry, axon_user_map_list_t, list);
+
+	if ((key == 0x0) || (p_cur_map_list->key == key)) {
+
+		dbg_usr_log("Found one mapping with key 0x%08x\n", key);
+
+		ret = axon_unmap_user_memory(&(p_cur_map_list->user_map_info));
+		list_del(map_list_entry);
+		kfree(p_cur_map_list);
+	}
+
+	return ret;
+}
+
+static int
+axon_release_user_pages(struct axon_usr_file_t *p_usr_file,
+			axon_ioctl_map_t *p_ioctl_map)
+{
+	int ret = 0;
+	struct list_head *pos;
+	struct list_head *q;
+
+	list_for_each_safe(pos, q, &(p_usr_file->map_list_head.list)) {
+
+		ret = axon_release_user_page(p_usr_file, pos,
+					     p_ioctl_map->key);
+		if (ret != -EINVAL) {
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void
+axon_release_all_pages(struct axon_usr_file_t *p_usr_file)
+{
+	struct list_head *pos;
+	struct list_head *q;
+
+	dbg_usr_log("Releasing all locked pages\n");
+
+	list_for_each_safe(pos, q, &(p_usr_file->map_list_head.list)) {
+
+		axon_release_user_page(p_usr_file, pos, 0x0);
+	}
+}
+
+static int
+axon_get_page_count(struct axon_usr_file_t *p_usr_file,
+		    axon_ioctl_page_count_t *p_ioctl_page_count)
+{
+	int ret = -EINVAL;
+	struct list_head *cursor;
+
+	dbg_usr_log("Looking for page count of key 0x%08x in 0x%p\n",
+		    p_ioctl_page_count->key,
+		    &(p_usr_file->map_list_head.list));
+
+	list_for_each(cursor, &(p_usr_file->map_list_head.list)) {
+
+		axon_user_map_list_t *p_user_map_list;
+
+		p_user_map_list =
+			list_entry(cursor, axon_user_map_list_t, list);
+
+		dbg_usr_log("Found key 0x%08x\n", p_user_map_list->key);
+
+		if (p_user_map_list->key == p_ioctl_page_count->key) {
+
+			p_ioctl_page_count->vaddr =
+				p_user_map_list->user_map_info.virt_user_addr;
+			p_ioctl_page_count->count =
+				p_user_map_list->user_map_info.nr_pages;
+			p_ioctl_page_count->vsize =
+				p_user_map_list->user_map_info.len;
+
+			dbg_usr_log("Returning for key 0x%08x: vaddr=0x%016"
+				    AXON_VADDR_FMT_T " vsize=0x%016"
+				    AXON_VADDR_FMT_T " count=%zd\n",
+				    p_ioctl_page_count->key,
+				    p_ioctl_page_count->vaddr,
+				    p_ioctl_page_count->vsize,
+				    p_ioctl_page_count->count);
+
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+
+static int
+axon_get_page_info(struct axon_usr_file_t *p_usr_file,
+		   axon_ioctl_page_info_t *p_ioctl_page_info)
+{
+	int ret = -EINVAL;
+	struct list_head *cursor;
+
+	dbg_usr_log("Looking for page info of key 0x%08x, index=%d\n",
+		    p_ioctl_page_info->key, p_ioctl_page_info->index);
+
+	list_for_each(cursor, &(p_usr_file->map_list_head.list)) {
+
+		axon_user_map_list_t *p_user_map_list;
+
+		p_user_map_list =
+			list_entry(cursor, axon_user_map_list_t, list);
+
+		if (p_user_map_list->key == p_ioctl_page_info->key) {
+
+			if (p_ioctl_page_info->index <
+			    p_user_map_list->user_map_info.nr_pages) {
+
+				size_t page_offset;
+				size_t page_len;
+				void *virt_kern_addr;
+
+				if (p_ioctl_page_info->index == 0) {
+					page_offset =
+						p_user_map_list->user_map_info.offset;
+
+					if (p_user_map_list->user_map_info.nr_pages == 1) {
+						page_len =
+							p_user_map_list->user_map_info.len;
+					} else {
+						page_len = PAGE_SIZE - page_offset;
+					}
+
+				} else if (p_ioctl_page_info->index ==
+					   p_user_map_list->user_map_info.nr_pages - 1) {
+
+					/*
+					 * Last page: the bytes left once the
+					 * first (partial) page and the full
+					 * middle pages have been accounted for.
+					 */
+					page_offset = 0;
+					page_len =
+						p_user_map_list->user_map_info.len -
+						(PAGE_SIZE - p_user_map_list->user_map_info.offset) -
+						PAGE_SIZE * (p_user_map_list->user_map_info.nr_pages - 2);
+
+				} else {
+					page_offset = 0;
+					page_len = PAGE_SIZE;
+				}
+
+				virt_kern_addr =
+					page_address(p_user_map_list->user_map_info.
+						     pp_pages[p_ioctl_page_info->index]);
+
+#ifndef AXON_USR_LOOPBACK
+				p_ioctl_page_info->plb_addr =
+					axon_addr_xltr_to_plb(p_usr_file->p_xltr,
+							      axon_virt_to_bus(virt_kern_addr));
+#else
+				p_ioctl_page_info->plb_addr =
+					cpu_to_be64(axon_virt_to_bus(virt_kern_addr));
+#endif
+				p_ioctl_page_info->virt_addr =
+					(p_user_map_list->user_map_info.virt_user_addr & PAGE_MASK) +
+					p_ioctl_page_info->index * PAGE_SIZE;
+
+				p_ioctl_page_info->size = page_len;
+				p_ioctl_page_info->offset = page_offset;
+
+				dbg_usr_log("Key (0x%08x,%d) is (virt=0x%016"
+					    AXON_VADDR_FMT_T ", plb=0x%016"
+					    AXON_PLB_ADDR_FMT_T ", offset=0x%016"
+					    AXON_PADDR_FMT_T ", size=0x%016"
+					    AXON_PADDR_FMT_T ")\n",
+					    p_ioctl_page_info->key,
+					    p_ioctl_page_info->index,
+					    p_ioctl_page_info->virt_addr,
+					    p_ioctl_page_info->plb_addr,
+					    p_ioctl_page_info->offset,
+					    p_ioctl_page_info->size);
+
+				ret = 0;
+			}
+
+			break;
+		}
+	}
+
+	return ret;
+}
+
+
+static int
+axon_get_user_pages(struct axon_usr_file_t *p_usr_file,
+		    axon_ioctl_map_t *p_ioctl_map)
+{
+	int ret = 0;
+	axon_user_map_list_t *p_new_map_list;
+
+	p_new_map_list = kmalloc(sizeof(axon_user_map_list_t), GFP_KERNEL);
+	if (p_new_map_list != NULL) {
+
+		ret = axon_map_user_memory(p_ioctl_map->vaddr,
+					   p_ioctl_map->len, 1,
+					   &(p_new_map_list->user_map_info));
+		if (ret == 0) {
+			axon_get_uniq_key(p_usr_file->p_axon_usr,
+					  &p_new_map_list->key);
+
+			list_add(&(p_new_map_list->list),
+				 &(p_usr_file->map_list_head.list));
+
+			p_ioctl_map->key = p_new_map_list->key;
+
+			dbg_usr_log("Adding key=0x%08x in 0x%p into 0x%p\n",
+				    p_ioctl_map->key, &(p_new_map_list->list),
+				    &(p_usr_file->map_list_head.list));
+			dbg_usr_log("Allocating key=0x%08x for virtual 0x%016"
+				    AXON_VADDR_FMT_T "\n", p_ioctl_map->key,
+				    p_ioctl_map->vaddr);
+		} else {
+			dbg_usr_log("Unable to map user memory\n");
+			kfree(p_new_map_list);
+		}
+	} else {
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+static loff_t
+axon_llseek(struct file *file, loff_t offset, int whence)
+{
+	return -ENOSYS;
+}
+
+
+static ssize_t
+axon_read(struct file *file, char __user *buff, size_t len,
+	  loff_t *offset)
+{
+	return -ENOSYS;
+}
+
+
+static ssize_t
+axon_write(struct file *file, const char __user *buff, size_t len,
+	   loff_t *offset)
+{
+	return -ENOSYS;
+}
+
+#ifdef AXON_USR_LOOPBACK
+static void
+axon_usr_dma_xfer(void *data)
+{
+	int ret = 0;
+	struct axon_usr_file_t *p_usr_file = data;
+	struct pending_dma_request *p_dma_request;
+	axon_ioctl_dma_req_t *p_ioctl_dma_req;
+	void *p_src;
+	void *p_dst;
+	size_t size;
+
+	down(&p_usr_file->sem_dma_reqs);
+
+	p_dma_request = list_entry(p_usr_file->dma_xfer_list.next,
+				   struct pending_dma_request, list);
+	list_del(p_usr_file->dma_xfer_list.next);
+
+	up(&p_usr_file->sem_dma_reqs);
+
+	p_ioctl_dma_req = p_dma_request->ioctl_dma_req;
+	p_src = __va(be64_to_cpu(p_ioctl_dma_req->paddr_src));
+	p_dst = __va(be64_to_cpu(p_ioctl_dma_req->paddr_dst));
+	size = p_ioctl_dma_req->size;
+
+	if (p_ioctl_dma_req->type == AXON_DMA_WRITE) {
+
+		dbg_usr_log("Doing DMA Write from 0x%08lx to 0x%08lx of 0x%08lx bytes\n",
+			    (unsigned long) p_src, (unsigned long) p_dst, size);
+		memcpy(p_dst, p_src, size);
+
+	} else if (p_ioctl_dma_req->type == AXON_DMA_READ) {
+
+		dbg_usr_log("Doing DMA Read from 0x%08lx to 0x%08lx of 0x%08lx bytes\n",
+			    (unsigned long) p_dst, (unsigned long) p_src, size);
+		memcpy(p_src, p_dst, size);
+
+	} else {
+		dbg_usr_log("DMA transfer direction is invalid\n");
+		ret = -EINVAL;
+	}
+
+	p_dma_request->completed = 1;
+	wake_up(&p_usr_file->dma_wait_queue);
+
+	down(&p_usr_file->sem_dma_reqs);
+
+	if (!list_empty(&p_usr_file->dma_xfer_list)) {
+		if (!queue_work(p_usr_file->dma_xfer_wq,
+				&p_usr_file->dma_xfer)) {
+			dbg_usr_err("Unable to queue DMA transfer work from the queue\n");
+			ret = -ENOSYS;
+		}
+	}
+
+	up(&p_usr_file->sem_dma_reqs);
+}
+
+static int
+axon_do_dma(struct axon_usr_file_t *p_usr_file,
+	    axon_ioctl_dma_req_t *p_ioctl_dma_req)
+{
+	int ret = 0;
+	struct pending_dma_request *p_dma_request;
+
+	dbg_usr_log("Creating loopback DMA request\n");
+
+	p_dma_request = kzalloc(sizeof(struct pending_dma_request),
+				GFP_KERNEL);
+	if (p_dma_request != NULL) {
+		p_dma_request->completed = 0;
+		p_dma_request->ioctl_dma_req = p_ioctl_dma_req;
+
+		down(&p_usr_file->sem_dma_reqs);
+
+		dbg_usr_log("Adding DMA request 0x%p to the queue\n",
+			    p_dma_request);
+		list_add_tail(&p_dma_request->list,
+			      &p_usr_file->dma_xfer_list);
+
+		up(&p_usr_file->sem_dma_reqs);
+
+		if (queue_work(p_usr_file->dma_xfer_wq,
+			       &p_usr_file->dma_xfer)) {
+			dbg_usr_log("DMA request 0x%p has been submitted to the queue. Waiting for its completion\n",
+				    p_dma_request);
+			wait_event_interruptible(p_usr_file->dma_wait_queue,
+						 p_dma_request->completed == 1);
+			dbg_usr_log("DMA request 0x%p has completed\n",
+				    p_dma_request);
+			kfree(p_dma_request);
+
+		} else {
+			ret = -ENOSYS;
+			dbg_usr_err("Unable to queue DMA transfer work\n");
+		}
+	} else {
+		ret = -ENOMEM;
+		dbg_usr_err("Unable to allocate DMA transfer request\n");
+	}
+
+	return ret;
+}
+
+#else
+
+static void
+axon_usr_dma_completion_handler(struct axon_dmax_t *p_axon_dmax,
+				struct axon_dma_req_t *p_dma_req,
+				void *context)
+{
+	struct axon_usr_file_t *p_usr_file = context;
+	int i_req = 0;
+
+	while (i_req < AXON_USR_MAX_PENDING_DMA_REQUEST
+	       && p_usr_file->p_dma_reqs[i_req].dma_req != p_dma_req) {
+		i_req++;
+	}
+
+	if (i_req < AXON_USR_MAX_PENDING_DMA_REQUEST) {
+
+		p_usr_file->p_dma_reqs[i_req].completed = 1;
+		wake_up(&p_usr_file->dma_wait_queue);
+
+	} else {
+		dbg_usr_err("DMA request 0x%p has finished but unable to find it in the pending list\n",
+			    p_dma_req);
+	}
+}
+
+static int
+axon_do_dma(struct axon_usr_file_t *p_usr_file,
+	    axon_ioctl_dma_req_t *p_ioctl_dma_req)
+{
+	int ret = 0;
+	int i_req = 0;
+	struct axon_dma_req_xfer_t dma_req_xfer = AXON_DMA_REQ_XFER_INIT;
+	struct axon_dma_req_t *dma_req = NULL;
+
+	ret = axon_dma_request_create(p_usr_file->p_dma, &dma_req, 128);
+	if (ret != 0) {
+		dbg_usr_err("Unable to create a DMA request to process user command\n");
+		goto out;
+	}
+
+	down(&p_usr_file->sem_dma_reqs);
+
+	while (i_req < AXON_USR_MAX_PENDING_DMA_REQUEST
+	       && p_usr_file->p_dma_reqs[i_req].dma_req != NULL) {
+		i_req++;
+	}
+
+	if (i_req >= AXON_USR_MAX_PENDING_DMA_REQUEST) {
+		dbg_usr_err("No more slot to store DMA request\n");
+		ret = -ENOMEM;
+		up(&p_usr_file->sem_dma_reqs);
+		goto free_dma_req;
+	}
+
+	p_usr_file->p_dma_reqs[i_req].completed = 0;
+	p_usr_file->p_dma_reqs[i_req].dma_req = dma_req;
+
+	up(&p_usr_file->sem_dma_reqs);
+
+	dbg_usr_log("Creating xfer DMA request\n");
+
+	dma_req_xfer.size = p_ioctl_dma_req->size;
+	dma_req_xfer.intr = DMA_NO_INTR;
+
+	if (p_ioctl_dma_req->type == AXON_DMA_WRITE) {
+
+		dma_req_xfer.src = p_ioctl_dma_req->paddr_src;
+		dma_req_xfer.dst = p_ioctl_dma_req->paddr_dst;
+
+	} else if (p_ioctl_dma_req->type == AXON_DMA_READ) {
+
+		dma_req_xfer.src = p_ioctl_dma_req->paddr_dst;
+		dma_req_xfer.dst = p_ioctl_dma_req->paddr_src;
+
+	} else {
+		dbg_usr_err("DMA transfer direction is invalid\n");
+		ret = -EINVAL;
+		goto free_slot;
+	}
+
+	ret = axon_dma_request_push_xfer(dma_req, &dma_req_xfer);
+	if (ret != 0) {
+		dbg_usr_err("Unable to build the DMA request\n");
+		goto free_slot;
+	}
+
+	ret = axon_dma_request_queue(dma_req,
+				     axon_usr_dma_completion_handler,
+				     p_usr_file);
+	if (ret < 0) {
+		dbg_usr_err("Unable to queue DMA request to process user command\n");
+		goto free_slot;
+	}
+
+	wait_event_interruptible(p_usr_file->dma_wait_queue,
+				 p_usr_file->p_dma_reqs[i_req].completed == 1);
+
+free_slot:
+	p_usr_file->p_dma_reqs[i_req].dma_req = NULL;
+
+free_dma_req:
+	if (ret < 0) {
+		if (dma_req)
+			axon_dma_request_destroy(dma_req);
+	}
+
+out:
+	return ret;
+}
+#endif
+
+static int
+axon_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+	   unsigned long arg)
+{
+	int ret = 0;
+	axon_ioctl_map_t ioctl_map;
+	axon_ioctl_dma_req_t ioctl_dma_req;
+	axon_ioctl_page_count_t ioctl_page_count;
+	axon_ioctl_page_info_t ioctl_page_info;
+	struct axon_usr_file_t *p_usr_file =
+		(struct axon_usr_file_t *) (file->private_data);
+	if ((_IOC_TYPE(cmd) != MC_AXON_IOC_MAGIC) ||
+	    (_IOC_NR(cmd) > MC_AXON_IOC_MAXNR)) {
+		dbg_usr_log("Ioctl wrong command\n");
+		ret = -ENOTTY;
+	}
+
+	if (ret == 0) {
+
+		int err = 0;
+
+		if (_IOC_DIR(cmd) & _IOC_READ) {
+			err = !access_ok(VERIFY_WRITE, (void *) arg,
+					 _IOC_SIZE(cmd));
+		} else if (_IOC_DIR(cmd) & _IOC_WRITE) {
+			err = !access_ok(VERIFY_READ, (void *) arg,
+					 _IOC_SIZE(cmd));
+		}
+
+		if (err) {
+			dbg_usr_log("Ioctl wrong access rights\n");
+			ret = -EFAULT;
+		}
+	}
+
+	if (ret >= 0) {
+
+		switch (cmd) {
+
+		case MC_AXON_IOC_GETPAGES:
+			dbg_usr_log("Ioctl MC_AXON_IOC_GETPAGES\n");
+			if (copy_from_user(&ioctl_map,
+					   (axon_ioctl_map_t *) arg,
+					   sizeof(axon_ioctl_map_t))) {
+				ret = -EFAULT;
+				break;
+			}
+			ret = axon_get_user_pages(p_usr_file, &ioctl_map);
+			if (ret == 0) {
+				if (copy_to_user((axon_ioctl_map_t *) arg,
+						 &ioctl_map,
+						 sizeof(axon_ioctl_map_t)))
+					ret = -EFAULT;
+			}
+			break;
+
+		case MC_AXON_IOC_RELEASEPAGES:
+			dbg_usr_log("Ioctl MC_AXON_IOC_RELEASEPAGES\n");
+			if (copy_from_user(&ioctl_map,
+					   (axon_ioctl_map_t *) arg,
+					   sizeof(axon_ioctl_map_t))) {
+				ret = -EFAULT;
+				break;
+			}
+			ret = axon_release_user_pages(p_usr_file, &ioctl_map);
+			break;
+
+		case MC_AXON_IOC_GETPAGECOUNT:
+			dbg_usr_log("Ioctl MC_AXON_IOC_GETPAGECOUNT\n");
+			if (copy_from_user(&ioctl_page_count,
+					   (axon_ioctl_page_count_t *) arg,
+					   sizeof(axon_ioctl_page_count_t))) {
+				ret = -EFAULT;
+				break;
+			}
+			ret = axon_get_page_count(p_usr_file,
+						  &ioctl_page_count);
+			if (ret == 0) {
+				if (copy_to_user((axon_ioctl_page_count_t *) arg,
+						 &ioctl_page_count,
+						 sizeof(axon_ioctl_page_count_t)))
+					ret = -EFAULT;
+			}
+			break;
+
+		case MC_AXON_IOC_GETPAGEINFO:
+			dbg_usr_log("Ioctl MC_AXON_IOC_GETPAGEINFO\n");
+			if (copy_from_user(&ioctl_page_info,
+					   (axon_ioctl_page_info_t *) arg,
+					   sizeof(axon_ioctl_page_info_t))) {
+				ret = -EFAULT;
+				break;
+			}
+			ret = axon_get_page_info(p_usr_file,
+						 &ioctl_page_info);
+			if (ret == 0) {
+				if (copy_to_user((axon_ioctl_page_info_t *) arg,
+						 &ioctl_page_info,
+						 sizeof(axon_ioctl_page_info_t)))
+					ret = -EFAULT;
+			}
+			break;
+
+		case MC_AXON_IOC_DO_DMA:
+			dbg_usr_log("Ioctl MC_AXON_IOC_DO_DMA\n");
+			if (copy_from_user(&ioctl_dma_req,
+					   (axon_ioctl_dma_req_t *) arg,
+					   sizeof(axon_ioctl_dma_req_t))) {
+				ret = -EFAULT;
+				break;
+			}
+			ret = axon_do_dma(p_usr_file, &ioctl_dma_req);
+			break;
+
+		default:
+			dbg_usr_log("Ioctl unknown ioctl command\n");
+			ret = -ENOTTY;
+		}
+	}
+
+	return ret;
+}
+
+
+static int
+axon_mmap(struct file *file, struct vm_area_struct *vm)
+{
+	return -ENOSYS;
+}
+
+
+static int
+axon_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct axon_usr_file_t *p_usr_file = NULL;
+
+	p_usr_file = kzalloc(sizeof(struct axon_usr_file_t), GFP_KERNEL);
+	if (p_usr_file != NULL) {
+
+		p_usr_file->p_axon_usr =
+			container_of(inode->i_cdev, struct axon_usr_t, cdev);
+
+		file->private_data = p_usr_file;
+
+#ifndef AXON_USR_LOOPBACK
+		p_usr_file->p_dma =
+			axon_dmax_get(p_usr_file->p_axon_usr->p_axon);
+		p_usr_file->p_xltr =
+			axon_addr_xltr_get(p_usr_file->p_axon_usr->p_axon);
+#else
+		p_usr_file->dma_xfer_wq =
+			create_singlethread_workqueue("axon_lo");
+		INIT_WORK(&p_usr_file->dma_xfer, axon_usr_dma_xfer,
+			  p_usr_file);
+
+		INIT_LIST_HEAD(&p_usr_file->dma_xfer_list);
+#endif
+
+		INIT_LIST_HEAD(&(p_usr_file->map_list_head.list));
+		init_waitqueue_head(&p_usr_file->dma_wait_queue);
+
+		init_MUTEX(&p_usr_file->sem_dma_reqs);
+
+		try_module_get(THIS_MODULE);
+
+	} else {
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+static int
+axon_flush(struct file *file, fl_owner_t id)
+#else
+static int
+axon_flush(struct file *file)
+#endif
+{
+	return -ENOSYS;
+}
+
+
+static int
+axon_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct axon_usr_file_t *p_usr_file =
+		(struct axon_usr_file_t *) (file->private_data);
+
+	if (p_usr_file != NULL) {
+
+#ifdef AXON_USR_LOOPBACK
+		flush_workqueue(p_usr_file->dma_xfer_wq);
+		destroy_workqueue(p_usr_file->dma_xfer_wq);
+#endif
+		axon_release_all_pages(p_usr_file);
+		kfree(p_usr_file);
+	}
+
+	module_put(THIS_MODULE);
+
+	return ret;
+}
+
+static struct file_operations fops = {
+	.read = axon_read,
+	.write = axon_write,
+	.open = axon_open,
+	.release = axon_release,
+	.ioctl = axon_ioctl,
+	.mmap = axon_mmap,
+	.llseek = axon_llseek,
+	.flush = axon_flush,
+};
+
+
+static dev_t dma_dev;
+
+static void
+axon_usr_module_cleanup(void)
+{
+	int i_board;
+
+	if (pp_axon_usr == NULL)
+		return;
+
+	for (i_board = 0; i_board < axon_usr_devs_count; i_board++) {
+		struct axon_usr_t *p_axon_usr = pp_axon_usr[i_board];
+
+		if (p_axon_usr != NULL) {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+			class_device_destroy(axon_get_class(),
+					     p_axon_usr->cdev_num);
+#else
+			class_simple_device_remove(p_axon_usr->cdev_num);
+#endif
+			cdev_del(&p_axon_usr->cdev);
+			kfree(p_axon_usr);
+		}
+	}
+
+	kfree(pp_axon_usr);
+
+	unregister_chrdev_region(dma_dev, axon_usr_devs_count);
+}
+
+static __init int
+axon_usr_module_init(void)
+{
+	int ret = 0;
+	int i_board;
+
+#ifndef AXON_USR_LOOPBACK
+	axon_usr_devs_count = axon_board_count();
+	dbg_usr_inf("Found %d board(s)\n", axon_usr_devs_count);
+#else
+	axon_usr_devs_count = 1;
+	dbg_usr_inf("Starting Loopback Axon driver for simulation\n");
+#endif
+
+	if (axon_usr_devs_count <= 0)
+		return -ENODEV;
+
+	pp_axon_usr = kzalloc(sizeof(struct axon_usr_t *) *
+			      axon_usr_devs_count, GFP_KERNEL);
+	if (pp_axon_usr == NULL)
+		return -ENOMEM;
+
+	ret = alloc_chrdev_region(&dma_dev, 0, axon_usr_devs_count, "dma");
+	if (ret < 0) {
+		kfree(pp_axon_usr);
+		return ret;
+	}
+
+	for (i_board = 0; i_board < axon_usr_devs_count && ret == 0;
+	     i_board++) {
+
+		struct axon_usr_t *p_axon_usr;
+
+		p_axon_usr = kzalloc(sizeof(struct axon_usr_t), GFP_KERNEL);
+		if (p_axon_usr == NULL) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		p_axon_usr->cdev_num = MKDEV(MAJOR(dma_dev), i_board);
+
+		cdev_init(&p_axon_usr->cdev, &fops);
+
+#ifndef AXON_USR_LOOPBACK
+		p_axon_usr->p_axon = axon_board_list()[i_board];
+#endif
+
+		atomic_set(&p_axon_usr->key, AXON_DEFAULT_KEY_BASE);
+
+		ret = cdev_add(&p_axon_usr->cdev, p_axon_usr->cdev_num, 1);
+		if (ret < 0) {
+#ifndef AXON_USR_LOOPBACK
+			dbg_usr_err("Unable to add user space driver for board 0x%p, on minor %d\n",
+				    p_axon_usr->p_axon, i_board);
+#else
+			dbg_usr_err("Unable to add user space driver for Simulated loopback board,"
+				    " on minor %d\n", i_board);
+#endif
+		}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+		class_device_create(axon_get_class(), NULL,
+				    p_axon_usr->cdev_num, NULL, "dma%d",
+				    i_board);
+#else
+		class_simple_device_add(axon_get_class(),
+					p_axon_usr->cdev_num, NULL,
+					"dma%d", i_board);
+#endif
+
+		pp_axon_usr[i_board] = p_axon_usr;
+	}
+
+	if (ret != 0) {
+		axon_usr_module_cleanup();
+	}
+
+	return ret;
+}
+
+module_init(axon_usr_module_init);
+module_exit(axon_usr_module_cleanup);
Index: linux/drivers/axon/usr/dma/axon_usr_P.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/axon/usr/dma/axon_usr_P.h	2006-12-19 18:12:58.000000000 +0100
@@ -0,0 +1,151 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+
+#ifndef AXON_USR_P_H
+#define AXON_USR_P_H
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+#include "axon_usr.h"
+#ifndef AXON_USR_LOOPBACK
+#include "axon_dmax.h"
+#endif
+
+
+typedef struct {
+
+	struct page **pp_pages;
+
+	int nr_pages;
+
+	size_t len;
+
+	size_t offset;
+
+	unsigned long virt_user_addr;
+
+} axon_user_map_info_t;
+
+
+typedef struct {
+
+	axon_map_key_t key;
+
+	axon_user_map_info_t user_map_info;
+
+	struct list_head list;
+
+} axon_user_map_list_t;
+
+
+#define AXON_DEFAULT_KEY_BASE 0x45f4
+#define AXON_PROCFS_NAME "axon"
+
+struct axon_usr_t {
+
+	struct cdev cdev;
+
+	dev_t cdev_num;
+
+#ifndef AXON_USR_LOOPBACK
+	struct axon_t *p_axon;
+#endif
+
+	atomic_t key;
+
+	int major;
+};
+
+#define AXON_USR_MAX_PENDING_DMA_REQUEST 32
+
+struct pending_dma_request {
+
+#ifndef AXON_USR_LOOPBACK
+	struct axon_dma_req_t *dma_req;
+#else
+	axon_ioctl_dma_req_t *ioctl_dma_req;
+
+	struct list_head list;
+#endif
+	int completed;
+};
+
+struct axon_usr_file_t {
+
+	axon_user_map_list_t map_list_head;
+
+	struct proc_dir_entry *p_proc_dir_entry;
+
+	struct axon_usr_t *p_axon_usr;
+
+#ifndef AXON_USR_LOOPBACK
+	struct addr_xltr_t *p_xltr;
+
+	struct axon_dmax_t *p_dma;
+#else
+	struct workqueue_struct *dma_xfer_wq;
+	struct work_struct dma_xfer;
+	struct list_head dma_xfer_list;
+#endif
+
+	int minor;
+
+	wait_queue_head_t dma_wait_queue;
+
+	struct pending_dma_request
+		p_dma_reqs[AXON_USR_MAX_PENDING_DMA_REQUEST];
+	struct semaphore sem_dma_reqs;
+};
+
+
+#else
+
+#error "This file must not be included by user applications. Use axon_usr.h instead"
+
+#endif
+
+
+#endif
Index: linux/drivers/axon/usr/include/axon_usr.h
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux/drivers/axon/usr/include/axon_usr.h	2006-12-19 18:13:08.000000000 +0100
@@ -0,0 +1,106 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+
+#ifndef AXON_USR_H
+#define AXON_USR_H
+
+#include <linux/ioctl.h>
+
+#include "axon_types.h"
+
+
+#define MC_AXON_IOC_MAGIC 'X'
+#define MC_AXON_PROCFS_NAME "axon"
+
+
+#define MC_AXON_IOC_GETPAGES     _IOWR(MC_AXON_IOC_MAGIC, 1, axon_ioctl_map_t)
+#define MC_AXON_IOC_RELEASEPAGES _IOW(MC_AXON_IOC_MAGIC, 2, axon_ioctl_map_t)
+#define MC_AXON_IOC_GETPAGECOUNT _IOWR(MC_AXON_IOC_MAGIC, 3, axon_ioctl_page_count_t)
+#define MC_AXON_IOC_GETPAGEINFO  _IOWR(MC_AXON_IOC_MAGIC, 4, axon_ioctl_page_info_t)
+#define MC_AXON_IOC_DO_DMA       _IOW(MC_AXON_IOC_MAGIC, 5, axon_ioctl_dma_req_t)
+
+#define MC_AXON_IOC_MAXNR 5
+
+typedef int axon_map_key_t;
+
+
+typedef struct {
+
+	axon_addr_ptr_t vaddr;
+
+	axon_size_t len;
+
+	axon_map_key_t key;
+
+} axon_ioctl_map_t;
+
+
+typedef struct {
+
+	axon_map_key_t key;
+
+	size_t count;
+
+	axon_addr_ptr_t vaddr;
+
+	axon_size_t vsize;
+
+} axon_ioctl_page_count_t;
+
+typedef struct {
+
+	axon_map_key_t key;
+
+	unsigned int index;
+
+	plb_addr_t plb_addr;
+
+	axon_addr_ptr_t virt_addr;
+
+	axon_size_t offset;
+
+	axon_size_t size;
+
+} axon_ioctl_page_info_t;
+
+typedef enum {
+	AXON_DMA_WRITE,
+	AXON_DMA_READ,
+} axon_dma_req_type_t;
+
+typedef struct {
+
+	plb_addr_t paddr_dst;
+
+	plb_addr_t paddr_src;
+
+	axon_size_bus_t size;
+
+	axon_dma_req_type_t type;
+
+} axon_ioctl_dma_req_t;
+
+
+#endif
--
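
For reference, here is a minimal user-space sketch of how an application
might drive this interface: it pins one page of a source and a destination
buffer, asks the driver for their PLB addresses, and then requests a one-page
DMA transfer. This is not part of the patch. The device node name /dev/dma0
and the cast through unsigned long are assumptions, error handling is reduced
to perror(), and axon_usr.h is expected to be on the include path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>

#include "axon_usr.h"

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd;
	void *src, *dst;
	axon_ioctl_map_t map_src, map_dst;
	axon_ioctl_page_info_t info_src, info_dst;
	axon_ioctl_dma_req_t req;

	fd = open("/dev/dma0", O_RDWR);	/* assumed device node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Page-aligned buffers so each mapping is covered by one page. */
	if (posix_memalign(&src, page, page) ||
	    posix_memalign(&dst, page, page))
		return 1;
	memset(src, 0xab, page);
	memset(dst, 0, page);

	/* Pin both buffers; the driver hands back one key per mapping. */
	map_src.vaddr = (axon_addr_ptr_t) (unsigned long) src;
	map_src.len = page;
	if (ioctl(fd, MC_AXON_IOC_GETPAGES, &map_src) < 0)
		perror("MC_AXON_IOC_GETPAGES (src)");

	map_dst.vaddr = (axon_addr_ptr_t) (unsigned long) dst;
	map_dst.len = page;
	if (ioctl(fd, MC_AXON_IOC_GETPAGES, &map_dst) < 0)
		perror("MC_AXON_IOC_GETPAGES (dst)");

	/* Look up the PLB address of page 0 of each mapping. */
	info_src.key = map_src.key;
	info_src.index = 0;
	if (ioctl(fd, MC_AXON_IOC_GETPAGEINFO, &info_src) < 0)
		perror("MC_AXON_IOC_GETPAGEINFO (src)");

	info_dst.key = map_dst.key;
	info_dst.index = 0;
	if (ioctl(fd, MC_AXON_IOC_GETPAGEINFO, &info_dst) < 0)
		perror("MC_AXON_IOC_GETPAGEINFO (dst)");

	/* One page from src to dst; the ioctl blocks until completion. */
	req.paddr_src = info_src.plb_addr;
	req.paddr_dst = info_dst.plb_addr;
	req.size = page;
	req.type = AXON_DMA_WRITE;
	if (ioctl(fd, MC_AXON_IOC_DO_DMA, &req) < 0)
		perror("MC_AXON_IOC_DO_DMA");

	printf("dst[0] = 0x%02x\n", ((unsigned char *) dst)[0]);

	/* Unpin both mappings and close the device. */
	if (ioctl(fd, MC_AXON_IOC_RELEASEPAGES, &map_src) < 0)
		perror("MC_AXON_IOC_RELEASEPAGES (src)");
	if (ioctl(fd, MC_AXON_IOC_RELEASEPAGES, &map_dst) < 0)
		perror("MC_AXON_IOC_RELEASEPAGES (dst)");

	free(src);
	free(dst);
	close(fd);
	return 0;
}

Note that transfers larger than one page would walk every index returned by
MC_AXON_IOC_GETPAGECOUNT and issue one MC_AXON_IOC_DO_DMA per physically
contiguous chunk, since the driver translates one pinned page at a time.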