This is a PCI-E mapping driver that allows the SPUs to directly target some host memory with their DMA engines (dangerous). A short sketch of the intended user-space usage follows the patch.

Signed-off-by: Jean-Christophe DUBOIS

--
Index: linux-2.6.21/drivers/axon/usr/pcie/axon_usr_pcie.c
===================================================================
--- /dev/null
+++ linux-2.6.21/drivers/axon/usr/pcie/axon_usr_pcie.c
@@ -0,0 +1,330 @@
+/******************************************************************
+ * Copyright (C) 2006 Mercury Computer Systems, Inc.
+ * 199 Riverneck Road
+ * Chelmsford, MA 01824-2820
+ * (978) 256-1300
+ * webinfo@mc.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ ******************************************************************/
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+struct axon_usr_pcie_t {
+
+	struct cdev cdev;	/* character device exposing the window */
+
+	dev_t cdev_num;		/* its device number */
+
+	u64 plb_addr;		/* PLB base address of the PCIe window */
+
+	u64 processor_offset;	/* Axon offset as seen from the processor */
+
+	u64 size;		/* size of the mappable window */
+
+	struct list_head list;	/* link in axon_pcie_list */
+};
+
+MODULE_DESCRIPTION("user space PCIE mapping driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jean-Christophe Dubois (jdubois@mc.com)");
+
+#include
+
+static struct list_head axon_pcie_list;
+
+static int axon_usr_pcie_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	dbg_log("begin\n");
+
+	file->private_data = NULL;
+
+	module_put(THIS_MODULE);
+
+	return ret;
+}
+
+static int axon_usr_pcie_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+
+	dbg_log("begin\n");
+
+	if (try_module_get(THIS_MODULE)) {
+
+		file->private_data =
+		    container_of(inode->i_cdev, struct axon_usr_pcie_t, cdev);
+	} else {
+		dbg_err("failed in try_module_get()\n");
+		ret = -ENODEV;
+	}
+
+	return ret;
+}
+
+static int axon_usr_pcie_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT);
+
+	struct axon_usr_pcie_t *p_axon_pcie =
+	    (struct axon_usr_pcie_t *)(file->private_data);
+
+	unsigned long prot = pgprot_val(vma->vm_page_prot);
+
+	dbg_log(" for 0x%08lx\n", offset);
+
+	if ((offset + vma->vm_end - vma->vm_start) > p_axon_pcie->size) {
+		dbg_err("mapping request is out of range\n");
+		return -EINVAL;
+	}
+
+	offset +=
+	    (p_axon_pcie->plb_addr & AXON_PLB_MASK_FROM_BE) +
+	    p_axon_pcie->processor_offset;
+
+	dbg_log("mapping offset 0x%016lx for %ld bytes\n", offset,
+		vma->vm_end - vma->vm_start);
+
+	/* I/O memory: map it guarded and non-cached */
+	prot |= _PAGE_GUARDED;
+	prot |= _PAGE_NO_CACHE;
+
+	vma->vm_page_prot = __pgprot(prot);
+
+	vma->vm_flags |= VM_IO;
+	vma->vm_flags |= VM_RESERVED;
+
+	if (remap_pfn_range(vma, vma->vm_start, offset >> PAGE_SHIFT,
+			    vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
+		dbg_err("remap_pfn_range failed\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static struct file_operations fops = {
+	.open = axon_usr_pcie_open,
+	.release = axon_usr_pcie_release,
+	.mmap = axon_usr_pcie_mmap,
+};
+
+static __init struct device_node
+*axon_usr_pcie_of_find_parent_by_name(struct device_node *from, char *name)
+{
+	struct device_node *dn;
+
+	of_node_get(from);
+
+	/* walk up the device tree until a node with the requested name is found */
+	while ((dn = of_get_parent(from))) {
+		char *node_name = (char *)get_property(dn, "name", NULL);
+
+		of_node_put(from);
+		from = dn;
+
+		if ((node_name) && (strcmp(node_name, name) == 0)) {
+			of_node_put(dn);
+			return dn;
+		}
+	}
+
+	of_node_put(from);
+
+	return NULL;
+}
+
+static unsigned num_pcie;
+static dev_t pcie_dev;
+
+static void axon_usr_pcie_module_cleanup(void)
+{
+	struct list_head *pos, *q;
+	struct axon_usr_pcie_t *p_cur_pcie;
+
+	dbg_log("begin\n");
+
+	list_for_each_safe(pos, q, &(axon_pcie_list)) {
+		p_cur_pcie = list_entry(pos, struct axon_usr_pcie_t, list);
+
+		list_del(&p_cur_pcie->list);
+
+		class_device_destroy(axon_get_class(), p_cur_pcie->cdev_num);
+
+		cdev_del(&p_cur_pcie->cdev);
+
+		kfree(p_cur_pcie);
+	}
+
+	if (num_pcie)
+		unregister_chrdev_region(pcie_dev, num_pcie);
+}
+
+static __init int axon_usr_pcie_module_init(void)
+{
+	int ret = 0;
+	struct device_node *dn;
+	int i_board;
+
+	dbg_log("begin\n");
+
+	INIT_LIST_HEAD(&axon_pcie_list);
+
+	/* count the "pciep" nodes present in the device tree */
+	for (num_pcie = 0, dn = NULL;
+	     (dn = of_find_node_by_name(dn, "pciep")); num_pcie++) ;
+
+	if (num_pcie == 0)
+		return -ENODEV;
+
+	ret = alloc_chrdev_region(&pcie_dev, 0, num_pcie, "pciep");
+	if (ret != 0) {
+		dbg_err("alloc_chrdev_region() failed\n");
+		return ret;
+	}
+
+	for (dn = NULL, i_board = 0;
+	     (dn = of_find_node_by_name(dn, "pciep")); i_board++) {
+		struct axon_usr_pcie_t *p_axon_pcie;
+		struct device_node *axon_node;
+		unsigned long *axon_reg;
+
+		unsigned long *reg =
+		    (unsigned long *)get_property(dn, "reg", NULL);
+
+		if (reg == NULL) {
+			dbg_inf("Can't get 'reg' property for PCIE node\n");
+			continue;
+		}
+
+		dbg_inf("PCIE memory at 0x%lx, size = 0x%lx\n", reg[0], reg[1]);
+
+		if (reg[1] == 0) {
+			dbg_inf("PCIE memory at 0x%lx is empty?\n", reg[0]);
+			continue;
+		}
+
+		axon_node = axon_usr_pcie_of_find_parent_by_name(dn, "axon");
+
+		if (axon_node == NULL) {
+			dbg_inf("Can't find Axon parent node\n");
+			continue;
+		}
+
+		axon_reg =
+		    (unsigned long *)get_property(axon_node, "reg", NULL);
+
+		if (axon_reg == NULL) {
+			dbg_inf("Can't get 'reg' property for Axon node\n");
+			continue;
+		}
+
+		if ((axon_reg[0] == 0) || (axon_reg[1] == 0)) {
+			dbg_inf("Bad reg values for the Axon node\n");
+			continue;
+		}
+
+		p_axon_pcie =
+		    kzalloc(sizeof(struct axon_usr_pcie_t), GFP_KERNEL);
+
+		if (p_axon_pcie == NULL) {
+			dbg_err("failed to allocate memory\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		p_axon_pcie->plb_addr = 0xC00000C000000000ULL;
+
+		p_axon_pcie->size = 0x800000000ULL;
+
+		p_axon_pcie->processor_offset = axon_reg[0];
+
+		p_axon_pcie->cdev_num = MKDEV(MAJOR(pcie_dev), i_board);
+
+		cdev_init(&p_axon_pcie->cdev, &fops);
+
+		ret = cdev_add(&p_axon_pcie->cdev, p_axon_pcie->cdev_num, 1);
+		if (ret != 0) {
+			dbg_err("cdev_add() failed\n");
+			kfree(p_axon_pcie);
+			break;
+		}
+
+		class_device_create(axon_get_class(), NULL,
+				    p_axon_pcie->cdev_num, NULL, "pciep%d",
+				    i_board);
+
+		list_add_tail(&p_axon_pcie->list, &axon_pcie_list);
+	}
+
+	if (ret != 0) {
+		axon_usr_pcie_module_cleanup();
+	}
+
+	return ret;
+}
+
+module_init(axon_usr_pcie_module_init);
+module_exit(axon_usr_pcie_module_cleanup);
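For illustration only, not part of the patch: a minimal user-space sketch of how a process could map and poke the window exported by one of the pciep devices. The device path /dev/pciep0 and the 64 KiB length are assumptions; the path follows the "pciep%d" class device created above, and the length must be page aligned and within the window size enforced by axon_usr_pcie_mmap().

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define PCIEP_DEV	"/dev/pciep0"	/* assumed udev name for board 0 */
#define PCIEP_MAP_LEN	(64 * 1024)	/* hypothetical length, page aligned */

int main(void)
{
	volatile uint32_t *win;
	int fd;

	fd = open(PCIEP_DEV, O_RDWR);
	if (fd < 0) {
		perror("open " PCIEP_DEV);
		return EXIT_FAILURE;
	}

	/* offset 0 maps the start of the PCIe window; the driver adds its
	 * fixed PLB base and the Axon offset before remap_pfn_range() */
	win = mmap(NULL, PCIEP_MAP_LEN, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, 0);
	if (win == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return EXIT_FAILURE;
	}

	/* the pages are mapped non-cached and guarded, so these accesses go
	 * straight out on the bus */
	win[0] = 0xdeadbeef;
	printf("read back: 0x%08x\n", (unsigned int)win[0]);

	munmap((void *)win, PCIEP_MAP_LEN);
	close(fd);

	return EXIT_SUCCESS;
}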