author     Linus Torvalds  2016-03-22 17:09:14 -0700
committer  Linus Torvalds  2016-03-22 17:09:14 -0700
commit     a24e3d414e59ac76566dedcad1ed1d319a93ec14 (patch)
tree       a14aedc216dd4eb1633c7510b4b5dc2c61cc936b /drivers
parent     b91d9c6716319dcd9e6ffcfc9defaf79e705daab (diff)
parent     8fe9752ef10343a8edb603cb93abc2bfae34e748 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:
- more ocfs2 changes
- a few hotfixes
- Andy's compat cleanups
- misc fixes to fatfs, ptrace, coredump, cpumask, creds, eventfd,
panic, ipmi, kgdb, profile, kfifo, ubsan, etc.
- many rapidio updates: fixes, new drivers.
- kcov: kernel code coverage feature. Like gcov, but not
  "prohibitively expensive" (see the usage sketch after this list).
- extable code consolidation for various archs
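
The kcov item above is driven entirely from userspace through debugfs.
Below is a minimal usage sketch, assuming CONFIG_KCOV=y and a mounted
debugfs; the ioctl encodings follow the "kernel: add kcov code coverage"
patch in this series and should be read as illustrative rather than as a
verbatim copy of its documentation.

    /* Hedged sketch: collect coverage PCs for one syscall via kcov. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    #define KCOV_INIT_TRACE  _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE      _IO('c', 100)
    #define KCOV_DISABLE     _IO('c', 101)
    #define COVER_SIZE       (64 << 10)   /* recorded PCs */

    int main(void)
    {
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);
            if (fd < 0)
                    return 1;
            /* Size the per-task coverage buffer, in unsigned longs. */
            if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                    return 1;
            unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                        PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (cover == MAP_FAILED)
                    return 1;
            ioctl(fd, KCOV_ENABLE, 0);   /* start tracing this task */
            cover[0] = 0;                /* word 0 holds the PC count */
            read(-1, NULL, 0);           /* syscall under test */
            for (unsigned long i = 0; i < cover[0]; i++)
                    printf("0x%lx\n", cover[i + 1]);
            ioctl(fd, KCOV_DISABLE, 0);
            close(fd);
            return 0;
    }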
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (81 commits)
ia64/extable: use generic search and sort routines
x86/extable: use generic search and sort routines
s390/extable: use generic search and sort routines
alpha/extable: use generic search and sort routines
kernel/...: convert pr_warning to pr_warn
drivers: dma-coherent: use memset_io for DMA_MEMORY_IO mappings
drivers: dma-coherent: use MEMREMAP_WC for DMA_MEMORY_MAP
memremap: add MEMREMAP_WC flag
memremap: don't modify flags
kernel/signal.c: add compile-time check for __ARCH_SI_PREAMBLE_SIZE
mm/mprotect.c: don't imply PROT_EXEC on non-exec fs
ipc/sem: make semctl setting sempid consistent
ubsan: fix tree-wide -Wmaybe-uninitialized false positives
kfifo: fix sparse complaints
scripts/gdb: account for changes in module data structure
scripts/gdb: add cmdline reader command
scripts/gdb: add version command
kernel: add kcov code coverage
profile: hide unused functions when !CONFIG_PROC_FS
hpwdt: use nmi_panic() when kernel panics in NMI handler
...
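
Two of the patches above ("memremap: add MEMREMAP_WC flag" and the
dma-coherent conversions) change how per-device coherent memory is
mapped: regions declared with DMA_MEMORY_MAP are ordinary RAM, so they
are now mapped write-combining with memremap() instead of being
ioremap()ed, and cleared with memset() rather than memset_io(). A
condensed, hedged sketch of that mapping choice (helper names are
hypothetical; the real code is in the dma-coherent hunk below):

    #include <linux/io.h>
    #include <linux/dma-mapping.h>

    /* DMA_MEMORY_MAP regions behave like normal RAM: map them WC with
     * memremap(). Everything else stays MMIO and must be accessed with
     * memset_io()/memcpy_toio() and friends. */
    static void *coherent_area_map(phys_addr_t phys_addr, size_t size, int flags)
    {
            if (flags & DMA_MEMORY_MAP)
                    return memremap(phys_addr, size, MEMREMAP_WC);
            return (void __force *)ioremap(phys_addr, size);
    }

    static void coherent_area_unmap(void *base, int flags)
    {
            if (flags & DMA_MEMORY_MAP)
                    memunmap(base);
            else
                    iounmap((void __force __iomem *)base);
    }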
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/dma-coherent.c | 25
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c | 2
-rw-r--r--  drivers/firewire/core-cdev.c | 4
-rw-r--r--  drivers/firmware/efi/efivars.c | 2
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/hid/uhid.c | 2
-rw-r--r--  drivers/input/input-compat.h | 12
-rw-r--r--  drivers/net/rionet.c | 277
-rw-r--r--  drivers/rapidio/Kconfig | 8
-rw-r--r--  drivers/rapidio/devices/Makefile | 1
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 2720
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 1034
-rw-r--r--  drivers/rapidio/devices/tsi721.h | 87
-rw-r--r--  drivers/rapidio/devices/tsi721_dma.c | 397
-rw-r--r--  drivers/rapidio/rio-driver.c | 12
-rw-r--r--  drivers/rapidio/rio-scan.c | 135
-rw-r--r--  drivers/rapidio/rio.c | 433
-rw-r--r--  drivers/rapidio/rio.h | 5
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_internal.h | 2
-rw-r--r--  drivers/watchdog/hpwdt.c | 11
22 files changed, 4534 insertions, 642 deletions
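
Several of the per-driver hunks below (firewire core-cdev, efivars,
amdkfd, uhid, input-compat) belong to Andy's compat cleanup: user
pointer decoding now keys off whether the current syscall entered
through the compat path (in_compat_syscall()) instead of whether the
task happens to be 32-bit (is_compat_task()). A minimal restatement of
the pattern, with a hypothetical helper name:

    #include <linux/compat.h>
    #include <linux/types.h>

    /* Decide how to decode a user-supplied 64-bit pointer based on the
     * syscall ABI in use right now, not on the calling task's personality. */
    static void __user *example_u64_to_uptr(u64 value)
    {
            if (in_compat_syscall())        /* was: is_compat_task() */
                    return compat_ptr(value);
            return (void __user *)(unsigned long)value;
    }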
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 87b808374888..bdf28f7dd5e8 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -2,6 +2,7 @@ * Coherent per-device memory handling. * Borrowed from i386 */ +#include <linux/io.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> @@ -31,7 +32,10 @@ static bool dma_init_coherent_memory( if (!size) goto out; - mem_base = ioremap(phys_addr, size); + if (flags & DMA_MEMORY_MAP) + mem_base = memremap(phys_addr, size, MEMREMAP_WC); + else + mem_base = ioremap(phys_addr, size); if (!mem_base) goto out; @@ -54,8 +58,12 @@ static bool dma_init_coherent_memory( out: kfree(dma_mem); - if (mem_base) - iounmap(mem_base); + if (mem_base) { + if (flags & DMA_MEMORY_MAP) + memunmap(mem_base); + else + iounmap(mem_base); + } return false; } @@ -63,7 +71,11 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem) { if (!mem) return; - iounmap(mem->virt_base); + + if (mem->flags & DMA_MEMORY_MAP) + memunmap(mem->virt_base); + else + iounmap(mem->virt_base); kfree(mem->bitmap); kfree(mem); } @@ -175,7 +187,10 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, */ *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); *ret = mem->virt_base + (pageno << PAGE_SHIFT); - memset(*ret, 0, size); + if (mem->flags & DMA_MEMORY_MAP) + memset(*ret, 0, size); + else + memset_io(*ret, 0, size); spin_unlock_irqrestore(&mem->spinlock, flags); return 1; diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 096f0cef4da1..4facc7517a6a 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1140,7 +1140,7 @@ ipmi_nmi(unsigned int val, struct pt_regs *regs) the timer. So do so. */ pretimeout_since_last_heartbeat = 1; if (atomic_inc_and_test(&preop_panic_excl)) - panic(PFX "pre-timeout"); + nmi_panic(regs, PFX "pre-timeout"); } return NMI_HANDLED; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 36a7c2d89a01..aee149bdf4c0 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -221,7 +221,7 @@ struct inbound_phy_packet_event { #ifdef CONFIG_COMPAT static void __user *u64_to_uptr(u64 value) { - if (is_compat_task()) + if (in_compat_syscall()) return compat_ptr(value); else return (void __user *)(unsigned long)value; @@ -229,7 +229,7 @@ static void __user *u64_to_uptr(u64 value) static u64 uptr_to_u64(void __user *ptr) { - if (is_compat_task()) + if (in_compat_syscall()) return ptr_to_compat(ptr); else return (u64)(unsigned long)ptr; diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c index b23a271c6ae5..096adcbcb5a9 100644 --- a/drivers/firmware/efi/efivars.c +++ b/drivers/firmware/efi/efivars.c @@ -231,7 +231,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor, static inline bool is_compat(void) { - if (IS_ENABLED(CONFIG_COMPAT) && is_compat_task()) + if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall()) return true; return false; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index a15841eced4e..da99bbb74aeb 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -25,6 +25,9 @@ KASAN_SANITIZE := n UBSAN_SANITIZE := n OBJECT_FILES_NON_STANDARD := y +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
+KCOV_INSTRUMENT := n + lib-y := efi-stub-helper.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index d2b49c026cf6..07ac724e3ec9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -107,7 +107,7 @@ static int kfd_open(struct inode *inode, struct file *filep) if (iminor(inode) != 0) return -ENODEV; - is_32bit_user_mode = is_compat_task(); + is_32bit_user_mode = in_compat_syscall(); if (is_32bit_user_mode == true) { dev_warn(kfd_device, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a902ae037398..ac005796b71c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -311,7 +311,7 @@ static struct kfd_process *create_process(const struct task_struct *thread) goto err_process_pqm_init; /* init process apertures*/ - process->is_32bit_user_mode = is_compat_task(); + process->is_32bit_user_mode = in_compat_syscall(); if (kfd_init_apertures(process) != 0) goto err_init_apretures; diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c index e094c572b86e..16b6f11a0700 100644 --- a/drivers/hid/uhid.c +++ b/drivers/hid/uhid.c @@ -384,7 +384,7 @@ struct uhid_create_req_compat { static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { - if (is_compat_task()) { + if (in_compat_syscall()) { u32 type; if (get_user(type, buffer)) diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h index 148f66fe3205..0f25878d5fa2 100644 --- a/drivers/input/input-compat.h +++ b/drivers/input/input-compat.h @@ -17,17 +17,7 @@ #ifdef CONFIG_COMPAT -/* Note to the author of this code: did it ever occur to - you why the ifdefs are needed? Think about it again. 
-AK */ -#if defined(CONFIG_X86_64) || defined(CONFIG_TILE) -# define INPUT_COMPAT_TEST is_compat_task() -#elif defined(CONFIG_S390) -# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT) -#elif defined(CONFIG_MIPS) -# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT_ADDR) -#else -# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT) -#endif +#define INPUT_COMPAT_TEST in_compat_syscall() struct input_event_compat { struct compat_timeval time; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 01f08a7751f7..9cfe6aeac84e 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -24,6 +24,7 @@ #include <linux/skbuff.h> #include <linux/crc32.h> #include <linux/ethtool.h> +#include <linux/reboot.h> #define DRV_NAME "rionet" #define DRV_VERSION "0.3" @@ -48,6 +49,8 @@ MODULE_LICENSE("GPL"); #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE #define RIONET_MAX_NETS 8 +#define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE +#define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN) struct rionet_private { struct rio_mport *mport; @@ -60,6 +63,7 @@ struct rionet_private { spinlock_t lock; spinlock_t tx_lock; u32 msg_enable; + bool open; }; struct rionet_peer { @@ -71,6 +75,7 @@ struct rionet_peer { struct rionet_net { struct net_device *ndev; struct list_head peers; + spinlock_t lock; /* net info access lock */ struct rio_dev **active; int nact; /* number of active peers */ }; @@ -232,26 +237,32 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u struct net_device *ndev = dev_id; struct rionet_private *rnet = netdev_priv(ndev); struct rionet_peer *peer; + unsigned char netid = rnet->mport->id; if (netif_msg_intr(rnet)) printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x", DRV_NAME, sid, tid, info); if (info == RIONET_DOORBELL_JOIN) { - if (!nets[rnet->mport->id].active[sid]) { - list_for_each_entry(peer, - &nets[rnet->mport->id].peers, node) { + if (!nets[netid].active[sid]) { + spin_lock(&nets[netid].lock); + list_for_each_entry(peer, &nets[netid].peers, node) { if (peer->rdev->destid == sid) { - nets[rnet->mport->id].active[sid] = - peer->rdev; - nets[rnet->mport->id].nact++; + nets[netid].active[sid] = peer->rdev; + nets[netid].nact++; } } + spin_unlock(&nets[netid].lock); + rio_mport_send_doorbell(mport, sid, RIONET_DOORBELL_JOIN); } } else if (info == RIONET_DOORBELL_LEAVE) { - nets[rnet->mport->id].active[sid] = NULL; - nets[rnet->mport->id].nact--; + spin_lock(&nets[netid].lock); + if (nets[netid].active[sid]) { + nets[netid].active[sid] = NULL; + nets[netid].nact--; + } + spin_unlock(&nets[netid].lock); } else { if (netif_msg_intr(rnet)) printk(KERN_WARNING "%s: unhandled doorbell\n", @@ -280,7 +291,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo struct net_device *ndev = dev_id; struct rionet_private *rnet = netdev_priv(ndev); - spin_lock(&rnet->lock); + spin_lock(&rnet->tx_lock); if (netif_msg_intr(rnet)) printk(KERN_INFO @@ -299,14 +310,16 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo if (rnet->tx_cnt < RIONET_TX_RING_SIZE) netif_wake_queue(ndev); - spin_unlock(&rnet->lock); + spin_unlock(&rnet->tx_lock); } static int rionet_open(struct net_device *ndev) { int i, rc = 0; - struct rionet_peer *peer, *tmp; + struct rionet_peer *peer; struct rionet_private *rnet = netdev_priv(ndev); + unsigned char netid = rnet->mport->id; + unsigned long flags; if (netif_msg_ifup(rnet)) printk(KERN_INFO "%s: open\n", DRV_NAME); @@ -345,20 
+358,13 @@ static int rionet_open(struct net_device *ndev) netif_carrier_on(ndev); netif_start_queue(ndev); - list_for_each_entry_safe(peer, tmp, - &nets[rnet->mport->id].peers, node) { - if (!(peer->res = rio_request_outb_dbell(peer->rdev, - RIONET_DOORBELL_JOIN, - RIONET_DOORBELL_LEAVE))) - { - printk(KERN_ERR "%s: error requesting doorbells\n", - DRV_NAME); - continue; - } - + spin_lock_irqsave(&nets[netid].lock, flags); + list_for_each_entry(peer, &nets[netid].peers, node) { /* Send a join message */ rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); } + spin_unlock_irqrestore(&nets[netid].lock, flags); + rnet->open = true; out: return rc; @@ -367,7 +373,9 @@ static int rionet_open(struct net_device *ndev) static int rionet_close(struct net_device *ndev) { struct rionet_private *rnet = netdev_priv(ndev); - struct rionet_peer *peer, *tmp; + struct rionet_peer *peer; + unsigned char netid = rnet->mport->id; + unsigned long flags; int i; if (netif_msg_ifup(rnet)) @@ -375,18 +383,21 @@ static int rionet_close(struct net_device *ndev) netif_stop_queue(ndev); netif_carrier_off(ndev); + rnet->open = false; for (i = 0; i < RIONET_RX_RING_SIZE; i++) kfree_skb(rnet->rx_skb[i]); - list_for_each_entry_safe(peer, tmp, - &nets[rnet->mport->id].peers, node) { - if (nets[rnet->mport->id].active[peer->rdev->destid]) { + spin_lock_irqsave(&nets[netid].lock, flags); + list_for_each_entry(peer, &nets[netid].peers, node) { + if (nets[netid].active[peer->rdev->destid]) { rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE); - nets[rnet->mport->id].active[peer->rdev->destid] = NULL; + nets[netid].active[peer->rdev->destid] = NULL; } - rio_release_outb_dbell(peer->rdev, peer->res); + if (peer->res) + rio_release_outb_dbell(peer->rdev, peer->res); } + spin_unlock_irqrestore(&nets[netid].lock, flags); rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN, RIONET_DOORBELL_LEAVE); @@ -400,22 +411,38 @@ static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif) { struct rio_dev *rdev = to_rio_dev(dev); unsigned char netid = rdev->net->hport->id; - struct rionet_peer *peer, *tmp; + struct rionet_peer *peer; + int state, found = 0; + unsigned long flags; - if (dev_rionet_capable(rdev)) { - list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) { - if (peer->rdev == rdev) { - if (nets[netid].active[rdev->destid]) { - nets[netid].active[rdev->destid] = NULL; - nets[netid].nact--; + if (!dev_rionet_capable(rdev)) + return; + + spin_lock_irqsave(&nets[netid].lock, flags); + list_for_each_entry(peer, &nets[netid].peers, node) { + if (peer->rdev == rdev) { + list_del(&peer->node); + if (nets[netid].active[rdev->destid]) { + state = atomic_read(&rdev->state); + if (state != RIO_DEVICE_GONE && + state != RIO_DEVICE_INITIALIZING) { + rio_send_doorbell(rdev, + RIONET_DOORBELL_LEAVE); } - - list_del(&peer->node); - kfree(peer); - break; + nets[netid].active[rdev->destid] = NULL; + nets[netid].nact--; } + found = 1; + break; } } + spin_unlock_irqrestore(&nets[netid].lock, flags); + + if (found) { + if (peer->res) + rio_release_outb_dbell(rdev, peer->res); + kfree(peer); + } } static void rionet_get_drvinfo(struct net_device *ndev, @@ -443,6 +470,17 @@ static void rionet_set_msglevel(struct net_device *ndev, u32 value) rnet->msg_enable = value; } +static int rionet_change_mtu(struct net_device *ndev, int new_mtu) +{ + if ((new_mtu < 68) || (new_mtu > RIONET_MAX_MTU)) { + printk(KERN_ERR "%s: Invalid MTU size %d\n", + ndev->name, new_mtu); + return -EINVAL; + } + ndev->mtu = new_mtu; + return 0; +} 
+ static const struct ethtool_ops rionet_ethtool_ops = { .get_drvinfo = rionet_get_drvinfo, .get_msglevel = rionet_get_msglevel, @@ -454,7 +492,7 @@ static const struct net_device_ops rionet_netdev_ops = { .ndo_open = rionet_open, .ndo_stop = rionet_close, .ndo_start_xmit = rionet_start_xmit, - .ndo_change_mtu = eth_change_mtu, + .ndo_change_mtu = rionet_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; @@ -478,6 +516,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) /* Set up private area */ rnet = netdev_priv(ndev); rnet->mport = mport; + rnet->open = false; /* Set the default MAC address */ device_id = rio_local_get_device_id(mport); @@ -489,7 +528,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) ndev->dev_addr[5] = device_id & 0xff; ndev->netdev_ops = &rionet_netdev_ops; - ndev->mtu = RIO_MAX_MSG_SIZE - 14; + ndev->mtu = RIONET_MAX_MTU; ndev->features = NETIF_F_LLTX; SET_NETDEV_DEV(ndev, &mport->dev); ndev->ethtool_ops = &rionet_ethtool_ops; @@ -500,8 +539,11 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL; rc = register_netdev(ndev); - if (rc != 0) + if (rc != 0) { + free_pages((unsigned long)nets[mport->id].active, + get_order(rionet_active_bytes)); goto out; + } printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n", ndev->name, @@ -515,8 +557,6 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev) return rc; } -static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1]; - static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) { int rc = -ENODEV; @@ -525,19 +565,16 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) struct net_device *ndev = NULL; struct rio_dev *rdev = to_rio_dev(dev); unsigned char netid = rdev->net->hport->id; - int oldnet; if (netid >= RIONET_MAX_NETS) return rc; - oldnet = test_and_set_bit(netid, net_table); - /* * If first time through this net, make sure local device is rionet * capable and setup netdev (this step will be skipped in later probes * on the same net). */ - if (!oldnet) { + if (!nets[netid].ndev) { rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR, &lsrc_ops); rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR, @@ -555,30 +592,56 @@ static int rionet_add_dev(struct device *dev, struct subsys_interface *sif) rc = -ENOMEM; goto out; } - nets[netid].ndev = ndev; + rc = rionet_setup_netdev(rdev->net->hport, ndev); if (rc) { printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n", DRV_NAME, rc); + free_netdev(ndev); goto out; } INIT_LIST_HEAD(&nets[netid].peers); + spin_lock_init(&nets[netid].lock); nets[netid].nact = 0; - } else if (nets[netid].ndev == NULL) - goto out; + nets[netid].ndev = ndev; + } /* * If the remote device has mailbox/doorbell capabilities, * add it to the peer list. 
*/ if (dev_rionet_capable(rdev)) { - if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) { + struct rionet_private *rnet; + unsigned long flags; + + rnet = netdev_priv(nets[netid].ndev); + + peer = kzalloc(sizeof(*peer), GFP_KERNEL); + if (!peer) { rc = -ENOMEM; goto out; } peer->rdev = rdev; + peer->res = rio_request_outb_dbell(peer->rdev, + RIONET_DOORBELL_JOIN, + RIONET_DOORBELL_LEAVE); + if (!peer->res) { + pr_err("%s: error requesting doorbells\n", DRV_NAME); + kfree(peer); + rc = -ENOMEM; + goto out; + } + + spin_lock_irqsave(&nets[netid].lock, flags); list_add_tail(&peer->node, &nets[netid].peers); + spin_unlock_irqrestore(&nets[netid].lock, flags); + pr_debug("%s: %s add peer %s\n", + DRV_NAME, __func__, rio_name(rdev)); + + /* If netdev is already opened, send join request to new peer */ + if (rnet->open) + rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN); } return 0; @@ -586,6 +649,61 @@ out: return rc; } +static int rionet_shutdown(struct notifier_block *nb, unsigned long code, + void *unused) +{ + struct rionet_peer *peer; + unsigned long flags; + int i; + + pr_debug("%s: %s\n", DRV_NAME, __func__); + + for (i = 0; i < RIONET_MAX_NETS; i++) { + if (!nets[i].ndev) + continue; + + spin_lock_irqsave(&nets[i].lock, flags); + list_for_each_entry(peer, &nets[i].peers, node) { + if (nets[i].active[peer->rdev->destid]) { + rio_send_doorbell(peer->rdev, + RIONET_DOORBELL_LEAVE); + nets[i].active[peer->rdev->destid] = NULL; + } + } + spin_unlock_irqrestore(&nets[i].lock, flags); + } + + return NOTIFY_DONE; +} + +static void rionet_remove_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = to_rio_mport(dev); + struct net_device *ndev; + int id = mport->id; + + pr_debug("%s %s\n", __func__, mport->name); + + WARN(nets[id].nact, "%s called when connected to %d peers\n", + __func__, nets[id].nact); + WARN(!nets[id].ndev, "%s called for mport without NDEV\n", + __func__); + + if (nets[id].ndev) { + ndev = nets[id].ndev; + netif_stop_queue(ndev); + unregister_netdev(ndev); + + free_pages((unsigned long)nets[id].active, + get_order(sizeof(void *) * + RIO_MAX_ROUTE_ENTRIES(mport->sys_size))); + nets[id].active = NULL; + free_netdev(ndev); + nets[id].ndev = NULL; + } +} + #ifdef MODULE static struct rio_device_id rionet_id_table[] = { {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}, @@ -602,40 +720,43 @@ static struct subsys_interface rionet_interface = { .remove_dev = rionet_remove_dev, }; +static struct notifier_block rionet_notifier = { + .notifier_call = rionet_shutdown, +}; + +/* the rio_mport_interface is used to handle local mport devices */ +static struct class_interface rio_mport_interface __refdata = { + .class = &rio_mport_class, + .add_dev = NULL, + .remove_dev = rionet_remove_mport, +}; + static int __init rionet_init(void) { + int ret; + + ret = register_reboot_notifier(&rionet_notifier); + if (ret) { + pr_err("%s: failed to register reboot notifier (err=%d)\n", + DRV_NAME, ret); + return ret; + } + + ret = class_interface_register(&rio_mport_interface); + if (ret) { + pr_err("%s: class_interface_register error: %d\n", + DRV_NAME, ret); + return ret; + } + return subsys_interface_register(&rionet_interface); } static void __exit rionet_exit(void) { - struct rionet_private *rnet; - struct net_device *ndev; - struct rionet_peer *peer, *tmp; - int i; - - for (i = 0; i < RIONET_MAX_NETS; i++) { - if (nets[i].ndev != NULL) { - ndev = nets[i].ndev; - rnet = netdev_priv(ndev); - unregister_netdev(ndev); - - list_for_each_entry_safe(peer, - tmp, 
&nets[i].peers, node) { - list_del(&peer->node); - kfree(peer); - } - - free_pages((unsigned long)nets[i].active, - get_order(sizeof(void *) * - RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size))); - nets[i].active = NULL; - - free_netdev(ndev); - } - } - + unregister_reboot_notifier(&rionet_notifier); subsys_interface_unregister(&rionet_interface); + class_interface_unregister(&rio_mport_interface); } late_initcall(rionet_init); diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig index 3e3be57e9a1a..b5a10d3c92c7 100644 --- a/drivers/rapidio/Kconfig +++ b/drivers/rapidio/Kconfig @@ -67,6 +67,14 @@ config RAPIDIO_ENUM_BASIC endchoice +config RAPIDIO_MPORT_CDEV + tristate "RapidIO /dev mport device driver" + depends on RAPIDIO + help + This option includes generic RapidIO mport device driver which + allows to user space applications to perform RapidIO-specific + operations through selected RapidIO mport. + menu "RapidIO Switch drivers" depends on RAPIDIO diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile index 9432c494cf57..927dbf89592b 100644 --- a/drivers/rapidio/devices/Makefile +++ b/drivers/rapidio/devices/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o tsi721_mport-y := tsi721.o tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o +obj-$(CONFIG_RAPIDIO_MPORT_CDEV) += rio_mport_cdev.o diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c new file mode 100644 index 000000000000..9607bc826460 --- /dev/null +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -0,0 +1,2720 @@ +/* + * RapidIO mport character device + * + * Copyright 2014-2015 Integrated Device Technology, Inc. + * Alexandre Bounine <alexandre.bounine@idt.com> + * Copyright 2014-2015 Prodrive Technologies + * Andre van Herk <andre.van.herk@prodrive-technologies.com> + * Jerry Jacobs <jerry.jacobs@prodrive-technologies.com> + * Copyright (C) 2014 Texas Instruments Incorporated + * Aurelien Jacquiot <a-jacquiot@ti.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/cdev.h> +#include <linux/ioctl.h> +#include <linux/uaccess.h> +#include <linux/list.h> +#include <linux/fs.h> +#include <linux/err.h> +#include <linux/net.h> +#include <linux/poll.h> +#include <linux/spinlock.h> +#include <linux/sched.h> +#include <linux/kfifo.h> + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mman.h> + +#include <linux/dma-mapping.h> +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +#include <linux/dmaengine.h> +#endif + +#include <linux/rio.h> +#include <linux/rio_ids.h> +#include <linux/rio_drv.h> +#include <linux/rio_mport_cdev.h> + +#include "../rio.h" + +#define DRV_NAME "rio_mport" +#define DRV_PREFIX DRV_NAME ": " +#define DEV_NAME "rio_mport" +#define DRV_VERSION "1.0.0" + +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_RDEV = BIT(3), /* RapidIO device add/remove */ + DBG_DMA = BIT(4), /* DMA transfer messages */ + DBG_MMAP = BIT(5), /* mapping messages */ + DBG_IBW = BIT(6), /* inbound window */ + DBG_EVENT = BIT(7), /* event handling messages */ + DBG_OBW = BIT(8), /* outbound window messages */ + DBG_DBELL = BIT(9), /* doorbell messages */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +#define rmcd_debug(level, fmt, arg...) \ + do { \ + if (DBG_##level & dbg_level) \ + pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \ + } while (0) +#else +#define rmcd_debug(level, fmt, arg...) \ + no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg) +#endif + +#define rmcd_warn(fmt, arg...) \ + pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg) + +#define rmcd_error(fmt, arg...) \ + pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg) + +MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>"); +MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>"); +MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>"); +MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>"); +MODULE_DESCRIPTION("RapidIO mport character device driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static int dma_timeout = 3000; /* DMA transfer timeout in msec */ +module_param(dma_timeout, int, S_IRUGO); +MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)"); + +#ifdef DEBUG +static u32 dbg_level = DBG_NONE; +module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO); +MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif + +/* + * An internal DMA coherent buffer + */ +struct mport_dma_buf { + void *ib_base; + dma_addr_t ib_phys; + u32 ib_size; + u64 ib_rio_base; + bool ib_map; + struct file *filp; +}; + +/* + * Internal memory mapping structure + */ +enum rio_mport_map_dir { + MAP_INBOUND, + MAP_OUTBOUND, + MAP_DMA, +}; + +struct rio_mport_mapping { + struct list_head node; + struct mport_dev *md; + enum rio_mport_map_dir dir; + u32 rioid; + u64 rio_addr; + dma_addr_t phys_addr; /* for mmap */ + void *virt_addr; /* kernel address, for dma_free_coherent */ + u64 size; + struct kref ref; /* refcount of vmas sharing the mapping */ + struct file *filp; +}; + +struct rio_mport_dma_map { + int valid; + uint64_t length; + void *vaddr; + dma_addr_t paddr; +}; + +#define MPORT_MAX_DMA_BUFS 16 +#define MPORT_EVENT_DEPTH 10 + +/* + * mport_dev driver-specific structure that represents mport device + * @active mport device status flag + * @node list node to 
maintain list of registered mports + * @cdev character device + * @dev associated device object + * @mport associated subsystem's master port device object + * @buf_mutex lock for buffer handling + * @file_mutex - lock for open files list + * @file_list - list of open files on given mport + * @properties properties of this mport + * @portwrites queue of inbound portwrites + * @pw_lock lock for port write queue + * @mappings queue for memory mappings + * @dma_chan DMA channels associated with this device + * @dma_ref: + * @comp: + */ +struct mport_dev { + atomic_t active; + struct list_head node; + struct cdev cdev; + struct device dev; + struct rio_mport *mport; + struct mutex buf_mutex; + struct mutex file_mutex; + struct list_head file_list; + struct rio_mport_properties properties; + struct list_head doorbells; + spinlock_t db_lock; + struct list_head portwrites; + spinlock_t pw_lock; + struct list_head mappings; +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct dma_chan *dma_chan; + struct kref dma_ref; + struct completion comp; +#endif +}; + +/* + * mport_cdev_priv - data structure specific to individual file object + * associated with an open device + * @md master port character device object + * @async_queue - asynchronous notification queue + * @list - file objects tracking list + * @db_filters inbound doorbell filters for this descriptor + * @pw_filters portwrite filters for this descriptor + * @event_fifo event fifo for this descriptor + * @event_rx_wait wait queue for this descriptor + * @fifo_lock lock for event_fifo + * @event_mask event mask for this descriptor + * @dmach DMA engine channel allocated for specific file object + */ +struct mport_cdev_priv { + struct mport_dev *md; + struct fasync_struct *async_queue; + struct list_head list; + struct list_head db_filters; + struct list_head pw_filters; + struct kfifo event_fifo; + wait_queue_head_t event_rx_wait; + spinlock_t fifo_lock; + unsigned int event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct dma_chan *dmach; + struct list_head async_list; + struct list_head pend_list; + spinlock_t req_lock; + struct mutex dma_lock; + struct kref dma_ref; + struct completion comp; +#endif +}; + +/* + * rio_mport_pw_filter - structure to describe a portwrite filter + * md_node node in mport device's list + * priv_node node in private file object's list + * priv reference to private data + * filter actual portwrite filter + */ +struct rio_mport_pw_filter { + struct list_head md_node; + struct list_head priv_node; + struct mport_cdev_priv *priv; + struct rio_pw_filter filter; +}; + +/* + * rio_mport_db_filter - structure to describe a doorbell filter + * @data_node reference to device node + * @priv_node node in private data + * @priv reference to private data + * @filter actual doorbell filter + */ +struct rio_mport_db_filter { + struct list_head data_node; + struct list_head priv_node; + struct mport_cdev_priv *priv; + struct rio_doorbell_filter filter; +}; + +static LIST_HEAD(mport_devs); +static DEFINE_MUTEX(mport_devs_lock); + +#if (0) /* used by commented out portion of poll function : FIXME */ +static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait); +#endif + +static struct class *dev_class; +static dev_t dev_number; + +static struct workqueue_struct *dma_wq; + +static void mport_release_mapping(struct kref *ref); + +static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg, + int local) +{ + struct rio_mport *mport = priv->md->mport; + struct rio_mport_maint_io maint_io; + u32 *buffer; + 
u32 offset; + size_t length; + int ret, i; + + if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) + return -EFAULT; + + if ((maint_io.offset % 4) || + (maint_io.length == 0) || (maint_io.length % 4)) + return -EINVAL; + + buffer = vmalloc(maint_io.length); + if (buffer == NULL) + return -ENOMEM; + length = maint_io.length/sizeof(u32); + offset = maint_io.offset; + + for (i = 0; i < length; i++) { + if (local) + ret = __rio_local_read_config_32(mport, + offset, &buffer[i]); + else + ret = rio_mport_read_config_32(mport, maint_io.rioid, + maint_io.hopcount, offset, &buffer[i]); + if (ret) + goto out; + + offset += 4; + } + + if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length))) + ret = -EFAULT; +out: + vfree(buffer); + return ret; +} + +static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg, + int local) +{ + struct rio_mport *mport = priv->md->mport; + struct rio_mport_maint_io maint_io; + u32 *buffer; + u32 offset; + size_t length; + int ret = -EINVAL, i; + + if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io)))) + return -EFAULT; + + if ((maint_io.offset % 4) || + (maint_io.length == 0) || (maint_io.length % 4)) + return -EINVAL; + + buffer = vmalloc(maint_io.length); + if (buffer == NULL) + return -ENOMEM; + length = maint_io.length; + + if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) { + ret = -EFAULT; + goto out; + } + + offset = maint_io.offset; + length /= sizeof(u32); + + for (i = 0; i < length; i++) { + if (local) + ret = __rio_local_write_config_32(mport, + offset, buffer[i]); + else + ret = rio_mport_write_config_32(mport, maint_io.rioid, + maint_io.hopcount, + offset, buffer[i]); + if (ret) + goto out; + + offset += 4; + } + +out: + vfree(buffer); + return ret; +} + + +/* + * Inbound/outbound memory mapping functions + */ +static int +rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, + u32 rioid, u64 raddr, u32 size, + dma_addr_t *paddr) +{ + struct rio_mport *mport = md->mport; + struct rio_mport_mapping *map; + int ret; + + rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size); + + map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr); + if (ret < 0) + goto err_map_outb; + + map->dir = MAP_OUTBOUND; + map->rioid = rioid; + map->rio_addr = raddr; + map->size = size; + map->phys_addr = *paddr; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + list_add_tail(&map->node, &md->mappings); + return 0; +err_map_outb: + kfree(map); + return ret; +} + +static int +rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, + u32 rioid, u64 raddr, u32 size, + dma_addr_t *paddr) +{ + struct rio_mport_mapping *map; + int err = -ENOMEM; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (map->dir != MAP_OUTBOUND) + continue; + if (rioid == map->rioid && + raddr == map->rio_addr && size == map->size) { + *paddr = map->phys_addr; + err = 0; + break; + } else if (rioid == map->rioid && + raddr < (map->rio_addr + map->size - 1) && + (raddr + size) > map->rio_addr) { + err = -EBUSY; + break; + } + } + + /* If not found, create new */ + if (err == -ENOMEM) + err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, + size, paddr); + mutex_unlock(&md->buf_mutex); + return err; +} + +static int rio_mport_obw_map(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct 
mport_dev *data = priv->md; + struct rio_mmap map; + dma_addr_t paddr; + int ret; + + if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + return -EFAULT; + + rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx", + map.rioid, map.rio_addr, map.length); + + ret = rio_mport_get_outbound_mapping(data, filp, map.rioid, + map.rio_addr, map.length, &paddr); + if (ret < 0) { + rmcd_error("Failed to set OBW err= %d", ret); + return ret; + } + + map.handle = paddr; + + if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) + return -EFAULT; + return 0; +} + +/* + * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space + * + * @priv: driver private data + * @arg: buffer handle returned by allocation routine + */ +static int rio_mport_obw_free(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + struct rio_mport_mapping *map, *_map; + + if (!md->mport->ops->unmap_outb) + return -EPROTONOSUPPORT; + + if (copy_from_user(&handle, arg, sizeof(u64))) + return -EFAULT; + + rmcd_debug(OBW, "h=0x%llx", handle); + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) { + if (map->filp == filp) { + rmcd_debug(OBW, "kref_put h=0x%llx", handle); + map->filp = NULL; + kref_put(&map->ref, mport_release_mapping); + } + break; + } + } + mutex_unlock(&md->buf_mutex); + + return 0; +} + +/* + * maint_hdid_set() - Set the host Device ID + * @priv: driver private data + * @arg: Device Id + */ +static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + uint16_t hdid; + + if (copy_from_user(&hdid, arg, sizeof(uint16_t))) + return -EFAULT; + + md->mport->host_deviceid = hdid; + md->properties.hdid = hdid; + rio_local_set_device_id(md->mport, hdid); + + rmcd_debug(MPORT, "Set host device Id to %d", hdid); + + return 0; +} + +/* + * maint_comptag_set() - Set the host Component Tag + * @priv: driver private data + * @arg: Component Tag + */ +static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + uint32_t comptag; + + if (copy_from_user(&comptag, arg, sizeof(uint32_t))) + return -EFAULT; + + rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); + + rmcd_debug(MPORT, "Set host Component Tag to %d", comptag); + + return 0; +} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +struct mport_dma_req { + struct list_head node; + struct file *filp; + struct mport_cdev_priv *priv; + enum rio_transfer_sync sync; + struct sg_table sgt; + struct page **page_list; + unsigned int nr_pages; + struct rio_mport_mapping *map; + struct dma_chan *dmach; + enum dma_data_direction dir; + dma_cookie_t cookie; + enum dma_status status; + struct completion req_comp; +}; + +struct mport_faf_work { + struct work_struct work; + struct mport_dma_req *req; +}; + +static void mport_release_def_dma(struct kref *dma_ref) +{ + struct mport_dev *md = + container_of(dma_ref, struct mport_dev, dma_ref); + + rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); + rio_release_dma(md->dma_chan); + md->dma_chan = NULL; +} + +static void mport_release_dma(struct kref *dma_ref) +{ + struct mport_cdev_priv *priv = + container_of(dma_ref, struct mport_cdev_priv, dma_ref); + + rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id); + complete(&priv->comp); +} + +static void dma_req_free(struct mport_dma_req *req) +{ + struct mport_cdev_priv 
*priv = req->priv; + unsigned int i; + + dma_unmap_sg(req->dmach->device->dev, + req->sgt.sgl, req->sgt.nents, req->dir); + sg_free_table(&req->sgt); + if (req->page_list) { + for (i = 0; i < req->nr_pages; i++) + put_page(req->page_list[i]); + kfree(req->page_list); + } + + if (req->map) { + mutex_lock(&req->map->md->buf_mutex); + kref_put(&req->map->ref, mport_release_mapping); + mutex_unlock(&req->map->md->buf_mutex); + } + + kref_put(&priv->dma_ref, mport_release_dma); + + kfree(req); +} + +static void dma_xfer_callback(void *param) +{ + struct mport_dma_req *req = (struct mport_dma_req *)param; + struct mport_cdev_priv *priv = req->priv; + + req->status = dma_async_is_tx_complete(priv->dmach, req->cookie, + NULL, NULL); + complete(&req->req_comp); +} + +static void dma_faf_cleanup(struct work_struct *_work) +{ + struct mport_faf_work *work = container_of(_work, + struct mport_faf_work, work); + struct mport_dma_req *req = work->req; + + dma_req_free(req); + kfree(work); +} + +static void dma_faf_callback(void *param) +{ + struct mport_dma_req *req = (struct mport_dma_req *)param; + struct mport_faf_work *work; + + work = kmalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; + + INIT_WORK(&work->work, dma_faf_cleanup); + work->req = req; + queue_work(dma_wq, &work->work); +} + +/* + * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA + * transfer object. + * Returns pointer to DMA transaction descriptor allocated by DMA driver on + * success or ERR_PTR (and/or NULL) if failed. Caller must check returned + * non-NULL pointer using IS_ERR macro. + */ +static struct dma_async_tx_descriptor +*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer, + struct sg_table *sgt, int nents, enum dma_transfer_direction dir, + enum dma_ctrl_flags flags) +{ + struct rio_dma_data tx_data; + + tx_data.sg = sgt->sgl; + tx_data.sg_len = nents; + tx_data.rio_addr_u = 0; + tx_data.rio_addr = transfer->rio_addr; + if (dir == DMA_MEM_TO_DEV) { + switch (transfer->method) { + case RIO_EXCHANGE_NWRITE: + tx_data.wr_type = RDW_ALL_NWRITE; + break; + case RIO_EXCHANGE_NWRITE_R_ALL: + tx_data.wr_type = RDW_ALL_NWRITE_R; + break; + case RIO_EXCHANGE_NWRITE_R: + tx_data.wr_type = RDW_LAST_NWRITE_R; + break; + case RIO_EXCHANGE_DEFAULT: + tx_data.wr_type = RDW_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + } + + return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags); +} + +/* Request DMA channel associated with this mport device. + * Try to request DMA channel for every new process that opened given + * mport. If a new DMA channel is not available use default channel + * which is the first DMA channel opened on mport device. 
+ */ +static int get_dma_channel(struct mport_cdev_priv *priv) +{ + mutex_lock(&priv->dma_lock); + if (!priv->dmach) { + priv->dmach = rio_request_mport_dma(priv->md->mport); + if (!priv->dmach) { + /* Use default DMA channel if available */ + if (priv->md->dma_chan) { + priv->dmach = priv->md->dma_chan; + kref_get(&priv->md->dma_ref); + } else { + rmcd_error("Failed to get DMA channel"); + mutex_unlock(&priv->dma_lock); + return -ENODEV; + } + } else if (!priv->md->dma_chan) { + /* Register default DMA channel if we do not have one */ + priv->md->dma_chan = priv->dmach; + kref_init(&priv->md->dma_ref); + rmcd_debug(DMA, "Register DMA_chan %d as default", + priv->dmach->chan_id); + } + + kref_init(&priv->dma_ref); + init_completion(&priv->comp); + } + + kref_get(&priv->dma_ref); + mutex_unlock(&priv->dma_lock); + return 0; +} + +static void put_dma_channel(struct mport_cdev_priv *priv) +{ + kref_put(&priv->dma_ref, mport_release_dma); +} + +/* + * DMA transfer functions + */ +static int do_dma_request(struct mport_dma_req *req, + struct rio_transfer_io *xfer, + enum rio_transfer_sync sync, int nents) +{ + struct mport_cdev_priv *priv; + struct sg_table *sgt; + struct dma_chan *chan; + struct dma_async_tx_descriptor *tx; + dma_cookie_t cookie; + unsigned long tmo = msecs_to_jiffies(dma_timeout); + enum dma_transfer_direction dir; + long wret; + int ret = 0; + + priv = req->priv; + sgt = &req->sgt; + + chan = priv->dmach; + dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; + + rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s", + current->comm, task_pid_nr(current), + dev_name(&chan->dev->device), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + + /* Initialize DMA transaction request */ + tx = prep_dma_xfer(chan, xfer, sgt, nents, dir, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + + if (!tx) { + rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx", + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + xfer->rio_addr, xfer->length); + ret = -EIO; + goto err_out; + } else if (IS_ERR(tx)) { + ret = PTR_ERR(tx); + rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret, + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + xfer->rio_addr, xfer->length); + goto err_out; + } + + if (sync == RIO_TRANSFER_FAF) + tx->callback = dma_faf_callback; + else + tx->callback = dma_xfer_callback; + tx->callback_param = req; + + req->dmach = chan; + req->sync = sync; + req->status = DMA_IN_PROGRESS; + init_completion(&req->req_comp); + + cookie = dmaengine_submit(tx); + req->cookie = cookie; + + rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + + if (dma_submit_error(cookie)) { + rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)", + cookie, xfer->rio_addr, xfer->length); + ret = -EIO; + goto err_out; + } + + dma_async_issue_pending(chan); + + if (sync == RIO_TRANSFER_ASYNC) { + spin_lock(&priv->req_lock); + list_add_tail(&req->node, &priv->async_list); + spin_unlock(&priv->req_lock); + return cookie; + } else if (sync == RIO_TRANSFER_FAF) + return 0; + + wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); + + if (wret == 0) { + /* Timeout on wait occurred */ + rmcd_error("%s(%d) timed out waiting for DMA_%s %d", + current->comm, task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + return -ETIMEDOUT; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal but DMA may + * be in progress + */ + rmcd_error("%s(%d) wait for DMA_%s %d was interrupted", + current->comm, 
task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie); + return -EINTR; + } + + if (req->status != DMA_COMPLETE) { + /* DMA transaction completion was signaled with error */ + rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)", + current->comm, task_pid_nr(current), + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", + cookie, req->status, ret); + ret = -EIO; + } + +err_out: + return ret; +} + +/* + * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from + * the remote RapidIO device + * @filp: file pointer associated with the call + * @transfer_mode: DMA transfer mode + * @sync: synchronization mode + * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR + * DMA_DEV_TO_MEM = read) + * @xfer: data transfer descriptor structure + */ +static int +rio_dma_transfer(struct file *filp, uint32_t transfer_mode, + enum rio_transfer_sync sync, enum dma_data_direction dir, + struct rio_transfer_io *xfer) +{ + struct mport_cdev_priv *priv = filp->private_data; + unsigned long nr_pages = 0; + struct page **page_list = NULL; + struct mport_dma_req *req; + struct mport_dev *md = priv->md; + struct dma_chan *chan; + int i, ret; + int nents; + + if (xfer->length == 0) + return -EINVAL; + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + ret = get_dma_channel(priv); + if (ret) { + kfree(req); + return ret; + } + + /* + * If parameter loc_addr != NULL, we are transferring data from/to + * data buffer allocated in user-space: lock in memory user-space + * buffer pages and build an SG table for DMA transfer request + * + * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is + * used for DMA data transfers: build single entry SG table using + * offset within the internal buffer specified by handle parameter. 
+ */ + if (xfer->loc_addr) { + unsigned long offset; + long pinned; + + offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK; + nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT; + + page_list = kmalloc_array(nr_pages, + sizeof(*page_list), GFP_KERNEL); + if (page_list == NULL) { + ret = -ENOMEM; + goto err_req; + } + + down_read(¤t->mm->mmap_sem); + pinned = get_user_pages(current, current->mm, + (unsigned long)xfer->loc_addr & PAGE_MASK, + nr_pages, dir == DMA_FROM_DEVICE, 0, + page_list, NULL); + up_read(¤t->mm->mmap_sem); + + if (pinned != nr_pages) { + if (pinned < 0) { + rmcd_error("get_user_pages err=%ld", pinned); + nr_pages = 0; + } else + rmcd_error("pinned %ld out of %ld pages", + pinned, nr_pages); + ret = -EFAULT; + goto err_pg; + } + + ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages, + offset, xfer->length, GFP_KERNEL); + if (ret) { + rmcd_error("sg_alloc_table failed with err=%d", ret); + goto err_pg; + } + + req->page_list = page_list; + req->nr_pages = nr_pages; + } else { + dma_addr_t baddr; + struct rio_mport_mapping *map; + + baddr = (dma_addr_t)xfer->handle; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (baddr >= map->phys_addr && + baddr < (map->phys_addr + map->size)) { + kref_get(&map->ref); + req->map = map; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (req->map == NULL) { + ret = -ENOMEM; + goto err_req; + } + + if (xfer->length + xfer->offset > map->size) { + ret = -EINVAL; + goto err_req; + } + + ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL); + if (unlikely(ret)) { + rmcd_error("sg_alloc_table failed for internal buf"); + goto err_req; + } + + sg_set_buf(req->sgt.sgl, + map->virt_addr + (baddr - map->phys_addr) + + xfer->offset, xfer->length); + } + + req->dir = dir; + req->filp = filp; + req->priv = priv; + chan = priv->dmach; + + nents = dma_map_sg(chan->device->dev, + req->sgt.sgl, req->sgt.nents, dir); + if (nents == -EFAULT) { + rmcd_error("Failed to map SG list"); + return -EFAULT; + } + + ret = do_dma_request(req, xfer, sync, nents); + + if (ret >= 0) { + if (sync == RIO_TRANSFER_SYNC) + goto sync_out; + return ret; /* return ASYNC cookie */ + } + + if (ret == -ETIMEDOUT || ret == -EINTR) { + /* + * This can happen only in case of SYNC transfer. + * Do not free unfinished request structure immediately. 
+ * Place it into pending list and deal with it later + */ + spin_lock(&priv->req_lock); + list_add_tail(&req->node, &priv->pend_list); + spin_unlock(&priv->req_lock); + return ret; + } + + + rmcd_debug(DMA, "do_dma_request failed with err=%d", ret); +sync_out: + dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); + sg_free_table(&req->sgt); +err_pg: + if (page_list) { + for (i = 0; i < nr_pages; i++) + put_page(page_list[i]); + kfree(page_list); + } +err_req: + if (req->map) { + mutex_lock(&md->buf_mutex); + kref_put(&req->map->ref, mport_release_mapping); + mutex_unlock(&md->buf_mutex); + } + put_dma_channel(priv); + kfree(req); + return ret; +} + +static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct rio_transaction transaction; + struct rio_transfer_io *transfer; + enum dma_data_direction dir; + int i, ret = 0; + + if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction)))) + return -EFAULT; + + if (transaction.count != 1) + return -EINVAL; + + if ((transaction.transfer_mode & + priv->md->properties.transfer_mode) == 0) + return -ENODEV; + + transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io)); + if (!transfer) + return -ENOMEM; + + if (unlikely(copy_from_user(transfer, transaction.block, + transaction.count * sizeof(struct rio_transfer_io)))) { + ret = -EFAULT; + goto out_free; + } + + dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ? + DMA_FROM_DEVICE : DMA_TO_DEVICE; + for (i = 0; i < transaction.count && ret == 0; i++) + ret = rio_dma_transfer(filp, transaction.transfer_mode, + transaction.sync, dir, &transfer[i]); + + if (unlikely(copy_to_user(transaction.block, transfer, + transaction.count * sizeof(struct rio_transfer_io)))) + ret = -EFAULT; + +out_free: + vfree(transfer); + + return ret; +} + +static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv; + struct mport_dev *md; + struct rio_async_tx_wait w_param; + struct mport_dma_req *req; + dma_cookie_t cookie; + unsigned long tmo; + long wret; + int found = 0; + int ret; + + priv = (struct mport_cdev_priv *)filp->private_data; + md = priv->md; + + if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param)))) + return -EFAULT; + + cookie = w_param.token; + if (w_param.timeout) + tmo = msecs_to_jiffies(w_param.timeout); + else /* Use default DMA timeout */ + tmo = msecs_to_jiffies(dma_timeout); + + spin_lock(&priv->req_lock); + list_for_each_entry(req, &priv->async_list, node) { + if (req->cookie == cookie) { + list_del(&req->node); + found = 1; + break; + } + } + spin_unlock(&priv->req_lock); + + if (!found) + return -EAGAIN; + + wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo); + + if (wret == 0) { + /* Timeout on wait occurred */ + rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s", + current->comm, task_pid_nr(current), + (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); + ret = -ETIMEDOUT; + goto err_tmo; + } else if (wret == -ERESTARTSYS) { + /* Wait_for_completion was interrupted by a signal but DMA may + * be still in progress + */ + rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted", + current->comm, task_pid_nr(current), + (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE"); + ret = -EINTR; + goto err_tmo; + } + + if (req->status != DMA_COMPLETE) { + /* DMA transaction completion signaled with transfer error */ + rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d", + current->comm, task_pid_nr(current), + 
(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE", + req->status); + ret = -EIO; + } else + ret = 0; + + if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED) + dma_req_free(req); + + return ret; + +err_tmo: + /* Return request back into async queue */ + spin_lock(&priv->req_lock); + list_add_tail(&req->node, &priv->async_list); + spin_unlock(&priv->req_lock); + return ret; +} + +static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, + uint64_t size, struct rio_mport_mapping **mapping) +{ + struct rio_mport_mapping *map; + + map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, + &map->phys_addr, GFP_KERNEL); + if (map->virt_addr == NULL) { + kfree(map); + return -ENOMEM; + } + + map->dir = MAP_DMA; + map->size = size; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + mutex_lock(&md->buf_mutex); + list_add_tail(&map->node, &md->mappings); + mutex_unlock(&md->buf_mutex); + *mapping = map; + + return 0; +} + +static int rio_mport_alloc_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + struct rio_dma_mem map; + struct rio_mport_mapping *mapping = NULL; + int ret; + + if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem)))) + return -EFAULT; + + ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); + if (ret) + return ret; + + map.dma_handle = mapping->phys_addr; + + if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) { + mutex_lock(&md->buf_mutex); + kref_put(&mapping->ref, mport_release_mapping); + mutex_unlock(&md->buf_mutex); + return -EFAULT; + } + + return 0; +} + +static int rio_mport_free_dma(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + int ret = -EFAULT; + struct rio_mport_mapping *map, *_map; + + if (copy_from_user(&handle, arg, sizeof(u64))) + return -EFAULT; + rmcd_debug(EXIT, "filp=%p", filp); + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_DMA && map->phys_addr == handle && + map->filp == filp) { + kref_put(&map->ref, mport_release_mapping); + ret = 0; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (ret == -EFAULT) { + rmcd_debug(DMA, "ERR no matching mapping"); + return ret; + } + + return 0; +} +#else +static int rio_mport_transfer_ioctl(struct file *filp, void *arg) +{ + return -ENODEV; +} + +static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} + +static int rio_mport_alloc_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} + +static int rio_mport_free_dma(struct file *filp, void __user *arg) +{ + return -ENODEV; +} +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/* + * Inbound/outbound memory mapping functions + */ + +static int +rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, + u64 raddr, u32 size, + struct rio_mport_mapping **mapping) +{ + struct rio_mport *mport = md->mport; + struct rio_mport_mapping *map; + int ret; + + map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL); + if (map == NULL) + return -ENOMEM; + + map->virt_addr = dma_alloc_coherent(mport->dev.parent, size, + &map->phys_addr, GFP_KERNEL); + if (map->virt_addr == NULL) { + ret = -ENOMEM; + goto err_dma_alloc; + } + + if (raddr == RIO_MAP_ANY_ADDR) + raddr = 
map->phys_addr; + ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0); + if (ret < 0) + goto err_map_inb; + + map->dir = MAP_INBOUND; + map->rio_addr = raddr; + map->size = size; + map->filp = filp; + map->md = md; + kref_init(&map->ref); + mutex_lock(&md->buf_mutex); + list_add_tail(&map->node, &md->mappings); + mutex_unlock(&md->buf_mutex); + *mapping = map; + return 0; + +err_map_inb: + dma_free_coherent(mport->dev.parent, size, + map->virt_addr, map->phys_addr); +err_dma_alloc: + kfree(map); + return ret; +} + +static int +rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, + u64 raddr, u32 size, + struct rio_mport_mapping **mapping) +{ + struct rio_mport_mapping *map; + int err = -ENOMEM; + + if (raddr == RIO_MAP_ANY_ADDR) + goto get_new; + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (map->dir != MAP_INBOUND) + continue; + if (raddr == map->rio_addr && size == map->size) { + /* allow exact match only */ + *mapping = map; + err = 0; + break; + } else if (raddr < (map->rio_addr + map->size - 1) && + (raddr + size) > map->rio_addr) { + err = -EBUSY; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (err != -ENOMEM) + return err; +get_new: + /* not found, create new */ + return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); +} + +static int rio_mport_map_inbound(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + struct rio_mmap map; + struct rio_mport_mapping *mapping = NULL; + int ret; + + if (!md->mport->ops->map_inb) + return -EPROTONOSUPPORT; + if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap)))) + return -EFAULT; + + rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); + + ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, + map.length, &mapping); + if (ret) + return ret; + + map.handle = mapping->phys_addr; + map.rio_addr = mapping->rio_addr; + + if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) { + /* Delete mapping if it was created by this request */ + if (ret == 0 && mapping->filp == filp) { + mutex_lock(&md->buf_mutex); + kref_put(&mapping->ref, mport_release_mapping); + mutex_unlock(&md->buf_mutex); + } + return -EFAULT; + } + + return 0; +} + +/* + * rio_mport_inbound_free() - unmap from RapidIO address space and free + * previously allocated inbound DMA coherent buffer + * @priv: driver private data + * @arg: buffer handle returned by allocation routine + */ +static int rio_mport_inbound_free(struct file *filp, void __user *arg) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md = priv->md; + u64 handle; + struct rio_mport_mapping *map, *_map; + + rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); + + if (!md->mport->ops->unmap_inb) + return -EPROTONOSUPPORT; + + if (copy_from_user(&handle, arg, sizeof(u64))) + return -EFAULT; + + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + if (map->dir == MAP_INBOUND && map->phys_addr == handle) { + if (map->filp == filp) { + map->filp = NULL; + kref_put(&map->ref, mport_release_mapping); + } + break; + } + } + mutex_unlock(&md->buf_mutex); + + return 0; +} + +/* + * maint_port_idx_get() - Get the port index of the mport instance + * @priv: driver private data + * @arg: port index + */ +static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg) +{ + struct mport_dev *md = priv->md; + uint32_t port_idx = 
md->mport->index; + + rmcd_debug(MPORT, "port_index=%d", port_idx); + + if (copy_to_user(arg, &port_idx, sizeof(port_idx))) + return -EFAULT; + + return 0; +} + +static int rio_mport_add_event(struct mport_cdev_priv *priv, + struct rio_event *event) +{ + int overflow; + + if (!(priv->event_mask & event->header)) + return -EACCES; + + spin_lock(&priv->fifo_lock); + overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event) + || kfifo_in(&priv->event_fifo, (unsigned char *)event, + sizeof(*event)) != sizeof(*event); + spin_unlock(&priv->fifo_lock); + + wake_up_interruptible(&priv->event_rx_wait); + + if (overflow) { + dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); + return -EBUSY; + } + + return 0; +} + +static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id, + u16 src, u16 dst, u16 info) +{ + struct mport_dev *data = dev_id; + struct mport_cdev_priv *priv; + struct rio_mport_db_filter *db_filter; + struct rio_event event; + int handled; + + event.header = RIO_DOORBELL; + event.u.doorbell.rioid = src; + event.u.doorbell.payload = info; + + handled = 0; + spin_lock(&data->db_lock); + list_for_each_entry(db_filter, &data->doorbells, data_node) { + if (((db_filter->filter.rioid == 0xffffffff || + db_filter->filter.rioid == src)) && + info >= db_filter->filter.low && + info <= db_filter->filter.high) { + priv = db_filter->priv; + rio_mport_add_event(priv, &event); + handled = 1; + } + } + spin_unlock(&data->db_lock); + + if (!handled) + dev_warn(&data->dev, + "%s: spurious DB received from 0x%x, info=0x%04x\n", + __func__, src, info); +} + +static int rio_mport_add_db_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_db_filter *db_filter; + struct rio_doorbell_filter filter; + unsigned long flags; + int ret; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + if (filter.low > filter.high) + return -EINVAL; + + ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, + rio_mport_doorbell_handler); + if (ret) { + rmcd_error("%s failed to register IBDB, err=%d", + dev_name(&md->dev), ret); + return ret; + } + + db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL); + if (db_filter == NULL) { + rio_release_inb_dbell(md->mport, filter.low, filter.high); + return -ENOMEM; + } + + db_filter->filter = filter; + db_filter->priv = priv; + spin_lock_irqsave(&md->db_lock, flags); + list_add_tail(&db_filter->priv_node, &priv->db_filters); + list_add_tail(&db_filter->data_node, &md->doorbells); + spin_unlock_irqrestore(&md->db_lock, flags); + + return 0; +} + +static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter) +{ + list_del(&db_filter->data_node); + list_del(&db_filter->priv_node); + kfree(db_filter); +} + +static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct rio_mport_db_filter *db_filter; + struct rio_doorbell_filter filter; + unsigned long flags; + int ret = -EINVAL; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + spin_lock_irqsave(&priv->md->db_lock, flags); + list_for_each_entry(db_filter, &priv->db_filters, priv_node) { + if (db_filter->filter.rioid == filter.rioid && + db_filter->filter.low == filter.low && + db_filter->filter.high == filter.high) { + rio_mport_delete_db_filter(db_filter); + ret = 0; + break; + } + } + spin_unlock_irqrestore(&priv->md->db_lock, flags); + + if (!ret) + rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); + + return ret; +} + 
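Usage note: the doorbell filter and event FIFO code above is consumed from user space through the new mport character device. The following is a minimal sketch, not part of the patch; the uapi header name <linux/rio_mport_cdev.h> and the device node /dev/rio_mport0 are assumptions (neither appears in this hunk), and the struct fields used follow exactly the driver's references to rioid/low/high and event.header/u.doorbell above.

/*
 * Minimal user-space sketch for the doorbell event interface added above.
 * Assumed (not shown in this hunk): uapi header <linux/rio_mport_cdev.h>
 * and device node /dev/rio_mport0.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rio_mport_cdev.h>	/* assumed uapi header name */

int main(void)
{
	struct rio_doorbell_filter filter;
	struct rio_event ev;
	unsigned int mask = RIO_DOORBELL;	/* checked against event.header */
	int fd;

	fd = open("/dev/rio_mport0", O_RDWR);	/* assumed device node name */
	if (fd < 0)
		return 1;

	/* Receive doorbells with info 0x0000..0x00ff from any sender (0xffffffff). */
	filter.rioid = 0xffffffff;
	filter.low = 0x0000;
	filter.high = 0x00ff;

	if (ioctl(fd, RIO_SET_EVENT_MASK, mask) < 0 ||
	    ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &filter) < 0) {
		close(fd);
		return 1;
	}

	/* read() returns whole struct rio_event records from the event kfifo. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) && ev.header == RIO_DOORBELL)
		printf("doorbell from 0x%x info 0x%04x\n",
		       (unsigned int)ev.u.doorbell.rioid,
		       (unsigned int)ev.u.doorbell.payload);

	ioctl(fd, RIO_DISABLE_DOORBELL_RANGE, &filter);
	close(fd);
	return 0;
}

As the driver's read path requires, the read size must be a multiple of sizeof(struct rio_event) and the call blocks until an event arrives unless the file was opened with O_NONBLOCK; the rioid value 0xffffffff in the filter matches doorbells from any sender, per the comparison in rio_mport_doorbell_handler() above.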
+static int rio_mport_match_pw(union rio_pw_msg *msg, + struct rio_pw_filter *filter) +{ + if ((msg->em.comptag & filter->mask) < filter->low || + (msg->em.comptag & filter->mask) > filter->high) + return 0; + return 1; +} + +static int rio_mport_pw_handler(struct rio_mport *mport, void *context, + union rio_pw_msg *msg, int step) +{ + struct mport_dev *md = context; + struct mport_cdev_priv *priv; + struct rio_mport_pw_filter *pw_filter; + struct rio_event event; + int handled; + + event.header = RIO_PORTWRITE; + memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE); + + handled = 0; + spin_lock(&md->pw_lock); + list_for_each_entry(pw_filter, &md->portwrites, md_node) { + if (rio_mport_match_pw(msg, &pw_filter->filter)) { + priv = pw_filter->priv; + rio_mport_add_event(priv, &event); + handled = 1; + } + } + spin_unlock(&md->pw_lock); + + if (!handled) { + printk_ratelimited(KERN_WARNING DRV_NAME + ": mport%d received spurious PW from 0x%08x\n", + mport->id, msg->em.comptag); + } + + return 0; +} + +static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_pw_filter *pw_filter; + struct rio_pw_filter filter; + unsigned long flags; + int hadd = 0; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL); + if (pw_filter == NULL) + return -ENOMEM; + + pw_filter->filter = filter; + pw_filter->priv = priv; + spin_lock_irqsave(&md->pw_lock, flags); + if (list_empty(&md->portwrites)) + hadd = 1; + list_add_tail(&pw_filter->priv_node, &priv->pw_filters); + list_add_tail(&pw_filter->md_node, &md->portwrites); + spin_unlock_irqrestore(&md->pw_lock, flags); + + if (hadd) { + int ret; + + ret = rio_add_mport_pw_handler(md->mport, md, + rio_mport_pw_handler); + if (ret) { + dev_err(&md->dev, + "%s: failed to add IB_PW handler, err=%d\n", + __func__, ret); + return ret; + } + rio_pw_enable(md->mport, 1); + } + + return 0; +} + +static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter) +{ + list_del(&pw_filter->md_node); + list_del(&pw_filter->priv_node); + kfree(pw_filter); +} + +static int rio_mport_match_pw_filter(struct rio_pw_filter *a, + struct rio_pw_filter *b) +{ + if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high)) + return 1; + return 0; +} + +static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_mport_pw_filter *pw_filter; + struct rio_pw_filter filter; + unsigned long flags; + int ret = -EINVAL; + int hdel = 0; + + if (copy_from_user(&filter, arg, sizeof(filter))) + return -EFAULT; + + spin_lock_irqsave(&md->pw_lock, flags); + list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) { + if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) { + rio_mport_delete_pw_filter(pw_filter); + ret = 0; + break; + } + } + + if (list_empty(&md->portwrites)) + hdel = 1; + spin_unlock_irqrestore(&md->pw_lock, flags); + + if (hdel) { + rio_del_mport_pw_handler(md->mport, priv->md, + rio_mport_pw_handler); + rio_pw_enable(md->mport, 0); + } + + return ret; +} + +/* + * rio_release_dev - release routine for kernel RIO device object + * @dev: kernel device object associated with a RIO device structure + * + * Frees a RIO device struct associated a RIO device struct. + * The RIO device struct is freed. 
+ */ +static void rio_release_dev(struct device *dev) +{ + struct rio_dev *rdev; + + rdev = to_rio_dev(dev); + pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev)); + kfree(rdev); +} + + +static void rio_release_net(struct device *dev) +{ + struct rio_net *net; + + net = to_rio_net(dev); + rmcd_debug(RDEV, "net_%d", net->id); + kfree(net); +} + + +/* + * rio_mport_add_riodev - creates a kernel RIO device object + * + * Allocates a RIO device data structure and initializes required fields based + * on device's configuration space contents. + * If the device has switch capabilities, then a switch specific portion is + * allocated and configured. + */ +static int rio_mport_add_riodev(struct mport_cdev_priv *priv, + void __user *arg) +{ + struct mport_dev *md = priv->md; + struct rio_rdev_info dev_info; + struct rio_dev *rdev; + struct rio_switch *rswitch = NULL; + struct rio_mport *mport; + size_t size; + u32 rval; + u32 swpinfo = 0; + u16 destid; + u8 hopcount; + int err; + + if (copy_from_user(&dev_info, arg, sizeof(dev_info))) + return -EFAULT; + + rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, + dev_info.comptag, dev_info.destid, dev_info.hopcount); + + if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) { + rmcd_debug(RDEV, "device %s already exists", dev_info.name); + return -EEXIST; + } + + size = sizeof(struct rio_dev); + mport = md->mport; + destid = (u16)dev_info.destid; + hopcount = (u8)dev_info.hopcount; + + if (rio_mport_read_config_32(mport, destid, hopcount, + RIO_PEF_CAR, &rval)) + return -EIO; + + if (rval & RIO_PEF_SWITCH) { + rio_mport_read_config_32(mport, destid, hopcount, + RIO_SWP_INFO_CAR, &swpinfo); + size += (RIO_GET_TOTAL_PORTS(swpinfo) * + sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); + } + + rdev = kzalloc(size, GFP_KERNEL); + if (rdev == NULL) + return -ENOMEM; + + if (mport->net == NULL) { + struct rio_net *net; + + net = rio_alloc_net(mport); + if (!net) { + err = -ENOMEM; + rmcd_debug(RDEV, "failed to allocate net object"); + goto cleanup; + } + + net->id = mport->id; + net->hport = mport; + dev_set_name(&net->dev, "rnet_%d", net->id); + net->dev.parent = &mport->dev; + net->dev.release = rio_release_net; + err = rio_add_net(net); + if (err) { + rmcd_debug(RDEV, "failed to register net, err=%d", err); + kfree(net); + goto cleanup; + } + } + + rdev->net = mport->net; + rdev->pef = rval; + rdev->swpinfo = swpinfo; + rio_mport_read_config_32(mport, destid, hopcount, + RIO_DEV_ID_CAR, &rval); + rdev->did = rval >> 16; + rdev->vid = rval & 0xffff; + rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR, + &rdev->device_rev); + rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR, + &rval); + rdev->asm_did = rval >> 16; + rdev->asm_vid = rval & 0xffff; + rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR, + &rval); + rdev->asm_rev = rval >> 16; + + if (rdev->pef & RIO_PEF_EXT_FEATURES) { + rdev->efptr = rval & 0xffff; + rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid, + hopcount); + + rdev->em_efptr = rio_mport_get_feature(mport, 0, destid, + hopcount, RIO_EFB_ERR_MGMNT); + } + + rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR, + &rdev->src_ops); + rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR, + &rdev->dst_ops); + + rdev->comp_tag = dev_info.comptag; + rdev->destid = destid; + /* hopcount is stored as specified by a caller, regardles of EP or SW */ + rdev->hopcount = hopcount; + + if (rdev->pef & RIO_PEF_SWITCH) { + rswitch = 
rdev->rswitch; + rswitch->route_table = NULL; + } + + if (strlen(dev_info.name)) + dev_set_name(&rdev->dev, "%s", dev_info.name); + else if (rdev->pef & RIO_PEF_SWITCH) + dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + else + dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id, + rdev->comp_tag & RIO_CTAG_UDEVID); + + INIT_LIST_HEAD(&rdev->net_list); + rdev->dev.parent = &mport->net->dev; + rio_attach_device(rdev); + rdev->dev.release = rio_release_dev; + + if (rdev->dst_ops & RIO_DST_OPS_DOORBELL) + rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE], + 0, 0xffff); + err = rio_add_device(rdev); + if (err) + goto cleanup; + rio_dev_get(rdev); + + return 0; +cleanup: + kfree(rdev); + return err; +} + +static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg) +{ + struct rio_rdev_info dev_info; + struct rio_dev *rdev = NULL; + struct device *dev; + struct rio_mport *mport; + struct rio_net *net; + + if (copy_from_user(&dev_info, arg, sizeof(dev_info))) + return -EFAULT; + + mport = priv->md->mport; + + /* If device name is specified, removal by name has priority */ + if (strlen(dev_info.name)) { + dev = bus_find_device_by_name(&rio_bus_type, NULL, + dev_info.name); + if (dev) + rdev = to_rio_dev(dev); + } else { + do { + rdev = rio_get_comptag(dev_info.comptag, rdev); + if (rdev && rdev->dev.parent == &mport->net->dev && + rdev->destid == (u16)dev_info.destid && + rdev->hopcount == (u8)dev_info.hopcount) + break; + } while (rdev); + } + + if (!rdev) { + rmcd_debug(RDEV, + "device name:%s ct:0x%x did:0x%x hc:0x%x not found", + dev_info.name, dev_info.comptag, dev_info.destid, + dev_info.hopcount); + return -ENODEV; + } + + net = rdev->net; + rio_dev_put(rdev); + rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); + + if (list_empty(&net->devices)) { + rio_free_net(net); + mport->net = NULL; + } + + return 0; +} + +/* + * Mport cdev management + */ + +/* + * mport_cdev_open() - Open character device (mport) + */ +static int mport_cdev_open(struct inode *inode, struct file *filp) +{ + int ret; + int minor = iminor(inode); + struct mport_dev *chdev; + struct mport_cdev_priv *priv; + + /* Test for valid device */ + if (minor >= RIO_MAX_MPORTS) { + rmcd_error("Invalid minor device number"); + return -EINVAL; + } + + chdev = container_of(inode->i_cdev, struct mport_dev, cdev); + + rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp); + + if (atomic_read(&chdev->active) == 0) + return -ENODEV; + + get_device(&chdev->dev); + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + put_device(&chdev->dev); + return -ENOMEM; + } + + priv->md = chdev; + + mutex_lock(&chdev->file_mutex); + list_add_tail(&priv->list, &chdev->file_list); + mutex_unlock(&chdev->file_mutex); + + INIT_LIST_HEAD(&priv->db_filters); + INIT_LIST_HEAD(&priv->pw_filters); + spin_lock_init(&priv->fifo_lock); + init_waitqueue_head(&priv->event_rx_wait); + ret = kfifo_alloc(&priv->event_fifo, + sizeof(struct rio_event) * MPORT_EVENT_DEPTH, + GFP_KERNEL); + if (ret < 0) { + dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n"); + ret = -ENOMEM; + goto err_fifo; + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + INIT_LIST_HEAD(&priv->async_list); + INIT_LIST_HEAD(&priv->pend_list); + spin_lock_init(&priv->req_lock); + mutex_init(&priv->dma_lock); +#endif + + filp->private_data = priv; + goto out; +err_fifo: + kfree(priv); +out: + return ret; +} + +static int mport_cdev_fasync(int fd, struct file *filp, int mode) +{ + struct mport_cdev_priv *priv = filp->private_data; + + 
return fasync_helper(fd, filp, mode, &priv->async_queue); +} + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +static void mport_cdev_release_dma(struct file *filp) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md; + struct mport_dma_req *req, *req_next; + unsigned long tmo = msecs_to_jiffies(dma_timeout); + long wret; + LIST_HEAD(list); + + rmcd_debug(EXIT, "from filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + + if (!priv->dmach) { + rmcd_debug(EXIT, "No DMA channel for filp=%p", filp); + return; + } + + md = priv->md; + + flush_workqueue(dma_wq); + + spin_lock(&priv->req_lock); + if (!list_empty(&priv->async_list)) { + rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + list_splice_init(&priv->async_list, &list); + } + spin_unlock(&priv->req_lock); + + if (!list_empty(&list)) { + rmcd_debug(EXIT, "temp list not empty"); + list_for_each_entry_safe(req, req_next, &list, node) { + rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", + req->filp, req->cookie, + completion_done(&req->req_comp)?"yes":"no"); + list_del(&req->node); + dma_req_free(req); + } + } + + if (!list_empty(&priv->pend_list)) { + rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + list_for_each_entry_safe(req, + req_next, &priv->pend_list, node) { + rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s", + req->filp, req->cookie, + completion_done(&req->req_comp)?"yes":"no"); + list_del(&req->node); + dma_req_free(req); + } + } + + put_dma_channel(priv); + wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo); + + if (wret <= 0) { + rmcd_error("%s(%d) failed waiting for DMA release err=%ld", + current->comm, task_pid_nr(current), wret); + } + + spin_lock(&priv->req_lock); + + if (!list_empty(&priv->pend_list)) { + rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + } + + spin_unlock(&priv->req_lock); + + if (priv->dmach != priv->md->dma_chan) { + rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)", + filp, current->comm, task_pid_nr(current)); + rio_release_dma(priv->dmach); + } else { + rmcd_debug(EXIT, "Adjust default DMA channel refcount"); + kref_put(&md->dma_ref, mport_release_def_dma); + } + + priv->dmach = NULL; +} +#else +#define mport_cdev_release_dma(priv) do {} while (0) +#endif + +/* + * mport_cdev_release() - Release character device + */ +static int mport_cdev_release(struct inode *inode, struct file *filp) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *chdev; + struct rio_mport_pw_filter *pw_filter, *pw_filter_next; + struct rio_mport_db_filter *db_filter, *db_filter_next; + struct rio_mport_mapping *map, *_map; + unsigned long flags; + + rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); + + chdev = priv->md; + mport_cdev_release_dma(filp); + + priv->event_mask = 0; + + spin_lock_irqsave(&chdev->pw_lock, flags); + if (!list_empty(&priv->pw_filters)) { + list_for_each_entry_safe(pw_filter, pw_filter_next, + &priv->pw_filters, priv_node) + rio_mport_delete_pw_filter(pw_filter); + } + spin_unlock_irqrestore(&chdev->pw_lock, flags); + + spin_lock_irqsave(&chdev->db_lock, flags); + list_for_each_entry_safe(db_filter, db_filter_next, + &priv->db_filters, priv_node) { + rio_mport_delete_db_filter(db_filter); + } + spin_unlock_irqrestore(&chdev->db_lock, flags); + + kfifo_free(&priv->event_fifo); + + mutex_lock(&chdev->buf_mutex); + 
list_for_each_entry_safe(map, _map, &chdev->mappings, node) { + if (map->filp == filp) { + rmcd_debug(EXIT, "release mapping %p filp=%p", + map->virt_addr, filp); + kref_put(&map->ref, mport_release_mapping); + } + } + mutex_unlock(&chdev->buf_mutex); + + mport_cdev_fasync(-1, filp, 0); + filp->private_data = NULL; + mutex_lock(&chdev->file_mutex); + list_del(&priv->list); + mutex_unlock(&chdev->file_mutex); + put_device(&chdev->dev); + kfree(priv); + return 0; +} + +/* + * mport_cdev_ioctl() - IOCTLs for character device + */ +static long mport_cdev_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg) +{ + int err = -EINVAL; + struct mport_cdev_priv *data = filp->private_data; + struct mport_dev *md = data->md; + + if (atomic_read(&md->active) == 0) + return -ENODEV; + + switch (cmd) { + case RIO_MPORT_MAINT_READ_LOCAL: + return rio_mport_maint_rd(data, (void __user *)arg, 1); + case RIO_MPORT_MAINT_WRITE_LOCAL: + return rio_mport_maint_wr(data, (void __user *)arg, 1); + case RIO_MPORT_MAINT_READ_REMOTE: + return rio_mport_maint_rd(data, (void __user *)arg, 0); + case RIO_MPORT_MAINT_WRITE_REMOTE: + return rio_mport_maint_wr(data, (void __user *)arg, 0); + case RIO_MPORT_MAINT_HDID_SET: + return maint_hdid_set(data, (void __user *)arg); + case RIO_MPORT_MAINT_COMPTAG_SET: + return maint_comptag_set(data, (void __user *)arg); + case RIO_MPORT_MAINT_PORT_IDX_GET: + return maint_port_idx_get(data, (void __user *)arg); + case RIO_MPORT_GET_PROPERTIES: + md->properties.hdid = md->mport->host_deviceid; + if (copy_to_user((void __user *)arg, &(data->md->properties), + sizeof(data->md->properties))) + return -EFAULT; + return 0; + case RIO_ENABLE_DOORBELL_RANGE: + return rio_mport_add_db_filter(data, (void __user *)arg); + case RIO_DISABLE_DOORBELL_RANGE: + return rio_mport_remove_db_filter(data, (void __user *)arg); + case RIO_ENABLE_PORTWRITE_RANGE: + return rio_mport_add_pw_filter(data, (void __user *)arg); + case RIO_DISABLE_PORTWRITE_RANGE: + return rio_mport_remove_pw_filter(data, (void __user *)arg); + case RIO_SET_EVENT_MASK: + data->event_mask = arg; + return 0; + case RIO_GET_EVENT_MASK: + if (copy_to_user((void __user *)arg, &data->event_mask, + sizeof(data->event_mask))) + return -EFAULT; + return 0; + case RIO_MAP_OUTBOUND: + return rio_mport_obw_map(filp, (void __user *)arg); + case RIO_MAP_INBOUND: + return rio_mport_map_inbound(filp, (void __user *)arg); + case RIO_UNMAP_OUTBOUND: + return rio_mport_obw_free(filp, (void __user *)arg); + case RIO_UNMAP_INBOUND: + return rio_mport_inbound_free(filp, (void __user *)arg); + case RIO_ALLOC_DMA: + return rio_mport_alloc_dma(filp, (void __user *)arg); + case RIO_FREE_DMA: + return rio_mport_free_dma(filp, (void __user *)arg); + case RIO_WAIT_FOR_ASYNC: + return rio_mport_wait_for_async_dma(filp, (void __user *)arg); + case RIO_TRANSFER: + return rio_mport_transfer_ioctl(filp, (void __user *)arg); + case RIO_DEV_ADD: + return rio_mport_add_riodev(data, (void __user *)arg); + case RIO_DEV_DEL: + return rio_mport_del_riodev(data, (void __user *)arg); + default: + break; + } + + return err; +} + +/* + * mport_release_mapping - free mapping resources and info structure + * @ref: a pointer to the kref within struct rio_mport_mapping + * + * NOTE: Shall be called while holding buf_mutex. 
+ */ +static void mport_release_mapping(struct kref *ref) +{ + struct rio_mport_mapping *map = + container_of(ref, struct rio_mport_mapping, ref); + struct rio_mport *mport = map->md->mport; + + rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s", + map->dir, map->virt_addr, + &map->phys_addr, mport->name); + + list_del(&map->node); + + switch (map->dir) { + case MAP_INBOUND: + rio_unmap_inb_region(mport, map->phys_addr); + case MAP_DMA: + dma_free_coherent(mport->dev.parent, map->size, + map->virt_addr, map->phys_addr); + break; + case MAP_OUTBOUND: + rio_unmap_outb_region(mport, map->rioid, map->rio_addr); + break; + } + kfree(map); +} + +static void mport_mm_open(struct vm_area_struct *vma) +{ + struct rio_mport_mapping *map = vma->vm_private_data; + +rmcd_debug(MMAP, "0x%pad", &map->phys_addr); + kref_get(&map->ref); +} + +static void mport_mm_close(struct vm_area_struct *vma) +{ + struct rio_mport_mapping *map = vma->vm_private_data; + +rmcd_debug(MMAP, "0x%pad", &map->phys_addr); + mutex_lock(&map->md->buf_mutex); + kref_put(&map->ref, mport_release_mapping); + mutex_unlock(&map->md->buf_mutex); +} + +static const struct vm_operations_struct vm_ops = { + .open = mport_mm_open, + .close = mport_mm_close, +}; + +static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct mport_dev *md; + size_t size = vma->vm_end - vma->vm_start; + dma_addr_t baddr; + unsigned long offset; + int found = 0, ret; + struct rio_mport_mapping *map; + + rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx", + (unsigned int)size, vma->vm_pgoff); + + md = priv->md; + baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT); + + mutex_lock(&md->buf_mutex); + list_for_each_entry(map, &md->mappings, node) { + if (baddr >= map->phys_addr && + baddr < (map->phys_addr + map->size)) { + found = 1; + break; + } + } + mutex_unlock(&md->buf_mutex); + + if (!found) + return -ENOMEM; + + offset = baddr - map->phys_addr; + + if (size + offset > map->size) + return -EINVAL; + + vma->vm_pgoff = offset >> PAGE_SHIFT; + rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff); + + if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) + ret = dma_mmap_coherent(md->mport->dev.parent, vma, + map->virt_addr, map->phys_addr, map->size); + else if (map->dir == MAP_OUTBOUND) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + ret = vm_iomap_memory(vma, map->phys_addr, map->size); + } else { + rmcd_error("Attempt to mmap unsupported mapping type"); + ret = -EIO; + } + + if (!ret) { + vma->vm_private_data = map; + vma->vm_ops = &vm_ops; + mport_mm_open(vma); + } else { + rmcd_error("MMAP exit with err=%d", ret); + } + + return ret; +} + +static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait) +{ + struct mport_cdev_priv *priv = filp->private_data; + + poll_wait(filp, &priv->event_rx_wait, wait); + if (kfifo_len(&priv->event_fifo)) + return POLLIN | POLLRDNORM; + + return 0; +} + +static ssize_t mport_read(struct file *filp, char __user *buf, size_t count, + loff_t *ppos) +{ + struct mport_cdev_priv *priv = filp->private_data; + int copied; + ssize_t ret; + + if (!count) + return 0; + + if (kfifo_is_empty(&priv->event_fifo) && + (filp->f_flags & O_NONBLOCK)) + return -EAGAIN; + + if (count % sizeof(struct rio_event)) + return -EINVAL; + + ret = wait_event_interruptible(priv->event_rx_wait, + kfifo_len(&priv->event_fifo) != 0); + if (ret) + return ret; + + while (ret < count) { + if (kfifo_to_user(&priv->event_fifo, buf, + 
sizeof(struct rio_event), &copied)) + return -EFAULT; + ret += copied; + buf += copied; + } + + return ret; +} + +static ssize_t mport_write(struct file *filp, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct mport_cdev_priv *priv = filp->private_data; + struct rio_mport *mport = priv->md->mport; + struct rio_event event; + int len, ret; + + if (!count) + return 0; + + if (count % sizeof(event)) + return -EINVAL; + + len = 0; + while ((count - len) >= (int)sizeof(event)) { + if (copy_from_user(&event, buf, sizeof(event))) + return -EFAULT; + + if (event.header != RIO_DOORBELL) + return -EINVAL; + + ret = rio_mport_send_doorbell(mport, + (u16)event.u.doorbell.rioid, + event.u.doorbell.payload); + if (ret < 0) + return ret; + + len += sizeof(event); + buf += sizeof(event); + } + + return len; +} + +static const struct file_operations mport_fops = { + .owner = THIS_MODULE, + .open = mport_cdev_open, + .release = mport_cdev_release, + .poll = mport_cdev_poll, + .read = mport_read, + .write = mport_write, + .mmap = mport_cdev_mmap, + .fasync = mport_cdev_fasync, + .unlocked_ioctl = mport_cdev_ioctl +}; + +/* + * Character device management + */ + +static void mport_device_release(struct device *dev) +{ + struct mport_dev *md; + + rmcd_debug(EXIT, "%s", dev_name(dev)); + md = container_of(dev, struct mport_dev, dev); + kfree(md); +} + +/* + * mport_cdev_add() - Create mport_dev from rio_mport + * @mport: RapidIO master port + */ +static struct mport_dev *mport_cdev_add(struct rio_mport *mport) +{ + int ret = 0; + struct mport_dev *md; + struct rio_mport_attr attr; + + md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL); + if (!md) { + rmcd_error("Unable allocate a device object"); + return NULL; + } + + md->mport = mport; + mutex_init(&md->buf_mutex); + mutex_init(&md->file_mutex); + INIT_LIST_HEAD(&md->file_list); + cdev_init(&md->cdev, &mport_fops); + md->cdev.owner = THIS_MODULE; + ret = cdev_add(&md->cdev, MKDEV(MAJOR(dev_number), mport->id), 1); + if (ret < 0) { + kfree(md); + rmcd_error("Unable to register a device, err=%d", ret); + return NULL; + } + + md->dev.devt = md->cdev.dev; + md->dev.class = dev_class; + md->dev.parent = &mport->dev; + md->dev.release = mport_device_release; + dev_set_name(&md->dev, DEV_NAME "%d", mport->id); + atomic_set(&md->active, 1); + + ret = device_register(&md->dev); + if (ret) { + rmcd_error("Failed to register mport %d (err=%d)", + mport->id, ret); + goto err_cdev; + } + + get_device(&md->dev); + + INIT_LIST_HEAD(&md->doorbells); + spin_lock_init(&md->db_lock); + INIT_LIST_HEAD(&md->portwrites); + spin_lock_init(&md->pw_lock); + INIT_LIST_HEAD(&md->mappings); + + md->properties.id = mport->id; + md->properties.sys_size = mport->sys_size; + md->properties.hdid = mport->host_deviceid; + md->properties.index = mport->index; + + /* The transfer_mode property will be returned through mport query + * interface + */ +#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */ + md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; +#else + md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; +#endif + ret = rio_query_mport(mport, &attr); + if (!ret) { + md->properties.flags = attr.flags; + md->properties.link_speed = attr.link_speed; + md->properties.link_width = attr.link_width; + md->properties.dma_max_sge = attr.dma_max_sge; + md->properties.dma_max_size = attr.dma_max_size; + md->properties.dma_align = attr.dma_align; + md->properties.cap_sys_size = 0; + md->properties.cap_transfer_mode = 0; + md->properties.cap_addr_size = 0; + } 
else + pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n", + mport->name, MAJOR(dev_number), mport->id); + + mutex_lock(&mport_devs_lock); + list_add_tail(&md->node, &mport_devs); + mutex_unlock(&mport_devs_lock); + + pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n", + mport->name, MAJOR(dev_number), mport->id); + + return md; + +err_cdev: + cdev_del(&md->cdev); + kfree(md); + return NULL; +} + +/* + * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release + * associated DMA channels. + */ +static void mport_cdev_terminate_dma(struct mport_dev *md) +{ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct mport_cdev_priv *client; + + rmcd_debug(DMA, "%s", dev_name(&md->dev)); + + mutex_lock(&md->file_mutex); + list_for_each_entry(client, &md->file_list, list) { + if (client->dmach) { + dmaengine_terminate_all(client->dmach); + rio_release_dma(client->dmach); + } + } + mutex_unlock(&md->file_mutex); + + if (md->dma_chan) { + dmaengine_terminate_all(md->dma_chan); + rio_release_dma(md->dma_chan); + md->dma_chan = NULL; + } +#endif +} + + +/* + * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open + * mport_cdev files. + */ +static int mport_cdev_kill_fasync(struct mport_dev *md) +{ + unsigned int files = 0; + struct mport_cdev_priv *client; + + mutex_lock(&md->file_mutex); + list_for_each_entry(client, &md->file_list, list) { + if (client->async_queue) + kill_fasync(&client->async_queue, SIGIO, POLL_HUP); + files++; + } + mutex_unlock(&md->file_mutex); + return files; +} + +/* + * mport_cdev_remove() - Remove mport character device + * @dev: Mport device to remove + */ +static void mport_cdev_remove(struct mport_dev *md) +{ + struct rio_mport_mapping *map, *_map; + + rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); + atomic_set(&md->active, 0); + mport_cdev_terminate_dma(md); + rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); + cdev_del(&(md->cdev)); + mport_cdev_kill_fasync(md); + + flush_workqueue(dma_wq); + + /* TODO: do we need to give clients some time to close file + * descriptors? Simple wait for XX, or kref? + */ + + /* + * Release DMA buffers allocated for the mport device. + * Disable associated inbound Rapidio requests mapping if applicable. 
+ */ + mutex_lock(&md->buf_mutex); + list_for_each_entry_safe(map, _map, &md->mappings, node) { + kref_put(&map->ref, mport_release_mapping); + } + mutex_unlock(&md->buf_mutex); + + if (!list_empty(&md->mappings)) + rmcd_warn("WARNING: %s pending mappings on removal", + md->mport->name); + + rio_release_inb_dbell(md->mport, 0, 0x0fff); + + device_unregister(&md->dev); + put_device(&md->dev); +} + +/* + * RIO rio_mport_interface driver + */ + +/* + * mport_add_mport() - Add rio_mport from LDM device struct + * @dev: Linux device model struct + * @class_intf: Linux class_interface + */ +static int mport_add_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = NULL; + struct mport_dev *chdev = NULL; + + mport = to_rio_mport(dev); + if (!mport) + return -ENODEV; + + chdev = mport_cdev_add(mport); + if (!chdev) + return -ENODEV; + + return 0; +} + +/* + * mport_remove_mport() - Remove rio_mport from global list + * TODO remove device from global mport_dev list + */ +static void mport_remove_mport(struct device *dev, + struct class_interface *class_intf) +{ + struct rio_mport *mport = NULL; + struct mport_dev *chdev; + int found = 0; + + mport = to_rio_mport(dev); + rmcd_debug(EXIT, "Remove %s", mport->name); + + mutex_lock(&mport_devs_lock); + list_for_each_entry(chdev, &mport_devs, node) { + if (chdev->mport->id == mport->id) { + atomic_set(&chdev->active, 0); + list_del(&chdev->node); + found = 1; + break; + } + } + mutex_unlock(&mport_devs_lock); + + if (found) + mport_cdev_remove(chdev); +} + +/* the rio_mport_interface is used to handle local mport devices */ +static struct class_interface rio_mport_interface __refdata = { + .class = &rio_mport_class, + .add_dev = mport_add_mport, + .remove_dev = mport_remove_mport, +}; + +/* + * Linux kernel module + */ + +/* + * mport_init - Driver module loading + */ +static int __init mport_init(void) +{ + int ret; + + /* Create device class needed by udev */ + dev_class = class_create(THIS_MODULE, DRV_NAME); + if (!dev_class) { + rmcd_error("Unable to create " DRV_NAME " class"); + return -EINVAL; + } + + ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME); + if (ret < 0) + goto err_chr; + + rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number)); + + /* Register to rio_mport_interface */ + ret = class_interface_register(&rio_mport_interface); + if (ret) { + rmcd_error("class_interface_register() failed, err=%d", ret); + goto err_cli; + } + + dma_wq = create_singlethread_workqueue("dma_wq"); + if (!dma_wq) { + rmcd_error("failed to create DMA work queue"); + ret = -ENOMEM; + goto err_wq; + } + + return 0; + +err_wq: + class_interface_unregister(&rio_mport_interface); +err_cli: + unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); +err_chr: + class_destroy(dev_class); + return ret; +} + +/** + * mport_exit - Driver module unloading + */ +static void __exit mport_exit(void) +{ + class_interface_unregister(&rio_mport_interface); + class_destroy(dev_class); + unregister_chrdev_region(dev_number, RIO_MAX_MPORTS); + destroy_workqueue(dma_wq); +} + +module_init(mport_init); +module_exit(mport_exit); diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index eeca70ddbf61..b5b455614f8a 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c @@ -36,7 +36,11 @@ #include "tsi721.h" -#define DEBUG_PW /* Inbound Port-Write debugging */ +#ifdef DEBUG +u32 dbg_level = DBG_INIT | DBG_EXIT; +module_param(dbg_level, uint, S_IWUSR | S_IRUGO); 
+MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)"); +#endif static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); @@ -143,9 +147,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, & TSI721_DMAC_STS_RUN) { udelay(1); if (++i >= 5000000) { - dev_dbg(&priv->pdev->dev, - "%s : DMA[%d] read timeout ch_status=%x\n", - __func__, priv->mdma.ch_id, ch_stat); + tsi_debug(MAINT, &priv->pdev->dev, + "DMA[%d] read timeout ch_status=%x", + priv->mdma.ch_id, ch_stat); if (!do_wr) *data = 0xffffffff; err = -EIO; @@ -157,10 +161,12 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, /* If DMA operation aborted due to error, * reinitialize DMA channel */ - dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n", - __func__, ch_stat); - dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", - do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); + tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x", + ch_stat); + tsi_debug(MAINT, &priv->pdev->dev, + "OP=%d : destid=%x hc=%x off=%x", + do_wr ? MAINT_WR : MAINT_RD, + destid, hopcount, offset); iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); udelay(10); @@ -236,16 +242,15 @@ static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, /** * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler - * @mport: RapidIO master port structure + * @priv: tsi721 device private structure * * Handles inbound port-write interrupts. Copies PW message from an internal * buffer into PW message FIFO and schedules deferred routine to process * queued messages. */ static int -tsi721_pw_handler(struct rio_mport *mport) +tsi721_pw_handler(struct tsi721_device *priv) { - struct tsi721_device *priv = mport->priv; u32 pw_stat; u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; @@ -283,30 +288,15 @@ static void tsi721_pw_dpc(struct work_struct *work) { struct tsi721_device *priv = container_of(work, struct tsi721_device, pw_work); - u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message - buffer for RIO layer */ + union rio_pw_msg pwmsg; /* * Process port-write messages */ - while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer, + while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg, TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { - /* Process one message */ -#ifdef DEBUG_PW - { - u32 i; - pr_debug("%s : Port-Write Message:", __func__); - for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) { - pr_debug("0x%02x: %08x %08x %08x %08x", i*4, - msg_buffer[i], msg_buffer[i + 1], - msg_buffer[i + 2], msg_buffer[i + 3]); - i += 4; - } - pr_debug("\n"); - } -#endif /* Pass the port-write message to RIO core for processing */ - rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); + rio_inb_pwrite_handler(&priv->mport, &pwmsg); } } @@ -354,8 +344,8 @@ static int tsi721_dsend(struct rio_mport *mport, int index, offset = (((mport->sys_size) ? 
RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | (destid << 2); - dev_dbg(&priv->pdev->dev, - "Send Doorbell 0x%04x to destID 0x%x\n", data, destid); + tsi_debug(DBELL, &priv->pdev->dev, + "Send Doorbell 0x%04x to destID 0x%x", data, destid); iowrite16be(data, priv->odb_base + offset); return 0; @@ -363,16 +353,15 @@ static int tsi721_dsend(struct rio_mport *mport, int index, /** * tsi721_dbell_handler - Tsi721 doorbell interrupt handler - * @mport: RapidIO master port structure + * @priv: tsi721 device-specific data structure * * Handles inbound doorbell interrupts. Copies doorbell entry from an internal * buffer into DB message FIFO and schedules deferred routine to process * queued DBs. */ static int -tsi721_dbell_handler(struct rio_mport *mport) +tsi721_dbell_handler(struct tsi721_device *priv) { - struct tsi721_device *priv = mport->priv; u32 regval; /* Disable IDB interrupts */ @@ -404,7 +393,7 @@ static void tsi721_db_dpc(struct work_struct *work) /* * Process queued inbound doorbells */ - mport = priv->mport; + mport = &priv->mport; wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; @@ -430,10 +419,10 @@ static void tsi721_db_dpc(struct work_struct *work) dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } else { - dev_dbg(&priv->pdev->dev, - "spurious inb doorbell, sid %2.2x tid %2.2x" - " info %4.4x\n", DBELL_SID(idb.bytes), - DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); + tsi_debug(DBELL, &priv->pdev->dev, + "spurious IDB sid %2.2x tid %2.2x info %4.4x", + DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), + DBELL_INF(idb.bytes)); } wr_ptr = ioread32(priv->regs + @@ -457,15 +446,14 @@ static void tsi721_db_dpc(struct work_struct *work) /** * tsi721_irqhandler - Tsi721 interrupt handler * @irq: Linux interrupt number - * @ptr: Pointer to interrupt-specific data (mport structure) + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported * interrupt events and calls an event-specific handler(s). 
*/ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) { - struct rio_mport *mport = (struct rio_mport *)ptr; - struct tsi721_device *priv = mport->priv; + struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 dev_int; u32 dev_ch_int; u32 intval; @@ -488,10 +476,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) intval = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); if (intval & TSI721_SR_CHINT_IDBQRCV) - tsi721_dbell_handler(mport); + tsi721_dbell_handler(priv); else - dev_info(&priv->pdev->dev, - "Unsupported SR_CH_INT %x\n", intval); + tsi_info(&priv->pdev->dev, + "Unsupported SR_CH_INT %x", intval); /* Clear interrupts */ iowrite32(intval, @@ -545,7 +533,7 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) /* Service SRIO MAC interrupts */ intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) - tsi721_pw_handler(mport); + tsi721_pw_handler(priv); } #ifdef CONFIG_RAPIDIO_DMA_ENGINE @@ -553,8 +541,8 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr) int ch; if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { - dev_dbg(&priv->pdev->dev, - "IRQ from DMA channel 0x%08x\n", dev_ch_int); + tsi_debug(DMA, &priv->pdev->dev, + "IRQ from DMA channel 0x%08x", dev_ch_int); for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) @@ -613,13 +601,13 @@ static void tsi721_interrupts_init(struct tsi721_device *priv) /** * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging * @irq: Linux interrupt number - * @ptr: Pointer to interrupt-specific data (mport structure) + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles outbound messaging interrupts signaled using MSI-X. */ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) { - struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + struct tsi721_device *priv = (struct tsi721_device *)ptr; int mbox; mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; @@ -630,13 +618,13 @@ static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) /** * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging * @irq: Linux interrupt number - * @ptr: Pointer to interrupt-specific data (mport structure) + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles inbound messaging interrupts signaled using MSI-X. */ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) { - struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + struct tsi721_device *priv = (struct tsi721_device *)ptr; int mbox; mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; @@ -647,19 +635,19 @@ static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) /** * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler * @irq: Linux interrupt number - * @ptr: Pointer to interrupt-specific data (mport structure) + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts from SRIO MAC. 
*/ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) { - struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 srio_int; /* Service SRIO MAC interrupts */ srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) - tsi721_pw_handler((struct rio_mport *)ptr); + tsi721_pw_handler(priv); return IRQ_HANDLED; } @@ -667,7 +655,7 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) /** * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler * @irq: Linux interrupt number - * @ptr: Pointer to interrupt-specific data (mport structure) + * @ptr: Pointer to interrupt-specific data (tsi721_device structure) * * Handles Tsi721 interrupts from SR2PC Channel. * NOTE: At this moment services only one SR2PC channel associated with inbound @@ -675,13 +663,13 @@ static irqreturn_t tsi721_srio_msix(int irq, void *ptr) */ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) { - struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + struct tsi721_device *priv = (struct tsi721_device *)ptr; u32 sr_ch_int; /* Service Inbound DB interrupt from SR2PC channel */ sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) - tsi721_dbell_handler((struct rio_mport *)ptr); + tsi721_dbell_handler(priv); /* Clear interrupts */ iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); @@ -693,32 +681,31 @@ static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) /** * tsi721_request_msix - register interrupt service for MSI-X mode. - * @mport: RapidIO master port structure + * @priv: tsi721 device-specific data structure * * Registers MSI-X interrupt service routines for interrupts that are active * immediately after mport initialization. Messaging interrupt service routines * should be registered during corresponding open requests. 
*/ -static int tsi721_request_msix(struct rio_mport *mport) +static int tsi721_request_msix(struct tsi721_device *priv) { - struct tsi721_device *priv = mport->priv; int err = 0; err = request_irq(priv->msix[TSI721_VECT_IDB].vector, tsi721_sr2pc_ch_msix, 0, - priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport); + priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv); if (err) - goto out; + return err; err = request_irq(priv->msix[TSI721_VECT_PWRX].vector, tsi721_srio_msix, 0, - priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport); - if (err) - free_irq( - priv->msix[TSI721_VECT_IDB].vector, - (void *)mport); -out: - return err; + priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv); + if (err) { + free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); + return err; + } + + return 0; } /** @@ -770,8 +757,8 @@ static int tsi721_enable_msix(struct tsi721_device *priv) err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries)); if (err) { - dev_err(&priv->pdev->dev, - "Failed to enable MSI-X (err=%d)\n", err); + tsi_err(&priv->pdev->dev, + "Failed to enable MSI-X (err=%d)", err); return err; } @@ -831,27 +818,209 @@ static int tsi721_enable_msix(struct tsi721_device *priv) } #endif /* CONFIG_PCI_MSI */ -static int tsi721_request_irq(struct rio_mport *mport) +static int tsi721_request_irq(struct tsi721_device *priv) { - struct tsi721_device *priv = mport->priv; int err; #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) - err = tsi721_request_msix(mport); + err = tsi721_request_msix(priv); else #endif err = request_irq(priv->pdev->irq, tsi721_irqhandler, (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED, - DRV_NAME, (void *)mport); + DRV_NAME, (void *)priv); if (err) - dev_err(&priv->pdev->dev, - "Unable to allocate interrupt, Error: %d\n", err); + tsi_err(&priv->pdev->dev, + "Unable to allocate interrupt, err=%d", err); return err; } +static void tsi721_free_irq(struct tsi721_device *priv) +{ +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv); + free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv); + } else +#endif + free_irq(priv->pdev->irq, (void *)priv); +} + +static int +tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar, + u32 size, int *win_id) +{ + u64 win_base; + u64 bar_base; + u64 bar_end; + u32 align; + struct tsi721_ob_win *win; + struct tsi721_ob_win *new_win = NULL; + int new_win_idx = -1; + int i = 0; + + bar_base = pbar->base; + bar_end = bar_base + pbar->size; + win_base = bar_base; + align = size/TSI721_PC2SR_ZONES; + + while (i < TSI721_IBWIN_NUM) { + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + if (!priv->ob_win[i].active) { + if (new_win == NULL) { + new_win = &priv->ob_win[i]; + new_win_idx = i; + } + continue; + } + + /* + * If this window belongs to the current BAR check it + * for overlap + */ + win = &priv->ob_win[i]; + + if (win->base >= bar_base && win->base < bar_end) { + if (win_base < (win->base + win->size) && + (win_base + size) > win->base) { + /* Overlap detected */ + win_base = win->base + win->size; + win_base = ALIGN(win_base, align); + break; + } + } + } + } + + if (win_base + size > bar_end) + return -ENOMEM; + + if (!new_win) { + tsi_err(&priv->pdev->dev, "OBW count tracking failed"); + return -EIO; + } + + new_win->active = true; + new_win->base = win_base; + new_win->size = size; + new_win->pbar = pbar; + priv->obwin_cnt--; + pbar->free -= size; + *win_id = new_win_idx; + return 0; +} + +static int tsi721_map_outb_win(struct 
rio_mport *mport, u16 destid, u64 rstart, + u32 size, u32 flags, dma_addr_t *laddr) +{ + struct tsi721_device *priv = mport->priv; + int i; + struct tsi721_obw_bar *pbar; + struct tsi721_ob_win *ob_win; + int obw = -1; + u32 rval; + u64 rio_addr; + u32 zsize; + int ret = -ENOMEM; + + tsi_debug(OBW, &priv->pdev->dev, + "did=%d ra=0x%llx sz=0x%x", destid, rstart, size); + + if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1))) + return -EINVAL; + + if (priv->obwin_cnt == 0) + return -EBUSY; + + for (i = 0; i < 2; i++) { + if (priv->p2r_bar[i].free >= size) { + pbar = &priv->p2r_bar[i]; + ret = tsi721_obw_alloc(priv, pbar, size, &obw); + if (!ret) + break; + } + } + + if (ret) + return ret; + + WARN_ON(obw == -1); + ob_win = &priv->ob_win[obw]; + ob_win->destid = destid; + ob_win->rstart = rstart; + tsi_debug(OBW, &priv->pdev->dev, + "allocated OBW%d @%llx", obw, ob_win->base); + + /* + * Configure Outbound Window + */ + + zsize = size/TSI721_PC2SR_ZONES; + rio_addr = rstart; + + /* + * Program Address Translation Zones: + * This implementation uses all 8 zones associated wit window. + */ + for (i = 0; i < TSI721_PC2SR_ZONES; i++) { + + while (ioread32(priv->regs + TSI721_ZONE_SEL) & + TSI721_ZONE_SEL_GO) { + udelay(1); + } + + rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) | + TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR; + iowrite32(rval, priv->regs + TSI721_LUT_DATA0); + rval = (u32)(rio_addr >> 32); + iowrite32(rval, priv->regs + TSI721_LUT_DATA1); + rval = destid; + iowrite32(rval, priv->regs + TSI721_LUT_DATA2); + + rval = TSI721_ZONE_SEL_GO | (obw << 3) | i; + iowrite32(rval, priv->regs + TSI721_ZONE_SEL); + + rio_addr += zsize; + } + + iowrite32(TSI721_OBWIN_SIZE(size) << 8, + priv->regs + TSI721_OBWINSZ(obw)); + iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw)); + iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN, + priv->regs + TSI721_OBWINLB(obw)); + + *laddr = ob_win->base; + return 0; +} + +static void tsi721_unmap_outb_win(struct rio_mport *mport, + u16 destid, u64 rstart) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_ob_win *ob_win; + int i; + + tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart); + + for (i = 0; i < TSI721_OBWIN_NUM; i++) { + ob_win = &priv->ob_win[i]; + + if (ob_win->active && + ob_win->destid == destid && ob_win->rstart == rstart) { + tsi_debug(OBW, &priv->pdev->dev, + "free OBW%d @%llx", i, ob_win->base); + ob_win->active = false; + iowrite32(0, priv->regs + TSI721_OBWINLB(i)); + ob_win->pbar->free += ob_win->size; + priv->obwin_cnt++; + break; + } + } +} + /** * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO) * translation regions. 
@@ -861,11 +1030,41 @@ static int tsi721_request_irq(struct rio_mport *mport) */ static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv) { - int i; + int i, z; + u32 rval; /* Disable all PC2SR translation windows */ for (i = 0; i < TSI721_OBWIN_NUM; i++) iowrite32(0, priv->regs + TSI721_OBWINLB(i)); + + /* Initialize zone lookup tables to avoid ECC errors on reads */ + iowrite32(0, priv->regs + TSI721_LUT_DATA0); + iowrite32(0, priv->regs + TSI721_LUT_DATA1); + iowrite32(0, priv->regs + TSI721_LUT_DATA2); + + for (i = 0; i < TSI721_OBWIN_NUM; i++) { + for (z = 0; z < TSI721_PC2SR_ZONES; z++) { + while (ioread32(priv->regs + TSI721_ZONE_SEL) & + TSI721_ZONE_SEL_GO) { + udelay(1); + } + rval = TSI721_ZONE_SEL_GO | (i << 3) | z; + iowrite32(rval, priv->regs + TSI721_ZONE_SEL); + } + } + + if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) { + priv->obwin_cnt = 0; + return; + } + + priv->p2r_bar[0].free = priv->p2r_bar[0].size; + priv->p2r_bar[1].free = priv->p2r_bar[1].size; + + for (i = 0; i < TSI721_OBWIN_NUM; i++) + priv->ob_win[i].active = false; + + priv->obwin_cnt = TSI721_OBWIN_NUM; } /** @@ -885,45 +1084,148 @@ static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart, u64 rstart, u32 size, u32 flags) { struct tsi721_device *priv = mport->priv; - int i; + int i, avail = -1; u32 regval; + struct tsi721_ib_win *ib_win; + bool direct = (lstart == rstart); + u64 ibw_size; + dma_addr_t loc_start; + u64 ibw_start; + struct tsi721_ib_win_mapping *map = NULL; + int ret = -EBUSY; + + if (direct) { + /* Calculate minimal acceptable window size and base address */ + + ibw_size = roundup_pow_of_two(size); + ibw_start = lstart & ~(ibw_size - 1); + + tsi_debug(IBW, &priv->pdev->dev, + "Direct (RIO_0x%llx -> PCIe_0x%pad), size=0x%x, ibw_start = 0x%llx", + rstart, &lstart, size, ibw_start); + + while ((lstart + size) > (ibw_start + ibw_size)) { + ibw_size *= 2; + ibw_start = lstart & ~(ibw_size - 1); + if (ibw_size > 0x80000000) { /* Limit max size to 2GB */ + return -EBUSY; + } + } - if (!is_power_of_2(size) || size < 0x1000 || - ((u64)lstart & (size - 1)) || (rstart & (size - 1))) - return -EINVAL; + loc_start = ibw_start; + + map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC); + if (map == NULL) + return -ENOMEM; + + } else { + tsi_debug(IBW, &priv->pdev->dev, + "Translated (RIO_0x%llx -> PCIe_0x%pad), size=0x%x", + rstart, &lstart, size); + + if (!is_power_of_2(size) || size < 0x1000 || + ((u64)lstart & (size - 1)) || (rstart & (size - 1))) + return -EINVAL; + if (priv->ibwin_cnt == 0) + return -EBUSY; + ibw_start = rstart; + ibw_size = size; + loc_start = lstart; + } - /* Search for free inbound translation window */ + /* + * Scan for overlapping with active regions and mark the first available + * IB window at the same time. + */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { - regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); - if (!(regval & TSI721_IBWIN_LB_WEN)) + ib_win = &priv->ib_win[i]; + + if (!ib_win->active) { + if (avail == -1) { + avail = i; + ret = 0; + } + } else if (ibw_start < (ib_win->rstart + ib_win->size) && + (ibw_start + ibw_size) > ib_win->rstart) { + /* Return error if address translation involved */ + if (direct && ib_win->xlat) { + ret = -EFAULT; + break; + } + + /* + * Direct mappings usually are larger than originally + * requested fragments - check if this new request fits + * into it. 
+ */ + if (rstart >= ib_win->rstart && + (rstart + size) <= (ib_win->rstart + + ib_win->size)) { + /* We are in - no further mapping required */ + map->lstart = lstart; + list_add_tail(&map->node, &ib_win->mappings); + return 0; + } + + ret = -EFAULT; break; + } } - if (i >= TSI721_IBWIN_NUM) { - dev_err(&priv->pdev->dev, - "Unable to find free inbound window\n"); - return -EBUSY; + if (ret) + goto out; + i = avail; + + /* Sanity check: available IB window must be disabled at this point */ + regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); + if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) { + ret = -EIO; + goto out; + } + + ib_win = &priv->ib_win[i]; + ib_win->active = true; + ib_win->rstart = ibw_start; + ib_win->lstart = loc_start; + ib_win->size = ibw_size; + ib_win->xlat = (lstart != rstart); + INIT_LIST_HEAD(&ib_win->mappings); + + /* + * When using direct IBW mapping and have larger than requested IBW size + * we can have multiple local memory blocks mapped through the same IBW + * To handle this situation we maintain list of "clients" for such IBWs. + */ + if (direct) { + map->lstart = lstart; + list_add_tail(&map->node, &ib_win->mappings); } - iowrite32(TSI721_IBWIN_SIZE(size) << 8, + iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8, priv->regs + TSI721_IBWIN_SZ(i)); - iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i)); - iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD), + iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i)); + iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD), priv->regs + TSI721_IBWIN_TLA(i)); - iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i)); - iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN, + iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i)); + iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN, priv->regs + TSI721_IBWIN_LB(i)); - dev_dbg(&priv->pdev->dev, - "Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n", - i, rstart, (unsigned long long)lstart); + + priv->ibwin_cnt--; + + tsi_debug(IBW, &priv->pdev->dev, + "Configured IBWIN%d (RIO_0x%llx -> PCIe_0x%pad), size=0x%llx", + i, ibw_start, &loc_start, ibw_size); return 0; +out: + kfree(map); + return ret; } /** - * fsl_rio_unmap_inb_mem -- Unmapping inbound memory region. + * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region. * @mport: RapidIO master port * @lstart: Local memory space start address. 
*/ @@ -931,25 +1233,56 @@ static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart) { struct tsi721_device *priv = mport->priv; + struct tsi721_ib_win *ib_win; int i; - u64 addr; - u32 regval; + + tsi_debug(IBW, &priv->pdev->dev, + "Unmap IBW mapped to PCIe_0x%pad", &lstart); /* Search for matching active inbound translation window */ for (i = 0; i < TSI721_IBWIN_NUM; i++) { - regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); - if (regval & TSI721_IBWIN_LB_WEN) { - regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i)); - addr = (u64)regval << 32; - regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i)); - addr |= regval & TSI721_IBWIN_TLA_ADD; - - if (addr == (u64)lstart) { - iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); - break; + ib_win = &priv->ib_win[i]; + + /* Address translating IBWs must to be an exact march */ + if (!ib_win->active || + (ib_win->xlat && lstart != ib_win->lstart)) + continue; + + if (lstart >= ib_win->lstart && + lstart < (ib_win->lstart + ib_win->size)) { + + if (!ib_win->xlat) { + struct tsi721_ib_win_mapping *map; + int found = 0; + + list_for_each_entry(map, + &ib_win->mappings, node) { + if (map->lstart == lstart) { + list_del(&map->node); + kfree(map); + found = 1; + break; + } + } + + if (!found) + continue; + + if (!list_empty(&ib_win->mappings)) + break; } + + tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i); + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + ib_win->active = false; + priv->ibwin_cnt++; + break; } } + + if (i == TSI721_IBWIN_NUM) + tsi_debug(IBW, &priv->pdev->dev, + "IB window mapped to %pad not found", &lstart); } /** @@ -966,6 +1299,27 @@ static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) /* Disable all SR2PC inbound windows */ for (i = 0; i < TSI721_IBWIN_NUM; i++) iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + priv->ibwin_cnt = TSI721_IBWIN_NUM; +} + +/* + * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe) + * translation regions. + * @priv: pointer to tsi721 device private data + */ +static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv) +{ + struct tsi721_ib_win *ib_win; + int i; + + /* Disable all active SR2PC inbound windows */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + ib_win = &priv->ib_win[i]; + if (ib_win->active) { + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + ib_win->active = false; + } + } } /** @@ -982,7 +1336,7 @@ static int tsi721_port_write_init(struct tsi721_device *priv) spin_lock_init(&priv->pw_fifo_lock); if (kfifo_alloc(&priv->pw_fifo, TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { - dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n"); + tsi_err(&priv->pdev->dev, "PW FIFO allocation failed"); return -ENOMEM; } @@ -991,6 +1345,11 @@ static int tsi721_port_write_init(struct tsi721_device *priv) return 0; } +static void tsi721_port_write_free(struct tsi721_device *priv) +{ + kfifo_free(&priv->pw_fifo); +} + static int tsi721_doorbell_init(struct tsi721_device *priv) { /* Outbound Doorbells do not require any setup. 
@@ -1009,8 +1368,9 @@ static int tsi721_doorbell_init(struct tsi721_device *priv) if (!priv->idb_base) return -ENOMEM; - dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", - priv->idb_base, (unsigned long long)priv->idb_dma); + tsi_debug(DBELL, &priv->pdev->dev, + "Allocated IDB buffer @ %p (phys = %pad)", + priv->idb_base, &priv->idb_dma); iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE), priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE)); @@ -1056,9 +1416,8 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) int bd_num = 2; void __iomem *regs; - dev_dbg(&priv->pdev->dev, - "Init Block DMA Engine for Maintenance requests, CH%d\n", - TSI721_DMACH_MAINT); + tsi_debug(MAINT, &priv->pdev->dev, + "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT); /* * Initialize DMA channel for maintenance requests @@ -1078,8 +1437,8 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) priv->mdma.bd_phys = bd_phys; priv->mdma.bd_base = bd_ptr; - dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", - bd_ptr, (unsigned long long)bd_phys); + tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)", + bd_ptr, &bd_phys); /* Allocate space for descriptor status FIFO */ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? @@ -1101,9 +1460,9 @@ static int tsi721_bdma_maint_init(struct tsi721_device *priv) priv->mdma.sts_base = sts_ptr; priv->mdma.sts_size = sts_size; - dev_dbg(&priv->pdev->dev, - "desc status FIFO @ %p (phys = %llx) size=0x%x\n", - sts_ptr, (unsigned long long)sts_phys, sts_size); + tsi_debug(MAINT, &priv->pdev->dev, + "desc status FIFO @ %p (phys = %pad) size=0x%x", + sts_ptr, &sts_phys, sts_size); /* Initialize DMA descriptors ring */ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); @@ -1304,11 +1663,14 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, struct tsi721_device *priv = mport->priv; struct tsi721_omsg_desc *desc; u32 tx_slot; + unsigned long flags; if (!priv->omsg_init[mbox] || len > TSI721_MSG_MAX_SIZE || len < 8) return -EINVAL; + spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags); + tx_slot = priv->omsg_ring[mbox].tx_slot; /* Copy message into transfer buffer */ @@ -1320,9 +1682,11 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, /* Build descriptor associated with buffer */ desc = priv->omsg_ring[mbox].omd_base; desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid); +#ifdef TSI721_OMSG_DESC_INT + /* Request IOF_DONE interrupt generation for each N-th frame in queue */ if (tx_slot % 4 == 0) desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF); - +#endif desc[tx_slot].msg_info = cpu_to_le32((mport->sys_size << 26) | (mbox << 22) | (0xe << 12) | (len & 0xff8)); @@ -1348,6 +1712,8 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags); + return 0; } @@ -1361,20 +1727,23 @@ tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) { u32 omsg_int; + struct rio_mport *mport = &priv->mport; + void *dev_id = NULL; + u32 tx_slot = 0xffffffff; + int do_callback = 0; spin_lock(&priv->omsg_ring[ch].lock); omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch)); if (omsg_int & TSI721_OBDMAC_INT_ST_FULL) - dev_info(&priv->pdev->dev, - "OB MBOX%d: Status FIFO is full\n", ch); + 
tsi_info(&priv->pdev->dev, + "OB MBOX%d: Status FIFO is full", ch); if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) { u32 srd_ptr; u64 *sts_ptr, last_ptr = 0, prev_ptr = 0; int i, j; - u32 tx_slot; /* * Find last successfully processed descriptor @@ -1402,7 +1771,7 @@ static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) priv->omsg_ring[ch].sts_rdptr = srd_ptr; iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch)); - if (!priv->mport->outb_msg[ch].mcback) + if (!mport->outb_msg[ch].mcback) goto no_sts_update; /* Inform upper layer about transfer completion */ @@ -1424,14 +1793,19 @@ static void tsi721_omsg_handler(struct tsi721_device *priv, int ch) goto no_sts_update; } + if (tx_slot >= priv->omsg_ring[ch].size) + tsi_debug(OMSG, &priv->pdev->dev, + "OB_MSG tx_slot=%x > size=%x", + tx_slot, priv->omsg_ring[ch].size); + WARN_ON(tx_slot >= priv->omsg_ring[ch].size); + /* Move slot index to the next message to be sent */ ++tx_slot; if (tx_slot == priv->omsg_ring[ch].size) tx_slot = 0; - BUG_ON(tx_slot >= priv->omsg_ring[ch].size); - priv->mport->outb_msg[ch].mcback(priv->mport, - priv->omsg_ring[ch].dev_id, ch, - tx_slot); + + dev_id = priv->omsg_ring[ch].dev_id; + do_callback = 1; } no_sts_update: @@ -1442,20 +1816,20 @@ no_sts_update: * reinitialize OB MSG channel */ - dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n", - ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); + tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x", + ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); iowrite32(TSI721_OBDMAC_INT_ERROR, priv->regs + TSI721_OBDMAC_INT(ch)); - iowrite32(TSI721_OBDMAC_CTL_INIT, + iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(ch)); ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); /* Inform upper level to clear all pending tx slots */ - if (priv->mport->outb_msg[ch].mcback) - priv->mport->outb_msg[ch].mcback(priv->mport, - priv->omsg_ring[ch].dev_id, ch, - priv->omsg_ring[ch].tx_slot); + dev_id = priv->omsg_ring[ch].dev_id; + tx_slot = priv->omsg_ring[ch].tx_slot; + do_callback = 1; + /* Synch tx_slot tracking */ iowrite32(priv->omsg_ring[ch].tx_slot, priv->regs + TSI721_OBDMAC_DRDCNT(ch)); @@ -1477,6 +1851,9 @@ no_sts_update: } spin_unlock(&priv->omsg_ring[ch].lock); + + if (mport->outb_msg[ch].mcback && do_callback) + mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot); } /** @@ -1514,9 +1891,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, &priv->omsg_ring[mbox].omq_phys[i], GFP_KERNEL); if (priv->omsg_ring[mbox].omq_base[i] == NULL) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate OB MSG data buffer for" - " MBOX%d\n", mbox); + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d data buffer", mbox); rc = -ENOMEM; goto out_buf; } @@ -1528,9 +1904,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, (entries + 1) * sizeof(struct tsi721_omsg_desc), &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); if (priv->omsg_ring[mbox].omd_base == NULL) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate OB MSG descriptor memory " - "for MBOX%d\n", mbox); + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d descriptor memory", mbox); rc = -ENOMEM; goto out_buf; } @@ -1544,9 +1919,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, sizeof(struct tsi721_dma_sts), &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); if (priv->omsg_ring[mbox].sts_base == NULL) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate OB MSG descriptor 
status FIFO " - "for MBOX%d\n", mbox); + tsi_debug(OMSG, &priv->pdev->dev, + "ENOMEM for OB_MSG_%d status FIFO", mbox); rc = -ENOMEM; goto out_desc; } @@ -1575,32 +1949,28 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { + int idx = TSI721_VECT_OMB0_DONE + mbox; + /* Request interrupt service if we are in MSI-X mode */ - rc = request_irq( - priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, - tsi721_omsg_msix, 0, - priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name, - (void *)mport); + rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); if (rc) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate MSI-X interrupt for " - "OBOX%d-DONE\n", mbox); + tsi_debug(OMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for OBOX%d-DONE", + mbox); goto out_stat; } - rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, - tsi721_omsg_msix, 0, - priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name, - (void *)mport); + idx = TSI721_VECT_OMB0_INT + mbox; + rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); if (rc) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate MSI-X interrupt for " - "MBOX%d-INT\n", mbox); - free_irq( - priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, - (void *)mport); + tsi_debug(OMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for MBOX%d-INT", mbox); + idx = TSI721_VECT_OMB0_DONE + mbox; + free_irq(priv->msix[idx].vector, (void *)priv); goto out_stat; } } @@ -1621,7 +1991,8 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, mb(); /* Initialize Outbound Message engine */ - iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox)); + iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT, + priv->regs + TSI721_OBDMAC_CTL(mbox)); ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); udelay(10); @@ -1684,9 +2055,9 @@ static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox) #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, - (void *)mport); + (void *)priv); free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, - (void *)mport); + (void *)priv); } #endif /* CONFIG_PCI_MSI */ @@ -1731,30 +2102,28 @@ static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) { u32 mbox = ch - 4; u32 imsg_int; + struct rio_mport *mport = &priv->mport; spin_lock(&priv->imsg_ring[mbox].lock); imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); if (imsg_int & TSI721_IBDMAC_INT_SRTO) - dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n", - mbox); + tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox); if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR) - dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n", - mbox); + tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox); if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW) - dev_info(&priv->pdev->dev, - "IB MBOX%d IB free queue low\n", mbox); + tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox); /* Clear IB channel interrupts */ iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); /* If an IB Msg is received notify the upper layer */ if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV && - priv->mport->inb_msg[mbox].mcback) - priv->mport->inb_msg[mbox].mcback(priv->mport, + mport->inb_msg[mbox].mcback) + mport->inb_msg[mbox].mcback(mport, priv->imsg_ring[mbox].dev_id, mbox, -1); if (!(priv->flags & TSI721_USING_MSIX)) { @@ -1810,8 
+2179,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, GFP_KERNEL); if (priv->imsg_ring[mbox].buf_base == NULL) { - dev_err(&priv->pdev->dev, - "Failed to allocate buffers for IB MBOX%d\n", mbox); + tsi_err(&priv->pdev->dev, + "Failed to allocate buffers for IB MBOX%d", mbox); rc = -ENOMEM; goto out; } @@ -1824,8 +2193,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, GFP_KERNEL); if (priv->imsg_ring[mbox].imfq_base == NULL) { - dev_err(&priv->pdev->dev, - "Failed to allocate free queue for IB MBOX%d\n", mbox); + tsi_err(&priv->pdev->dev, + "Failed to allocate free queue for IB MBOX%d", mbox); rc = -ENOMEM; goto out_buf; } @@ -1837,8 +2206,8 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL); if (priv->imsg_ring[mbox].imd_base == NULL) { - dev_err(&priv->pdev->dev, - "Failed to allocate descriptor memory for IB MBOX%d\n", + tsi_err(&priv->pdev->dev, + "Failed to allocate descriptor memory for IB MBOX%d", mbox); rc = -ENOMEM; goto out_dma; @@ -1859,7 +2228,7 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, * once when first inbound mailbox is requested. */ if (!(priv->flags & TSI721_IMSGID_SET)) { - iowrite32((u32)priv->mport->host_deviceid, + iowrite32((u32)priv->mport.host_deviceid, priv->regs + TSI721_IB_DEVID); priv->flags |= TSI721_IMSGID_SET; } @@ -1890,31 +2259,29 @@ static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { + int idx = TSI721_VECT_IMB0_RCV + mbox; + /* Request interrupt service if we are in MSI-X mode */ - rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, - tsi721_imsg_msix, 0, - priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name, - (void *)mport); + rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); if (rc) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate MSI-X interrupt for " - "IBOX%d-DONE\n", mbox); + tsi_debug(IMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for IBOX%d-DONE", + mbox); goto out_desc; } - rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, - tsi721_imsg_msix, 0, - priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name, - (void *)mport); + idx = TSI721_VECT_IMB0_INT + mbox; + rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0, + priv->msix[idx].irq_name, (void *)priv); if (rc) { - dev_dbg(&priv->pdev->dev, - "Unable to allocate MSI-X interrupt for " - "IBOX%d-INT\n", mbox); + tsi_debug(IMSG, &priv->pdev->dev, + "Unable to get MSI-X IRQ for IBOX%d-INT", mbox); free_irq( priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, - (void *)mport); + (void *)priv); goto out_desc; } } @@ -1985,9 +2352,9 @@ static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox) #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, - (void *)mport); + (void *)priv); free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, - (void *)mport); + (void *)priv); } #endif /* CONFIG_PCI_MSI */ @@ -2034,8 +2401,8 @@ static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) rx_slot = priv->imsg_ring[mbox].rx_slot; if (priv->imsg_ring[mbox].imq_base[rx_slot]) { - dev_err(&priv->pdev->dev, - "Error adding inbound buffer %d, buffer exists\n", + tsi_err(&priv->pdev->dev, + "Error adding inbound buffer %d, buffer exists", rx_slot); rc = -EINVAL; goto out; @@ -2153,6 +2520,39 @@ static int 
tsi721_messages_init(struct tsi721_device *priv) } /** + * tsi721_query_mport - Query mport device attributes + * @mport: Master port to query + * @attr: mport attributes data structure to fill + * + * Returns 0 on success. + */ +static int tsi721_query_mport(struct rio_mport *mport, + struct rio_mport_attr *attr) +{ + struct tsi721_device *priv = mport->priv; + u32 rval; + + rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_ERR_STS_CSR(0))); + if (rval & RIO_PORT_N_ERR_STS_PORT_OK) { + rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL2_CSR(0))); + attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28; + rval = ioread32(priv->regs + (0x100 + RIO_PORT_N_CTL_CSR(0))); + attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27; + } else + attr->link_speed = RIO_LINK_DOWN; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG; + attr->dma_max_sge = 0; + attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT; + attr->dma_align = 0; +#else + attr->flags = 0; +#endif + return 0; +} + +/** * tsi721_disable_ints - disables all device interrupts * @priv: pointer to tsi721 private data */ @@ -2203,6 +2603,34 @@ static void tsi721_disable_ints(struct tsi721_device *priv) iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN); } +static struct rio_ops tsi721_rio_ops = { + .lcread = tsi721_lcread, + .lcwrite = tsi721_lcwrite, + .cread = tsi721_cread_dma, + .cwrite = tsi721_cwrite_dma, + .dsend = tsi721_dsend, + .open_inb_mbox = tsi721_open_inb_mbox, + .close_inb_mbox = tsi721_close_inb_mbox, + .open_outb_mbox = tsi721_open_outb_mbox, + .close_outb_mbox = tsi721_close_outb_mbox, + .add_outb_message = tsi721_add_outb_message, + .add_inb_buffer = tsi721_add_inb_buffer, + .get_inb_message = tsi721_get_inb_message, + .map_inb = tsi721_rio_map_inb_mem, + .unmap_inb = tsi721_rio_unmap_inb_mem, + .pwenable = tsi721_pw_enable, + .query_mport = tsi721_query_mport, + .map_outb = tsi721_map_outb_win, + .unmap_outb = tsi721_unmap_outb_win, +}; + +static void tsi721_mport_release(struct device *dev) +{ + struct rio_mport *mport = to_rio_mport(dev); + + tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id); +} + /** * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port * @priv: pointer to tsi721 private data @@ -2213,46 +2641,20 @@ static int tsi721_setup_mport(struct tsi721_device *priv) { struct pci_dev *pdev = priv->pdev; int err = 0; - struct rio_ops *ops; - - struct rio_mport *mport; + struct rio_mport *mport = &priv->mport; - ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); - if (!ops) { - dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n"); - return -ENOMEM; - } - - ops->lcread = tsi721_lcread; - ops->lcwrite = tsi721_lcwrite; - ops->cread = tsi721_cread_dma; - ops->cwrite = tsi721_cwrite_dma; - ops->dsend = tsi721_dsend; - ops->open_inb_mbox = tsi721_open_inb_mbox; - ops->close_inb_mbox = tsi721_close_inb_mbox; - ops->open_outb_mbox = tsi721_open_outb_mbox; - ops->close_outb_mbox = tsi721_close_outb_mbox; - ops->add_outb_message = tsi721_add_outb_message; - ops->add_inb_buffer = tsi721_add_inb_buffer; - ops->get_inb_message = tsi721_get_inb_message; - ops->map_inb = tsi721_rio_map_inb_mem; - ops->unmap_inb = tsi721_rio_unmap_inb_mem; - - mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); - if (!mport) { - kfree(ops); - dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n"); - return -ENOMEM; - } + err = rio_mport_initialize(mport); + if (err) + return err; - mport->ops = ops; + 
mport->ops = &tsi721_rio_ops; mport->index = 0; mport->sys_size = 0; /* small system */ mport->phy_type = RIO_PHY_SERIAL; mport->priv = (void *)priv; mport->phys_efptr = 0x100; mport->dev.parent = &pdev->dev; - priv->mport = mport; + mport->dev.release = tsi721_mport_release; INIT_LIST_HEAD(&mport->dbells); @@ -2270,31 +2672,28 @@ static int tsi721_setup_mport(struct tsi721_device *priv) else if (!pci_enable_msi(pdev)) priv->flags |= TSI721_USING_MSI; else - dev_info(&pdev->dev, - "MSI/MSI-X is not available. Using legacy INTx.\n"); + tsi_debug(MPORT, &pdev->dev, + "MSI/MSI-X is not available. Using legacy INTx."); #endif /* CONFIG_PCI_MSI */ - err = tsi721_request_irq(mport); + err = tsi721_request_irq(priv); - if (!err) { - tsi721_interrupts_init(priv); - ops->pwenable = tsi721_pw_enable; - } else { - dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " - "vector %02X err=0x%x\n", pdev->irq, err); - goto err_exit; + if (err) { + tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)", + pdev->irq, err); + return err; } #ifdef CONFIG_RAPIDIO_DMA_ENGINE - tsi721_register_dma(priv); + err = tsi721_register_dma(priv); + if (err) + goto err_exit; #endif /* Enable SRIO link */ iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | TSI721_DEVCTL_SRBOOT_CMPL, priv->regs + TSI721_DEVCTL); - rio_register_mport(mport); - if (mport->host_deviceid >= 0) iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED, @@ -2302,11 +2701,16 @@ static int tsi721_setup_mport(struct tsi721_device *priv) else iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + err = rio_register_mport(mport); + if (err) { + tsi721_unregister_dma(priv); + goto err_exit; + } + return 0; err_exit: - kfree(mport); - kfree(ops); + tsi721_free_irq(priv); return err; } @@ -2317,15 +2721,14 @@ static int tsi721_probe(struct pci_dev *pdev, int err; priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); - if (priv == NULL) { - dev_err(&pdev->dev, "Failed to allocate memory for device\n"); + if (!priv) { err = -ENOMEM; goto err_exit; } err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "Failed to enable PCI device\n"); + tsi_err(&pdev->dev, "Failed to enable PCI device"); goto err_clean; } @@ -2333,13 +2736,12 @@ static int tsi721_probe(struct pci_dev *pdev, #ifdef DEBUG { - int i; - for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { - dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", - i, (unsigned long long)pci_resource_start(pdev, i), - (unsigned long)pci_resource_len(pdev, i), - pci_resource_flags(pdev, i)); - } + int i; + + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + tsi_debug(INIT, &pdev->dev, "res%d %pR", + i, &pdev->resource[i]); + } } #endif /* @@ -2350,8 +2752,7 @@ static int tsi721_probe(struct pci_dev *pdev, if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { - dev_err(&pdev->dev, - "Missing or misconfigured CSR BAR0, aborting.\n"); + tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0"); err = -ENODEV; goto err_disable_pdev; } @@ -2360,8 +2761,7 @@ static int tsi721_probe(struct pci_dev *pdev, if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { - dev_err(&pdev->dev, - "Missing or misconfigured Doorbell BAR1, aborting.\n"); + tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1"); err = -ENODEV; goto err_disable_pdev; } @@ -2373,20 +2773,32 
@@ static int tsi721_probe(struct pci_dev *pdev, * It may be a good idea to keep them disabled using HW configuration * to save PCI memory space. */ - if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) && - (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) { - dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n"); + + priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0; + + if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) { + if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH) + tsi_debug(INIT, &pdev->dev, + "Prefetchable OBW BAR2 will not be used"); + else { + priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2); + priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2); + } } - if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) && - (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) { - dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n"); + if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) { + if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH) + tsi_debug(INIT, &pdev->dev, + "Prefetchable OBW BAR4 will not be used"); + else { + priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4); + priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4); + } } err = pci_request_regions(pdev, DRV_NAME); if (err) { - dev_err(&pdev->dev, "Cannot obtain PCI resources, " - "aborting.\n"); + tsi_err(&pdev->dev, "Unable to obtain PCI resources"); goto err_disable_pdev; } @@ -2394,16 +2806,14 @@ static int tsi721_probe(struct pci_dev *pdev, priv->regs = pci_ioremap_bar(pdev, BAR_0); if (!priv->regs) { - dev_err(&pdev->dev, - "Unable to map device registers space, aborting\n"); + tsi_err(&pdev->dev, "Unable to map device registers space"); err = -ENOMEM; goto err_free_res; } priv->odb_base = pci_ioremap_bar(pdev, BAR_1); if (!priv->odb_base) { - dev_err(&pdev->dev, - "Unable to map outbound doorbells space, aborting\n"); + tsi_err(&pdev->dev, "Unable to map outbound doorbells space"); err = -ENOMEM; goto err_unmap_bars; } @@ -2412,25 +2822,23 @@ static int tsi721_probe(struct pci_dev *pdev, if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { - dev_info(&pdev->dev, "Unable to set DMA mask\n"); + tsi_err(&pdev->dev, "Unable to set DMA mask"); goto err_unmap_bars; } if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); + tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); } else { err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) - dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); + tsi_info(&pdev->dev, "Unable to set consistent DMA mask"); } BUG_ON(!pci_is_pcie(pdev)); - /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ + /* Clear "no snoop" and "relaxed ordering" bits. */ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, - PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | - PCI_EXP_DEVCTL_NOSNOOP_EN, - PCI_EXP_DEVCTL_READRQ_512B); + PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); /* Adjust PCIe completion timeout. 
*/ pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); @@ -2452,7 +2860,7 @@ static int tsi721_probe(struct pci_dev *pdev, tsi721_init_sr2pc_mapping(priv); if (tsi721_bdma_maint_init(priv)) { - dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); + tsi_err(&pdev->dev, "BDMA initialization failed"); err = -ENOMEM; goto err_unmap_bars; } @@ -2471,9 +2879,13 @@ static int tsi721_probe(struct pci_dev *pdev, if (err) goto err_free_consistent; + pci_set_drvdata(pdev, priv); + tsi721_interrupts_init(priv); + return 0; err_free_consistent: + tsi721_port_write_free(priv); tsi721_doorbell_free(priv); err_free_bdma: tsi721_bdma_maint_free(priv); @@ -2493,6 +2905,53 @@ err_exit: return err; } +static void tsi721_remove(struct pci_dev *pdev) +{ + struct tsi721_device *priv = pci_get_drvdata(pdev); + + tsi_debug(EXIT, &pdev->dev, "enter"); + + tsi721_disable_ints(priv); + tsi721_free_irq(priv); + flush_scheduled_work(); + rio_unregister_mport(&priv->mport); + + tsi721_unregister_dma(priv); + tsi721_bdma_maint_free(priv); + tsi721_doorbell_free(priv); + tsi721_port_write_free(priv); + tsi721_close_sr2pc_mapping(priv); + + if (priv->regs) + iounmap(priv->regs); + if (priv->odb_base) + iounmap(priv->odb_base); +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) + pci_disable_msix(priv->pdev); + else if (priv->flags & TSI721_USING_MSI) + pci_disable_msi(priv->pdev); +#endif + pci_release_regions(pdev); + pci_clear_master(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + kfree(priv); + tsi_debug(EXIT, &pdev->dev, "exit"); +} + +static void tsi721_shutdown(struct pci_dev *pdev) +{ + struct tsi721_device *priv = pci_get_drvdata(pdev); + + tsi_debug(EXIT, &pdev->dev, "enter"); + + tsi721_disable_ints(priv); + tsi721_dma_stop_all(priv); + pci_clear_master(pdev); + pci_disable_device(pdev); +} + static const struct pci_device_id tsi721_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, { 0, } /* terminate list */ @@ -2504,14 +2963,11 @@ static struct pci_driver tsi721_driver = { .name = "tsi721", .id_table = tsi721_pci_tbl, .probe = tsi721_probe, + .remove = tsi721_remove, + .shutdown = tsi721_shutdown, }; -static int __init tsi721_init(void) -{ - return pci_register_driver(&tsi721_driver); -} - -device_initcall(tsi721_init); +module_pci_driver(tsi721_driver); MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver"); MODULE_AUTHOR("Integrated Device Technology, Inc."); diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index 9d2502543ef6..5456dbddc929 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -21,6 +21,46 @@ #ifndef __TSI721_H #define __TSI721_H +/* Debug output filtering masks */ +enum { + DBG_NONE = 0, + DBG_INIT = BIT(0), /* driver init */ + DBG_EXIT = BIT(1), /* driver exit */ + DBG_MPORT = BIT(2), /* mport add/remove */ + DBG_MAINT = BIT(3), /* maintenance ops messages */ + DBG_DMA = BIT(4), /* DMA transfer messages */ + DBG_DMAV = BIT(5), /* verbose DMA transfer messages */ + DBG_IBW = BIT(6), /* inbound window */ + DBG_EVENT = BIT(7), /* event handling messages */ + DBG_OBW = BIT(8), /* outbound window messages */ + DBG_DBELL = BIT(9), /* doorbell messages */ + DBG_OMSG = BIT(10), /* outbound messaging */ + DBG_IMSG = BIT(11), /* inbound messaging */ + DBG_ALL = ~0, +}; + +#ifdef DEBUG +extern u32 dbg_level; + +#define tsi_debug(level, dev, fmt, arg...) 
\ + do { \ + if (DBG_##level & dbg_level) \ + dev_dbg(dev, "%s: " fmt "\n", __func__, ##arg); \ + } while (0) +#else +#define tsi_debug(level, dev, fmt, arg...) \ + no_printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##arg) +#endif + +#define tsi_info(dev, fmt, arg...) \ + dev_info(dev, "%s: " fmt "\n", __func__, ##arg) + +#define tsi_warn(dev, fmt, arg...) \ + dev_warn(dev, "%s: WARNING " fmt "\n", __func__, ##arg) + +#define tsi_err(dev, fmt, arg...) \ + dev_err(dev, "%s: ERROR " fmt "\n", __func__, ##arg) + #define DRV_NAME "tsi721" #define DEFAULT_HOPCOUNT 0xff @@ -674,7 +714,7 @@ struct tsi721_bdma_chan { struct dma_chan dchan; struct tsi721_tx_desc *tx_desc; spinlock_t lock; - struct list_head active_list; + struct tsi721_tx_desc *active_tx; struct list_head queue; struct list_head free_list; struct tasklet_struct tasklet; @@ -808,9 +848,38 @@ struct msix_irq { }; #endif /* CONFIG_PCI_MSI */ +struct tsi721_ib_win_mapping { + struct list_head node; + dma_addr_t lstart; +}; + +struct tsi721_ib_win { + u64 rstart; + u32 size; + dma_addr_t lstart; + bool active; + bool xlat; + struct list_head mappings; +}; + +struct tsi721_obw_bar { + u64 base; + u64 size; + u64 free; +}; + +struct tsi721_ob_win { + u64 base; + u32 size; + u16 destid; + u64 rstart; + bool active; + struct tsi721_obw_bar *pbar; +}; + struct tsi721_device { struct pci_dev *pdev; - struct rio_mport *mport; + struct rio_mport mport; u32 flags; void __iomem *regs; #ifdef CONFIG_PCI_MSI @@ -843,11 +912,25 @@ struct tsi721_device { /* Outbound Messaging */ int omsg_init[TSI721_OMSG_CHNUM]; struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; + + /* Inbound Mapping Windows */ + struct tsi721_ib_win ib_win[TSI721_IBWIN_NUM]; + int ibwin_cnt; + + /* Outbound Mapping Windows */ + struct tsi721_obw_bar p2r_bar[2]; + struct tsi721_ob_win ob_win[TSI721_OBWIN_NUM]; + int obwin_cnt; }; #ifdef CONFIG_RAPIDIO_DMA_ENGINE extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); extern int tsi721_register_dma(struct tsi721_device *priv); +extern void tsi721_unregister_dma(struct tsi721_device *priv); +extern void tsi721_dma_stop_all(struct tsi721_device *priv); +#else +#define tsi721_dma_stop_all(priv) do {} while (0) +#define tsi721_unregister_dma(priv) do {} while (0) #endif #endif diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 47295940a868..155cae1e62de 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -30,6 +30,7 @@ #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kfifo.h> +#include <linux/sched.h> #include <linux/delay.h> #include "../../dma/dmaengine.h" @@ -63,14 +64,6 @@ struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd) return container_of(txd, struct tsi721_tx_desc, txd); } -static inline -struct tsi721_tx_desc *tsi721_dma_first_active( - struct tsi721_bdma_chan *bdma_chan) -{ - return list_first_entry(&bdma_chan->active_list, - struct tsi721_tx_desc, desc_node); -} - static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) { struct tsi721_dma_desc *bd_ptr; @@ -83,7 +76,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); #endif - dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); /* * Allocate space for DMA descriptors @@ -91,7 +84,7 @@ static int tsi721_bdma_ch_init(struct 
tsi721_bdma_chan *bdma_chan, int bd_num) */ bd_ptr = dma_zalloc_coherent(dev, (bd_num + 1) * sizeof(struct tsi721_dma_desc), - &bd_phys, GFP_KERNEL); + &bd_phys, GFP_ATOMIC); if (!bd_ptr) return -ENOMEM; @@ -99,8 +92,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) bdma_chan->bd_phys = bd_phys; bdma_chan->bd_base = bd_ptr; - dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n", - bd_ptr, (unsigned long long)bd_phys); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d descriptors @ %p (phys = %pad)", + bdma_chan->id, bd_ptr, &bd_phys); /* Allocate space for descriptor status FIFO */ sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ? @@ -108,7 +102,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) sts_size = roundup_pow_of_two(sts_size); sts_ptr = dma_zalloc_coherent(dev, sts_size * sizeof(struct tsi721_dma_sts), - &sts_phys, GFP_KERNEL); + &sts_phys, GFP_ATOMIC); if (!sts_ptr) { /* Free space allocated for DMA descriptors */ dma_free_coherent(dev, @@ -122,9 +116,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) bdma_chan->sts_base = sts_ptr; bdma_chan->sts_size = sts_size; - dev_dbg(dev, - "desc status FIFO @ %p (phys = %llx) size=0x%x\n", - sts_ptr, (unsigned long long)sts_phys, sts_size); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x", + bdma_chan->id, sts_ptr, &sts_phys, sts_size); /* Initialize DMA descriptors ring using added link descriptor */ bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29); @@ -163,8 +157,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) priv->msix[idx].irq_name, (void *)bdma_chan); if (rc) { - dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n", - bdma_chan->id); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "Unable to get MSI-X for DMAC%d-DONE", + bdma_chan->id); goto err_out; } @@ -174,8 +169,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num) priv->msix[idx].irq_name, (void *)bdma_chan); if (rc) { - dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n", - bdma_chan->id); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "Unable to get MSI-X for DMAC%d-INT", + bdma_chan->id); free_irq( priv->msix[TSI721_VECT_DMA0_DONE + bdma_chan->id].vector, @@ -286,7 +282,7 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) /* Disable BDMA channel interrupts */ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); if (bdma_chan->active) - tasklet_schedule(&bdma_chan->tasklet); + tasklet_hi_schedule(&bdma_chan->tasklet); } #ifdef CONFIG_PCI_MSI @@ -301,7 +297,8 @@ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) { struct tsi721_bdma_chan *bdma_chan = ptr; - tsi721_bdma_handler(bdma_chan); + if (bdma_chan->active) + tasklet_hi_schedule(&bdma_chan->tasklet); return IRQ_HANDLED; } #endif /* CONFIG_PCI_MSI */ @@ -310,20 +307,22 @@ static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) { if (!tsi721_dma_is_idle(bdma_chan)) { - dev_err(bdma_chan->dchan.device->dev, - "BUG: Attempt to start non-idle channel\n"); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d Attempt to start non-idle channel", + bdma_chan->id); return; } if (bdma_chan->wr_count == bdma_chan->wr_count_next) { - dev_err(bdma_chan->dchan.device->dev, - "BUG: Attempt to start DMA with no BDs ready\n"); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d Attempt to start DMA with no BDs ready %d", + 
bdma_chan->id, task_pid_nr(current)); return; } - dev_dbg(bdma_chan->dchan.device->dev, - "%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id, - bdma_chan->wr_count_next); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d", + bdma_chan->id, bdma_chan->wr_count_next, + task_pid_nr(current)); iowrite32(bdma_chan->wr_count_next, bdma_chan->regs + TSI721_DMAC_DWRCNT); @@ -425,10 +424,11 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) struct tsi721_dma_desc *bd_ptr = NULL; u32 idx, rd_idx; u32 add_count = 0; + struct device *ch_dev = &dchan->dev->device; if (!tsi721_dma_is_idle(bdma_chan)) { - dev_err(bdma_chan->dchan.device->dev, - "BUG: Attempt to use non-idle channel\n"); + tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel", + bdma_chan->id); return -EIO; } @@ -439,7 +439,7 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) rio_addr = desc->rio_addr; next_addr = -1; bcount = 0; - sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size; + sys_size = dma_to_mport(dchan->device)->sys_size; rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT); rd_idx %= (bdma_chan->bd_num + 1); @@ -451,18 +451,18 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) add_count++; } - dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n", - __func__, rd_idx, idx); + tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d", + bdma_chan->id, rd_idx, idx); for_each_sg(desc->sg, sg, desc->sg_len, i) { - dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n", - i, desc->sg_len, + tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d", + bdma_chan->id, i, desc->sg_len, (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { - dev_err(dchan->device->dev, - "%s: SG entry %d is too large\n", __func__, i); + tsi_err(ch_dev, "DMAC%d SG entry %d is too large", + bdma_chan->id, i); err = -EINVAL; break; } @@ -479,17 +479,16 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) } else if (next_addr != -1) { /* Finalize descriptor using total byte count value */ tsi721_desc_fill_end(bd_ptr, bcount, 0); - dev_dbg(dchan->device->dev, - "%s: prev desc final len: %d\n", - __func__, bcount); + tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d", + bdma_chan->id, bcount); } desc->rio_addr = rio_addr; if (i && idx == rd_idx) { - dev_dbg(dchan->device->dev, - "%s: HW descriptor ring is full @ %d\n", - __func__, i); + tsi_debug(DMAV, ch_dev, + "DMAC%d HW descriptor ring is full @ %d", + bdma_chan->id, i); desc->sg = sg; desc->sg_len -= i; break; @@ -498,13 +497,12 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx]; err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); if (err) { - dev_err(dchan->device->dev, - "Failed to build desc: err=%d\n", err); + tsi_err(ch_dev, "Failed to build desc: err=%d", err); break; } - dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n", - bd_ptr, desc->destid, desc->rio_addr); + tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx", + bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr); next_addr = sg_dma_address(sg); bcount = sg_dma_len(sg); @@ -519,8 +517,9 @@ static int tsi721_submit_sg(struct tsi721_tx_desc *desc) entry_done: if (sg_is_last(sg)) { tsi721_desc_fill_end(bd_ptr, bcount, 0); - dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n", - __func__, bcount); + tsi_debug(DMAV, ch_dev, + "DMAC%d last desc final len: %d", + bdma_chan->id, bcount); 
desc->sg_len = 0; } else { rio_addr += sg_dma_len(sg); @@ -534,35 +533,43 @@ entry_done: return err; } -static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) +static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan, + struct tsi721_tx_desc *desc) { - struct tsi721_tx_desc *desc; int err; - dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id); + + if (!tsi721_dma_is_idle(bdma_chan)) + return; /* - * If there are any new transactions in the queue add them - * into the processing list - */ - if (!list_empty(&bdma_chan->queue)) - list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); + * If there is no data transfer in progress, fetch new descriptor from + * the pending queue. + */ + + if (desc == NULL && bdma_chan->active_tx == NULL && + !list_empty(&bdma_chan->queue)) { + desc = list_first_entry(&bdma_chan->queue, + struct tsi721_tx_desc, desc_node); + list_del_init((&desc->desc_node)); + bdma_chan->active_tx = desc; + } - /* Start new transaction (if available) */ - if (!list_empty(&bdma_chan->active_list)) { - desc = tsi721_dma_first_active(bdma_chan); + if (desc) { err = tsi721_submit_sg(desc); if (!err) tsi721_start_dma(bdma_chan); else { tsi721_dma_tx_err(bdma_chan, desc); - dev_dbg(bdma_chan->dchan.device->dev, - "ERR: tsi721_submit_sg failed with err=%d\n", - err); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, + "DMAC%d ERR: tsi721_submit_sg failed with err=%d", + bdma_chan->id, err); } } - dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit", + bdma_chan->id); } static void tsi721_dma_tasklet(unsigned long data) @@ -571,22 +578,84 @@ static void tsi721_dma_tasklet(unsigned long data) u32 dmac_int, dmac_sts; dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); - dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n", - __func__, bdma_chan->id, dmac_int); + tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x", + bdma_chan->id, dmac_int); /* Clear channel interrupts */ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); if (dmac_int & TSI721_DMAC_INT_ERR) { + int i = 10000; + struct tsi721_tx_desc *desc; + + desc = bdma_chan->active_tx; dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); - dev_err(bdma_chan->dchan.device->dev, - "%s: DMA ERROR - DMAC%d_STS = 0x%x\n", - __func__, bdma_chan->id, dmac_sts); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d_STS = 0x%x did=%d raddr=0x%llx", + bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr); + + /* Re-initialize DMA channel if possible */ + + if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0) + goto err_out; + + tsi721_clr_stat(bdma_chan); + + spin_lock(&bdma_chan->lock); + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, + bdma_chan->regs + TSI721_DMAC_CTL); + do { + udelay(1); + dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + i--; + } while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i); + + if (dmac_sts & TSI721_DMAC_STS_ABORT) { + tsi_err(&bdma_chan->dchan.dev->device, + "Failed to re-initiate DMAC%d", bdma_chan->id); + spin_unlock(&bdma_chan->lock); + goto err_out; + } + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bdma_chan->bd_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK), + bdma_chan->regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)bdma_chan->sts_phys >> 32), + 
bdma_chan->regs + TSI721_DMAC_DSBH); + iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK), + bdma_chan->regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size), + bdma_chan->regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + + ioread32(bdma_chan->regs + TSI721_DMAC_INT); + + bdma_chan->wr_count = bdma_chan->wr_count_next = 0; + bdma_chan->sts_rdptr = 0; + udelay(10); + + desc = bdma_chan->active_tx; + desc->status = DMA_ERROR; + dma_cookie_complete(&desc->txd); + list_add(&desc->desc_node, &bdma_chan->free_list); + bdma_chan->active_tx = NULL; + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, NULL); + spin_unlock(&bdma_chan->lock); } if (dmac_int & TSI721_DMAC_INT_STFULL) { - dev_err(bdma_chan->dchan.device->dev, - "%s: DMAC%d descriptor status FIFO is full\n", - __func__, bdma_chan->id); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d descriptor status FIFO is full", + bdma_chan->id); } if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { @@ -594,7 +663,7 @@ static void tsi721_dma_tasklet(unsigned long data) tsi721_clr_stat(bdma_chan); spin_lock(&bdma_chan->lock); - desc = tsi721_dma_first_active(bdma_chan); + desc = bdma_chan->active_tx; if (desc->sg_len == 0) { dma_async_tx_callback callback = NULL; @@ -606,17 +675,21 @@ static void tsi721_dma_tasklet(unsigned long data) callback = desc->txd.callback; param = desc->txd.callback_param; } - list_move(&desc->desc_node, &bdma_chan->free_list); + list_add(&desc->desc_node, &bdma_chan->free_list); + bdma_chan->active_tx = NULL; + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, NULL); spin_unlock(&bdma_chan->lock); if (callback) callback(param); - spin_lock(&bdma_chan->lock); + } else { + if (bdma_chan->active) + tsi721_advance_work(bdma_chan, + bdma_chan->active_tx); + spin_unlock(&bdma_chan->lock); } - - tsi721_advance_work(bdma_chan); - spin_unlock(&bdma_chan->lock); } - +err_out: /* Re-Enable BDMA channel interrupts */ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); } @@ -629,8 +702,9 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) /* Check if the descriptor is detached from any lists */ if (!list_empty(&desc->desc_node)) { - dev_err(bdma_chan->dchan.device->dev, - "%s: wrong state of descriptor %p\n", __func__, txd); + tsi_err(&bdma_chan->dchan.dev->device, + "DMAC%d wrong state of descriptor %p", + bdma_chan->id, txd); return -EIO; } @@ -655,25 +729,25 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) struct tsi721_tx_desc *desc = NULL; int i; - dev_dbg(dchan->device->dev, "%s: for channel %d\n", - __func__, bdma_chan->id); + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); if (bdma_chan->bd_base) return TSI721_DMA_TX_QUEUE_SZ; /* Initialize BDMA channel */ if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { - dev_err(dchan->device->dev, "Unable to initialize data DMA" - " channel %d, aborting\n", bdma_chan->id); + tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d", + bdma_chan->id); return -ENODEV; } /* Allocate queue of transaction descriptors */ desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), - GFP_KERNEL); + GFP_ATOMIC); if (!desc) { - dev_err(dchan->device->dev, - "Failed to allocate logical descriptors\n"); + tsi_err(&dchan->dev->device, + "DMAC%d Failed to allocate logical descriptors", + bdma_chan->id); tsi721_bdma_ch_free(bdma_chan); return -ENOMEM; } @@ -714,15 +788,11 
@@ static void tsi721_free_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - dev_dbg(dchan->device->dev, "%s: for channel %d\n", - __func__, bdma_chan->id); + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); if (bdma_chan->bd_base == NULL) return; - BUG_ON(!list_empty(&bdma_chan->active_list)); - BUG_ON(!list_empty(&bdma_chan->queue)); - tsi721_bdma_interrupt_enable(bdma_chan, 0); bdma_chan->active = false; tsi721_sync_dma_irq(bdma_chan); @@ -736,20 +806,26 @@ static enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - return dma_cookie_status(dchan, cookie, txstate); + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + enum dma_status status; + + spin_lock_bh(&bdma_chan->lock); + status = dma_cookie_status(dchan, cookie, txstate); + spin_unlock_bh(&bdma_chan->lock); + return status; } static void tsi721_issue_pending(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - dev_dbg(dchan->device->dev, "%s: Enter\n", __func__); + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); + spin_lock_bh(&bdma_chan->lock); if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { - spin_lock_bh(&bdma_chan->lock); - tsi721_advance_work(bdma_chan); - spin_unlock_bh(&bdma_chan->lock); + tsi721_advance_work(bdma_chan, NULL); } + spin_unlock_bh(&bdma_chan->lock); } static @@ -759,18 +835,19 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, void *tinfo) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); - struct tsi721_tx_desc *desc, *_d; + struct tsi721_tx_desc *desc; struct rio_dma_ext *rext = tinfo; enum dma_rtype rtype; struct dma_async_tx_descriptor *txd = NULL; if (!sgl || !sg_len) { - dev_err(dchan->device->dev, "%s: No SG list\n", __func__); - return NULL; + tsi_err(&dchan->dev->device, "DMAC%d No SG list", + bdma_chan->id); + return ERR_PTR(-EINVAL); } - dev_dbg(dchan->device->dev, "%s: %s\n", __func__, - (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); if (dir == DMA_DEV_TO_MEM) rtype = NREAD; @@ -788,30 +865,36 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, break; } } else { - dev_err(dchan->device->dev, - "%s: Unsupported DMA direction option\n", __func__); - return NULL; + tsi_err(&dchan->dev->device, + "DMAC%d Unsupported DMA direction option", + bdma_chan->id); + return ERR_PTR(-EINVAL); } spin_lock_bh(&bdma_chan->lock); - list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { - if (async_tx_test_ack(&desc->txd)) { - list_del_init(&desc->desc_node); - desc->destid = rext->destid; - desc->rio_addr = rext->rio_addr; - desc->rio_addr_u = 0; - desc->rtype = rtype; - desc->sg_len = sg_len; - desc->sg = sgl; - txd = &desc->txd; - txd->flags = flags; - break; - } + if (!list_empty(&bdma_chan->free_list)) { + desc = list_first_entry(&bdma_chan->free_list, + struct tsi721_tx_desc, desc_node); + list_del_init(&desc->desc_node); + desc->destid = rext->destid; + desc->rio_addr = rext->rio_addr; + desc->rio_addr_u = 0; + desc->rtype = rtype; + desc->sg_len = sg_len; + desc->sg = sgl; + txd = &desc->txd; + txd->flags = flags; } spin_unlock_bh(&bdma_chan->lock); + if (!txd) { + tsi_debug(DMA, &dchan->dev->device, + "DMAC%d free TXD is not available", bdma_chan->id); + return ERR_PTR(-EBUSY); + } + return txd; } @@ -819,16 +902,18 @@ static int 
tsi721_terminate_all(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc, *_d; - u32 dmac_int; LIST_HEAD(list); - dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); spin_lock_bh(&bdma_chan->lock); bdma_chan->active = false; - if (!tsi721_dma_is_idle(bdma_chan)) { + while (!tsi721_dma_is_idle(bdma_chan)) { + + udelay(5); +#if (0) /* make sure to stop the transfer */ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL); @@ -837,9 +922,11 @@ static int tsi721_terminate_all(struct dma_chan *dchan) do { dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); +#endif } - list_splice_init(&bdma_chan->active_list, &list); + if (bdma_chan->active_tx) + list_add(&bdma_chan->active_tx->desc_node, &list); list_splice_init(&bdma_chan->queue, &list); list_for_each_entry_safe(desc, _d, &list, desc_node) @@ -850,12 +937,42 @@ static int tsi721_terminate_all(struct dma_chan *dchan) return 0; } +static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan) +{ + if (!bdma_chan->active) + return; + spin_lock_bh(&bdma_chan->lock); + if (!tsi721_dma_is_idle(bdma_chan)) { + int timeout = 100000; + + /* stop the transfer in progress */ + iowrite32(TSI721_DMAC_CTL_SUSP, + bdma_chan->regs + TSI721_DMAC_CTL); + + /* Wait until DMA channel stops */ + while (!tsi721_dma_is_idle(bdma_chan) && --timeout) + udelay(1); + } + + spin_unlock_bh(&bdma_chan->lock); +} + +void tsi721_dma_stop_all(struct tsi721_device *priv) +{ + int i; + + for (i = 0; i < TSI721_DMA_MAXCH; i++) { + if (i != TSI721_DMACH_MAINT) + tsi721_dma_stop(&priv->bdma[i]); + } +} + int tsi721_register_dma(struct tsi721_device *priv) { int i; int nr_channels = 0; int err; - struct rio_mport *mport = priv->mport; + struct rio_mport *mport = &priv->mport; INIT_LIST_HEAD(&mport->dma.channels); @@ -875,7 +992,7 @@ int tsi721_register_dma(struct tsi721_device *priv) spin_lock_init(&bdma_chan->lock); - INIT_LIST_HEAD(&bdma_chan->active_list); + bdma_chan->active_tx = NULL; INIT_LIST_HEAD(&bdma_chan->queue); INIT_LIST_HEAD(&bdma_chan->free_list); @@ -901,7 +1018,33 @@ int tsi721_register_dma(struct tsi721_device *priv) err = dma_async_device_register(&mport->dma); if (err) - dev_err(&priv->pdev->dev, "Failed to register DMA device\n"); + tsi_err(&priv->pdev->dev, "Failed to register DMA device"); return err; } + +void tsi721_unregister_dma(struct tsi721_device *priv) +{ + struct rio_mport *mport = &priv->mport; + struct dma_chan *chan, *_c; + struct tsi721_bdma_chan *bdma_chan; + + tsi721_dma_stop_all(priv); + dma_async_device_unregister(&mport->dma); + + list_for_each_entry_safe(chan, _c, &mport->dma.channels, + device_node) { + bdma_chan = to_tsi721_chan(chan); + if (bdma_chan->active) { + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + tsi721_sync_dma_irq(bdma_chan); + tasklet_kill(&bdma_chan->tasklet); + INIT_LIST_HEAD(&bdma_chan->free_list); + kfree(bdma_chan->tx_desc); + tsi721_bdma_ch_free(bdma_chan); + } + + list_del(&chan->device_node); + } +} diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c index f301f059bb85..128350f4d17a 100644 --- a/drivers/rapidio/rio-driver.c +++ b/drivers/rapidio/rio-driver.c @@ -131,6 +131,17 @@ static int rio_device_remove(struct device *dev) return 0; } +static void rio_device_shutdown(struct device *dev) +{ + struct rio_dev *rdev = to_rio_dev(dev); + struct rio_driver *rdrv = 
rdev->driver; + + dev_dbg(dev, "RIO: %s\n", __func__); + + if (rdrv && rdrv->shutdown) + rdrv->shutdown(rdev); +} + /** * rio_register_driver - register a new RIO driver * @rdrv: the RIO driver structure to register @@ -229,6 +240,7 @@ struct bus_type rio_bus_type = { .bus_groups = rio_bus_groups, .probe = rio_device_probe, .remove = rio_device_remove, + .shutdown = rio_device_shutdown, .uevent = rio_uevent, }; diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index d6a126c17c03..a63a380809d1 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -39,6 +39,13 @@ static void rio_init_em(struct rio_dev *rdev); +struct rio_id_table { + u16 start; /* logical minimal id */ + u32 max; /* max number of IDs in table */ + spinlock_t lock; + unsigned long table[0]; +}; + static int next_destid = 0; static int next_comptag = 1; @@ -62,7 +69,7 @@ static int rio_mport_phys_table[] = { static u16 rio_destid_alloc(struct rio_net *net) { int destid; - struct rio_id_table *idtab = &net->destid_table; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_first_zero_bit(idtab->table, idtab->max); @@ -88,7 +95,7 @@ static u16 rio_destid_alloc(struct rio_net *net) static int rio_destid_reserve(struct rio_net *net, u16 destid) { int oldbit; - struct rio_id_table *idtab = &net->destid_table; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; destid -= idtab->start; spin_lock(&idtab->lock); @@ -106,7 +113,7 @@ static int rio_destid_reserve(struct rio_net *net, u16 destid) */ static void rio_destid_free(struct rio_net *net, u16 destid) { - struct rio_id_table *idtab = &net->destid_table; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; destid -= idtab->start; spin_lock(&idtab->lock); @@ -121,7 +128,7 @@ static void rio_destid_free(struct rio_net *net, u16 destid) static u16 rio_destid_first(struct rio_net *net) { int destid; - struct rio_id_table *idtab = &net->destid_table; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_first_bit(idtab->table, idtab->max); @@ -141,7 +148,7 @@ static u16 rio_destid_first(struct rio_net *net) static u16 rio_destid_next(struct rio_net *net, u16 from) { int destid; - struct rio_id_table *idtab = &net->destid_table; + struct rio_id_table *idtab = (struct rio_id_table *)net->enum_data; spin_lock(&idtab->lock); destid = find_next_bit(idtab->table, idtab->max, from); @@ -187,19 +194,6 @@ static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u } /** - * rio_local_set_device_id - Set the base/extended device id for a port - * @port: RIO master port - * @did: Device ID value to be written - * - * Writes the base/extended device id from a device. 
- */ -static void rio_local_set_device_id(struct rio_mport *port, u16 did) -{ - rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size, - did)); -} - -/** * rio_clear_locks- Release all host locks and signal enumeration complete * @net: RIO network to run on * @@ -449,9 +443,6 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, if (do_enum) rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0); - - list_add_tail(&rswitch->node, &net->switches); - } else { if (do_enum) /*Enable Input Output Port (transmitter reviever)*/ @@ -461,13 +452,9 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, rdev->comp_tag & RIO_CTAG_UDEVID); } - rdev->dev.parent = &port->dev; + rdev->dev.parent = &net->dev; rio_attach_device(rdev); - - device_initialize(&rdev->dev); rdev->dev.release = rio_release_dev; - rio_dev_get(rdev); - rdev->dma_mask = DMA_BIT_MASK(32); rdev->dev.dma_mask = &rdev->dma_mask; rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -480,6 +467,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, if (ret) goto cleanup; + rio_dev_get(rdev); + return rdev; cleanup: @@ -621,8 +610,6 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size), hopcount, 1); if (rdev) { - /* Add device to the global and bus/net specific list. */ - list_add_tail(&rdev->net_list, &net->devices); rdev->prev = prev; if (prev && rio_is_switch(prev)) prev->rswitch->nextdev[prev_port] = rdev; @@ -778,8 +765,6 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, /* Setup new RIO device */ if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) { - /* Add device to the global and bus/net specific list. */ - list_add_tail(&rdev->net_list, &net->devices); rdev->prev = prev; if (prev && rio_is_switch(prev)) prev->rswitch->nextdev[prev_port] = rdev; @@ -864,50 +849,71 @@ static int rio_mport_is_active(struct rio_mport *port) return result & RIO_PORT_N_ERR_STS_PORT_OK; } -/** - * rio_alloc_net- Allocate and configure a new RIO network - * @port: Master port associated with the RIO network +static void rio_scan_release_net(struct rio_net *net) +{ + pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); + kfree(net->enum_data); +} + +static void rio_scan_release_dev(struct device *dev) +{ + struct rio_net *net; + + net = to_rio_net(dev); + pr_debug("RIO-SCAN: %s: net_%d\n", __func__, net->id); + kfree(net); +} + +/* + * rio_scan_alloc_net - Allocate and configure a new RIO network + * @mport: Master port associated with the RIO network * @do_enum: Enumeration/Discovery mode flag * @start: logical minimal start id for new net * - * Allocates a RIO network structure, initializes per-network - * list heads, and adds the associated master port to the - * network list of associated master ports. Returns a - * RIO network pointer on success or %NULL on failure. + * Allocates a new RIO network structure and initializes enumerator-specific + * part of it (if required). + * Returns a RIO network pointer on success or %NULL on failure. 
*/ -static struct rio_net *rio_alloc_net(struct rio_mport *port, - int do_enum, u16 start) +static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, + int do_enum, u16 start) { struct rio_net *net; - net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); + net = rio_alloc_net(mport); + if (net && do_enum) { - net->destid_table.table = kcalloc( - BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)), - sizeof(long), - GFP_KERNEL); + struct rio_id_table *idtab; + size_t size; + + size = sizeof(struct rio_id_table) + + BITS_TO_LONGS( + RIO_MAX_ROUTE_ENTRIES(mport->sys_size) + ) * sizeof(long); + + idtab = kzalloc(size, GFP_KERNEL); - if (net->destid_table.table == NULL) { + if (idtab == NULL) { pr_err("RIO: failed to allocate destID table\n"); - kfree(net); + rio_free_net(net); net = NULL; } else { - net->destid_table.start = start; - net->destid_table.max = - RIO_MAX_ROUTE_ENTRIES(port->sys_size); - spin_lock_init(&net->destid_table.lock); + net->enum_data = idtab; + net->release = rio_scan_release_net; + idtab->start = start; + idtab->max = RIO_MAX_ROUTE_ENTRIES(mport->sys_size); + spin_lock_init(&idtab->lock); } } if (net) { - INIT_LIST_HEAD(&net->node); - INIT_LIST_HEAD(&net->devices); - INIT_LIST_HEAD(&net->switches); - INIT_LIST_HEAD(&net->mports); - list_add_tail(&port->nnode, &net->mports); - net->hport = port; - net->id = port->id; + net->id = mport->id; + net->hport = mport; + dev_set_name(&net->dev, "rnet_%d", net->id); + net->dev.parent = &mport->dev; + net->dev.release = rio_scan_release_dev; + rio_add_net(net); } + return net; } @@ -968,17 +974,6 @@ static void rio_init_em(struct rio_dev *rdev) } /** - * rio_pw_enable - Enables/disables port-write handling by a master port - * @port: Master port associated with port-write handling - * @enable: 1=enable, 0=disable - */ -static void rio_pw_enable(struct rio_mport *port, int enable) -{ - if (port->ops->pwenable) - port->ops->pwenable(port, enable); -} - -/** * rio_enum_mport- Start enumeration through a master port * @mport: Master port to send transactions * @flags: Enumeration control flags @@ -1016,7 +1011,7 @@ static int rio_enum_mport(struct rio_mport *mport, u32 flags) /* If master port has an active link, allocate net and enum peers */ if (rio_mport_is_active(mport)) { - net = rio_alloc_net(mport, 1, 0); + net = rio_scan_alloc_net(mport, 1, 0); if (!net) { printk(KERN_ERR "RIO: failed to allocate new net\n"); rc = -ENOMEM; @@ -1133,7 +1128,7 @@ static int rio_disc_mport(struct rio_mport *mport, u32 flags) enum_done: pr_debug("RIO: ... 
enumeration done\n"); - net = rio_alloc_net(mport, 0, 0); + net = rio_scan_alloc_net(mport, 0, 0); if (!net) { printk(KERN_ERR "RIO: Failed to allocate new net\n"); goto bail; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index e220edc85c68..0dcaa660cba1 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -30,6 +30,20 @@ #include "rio.h" +/* + * struct rio_pwrite - RIO portwrite event + * @node: Node in list of doorbell events + * @pwcback: Doorbell event callback + * @context: Handler specific context to pass on event + */ +struct rio_pwrite { + struct list_head node; + + int (*pwcback)(struct rio_mport *mport, void *context, + union rio_pw_msg *msg, int step); + void *context; +}; + MODULE_DESCRIPTION("RapidIO Subsystem Core"); MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>"); MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>"); @@ -42,6 +56,7 @@ MODULE_PARM_DESC(hdid, "Destination ID assignment to local RapidIO controllers"); static LIST_HEAD(rio_devices); +static LIST_HEAD(rio_nets); static DEFINE_SPINLOCK(rio_global_list_lock); static LIST_HEAD(rio_mports); @@ -68,6 +83,89 @@ u16 rio_local_get_device_id(struct rio_mport *port) } /** + * rio_query_mport - Query mport device attributes + * @port: mport device to query + * @mport_attr: mport attributes data structure + * + * Returns attributes of specified mport through the + * pointer to attributes data structure. + */ +int rio_query_mport(struct rio_mport *port, + struct rio_mport_attr *mport_attr) +{ + if (!port->ops->query_mport) + return -ENODATA; + return port->ops->query_mport(port, mport_attr); +} +EXPORT_SYMBOL(rio_query_mport); + +/** + * rio_alloc_net- Allocate and initialize a new RIO network data structure + * @mport: Master port associated with the RIO network + * + * Allocates a RIO network structure, initializes per-network + * list heads, and adds the associated master port to the + * network list of associated master ports. Returns a + * RIO network pointer on success or %NULL on failure. + */ +struct rio_net *rio_alloc_net(struct rio_mport *mport) +{ + struct rio_net *net; + + net = kzalloc(sizeof(struct rio_net), GFP_KERNEL); + if (net) { + INIT_LIST_HEAD(&net->node); + INIT_LIST_HEAD(&net->devices); + INIT_LIST_HEAD(&net->switches); + INIT_LIST_HEAD(&net->mports); + mport->net = net; + } + return net; +} +EXPORT_SYMBOL_GPL(rio_alloc_net); + +int rio_add_net(struct rio_net *net) +{ + int err; + + err = device_register(&net->dev); + if (err) + return err; + spin_lock(&rio_global_list_lock); + list_add_tail(&net->node, &rio_nets); + spin_unlock(&rio_global_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_add_net); + +void rio_free_net(struct rio_net *net) +{ + spin_lock(&rio_global_list_lock); + if (!list_empty(&net->node)) + list_del(&net->node); + spin_unlock(&rio_global_list_lock); + if (net->release) + net->release(net); + device_unregister(&net->dev); +} +EXPORT_SYMBOL_GPL(rio_free_net); + +/** + * rio_local_set_device_id - Set the base/extended device id for a port + * @port: RIO master port + * @did: Device ID value to be written + * + * Writes the base/extended device id from a device. 
+ */ +void rio_local_set_device_id(struct rio_mport *port, u16 did) +{ + rio_local_write_config_32(port, RIO_DID_CSR, + RIO_SET_DID(port->sys_size, did)); +} +EXPORT_SYMBOL_GPL(rio_local_set_device_id); + +/** * rio_add_device- Adds a RIO device to the device model * @rdev: RIO device * @@ -79,12 +177,19 @@ int rio_add_device(struct rio_dev *rdev) { int err; - err = device_add(&rdev->dev); + atomic_set(&rdev->state, RIO_DEVICE_RUNNING); + err = device_register(&rdev->dev); if (err) return err; spin_lock(&rio_global_list_lock); list_add_tail(&rdev->global_list, &rio_devices); + if (rdev->net) { + list_add_tail(&rdev->net_list, &rdev->net->devices); + if (rdev->pef & RIO_PEF_SWITCH) + list_add_tail(&rdev->rswitch->node, + &rdev->net->switches); + } spin_unlock(&rio_global_list_lock); rio_create_sysfs_dev_files(rdev); @@ -93,6 +198,33 @@ int rio_add_device(struct rio_dev *rdev) } EXPORT_SYMBOL_GPL(rio_add_device); +/* + * rio_del_device - removes a RIO device from the device model + * @rdev: RIO device + * @state: device state to set during removal process + * + * Removes the RIO device to the kernel device list and subsystem's device list. + * Clears sysfs entries for the removed device. + */ +void rio_del_device(struct rio_dev *rdev, enum rio_device_state state) +{ + pr_debug("RIO: %s: removing %s\n", __func__, rio_name(rdev)); + atomic_set(&rdev->state, state); + spin_lock(&rio_global_list_lock); + list_del(&rdev->global_list); + if (rdev->net) { + list_del(&rdev->net_list); + if (rdev->pef & RIO_PEF_SWITCH) { + list_del(&rdev->rswitch->node); + kfree(rdev->rswitch->route_table); + } + } + spin_unlock(&rio_global_list_lock); + rio_remove_sysfs_dev_files(rdev); + device_unregister(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rio_del_device); + /** * rio_request_inb_mbox - request inbound mailbox service * @mport: RIO master port from which to allocate the mailbox resource @@ -258,7 +390,9 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res, dbell->dinb = dinb; dbell->dev_id = dev_id; + mutex_lock(&mport->lock); list_add_tail(&dbell->node, &mport->dbells); + mutex_unlock(&mport->lock); out: return rc; @@ -322,12 +456,15 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) int rc = 0, found = 0; struct rio_dbell *dbell; + mutex_lock(&mport->lock); list_for_each_entry(dbell, &mport->dbells, node) { if ((dbell->res->start == start) && (dbell->res->end == end)) { + list_del(&dbell->node); found = 1; break; } } + mutex_unlock(&mport->lock); /* If we can't find an exact match, fail */ if (!found) { @@ -335,9 +472,6 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) goto out; } - /* Delete from list */ - list_del(&dbell->node); - /* Release the doorbell resource */ rc = release_resource(dbell->res); @@ -394,7 +528,71 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) } /** - * rio_request_inb_pwrite - request inbound port-write message service + * rio_add_mport_pw_handler - add port-write message handler into the list + * of mport specific pw handlers + * @mport: RIO master port to bind the portwrite callback + * @context: Handler specific context to pass on event + * @pwcback: Callback to execute when portwrite is received + * + * Returns 0 if the request has been satisfied. 
+ */ +int rio_add_mport_pw_handler(struct rio_mport *mport, void *context, + int (*pwcback)(struct rio_mport *mport, + void *context, union rio_pw_msg *msg, int step)) +{ + int rc = 0; + struct rio_pwrite *pwrite; + + pwrite = kzalloc(sizeof(struct rio_pwrite), GFP_KERNEL); + if (!pwrite) { + rc = -ENOMEM; + goto out; + } + + pwrite->pwcback = pwcback; + pwrite->context = context; + mutex_lock(&mport->lock); + list_add_tail(&pwrite->node, &mport->pwrites); + mutex_unlock(&mport->lock); +out: + return rc; +} +EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler); + +/** + * rio_del_mport_pw_handler - remove port-write message handler from the list + * of mport specific pw handlers + * @mport: RIO master port to bind the portwrite callback + * @context: Registered handler specific context to pass on event + * @pwcback: Registered callback function + * + * Returns 0 if the request has been satisfied. + */ +int rio_del_mport_pw_handler(struct rio_mport *mport, void *context, + int (*pwcback)(struct rio_mport *mport, + void *context, union rio_pw_msg *msg, int step)) +{ + int rc = -EINVAL; + struct rio_pwrite *pwrite; + + mutex_lock(&mport->lock); + list_for_each_entry(pwrite, &mport->pwrites, node) { + if (pwrite->pwcback == pwcback && pwrite->context == context) { + list_del(&pwrite->node); + kfree(pwrite); + rc = 0; + break; + } + } + mutex_unlock(&mport->lock); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_del_mport_pw_handler); + +/** + * rio_request_inb_pwrite - request inbound port-write message service for + * specific RapidIO device * @rdev: RIO device to which register inbound port-write callback routine * @pwcback: Callback routine to execute when port-write is received * @@ -419,6 +617,7 @@ EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); /** * rio_release_inb_pwrite - release inbound port-write message service + * associated with specific RapidIO device * @rdev: RIO device which registered for inbound port-write callback * * Removes callback from the rio_dev structure. Returns 0 if the request @@ -440,6 +639,24 @@ int rio_release_inb_pwrite(struct rio_dev *rdev) EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); /** + * rio_pw_enable - Enables/disables port-write handling by a master port + * @mport: Master port associated with port-write handling + * @enable: 1=enable, 0=disable + */ +void rio_pw_enable(struct rio_mport *mport, int enable) +{ + if (mport->ops->pwenable) { + mutex_lock(&mport->lock); + + if ((enable && ++mport->pwe_refcnt == 1) || + (!enable && mport->pwe_refcnt && --mport->pwe_refcnt == 0)) + mport->ops->pwenable(mport, enable); + mutex_unlock(&mport->lock); + } +} +EXPORT_SYMBOL_GPL(rio_pw_enable); + +/** * rio_map_inb_region -- Map inbound memory region. * @mport: Master port. * @local: physical address of memory region to be mapped @@ -483,6 +700,56 @@ void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) EXPORT_SYMBOL_GPL(rio_unmap_inb_region); /** + * rio_map_outb_region -- Map outbound memory region. + * @mport: Master port. + * @destid: destination id window points to + * @rbase: RIO base address window translates to + * @size: Size of the memory region + * @rflags: Flags for mapping. + * @local: physical address of memory region mapped + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. 
+ */ +int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase, + u32 size, u32 rflags, dma_addr_t *local) +{ + int rc = 0; + unsigned long flags; + + if (!mport->ops->map_outb) + return -ENODEV; + + spin_lock_irqsave(&rio_mmap_lock, flags); + rc = mport->ops->map_outb(mport, destid, rbase, size, + rflags, local); + spin_unlock_irqrestore(&rio_mmap_lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_map_outb_region); + +/** + * rio_unmap_inb_region -- Unmap the inbound memory region + * @mport: Master port + * @destid: destination id mapping points to + * @rstart: RIO base address window translates to + */ +void rio_unmap_outb_region(struct rio_mport *mport, u16 destid, u64 rstart) +{ + unsigned long flags; + + if (!mport->ops->unmap_outb) + return; + + spin_lock_irqsave(&rio_mmap_lock, flags); + mport->ops->unmap_outb(mport, destid, rstart); + spin_unlock_irqrestore(&rio_mmap_lock, flags); +} +EXPORT_SYMBOL_GPL(rio_unmap_outb_region); + +/** * rio_mport_get_physefb - Helper function that returns register offset * for Physical Layer Extended Features Block. * @port: Master port to issue transaction @@ -864,52 +1131,66 @@ rd_err: } /** - * rio_inb_pwrite_handler - process inbound port-write message + * rio_inb_pwrite_handler - inbound port-write message handler + * @mport: mport device associated with port-write * @pw_msg: pointer to inbound port-write message * * Processes an inbound port-write message. Returns 0 if the request * has been satisfied. */ -int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) +int rio_inb_pwrite_handler(struct rio_mport *mport, union rio_pw_msg *pw_msg) { struct rio_dev *rdev; u32 err_status, em_perrdet, em_ltlerrdet; int rc, portnum; - - rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); - if (rdev == NULL) { - /* Device removed or enumeration error */ - pr_debug("RIO: %s No matching device for CTag 0x%08x\n", - __func__, pw_msg->em.comptag); - return -EIO; - } - - pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + struct rio_pwrite *pwrite; #ifdef DEBUG_PW { - u32 i; - for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) { + u32 i; + + pr_debug("%s: PW to mport_%d:\n", __func__, mport->id); + for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i = i + 4) { pr_debug("0x%02x: %08x %08x %08x %08x\n", - i*4, pw_msg->raw[i], pw_msg->raw[i + 1], - pw_msg->raw[i + 2], pw_msg->raw[i + 3]); - i += 4; - } + i * 4, pw_msg->raw[i], pw_msg->raw[i + 1], + pw_msg->raw[i + 2], pw_msg->raw[i + 3]); + } } #endif - /* Call an external service function (if such is registered - * for this device). This may be the service for endpoints that send - * device-specific port-write messages. End-point messages expected - * to be handled completely by EP specific device driver. + rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); + if (rdev) { + pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + } else { + pr_debug("RIO: %s No matching device for CTag 0x%08x\n", + __func__, pw_msg->em.comptag); + } + + /* Call a device-specific handler (if it is registered for the device). + * This may be the service for endpoints that send device-specific + * port-write messages. End-point messages expected to be handled + * completely by EP specific device driver. * For switches rc==0 signals that no standard processing required. 
*/ - if (rdev->pwcback != NULL) { + if (rdev && rdev->pwcback) { rc = rdev->pwcback(rdev, pw_msg, 0); if (rc == 0) return 0; } + mutex_lock(&mport->lock); + list_for_each_entry(pwrite, &mport->pwrites, node) + pwrite->pwcback(mport, pwrite->context, pw_msg, 0); + mutex_unlock(&mport->lock); + + if (!rdev) + return 0; + + /* + * FIXME: The code below stays as it was before for now until we decide + * how to do default PW handling in combination with per-mport callbacks + */ + portnum = pw_msg->em.is_port & 0xFF; /* Check if device and route to it are functional: @@ -1909,32 +2190,31 @@ static int rio_get_hdid(int index) return hdid[index]; } -int rio_register_mport(struct rio_mport *port) +int rio_mport_initialize(struct rio_mport *mport) { - struct rio_scan_node *scan = NULL; - int res = 0; - if (next_portid >= RIO_MAX_MPORTS) { pr_err("RIO: reached specified max number of mports\n"); - return 1; + return -ENODEV; } - port->id = next_portid++; - port->host_deviceid = rio_get_hdid(port->id); - port->nscan = NULL; + atomic_set(&mport->state, RIO_DEVICE_INITIALIZING); + mport->id = next_portid++; + mport->host_deviceid = rio_get_hdid(mport->id); + mport->nscan = NULL; + mutex_init(&mport->lock); + mport->pwe_refcnt = 0; + INIT_LIST_HEAD(&mport->pwrites); - dev_set_name(&port->dev, "rapidio%d", port->id); - port->dev.class = &rio_mport_class; + return 0; +} +EXPORT_SYMBOL_GPL(rio_mport_initialize); - res = device_register(&port->dev); - if (res) - dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", - port->id, res); - else - dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id); +int rio_register_mport(struct rio_mport *port) +{ + struct rio_scan_node *scan = NULL; + int res = 0; mutex_lock(&rio_mport_list_lock); - list_add_tail(&port->node, &rio_mports); /* * Check if there are any registered enumeration/discovery operations @@ -1948,12 +2228,73 @@ int rio_register_mport(struct rio_mport *port) break; } } + + list_add_tail(&port->node, &rio_mports); mutex_unlock(&rio_mport_list_lock); + dev_set_name(&port->dev, "rapidio%d", port->id); + port->dev.class = &rio_mport_class; + atomic_set(&port->state, RIO_DEVICE_RUNNING); + + res = device_register(&port->dev); + if (res) + dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", + port->id, res); + else + dev_dbg(&port->dev, "RIO: registered mport%d\n", port->id); + + return res; +} +EXPORT_SYMBOL_GPL(rio_register_mport); + +static int rio_mport_cleanup_callback(struct device *dev, void *data) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + if (dev->bus == &rio_bus_type) + rio_del_device(rdev, RIO_DEVICE_SHUTDOWN); + return 0; +} + +static int rio_net_remove_children(struct rio_net *net) +{ + /* + * Unregister all RapidIO devices residing on this net (this will + * invoke notification of registered subsystem interfaces as well). 
+ */ + device_for_each_child(&net->dev, NULL, rio_mport_cleanup_callback); + return 0; +} + +int rio_unregister_mport(struct rio_mport *port) +{ pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id); + + /* Transition mport to the SHUTDOWN state */ + if (atomic_cmpxchg(&port->state, + RIO_DEVICE_RUNNING, + RIO_DEVICE_SHUTDOWN) != RIO_DEVICE_RUNNING) { + pr_err("RIO: %s unexpected state transition for mport %s\n", + __func__, port->name); + } + + if (port->net && port->net->hport == port) { + rio_net_remove_children(port->net); + rio_free_net(port->net); + } + + /* + * Unregister all RapidIO devices attached to this mport (this will + * invoke notification of registered subsystem interfaces as well). + */ + mutex_lock(&rio_mport_list_lock); + list_del(&port->node); + mutex_unlock(&rio_mport_list_lock); + device_unregister(&port->dev); + return 0; } -EXPORT_SYMBOL_GPL(rio_register_mport); +EXPORT_SYMBOL_GPL(rio_unregister_mport); EXPORT_SYMBOL_GPL(rio_local_get_device_id); EXPORT_SYMBOL_GPL(rio_get_device); diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h index 2d0550e08ea2..625d09add001 100644 --- a/drivers/rapidio/rio.h +++ b/drivers/rapidio/rio.h @@ -28,6 +28,7 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount); extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); +extern void rio_remove_sysfs_dev_files(struct rio_dev *rdev); extern int rio_lock_device(struct rio_mport *port, u16 destid, u8 hopcount, int wait_ms); extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount); @@ -38,7 +39,11 @@ extern int rio_route_get_entry(struct rio_dev *rdev, u16 table, extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock); extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); +extern struct rio_net *rio_alloc_net(struct rio_mport *mport); +extern int rio_add_net(struct rio_net *net); +extern void rio_free_net(struct rio_net *net); extern int rio_add_device(struct rio_dev *rdev); +extern void rio_del_device(struct rio_dev *rdev, enum rio_device_state state); extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid, u8 hopcount, u8 port_num); extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops); diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 973f5cdec192..3e1572cb457b 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -657,7 +657,7 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi) #if BITS_PER_LONG == 32 return 1; #elif defined(CONFIG_COMPAT) - return unlikely(is_compat_task() || (sbi->ll_flags & LL_SBI_32BIT_API)); + return unlikely(in_compat_syscall() || (sbi->ll_flags & LL_SBI_32BIT_API)); #else return unlikely(sbi->ll_flags & LL_SBI_32BIT_API); #endif diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 8fc284cdce4e..8f89bd8a826a 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -484,7 +484,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) static int die_nmi_called; if (!hpwdt_nmi_decoding) - goto out; + return NMI_DONE; spin_lock_irqsave(&rom_lock, rom_pl); if (!die_nmi_called && !is_icru && !is_uefi) @@ -497,11 +497,11 @@ static int 
hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 
 	if (!is_icru && !is_uefi) {
 		if (cmn_regs.u1.ral == 0) {
-			panic("An NMI occurred, "
-				"but unable to determine source.\n");
+			nmi_panic(regs, "An NMI occurred, but unable to determine source.\n");
+			return NMI_HANDLED;
 		}
 	}
-	panic("An NMI occurred. Depending on your system the reason "
+	nmi_panic(regs, "An NMI occurred. Depending on your system the reason "
 		"for the NMI is logged in any one of the following "
 		"resources:\n"
 		"1. Integrated Management Log (IML)\n"
@@ -509,8 +509,7 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
 		"3. OA Forward Progress Log\n"
 		"4. iLO Event Log");
 
-out:
-	return NMI_DONE;
+	return NMI_HANDLED;
 }
 
 #endif /* CONFIG_HPWDT_NMI_DECODING */
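The rio-driver.c hunk above adds a bus-level shutdown method that calls rdrv->shutdown for every bound device. A minimal sketch (not part of the patch) of a RIO device driver supplying that hook; the driver name and example_rio_shutdown() are placeholders, and only the fields relevant to the new callback are shown:

#include <linux/device.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>

static void example_rio_shutdown(struct rio_dev *rdev)
{
	/* Quiesce the device; invoked through rio_device_shutdown() at reboot */
	dev_dbg(&rdev->dev, "shutting down %s\n", rio_name(rdev));
}

static struct rio_driver example_rio_driver = {
	.name     = "example_rio",
	.shutdown = example_rio_shutdown,
	/* .id_table, .probe and .remove omitted for brevity */
};

The driver is registered as before with rio_register_driver(&example_rio_driver); no other change is needed to opt into the shutdown path.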
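rio_query_mport() simply forwards to the mport driver's query_mport operation and returns -ENODATA when none is provided. A sketch of a caller that only checks the return code (the layout of struct rio_mport_attr is defined elsewhere in this series, so its fields are not dereferenced here); the same headers as the sketch above are assumed:

static int example_query(struct rio_mport *mport)
{
	struct rio_mport_attr attr;
	int rc;

	rc = rio_query_mport(mport, &attr);
	if (rc == -ENODATA)
		pr_info("rapidio%d: query_mport not implemented\n", mport->id);
	return rc;
}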
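The new rio_add_mport_pw_handler()/rio_del_mport_pw_handler() pair lets a consumer receive port-write messages per mport rather than per device, and rio_pw_enable() is now exported and reference counted. A sketch using hypothetical names (example_pw_handler, the opaque ctx pointer), assuming the same headers as above:

static int example_pw_handler(struct rio_mport *mport, void *context,
			      union rio_pw_msg *msg, int step)
{
	/* The reporting device's component tag is carried in the message */
	pr_info("rapidio%d: port-write, CTag 0x%08x\n",
		mport->id, msg->em.comptag);
	return 0;
}

static int example_attach_pw(struct rio_mport *mport, void *ctx)
{
	int rc;

	rc = rio_add_mport_pw_handler(mport, ctx, example_pw_handler);
	if (rc)
		return rc;
	rio_pw_enable(mport, 1);	/* refcounted enable on this mport */
	return 0;
}

static void example_detach_pw(struct rio_mport *mport, void *ctx)
{
	rio_pw_enable(mport, 0);
	rio_del_mport_pw_handler(mport, ctx, example_pw_handler);
}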
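rio_map_outb_region()/rio_unmap_outb_region() wrap the mport driver's map_outb/unmap_outb operations under rio_mmap_lock. A sketch of mapping a window to a peer; the destination ID, RIO base address, window size and the rflags value of 0 are illustrative only:

static int example_map_peer(struct rio_mport *mport, u16 destid)
{
	dma_addr_t local;
	int rc;

	/* Map 64 KB of the peer's RIO address space starting at 0x10000 */
	rc = rio_map_outb_region(mport, destid, 0x10000, 0x10000, 0, &local);
	if (rc)
		return rc;	/* -ENODEV if the driver lacks map_outb */

	/* ... use 'local' as the DMA address for transfers to the peer ... */

	rio_unmap_outb_region(mport, destid, 0x10000);
	return 0;
}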
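Finally, mport registration is split into rio_mport_initialize() (ID assignment, mutex and port-write list setup) followed by rio_register_mport(), and rio_unregister_mport() tears down the net and child devices on removal. A sketch of the resulting probe/remove flow in a hypothetical controller driver (struct example_hw and the elided hardware setup are placeholders):

struct example_hw {			/* hypothetical controller state */
	struct rio_mport mport;
	/* registers, IRQs, DMA channels, ... */
};

static int example_mport_probe(struct example_hw *hw)
{
	int rc;

	rc = rio_mport_initialize(&hw->mport);	/* assigns id, inits lock/lists */
	if (rc)
		return rc;

	/* ... program the controller, request IRQs, set hw->mport.ops ... */

	return rio_register_mport(&hw->mport);	/* registers class device, binds scan ops */
}

static void example_mport_remove(struct example_hw *hw)
{
	rio_unregister_mport(&hw->mport);	/* removes net, children, class device */
}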