path: root/drivers/net/ethernet/ibm/ibmveth.c
author	Linus Torvalds	2017-07-06 19:20:54 -0700
committer	Linus Torvalds	2017-07-06 19:20:54 -0700
commit	f72e24a1240b78f421649c4d88f5c24ab1c896a1 (patch)
tree	90bed3bf33ae0abf5636dafcc3eda3cc354612b0 /drivers/net/ethernet/ibm/ibmveth.c
parent	2c669275dc3245e2866a0eea15bda8ec8d1ab8db (diff)
parent	1655cf8829d82d367d8fdb5cb58e5885d7d2a391 (diff)
Merge tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping infrastructure from Christoph Hellwig:
 "This is the first pull request for the new dma-mapping subsystem. In
  this new subsystem we'll try to properly maintain all the generic
  code related to dma-mapping, and will further consolidate arch code
  into common helpers.

  This pull request contains:

   - removal of the DMA_ERROR_CODE macro, replacing it with calls to
     ->mapping_error so that the dma_map_ops instances are more self
     contained and can be shared across architectures (me)

   - removal of the ->set_dma_mask method, which duplicates the
     ->dma_capable one in terms of functionality, but requires more
     duplicate code

   - various updates for the coherent dma pool and related arm code
     (Vladimir)

   - various smaller cleanups (me)"

* tag 'dma-mapping-4.13' of git://git.infradead.org/users/hch/dma-mapping: (56 commits)
  ARM: dma-mapping: Remove traces of NOMMU code
  ARM: NOMMU: Set ARM_DMA_MEM_BUFFERABLE for M-class cpus
  ARM: NOMMU: Introduce dma operations for noMMU
  drivers: dma-mapping: allow dma_common_mmap() for NOMMU
  drivers: dma-coherent: Introduce default DMA pool
  drivers: dma-coherent: Account dma_pfn_offset when used with device tree
  dma: Take into account dma_pfn_offset
  dma-mapping: replace dmam_alloc_noncoherent with dmam_alloc_attrs
  dma-mapping: remove dmam_free_noncoherent
  crypto: qat - avoid an uninitialized variable warning
  au1100fb: remove a bogus dma_free_nonconsistent call
  MAINTAINERS: add entry for dma mapping helpers
  powerpc: merge __dma_set_mask into dma_set_mask
  dma-mapping: remove the set_dma_mask method
  powerpc/cell: use the dma_supported method for ops switching
  powerpc/cell: clean up fixed mapping dma_ops initialization
  tile: remove dma_supported and mapping_error methods
  xen-swiotlb: remove xen_swiotlb_set_dma_mask
  arm: implement ->dma_supported instead of ->set_dma_mask
  mips/loongson64: implement ->dma_supported instead of ->set_dma_mask
  ...
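For readers tracking the ibmveth diff below, the practical effect of the DMA_ERROR_CODE removal is that a failed streaming mapping can only be detected by calling dma_mapping_error() immediately after the map, not by comparing the returned dma_addr_t against an arch-specific constant. A minimal sketch of the portable pattern (variable and label names are illustrative, not from this patch):

	dma_addr_t handle;

	handle = dma_map_single(dev, cpu_addr, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, handle)) {
		/* handle is not a valid bus address; never unmap it */
		dev_err(dev, "DMA mapping failed\n");
		goto err_unwind;	/* release earlier allocations */
	}

Since the error cookie is no longer a known constant, a driver can no longer pre-initialize handles to DMA_ERROR_CODE and test for that value during teardown, which is why the patch below replaces ibmveth's catch-all ibmveth_cleanup() with explicit unwind labels.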
Diffstat (limited to 'drivers/net/ethernet/ibm/ibmveth.c')
-rw-r--r--	drivers/net/ethernet/ibm/ibmveth.c	159
1 file changed, 74 insertions(+), 85 deletions(-)
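The hunks below restructure ibmveth_open()'s error handling into the kernel's conventional goto-unwind ladder: each allocation failure jumps to a label that releases only what was successfully acquired, in reverse order, so no "did this mapping succeed?" state needs to be remembered. A minimal, self-contained sketch of the idiom with hypothetical names (real code would stash the pointers in device private state):

	static int example_open(void)
	{
		void *first, *second;
		int rc = -ENOMEM;

		first = kmalloc(4096, GFP_KERNEL);
		if (!first)
			goto out;

		second = kmalloc(4096, GFP_KERNEL);
		if (!second)
			goto out_free_first;

		/* further setup steps each get their own label */
		return 0;

	out_free_first:
		kfree(first);	/* undo in reverse order of acquisition */
	out:
		return rc;
	}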
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3e0a695537e2..d17c2b03f580 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -469,56 +469,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
- int i;
- struct device *dev = &adapter->vdev->dev;
-
- if (adapter->buffer_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
- dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->buffer_list_addr);
- adapter->buffer_list_addr = NULL;
- }
-
- if (adapter->filter_list_addr != NULL) {
- if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
- dma_unmap_single(dev, adapter->filter_list_dma, 4096,
- DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = DMA_ERROR_CODE;
- }
- free_page((unsigned long)adapter->filter_list_addr);
- adapter->filter_list_addr = NULL;
- }
-
- if (adapter->rx_queue.queue_addr != NULL) {
- dma_free_coherent(dev, adapter->rx_queue.queue_len,
- adapter->rx_queue.queue_addr,
- adapter->rx_queue.queue_dma);
- adapter->rx_queue.queue_addr = NULL;
- }
-
- for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
- if (adapter->rx_buff_pool[i].active)
- ibmveth_free_buffer_pool(adapter,
- &adapter->rx_buff_pool[i]);
-
- if (adapter->bounce_buffer != NULL) {
- if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- dma_unmap_single(&adapter->vdev->dev,
- adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- adapter->bounce_buffer_dma = DMA_ERROR_CODE;
- }
- kfree(adapter->bounce_buffer);
- adapter->bounce_buffer = NULL;
- }
-}
-
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -575,14 +525,17 @@ static int ibmveth_open(struct net_device *netdev)
for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
rxq_entries += adapter->rx_buff_pool[i].size;
+ rc = -ENOMEM;
adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->buffer_list_addr) {
+ netdev_err(netdev, "unable to allocate list pages\n");
+ goto out;
+ }
- if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
- netdev_err(netdev, "unable to allocate filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+ if (!adapter->filter_list_addr) {
+ netdev_err(netdev, "unable to allocate filter pages\n");
+ goto out_free_buffer_list;
}
dev = &adapter->vdev->dev;
@@ -592,22 +545,21 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_addr =
dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
&adapter->rx_queue.queue_dma, GFP_KERNEL);
- if (!adapter->rx_queue.queue_addr) {
- rc = -ENOMEM;
- goto err_out;
- }
+ if (!adapter->rx_queue.queue_addr)
+ goto out_free_filter_list;
adapter->buffer_list_dma = dma_map_single(dev,
adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+ netdev_err(netdev, "unable to map buffer list pages\n");
+ goto out_free_queue_mem;
+ }
+
adapter->filter_list_dma = dma_map_single(dev,
adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
- if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
- (dma_mapping_error(dev, adapter->filter_list_dma))) {
- netdev_err(netdev, "unable to map filter or buffer list "
- "pages\n");
- rc = -ENOMEM;
- goto err_out;
+ if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+ netdev_err(netdev, "unable to map filter list pages\n");
+ goto out_unmap_buffer_list;
}
adapter->rx_queue.index = 0;
@@ -638,7 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
rxq_desc.desc,
mac_address);
rc = -ENONET;
- goto err_out;
+ goto out_unmap_filter_list;
}
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -648,7 +600,7 @@ static int ibmveth_open(struct net_device *netdev)
netdev_err(netdev, "unable to alloc pool\n");
adapter->rx_buff_pool[i].active = 0;
rc = -ENOMEM;
- goto err_out;
+ goto out_free_buffer_pools;
}
}
@@ -662,22 +614,21 @@ static int ibmveth_open(struct net_device *netdev)
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
- goto err_out;
+ goto out_free_buffer_pools;
}
+ rc = -ENOMEM;
adapter->bounce_buffer =
kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- rc = -ENOMEM;
- goto err_out_free_irq;
- }
+ if (!adapter->bounce_buffer)
+ goto out_free_irq;
+
adapter->bounce_buffer_dma =
dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
netdev_err(netdev, "unable to map bounce buffer\n");
- rc = -ENOMEM;
- goto err_out_free_irq;
+ goto out_free_bounce_buffer;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -689,10 +640,31 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-err_out_free_irq:
+out_free_bounce_buffer:
+ kfree(adapter->bounce_buffer);
+out_free_irq:
free_irq(netdev->irq, netdev);
-err_out:
- ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+ while (--i >= 0) {
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+ }
+out_unmap_filter_list:
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+out_free_filter_list:
+ free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+ free_page((unsigned long)adapter->buffer_list_addr);
+out:
napi_disable(&adapter->napi);
return rc;
}
@@ -700,7 +672,9 @@ err_out:
static int ibmveth_close(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->vdev->dev;
long lpar_rc;
+ int i;
netdev_dbg(netdev, "close starting\n");
@@ -724,7 +698,27 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_update_rx_no_buffer(adapter);
- ibmveth_cleanup(adapter);
+ dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->buffer_list_addr);
+
+ dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
+ free_page((unsigned long)adapter->filter_list_addr);
+
+ dma_free_coherent(dev, adapter->rx_queue.queue_len,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_dma);
+
+ for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+ if (adapter->rx_buff_pool[i].active)
+ ibmveth_free_buffer_pool(adapter,
+ &adapter->rx_buff_pool[i]);
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ DMA_BIDIRECTIONAL);
+ kfree(adapter->bounce_buffer);
netdev_dbg(netdev, "close complete\n");
@@ -1719,11 +1713,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
- adapter->buffer_list_dma = DMA_ERROR_CODE;
- adapter->filter_list_dma = DMA_ERROR_CODE;
- adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
netdev_dbg(netdev, "registering netdev...\n");
ibmveth_set_features(netdev, netdev->features);