author    Jeff Garzik    2006-03-11 13:35:31 -0500
committer Jeff Garzik    2006-03-11 13:35:31 -0500
commit    749dfc70554f2c9e6624ac843d66571265ed9338 (patch)
tree      bf591255b3f158222f90852d53c4279e6e7e9ced /drivers/net
parent    74f5ec29ae93aa42c49f4285c20c457afe937881 (diff)
parent    0992a5d029181421877a716eaf99145828ff7eae (diff)
Merge branch 'upstream-fixes'
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c              13
-rw-r--r--  drivers/net/Kconfig               3
-rw-r--r--  drivers/net/de620.c               2
-rw-r--r--  drivers/net/dl2k.c               25
-rw-r--r--  drivers/net/e1000/e1000_main.c    2
-rw-r--r--  drivers/net/sky2.c               32
-rw-r--r--  drivers/net/tg3.c                87
-rw-r--r--  drivers/net/tulip/de2104x.c      26
8 files changed, 120 insertions, 70 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 824e430486c2..830528dce0ca 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1574,6 +1574,7 @@ MODULE_LICENSE("GPL");
static int __init el3_init_module(void)
{
+ int ret = 0;
el3_cards = 0;
if (debug >= 0)
@@ -1589,14 +1590,16 @@ static int __init el3_init_module(void)
}
#ifdef CONFIG_EISA
- if (eisa_driver_register (&el3_eisa_driver) < 0) {
- eisa_driver_unregister (&el3_eisa_driver);
- }
+ ret = eisa_driver_register(&el3_eisa_driver);
#endif
#ifdef CONFIG_MCA
- mca_register_driver(&el3_mca_driver);
+ {
+ int err = mca_register_driver(&el3_mca_driver);
+ if (ret == 0)
+ ret = err;
+ }
#endif
- return 0;
+ return ret;
}
static void __exit el3_cleanup_module(void)
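The 3c509.c change above makes module init propagate bus-registration failures instead of always returning 0, keeping the first error seen while still attempting both the EISA and MCA registrations. A minimal userspace C sketch of that "keep the first error" pattern follows; the register_* helpers are stand-ins, not the kernel APIs.

/* Sketch of the error-aggregation pattern from el3_init_module() above.
 * register_a()/register_b() stand in for eisa_driver_register() and
 * mca_register_driver(); the return values are made up for the demo.
 */
#include <stdio.h>

static int register_a(void) { return 0; }   /* pretend EISA registration succeeded */
static int register_b(void) { return -19; } /* pretend MCA registration failed (-ENODEV) */

static int init_module_sketch(void)
{
	int ret = register_a();
	int err = register_b();

	/* Report the first failure, but only after attempting both buses. */
	if (ret == 0)
		ret = err;
	return ret;
}

int main(void)
{
	printf("init returns %d\n", init_module_sketch());
	return 0;
}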
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdf294d2df26..d4d8e5f9ebf1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1087,7 +1087,8 @@ config NE2000
without a specific driver are compatible with NE2000.
If you have a PCI NE2000 card however, say N here and Y to "PCI
- NE2000 support", above. If you have a NE2000 card and are running on
+ NE2000 and clone support" under "EISA, VLB, PCI and on board
+ controllers" below. If you have a NE2000 card and are running on
an MCA system (a bus system used on some IBM PS/2 computers and
laptops), say N here and Y to "NE/2 (ne2000 MCA version) support",
below.
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 0069f5fa973a..22fc5b869a60 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -1012,7 +1012,7 @@ static int __init read_eeprom(struct net_device *dev)
#ifdef MODULE
static struct net_device *de620_dev;
-int init_module(void)
+int __init init_module(void)
{
de620_dev = de620_probe(-1);
if (IS_ERR(de620_dev))
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 6376b63d9b17..1f3627470c95 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -50,8 +50,8 @@
*/
#define DRV_NAME "D-Link DL2000-based linux driver"
-#define DRV_VERSION "v1.17a"
-#define DRV_RELDATE "2002/10/04"
+#define DRV_VERSION "v1.17b"
+#define DRV_RELDATE "2006/03/10"
#include "dl2k.h"
static char version[] __devinitdata =
@@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq)
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
- np->tx_ring[entry].fraginfo,
+ np->tx_ring[entry].fraginfo & 0xffffffffffff,
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
@@ -892,14 +892,16 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single (np->pdev, desc->fraginfo,
+ pci_unmap_single (np->pdev,
+ desc->fraginfo & 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) {
pci_dma_sync_single_for_cpu(np->pdev,
- desc->fraginfo,
+ desc->fraginfo &
+ 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb->dev = dev;
@@ -910,7 +912,8 @@ receive_packet (struct net_device *dev)
pkt_len, 0);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
- desc->fraginfo,
+ desc->fraginfo &
+ 0xffffffffffff,
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
@@ -1796,8 +1799,9 @@ rio_close (struct net_device *dev)
np->rx_ring[i].fraginfo = 0;
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single (np->pdev, np->rx_ring[i].fraginfo,
- skb->len, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(np->pdev,
+ np->rx_ring[i].fraginfo & 0xffffffffffff,
+ skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
@@ -1805,8 +1809,9 @@ rio_close (struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single (np->pdev, np->tx_ring[i].fraginfo,
- skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(np->pdev,
+ np->tx_ring[i].fraginfo & 0xffffffffffff,
+ skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
}
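The dl2k.c hunks above mask fraginfo with 0xffffffffffff before every pci_unmap_single()/pci_dma_sync call because the 64-bit descriptor word is not a plain address: only the low 48 bits hold the DMA address, with the remaining bits used for other descriptor information. A compilable sketch of that pack/mask idiom follows; the exact field layout (length in the top 16 bits) is an assumption for illustration, not lifted from dl2k.h.

/* Pack a DMA address and a length into one 64-bit word, then recover
 * the address with the same 48-bit mask used in the patch above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_ADDR_MASK 0xffffffffffffULL	/* low 48 bits */

static uint64_t pack_fraginfo(uint64_t dma_addr, uint16_t len)
{
	return (dma_addr & DMA_ADDR_MASK) | ((uint64_t)len << 48);
}

int main(void)
{
	uint64_t fraginfo = pack_fraginfo(0x0000123456789abcULL, 1514);

	/* Unmapping must strip the non-address bits first. */
	printf("dma addr = %#" PRIx64 "\n", fraginfo & DMA_ADDR_MASK);
	printf("len      = %" PRIu64 "\n", fraginfo >> 48);
	return 0;
}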
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 9adaf5fa9d48..0c631a77ccf6 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2815,7 +2815,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
printk(KERN_ERR
"__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return NETDEV_TX_OK;
}
len = skb->len - skb->data_len;
break;
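The e1000 change above drops a packet that __pskb_pull_tail() could not fix up, but now reports NETDEV_TX_OK instead of -EFAULT: once the xmit handler has freed the skb it must claim success, otherwise the stack would requeue and retransmit a buffer that no longer exists. A standalone sketch of that convention follows; the types and helpers are stubs, not the kernel's.

/* Sketch of the hard_start_xmit return convention: free-and-drop means
 * NETDEV_TX_OK, never an error code, because the caller would otherwise
 * resubmit the already-freed buffer.
 */
#include <stdio.h>
#include <stdlib.h>

enum { NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 1 };

struct skb_stub { char data[64]; };

static void kfree_skb_stub(struct skb_stub *skb)
{
	free(skb);
}

static int xmit_frame(struct skb_stub *skb, int pull_failed)
{
	if (pull_failed) {
		/* The packet is ours to drop: free it and report OK. */
		kfree_skb_stub(skb);
		return NETDEV_TX_OK;
	}

	/* ... hand the buffer to the hardware here ... */
	kfree_skb_stub(skb);
	return NETDEV_TX_OK;
}

int main(void)
{
	struct skb_stub *skb = malloc(sizeof(*skb));

	printf("xmit returned %d\n", xmit_frame(skb, 1));
	return 0;
}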
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 72c1630977d6..73260364cba3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -74,7 +74,7 @@
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
#define TX_MIN_PENDING 64
-#define MAX_SKB_TX_LE (4 + 2*MAX_SKB_FRAGS)
+#define MAX_SKB_TX_LE (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
@@ -622,8 +622,8 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
/* Configure Rx MAC FIFO */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
- sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T),
- GMF_RX_CTRL_DEF);
+ sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+ GMF_OPER_ON | GMF_RX_F_FL_ON);
/* Flush Rx MAC FIFO on any flow control or error */
sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -995,6 +995,10 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_rx_add(sky2, re->mapaddr);
}
+ /* Truncate oversize frames */
+ sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), sky2->rx_bufsize - 8);
+ sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
+
/* Tell chip about available buffers */
sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put);
sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX));
@@ -1145,6 +1149,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
struct sky2_tx_le *le = NULL;
struct tx_ring_info *re;
unsigned i, len;
+ int avail;
dma_addr_t mapping;
u32 addr64;
u16 mss;
@@ -1287,12 +1292,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
re->idx = sky2->tx_prod;
le->ctrl |= EOP;
+ avail = tx_avail(sky2);
+ if (mss != 0 || avail < TX_MIN_PENDING) {
+ le->ctrl |= FRC_STAT;
+ if (avail <= MAX_SKB_TX_LE)
+ netif_stop_queue(dev);
+ }
+
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
&sky2->tx_last_put, TX_RING_SIZE);
- if (tx_avail(sky2) <= MAX_SKB_TX_LE)
- netif_stop_queue(dev);
-
out_unlock:
spin_unlock(&sky2->tx_lock);
@@ -1707,10 +1716,12 @@ static void sky2_tx_timeout(struct net_device *dev)
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
-/* Want receive buffer size to be multiple of 64 bits, and incl room for vlan */
+/* Want receive buffer size to be multiple of 64 bits
+ * and incl room for vlan and truncation
+ */
static inline unsigned sky2_buf_size(int mtu)
{
- return roundup(mtu + ETH_HLEN + 4, 8);
+ return roundup(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
}
static int sky2_change_mtu(struct net_device *dev, int new_mtu)
@@ -1793,7 +1804,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
if (!(status & GMR_FS_RX_OK))
goto resubmit;
- if ((status >> 16) != length || length > sky2->rx_bufsize)
+ if (length > sky2->netdev->mtu + ETH_HLEN)
goto oversize;
if (length < copybreak) {
@@ -3243,8 +3254,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
- err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
- DRV_NAME, hw);
+ err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
if (err) {
printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
pci_name(pdev), pdev->irq);
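Among the sky2 changes above, sky2_buf_size() now rounds the MTU plus Ethernet and VLAN headers up to a 64-bit multiple and then adds 8 bytes of slack, which matches the new truncation threshold of rx_bufsize - 8 programmed into RX_GMF_TR_THR. A standalone sketch of that sizing follows; the 14- and 4-byte header constants are the usual values, written out here as assumptions rather than taken from the kernel headers.

/* Receive-buffer sizing as in the patched sky2_buf_size(): round up to
 * a multiple of 8, then leave 8 bytes of room so frames truncated at
 * rx_bufsize - 8 still fit.
 */
#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN  4
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

static unsigned int sky2_buf_size_sketch(int mtu)
{
	return ROUNDUP(mtu + ETH_HLEN + VLAN_HLEN, 8) + 8;
}

int main(void)
{
	int mtu;

	for (mtu = 1500; mtu <= 9000; mtu += 7500)
		printf("mtu %4d -> rx_bufsize %u (truncate at %u)\n",
		       mtu, sky2_buf_size_sketch(mtu),
		       sky2_buf_size_sketch(mtu) - 8);
	return 0;
}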
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 83ff5994a8d5..0f3798f81883 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
(base + len + 8 < base));
}
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+ int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ return (((u64) mapping + len) > DMA_40BIT_MASK);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
u32 last_plus_one, u32 *start,
u32 base_flags, u32 mss)
{
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
+ if (tg3_40bit_overflow_test(tp, mapping, len))
+ would_hit_hwbug = 1;
+
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tg3_set_txd(tp, entry, mapping, len,
base_flags, (i == last)|(mss << 1));
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+ if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
&start, base_flags, mss))
goto out_unlock;
@@ -10608,8 +10625,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
unsigned long tg3reg_base, tg3reg_len;
struct net_device *dev;
struct tg3 *tp;
- int i, err, pci_using_dac, pm_cap;
+ int i, err, pm_cap;
char str[40];
+ u64 dma_mask, persist_dma_mask;
if (tg3_version_printed++ == 0)
printk(KERN_INFO "%s", version);
@@ -10646,26 +10664,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_free_res;
}
- /* Configure DMA attributes. */
- err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
- if (!err) {
- pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
- "for consistent allocations\n");
- goto err_out_free_res;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
- goto err_out_free_res;
- }
- pci_using_dac = 0;
- }
-
tg3reg_base = pci_resource_start(pdev, 0);
tg3reg_len = pci_resource_len(pdev, 0);
@@ -10679,8 +10677,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_using_dac)
- dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_LLTX;
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -10765,6 +10761,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
+ /* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+ * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+ * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+ * do DMA address check in tg3_start_xmit().
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+ dma_mask = DMA_64BIT_MASK;
+#endif
+ } else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+ persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+ else
+ persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+ /* Configure DMA attributes. */
+ if (dma_mask > DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, dma_mask);
+ if (!err) {
+ dev->features |= NETIF_F_HIGHDMA;
+ err = pci_set_consistent_dma_mask(pdev,
+ persist_dma_mask);
+ if (err < 0) {
+ printk(KERN_ERR PFX "Unable to obtain 64 bit "
+ "DMA for consistent allocations\n");
+ goto err_out_iounmap;
+ }
+ }
+ }
+ if (err || dma_mask == DMA_32BIT_MASK) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_ERR PFX "No usable DMA configuration, "
+ "aborting.\n");
+ goto err_out_iounmap;
+ }
+ }
+
tg3_init_bufmgr_config(tp);
#if TG3_TSO_SUPPORT != 0
@@ -10833,9 +10867,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
- if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
- dev->features &= ~NETIF_F_HIGHDMA;
-
/* flow control autonegotiation is default behavior */
tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
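The tg3 patch above replaces the unconditional 64-bit DMA mask with per-chip masks (40-bit for 5780-class parts, 32-bit for 5788) and adds tg3_40bit_overflow_test() so a mapped buffer on a 5780-class chip never ends above the 40-bit boundary. A standalone sketch of that boundary check follows; DMA_40BIT_MASK is reproduced here for illustration.

/* A buffer overflows the 40-bit DMA window when its end address crosses
 * the mask, as tested per fragment in tg3_start_xmit() above.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_40BIT_MASK 0x000000ffffffffffULL

static int dma_40bit_overflow(uint64_t mapping, int len)
{
	return (mapping + (uint64_t)len) > DMA_40BIT_MASK;
}

int main(void)
{
	/* Just below the boundary is fine; pushing the end past it is not. */
	printf("%d\n", dma_40bit_overflow(DMA_40BIT_MASK - 100, 50));   /* 0 */
	printf("%d\n", dma_40bit_overflow(DMA_40BIT_MASK - 100, 1500)); /* 1 */
	return 0;
}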
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d6c3d52d2e86..6299e186c73f 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1357,7 +1357,6 @@ static int de_open (struct net_device *dev)
{
struct de_private *de = dev->priv;
int rc;
- unsigned long flags;
if (netif_msg_ifup(de))
printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
@@ -1371,18 +1370,20 @@ static int de_open (struct net_device *dev)
return rc;
}
- rc = de_init_hw(de);
- if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
- goto err_out_free;
- }
+ dw32(IntrMask, 0);
rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
if (rc) {
printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
dev->name, dev->irq, rc);
- goto err_out_hw;
+ goto err_out_free;
+ }
+
+ rc = de_init_hw(de);
+ if (rc) {
+ printk(KERN_ERR "%s: h/w init failure, err=%d\n",
+ dev->name, rc);
+ goto err_out_free_irq;
}
netif_start_queue(dev);
@@ -1390,11 +1391,8 @@ static int de_open (struct net_device *dev)
return 0;
-err_out_hw:
- spin_lock_irqsave(&de->lock, flags);
- de_stop_hw(de);
- spin_unlock_irqrestore(&de->lock, flags);
-
+err_out_free_irq:
+ free_irq(dev->irq, dev);
err_out_free:
de_free_rings(de);
return rc;
@@ -1450,6 +1448,8 @@ static void de_tx_timeout (struct net_device *dev)
synchronize_irq(dev->irq);
de_clean_rings(de);
+ de_init_rings(de);
+
de_init_hw(de);
netif_wake_queue(dev);
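The de2104x change above requests the IRQ before de_init_hw() (after masking the chip's interrupts with dw32(IntrMask, 0)) and, on a hardware-init failure, unwinds with free_irq() instead of stopping hardware that was never started. A standalone sketch of that ordering and its goto unwind follows; the helpers are stand-ins, not the driver's.

/* Open path: allocate, grab the IRQ with the chip's interrupts masked,
 * then init the hardware; tear down in reverse order on failure.
 */
#include <stdio.h>

static int alloc_rings(void)      { return 0; }
static void free_rings(void)      { puts("free rings"); }
static int request_irq_stub(void) { return 0; }
static void free_irq_stub(void)   { puts("free irq"); }
static int init_hw(void)          { return -5; } /* pretend h/w init failed */

static int de_open_sketch(void)
{
	int rc;

	rc = alloc_rings();
	if (rc)
		return rc;

	/* The handler may fire as soon as this succeeds, so the chip's
	 * interrupt sources were disabled first (IntrMask = 0 in the patch).
	 */
	rc = request_irq_stub();
	if (rc)
		goto err_out_free;

	rc = init_hw();
	if (rc)
		goto err_out_free_irq;

	return 0;

err_out_free_irq:
	free_irq_stub();
err_out_free:
	free_rings();
	return rc;
}

int main(void)
{
	printf("open returned %d\n", de_open_sketch());
	return 0;
}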