Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/Kconfig | 1
-rw-r--r--  drivers/acpi/ac.c | 40
-rw-r--r--  drivers/acpi/toshiba_acpi.c | 3
-rw-r--r--  drivers/block/paride/pf.c | 25
-rw-r--r--  drivers/block/rd.c | 13
-rw-r--r--  drivers/char/pcmcia/cm4000_cs.c | 2
-rw-r--r--  drivers/char/pcmcia/cm4040_cs.c | 2
-rw-r--r--  drivers/char/random.c | 6
-rw-r--r--  drivers/char/rtc.c | 52
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 153
-rw-r--r--  drivers/crypto/geode-aes.c | 3
-rw-r--r--  drivers/dma/dmaengine.c | 17
-rw-r--r--  drivers/dma/ioat.c | 11
-rw-r--r--  drivers/dma/ioat_dca.c | 164
-rw-r--r--  drivers/dma/ioat_dma.c | 578
-rw-r--r--  drivers/dma/ioatdma.h | 32
-rw-r--r--  drivers/dma/ioatdma_hw.h | 33
-rw-r--r--  drivers/dma/ioatdma_registers.h | 106
-rw-r--r--  drivers/edac/i5000_edac.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-pasemi.c | 7
-rw-r--r--  drivers/i2c/chips/eeprom.c | 37
-rw-r--r--  drivers/i2c/i2c-core.c | 3
-rw-r--r--  drivers/i2c/i2c-dev.c | 86
-rw-r--r--  drivers/ide/Kconfig | 4
-rw-r--r--  drivers/ide/cris/ide-cris.c | 3
-rw-r--r--  drivers/ide/ide-io.c | 6
-rw-r--r--  drivers/ide/ide-lib.c | 1
-rw-r--r--  drivers/ide/pci/cmd64x.c | 5
-rw-r--r--  drivers/ide/pci/cs5530.c | 3
-rw-r--r--  drivers/ide/pci/it821x.c | 3
-rw-r--r--  drivers/ide/pci/jmicron.c | 3
-rw-r--r--  drivers/ide/pci/sc1200.c | 3
-rw-r--r--  drivers/ide/pci/sis5513.c | 1
-rw-r--r--  drivers/ide/ppc/pmac.c | 1
-rw-r--r--  drivers/ide/setup-pci.c | 5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c | 48
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 20
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h | 3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 29
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_hw.h | 6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c | 5
-rw-r--r--  drivers/isdn/sc/card.h | 2
-rw-r--r--  drivers/isdn/sc/packet.c | 2
-rw-r--r--  drivers/isdn/sc/shmem.c | 2
-rw-r--r--  drivers/lguest/lguest_user.c | 2
-rw-r--r--  drivers/md/raid5.c | 16
-rw-r--r--  drivers/misc/ioc4.c | 10
-rw-r--r--  drivers/net/arm/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/chelsio/sge.c | 2
-rw-r--r--  drivers/net/cris/eth_v10.c | 440
-rw-r--r--  drivers/net/e1000/e1000_main.c | 13
-rw-r--r--  drivers/net/fs_enet/Kconfig | 11
-rw-r--r--  drivers/net/fs_enet/Makefile | 15
-rw-r--r--  drivers/net/mlx4/alloc.c | 7
-rw-r--r--  drivers/net/mlx4/qp.c | 2
-rw-r--r--  drivers/net/netx-eth.c | 6
-rw-r--r--  drivers/net/s2io.c | 110
-rw-r--r--  drivers/net/skge.c | 51
-rw-r--r--  drivers/net/sunhme.c | 17
-rw-r--r--  drivers/net/via-velocity.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 56
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.h | 29
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 126
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 2
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 7
-rw-r--r--  drivers/oprofile/cpu_buffer.h | 1
-rw-r--r--  drivers/oprofile/oprofile_stats.c | 4
-rw-r--r--  drivers/rtc/Kconfig | 8
-rw-r--r--  drivers/rtc/hctosys.c | 4
-rw-r--r--  drivers/rtc/rtc-ds1307.c | 93
-rw-r--r--  drivers/rtc/rtc-ds1553.c | 2
-rw-r--r--  drivers/rtc/rtc-ds1742.c | 5
-rw-r--r--  drivers/rtc/rtc-m48t59.c | 3
-rw-r--r--  drivers/rtc/rtc-stk17ta8.c | 2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_sds.c | 4
-rw-r--r--  drivers/serial/8250_pnp.c | 10
-rw-r--r--  drivers/serial/atmel_serial.c | 9
-rw-r--r--  drivers/serial/crisv10.c | 1293
-rw-r--r--  drivers/serial/crisv10.h | 146
-rw-r--r--  drivers/spi/spi.c | 8
-rw-r--r--  drivers/spi/spi_txx9.c | 40
-rw-r--r--  drivers/spi/tle62x0.c | 5
-rw-r--r--  drivers/usb/serial/keyspan.c | 38
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/gbefb.c | 4
-rw-r--r--  drivers/video/geode/lxfb.h | 2
-rw-r--r--  drivers/video/ps3fb.c | 2
-rw-r--r--  drivers/video/s1d13xxxfb.c | 5
-rw-r--r--  drivers/video/sis/sis_main.c | 3
-rw-r--r--  drivers/video/uvesafb.c | 6
-rw-r--r--  drivers/w1/masters/ds2490.c | 2
101 files changed, 2557 insertions, 1655 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ce9dead0f499..087a7028ae84 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -50,6 +50,7 @@ config ACPI_SLEEP
config ACPI_PROCFS
bool "Deprecated /proc/acpi files"
depends on PROC_FS
+ default y
---help---
For backwards compatibility, this option allows
deprecated /proc/acpi/ files to exist, even when
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e03de37a750d..30238f6ff232 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -27,8 +27,10 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
+#ifdef CONFIG_ACPI_PROCFS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#endif
#include <linux/power_supply.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -49,12 +51,15 @@ MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI AC Adapter Driver");
MODULE_LICENSE("GPL");
+#ifdef CONFIG_ACPI_PROCFS
extern struct proc_dir_entry *acpi_lock_ac_dir(void);
extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+static int acpi_ac_resume(struct acpi_device *device);
const static struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0},
@@ -69,6 +74,7 @@ static struct acpi_driver acpi_ac_driver = {
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
+ .resume = acpi_ac_resume,
},
};
@@ -80,12 +86,15 @@ struct acpi_ac {
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
+#ifdef CONFIG_ACPI_PROCFS
static const struct file_operations acpi_ac_fops = {
.open = acpi_ac_open_fs,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
+#endif
+
static int get_ac_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -127,6 +136,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
return 0;
}
+#ifdef CONFIG_ACPI_PROCFS
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -206,6 +216,7 @@ static int acpi_ac_remove_fs(struct acpi_device *device)
return 0;
}
+#endif
/* --------------------------------------------------------------------------
Driver Model
@@ -264,7 +275,9 @@ static int acpi_ac_add(struct acpi_device *device)
if (result)
goto end;
+#ifdef CONFIG_ACPI_PROCFS
result = acpi_ac_add_fs(device);
+#endif
if (result)
goto end;
ac->charger.name = acpi_device_bid(device);
@@ -287,13 +300,30 @@ static int acpi_ac_add(struct acpi_device *device)
end:
if (result) {
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_remove_fs(device);
+#endif
kfree(ac);
}
return result;
}
+static int acpi_ac_resume(struct acpi_device *device)
+{
+ struct acpi_ac *ac;
+ unsigned old_state;
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+ ac = acpi_driver_data(device);
+ old_state = ac->state;
+ if (acpi_ac_get_state(ac))
+ return 0;
+ if (old_state != ac->state)
+ kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
+ return 0;
+}
+
static int acpi_ac_remove(struct acpi_device *device, int type)
{
acpi_status status = AE_OK;
@@ -309,7 +339,9 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
ACPI_ALL_NOTIFY, acpi_ac_notify);
if (ac->charger.dev)
power_supply_unregister(&ac->charger);
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_remove_fs(device);
+#endif
kfree(ac);
@@ -323,13 +355,17 @@ static int __init acpi_ac_init(void)
if (acpi_disabled)
return -ENODEV;
+#ifdef CONFIG_ACPI_PROCFS
acpi_ac_dir = acpi_lock_ac_dir();
if (!acpi_ac_dir)
return -ENODEV;
+#endif
result = acpi_bus_register_driver(&acpi_ac_driver);
if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS
acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return -ENODEV;
}
@@ -341,7 +377,9 @@ static void __exit acpi_ac_exit(void)
acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS
acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
return;
}
diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c
index a736ef7bdee4..9e8c20c6a0b7 100644
--- a/drivers/acpi/toshiba_acpi.c
+++ b/drivers/acpi/toshiba_acpi.c
@@ -591,9 +591,12 @@ static int __init toshiba_acpi_init(void)
NULL,
&toshiba_backlight_data);
if (IS_ERR(toshiba_backlight_device)) {
+ int ret = PTR_ERR(toshiba_backlight_device);
+
printk(KERN_ERR "Could not register toshiba backlight device\n");
toshiba_backlight_device = NULL;
toshiba_acpi_exit();
+ return ret;
}
toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ceffa6034e20..e7fe6ca97dd8 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -488,13 +488,11 @@ static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fu
return r;
}
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
static void pf_lock(struct pf_unit *pf, int func)
{
char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
- pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock");
+ pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
}
static void pf_eject(struct pf_unit *pf)
@@ -555,7 +553,7 @@ static void pf_mode_sense(struct pf_unit *pf)
{ ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
char buf[8];
- pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense"));
+ pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
pf->media_status = PF_RW;
if (buf[3] & 0x80)
pf->media_status = PF_RO;
@@ -591,7 +589,7 @@ static void pf_get_capacity(struct pf_unit *pf)
char buf[8];
int bs;
- if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) {
+ if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
pf->media_status = PF_NM;
return;
}
@@ -804,13 +802,18 @@ static int pf_next_buf(void)
pf_buf += 512;
pf_block++;
if (!pf_run)
- return 0;
- if (!pf_count)
return 1;
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(1);
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
- return 1;
+ if (!pf_count) {
+ spin_lock_irqsave(&pf_spin_lock, saved_flags);
+ pf_end_request(1);
+ pf_req = elv_next_request(pf_queue);
+ spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
+ if (!pf_req)
+ return 1;
+ pf_count = pf_req->current_nr_sectors;
+ pf_buf = pf_req->buffer;
+ }
+ return 0;
}
static inline void next_request(int success)
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 47f8ac6cce57..82f4eecc8699 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page)
return 0;
}
+/*
+ * releasepage is called by pagevec_strip/try_to_release_page if
+ * buffers_heads_over_limit is true. Without a releasepage function
+ * try_to_free_buffers is called instead. That can unset the dirty
+ * bit of our ram disk pages, which will be eventually freed, even
+ * if the page is still in use.
+ */
+static int ramdisk_releasepage(struct page *page, gfp_t dummy)
+{
+ return 0;
+}
+
static const struct address_space_operations ramdisk_aops = {
.readpage = ramdisk_readpage,
.prepare_write = ramdisk_prepare_write,
@@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = {
.writepage = ramdisk_writepage,
.set_page_dirty = ramdisk_set_page_dirty,
.writepages = ramdisk_writepages,
+ .releasepage = ramdisk_releasepage,
};
static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index cc5d77797def..02518da6a386 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -47,7 +47,7 @@
/* #define ATR_CSUM */
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index a0b9c8728d56..5f291bf739a6 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -41,7 +41,7 @@
#ifdef PCMCIA_DEBUG
-#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle))
+#define reader_to_dev(x) (&handle_to_dev(x->p_dev))
static int pc_debug = PCMCIA_DEBUG;
module_param(pc_debug, int, 0600);
#define DEBUGP(n, rdr, x, args...) do { \
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1756b1f7cb72..5fee05661823 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1494,7 +1494,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
seq += keyptr->count;
- seq += ktime_get_real().tv64;
+ seq += ktime_to_ns(ktime_get_real());
return seq;
}
@@ -1556,7 +1556,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
* overlaps less than one time per MSL (2 minutes).
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
- seq += ktime_get_real().tv64 >> 6;
+ seq += ktime_to_ns(ktime_get_real()) >> 6;
#if 0
printk("init_seq(%lx, %lx, %d, %d) = %d\n",
saddr, daddr, sport, dport, seq);
@@ -1616,7 +1616,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
seq = half_md4_transform(hash, keyptr->secret);
seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
- seq += ktime_get_real().tv64;
+ seq += ktime_to_ns(ktime_get_real());
seq &= (1ull << 48) - 1;
#if 0
printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index ec6b65ec69ea..0c66b802736a 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -918,6 +918,31 @@ static const struct file_operations rtc_proc_fops = {
};
#endif
+static resource_size_t rtc_size;
+
+static struct resource * __init rtc_request_region(resource_size_t size)
+{
+ struct resource *r;
+
+ if (RTC_IOMAPPED)
+ r = request_region(RTC_PORT(0), size, "rtc");
+ else
+ r = request_mem_region(RTC_PORT(0), size, "rtc");
+
+ if (r)
+ rtc_size = size;
+
+ return r;
+}
+
+static void rtc_release_region(void)
+{
+ if (RTC_IOMAPPED)
+ release_region(RTC_PORT(0), rtc_size);
+ else
+ release_mem_region(RTC_PORT(0), rtc_size);
+}
+
static int __init rtc_init(void)
{
#ifdef CONFIG_PROC_FS
@@ -968,10 +993,17 @@ found:
}
no_irq:
#else
- if (RTC_IOMAPPED)
- r = request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
- else
- r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc");
+ r = rtc_request_region(RTC_IO_EXTENT);
+
+ /*
+ * If we've already requested a smaller range (for example, because
+ * PNPBIOS or ACPI told us how the device is configured), the request
+ * above might fail because it's too big.
+ *
+ * If so, request just the range we actually use.
+ */
+ if (!r)
+ r = rtc_request_region(RTC_IO_EXTENT_USED);
if (!r) {
#ifdef RTC_IRQ
rtc_has_irq = 0;
@@ -992,10 +1024,7 @@ no_irq:
/* Yeah right, seeing as irq 8 doesn't even hit the bus. */
rtc_has_irq = 0;
printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ);
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
- else
- release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
return -EIO;
}
hpet_rtc_timer_init();
@@ -1009,7 +1038,7 @@ no_irq:
free_irq(RTC_IRQ, NULL);
rtc_has_irq = 0;
#endif
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
return -ENODEV;
}
@@ -1091,10 +1120,7 @@ static void __exit rtc_exit (void)
if (rtc_has_irq)
free_irq (rtc_irq, &rtc_port);
#else
- if (RTC_IOMAPPED)
- release_region(RTC_PORT(0), RTC_IO_EXTENT);
- else
- release_mem_region(RTC_PORT(0), RTC_IO_EXTENT);
+ rtc_release_region();
#ifdef RTC_IRQ
if (rtc_has_irq)
free_irq (RTC_IRQ, NULL);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 4bd33ce8a6f3..1bba99747f5b 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -37,17 +37,17 @@
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-/*
- * The polling frequency of this governor depends on the capability of
+/*
+ * The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
- * latency of the processor. The governor will work on any processor with
- * transition latency <= 10mS, using appropriate sampling
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
* rate.
* For CPUs with transition latency > 10mS (mostly drivers
* with CPUFREQ_ETERNAL), this governor will not work.
* All times here are in uS.
*/
-static unsigned int def_sampling_rate;
+static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE_RATIO (2)
/* for correct statistics, we need at least 10 ticks between each measure */
#define MIN_STAT_SAMPLING_RATE \
@@ -63,12 +63,12 @@ static unsigned int def_sampling_rate;
static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
- struct cpufreq_policy *cur_policy;
- unsigned int prev_cpu_idle_up;
- unsigned int prev_cpu_idle_down;
- unsigned int enable;
- unsigned int down_skip;
- unsigned int requested_freq;
+ struct cpufreq_policy *cur_policy;
+ unsigned int prev_cpu_idle_up;
+ unsigned int prev_cpu_idle_down;
+ unsigned int enable;
+ unsigned int down_skip;
+ unsigned int requested_freq;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
@@ -82,24 +82,24 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
* is recursive for the same process. -Venki
*/
-static DEFINE_MUTEX (dbs_mutex);
+static DEFINE_MUTEX (dbs_mutex);
static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
struct dbs_tuners {
- unsigned int sampling_rate;
- unsigned int sampling_down_factor;
- unsigned int up_threshold;
- unsigned int down_threshold;
- unsigned int ignore_nice;
- unsigned int freq_step;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int ignore_nice;
+ unsigned int freq_step;
};
static struct dbs_tuners dbs_tuners_ins = {
- .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
- .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
- .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
- .ignore_nice = 0,
- .freq_step = 5,
+ .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+ .ignore_nice = 0,
+ .freq_step = 5,
};
static inline unsigned int get_cpu_idle_time(unsigned int cpu)
@@ -109,13 +109,34 @@ static inline unsigned int get_cpu_idle_time(unsigned int cpu)
if (dbs_tuners_ins.ignore_nice)
add_nice = kstat_cpu(cpu).cpustat.nice;
- ret = kstat_cpu(cpu).cpustat.idle +
+ ret = kstat_cpu(cpu).cpustat.idle +
kstat_cpu(cpu).cpustat.iowait +
add_nice;
return ret;
}
+/* keep track of frequency transitions */
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+ freq->cpu);
+
+ if (!this_dbs_info->enable)
+ return 0;
+
+ this_dbs_info->requested_freq = freq->new;
+
+ return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier
+};
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -127,8 +148,8 @@ static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
}
-#define define_one_ro(_name) \
-static struct freq_attr _name = \
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(sampling_rate_max);
@@ -148,7 +169,7 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -164,7 +185,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -183,7 +204,7 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -202,7 +223,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_down_threshold(struct cpufreq_policy *unused,
const char *buf, size_t count)
{
unsigned int input;
@@ -228,16 +249,16 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
int ret;
unsigned int j;
-
- ret = sscanf (buf, "%u", &input);
- if ( ret != 1 )
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
return -EINVAL;
- if ( input > 1 )
+ if (input > 1)
input = 1;
-
+
mutex_lock(&dbs_mutex);
- if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+ if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
mutex_unlock(&dbs_mutex);
return count;
}
@@ -261,14 +282,14 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
unsigned int input;
int ret;
- ret = sscanf (buf, "%u", &input);
+ ret = sscanf(buf, "%u", &input);
- if ( ret != 1 )
+ if (ret != 1)
return -EINVAL;
- if ( input > 100 )
+ if (input > 100)
input = 100;
-
+
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
mutex_lock(&dbs_mutex);
@@ -322,18 +343,18 @@ static void dbs_check_cpu(int cpu)
policy = this_dbs_info->cur_policy;
- /*
- * The default safe range is 20% to 80%
+ /*
+ * The default safe range is 20% to 80%
* Every sampling_rate, we check
- * - If current idle time is less than 20%, then we try to
- * increase frequency
+ * - If current idle time is less than 20%, then we try to
+ * increase frequency
* Every sampling_rate*sampling_down_factor, we check
- * - If current idle time is more than 80%, then we try to
- * decrease frequency
+ * - If current idle time is more than 80%, then we try to
+ * decrease frequency
*
- * Any frequency increase takes it to the maximum frequency.
- * Frequency reduction happens at minimum steps of
- * 5% (default) of max_frequency
+ * Any frequency increase takes it to the maximum frequency.
+ * Frequency reduction happens at minimum steps of
+ * 5% (default) of max_frequency
*/
/* Check for frequency increase */
@@ -361,13 +382,13 @@ static void dbs_check_cpu(int cpu)
/* if we are already at full speed then break out early */
if (this_dbs_info->requested_freq == policy->max)
return;
-
+
freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_step == 0))
freq_step = 5;
-
+
this_dbs_info->requested_freq += freq_step;
if (this_dbs_info->requested_freq > policy->max)
this_dbs_info->requested_freq = policy->max;
@@ -427,15 +448,15 @@ static void dbs_check_cpu(int cpu)
}
static void do_dbs_timer(struct work_struct *work)
-{
+{
int i;
mutex_lock(&dbs_mutex);
for_each_online_cpu(i)
dbs_check_cpu(i);
- schedule_delayed_work(&dbs_work,
+ schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
mutex_unlock(&dbs_mutex);
-}
+}
static inline void dbs_timer_init(void)
{
@@ -462,13 +483,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
- if ((!cpu_online(cpu)) ||
- (!policy->cur))
+ if ((!cpu_online(cpu)) || (!policy->cur))
return -EINVAL;
if (this_dbs_info->enable) /* Already enabled */
break;
-
+
mutex_lock(&dbs_mutex);
rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -481,7 +501,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
-
+
j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
j_dbs_info->prev_cpu_idle_down
= j_dbs_info->prev_cpu_idle_up;
@@ -511,8 +531,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_tuners_ins.sampling_rate = def_sampling_rate;
dbs_timer_init();
+ cpufreq_register_notifier(
+ &dbs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
}
-
+
mutex_unlock(&dbs_mutex);
break;
@@ -525,9 +548,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
* Stop the timerschedule work, when this governor
* is used for first time
*/
- if (dbs_enable == 0)
+ if (dbs_enable == 0) {
dbs_timer_exit();
-
+ cpufreq_unregister_notifier(
+ &dbs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
mutex_unlock(&dbs_mutex);
break;
@@ -537,11 +564,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
if (policy->max < this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
- policy->max, CPUFREQ_RELATION_H);
+ policy->max, CPUFREQ_RELATION_H);
else if (policy->min > this_dbs_info->cur_policy->cur)
__cpufreq_driver_target(
this_dbs_info->cur_policy,
- policy->min, CPUFREQ_RELATION_L);
+ policy->min, CPUFREQ_RELATION_L);
mutex_unlock(&dbs_mutex);
break;
}
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index f9a34abbf4fa..711e246e1ef0 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -110,8 +110,7 @@ geode_aes_crypt(struct geode_aes_op *op)
* we don't need to worry
*/
- if (op->src == op->dst)
- flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
+ flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
if (op->dir == AES_DIR_ENCRYPT)
flags |= AES_CTRL_ENCRYPT;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 82489923af09..d59b2f417306 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -182,10 +182,9 @@ static void dma_client_chan_alloc(struct dma_client *client)
/* we are done once this client rejects
* an available resource
*/
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_get(chan);
- kref_get(&device->refcount);
- } else if (ack == DMA_NAK)
+ else if (ack == DMA_NAK)
return;
}
}
@@ -272,11 +271,8 @@ static void dma_clients_notify_removed(struct dma_chan *chan)
/* client was holding resources for this channel so
* free it
*/
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_put(chan);
- kref_put(&chan->device->refcount,
- dma_async_device_cleanup);
- }
}
mutex_unlock(&dma_list_mutex);
@@ -316,11 +312,8 @@ void dma_async_client_unregister(struct dma_client *client)
ack = client->event_callback(client, chan,
DMA_RESOURCE_REMOVED);
- if (ack == DMA_ACK) {
+ if (ack == DMA_ACK)
dma_chan_put(chan);
- kref_put(&chan->device->refcount,
- dma_async_device_cleanup);
- }
}
list_del(&client->global_node);
@@ -397,6 +390,8 @@ int dma_async_device_register(struct dma_device *device)
goto err_out;
}
+ /* One for the channel, one for the class device */
+ kref_get(&device->refcount);
kref_get(&device->refcount);
kref_init(&chan->refcount);
chan->slow_ref = 0;
diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c
index f204c39fb412..16e0fd8facfb 100644
--- a/drivers/dma/ioat.c
+++ b/drivers/dma/ioat.c
@@ -39,10 +39,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
static struct pci_device_id ioat_pci_tbl[] = {
+ /* I/OAT v1 platforms */
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) },
{ PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) },
+
+ /* I/OAT v2 platforms */
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) },
{ 0, }
};
@@ -74,10 +78,17 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase)
if (device->dma && ioat_dca_enabled)
device->dca = ioat_dca_init(pdev, iobase);
break;
+ case IOAT_VER_2_0:
+ device->dma = ioat_dma_probe(pdev, iobase);
+ if (device->dma && ioat_dca_enabled)
+ device->dca = ioat2_dca_init(pdev, iobase);
+ break;
default:
err = -ENODEV;
break;
}
+ if (!device->dma)
+ err = -ENODEV;
return err;
}
diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c
index ba985715b803..0fa8a98051a8 100644
--- a/drivers/dma/ioat_dca.c
+++ b/drivers/dma/ioat_dca.c
@@ -261,3 +261,167 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase)
return dca;
}
+
+static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 id;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+ id = dcaid_from_pcidev(pdev);
+
+ if (ioatdca->requester_count == ioatdca->max_requesters)
+ return -ENODEV;
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == NULL) {
+ /* found an empty slot */
+ ioatdca->requester_count++;
+ ioatdca->req_slots[i].pdev = pdev;
+ ioatdca->req_slots[i].rid = id;
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+ writel(id | IOAT_DCA_GREQID_VALID,
+ ioatdca->iobase + global_req_table + (i * 4));
+ return i;
+ }
+ }
+ /* Error, ioatdca->requester_count is out of whack */
+ return -EFAULT;
+}
+
+static int ioat2_dca_remove_requester(struct dca_provider *dca,
+ struct device *dev)
+{
+ struct ioat_dca_priv *ioatdca = dca_priv(dca);
+ struct pci_dev *pdev;
+ int i;
+ u16 global_req_table;
+
+ /* This implementation only supports PCI-Express */
+ if (dev->bus != &pci_bus_type)
+ return -ENODEV;
+ pdev = to_pci_dev(dev);
+
+ for (i = 0; i < ioatdca->max_requesters; i++) {
+ if (ioatdca->req_slots[i].pdev == pdev) {
+ global_req_table =
+ readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET);
+ writel(0, ioatdca->iobase + global_req_table + (i * 4));
+ ioatdca->req_slots[i].pdev = NULL;
+ ioatdca->req_slots[i].rid = 0;
+ ioatdca->requester_count--;
+ return i;
+ }
+ }
+ return -ENODEV;
+}
+
+static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu)
+{
+ u8 tag;
+
+ tag = ioat_dca_get_tag(dca, cpu);
+ tag = (~tag) & 0x1F;
+ return tag;
+}
+
+static struct dca_ops ioat2_dca_ops = {
+ .add_requester = ioat2_dca_add_requester,
+ .remove_requester = ioat2_dca_remove_requester,
+ .get_tag = ioat2_dca_get_tag,
+};
+
+static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset)
+{
+ int slots = 0;
+ u32 req;
+ u16 global_req_table;
+
+ global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET);
+ if (global_req_table == 0)
+ return 0;
+ do {
+ req = readl(iobase + global_req_table + (slots * sizeof(u32)));
+ slots++;
+ } while ((req & IOAT_DCA_GREQID_LASTID) == 0);
+
+ return slots;
+}
+
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
+{
+ struct dca_provider *dca;
+ struct ioat_dca_priv *ioatdca;
+ int slots;
+ int i;
+ int err;
+ u32 tag_map;
+ u16 dca_offset;
+ u16 csi_fsb_control;
+ u16 pcie_control;
+ u8 bit;
+
+ if (!system_has_dca_enabled(pdev))
+ return NULL;
+
+ dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET);
+ if (dca_offset == 0)
+ return NULL;
+
+ slots = ioat2_dca_count_dca_slots(iobase, dca_offset);
+ if (slots == 0)
+ return NULL;
+
+ dca = alloc_dca_provider(&ioat2_dca_ops,
+ sizeof(*ioatdca)
+ + (sizeof(struct ioat_dca_slot) * slots));
+ if (!dca)
+ return NULL;
+
+ ioatdca = dca_priv(dca);
+ ioatdca->iobase = iobase;
+ ioatdca->dca_base = iobase + dca_offset;
+ ioatdca->max_requesters = slots;
+
+ /* some bios might not know to turn these on */
+ csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+ if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) {
+ csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH;
+ writew(csi_fsb_control,
+ ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET);
+ }
+ pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+ if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) {
+ pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR;
+ writew(pcie_control,
+ ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET);
+ }
+
+
+ /* TODO version, compatibility and configuration checks */
+
+ /* copy out the APIC to DCA tag map */
+ tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET);
+ for (i = 0; i < 5; i++) {
+ bit = (tag_map >> (4 * i)) & 0x0f;
+ if (bit < 8)
+ ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID;
+ else
+ ioatdca->tag_map[i] = 0;
+ }
+
+ err = register_dca_provider(dca, &pdev->dev);
+ if (err) {
+ free_dca_provider(dca);
+ return NULL;
+ }
+
+ return dca;
+}
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 7e4a785c2dff..c1c2dcc6fc2e 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -36,18 +36,24 @@
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
-#define INITIAL_IOAT_DESC_COUNT 128
-
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
+static int ioat_pending_level = 4;
+module_param(ioat_pending_level, int, 0644);
+MODULE_PARM_DESC(ioat_pending_level,
+ "high-water mark for pushing ioat descriptors (default: 4)");
+
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);
+
+static struct ioat_desc_sw *
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
struct ioatdma_device *device,
@@ -130,6 +136,12 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
ioat_chan->device = device;
ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
ioat_chan->xfercap = xfercap;
+ ioat_chan->desccount = 0;
+ if (ioat_chan->device->version != IOAT_VER_1_2) {
+ writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
+ | IOAT_DMA_DCA_ANY_CPU,
+ ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
+ }
spin_lock_init(&ioat_chan->cleanup_lock);
spin_lock_init(&ioat_chan->desc_lock);
INIT_LIST_HEAD(&ioat_chan->free_desc);
@@ -161,13 +173,17 @@ static void ioat_set_dest(dma_addr_t addr,
tx_to_ioat_desc(tx)->dst = addr;
}
-static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
+static inline void __ioat1_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan);
+static inline void __ioat2_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan);
+
+static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
struct ioat_desc_sw *prev, *new;
struct ioat_dma_descriptor *hw;
- int append = 0;
dma_cookie_t cookie;
LIST_HEAD(new_chain);
u32 copy;
@@ -209,7 +225,7 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&new->node, &new_chain);
desc_count++;
prev = new;
- } while (len && (new = ioat_dma_get_next_descriptor(ioat_chan)));
+ } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));
hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
if (new->async_tx.callback) {
@@ -246,20 +262,98 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx)
first->async_tx.phys;
__list_splice(&new_chain, ioat_chan->used_desc.prev);
+ ioat_chan->dmacount += desc_count;
ioat_chan->pending += desc_count;
- if (ioat_chan->pending >= 4) {
- append = 1;
- ioat_chan->pending = 0;
- }
+ if (ioat_chan->pending >= ioat_pending_level)
+ __ioat1_dma_memcpy_issue_pending(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
- if (append)
- writeb(IOAT_CHANCMD_APPEND,
- ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ return cookie;
+}
+
+static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
+ struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
+ struct ioat_desc_sw *new;
+ struct ioat_dma_descriptor *hw;
+ dma_cookie_t cookie;
+ u32 copy;
+ size_t len;
+ dma_addr_t src, dst;
+ int orig_ack;
+ unsigned int desc_count = 0;
+
+ /* src and dest and len are stored in the initial descriptor */
+ len = first->len;
+ src = first->src;
+ dst = first->dst;
+ orig_ack = first->async_tx.ack;
+ new = first;
+
+ /* ioat_chan->desc_lock is still in force in version 2 path */
+
+ do {
+ copy = min((u32) len, ioat_chan->xfercap);
+
+ new->async_tx.ack = 1;
+
+ hw = new->hw;
+ hw->size = copy;
+ hw->ctl = 0;
+ hw->src_addr = src;
+ hw->dst_addr = dst;
+
+ len -= copy;
+ dst += copy;
+ src += copy;
+ desc_count++;
+ } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));
+
+ hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
+ if (new->async_tx.callback) {
+ hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
+ if (first != new) {
+ /* move callback into the last desc */
+ new->async_tx.callback = first->async_tx.callback;
+ new->async_tx.callback_param
+ = first->async_tx.callback_param;
+ first->async_tx.callback = NULL;
+ first->async_tx.callback_param = NULL;
+ }
+ }
+
+ new->tx_cnt = desc_count;
+ new->async_tx.ack = orig_ack; /* client is in control of this ack */
+
+ /* store the original values for use in later cleanup */
+ if (new != first) {
+ new->src = first->src;
+ new->dst = first->dst;
+ new->len = first->len;
+ }
+
+ /* cookie incr and addition to used_list must be atomic */
+ cookie = ioat_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ ioat_chan->common.cookie = new->async_tx.cookie = cookie;
+
+ ioat_chan->dmacount += desc_count;
+ ioat_chan->pending += desc_count;
+ if (ioat_chan->pending >= ioat_pending_level)
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
return cookie;
}
+/**
+ * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
+ * @ioat_chan: the channel supplying the memory pool for the descriptors
+ * @flags: allocation flags
+ */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
struct ioat_dma_chan *ioat_chan,
gfp_t flags)
@@ -284,15 +378,57 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
desc_sw->async_tx.tx_set_src = ioat_set_src;
desc_sw->async_tx.tx_set_dest = ioat_set_dest;
- desc_sw->async_tx.tx_submit = ioat_tx_submit;
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ desc_sw->async_tx.tx_submit = ioat1_tx_submit;
+ break;
+ case IOAT_VER_2_0:
+ desc_sw->async_tx.tx_submit = ioat2_tx_submit;
+ break;
+ }
INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+
desc_sw->hw = desc;
desc_sw->async_tx.phys = phys;
return desc_sw;
}
-/* returns the actual number of allocated descriptors */
+static int ioat_initial_desc_count = 256;
+module_param(ioat_initial_desc_count, int, 0644);
+MODULE_PARM_DESC(ioat_initial_desc_count,
+ "initial descriptors per channel (default: 256)");
+
+/**
+ * ioat2_dma_massage_chan_desc - link the descriptors into a circle
+ * @ioat_chan: the channel to be massaged
+ */
+static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
+{
+ struct ioat_desc_sw *desc, *_desc;
+
+ /* setup used_desc */
+ ioat_chan->used_desc.next = ioat_chan->free_desc.next;
+ ioat_chan->used_desc.prev = NULL;
+
+ /* pull free_desc out of the circle so that every node is a hw
+ * descriptor, but leave it pointing to the list
+ */
+ ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
+ ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;
+
+ /* circle link the hw descriptors */
+ desc = to_ioat_desc(ioat_chan->free_desc.next);
+ desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+ list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
+ desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
+ }
+}
+
+/**
+ * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
+ * @chan: the channel to be filled out
+ */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -304,7 +440,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
/* have we already been set up? */
if (!list_empty(&ioat_chan->free_desc))
- return INITIAL_IOAT_DESC_COUNT;
+ return ioat_chan->desccount;
/* Setup register to interrupt and write completion status on error */
chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
@@ -320,7 +456,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
}
/* Allocate descriptors */
- for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) {
+ for (i = 0; i < ioat_initial_desc_count; i++) {
desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
if (!desc) {
dev_err(&ioat_chan->device->pdev->dev,
@@ -330,7 +466,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->node, &tmp_list);
}
spin_lock_bh(&ioat_chan->desc_lock);
+ ioat_chan->desccount = i;
list_splice(&tmp_list, &ioat_chan->free_desc);
+ if (ioat_chan->device->version != IOAT_VER_1_2)
+ ioat2_dma_massage_chan_desc(ioat_chan);
spin_unlock_bh(&ioat_chan->desc_lock);
/* allocate a completion writeback area */
@@ -347,10 +486,14 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan)
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
tasklet_enable(&ioat_chan->cleanup_task);
- ioat_dma_start_null_desc(ioat_chan);
- return i;
+ ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */
+ return ioat_chan->desccount;
}
+/**
+ * ioat_dma_free_chan_resources - release all the descriptors
+ * @chan: the channel to be cleaned
+ */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
@@ -364,22 +507,45 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
*/
- writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ writeb(IOAT_CHANCMD_RESET,
+ ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
mdelay(100);
spin_lock_bh(&ioat_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
- in_use_descs++;
- list_del(&desc->node);
- pci_pool_free(ioatdma_device->dma_pool, desc->hw,
- desc->async_tx.phys);
- kfree(desc);
- }
- list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) {
- list_del(&desc->node);
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->used_desc, node) {
+ in_use_descs++;
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->free_desc, node) {
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ break;
+ case IOAT_VER_2_0:
+ list_for_each_entry_safe(desc, _desc,
+ ioat_chan->free_desc.next, node) {
+ list_del(&desc->node);
+ pci_pool_free(ioatdma_device->dma_pool, desc->hw,
+ desc->async_tx.phys);
+ kfree(desc);
+ }
+ desc = to_ioat_desc(ioat_chan->free_desc.next);
pci_pool_free(ioatdma_device->dma_pool, desc->hw,
desc->async_tx.phys);
kfree(desc);
+ INIT_LIST_HEAD(&ioat_chan->free_desc);
+ INIT_LIST_HEAD(&ioat_chan->used_desc);
+ break;
}
spin_unlock_bh(&ioat_chan->desc_lock);
@@ -395,6 +561,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
ioat_chan->pending = 0;
+ ioat_chan->dmacount = 0;
}
/**
@@ -406,7 +573,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
* has run out.
*/
static struct ioat_desc_sw *
-ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *new = NULL;
@@ -425,7 +592,82 @@ ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
return new;
}
-static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
+static struct ioat_desc_sw *
+ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
+{
+ struct ioat_desc_sw *new = NULL;
+
+ /*
+ * used.prev points to where to start processing
+ * used.next points to next free descriptor
+ * if used.prev == NULL, there are none waiting to be processed
+ * if used.next == used.prev.prev, there is only one free descriptor,
+ * and we need to use it as a noop descriptor before
+ * linking in a new set of descriptors, since the device
+ * has probably already read the pointer to it
+ */
+ if (ioat_chan->used_desc.prev &&
+ ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {
+
+ struct ioat_desc_sw *desc = NULL;
+ struct ioat_desc_sw *noop_desc = NULL;
+ int i;
+
+ /* set up the noop descriptor */
+ noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
+ noop_desc->hw->size = 0;
+ noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
+ noop_desc->hw->src_addr = 0;
+ noop_desc->hw->dst_addr = 0;
+
+ ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
+ ioat_chan->pending++;
+ ioat_chan->dmacount++;
+
+ /* get a few more descriptors */
+ for (i = 16; i; i--) {
+ desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
+ BUG_ON(!desc);
+ list_add_tail(&desc->node, ioat_chan->used_desc.next);
+
+ desc->hw->next
+ = to_ioat_desc(desc->node.next)->async_tx.phys;
+ to_ioat_desc(desc->node.prev)->hw->next
+ = desc->async_tx.phys;
+ ioat_chan->desccount++;
+ }
+
+ ioat_chan->used_desc.next = noop_desc->node.next;
+ }
+ new = to_ioat_desc(ioat_chan->used_desc.next);
+ prefetch(new);
+ ioat_chan->used_desc.next = new->node.next;
+
+ if (ioat_chan->used_desc.prev == NULL)
+ ioat_chan->used_desc.prev = &new->node;
+
+ prefetch(new->hw);
+ return new;
+}
+
+static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
+ struct ioat_dma_chan *ioat_chan)
+{
+ if (!ioat_chan)
+ return NULL;
+
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ return ioat1_dma_get_next_descriptor(ioat_chan);
+ break;
+ case IOAT_VER_2_0:
+ return ioat2_dma_get_next_descriptor(ioat_chan);
+ break;
+ }
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
struct dma_chan *chan,
size_t len,
int int_en)
@@ -441,19 +683,62 @@ static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy(
return new ? &new->async_tx : NULL;
}
+static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
+ struct dma_chan *chan,
+ size_t len,
+ int int_en)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+ struct ioat_desc_sw *new;
+
+ spin_lock_bh(&ioat_chan->desc_lock);
+ new = ioat2_dma_get_next_descriptor(ioat_chan);
+ new->len = len;
+
+ /* leave ioat_chan->desc_lock set in version 2 path */
+ return new ? &new->async_tx : NULL;
+}
+
+
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
* @chan: DMA channel handle
*/
-static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan)
+static inline void __ioat1_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan)
+{
+ ioat_chan->pending = 0;
+ writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
+}
+
+static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
if (ioat_chan->pending != 0) {
- ioat_chan->pending = 0;
- writeb(IOAT_CHANCMD_APPEND,
- ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
+ spin_lock_bh(&ioat_chan->desc_lock);
+ __ioat1_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
+ }
+}
+
+static inline void __ioat2_dma_memcpy_issue_pending(
+ struct ioat_dma_chan *ioat_chan)
+{
+ ioat_chan->pending = 0;
+ writew(ioat_chan->dmacount,
+ ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+}
+
+static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+ struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
+
+ if (ioat_chan->pending != 0) {
+ spin_lock_bh(&ioat_chan->desc_lock);
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ spin_unlock_bh(&ioat_chan->desc_lock);
}
}
@@ -465,11 +750,17 @@ static void ioat_dma_cleanup_tasklet(unsigned long data)
chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
+/**
+ * ioat_dma_memcpy_cleanup - clean up finished descriptors
+ * @chan: ioat channel to be cleaned up
+ */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
unsigned long phys_complete;
struct ioat_desc_sw *desc, *_desc;
dma_cookie_t cookie = 0;
+ unsigned long desc_phys;
+ struct ioat_desc_sw *latest_desc;
prefetch(ioat_chan->completion_virt);
@@ -507,56 +798,115 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
cookie = 0;
spin_lock_bh(&ioat_chan->desc_lock);
- list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) {
-
- /*
- * Incoming DMA requests may use multiple descriptors, due to
- * exceeding xfercap, perhaps. If so, only the last one will
- * have a cookie, and require unmapping.
- */
- if (desc->async_tx.cookie) {
- cookie = desc->async_tx.cookie;
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ list_for_each_entry_safe(desc, _desc,
+ &ioat_chan->used_desc, node) {
/*
- * yes we are unmapping both _page and _single alloc'd
- * regions with unmap_page. Is this *really* that bad?
+ * Incoming DMA requests may use multiple descriptors,
+ * due to exceeding xfercap, perhaps. If so, only the
+ * last one will have a cookie, and require unmapping.
*/
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, dst),
- pci_unmap_len(desc, len),
- PCI_DMA_FROMDEVICE);
- pci_unmap_page(ioat_chan->device->pdev,
- pci_unmap_addr(desc, src),
- pci_unmap_len(desc, len),
- PCI_DMA_TODEVICE);
- if (desc->async_tx.callback) {
- desc->async_tx.callback(
- desc->async_tx.callback_param);
- desc->async_tx.callback = NULL;
+ if (desc->async_tx.cookie) {
+ cookie = desc->async_tx.cookie;
+
+ /*
+ * yes we are unmapping both _page and _single
+ * alloc'd regions with unmap_page. Is this
+ * *really* that bad?
+ */
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, dst),
+ pci_unmap_len(desc, len),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, src),
+ pci_unmap_len(desc, len),
+ PCI_DMA_TODEVICE);
+
+ if (desc->async_tx.callback) {
+ desc->async_tx.callback(desc->async_tx.callback_param);
+ desc->async_tx.callback = NULL;
+ }
}
- }
- if (desc->async_tx.phys != phys_complete) {
- /*
- * a completed entry, but not the last, so cleanup
- * if the client is done with the descriptor
- */
- if (desc->async_tx.ack) {
- list_del(&desc->node);
- list_add_tail(&desc->node,
- &ioat_chan->free_desc);
- } else
+ if (desc->async_tx.phys != phys_complete) {
+ /*
+ * a completed entry, but not the last, so clean
+ * up if the client is done with the descriptor
+ */
+ if (desc->async_tx.ack) {
+ list_del(&desc->node);
+ list_add_tail(&desc->node,
+ &ioat_chan->free_desc);
+ } else
+ desc->async_tx.cookie = 0;
+ } else {
+ /*
+ * last used desc. Do not remove, so we can
+ * append from it, but don't look at it next
+ * time, either
+ */
desc->async_tx.cookie = 0;
- } else {
- /*
- * last used desc. Do not remove, so we can append from
- * it, but don't look at it next time, either
- */
- desc->async_tx.cookie = 0;
- /* TODO check status bits? */
+ /* TODO check status bits? */
+ break;
+ }
+ }
+ break;
+ case IOAT_VER_2_0:
+ /* has some other thread already cleaned up? */
+ if (ioat_chan->used_desc.prev == NULL)
break;
+
+ /* work backwards to find latest finished desc */
+ desc = to_ioat_desc(ioat_chan->used_desc.next);
+ latest_desc = NULL;
+ do {
+ desc = to_ioat_desc(desc->node.prev);
+ desc_phys = (unsigned long)desc->async_tx.phys
+ & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
+ if (desc_phys == phys_complete) {
+ latest_desc = desc;
+ break;
+ }
+ } while (&desc->node != ioat_chan->used_desc.prev);
+
+ if (latest_desc != NULL) {
+
+ /* work forwards to clear finished descriptors */
+ for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
+ &desc->node != latest_desc->node.next &&
+ &desc->node != ioat_chan->used_desc.next;
+ desc = to_ioat_desc(desc->node.next)) {
+ if (desc->async_tx.cookie) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, dst),
+ pci_unmap_len(desc, len),
+ PCI_DMA_FROMDEVICE);
+ pci_unmap_page(ioat_chan->device->pdev,
+ pci_unmap_addr(desc, src),
+ pci_unmap_len(desc, len),
+ PCI_DMA_TODEVICE);
+
+ if (desc->async_tx.callback) {
+ desc->async_tx.callback(desc->async_tx.callback_param);
+ desc->async_tx.callback = NULL;
+ }
+ }
+ }
+
+ /* move used.prev up beyond those that are finished */
+ if (&desc->node == ioat_chan->used_desc.next)
+ ioat_chan->used_desc.prev = NULL;
+ else
+ ioat_chan->used_desc.prev = &desc->node;
}
+ break;
}
spin_unlock_bh(&ioat_chan->desc_lock);
@@ -621,8 +971,6 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
return dma_async_is_complete(cookie, last_complete, last_used);
}
-/* PCI API */
-
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
struct ioat_desc_sw *desc;
@@ -633,21 +981,34 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- desc->hw->next = 0;
desc->hw->size = 0;
desc->hw->src_addr = 0;
desc->hw->dst_addr = 0;
desc->async_tx.ack = 1;
-
- list_add_tail(&desc->node, &ioat_chan->used_desc);
+ switch (ioat_chan->device->version) {
+ case IOAT_VER_1_2:
+ desc->hw->next = 0;
+ list_add_tail(&desc->node, &ioat_chan->used_desc);
+
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
+
+ writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
+ + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
+ break;
+ case IOAT_VER_2_0:
+ writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
+ writel(((u64) desc->async_tx.phys) >> 32,
+ ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
+
+ ioat_chan->dmacount++;
+ __ioat2_dma_memcpy_issue_pending(ioat_chan);
+ break;
+ }
spin_unlock_bh(&ioat_chan->desc_lock);
-
- writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
- ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW);
- writel(((u64) desc->async_tx.phys) >> 32,
- ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH);
-
- writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET);
}
/*
@@ -693,14 +1054,14 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
dma_chan = container_of(device->common.channels.next,
struct dma_chan,
device_node);
- if (ioat_dma_alloc_chan_resources(dma_chan) < 1) {
+ if (device->common.device_alloc_chan_resources(dma_chan) < 1) {
dev_err(&device->pdev->dev,
"selftest cannot allocate chan resource\n");
err = -ENODEV;
goto out;
}
- tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
if (!tx) {
dev_err(&device->pdev->dev,
"Self-test prep failed, disabling\n");
@@ -710,24 +1071,25 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
async_tx_ack(tx);
addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- ioat_set_src(addr, tx, 0);
+ DMA_TO_DEVICE);
+ tx->tx_set_src(addr, tx, 0);
addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- ioat_set_dest(addr, tx, 0);
+ DMA_FROM_DEVICE);
+ tx->tx_set_dest(addr, tx, 0);
tx->callback = ioat_dma_test_callback;
tx->callback_param = (void *)0x8086;
- cookie = ioat_tx_submit(tx);
+ cookie = tx->tx_submit(tx);
if (cookie < 0) {
dev_err(&device->pdev->dev,
"Self-test setup failed, disabling\n");
err = -ENODEV;
goto free_resources;
}
- ioat_dma_memcpy_issue_pending(dma_chan);
+ device->common.device_issue_pending(dma_chan);
msleep(1);
- if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
+ != DMA_SUCCESS) {
dev_err(&device->pdev->dev,
"Self-test copy timed out, disabling\n");
err = -ENODEV;
@@ -741,7 +1103,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
free_resources:
- ioat_dma_free_chan_resources(dma_chan);
+ device->common.device_free_chan_resources(dma_chan);
out:
kfree(src);
kfree(dest);
@@ -941,16 +1303,28 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
INIT_LIST_HEAD(&device->common.channels);
ioat_dma_enumerate_channels(device);
- dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
device->common.device_alloc_chan_resources =
ioat_dma_alloc_chan_resources;
device->common.device_free_chan_resources =
ioat_dma_free_chan_resources;
- device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy;
+ device->common.dev = &pdev->dev;
+
+ dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
device->common.device_is_tx_complete = ioat_dma_is_complete;
- device->common.device_issue_pending = ioat_dma_memcpy_issue_pending;
device->common.device_dependency_added = ioat_dma_dependency_added;
- device->common.dev = &pdev->dev;
+ switch (device->version) {
+ case IOAT_VER_1_2:
+ device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
+ device->common.device_issue_pending =
+ ioat1_dma_memcpy_issue_pending;
+ break;
+ case IOAT_VER_2_0:
+ device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
+ device->common.device_issue_pending =
+ ioat2_dma_memcpy_issue_pending;
+ break;
+ }
+
dev_err(&device->pdev->dev,
"Intel(R) I/OAT DMA Engine found,"
" %d channels, device version 0x%02x, driver version %s\n",
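As an aside for readers of the self-test changes above: the test now drives the channel purely through the generic struct dma_device callbacks instead of the ioat-specific helpers, which is the same sequence any dmaengine client of this era would follow. The sketch below is illustrative only and not part of the patch; it assumes the 2.6.24-era dmaengine API used in the hunks above (tx_set_src/tx_set_dest/tx_submit) and already DMA-mapped buffers.

#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/errno.h>

/* Drive one memcpy through the generic dma_device hooks, the way the
 * reworked self-test does.  dst/src are already DMA-mapped addresses.
 */
static int demo_async_memcpy(struct dma_chan *chan, dma_addr_t dst,
			     dma_addr_t src, size_t len)
{
	struct dma_device *dma = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dma->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	async_tx_ack(tx);
	tx->tx_set_src(src, tx, 0);
	tx->tx_set_dest(dst, tx, 0);
	cookie = tx->tx_submit(tx);
	if (cookie < 0)
		return -EIO;

	dma->device_issue_pending(chan);
	msleep(1);

	return dma->device_is_tx_complete(chan, cookie, NULL, NULL)
		== DMA_SUCCESS ? 0 : -ETIMEDOUT;
}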
diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h
index 5f9881e7b0ed..b668234ef654 100644
--- a/drivers/dma/ioatdma.h
+++ b/drivers/dma/ioatdma.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,7 +28,7 @@
#include <linux/cache.h>
#include <linux/pci_ids.h>
-#define IOAT_DMA_VERSION "1.26"
+#define IOAT_DMA_VERSION "2.04"
enum ioat_interrupt {
none = 0,
@@ -39,6 +39,8 @@ enum ioat_interrupt {
};
#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
+#define IOAT_DMA_DCA_ANY_CPU ~0
+
/**
* struct ioatdma_device - internal representation of a IOAT device
@@ -47,6 +49,9 @@ enum ioat_interrupt {
* @dma_pool: for allocating DMA descriptors
* @common: embedded struct dma_device
* @version: version of ioatdma device
+ * @irq_mode: which style of irq to use
+ * @msix_entries: irq handlers
+ * @idx: per channel data
*/
struct ioatdma_device {
@@ -63,23 +68,7 @@ struct ioatdma_device {
/**
* struct ioat_dma_chan - internal representation of a DMA channel
- * @device:
- * @reg_base:
- * @sw_in_use:
- * @completion:
- * @completion_low:
- * @completion_high:
- * @completed_cookie: last cookie seen completed on cleanup
- * @cookie: value of last cookie given to client
- * @last_completion:
- * @xfercap:
- * @desc_lock:
- * @free_desc:
- * @used_desc:
- * @resource:
- * @device_node:
*/
-
struct ioat_dma_chan {
void __iomem *reg_base;
@@ -95,6 +84,8 @@ struct ioat_dma_chan {
struct list_head used_desc;
int pending;
+ int dmacount;
+ int desccount;
struct ioatdma_device *device;
struct dma_chan common;
@@ -134,12 +125,13 @@ struct ioat_desc_sw {
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
void __iomem *iobase);
void ioat_dma_remove(struct ioatdma_device *device);
-struct dca_provider *ioat_dca_init(struct pci_dev *pdev,
- void __iomem *iobase);
+struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
+struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase);
#else
#define ioat_dma_probe(pdev, iobase) NULL
#define ioat_dma_remove(device) do { } while (0)
#define ioat_dca_init(pdev, iobase) NULL
+#define ioat2_dca_init(pdev, iobase) NULL
#endif
#endif /* IOATDMA_H */
diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h
index 9e7434e1551f..dd470fa91d86 100644
--- a/drivers/dma/ioatdma_hw.h
+++ b/drivers/dma/ioatdma_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
+ * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -22,12 +22,19 @@
#define _IOAT_HW_H_
/* PCI Configuration Space Values */
-#define IOAT_PCI_VID 0x8086
-#define IOAT_PCI_DID 0x1A38
-#define IOAT_PCI_RID 0x00
-#define IOAT_PCI_SVID 0x8086
-#define IOAT_PCI_SID 0x8086
-#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_PCI_VID 0x8086
+
+/* CB device ID's */
+#define IOAT_PCI_DID_5000 0x1A38
+#define IOAT_PCI_DID_CNB 0x360B
+#define IOAT_PCI_DID_SCNB 0x65FF
+#define IOAT_PCI_DID_SNB 0x402F
+
+#define IOAT_PCI_RID 0x00
+#define IOAT_PCI_SVID 0x8086
+#define IOAT_PCI_SID 0x8086
+#define IOAT_VER_1_2 0x12 /* Version 1.2 */
+#define IOAT_VER_2_0 0x20 /* Version 2.0 */
struct ioat_dma_descriptor {
uint32_t size;
@@ -47,6 +54,16 @@ struct ioat_dma_descriptor {
#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008
#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010
#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020
-#define IOAT_DMA_DESCRIPTOR_OPCODE 0xFF000000
+#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040
+#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080
+#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100
+#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200
+#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400
+
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000
+
+#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001
+#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000
#endif
diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h
index baaab5ea146a..9832d7ebd931 100644
--- a/drivers/dma/ioatdma_registers.h
+++ b/drivers/dma/ioatdma_registers.h
@@ -42,26 +42,25 @@
#define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */
#define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */
#define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */
-#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */
+#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */
#define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */
#define IOAT_VER_OFFSET 0x08 /* 8-bit */
#define IOAT_VER_MAJOR_MASK 0xF0
#define IOAT_VER_MINOR_MASK 0x0F
-#define GET_IOAT_VER_MAJOR(x) ((x) & IOAT_VER_MAJOR_MASK)
+#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4)
#define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK)
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
-#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalesing Supported */
+#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
#define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001
-
#define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */
/* DMA Channel Registers */
@@ -74,25 +73,101 @@
#define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004
#define IOAT_CHANCTRL_INT_DISABLE 0x0001
-#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatability */
-#define IOAT_DMA_COMP_V1 0x0001 /* Compatability with DMA version 1 */
-
-#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
-#define IOAT_CHANSTS_OFFSET_LOW 0x04
-#define IOAT_CHANSTS_OFFSET_HIGH 0x08
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0UL
+#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
+#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */
+#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */
+
+
+#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */
+#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */
+#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET)
+#define IOAT1_CHANSTS_OFFSET_LOW 0x04
+#define IOAT2_CHANSTS_OFFSET_LOW 0x08
+#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW)
+#define IOAT1_CHANSTS_OFFSET_HIGH 0x08
+#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C
+#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH)
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F
#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010
+#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2
#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3
-#define IOAT_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
-#define IOAT_CHAINADDR_OFFSET_LOW 0x0C
-#define IOAT_CHAINADDR_OFFSET_HIGH 0x10
-#define IOAT_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
+
+#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */
+
+#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */
+#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000
+#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */
+
+/* CB DCA Memory Space Registers */
+#define IOAT_DCAOFFSET_OFFSET 0x14
+/* CB_BAR + IOAT_DCAOFFSET value */
+#define IOAT_DCA_VER_OFFSET 0x00
+#define IOAT_DCA_VER_MAJOR_MASK 0xF0
+#define IOAT_DCA_VER_MINOR_MASK 0x0F
+
+#define IOAT_DCA_COMP_OFFSET 0x02
+#define IOAT_DCA_COMP_V1 0x1
+
+#define IOAT_FSB_CAPABILITY_OFFSET 0x04
+#define IOAT_FSB_CAPABILITY_PREFETCH 0x1
+
+#define IOAT_PCI_CAPABILITY_OFFSET 0x06
+#define IOAT_PCI_CAPABILITY_MEMWR 0x1
+
+#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08
+#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1
+
+#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A
+#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1
+
+#define IOAT_APICID_TAG_MAP_OFFSET 0x0C
+#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F
+#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0
+#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0
+#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4
+#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00
+#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8
+#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000
+#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12
+#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000
+#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16
+#define IOAT_APICID_TAG_CB2_VALID 0x8080808080
+
+#define IOAT_DCA_GREQID_OFFSET 0x10
+#define IOAT_DCA_GREQID_SIZE 0x04
+#define IOAT_DCA_GREQID_MASK 0xFFFF
+#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000
+#define IOAT_DCA_GREQID_VALID 0x20000000
+#define IOAT_DCA_GREQID_LASTID 0x80000000
+
+
+
+#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */
+#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */
+#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET)
+#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C
+#define IOAT2_CHAINADDR_OFFSET_LOW 0x10
+#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW)
+#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10
+#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14
+#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH)
+
+#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */
+#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */
+#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \
+ ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)
#define IOAT_CHANCMD_RESET 0x20
#define IOAT_CHANCMD_RESUME 0x10
#define IOAT_CHANCMD_ABORT 0x08
@@ -124,6 +199,7 @@
#define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000
#define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000
#define IOAT_CHANERR_SOFT_ERR 0x4000
+#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000
#define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */
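Since the register header now parameterizes several channel offsets by hardware version, shared channel code can read either layout through the same helpers. A minimal sketch of how the version-selecting macros above are meant to be used; the helper name is hypothetical, while ioat_chan/reg_base mirror the driver's own fields:

#include <linux/io.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"

/* Read the 64-bit channel status and mask it down to the completed
 * descriptor address, picking the v1 or v2 register offset at run time.
 */
static u64 demo_chansts(struct ioat_dma_chan *ioat_chan)
{
	u8 ver = ioat_chan->device->version;
	u64 status;

	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status |= (u64) readl(ioat_chan->reg_base +
			      IOAT_CHANSTS_OFFSET_HIGH(ver)) << 32;

	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}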
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 96f7e63e3996..a1f24c42d5ff 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1462,7 +1462,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl);
*
*/
static struct pci_driver i5000_driver = {
- .name = __stringify(KBUILD_BASENAME),
+ .name = KBUILD_BASENAME,
.probe = i5000_init_one,
.remove = __devexit_p(i5000_remove_one),
.id_table = i5000_pci_tbl,
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 58e32714afb5..ca18e0be4901 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -51,6 +51,7 @@ struct pasemi_smbus {
#define MRXFIFO_DATA_M 0x000000ff
#define SMSTA_XEN 0x08000000
+#define SMSTA_MTN 0x00200000
#define CTL_MRR 0x00000400
#define CTL_MTR 0x00000200
@@ -98,6 +99,10 @@ static unsigned int pasemi_smb_waitready(struct pasemi_smbus *smbus)
status = reg_read(smbus, REG_SMSTA);
}
+ /* Got NACK? */
+ if (status & SMSTA_MTN)
+ return -ENXIO;
+
if (timeout < 0) {
dev_warn(&smbus->dev->dev, "Timeout, status 0x%08x\n", status);
reg_write(smbus, REG_SMSTA, status);
@@ -364,7 +369,7 @@ static int __devinit pasemi_smb_probe(struct pci_dev *dev,
smbus->adapter.algo = &smbus_algorithm;
smbus->adapter.algo_data = smbus;
- /* set up the driverfs linkage to our parent device */
+ /* set up the sysfs linkage to our parent device */
smbus->adapter.dev.parent = &dev->dev;
reg_write(smbus, REG_CTL, (CTL_MTR | CTL_MRR |
diff --git a/drivers/i2c/chips/eeprom.c b/drivers/i2c/chips/eeprom.c
index d3da1fb05b9b..1a7eeebac506 100644
--- a/drivers/i2c/chips/eeprom.c
+++ b/drivers/i2c/chips/eeprom.c
@@ -128,13 +128,20 @@ static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *bin_attr,
for (slice = off >> 5; slice <= (off + count - 1) >> 5; slice++)
eeprom_update_client(client, slice);
- /* Hide Vaio security settings to regular users (16 first bytes) */
- if (data->nature == VAIO && off < 16 && !capable(CAP_SYS_ADMIN)) {
- size_t in_row1 = 16 - off;
- in_row1 = min(in_row1, count);
- memset(buf, 0, in_row1);
- if (count - in_row1 > 0)
- memcpy(buf + in_row1, &data->data[16], count - in_row1);
+ /* Hide Vaio private settings from regular users:
+ - BIOS passwords: bytes 0x00 to 0x0f
+ - UUID: bytes 0x10 to 0x1f
+ - Serial number: 0xc0 to 0xdf */
+ if (data->nature == VAIO && !capable(CAP_SYS_ADMIN)) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((off + i <= 0x1f) ||
+ (off + i >= 0xc0 && off + i <= 0xdf))
+ buf[i] = 0;
+ else
+ buf[i] = data->data[off + i];
+ }
} else {
memcpy(buf, &data->data[off], count);
}
@@ -197,14 +204,18 @@ static int eeprom_detect(struct i2c_adapter *adapter, int address, int kind)
goto exit_kfree;
/* Detect the Vaio nature of EEPROMs.
- We use the "PCG-" prefix as the signature. */
+ We use the "PCG-" or "VGN-" prefix as the signature. */
if (address == 0x57) {
- if (i2c_smbus_read_byte_data(new_client, 0x80) == 'P'
- && i2c_smbus_read_byte(new_client) == 'C'
- && i2c_smbus_read_byte(new_client) == 'G'
- && i2c_smbus_read_byte(new_client) == '-') {
+ char name[4];
+
+ name[0] = i2c_smbus_read_byte_data(new_client, 0x80);
+ name[1] = i2c_smbus_read_byte(new_client);
+ name[2] = i2c_smbus_read_byte(new_client);
+ name[3] = i2c_smbus_read_byte(new_client);
+
+ if (!memcmp(name, "PCG-", 4) || !memcmp(name, "VGN-", 4)) {
dev_info(&new_client->dev, "Vaio EEPROM detected, "
- "enabling password protection\n");
+ "enabling privacy protection\n");
data->nature = VAIO;
}
}
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 1a4e8dc03b36..b5e13e405e72 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -673,7 +673,7 @@ static int __i2c_check_addr(struct i2c_adapter *adapter, unsigned int addr)
return 0;
}
-int i2c_check_addr(struct i2c_adapter *adapter, int addr)
+static int i2c_check_addr(struct i2c_adapter *adapter, int addr)
{
int rval;
@@ -683,7 +683,6 @@ int i2c_check_addr(struct i2c_adapter *adapter, int addr)
return rval;
}
-EXPORT_SYMBOL(i2c_check_addr);
int i2c_attach_client(struct i2c_client *client)
{
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 5a15e50748de..c21ae20ae362 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -38,6 +38,15 @@
static struct i2c_driver i2cdev_driver;
+/*
+ * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
+ * slave (i2c_client) with which messages will be exchanged. It's coupled
+ * with a character special file which is accessed by user mode drivers.
+ *
+ * The list of i2c_dev structures is parallel to the i2c_adapter lists
+ * maintained by the driver model, and is updated using notifications
+ * delivered to the i2cdev_driver.
+ */
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
@@ -103,6 +112,25 @@ static ssize_t show_adapter_name(struct device *dev,
}
static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+/* ------------------------------------------------------------------------- */
+
+/*
+ * After opening an instance of this character special file, a file
+ * descriptor starts out associated only with an i2c_adapter (and bus).
+ *
+ * Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg
+ * traffic to any devices on the bus used by that adapter. That's because
+ * the i2c_msg vectors embed all the addressing information they need, and
+ * are submitted directly to an i2c_adapter. However, SMBus-only adapters
+ * don't support that interface.
+ *
+ * To use read()/write() system calls on that file descriptor, or to use
+ * SMBus interfaces (and work with SMBus-only hosts!), you must first issue
+ * an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. That configures an anonymous
+ * (never registered) i2c_client so it holds the addressing information
+ * needed by those system calls and by this SMBus interface.
+ */
+
static ssize_t i2cdev_read (struct file *file, char __user *buf, size_t count,
loff_t *offset)
{
@@ -154,6 +182,29 @@ static ssize_t i2cdev_write (struct file *file, const char __user *buf, size_t c
return ret;
}
+/* This address checking function differs from the one in i2c-core
+ in that it considers an address with a registered device, but no
+ bound driver, as NOT busy. */
+static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
+{
+ struct list_head *item;
+ struct i2c_client *client;
+ int res = 0;
+
+ mutex_lock(&adapter->clist_lock);
+ list_for_each(item, &adapter->clients) {
+ client = list_entry(item, struct i2c_client, list);
+ if (client->addr == addr) {
+ if (client->driver)
+ res = -EBUSY;
+ break;
+ }
+ }
+ mutex_unlock(&adapter->clist_lock);
+
+ return res;
+}
+
static int i2cdev_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -172,11 +223,22 @@ static int i2cdev_ioctl(struct inode *inode, struct file *file,
switch ( cmd ) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
+ /* NOTE: devices set up to work with "new style" drivers
+ * can't use I2C_SLAVE, even when the device node is not
+ * bound to a driver. Only I2C_SLAVE_FORCE will work.
+ *
+ * Setting the PEC flag here won't affect kernel drivers,
+ * which will be using the i2c_client node registered with
+ * the driver model core. Likewise, when that client has
+ * the PEC flag already set, the i2c-dev driver won't see
+ * (or use) this setting.
+ */
if ((arg > 0x3ff) ||
(((client->flags & I2C_M_TEN) == 0) && arg > 0x7f))
return -EINVAL;
- if ((cmd == I2C_SLAVE) && i2c_check_addr(client->adapter,arg))
+ if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg))
return -EBUSY;
+ /* REVISIT: address could become busy later */
client->addr = arg;
return 0;
case I2C_TENBIT:
@@ -386,6 +448,13 @@ static int i2cdev_open(struct inode *inode, struct file *file)
if (!adap)
return -ENODEV;
+ /* This creates an anonymous i2c_client, which may later be
+ * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE.
+ *
+ * This client is ** NEVER REGISTERED ** with the driver model
+ * or I2C core code!! It just holds private copies of addressing
+ * information and maybe a PEC flag.
+ */
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client) {
i2c_put_adapter(adap);
@@ -394,7 +463,6 @@ static int i2cdev_open(struct inode *inode, struct file *file)
snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);
client->driver = &i2cdev_driver;
- /* registered with adapter, passed as client to user */
client->adapter = adap;
file->private_data = client;
@@ -422,6 +490,14 @@ static const struct file_operations i2cdev_fops = {
.release = i2cdev_release,
};
+/* ------------------------------------------------------------------------- */
+
+/*
+ * The legacy "i2cdev_driver" is used primarily to get notifications when
+ * I2C adapters are added or removed, so that each one gets an i2c_dev
+ * and is thus made available to userspace driver code.
+ */
+
static struct class *i2c_dev_class;
static int i2cdev_attach_adapter(struct i2c_adapter *adap)
@@ -486,6 +562,12 @@ static struct i2c_driver i2cdev_driver = {
.detach_client = i2cdev_detach_client,
};
+/* ------------------------------------------------------------------------- */
+
+/*
+ * module load/unload record keeping
+ */
+
static int __init i2c_dev_init(void)
{
int res;
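The long comment blocks added to i2c-dev.c describe the user-space contract: open the adapter's character device, bind the fd to a slave address with I2C_SLAVE (or I2C_SLAVE_FORCE), then use read()/write() or the SMBus ioctls. A hedged user-space sketch of that flow; the bus number, slave address 0x50, and register 0x00 are made up for illustration:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

/* Read one byte back from a hypothetical device at 0x50 on bus 0. */
int demo_read_byte(void)
{
	char reg = 0x00, val = 0;
	int fd = open("/dev/i2c-0", O_RDWR);

	if (fd < 0)
		return -1;

	/* Bind this fd to slave 0x50; i2cdev_check_addr() makes this fail
	 * with EBUSY only if a kernel driver is already bound there.
	 */
	if (ioctl(fd, I2C_SLAVE, 0x50) < 0 ||
	    write(fd, &reg, 1) != 1 ||	/* select register */
	    read(fd, &val, 1) != 1)	/* read it back */
		val = -1;

	close(fd);
	return val;
}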
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index d1e8df187222..e445fe6e4ba9 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -203,10 +203,6 @@ config BLK_DEV_IDECD
CD-ROM drive, you can say N to all other CD-ROM options, but be sure
to say Y or M to "ISO 9660 CD-ROM file system support".
- Note that older versions of LILO (LInux LOader) cannot properly deal
- with IDE/ATAPI CD-ROMs, so install LILO 16 or higher, available from
- <http://lilo.go.dyndns.org/>.
-
To compile this driver as a module, choose M here: the
module will be called ide-cd.
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index e196aefa2070..7f5bc2ee6c7e 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -748,8 +748,7 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
hold = ATA_DMA2_HOLD;
break;
default:
- BUG();
- break;
+ return;
}
if (speed >= XFER_UDMA_0)
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 755011827afa..db22d1ff4e55 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -885,7 +885,6 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
return do_rw_taskfile(drive, args);
} else if (rq->cmd_type == REQ_TYPE_ATA_TASK) {
u8 *args = rq->buffer;
- u8 sel;
if (!args)
goto done;
@@ -903,10 +902,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
hwif->OUTB(args[3], IDE_SECTOR_REG);
hwif->OUTB(args[4], IDE_LCYL_REG);
hwif->OUTB(args[5], IDE_HCYL_REG);
- sel = (args[6] & ~0x10);
- if (drive->select.b.unit)
- sel |= 0x10;
- hwif->OUTB(sel, IDE_SELECT_REG);
+ hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG);
ide_cmd(drive, args[0], args[2], &drive_cmd_intr);
return ide_started;
} else if (rq->cmd_type == REQ_TYPE_ATA_CMD) {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index af86433baede..1609b8604f56 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -514,6 +514,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat)
if (drive->addressing == 1) {
__u64 sectors = 0;
u32 low = 0, high = 0;
+ hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG);
low = ide_read_24(drive);
hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG);
high = ide_read_24(drive);
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index ea0143ef5fe5..51fca441c294 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -1,5 +1,5 @@
/*
- * linux/drivers/ide/pci/cmd64x.c Version 1.50 May 10, 2007
+ * linux/drivers/ide/pci/cmd64x.c Version 1.51 Nov 8, 2007
*
* cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Due to massive hardware bugs, UltraDMA is only supported
@@ -339,7 +339,8 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
u8 mrdmode = inb(hwif->dma_master + 0x01);
/* clear the interrupt bit */
- outb(mrdmode | irq_mask, hwif->dma_master + 0x01);
+ outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask,
+ hwif->dma_master + 0x01);
return err;
}
diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c
index 599408952bd4..547690395eee 100644
--- a/drivers/ide/pci/cs5530.c
+++ b/drivers/ide/pci/cs5530.c
@@ -117,8 +117,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
case XFER_MW_DMA_1: timings = 0x00012121; break;
case XFER_MW_DMA_2: timings = 0x00002020; break;
default:
- BUG();
- break;
+ return;
}
basereg = CS5530_BASEREG(drive->hwif);
reg = inl(basereg + 4); /* get drive0 config register */
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 5c9975435319..99b7d763b6c7 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -653,8 +653,7 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = {
static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
- return 0;
+ return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]);
}
static const struct pci_device_id it821x_pci_tbl[] = {
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index bdf64d997708..0083eaf89c77 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -139,8 +139,7 @@ static const struct ide_port_info jmicron_chipset __devinitdata = {
static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
- ide_setup_pci_device(dev, &jmicron_chipset);
- return 0;
+ return ide_setup_pci_device(dev, &jmicron_chipset);
}
/* All JMB PATA controllers have and will continue to have the same
diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c
index 0a7b3202066d..707d5ff66b03 100644
--- a/drivers/ide/pci/sc1200.c
+++ b/drivers/ide/pci/sc1200.c
@@ -186,8 +186,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
}
break;
default:
- BUG();
- break;
+ return;
}
if (unit == 0) { /* are we configuring drive0? */
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index 6b7bb53acefd..f6e2ab3dd166 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -356,7 +356,6 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
sis_program_timings(drive, speed);
break;
default:
- BUG();
break;
}
}
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 816b5311dad6..5afdfef7264c 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1138,6 +1138,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
hwif->drives[0].autotune = IDE_TUNE_AUTO;
hwif->drives[1].autotune = IDE_TUNE_AUTO;
hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
+ IDE_HFLAG_PIO_NO_DOWNGRADE |
IDE_HFLAG_POST_SET_MODE;
hwif->pio_mask = ATA_PIO4;
hwif->set_pio_mode = pmac_ide_set_pio_mode;
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 02d14bf85ab2..25fd09053220 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -7,11 +7,6 @@
* May be copied or modified under the terms of the GNU General Public License
*/
-/*
- * This module provides support for automatic detection and
- * configuration of all PCI IDE interfaces present in a system.
- */
-
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index f0c777589374..b5436ca92e68 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1000,6 +1000,7 @@ static int iwch_query_device(struct ib_device *ibdev,
props->max_sge = dev->attr.max_sge_per_wr;
props->max_sge_rd = 1;
props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
+ props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
props->max_cq = dev->attr.max_cqs;
props->max_cqe = dev->attr.max_cqes_per_cq;
props->max_mr = dev->attr.max_mem_regs;
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 97d108634c58..453eb995c1d4 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -50,6 +50,38 @@
static struct kmem_cache *av_cache;
+int ehca_calc_ipd(struct ehca_shca *shca, int port,
+ enum ib_rate path_rate, u32 *ipd)
+{
+ int path = ib_rate_to_mult(path_rate);
+ int link, ret;
+ struct ib_port_attr pa;
+
+ if (path_rate == IB_RATE_PORT_CURRENT) {
+ *ipd = 0;
+ return 0;
+ }
+
+ if (unlikely(path < 0)) {
+ ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
+ path_rate);
+ return -EINVAL;
+ }
+
+ ret = ehca_query_port(&shca->ib_device, port, &pa);
+ if (unlikely(ret < 0)) {
+ ehca_err(&shca->ib_device, "Failed to query port ret=%i", ret);
+ return ret;
+ }
+
+ link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
+
+ /* IPD = round((link / path) - 1) */
+ *ipd = ((link + (path >> 1)) / path) - 1;
+
+ return 0;
+}
+
struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
int ret;
@@ -69,15 +101,13 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
av->av.slid_path_bits = ah_attr->src_path_bits;
if (ehca_static_rate < 0) {
- int ah_mult = ib_rate_to_mult(ah_attr->static_rate);
- int ehca_mult =
- ib_rate_to_mult(shca->sport[ah_attr->port_num].rate );
-
- if (ah_mult >= ehca_mult)
- av->av.ipd = 0;
- else
- av->av.ipd = (ah_mult > 0) ?
- ((ehca_mult - 1) / ah_mult) : 0;
+ u32 ipd;
+ if (ehca_calc_ipd(shca, ah_attr->port_num,
+ ah_attr->static_rate, &ipd)) {
+ ret = -EINVAL;
+ goto create_ah_exit1;
+ }
+ av->av.ipd = ipd;
} else
av->av.ipd = ehca_static_rate;
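The new ehca_calc_ipd() computes the inter-packet delay from the link rate (active width times speed) and the requested path-rate multiplier, using the usual add-half-the-divisor trick for rounded integer division. A quick stand-alone check of that arithmetic; the rate pairs below are illustrative, not taken from real hardware:

#include <assert.h>

/* Same expression as ehca_calc_ipd(): round(link / path) - 1. */
static unsigned int ipd(int link, int path)
{
	return ((link + (path >> 1)) / path) - 1;
}

int main(void)
{
	assert(ipd(12, 4) == 2);  /* 12X SDR link, 10 Gb/s path: 12/4 = 3 -> IPD 2 */
	assert(ipd(12, 8) == 1);  /* 12X SDR link, 20 Gb/s path: 1.5 rounds to 2 -> IPD 1 */
	assert(ipd(4, 4) == 0);   /* matched rates -> no extra inter-packet delay */
	return 0;
}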
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 2d660ae189e5..87f12d4312a7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -95,7 +95,6 @@ struct ehca_sma_attr {
struct ehca_sport {
struct ib_cq *ibcq_aqp1;
struct ib_qp *ibqp_aqp1;
- enum ib_rate rate;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
};
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 15806d140461..5bd7b591987e 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -151,7 +151,6 @@ int ehca_query_port(struct ib_device *ibdev,
}
memset(props, 0, sizeof(struct ib_port_attr));
- props->state = rblock->state;
switch (rblock->max_mtu) {
case 0x1:
@@ -188,11 +187,20 @@ int ehca_query_port(struct ib_device *ibdev,
props->subnet_timeout = rblock->subnet_timeout;
props->init_type_reply = rblock->init_type_reply;
- props->active_width = IB_WIDTH_12X;
- props->active_speed = 0x1;
-
- /* at the moment (logical) link state is always LINK_UP */
- props->phys_state = 0x5;
+ if (rblock->state && rblock->phys_width) {
+ props->phys_state = rblock->phys_pstate;
+ props->state = rblock->phys_state;
+ props->active_width = rblock->phys_width;
+ props->active_speed = rblock->phys_speed;
+ } else {
+ /* old firmware releases don't report physical
+ * port info, so use default values
+ */
+ props->phys_state = 5;
+ props->state = rblock->state;
+ props->active_width = IB_WIDTH_12X;
+ props->active_speed = 0x1;
+ }
query_port1:
ehca_free_fw_ctrlblock(rblock);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index dce503bb7d6b..5485799cdc8d 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -189,6 +189,9 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void ehca_poll_eqs(unsigned long data);
+int ehca_calc_ipd(struct ehca_shca *shca, int port,
+ enum ib_rate path_rate, u32 *ipd);
+
#ifdef CONFIG_PPC_64K_PAGES
void *ehca_alloc_fw_ctrlblock(gfp_t flags);
void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c6cd38c5321f..90d4334179bf 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -327,9 +327,6 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
shca->hw_level = ehca_hw_level;
ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
- shca->sport[0].rate = IB_RATE_30_GBPS;
- shca->sport[1].rate = IB_RATE_30_GBPS;
-
shca->hca_cap = rblock->hca_cap_indicators;
ehca_gen_dbg(" ... HCA capabilities:");
for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index de182648b282..2e3e6547cb78 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1196,10 +1196,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
}
if (attr_mask & IB_QP_AV) {
- int ah_mult = ib_rate_to_mult(attr->ah_attr.static_rate);
- int ehca_mult = ib_rate_to_mult(shca->sport[my_qp->
- init_attr.port_num].rate);
-
mqpcb->dlid = attr->ah_attr.dlid;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
@@ -1207,11 +1203,12 @@ static int internal_modify_qp(struct ib_qp *ibqp,
mqpcb->service_level = attr->ah_attr.sl;
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
- if (ah_mult < ehca_mult)
- mqpcb->max_static_rate = (ah_mult > 0) ?
- ((ehca_mult - 1) / ah_mult) : 0;
- else
- mqpcb->max_static_rate = 0;
+ if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+ attr->ah_attr.static_rate,
+ &mqpcb->max_static_rate)) {
+ ret = -EINVAL;
+ goto modify_qp_exit2;
+ }
update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
/*
@@ -1280,10 +1277,6 @@ static int internal_modify_qp(struct ib_qp *ibqp,
(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
}
if (attr_mask & IB_QP_ALT_PATH) {
- int ah_mult = ib_rate_to_mult(attr->alt_ah_attr.static_rate);
- int ehca_mult = ib_rate_to_mult(
- shca->sport[my_qp->init_attr.port_num].rate);
-
if (attr->alt_port_num < 1
|| attr->alt_port_num > shca->num_ports) {
ret = -EINVAL;
@@ -1309,10 +1302,12 @@ static int internal_modify_qp(struct ib_qp *ibqp,
mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
mqpcb->service_level_al = attr->alt_ah_attr.sl;
- if (ah_mult > 0 && ah_mult < ehca_mult)
- mqpcb->max_static_rate_al = (ehca_mult - 1) / ah_mult;
- else
- mqpcb->max_static_rate_al = 0;
+ if (ehca_calc_ipd(shca, my_qp->init_attr.port_num,
+ attr->alt_ah_attr.static_rate,
+ &mqpcb->max_static_rate_al)) {
+ ret = -EINVAL;
+ goto modify_qp_exit2;
+ }
/* OpenIB doesn't support alternate retry counts - copy them */
mqpcb->retry_count_al = mqpcb->retry_count;
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index d9739e554515..485b8400359e 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -402,7 +402,11 @@ struct hipz_query_port {
u64 max_msg_sz;
u32 max_mtu;
u32 vl_cap;
- u8 reserved2[1900];
+ u32 phys_pstate;
+ u32 phys_state;
+ u32 phys_speed;
+ u32 phys_width;
+ u8 reserved2[1884];
u64 guid_entries[255];
} __attribute__ ((packed));
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 645ed71fd797..08d8ae148cd0 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -404,7 +404,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (ret)
- goto bail;
+ goto bail_free;
}
spin_lock_irq(&cq->lock);
@@ -424,10 +424,8 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
else
n = head - tail;
if (unlikely((u32)cqe < n)) {
- spin_unlock_irq(&cq->lock);
- vfree(wc);
ret = -EOVERFLOW;
- goto bail;
+ goto bail_unlock;
}
for (n = 0; tail != head; n++) {
if (cq->ip)
@@ -459,7 +457,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
}
ret = 0;
+ goto bail;
+bail_unlock:
+ spin_unlock_irq(&cq->lock);
+bail_free:
+ vfree(wc);
bail:
return ret;
}
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 5c29b2bfea17..120a61b03bc4 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -959,8 +959,9 @@ static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
/* If this is a partial ACK, reset the retransmit timer. */
if (qp->s_last != qp->s_tail) {
spin_lock(&dev->pending_lock);
- list_add_tail(&qp->timerwait,
- &dev->pending[dev->pending_index]);
+ if (list_empty(&qp->timerwait))
+ list_add_tail(&qp->timerwait,
+ &dev->pending[dev->pending_index]);
spin_unlock(&dev->pending_lock);
/*
* If we get a partial ACK for a resent operation,
diff --git a/drivers/isdn/sc/card.h b/drivers/isdn/sc/card.h
index 5992f63c383e..0120bcf88311 100644
--- a/drivers/isdn/sc/card.h
+++ b/drivers/isdn/sc/card.h
@@ -109,7 +109,7 @@ void memcpy_fromshmem(int card, void *dest, const void *src, size_t n);
int get_card_from_id(int driver);
int indicate_status(int card, int event, ulong Channel, char *Data);
irqreturn_t interrupt_handler(int interrupt, void *cardptr);
-int sndpkt(int devId, int channel, struct sk_buff *data);
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data);
void rcvpkt(int card, RspMessage *rcvmsg);
int command(isdn_ctrl *cmd);
int reset(int card);
diff --git a/drivers/isdn/sc/packet.c b/drivers/isdn/sc/packet.c
index 92016a2608e9..5ff6ae868440 100644
--- a/drivers/isdn/sc/packet.c
+++ b/drivers/isdn/sc/packet.c
@@ -20,7 +20,7 @@
#include "message.h"
#include "card.h"
-int sndpkt(int devId, int channel, struct sk_buff *data)
+int sndpkt(int devId, int channel, int ack, struct sk_buff *data)
{
LLData ReqLnkWrite;
int status;
diff --git a/drivers/isdn/sc/shmem.c b/drivers/isdn/sc/shmem.c
index e0331e0094f1..712220cef139 100644
--- a/drivers/isdn/sc/shmem.c
+++ b/drivers/isdn/sc/shmem.c
@@ -50,7 +50,7 @@ void memcpy_toshmem(int card, void *dest, const void *src, size_t n)
outb(((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE) >> 14) | 0x80,
sc_adapter[card]->ioport[sc_adapter[card]->shmem_pgport]);
- memcpy_toio(sc_adapter[card]->rambase + dest_rem, src, n);
+ memcpy_toio((void __iomem *)(sc_adapter[card]->rambase + dest_rem), src, n);
spin_unlock_irqrestore(&sc_adapter[card]->lock, flags);
pr_debug("%s: set page to %#x\n",sc_adapter[card]->devicename,
((sc_adapter[card]->shmem_magic + ch * SRAM_PAGESIZE)>>14)|0x80);
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 9d716fa42cad..3b92a61ba8d2 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -184,7 +184,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
free_regs:
free_page(lg->regs_page);
release_guest:
- memset(lg, 0, sizeof(*lg));
+ kfree(lg);
unlock:
mutex_unlock(&lguest_lock);
return err;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 1cfc984cc7b7..a5aad8cad843 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -688,7 +688,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
static struct dma_async_tx_descriptor *
-ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
int disks = sh->disks;
int pd_idx = sh->pd_idx, i;
@@ -696,7 +697,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
/* check if prexor is active which means only process blocks
* that are part of a read-modify-write (Wantprexor)
*/
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
pr_debug("%s: stripe %llu\n", __FUNCTION__,
(unsigned long long)sh->sector);
@@ -773,7 +774,8 @@ static void ops_complete_write(void *stripe_head_ref)
}
static void
-ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
+ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
+ unsigned long pending)
{
/* kernel stack size limits the total number of disks */
int disks = sh->disks;
@@ -781,7 +783,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
int count = 0, pd_idx = sh->pd_idx, i;
struct page *xor_dest;
- int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending);
+ int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
unsigned long flags;
dma_async_tx_callback callback;
@@ -808,7 +810,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
}
/* check whether this postxor is part of a write */
- callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ?
+ callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ?
ops_complete_write : ops_complete_postxor;
/* 1/ if we prexor'd then the dest is reused as a source
@@ -896,12 +898,12 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending)
tx = ops_run_prexor(sh, tx);
if (test_bit(STRIPE_OP_BIODRAIN, &pending)) {
- tx = ops_run_biodrain(sh, tx);
+ tx = ops_run_biodrain(sh, tx, pending);
overlap_clear++;
}
if (test_bit(STRIPE_OP_POSTXOR, &pending))
- ops_run_postxor(sh, tx);
+ ops_run_postxor(sh, tx, pending);
if (test_bit(STRIPE_OP_CHECK, &pending))
ops_run_check(sh);
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index 6a5a05d1f392..05172d2613d6 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -244,10 +244,11 @@ ioc4_variant(struct ioc4_driver_data *idd)
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
- pci_dev_put(pdev);
} while (pdev && !found);
- if (NULL != pdev)
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
return IOC4_VARIANT_IO9;
+ }
/* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. */
pdev = NULL;
@@ -258,10 +259,11 @@ ioc4_variant(struct ioc4_driver_data *idd)
idd->idd_pdev->bus->number == pdev->bus->number &&
3 == PCI_SLOT(pdev->devfn))
found = 1;
- pci_dev_put(pdev);
} while (pdev && !found);
- if (NULL != pdev)
+ if (NULL != pdev) {
+ pci_dev_put(pdev);
return IOC4_VARIANT_IO10;
+ }
/* PCI-RT: No SCSI/SATA controller will be present */
return IOC4_VARIANT_PCI_RT;
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7f016f3d5bf0..91a6590d107b 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -417,7 +417,7 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
if (status & REG_INTSTS_RX) {
spin_lock(&ep->rx_lock);
- if (likely(__netif_rx_schedule_prep(dev, &ep->napi))) {
+ if (likely(netif_rx_schedule_prep(dev, &ep->napi))) {
wrl(ep, REG_INTEN, REG_INTEN_TX);
__netif_rx_schedule(dev, &ep->napi);
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a198404a3e36..423298c84a1d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1847,9 +1847,9 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
*/
void bond_destroy(struct bonding *bond)
{
- unregister_netdevice(bond->dev);
bond_deinit(bond->dev);
bond_destroy_sysfs_entry(bond);
+ unregister_netdevice(bond->dev);
}
/*
@@ -4475,8 +4475,8 @@ static void bond_free_all(void)
bond_mc_list_destroy(bond);
/* Release the bonded slaves */
bond_release_all(bond_dev);
- unregister_netdevice(bond_dev);
bond_deinit(bond_dev);
+ unregister_netdevice(bond_dev);
}
#ifdef CONFIG_PROC_FS
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index ffa7e649a6ef..443666292a5c 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1379,11 +1379,11 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
}
__skb_pull(skb, sizeof(*p));
- skb->dev->last_rx = jiffies;
st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
st->rx_packets++;
skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
+ skb->dev->last_rx = jiffies;
if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
skb->protocol == htons(ETH_P_IP) &&
(skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index edd6828f0a78..917b7b46f1a7 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -250,6 +250,7 @@
#include <asm/system.h>
#include <asm/ethernet.h>
#include <asm/cache.h>
+#include <asm/arch/io_interface_mux.h>
//#define ETHDEBUG
#define D(x)
@@ -279,6 +280,9 @@ struct net_local {
* by this lock as well.
*/
spinlock_t lock;
+
+ spinlock_t led_lock; /* Protect LED state */
+ spinlock_t transceiver_lock; /* Protect transceiver state. */
};
typedef struct etrax_eth_descr
@@ -295,8 +299,6 @@ struct transceiver_ops
void (*check_duplex)(struct net_device* dev);
};
-struct transceiver_ops* transceiver;
-
/* Duplex settings */
enum duplex
{
@@ -307,7 +309,7 @@ enum duplex
/* Dma descriptors etc. */
-#define MAX_MEDIA_DATA_SIZE 1518
+#define MAX_MEDIA_DATA_SIZE 1522
#define MIN_PACKET_LEN 46
#define ETHER_HEAD_LEN 14
@@ -332,8 +334,8 @@ enum duplex
/*Intel LXT972A specific*/
#define MDIO_INT_STATUS_REG_2 0x0011
-#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 )
-#define MDIO_INT_SPEED ( 1 << 14 )
+#define MDIO_INT_FULL_DUPLEX_IND (1 << 9)
+#define MDIO_INT_SPEED (1 << 14)
/* Network flash constants */
#define NET_FLASH_TIME (HZ/50) /* 20 ms */
@@ -344,8 +346,8 @@ enum duplex
#define NO_NETWORK_ACTIVITY 0
#define NETWORK_ACTIVITY 1
-#define NBR_OF_RX_DESC 64
-#define NBR_OF_TX_DESC 256
+#define NBR_OF_RX_DESC 32
+#define NBR_OF_TX_DESC 16
/* Large packets are sent directly to upper layers while small packets are */
/* copied (to reduce memory waste). The following constant decides the breakpoint */
@@ -367,7 +369,6 @@ enum duplex
static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to
be processed */
static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */
-static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */
static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32)));
@@ -377,7 +378,6 @@ static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */
static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32)));
static unsigned int network_rec_config_shadow = 0;
-static unsigned int mdio_phy_addr; /* Transciever address */
static unsigned int network_tr_ctrl_shadow = 0;
@@ -411,7 +411,7 @@ static int e100_set_config(struct net_device* dev, struct ifmap* map);
static void e100_tx_timeout(struct net_device *dev);
static struct net_device_stats *e100_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
-static void e100_hardware_send_packet(char *buf, int length);
+static void e100_hardware_send_packet(struct net_local* np, char *buf, int length);
static void update_rx_stats(struct net_device_stats *);
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
@@ -434,7 +434,10 @@ static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void dummy_check_speed(struct net_device* dev);
+static void dummy_check_duplex(struct net_device* dev);
+#else
static void broadcom_check_speed(struct net_device* dev);
static void broadcom_check_duplex(struct net_device* dev);
static void tdk_check_speed(struct net_device* dev);
@@ -443,16 +446,28 @@ static void intel_check_speed(struct net_device* dev);
static void intel_check_duplex(struct net_device* dev);
static void generic_check_speed(struct net_device* dev);
static void generic_check_duplex(struct net_device* dev);
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void e100_netpoll(struct net_device* dev);
+#endif
+
+static int autoneg_normal = 1;
struct transceiver_ops transceivers[] =
{
+#if defined(CONFIG_ETRAX_NO_PHY)
+ {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */
+#else
{0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */
{0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */
{0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */
{0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/
{0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */
+#endif
};
+struct transceiver_ops* transceiver = &transceivers[0];
+
#define tx_done(dev) (*R_DMA_CH0_CMD == 0)
/*
@@ -471,14 +486,22 @@ etrax_ethernet_init(void)
int i, err;
printk(KERN_INFO
- "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n");
+ "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n");
- dev = alloc_etherdev(sizeof(struct net_local));
- np = dev->priv;
+ if (cris_request_io_interface(if_eth, cardname)) {
+ printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n");
+ return -EBUSY;
+ }
+ dev = alloc_etherdev(sizeof(struct net_local));
if (!dev)
return -ENOMEM;
+ np = netdev_priv(dev);
+
+ /* we do our own locking */
+ dev->features |= NETIF_F_LLTX;
+
dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */
/* now setup our etrax specific stuff */
@@ -498,14 +521,22 @@ etrax_ethernet_init(void)
dev->do_ioctl = e100_ioctl;
dev->set_config = e100_set_config;
dev->tx_timeout = e100_tx_timeout;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = e100_netpoll;
+#endif
+
+ spin_lock_init(&np->lock);
+ spin_lock_init(&np->led_lock);
+ spin_lock_init(&np->transceiver_lock);
/* Initialise the list of Etrax DMA-descriptors */
/* Initialise receive descriptors */
for (i = 0; i < NBR_OF_RX_DESC; i++) {
- /* Allocate two extra cachelines to make sure that buffer used by DMA
- * does not share cacheline with any other data (to avoid cache bug)
+ /* Allocate two extra cachelines to make sure that buffer used
+ * by DMA does not share cacheline with any other data (to
+ * avoid cache bug)
*/
RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!RxDescList[i].skb)
@@ -541,7 +572,6 @@ etrax_ethernet_init(void)
myNextRxDesc = &RxDescList[0];
myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
- myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1];
myFirstTxDesc = &TxDescList[0];
myNextTxDesc = &TxDescList[0];
myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1];
@@ -562,10 +592,11 @@ etrax_ethernet_init(void)
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
+ speed_timer.data = (unsigned long)dev;
speed_timer.function = e100_check_speed;
clear_led_timer.function = e100_clear_network_leds;
+ clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
@@ -574,7 +605,6 @@ etrax_ethernet_init(void)
duplex_timer.function = e100_check_duplex;
/* Initialize mii interface */
- np->mii_if.phy_id = mdio_phy_addr;
np->mii_if.phy_id_mask = 0x1f;
np->mii_if.reg_num_mask = 0x1f;
np->mii_if.dev = dev;
@@ -585,6 +615,9 @@ etrax_ethernet_init(void)
/* unwanted addresses are matched */
*R_NETWORK_GA_0 = 0x00000000;
*R_NETWORK_GA_1 = 0x00000000;
+
+ /* Initialize next time the led can flash */
+ led_next_time = jiffies;
return 0;
}
@@ -595,9 +628,9 @@ etrax_ethernet_init(void)
static int
e100_set_mac_address(struct net_device *dev, void *p)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
struct sockaddr *addr = p;
- int i;
+ DECLARE_MAC_BUF(mac);
spin_lock(&np->lock); /* preemption protection */
@@ -686,6 +719,25 @@ e100_open(struct net_device *dev)
goto grace_exit2;
}
+ /*
+ * Always allocate the DMA channels after the IRQ,
+ * and clean up on failure.
+ */
+
+ if (cris_request_dma(NETWORK_TX_DMA_NBR,
+ cardname,
+ DMA_VERBOSE_ON_ERROR,
+ dma_eth)) {
+ goto grace_exit3;
+ }
+
+ if (cris_request_dma(NETWORK_RX_DMA_NBR,
+ cardname,
+ DMA_VERBOSE_ON_ERROR,
+ dma_eth)) {
+ goto grace_exit4;
+ }
+
/* give the HW an idea of what MAC address we want */
*R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) |
@@ -700,6 +752,7 @@ e100_open(struct net_device *dev)
*R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */
#else
+ SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522);
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive);
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable);
SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex);
@@ -719,8 +772,7 @@ e100_open(struct net_device *dev)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* enable the irq's for ethernet DMA */
@@ -752,12 +804,13 @@ e100_open(struct net_device *dev)
*R_DMA_CH0_FIRST = 0;
*R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc);
+ netif_start_queue(dev);
- restore_flags(flags);
+ local_irq_restore(flags);
/* Probe for transceiver */
if (e100_probe_transceiver(dev))
- goto grace_exit3;
+ goto grace_exit5;
/* Start duplex/speed timers */
add_timer(&speed_timer);
@@ -766,10 +819,14 @@ e100_open(struct net_device *dev)
/* We are now ready to accept transmit requests from
* the queueing layer of the networking.
*/
- netif_start_queue(dev);
+ netif_carrier_on(dev);
return 0;
+grace_exit5:
+ cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+grace_exit4:
+ cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
grace_exit3:
free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
grace_exit2:
@@ -780,12 +837,20 @@ grace_exit0:
return -EAGAIN;
}
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_speed(struct net_device* dev)
+{
+ current_speed = 100;
+}
+#else
static void
generic_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
if ((data & ADVERTISE_100FULL) ||
(data & ADVERTISE_100HALF))
current_speed = 100;
@@ -797,7 +862,10 @@ static void
tdk_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_TDK_DIAGNOSTIC_REG);
current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10);
}
@@ -805,7 +873,10 @@ static void
broadcom_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_AUX_CTRL_STATUS_REG);
current_speed = (data & MDIO_BC_SPEED ? 100 : 10);
}
@@ -813,46 +884,62 @@ static void
intel_check_speed(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_INT_STATUS_REG_2);
current_speed = (data & MDIO_INT_SPEED ? 100 : 10);
}
-
+#endif
static void
e100_check_speed(unsigned long priv)
{
struct net_device* dev = (struct net_device*)priv;
+ struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
int old_speed = current_speed;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR);
+ spin_lock(&np->transceiver_lock);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR);
if (!(data & BMSR_LSTATUS)) {
current_speed = 0;
} else {
transceiver->check_speed(dev);
}
+ spin_lock(&np->led_lock);
if ((old_speed != current_speed) || !led_initiated) {
led_initiated = 1;
e100_set_network_leds(NO_NETWORK_ACTIVITY);
+ if (current_speed)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
}
+ spin_unlock(&np->led_lock);
/* Reinitialize the timer. */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
add_timer(&speed_timer);
+
+ spin_unlock(&np->transceiver_lock);
}
static void
e100_negotiate(struct net_device* dev)
{
- unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+ unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MII_ADVERTISE);
/* Discard old speed and duplex settings */
data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL |
ADVERTISE_10HALF | ADVERTISE_10FULL);
switch (current_speed_selection) {
- case 10 :
+ case 10:
if (current_duplex == full)
data |= ADVERTISE_10FULL;
else if (current_duplex == half)
@@ -861,7 +948,7 @@ e100_negotiate(struct net_device* dev)
data |= ADVERTISE_10HALF | ADVERTISE_10FULL;
break;
- case 100 :
+ case 100:
if (current_duplex == full)
data |= ADVERTISE_100FULL;
else if (current_duplex == half)
@@ -870,7 +957,7 @@ e100_negotiate(struct net_device* dev)
data |= ADVERTISE_100HALF | ADVERTISE_100FULL;
break;
- case 0 : /* Auto */
+ case 0: /* Auto */
if (current_duplex == full)
data |= ADVERTISE_100FULL | ADVERTISE_10FULL;
else if (current_duplex == half)
@@ -880,35 +967,44 @@ e100_negotiate(struct net_device* dev)
ADVERTISE_100HALF | ADVERTISE_100FULL;
break;
- default : /* assume autoneg speed and duplex */
+ default: /* assume autoneg speed and duplex */
data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
+ break;
}
- e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data);
+ e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
/* Renegotiate with link partner */
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+ if (autoneg_normal) {
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
data |= BMCR_ANENABLE | BMCR_ANRESTART;
-
- e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data);
+ }
+ e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
}
static void
e100_set_speed(struct net_device* dev, unsigned long speed)
{
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
if (speed != current_speed_selection) {
current_speed_selection = speed;
e100_negotiate(dev);
}
+ spin_unlock(&np->transceiver_lock);
}
static void
e100_check_duplex(unsigned long priv)
{
struct net_device *dev = (struct net_device *)priv;
- struct net_local *np = (struct net_local *)dev->priv;
- int old_duplex = full_duplex;
+ struct net_local *np = netdev_priv(dev);
+ int old_duplex;
+
+ spin_lock(&np->transceiver_lock);
+ old_duplex = full_duplex;
transceiver->check_duplex(dev);
if (old_duplex != full_duplex) {
/* Duplex changed */
@@ -920,13 +1016,22 @@ e100_check_duplex(unsigned long priv)
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
add_timer(&duplex_timer);
np->mii_if.full_duplex = full_duplex;
+ spin_unlock(&np->transceiver_lock);
}
-
+#if defined(CONFIG_ETRAX_NO_PHY)
+static void
+dummy_check_duplex(struct net_device* dev)
+{
+ full_duplex = 1;
+}
+#else
static void
generic_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE);
if ((data & ADVERTISE_10FULL) ||
(data & ADVERTISE_100FULL))
full_duplex = 1;
@@ -938,7 +1043,10 @@ static void
tdk_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_TDK_DIAGNOSTIC_REG);
full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0;
}
@@ -946,7 +1054,10 @@ static void
broadcom_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_AUX_CTRL_STATUS_REG);
full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0;
}
@@ -954,38 +1065,55 @@ static void
intel_check_duplex(struct net_device* dev)
{
unsigned long data;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2);
+ struct net_local *np = netdev_priv(dev);
+
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id,
+ MDIO_INT_STATUS_REG_2);
full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0;
}
-
+#endif
static void
e100_set_duplex(struct net_device* dev, enum duplex new_duplex)
{
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
if (new_duplex != current_duplex) {
current_duplex = new_duplex;
e100_negotiate(dev);
}
+ spin_unlock(&np->transceiver_lock);
}
static int
e100_probe_transceiver(struct net_device* dev)
{
+ int ret = 0;
+
+#if !defined(CONFIG_ETRAX_NO_PHY)
unsigned int phyid_high;
unsigned int phyid_low;
unsigned int oui;
struct transceiver_ops* ops = NULL;
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->transceiver_lock);
/* Probe MDIO physical address */
- for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) {
- if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff)
+ for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31;
+ np->mii_if.phy_id++) {
+ if (e100_get_mdio_reg(dev,
+ np->mii_if.phy_id, MII_BMSR) != 0xffff)
break;
}
- if (mdio_phy_addr == 32)
- return -ENODEV;
+ if (np->mii_if.phy_id == 32) {
+ ret = -ENODEV;
+ goto out;
+ }
/* Get manufacturer */
- phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1);
- phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2);
+ phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1);
+ phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2);
oui = (phyid_high << 6) | (phyid_low >> 10);
for (ops = &transceivers[0]; ops->oui; ops++) {
@@ -993,8 +1121,10 @@ e100_probe_transceiver(struct net_device* dev)
break;
}
transceiver = ops;
-
- return 0;
+out:
+ spin_unlock(&np->transceiver_lock);
+#endif
+ return ret;
}
static int
@@ -1088,13 +1218,14 @@ e100_receive_mdio_bit()
static void
e100_reset_transceiver(struct net_device* dev)
{
+ struct net_local *np = netdev_priv(dev);
unsigned short cmd;
unsigned short data;
int bitCounter;
- data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR);
+ data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
- cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2);
+ cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2);
e100_send_mdio_cmd(cmd, 1);
@@ -1112,7 +1243,7 @@ e100_reset_transceiver(struct net_device* dev)
static void
e100_tx_timeout(struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&np->lock, flags);
@@ -1134,8 +1265,7 @@ e100_tx_timeout(struct net_device *dev)
e100_reset_transceiver(dev);
/* and get rid of the packets that never got an interrupt */
- while (myFirstTxDesc != myNextTxDesc)
- {
+ while (myFirstTxDesc != myNextTxDesc) {
dev_kfree_skb(myFirstTxDesc->skb);
myFirstTxDesc->skb = 0;
myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
@@ -1161,7 +1291,7 @@ e100_tx_timeout(struct net_device *dev)
static int
e100_send_packet(struct sk_buff *skb, struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned char *buf = skb->data;
unsigned long flags;
@@ -1174,7 +1304,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
- e100_hardware_send_packet(buf, skb->len);
+ e100_hardware_send_packet(np, buf, skb->len);
myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next);
@@ -1197,13 +1327,15 @@ static irqreturn_t
e100rxtx_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = (struct net_local *)dev->priv;
- unsigned long irqbits = *R_IRQ_MASK2_RD;
+ struct net_local *np = netdev_priv(dev);
+ unsigned long irqbits;
- /* Disable RX/TX IRQs to avoid reentrancy */
- *R_IRQ_MASK2_CLR =
- IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) |
- IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr);
+ /*
+ * Note that both rx and tx interrupts are blocked at this point,
+ * regardless of which got us here.
+ */
+
+ irqbits = *R_IRQ_MASK2_RD;
/* Handle received packets */
if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) {
@@ -1219,7 +1351,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
* allocate a new buffer to put a packet in.
*/
e100_rx(dev);
- ((struct net_local *)dev->priv)->stats.rx_packets++;
+ np->stats.rx_packets++;
/* restart/continue on the channel, for safety */
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
/* clear dma channel 1 eop/descr irq bits */
@@ -1233,9 +1365,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
}
/* Report any packets that have been sent */
- while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) &&
- myFirstTxDesc != myNextTxDesc)
- {
+ while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
+ (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
np->stats.tx_bytes += myFirstTxDesc->skb->len;
np->stats.tx_packets++;
@@ -1244,19 +1375,15 @@ e100rxtx_interrupt(int irq, void *dev_id)
dev_kfree_skb_irq(myFirstTxDesc->skb);
myFirstTxDesc->skb = 0;
myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next);
+ /* Wake up queue. */
+ netif_wake_queue(dev);
}
if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) {
- /* acknowledge the eop interrupt and wake up queue */
+ /* acknowledge the eop interrupt. */
*R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do);
- netif_wake_queue(dev);
}
- /* Enable RX/TX IRQs again */
- *R_IRQ_MASK2_SET =
- IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) |
- IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set);
-
return IRQ_HANDLED;
}
@@ -1264,7 +1391,7 @@ static irqreturn_t
e100nw_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned long irqbits = *R_IRQ_MASK0_RD;
/* check for underrun irq */
@@ -1286,7 +1413,6 @@ e100nw_interrupt(int irq, void *dev_id)
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
- *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr);
np->stats.tx_errors++;
D(printk("ethernet excessive collisions!\n"));
}
@@ -1299,12 +1425,13 @@ e100_rx(struct net_device *dev)
{
struct sk_buff *skb;
int length = 0;
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
unsigned char *skb_data_ptr;
#ifdef ETHDEBUG
int i;
#endif
-
+ etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */
+ spin_lock(&np->led_lock);
if (!led_active && time_after(jiffies, led_next_time)) {
/* light the network leds depending on the current speed. */
e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1314,9 +1441,10 @@ e100_rx(struct net_device *dev)
led_active = 1;
mod_timer(&clear_led_timer, jiffies + HZ/10);
}
+ spin_unlock(&np->led_lock);
length = myNextRxDesc->descr.hw_len - 4;
- ((struct net_local *)dev->priv)->stats.rx_bytes += length;
+ np->stats.rx_bytes += length;
#ifdef ETHDEBUG
printk("Got a packet of length %d:\n", length);
@@ -1336,7 +1464,7 @@ e100_rx(struct net_device *dev)
if (!skb) {
np->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- return;
+ goto update_nextrxdesc;
}
skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */
@@ -1354,15 +1482,15 @@ e100_rx(struct net_device *dev)
else {
/* Large packet, send directly to upper layers and allocate new
* memory (aligned to cache line boundary to avoid bug).
- * Before sending the skb to upper layers we must make sure that
- * skb->data points to the aligned start of the packet.
+ * Before sending the skb to upper layers we must make sure
+ * that skb->data points to the aligned start of the packet.
*/
int align;
struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
if (!new_skb) {
np->stats.rx_errors++;
printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
- return;
+ goto update_nextrxdesc;
}
skb = myNextRxDesc->skb;
align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data;
@@ -1377,9 +1505,10 @@ e100_rx(struct net_device *dev)
/* Send the packet to the upper layers */
netif_rx(skb);
+ update_nextrxdesc:
/* Prepare for next packet */
myNextRxDesc->descr.status = 0;
- myPrevRxDesc = myNextRxDesc;
+ prevRxDesc = myNextRxDesc;
myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next);
rx_queue_len++;
@@ -1387,9 +1516,9 @@ e100_rx(struct net_device *dev)
/* Check if descriptors should be returned */
if (rx_queue_len == RX_QUEUE_THRESHOLD) {
flush_etrax_cache();
- myPrevRxDesc->descr.ctrl |= d_eol;
+ prevRxDesc->descr.ctrl |= d_eol;
myLastRxDesc->descr.ctrl &= ~d_eol;
- myLastRxDesc = myPrevRxDesc;
+ myLastRxDesc = prevRxDesc;
rx_queue_len = 0;
}
}
@@ -1398,7 +1527,7 @@ e100_rx(struct net_device *dev)
static int
e100_close(struct net_device *dev)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
printk(KERN_INFO "Closing %s.\n", dev->name);
@@ -1426,6 +1555,9 @@ e100_close(struct net_device *dev)
free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev);
free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev);
+ cris_free_dma(NETWORK_TX_DMA_NBR, cardname);
+ cris_free_dma(NETWORK_RX_DMA_NBR, cardname);
+
/* Update the statistics here. */
update_rx_stats(&np->stats);
@@ -1443,18 +1575,11 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
struct net_local *np = netdev_priv(dev);
+ int rc = 0;
+ int old_autoneg;
spin_lock(&np->lock); /* Preempt protection */
switch (cmd) {
- case SIOCGMIIPHY: /* Get PHY address */
- data->phy_id = mdio_phy_addr;
- break;
- case SIOCGMIIREG: /* Read MII register */
- data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num);
- break;
- case SIOCSMIIREG: /* Write MII register */
- e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in);
- break;
/* The ioctls below should be considered obsolete but are */
/* still present for compatibility with old scripts/apps */
case SET_ETH_SPEED_10: /* 10 Mbps */
@@ -1463,60 +1588,47 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SET_ETH_SPEED_100: /* 100 Mbps */
e100_set_speed(dev, 100);
break;
- case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */
+ case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */
e100_set_speed(dev, 0);
break;
- case SET_ETH_DUPLEX_HALF: /* Half duplex. */
+ case SET_ETH_DUPLEX_HALF: /* Half duplex */
e100_set_duplex(dev, half);
break;
- case SET_ETH_DUPLEX_FULL: /* Full duplex. */
+ case SET_ETH_DUPLEX_FULL: /* Full duplex */
e100_set_duplex(dev, full);
break;
- case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/
+ case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */
e100_set_duplex(dev, autoneg);
break;
+ case SET_ETH_AUTONEG:
+ old_autoneg = autoneg_normal;
+ autoneg_normal = *(int*)data;
+ if (autoneg_normal != old_autoneg)
+ e100_negotiate(dev);
+ break;
default:
- return -EINVAL;
+ rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr),
+ cmd, NULL);
+ break;
}
spin_unlock(&np->lock);
- return 0;
+ return rc;
}
-static int e100_set_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int e100_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
- ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII |
- SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- ecmd->port = PORT_TP;
- ecmd->transceiver = XCVR_EXTERNAL;
- ecmd->phy_address = mdio_phy_addr;
- ecmd->speed = current_speed;
- ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
- ecmd->advertising = ADVERTISED_TP;
+ struct net_local *np = netdev_priv(dev);
+ int err;
- if (current_duplex == autoneg && current_speed_selection == 0)
- ecmd->advertising |= ADVERTISED_Autoneg;
- else {
- ecmd->advertising |=
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- if (current_speed_selection == 10)
- ecmd->advertising &= ~(ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full);
- else if (current_speed_selection == 100)
- ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full);
- if (current_duplex == half)
- ecmd->advertising &= ~(ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Full);
- else if (current_duplex == full)
- ecmd->advertising &= ~(ADVERTISED_10baseT_Half |
- ADVERTISED_100baseT_Half);
- }
+ spin_lock_irq(&np->lock);
+ err = mii_ethtool_gset(&np->mii_if, cmd);
+ spin_unlock_irq(&np->lock);
- ecmd->autoneg = AUTONEG_ENABLE;
- return 0;
+ /* The PHY may support 1000baseT, but the Etrax100 does not. */
+ cmd->supported &= ~(SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full);
+ return err;
}
static int e100_set_settings(struct net_device *dev,
@@ -1560,7 +1672,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
static int
e100_set_config(struct net_device *dev, struct ifmap *map)
{
- struct net_local *np = (struct net_local *)dev->priv;
+ struct net_local *np = netdev_priv(dev);
+
spin_lock(&np->lock); /* Preempt protection */
switch(map->port) {
@@ -1612,7 +1725,6 @@ update_tx_stats(struct net_device_stats *es)
es->collisions +=
IO_EXTRACT(R_TR_COUNTERS, single_col, r) +
IO_EXTRACT(R_TR_COUNTERS, multiple_col, r);
- es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r);
}
/*
@@ -1622,8 +1734,9 @@ update_tx_stats(struct net_device_stats *es)
static struct net_device_stats *
e100_get_stats(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *)dev->priv;
+ struct net_local *lp = netdev_priv(dev);
unsigned long flags;
+
spin_lock_irqsave(&lp->lock, flags);
update_rx_stats(&lp->stats);
@@ -1643,13 +1756,13 @@ e100_get_stats(struct net_device *dev)
static void
set_multicast_list(struct net_device *dev)
{
- struct net_local *lp = (struct net_local *)dev->priv;
+ struct net_local *lp = netdev_priv(dev);
int num_addr = dev->mc_count;
unsigned long int lo_bits;
unsigned long int hi_bits;
+
spin_lock(&lp->lock);
- if (dev->flags & IFF_PROMISC)
- {
+ if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
lo_bits = 0xfffffffful;
hi_bits = 0xfffffffful;
@@ -1679,9 +1792,10 @@ set_multicast_list(struct net_device *dev)
struct dev_mc_list *dmi = dev->mc_list;
int i;
char *baddr;
+
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- for (i=0; i<num_addr; i++) {
+ for (i = 0; i < num_addr; i++) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
@@ -1708,8 +1822,7 @@ set_multicast_list(struct net_device *dev)
if (hash_ix >= 32) {
hi_bits |= (1 << (hash_ix-32));
- }
- else {
+ } else {
lo_bits |= (1 << hash_ix);
}
dmi = dmi->next;
@@ -1724,10 +1837,11 @@ set_multicast_list(struct net_device *dev)
}
void
-e100_hardware_send_packet(char *buf, int length)
+e100_hardware_send_packet(struct net_local *np, char *buf, int length)
{
D(printk("e100 send pack, buf 0x%x len %d\n", buf, length));
+ spin_lock(&np->led_lock);
if (!led_active && time_after(jiffies, led_next_time)) {
/* light the network leds depending on the current speed. */
e100_set_network_leds(NETWORK_ACTIVITY);
@@ -1737,6 +1851,7 @@ e100_hardware_send_packet(char *buf, int length)
led_active = 1;
mod_timer(&clear_led_timer, jiffies + HZ/10);
}
+ spin_unlock(&np->led_lock);
/* configure the tx dma descriptor */
myNextTxDesc->descr.sw_len = length;
@@ -1754,6 +1869,11 @@ e100_hardware_send_packet(char *buf, int length)
static void
e100_clear_network_leds(unsigned long dummy)
{
+ struct net_device *dev = (struct net_device *)dummy;
+ struct net_local *np = netdev_priv(dev);
+
+ spin_lock(&np->led_lock);
+
if (led_active && time_after(jiffies, led_next_time)) {
e100_set_network_leds(NO_NETWORK_ACTIVITY);
@@ -1761,6 +1881,8 @@ e100_clear_network_leds(unsigned long dummy)
led_next_time = jiffies + NET_FLASH_PAUSE;
led_active = 0;
}
+
+ spin_unlock(&np->led_lock);
}
static void
@@ -1781,19 +1903,25 @@ e100_set_network_leds(int active)
#else
LED_NETWORK_SET(LED_OFF);
#endif
- }
- else if (light_leds) {
+ } else if (light_leds) {
if (current_speed == 10) {
LED_NETWORK_SET(LED_ORANGE);
} else {
LED_NETWORK_SET(LED_GREEN);
}
- }
- else {
+ } else {
LED_NETWORK_SET(LED_OFF);
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+e100_netpoll(struct net_device* netdev)
+{
+ e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL);
+}
+#endif
+
static int
etrax_init_module(void)
{
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 72deff0d4d90..cf39473ef90a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4804,6 +4804,7 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
spin_unlock_irqrestore(&adapter->stats_lock, flags);
return -EIO;
}
+ spin_unlock_irqrestore(&adapter->stats_lock, flags);
if (adapter->hw.media_type == e1000_media_type_copper) {
switch (data->reg_num) {
case PHY_CTRL:
@@ -4824,12 +4825,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
DUPLEX_HALF;
retval = e1000_set_spd_dplx(adapter,
spddplx);
- if (retval) {
- spin_unlock_irqrestore(
- &adapter->stats_lock,
- flags);
+ if (retval)
return retval;
- }
}
if (netif_running(adapter->netdev))
e1000_reinit_locked(adapter);
@@ -4838,11 +4835,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case M88E1000_PHY_SPEC_CTRL:
case M88E1000_EXT_PHY_SPEC_CTRL:
- if (e1000_phy_reset(&adapter->hw)) {
- spin_unlock_irqrestore(
- &adapter->stats_lock, flags);
+ if (e1000_phy_reset(&adapter->hw))
return -EIO;
- }
break;
}
} else {
@@ -4857,7 +4851,6 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
}
}
- spin_unlock_irqrestore(&adapter->stats_lock, flags);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 2765e49e07df..562ea68ed99b 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -2,6 +2,7 @@ config FS_ENET
tristate "Freescale Ethernet Driver"
depends on CPM1 || CPM2
select MII
+ select PHYLIB
config FS_ENET_HAS_SCC
bool "Chip has an SCC usable for ethernet"
@@ -11,11 +12,19 @@ config FS_ENET_HAS_SCC
config FS_ENET_HAS_FCC
bool "Chip has an FCC usable for ethernet"
depends on FS_ENET && CPM2
- select MDIO_BITBANG
default y
config FS_ENET_HAS_FEC
bool "Chip has an FEC usable for ethernet"
depends on FS_ENET && CPM1
+ select FS_ENET_MDIO_FEC
default y
+config FS_ENET_MDIO_FEC
+ tristate "MDIO driver for FEC"
+ depends on FS_ENET && CPM1
+
+config FS_ENET_MDIO_FCC
+ tristate "MDIO driver for FCC"
+ depends on FS_ENET && CPM2
+ select MDIO_BITBANG
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index 02d4dc18ba69..1ffbe0756a0c 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,16 @@
obj-$(CONFIG_FS_ENET) += fs_enet.o
-obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
-obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
+fs_enet-$(CONFIG_FS_ENET_HAS_SCC) += mac-scc.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FEC) += mac-fec.o
+fs_enet-$(CONFIG_FS_ENET_HAS_FCC) += mac-fcc.o
-fs_enet-objs := fs_enet-main.o
+ifeq ($(CONFIG_PPC_CPM_NEW_BINDING),y)
+obj-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+obj-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+else
+fs_enet-$(CONFIG_FS_ENET_MDIO_FEC) += mii-fec.o
+fs_enet-$(CONFIG_FS_ENET_MDIO_FCC) += mii-bitbang.o
+endif
+
+fs_enet-objs := fs_enet-main.o $(fs_enet-m)
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index f8d63d39f592..b226e019bc8b 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -171,9 +171,10 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
buf->u.direct.map);
else {
for (i = 0; i < buf->nbufs; ++i)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->u.page_list[i].buf,
- buf->u.page_list[i].map);
+ if (buf->u.page_list[i].buf)
+ dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ buf->u.page_list[i].buf,
+ buf->u.page_list[i].map);
kfree(buf->u.page_list);
}
}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index cc4b1be18219..42b47639c81c 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -240,7 +240,7 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
- if (qp->qpn < dev->caps.sqp_start + 8)
+ if (qp->qpn >= dev->caps.sqp_start + 8)
mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index eb0aff787dfd..5267e031daa0 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -128,8 +128,8 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
FIFO_PTR_FRAMELEN(len));
ndev->trans_start = jiffies;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
netif_stop_queue(ndev);
spin_unlock_irq(&priv->lock);
@@ -155,7 +155,7 @@ static void netx_eth_receive(struct net_device *ndev)
if (unlikely(skb == NULL)) {
printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
ndev->name);
- dev->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
return;
}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index b8c0e7b4ca1c..632666706247 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -84,7 +84,7 @@
#include "s2io.h"
#include "s2io-regs.h"
-#define DRV_VERSION "2.0.26.5"
+#define DRV_VERSION "2.0.26.6"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
@@ -3775,6 +3775,40 @@ static int __devinit s2io_test_msi(struct s2io_nic *sp)
return err;
}
+
+static void remove_msix_isr(struct s2io_nic *sp)
+{
+ int i;
+ u16 msi_control;
+
+ for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ if (sp->s2io_entries[i].in_use ==
+ MSIX_REGISTERED_SUCCESS) {
+ int vector = sp->entries[i].vector;
+ void *arg = sp->s2io_entries[i].arg;
+ free_irq(vector, arg);
+ }
+ }
+
+ kfree(sp->entries);
+ kfree(sp->s2io_entries);
+ sp->entries = NULL;
+ sp->s2io_entries = NULL;
+
+ pci_read_config_word(sp->pdev, 0x42, &msi_control);
+ msi_control &= 0xFFFE; /* Disable MSI */
+ pci_write_config_word(sp->pdev, 0x42, msi_control);
+
+ pci_disable_msix(sp->pdev);
+}
+
+static void remove_inta_isr(struct s2io_nic *sp)
+{
+ struct net_device *dev = sp->dev;
+
+ free_irq(sp->pdev->irq, dev);
+}
+
/* ********************************************************* *
* Functions defined below concern the OS part of the driver *
* ********************************************************* */
@@ -3809,28 +3843,9 @@ static int s2io_open(struct net_device *dev)
int ret = s2io_enable_msi_x(sp);
if (!ret) {
- u16 msi_control;
-
ret = s2io_test_msi(sp);
-
/* rollback MSI-X, will re-enable during add_isr() */
- kfree(sp->entries);
- sp->mac_control.stats_info->sw_stat.mem_freed +=
- (MAX_REQUESTED_MSI_X *
- sizeof(struct msix_entry));
- kfree(sp->s2io_entries);
- sp->mac_control.stats_info->sw_stat.mem_freed +=
- (MAX_REQUESTED_MSI_X *
- sizeof(struct s2io_msix_entry));
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
-
+ remove_msix_isr(sp);
}
if (ret) {
@@ -6719,15 +6734,22 @@ static int s2io_add_isr(struct s2io_nic * sp)
}
}
if (err) {
+ remove_msix_isr(sp);
DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
"failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
- return -1;
+ DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ break;
}
sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
}
- printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
- printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
+ if (!err) {
+ printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
+ msix_tx_cnt);
+ printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
+ msix_rx_cnt);
+ }
}
if (sp->config.intr_type == INTA) {
err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
@@ -6742,40 +6764,10 @@ static int s2io_add_isr(struct s2io_nic * sp)
}
static void s2io_rem_isr(struct s2io_nic * sp)
{
- struct net_device *dev = sp->dev;
- struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
-
- if (sp->config.intr_type == MSI_X) {
- int i;
- u16 msi_control;
-
- for (i=1; (sp->s2io_entries[i].in_use ==
- MSIX_REGISTERED_SUCCESS); i++) {
- int vector = sp->entries[i].vector;
- void *arg = sp->s2io_entries[i].arg;
-
- synchronize_irq(vector);
- free_irq(vector, arg);
- }
-
- kfree(sp->entries);
- stats->mem_freed +=
- (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
- kfree(sp->s2io_entries);
- stats->mem_freed +=
- (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
- sp->entries = NULL;
- sp->s2io_entries = NULL;
-
- pci_read_config_word(sp->pdev, 0x42, &msi_control);
- msi_control &= 0xFFFE; /* Disable MSI */
- pci_write_config_word(sp->pdev, 0x42, msi_control);
-
- pci_disable_msix(sp->pdev);
- } else {
- synchronize_irq(sp->pdev->irq);
- free_irq(sp->pdev->irq, dev);
- }
+ if (sp->config.intr_type == MSI_X)
+ remove_msix_isr(sp);
+ else
+ remove_inta_isr(sp);
}
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index b9961dc47606..6d62250fba07 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2512,31 +2512,32 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-/* Assign Ram Buffer allocation to queue */
-static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, u32 space)
+static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
{
u32 end;
- /* convert from K bytes to qwords used for hw register */
- start *= 1024/8;
- space *= 1024/8;
- end = start + space - 1;
+ start /= 8;
+ len /= 8;
+ end = start + len - 1;
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
skge_write32(hw, RB_ADDR(q, RB_START), start);
- skge_write32(hw, RB_ADDR(q, RB_END), end);
skge_write32(hw, RB_ADDR(q, RB_WP), start);
skge_write32(hw, RB_ADDR(q, RB_RP), start);
+ skge_write32(hw, RB_ADDR(q, RB_END), end);
if (q == Q_R1 || q == Q_R2) {
- u32 tp = space - space/4;
-
/* Set thresholds on receive queues */
- skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
- skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
- } else if (hw->chip_id != CHIP_ID_GENESIS)
- /* Genesis Tx Fifo is too small for normal store/forward */
+ skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
+ start + (2*len)/3);
+ skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
+ start + (len/3));
+ } else {
+ /* Enable store & forward on Tx queues because
+ * Tx FIFO is only 4K on Genesis and 1K on Yukon
+ */
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+ }
skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
}
@@ -2564,7 +2565,7 @@ static int skge_up(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- u32 ramaddr, ramsize, rxspace;
+ u32 chunk, ram_addr;
size_t rx_size, tx_size;
int err;
@@ -2619,15 +2620,14 @@ static int skge_up(struct net_device *dev)
spin_unlock_bh(&hw->phy_lock);
/* Configure RAMbuffers */
- ramsize = (hw->ram_size - hw->ram_offset) / hw->ports;
- ramaddr = hw->ram_offset + port * ramsize;
- rxspace = 8 + (2*(ramsize - 16))/3;
-
- skge_ramset(hw, rxqaddr[port], ramaddr, rxspace);
- skge_ramset(hw, txqaddr[port], ramaddr + rxspace, ramsize - rxspace);
+ chunk = hw->ram_size / ((hw->ports + 1)*2);
+ ram_addr = hw->ram_offset + 2 * chunk * port;
+ skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);
+
BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);
+ skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk);
skge_qset(skge, txqaddr[port], skge->tx_ring.to_use);
/* Start receiver BMU */
@@ -3591,12 +3591,15 @@ static int skge_reset(struct skge_hw *hw)
if (hw->chip_id == CHIP_ID_GENESIS) {
if (t8 == 3) {
/* special case: 4 x 64k x 36, offset = 0x80000 */
- hw->ram_size = 1024;
- hw->ram_offset = 512;
+ hw->ram_size = 0x100000;
+ hw->ram_offset = 0x80000;
} else
hw->ram_size = t8 * 512;
- } else /* Yukon */
- hw->ram_size = t8 ? t8 * 4 : 128;
+ }
+ else if (t8 == 0)
+ hw->ram_size = 0x20000;
+ else
+ hw->ram_size = t8 * 4096;
hw->intr_mask = IS_HW_ERR;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index c20a3bd21bb2..9cc13dd8a821 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1281,7 +1281,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
skb->dev = dev;
/* Because we reserve afterwards. */
- skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
+ skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, &hb->happy_meal_rxd[i],
(RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
hme_dma_map(hp, skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
@@ -1700,6 +1700,11 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("tx old[%08x] and rx [%08x] ON!\n",
hme_read32(hp, bregs + BMAC_TXCFG),
hme_read32(hp, bregs + BMAC_RXCFG)));
+
+ /* Set larger TX/RX size to allow for 802.1q */
+ hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
+ hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);
+
hme_write32(hp, bregs + BMAC_TXCFG,
hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
hme_write32(hp, bregs + BMAC_RXCFG,
@@ -2039,7 +2044,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
hme_dma_unmap(hp, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE);
hp->rx_skbs[elem] = new_skb;
new_skb->dev = dev;
- skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
+ skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
hme_dma_map(hp, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROMDEVICE));
@@ -2809,8 +2814,8 @@ static int __devinit happy_meal_sbus_probe_one(struct sbus_dev *sdev, int is_qfe
dev->watchdog_timeo = 5*HZ;
dev->ethtool_ops = &hme_ethtool_ops;
- /* Happy Meal can do it all... except VLAN. */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_VLAN_CHALLENGED;
+ /* Happy Meal can do it all... */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->irq = sdev->irqs[0];
@@ -3143,8 +3148,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
dev->dma = 0;
- /* Happy Meal can do it all... except VLAN. */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_VLAN_CHALLENGED;
+ /* Happy Meal can do it all... */
+ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* Hook up PCI register/dma accessors. */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 5c4a92de9a07..450e29d7a9f3 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1963,6 +1963,11 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+ if (!netif_running(dev)) {
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
if (new_mtu != oldmtu) {
spin_lock_irqsave(&vptr->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 262ab0b55824..c48b1b537d2b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -71,19 +71,19 @@ struct iwl_rate_scale_priv {
};
static s32 iwl_expected_tpt_g[IWL_RATE_COUNT] = {
- 0, 0, 76, 104, 130, 168, 191, 202, 7, 13, 35, 58
+ 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
};
static s32 iwl_expected_tpt_g_prot[IWL_RATE_COUNT] = {
- 0, 0, 0, 80, 93, 113, 123, 125, 7, 13, 35, 58
+ 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
};
static s32 iwl_expected_tpt_a[IWL_RATE_COUNT] = {
- 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0, 0
+ 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
};
static s32 iwl_expected_tpt_b[IWL_RATE_COUNT] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 7, 13, 35, 58
+ 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
};
struct iwl_tpt_entry {
@@ -350,6 +350,10 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
sta->last_txrate = sta->txrate;
+ /* For MODE_IEEE80211A, rates start at IWL_FIRST_OFDM_RATE */
+ if (local->hw.conf.phymode == MODE_IEEE80211A)
+ sta->last_txrate += IWL_FIRST_OFDM_RATE;
+
IWL_DEBUG_RATE("leave\n");
}
@@ -417,6 +421,33 @@ static void rs_free_sta(void *priv, void *priv_sta)
IWL_DEBUG_RATE("leave\n");
}
+
+/*
+ * Get the previous IEEE rate from the rate scale table.
+ * For A and B modes we need to override the previous
+ * value.
+ */
+static int rs_adjust_next_rate(struct iwl_priv *priv, int rate)
+{
+ int next_rate = iwl_get_prev_ieee_rate(rate);
+
+ switch (priv->phymode) {
+ case MODE_IEEE80211A:
+ if (rate == IWL_RATE_12M_INDEX)
+ next_rate = IWL_RATE_9M_INDEX;
+ else if (rate == IWL_RATE_6M_INDEX)
+ next_rate = IWL_RATE_6M_INDEX;
+ break;
+ case MODE_IEEE80211B:
+ if (rate == IWL_RATE_11M_INDEX_TABLE)
+ next_rate = IWL_RATE_5M_INDEX_TABLE;
+ break;
+ default:
+ break;
+ }
+
+ return next_rate;
+}
/**
* rs_tx_status - Update rate control values based on Tx results
*
@@ -479,7 +510,8 @@ static void rs_tx_status(void *priv_rate,
last_index = scale_rate_index;
} else {
current_count = priv->retry_rate;
- last_index = iwl_get_prev_ieee_rate(scale_rate_index);
+ last_index = rs_adjust_next_rate(priv,
+ scale_rate_index);
}
/* Update this rate accounting for as many retries
@@ -494,9 +526,10 @@ static void rs_tx_status(void *priv_rate,
if (retries)
scale_rate_index =
- iwl_get_prev_ieee_rate(scale_rate_index);
+ rs_adjust_next_rate(priv, scale_rate_index);
}
+
/* Update the last index window with success/failure based on ACK */
IWL_DEBUG_RATE("Update rate %d with %s.\n",
last_index,
@@ -672,7 +705,10 @@ static struct ieee80211_rate *rs_get_rate(void *priv_rate,
}
rate_mask = sta->supp_rates;
- index = min(sta->txrate & 0xffff, IWL_RATE_COUNT - 1);
+ index = min(sta->last_txrate & 0xffff, IWL_RATE_COUNT - 1);
+
+ if (priv->phymode == (u8) MODE_IEEE80211A)
+ rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
rs_priv = (void *)sta->rate_ctrl_priv;
@@ -801,7 +837,11 @@ static struct ieee80211_rate *rs_get_rate(void *priv_rate,
out:
sta->last_txrate = index;
- sta->txrate = sta->last_txrate;
+ if (priv->phymode == (u8) MODE_IEEE80211A)
+ sta->txrate = sta->last_txrate - IWL_FIRST_OFDM_RATE;
+ else
+ sta->txrate = sta->last_txrate;
+
sta_info_put(sta);
IWL_DEBUG_RATE("leave: %d\n", index);
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
index b926738e0ea1..bec4d3ffca1d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -36,10 +36,17 @@ struct iwl_rate_info {
u8 next_rs; /* next rate used in rs algo */
u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_index; /* index in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+
};
enum {
- IWL_RATE_6M_INDEX = 0,
+ IWL_RATE_1M_INDEX = 0,
+ IWL_RATE_2M_INDEX,
+ IWL_RATE_5M_INDEX,
+ IWL_RATE_11M_INDEX,
+ IWL_RATE_6M_INDEX,
IWL_RATE_9M_INDEX,
IWL_RATE_12M_INDEX,
IWL_RATE_18M_INDEX,
@@ -47,16 +54,28 @@ enum {
IWL_RATE_36M_INDEX,
IWL_RATE_48M_INDEX,
IWL_RATE_54M_INDEX,
- IWL_RATE_1M_INDEX,
- IWL_RATE_2M_INDEX,
- IWL_RATE_5M_INDEX,
- IWL_RATE_11M_INDEX,
IWL_RATE_COUNT,
IWL_RATE_INVM_INDEX,
IWL_RATE_INVALID = IWL_RATE_INVM_INDEX
};
enum {
+ IWL_RATE_6M_INDEX_TABLE = 0,
+ IWL_RATE_9M_INDEX_TABLE,
+ IWL_RATE_12M_INDEX_TABLE,
+ IWL_RATE_18M_INDEX_TABLE,
+ IWL_RATE_24M_INDEX_TABLE,
+ IWL_RATE_36M_INDEX_TABLE,
+ IWL_RATE_48M_INDEX_TABLE,
+ IWL_RATE_54M_INDEX_TABLE,
+ IWL_RATE_1M_INDEX_TABLE,
+ IWL_RATE_2M_INDEX_TABLE,
+ IWL_RATE_5M_INDEX_TABLE,
+ IWL_RATE_11M_INDEX_TABLE,
+ IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX,
+};
+
+enum {
IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
IWL_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 19bcb01e2784..3a45fe99a83e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -54,7 +54,9 @@
IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX, \
IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
+ IWL_RATE_##np##M_INDEX, \
+ IWL_RATE_##r##M_INDEX_TABLE, \
+ IWL_RATE_##ip##M_INDEX_TABLE }
/*
* Parameter order:
@@ -65,6 +67,10 @@
*
*/
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
@@ -73,10 +79,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
};
/* 1 = enable the iwl_disable_events() function */
@@ -662,10 +664,11 @@ void iwl_hw_build_tx_cmd_rate(struct iwl_priv *priv,
cmd->cmd.tx.tx_flags = tx_flags;
/* OFDM */
- cmd->cmd.tx.supp_rates[0] = rate_mask & IWL_OFDM_RATES_MASK;
+ cmd->cmd.tx.supp_rates[0] =
+ ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
/* CCK */
- cmd->cmd.tx.supp_rates[1] = (rate_mask >> 8) & 0xF;
+ cmd->cmd.tx.supp_rates[1] = (rate_mask & 0xF);
IWL_DEBUG_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
"cck/ofdm mask: 0x%x/0x%x\n", sta_id,
@@ -1432,7 +1435,7 @@ static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
/* use this channel group's 6Mbit clipping/saturation pwr,
* but cap at regulatory scan power restriction (set during init
* based on eeprom channel data) for this channel. */
- power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX]);
+ power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
/* further limit to user's max power preference.
* FIXME: Other spectrum management power limitations do not
@@ -1447,7 +1450,7 @@ static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
* *index*. */
power_index = ch_info->power_info[rate_index].power_table_index
- (power - ch_info->power_info
- [IWL_RATE_6M_INDEX].requested_power) * 2;
+ [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
/* store reference index that we use when adjusting *all* scan
* powers. So we can accommodate user (all channel) or spectrum
@@ -1476,7 +1479,7 @@ static void iwl_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
*/
int iwl_hw_reg_send_txpower(struct iwl_priv *priv)
{
- int rate_idx;
+ int rate_idx, i;
const struct iwl_channel_info *ch_info = NULL;
struct iwl_txpowertable_cmd txpower = {
.channel = priv->active_rxon.channel,
@@ -1500,20 +1503,36 @@ int iwl_hw_reg_send_txpower(struct iwl_priv *priv)
}
/* fill cmd with power settings for all rates for current channel */
- for (rate_idx = 0; rate_idx < IWL_RATE_COUNT; rate_idx++) {
- txpower.power[rate_idx].tpc = ch_info->power_info[rate_idx].tpc;
- txpower.power[rate_idx].rate = iwl_rates[rate_idx].plcp;
+ /* Fill OFDM rate */
+ for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
+ rate_idx <= IWL_LAST_OFDM_RATE; rate_idx++, i++) {
+
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = iwl_rates[rate_idx].plcp;
IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
le16_to_cpu(txpower.channel),
txpower.band,
- txpower.power[rate_idx].tpc.tx_gain,
- txpower.power[rate_idx].tpc.dsp_atten,
- txpower.power[rate_idx].rate);
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten,
+ txpower.power[i].rate);
+ }
+ /* Fill CCK rates */
+ for (rate_idx = IWL_FIRST_CCK_RATE;
+ rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = iwl_rates[rate_idx].plcp;
+
+ IWL_DEBUG_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel),
+ txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten,
+ txpower.power[i].rate);
}
return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
- sizeof(struct iwl_txpowertable_cmd), &txpower);
+ sizeof(struct iwl_txpowertable_cmd), &txpower);
}
@@ -1549,7 +1568,7 @@ static int iwl_hw_reg_set_new_power(struct iwl_priv *priv,
power_info = ch_info->power_info;
/* update OFDM Txpower settings */
- for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE;
+ for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
i++, ++power_info) {
int delta_idx;
@@ -1573,14 +1592,14 @@ static int iwl_hw_reg_set_new_power(struct iwl_priv *priv,
* ... all CCK power settings for a given channel are the *same*. */
if (power_changed) {
power =
- ch_info->power_info[IWL_RATE_12M_INDEX].
+ ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
/* do all CCK rates' iwl_channel_power_info structures */
- for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++) {
+ for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
power_info->requested_power = power;
power_info->base_power_index =
- ch_info->power_info[IWL_RATE_12M_INDEX].
+ ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
++power_info;
}
@@ -1674,7 +1693,7 @@ static int iwl_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
for (scan_tbl_index = 0;
scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX;
+ IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
iwl_hw_reg_set_scan_power(priv, scan_tbl_index,
actual_index, clip_pwrs,
ch_info, a_band);
@@ -1905,19 +1924,19 @@ static void iwl_hw_reg_init_channel_groups(struct iwl_priv *priv)
for (rate_index = 0;
rate_index < IWL_RATE_COUNT; rate_index++, clip_pwrs++) {
switch (rate_index) {
- case IWL_RATE_36M_INDEX:
+ case IWL_RATE_36M_INDEX_TABLE:
if (i == 0) /* B/G */
*clip_pwrs = satur_pwr;
else /* A */
*clip_pwrs = satur_pwr - 5;
break;
- case IWL_RATE_48M_INDEX:
+ case IWL_RATE_48M_INDEX_TABLE:
if (i == 0)
*clip_pwrs = satur_pwr - 7;
else
*clip_pwrs = satur_pwr - 10;
break;
- case IWL_RATE_54M_INDEX:
+ case IWL_RATE_54M_INDEX_TABLE:
if (i == 0)
*clip_pwrs = satur_pwr - 9;
else
@@ -2031,7 +2050,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
}
/* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
- pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX];
+ pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
power = pwr_info->requested_power +
IWL_CCK_FROM_OFDM_POWER_DIFF;
pwr_index = pwr_info->power_table_index +
@@ -2047,9 +2066,9 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
/* fill each CCK rate's iwl_channel_power_info structure
* NOTE: All CCK-rate Txpwrs are the same for a given chnl!
* NOTE: CCK rates start at end of OFDM rates! */
- for (rate_index = IWL_OFDM_RATES;
- rate_index < IWL_RATE_COUNT; rate_index++) {
- pwr_info = &ch_info->power_info[rate_index];
+ for (rate_index = 0;
+ rate_index < IWL_CCK_RATES; rate_index++) {
+ pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
pwr_info->requested_power = power;
pwr_info->power_table_index = pwr_index;
pwr_info->base_power_index = base_pwr_index;
@@ -2061,7 +2080,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
for (scan_tbl_index = 0;
scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX : IWL_RATE_6M_INDEX;
+ IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
iwl_hw_reg_set_scan_power(priv, scan_tbl_index,
actual_index, clip_pwrs, ch_info, a_band);
}
@@ -2139,17 +2158,20 @@ int iwl_hw_get_rx_read(struct iwl_priv *priv)
*/
int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
{
- int rc, i;
+ int rc, i, index, prev_index;
struct iwl_rate_scaling_cmd rate_cmd = {
.reserved = {0, 0, 0},
};
struct iwl_rate_scaling_info *table = rate_cmd.table;
for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) {
- table[i].rate_n_flags =
+ index = iwl_rates[i].table_rs_index;
+
+ table[index].rate_n_flags =
iwl_hw_set_rate_n_flags(iwl_rates[i].plcp, 0);
- table[i].try_cnt = priv->retry_rate;
- table[i].next_rate_index = iwl_get_prev_ieee_rate(i);
+ table[index].try_cnt = priv->retry_rate;
+ prev_index = iwl_get_prev_ieee_rate(i);
+ table[index].next_rate_index = iwl_rates[prev_index].table_rs_index;
}
switch (priv->phymode) {
@@ -2157,26 +2179,26 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
IWL_DEBUG_RATE("Select A mode rate scale\n");
/* If one of the following CCK rates is used,
* have it fall back to the 6M OFDM rate */
- for (i = IWL_FIRST_CCK_RATE; i <= IWL_LAST_CCK_RATE; i++)
- table[i].next_rate_index = IWL_FIRST_OFDM_RATE;
+ for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++)
+ table[i].next_rate_index = iwl_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
/* Don't fall back to CCK rates */
- table[IWL_RATE_12M_INDEX].next_rate_index = IWL_RATE_9M_INDEX;
+ table[IWL_RATE_12M_INDEX_TABLE].next_rate_index = IWL_RATE_9M_INDEX_TABLE;
/* Don't drop out of OFDM rates */
- table[IWL_FIRST_OFDM_RATE].next_rate_index =
- IWL_FIRST_OFDM_RATE;
+ table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
+ iwl_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
break;
case MODE_IEEE80211B:
IWL_DEBUG_RATE("Select B mode rate scale\n");
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
- for (i = IWL_FIRST_OFDM_RATE; i <= IWL_LAST_OFDM_RATE; i++)
- table[i].next_rate_index = IWL_FIRST_CCK_RATE;
+ for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; i++)
+ table[i].next_rate_index = iwl_rates[IWL_FIRST_CCK_RATE].table_rs_index;
/* CCK shouldn't fall back to OFDM... */
- table[IWL_RATE_11M_INDEX].next_rate_index = IWL_RATE_5M_INDEX;
+ table[IWL_RATE_11M_INDEX_TABLE].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
break;
default:
@@ -2248,22 +2270,12 @@ unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
TX_CMD_FLG_TSF_MSK);
- /* supp_rates[0] == OFDM */
- tx_beacon_cmd->tx.supp_rates[0] = IWL_OFDM_BASIC_RATES_MASK;
-
- /* supp_rates[1] == CCK
- *
- * NOTE: IWL_*_RATES_MASK are not in the order that supp_rates
- * expects so we have to shift them around.
- *
- * supp_rates expects:
- * CCK rates are bit0..3
- *
- * However IWL_*_RATES_MASK has:
- * CCK rates are bit8..11
- */
+ /* supp_rates[0] == OFDM rates, starting at IWL_FIRST_OFDM_RATE */
+ tx_beacon_cmd->tx.supp_rates[0] =
+ (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
+
tx_beacon_cmd->tx.supp_rates[1] =
- (IWL_CCK_BASIC_RATES_MASK >> 8) & 0xF;
+ (IWL_CCK_BASIC_RATES_MASK & 0xF);
return (sizeof(struct iwl_tx_beacon_cmd) + frame_size);
}
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index be7c9f42a340..465da4f67ce7 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4850,7 +4850,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
/* Hardware disappeared */
IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
- goto none;
+ goto unplugged;
}
IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -4858,6 +4858,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
/* iwl_irq_tasklet() will service interrupts and re-enable them */
tasklet_schedule(&priv->irq_tasklet);
+unplugged:
spin_unlock(&priv->lock);
return IRQ_HANDLED;
@@ -5331,13 +5332,13 @@ static int iwl_init_geos(struct iwl_priv *priv)
/* 5.2GHz channels start after the 2.4GHz channels */
modes[A].mode = MODE_IEEE80211A;
modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
- modes[A].rates = rates;
+ modes[A].rates = &rates[4];
modes[A].num_rates = 8; /* just OFDM */
modes[A].num_channels = 0;
modes[B].mode = MODE_IEEE80211B;
modes[B].channels = channels;
- modes[B].rates = &rates[8];
+ modes[B].rates = rates;
modes[B].num_rates = 4; /* just CCK */
modes[B].num_channels = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 6757c6c1b25a..9918780f5e86 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -5156,9 +5156,10 @@ static irqreturn_t iwl_isr(int irq, void *data)
}
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared */
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta);
- goto none;
+ goto unplugged;
}
IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
@@ -5166,8 +5167,9 @@ static irqreturn_t iwl_isr(int irq, void *data)
/* iwl_irq_tasklet() will service interrupts and re-enable them */
tasklet_schedule(&priv->irq_tasklet);
- spin_unlock(&priv->lock);
+ unplugged:
+ spin_unlock(&priv->lock);
return IRQ_HANDLED;
none:
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index ff2d63267b19..702321c30164 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -620,7 +620,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
* up to version C the link tuning should halt after 20
* seconds.
*/
- if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+ if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
rt2x00dev->link.count > 20)
return;
@@ -630,7 +630,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
* Chipset versions C and lower should directly continue
* to the dynamic CCA tuning.
*/
- if (rt2x00_get_rev(&rt2x00dev->chip) < RT2560_VERSION_D)
+ if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D)
goto dynamic_cca_tune;
/*
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 7cdc80a122bb..277a020b35e9 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -753,7 +753,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- if (rt2x00_get_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
+ if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
reg &= ~0x0002;
} else {
@@ -1257,7 +1257,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
- if (rt2x00_rev(&rt2x00dev->chip, 0xffff0)) {
+ if (!rt2x00_check_rev(&rt2x00dev->chip, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 9845e584b731..d1ad5251a77a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -751,14 +751,16 @@ static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
return (chipset->rf == chip);
}
-static inline u16 rt2x00_get_rev(const struct rt2x00_chip *chipset)
+static inline u16 rt2x00_rev(const struct rt2x00_chip *chipset)
{
return chipset->rev;
}
-static inline u16 rt2x00_rev(const struct rt2x00_chip *chipset, const u32 mask)
+static inline u16 rt2x00_check_rev(const struct rt2x00_chip *chipset,
+ const u32 rev)
{
- return chipset->rev & mask;
+ return (((chipset->rev & 0xffff0) == rev) &&
+ !!(chipset->rev & 0x0000f));
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 46c8c0840a65..dc640bf6b5eb 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1486,7 +1486,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt73usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
- if (!rt2x00_rev(&rt2x00dev->chip, 0x25730)) {
+ if (!rt2x00_check_rev(&rt2x00dev->chip, 0x25730)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index a83c3db7d18f..c93d3d2640ab 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -64,6 +64,8 @@ int alloc_cpu_buffers(void)
b->head_pos = 0;
b->sample_received = 0;
b->sample_lost_overflow = 0;
+ b->backtrace_aborted = 0;
+ b->sample_invalid_eip = 0;
b->cpu = i;
INIT_DELAYED_WORK(&b->work, wq_sync_buffer);
}
@@ -175,6 +177,11 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
cpu_buf->sample_received++;
+ if (pc == ESCAPE_CODE) {
+ cpu_buf->sample_invalid_eip++;
+ return 0;
+ }
+
if (nr_available_slots(cpu_buf) < 3) {
cpu_buf->sample_lost_overflow++;
return 0;
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index 49900d9e3235..c66c025abe75 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -42,6 +42,7 @@ struct oprofile_cpu_buffer {
unsigned long sample_received;
unsigned long sample_lost_overflow;
unsigned long backtrace_aborted;
+ unsigned long sample_invalid_eip;
int cpu;
struct delayed_work work;
} ____cacheline_aligned;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index f0acb661c253..d1f6d776e9e4 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -26,6 +26,8 @@ void oprofile_reset_stats(void)
cpu_buf = &cpu_buffer[i];
cpu_buf->sample_received = 0;
cpu_buf->sample_lost_overflow = 0;
+ cpu_buf->backtrace_aborted = 0;
+ cpu_buf->sample_invalid_eip = 0;
}
atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
@@ -61,6 +63,8 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
&cpu_buf->sample_lost_overflow);
oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
&cpu_buf->backtrace_aborted);
+ oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
+ &cpu_buf->sample_invalid_eip);
}
oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
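The oprofile hunks add a sample_invalid_eip counter: log_sample() now rejects any sample whose pc equals ESCAPE_CODE, the buffer's reserved marker value, counts it, and exports the per-CPU total through oprofilefs next to the existing counters. A hedged userspace sketch for reading it; the mount point and the cpu0 path are assumptions, not taken from the patch:

#include <stdio.h>

int main(void)
{
	unsigned long val;
	FILE *f = fopen("/dev/oprofile/stats/cpu0/sample_invalid_eip", "r");

	if (!f) {
		perror("sample_invalid_eip");
		return 1;
	}
	if (fscanf(f, "%lu", &val) == 1)
		printf("samples with invalid EIP on cpu0: %lu\n", val);
	fclose(f);
	return 0;
}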
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cbde770eb121..e5cdc0294aaa 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -36,7 +36,9 @@ config RTC_HCTOSYS_DEVICE
help
The RTC device that will be used to (re)initialize the system
clock, usually rtc0. Initialization is done when the system
- starts up, and when it resumes from a low power state.
+ starts up, and when it resumes from a low power state. This
+ device should record time in UTC, since the kernel won't do
+ timezone correction.
The driver for this RTC device must be loaded before late_initcall
functions run, so it must usually be statically linked.
@@ -133,8 +135,8 @@ config RTC_DRV_DS1307
The first seven registers on these chips hold an RTC, and other
registers may add features such as NVRAM, a trickle charger for
- the RTC/NVRAM backup power, and alarms. This driver may not
- expose all those available chip features.
+ the RTC/NVRAM backup power, and alarms. NVRAM is visible in
+ sysfs, but other chip features may not be available.
This driver can also be built as a module. If so, the module
will be called rtc-ds1307.
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index 178527252c6a..33c0e98243ee 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -47,8 +47,8 @@ static int __init rtc_hctosys(void)
do_settimeofday(&tv);
dev_info(rtc->dev.parent,
- "setting the system clock to "
- "%d-%02d-%02d %02d:%02d:%02d (%u)\n",
+ "setting system clock to "
+ "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec,
(unsigned int) tv.tv_sec);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index db6f3f0d8982..bc1c7fe94ad3 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -89,6 +89,7 @@ enum ds_type {
struct ds1307 {
u8 reg_addr;
+ bool has_nvram;
u8 regs[8];
enum ds_type type;
struct i2c_msg msg[2];
@@ -242,6 +243,87 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
.set_time = ds1307_set_time,
};
+/*----------------------------------------------------------------------*/
+
+#define NVRAM_SIZE 56
+
+static ssize_t
+ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client;
+ struct ds1307 *ds1307;
+ struct i2c_msg msg[2];
+ int result;
+
+ client = to_i2c_client(container_of(kobj, struct device, kobj));
+ ds1307 = i2c_get_clientdata(client);
+
+ if (unlikely(off >= NVRAM_SIZE))
+ return 0;
+ if ((off + count) > NVRAM_SIZE)
+ count = NVRAM_SIZE - off;
+ if (unlikely(!count))
+ return count;
+
+ msg[0].addr = client->addr;
+ msg[0].flags = 0;
+ msg[0].len = 1;
+ msg[0].buf = buf;
+
+ buf[0] = 8 + off;
+
+ msg[1].addr = client->addr;
+ msg[1].flags = I2C_M_RD;
+ msg[1].len = count;
+ msg[1].buf = buf;
+
+ result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2);
+ if (result != 2) {
+ dev_err(&client->dev, "%s error %d\n", "nvram read", result);
+ return -EIO;
+ }
+ return count;
+}
+
+static ssize_t
+ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client;
+ u8 buffer[NVRAM_SIZE + 1];
+ int ret;
+
+ client = to_i2c_client(container_of(kobj, struct device, kobj));
+
+ if (unlikely(off >= NVRAM_SIZE))
+ return -EFBIG;
+ if ((off + count) > NVRAM_SIZE)
+ count = NVRAM_SIZE - off;
+ if (unlikely(!count))
+ return count;
+
+ buffer[0] = 8 + off;
+ memcpy(buffer + 1, buf, count);
+
+ ret = i2c_master_send(client, buffer, count + 1);
+ return (ret < 0) ? ret : (ret - 1);
+}
+
+static struct bin_attribute nvram = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+
+ .read = ds1307_nvram_read,
+ .write = ds1307_nvram_write,
+ .size = NVRAM_SIZE,
+};
+
+/*----------------------------------------------------------------------*/
+
static struct i2c_driver ds1307_driver;
static int __devinit ds1307_probe(struct i2c_client *client)
@@ -413,6 +495,14 @@ read_rtc:
goto exit_free;
}
+ if (chip->nvram56) {
+ err = sysfs_create_bin_file(&client->dev.kobj, &nvram);
+ if (err == 0) {
+ ds1307->has_nvram = true;
+ dev_info(&client->dev, "56 bytes nvram\n");
+ }
+ }
+
return 0;
exit_bad:
@@ -432,6 +522,9 @@ static int __devexit ds1307_remove(struct i2c_client *client)
{
struct ds1307 *ds1307 = i2c_get_clientdata(client);
+ if (ds1307->has_nvram)
+ sysfs_remove_bin_file(&client->dev.kobj, &nvram);
+
rtc_device_unregister(ds1307->rtc);
kfree(ds1307);
return 0;
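The rtc-ds1307 changes export the chip's 56-byte NVRAM (registers 0x08 and up) as a binary sysfs attribute, readable by everyone and writable only by root, and remove the file again when the driver is unbound. A sketch of how userspace might read it; the sysfs path depends on the I2C bus and slave address, so 1-0068 below is only an example:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char nv[56];	/* NVRAM_SIZE in the patch */
	ssize_t n;
	int fd = open("/sys/bus/i2c/devices/1-0068/nvram", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, nv, sizeof(nv));
	if (n >= 0)
		printf("read %zd bytes of DS1307 NVRAM\n", n);
	close(fd);
	return 0;
}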
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index bb53c09bad16..d9e848dcd450 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -291,7 +291,7 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj,
static struct bin_attribute ds1553_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
},
.size = RTC_OFFSET,
.read = ds1553_nvram_read,
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index c535b78698e2..2e73f0b183b2 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -160,10 +160,13 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj,
static struct bin_attribute ds1742_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
},
.read = ds1742_nvram_read,
.write = ds1742_nvram_write,
+ /* REVISIT: size in sysfs won't match actual size... if it's
+ * not a constant, each RTC should have its own attribute.
+ */
};
static int __devinit ds1742_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 2bad1637330a..cd0bbc0e8038 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -353,11 +353,12 @@ static ssize_t m48t59_nvram_write(struct kobject *kobj,
static struct bin_attribute m48t59_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
.owner = THIS_MODULE,
},
.read = m48t59_nvram_read,
.write = m48t59_nvram_write,
+ .size = M48T59_NVRAM_SIZE,
};
static int __devinit m48t59_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 8288b6b2bf2b..a265da7c6ff8 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -291,7 +291,7 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj,
static struct bin_attribute stk17ta8_nvram_attr = {
.attr = {
.name = "nvram",
- .mode = S_IRUGO | S_IWUGO,
+ .mode = S_IRUGO | S_IWUSR,
.owner = THIS_MODULE,
},
.size = RTC_OFFSET,
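The S_IWUGO to S_IWUSR changes in rtc-ds1553, rtc-ds1742, rtc-m48t59 and rtc-stk17ta8 tighten the NVRAM attribute from world-writable to owner-writable. A tiny userspace sketch, only to make the resulting octal modes explicit (S_IRUGO and S_IWUGO are kernel shorthands for the POSIX bits spelled out here):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	unsigned int old_mode = S_IRUSR | S_IRGRP | S_IROTH |
				S_IWUSR | S_IWGRP | S_IWOTH;	/* S_IRUGO | S_IWUGO */
	unsigned int new_mode = S_IRUSR | S_IRGRP | S_IROTH |
				S_IWUSR;			/* S_IRUGO | S_IWUSR */

	printf("old %04o, new %04o\n", old_mode, new_mode);	/* 0666 vs 0644 */
	return 0;
}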
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 5b0932f61473..06509bff71f7 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -377,7 +377,7 @@ out:
#define FLASH_RESET 0xF0
-#define FLASH_SIZE 0x200000
+#define ASD_FLASH_SIZE 0x200000
#define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** "
#define FLASH_NEXT_ENTRY_OFFS 0x2000
#define FLASH_MAX_DIR_ENTRIES 32
@@ -609,7 +609,7 @@ static int asd_find_flash_dir(struct asd_ha_struct *asd_ha,
struct asd_flash_dir *flash_dir)
{
u32 v;
- for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
+ for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) {
asd_read_flash_seg(asd_ha, flash_dir, v,
sizeof(FLASH_DIR_COOKIE)-1);
if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE,
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 926f58a674a1..1de098e75497 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -69,6 +69,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "CTL3001", 0 },
/* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
{ "CTL3011", 0 },
+ /* Davicom ISA 33.6K Modem */
+ { "DAV0336", 0 },
/* Creative */
/* Creative Modem Blaster Flash56 DI5601-1 */
{ "DMB1032", 0 },
@@ -345,6 +347,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
/* Fujitsu Wacom Tablet PC devices */
{ "FUJ02E5", 0 },
{ "FUJ02E6", 0 },
+ /*
+ * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
+ * disguise)
+ */
+ { "LTS0001", 0 },
/* Rockwell's (PORALiNK) 33600 INT PNP */
{ "WCI0003", 0 },
/* Unkown PnP modems */
@@ -432,7 +439,8 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
}
memset(&port, 0, sizeof(struct uart_port));
- port.irq = pnp_irq(dev, 0);
+ if (pnp_irq_valid(dev, 0))
+ port.irq = pnp_irq(dev, 0);
if (pnp_port_valid(dev, 0)) {
port.iobase = pnp_port_start(dev, 0);
port.iotype = UPIO_PORT;
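Besides the two new device IDs, the 8250_pnp hunk makes the IRQ assignment conditional on pnp_irq_valid(), matching the existing pnp_port_valid() check so a missing IRQ resource no longer feeds a bogus value into the uart_port. A condensed sketch of that pattern; the helper name is invented, while the pnp_* and uart_port symbols are the ones already used above:

#include <linux/pnp.h>
#include <linux/serial_core.h>

static void fill_uart_resources(struct pnp_dev *dev, struct uart_port *port)
{
	if (pnp_irq_valid(dev, 0))		/* only trust a present IRQ resource */
		port->irq = pnp_irq(dev, 0);
	if (pnp_port_valid(dev, 0)) {
		port->iobase = pnp_port_start(dev, 0);
		port->iotype = UPIO_PORT;
	}
}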
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 4d6b3c56d20e..111da57f5334 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -204,8 +204,6 @@ static u_int atmel_get_mctrl(struct uart_port *port)
*/
static void atmel_stop_tx(struct uart_port *port)
{
- struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
-
UART_PUT_IDR(port, ATMEL_US_TXRDY);
}
@@ -214,8 +212,6 @@ static void atmel_stop_tx(struct uart_port *port)
*/
static void atmel_start_tx(struct uart_port *port)
{
- struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
-
UART_PUT_IER(port, ATMEL_US_TXRDY);
}
@@ -224,8 +220,6 @@ static void atmel_start_tx(struct uart_port *port)
*/
static void atmel_stop_rx(struct uart_port *port)
{
- struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
-
UART_PUT_IDR(port, ATMEL_US_RXRDY);
}
@@ -409,7 +403,6 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
*/
static int atmel_startup(struct uart_port *port)
{
- struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
int retval;
/*
@@ -456,8 +449,6 @@ static int atmel_startup(struct uart_port *port)
*/
static void atmel_shutdown(struct uart_port *port)
{
- struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port;
-
/*
* Disable all interrupts, port and break condition.
*/
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index f523cdf4b02b..a4e23cf47906 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -1,426 +1,10 @@
-/* $Id: serial.c,v 1.25 2004/09/29 10:33:49 starvik Exp $
- *
+/*
* Serial port driver for the ETRAX 100LX chip
*
- * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Axis Communications AB
+ * Copyright (C) 1998-2007 Axis Communications AB
*
* Many, many authors. Based once upon a time on serial.c for 16x50.
*
- * $Log: serial.c,v $
- * Revision 1.25 2004/09/29 10:33:49 starvik
- * Resolved a dealock when printing debug from kernel.
- *
- * Revision 1.24 2004/08/27 23:25:59 johana
- * rs_set_termios() must call change_speed() if c_iflag has changed or
- * automatic XOFF handling will be enabled and transmitter will stop
- * if 0x13 is received.
- *
- * Revision 1.23 2004/08/24 06:57:13 starvik
- * More whitespace cleanup
- *
- * Revision 1.22 2004/08/24 06:12:20 starvik
- * Whitespace cleanup
- *
- * Revision 1.20 2004/05/24 12:00:20 starvik
- * Big merge of stuff from Linux 2.4 (e.g. manual mode for the serial port).
- *
- * Revision 1.19 2004/05/17 13:12:15 starvik
- * Kernel console hook
- * Big merge from Linux 2.4 still pending.
- *
- * Revision 1.18 2003/10/28 07:18:30 starvik
- * Compiles with debug info
- *
- * Revision 1.17 2003/07/04 08:27:37 starvik
- * Merge of Linux 2.5.74
- *
- * Revision 1.16 2003/06/13 10:05:19 johana
- * Help the user to avoid trouble by:
- * Forcing mixed mode for status/control lines if not all pins are used.
- *
- * Revision 1.15 2003/06/13 09:43:01 johana
- * Merged in the following changes from os/linux/arch/cris/drivers/serial.c
- * + some minor changes to reduce diff.
- *
- * Revision 1.49 2003/05/30 11:31:54 johana
- * Merged in change-branch--serial9bit that adds CMSPAR support for sticky
- * parity (mark/space)
- *
- * Revision 1.48 2003/05/30 11:03:57 johana
- * Implemented rs_send_xchar() by disabling the DMA and writing manually.
- * Added e100_disable_txdma_channel() and e100_enable_txdma_channel().
- * Fixed rs_throttle() and rs_unthrottle() to properly call rs_send_xchar
- * instead of setting info->x_char and check the CRTSCTS flag before
- * controlling the rts pin.
- *
- * Revision 1.14 2003/04/09 08:12:44 pkj
- * Corrected typo changes made upstream.
- *
- * Revision 1.13 2003/04/09 05:20:47 starvik
- * Merge of Linux 2.5.67
- *
- * Revision 1.11 2003/01/22 06:48:37 starvik
- * Fixed warnings issued by GCC 3.2.1
- *
- * Revision 1.9 2002/12/13 09:07:47 starvik
- * Alert user that RX_TIMEOUT_TICKS==0 doesn't work
- *
- * Revision 1.8 2002/12/11 13:13:57 starvik
- * Added arch/ to v10 specific includes
- * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer)
- *
- * Revision 1.7 2002/12/06 07:13:57 starvik
- * Corrected work queue stuff
- * Removed CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
- *
- * Revision 1.6 2002/11/21 07:17:46 starvik
- * Change static inline to extern inline where otherwise outlined with gcc-3.2
- *
- * Revision 1.5 2002/11/14 15:59:49 starvik
- * Linux 2.5 port of the latest serial driver from 2.4. The work queue stuff
- * probably doesn't work yet.
- *
- * Revision 1.42 2002/11/05 09:08:47 johana
- * Better implementation of rs_stop() and rs_start() that uses the XOFF
- * register to start/stop transmission.
- * change_speed() also initilises XOFF register correctly so that
- * auto_xoff is enabled when IXON flag is set by user.
- * This gives fast XOFF response times.
- *
- * Revision 1.41 2002/11/04 18:40:57 johana
- * Implemented rs_stop() and rs_start().
- * Simple tests using hwtestserial indicates that this should be enough
- * to make it work.
- *
- * Revision 1.40 2002/10/14 05:33:18 starvik
- * RS-485 uses fast timers even if SERIAL_FAST_TIMER is disabled
- *
- * Revision 1.39 2002/09/30 21:00:57 johana
- * Support for CONFIG_ETRAX_SERx_DTR_RI_DSR_CD_MIXED where the status and
- * control pins can be mixed between PA and PB.
- * If no serial port uses MIXED old solution is used
- * (saves a few bytes and cycles).
- * control_pins struct uses masks instead of bit numbers.
- * Corrected dummy values and polarity in line_info() so
- * /proc/tty/driver/serial is now correct.
- * (the E100_xxx_GET() macros is really active low - perhaps not obvious)
- *
- * Revision 1.38 2002/08/23 11:01:36 starvik
- * Check that serial port is enabled in all interrupt handlers to avoid
- * restarts of DMA channels not assigned to serial ports
- *
- * Revision 1.37 2002/08/13 13:02:37 bjornw
- * Removed some warnings because of unused code
- *
- * Revision 1.36 2002/08/08 12:50:01 starvik
- * Serial interrupt is shared with synchronous serial port driver
- *
- * Revision 1.35 2002/06/03 10:40:49 starvik
- * Increased RS-485 RTS toggle timer to 2 characters
- *
- * Revision 1.34 2002/05/28 18:59:36 johana
- * Whitespace and comment fixing to be more like etrax100ser.c 1.71.
- *
- * Revision 1.33 2002/05/28 17:55:43 johana
- * RS-485 uses FAST_TIMER if enabled, and starts a short (one char time)
- * timer from tranismit_chars (interrupt context).
- * The timer toggles RTS in interrupt context when expired giving minimum
- * latencies.
- *
- * Revision 1.32 2002/05/22 13:58:00 johana
- * Renamed rs_write() to raw_write() and made it inline.
- * New rs_write() handles RS-485 if configured and enabled
- * (moved code from e100_write_rs485()).
- * RS-485 ioctl's uses copy_from_user() instead of verify_area().
- *
- * Revision 1.31 2002/04/22 11:20:03 johana
- * Updated copyright years.
- *
- * Revision 1.30 2002/04/22 09:39:12 johana
- * RS-485 support compiles.
- *
- * Revision 1.29 2002/01/14 16:10:01 pkj
- * Allocate the receive buffers dynamically. The static 4kB buffer was
- * too small for the peaks. This means that we can get rid of the extra
- * buffer and the copying to it. It also means we require less memory
- * under normal operations, but can use more when needed (there is a
- * cap at 64kB for safety reasons). If there is no memory available
- * we panic(), and die a horrible death...
- *
- * Revision 1.28 2001/12/18 15:04:53 johana
- * Cleaned up write_rs485() - now it works correctly without padding extra
- * char.
- * Added sane default initialisation of rs485.
- * Added #ifdef around dummy variables.
- *
- * Revision 1.27 2001/11/29 17:00:41 pkj
- * 2kB seems to be too small a buffer when using 921600 bps,
- * so increase it to 4kB (this was already done for the elinux
- * version of the serial driver).
- *
- * Revision 1.26 2001/11/19 14:20:41 pkj
- * Minor changes to comments and unused code.
- *
- * Revision 1.25 2001/11/12 20:03:43 pkj
- * Fixed compiler warnings.
- *
- * Revision 1.24 2001/11/12 15:10:05 pkj
- * Total redesign of the receiving part of the serial driver.
- * Uses eight chained descriptors to write to a 4kB buffer.
- * This data is then serialised into a 2kB buffer. From there it
- * is copied into the TTY's flip buffers when they become available.
- * A lot of copying, and the sizes of the buffers might need to be
- * tweaked, but all in all it should work better than the previous
- * version, without the need to modify the TTY code in any way.
- * Also note that erroneous bytes are now correctly marked in the
- * flag buffers (instead of always marking the first byte).
- *
- * Revision 1.23 2001/10/30 17:53:26 pkj
- * * Set info->uses_dma to 0 when a port is closed.
- * * Mark the timer1 interrupt as a fast one (SA_INTERRUPT).
- * * Call start_flush_timer() in start_receive() if
- * CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST is defined.
- *
- * Revision 1.22 2001/10/30 17:44:03 pkj
- * Use %lu for received and transmitted counters in line_info().
- *
- * Revision 1.21 2001/10/30 17:40:34 pkj
- * Clean-up. The only change to functionality is that
- * CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS(=5) is used instead of
- * MAX_FLUSH_TIME(=8).
- *
- * Revision 1.20 2001/10/30 15:24:49 johana
- * Added char_time stuff from 2.0 driver.
- *
- * Revision 1.19 2001/10/30 15:23:03 johana
- * Merged with 1.13.2 branch + fixed indentation
- * and changed CONFIG_ETRAX100_XYS to CONFIG_ETRAX_XYZ
- *
- * Revision 1.18 2001/09/24 09:27:22 pkj
- * Completed ext_baud_table[] in cflag_to_baud() and cflag_to_etrax_baud().
- *
- * Revision 1.17 2001/08/24 11:32:49 ronny
- * More fixes for the CONFIG_ETRAX_SERIAL_PORT0 define.
- *
- * Revision 1.16 2001/08/24 07:56:22 ronny
- * Added config ifdefs around ser0 irq requests.
- *
- * Revision 1.15 2001/08/16 09:10:31 bjarne
- * serial.c - corrected the initialization of rs_table, the wrong defines
- * where used.
- * Corrected a test in timed_flush_handler.
- * Changed configured to enabled.
- * serial.h - Changed configured to enabled.
- *
- * Revision 1.14 2001/08/15 07:31:23 bjarne
- * Introduced two new members to the e100_serial struct.
- * configured - Will be set to 1 if the port has been configured in .config
- * uses_dma - Should be set to 1 if the port uses DMA. Currently it is set
- * to 1
- * when a port is opened. This is used to limit the DMA interrupt
- * routines to only manipulate DMA channels actually used by the
- * serial driver.
- *
- * Revision 1.13.2.2 2001/10/17 13:57:13 starvik
- * Receiver was broken by the break fixes
- *
- * Revision 1.13.2.1 2001/07/20 13:57:39 ronny
- * Merge with new stuff from etrax100ser.c. Works but haven't checked stuff
- * like break handling.
- *
- * Revision 1.13 2001/05/09 12:40:31 johana
- * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h
- *
- * Revision 1.12 2001/04/19 12:23:07 bjornw
- * CONFIG_RS485 -> CONFIG_ETRAX_RS485
- *
- * Revision 1.11 2001/04/05 14:29:48 markusl
- * Updated according to review remarks i.e.
- * -Use correct types in port structure to avoid compiler warnings
- * -Try to use IO_* macros whenever possible
- * -Open should never return -EBUSY
- *
- * Revision 1.10 2001/03/05 13:14:07 bjornw
- * Another spelling fix
- *
- * Revision 1.9 2001/02/23 13:46:38 bjornw
- * Spellling check
- *
- * Revision 1.8 2001/01/23 14:56:35 markusl
- * Made use of ser1 optional
- * Needed by USB
- *
- * Revision 1.7 2001/01/19 16:14:48 perf
- * Added kernel options for serial ports 234.
- * Changed option names from CONFIG_ETRAX100_XYZ to CONFIG_ETRAX_XYZ.
- *
- * Revision 1.6 2000/11/22 16:36:09 bjornw
- * Please marketing by using the correct case when spelling Etrax.
- *
- * Revision 1.5 2000/11/21 16:43:37 bjornw
- * Fixed so it compiles under CONFIG_SVINTO_SIM
- *
- * Revision 1.4 2000/11/15 17:34:12 bjornw
- * Added a timeout timer for flushing input channels. The interrupt-based
- * fast flush system should be easy to merge with this later (works the same
- * way, only with an irq instead of a system timer_list)
- *
- * Revision 1.3 2000/11/13 17:19:57 bjornw
- * * Incredibly, this almost complete rewrite of serial.c worked (at least
- * for output) the first time.
- *
- * Items worth noticing:
- *
- * No Etrax100 port 1 workarounds (does only compile on 2.4 anyway now)
- * RS485 is not ported (why can't it be done in userspace as on x86 ?)
- * Statistics done through async_icount - if any more stats are needed,
- * that's the place to put them or in an arch-dep version of it.
- * timeout_interrupt and the other fast timeout stuff not ported yet
- * There be dragons in this 3k+ line driver
- *
- * Revision 1.2 2000/11/10 16:50:28 bjornw
- * First shot at a 2.4 port, does not compile totally yet
- *
- * Revision 1.1 2000/11/10 16:47:32 bjornw
- * Added verbatim copy of rev 1.49 etrax100ser.c from elinux
- *
- * Revision 1.49 2000/10/30 15:47:14 tobiasa
- * Changed version number.
- *
- * Revision 1.48 2000/10/25 11:02:43 johana
- * Changed %ul to %lu in printf's
- *
- * Revision 1.47 2000/10/18 15:06:53 pkj
- * Compile correctly with CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST and
- * CONFIG_ETRAX_SERIAL_PROC_ENTRY together.
- * Some clean-up of the /proc/serial file.
- *
- * Revision 1.46 2000/10/16 12:59:40 johana
- * Added CONFIG_ETRAX_SERIAL_PROC_ENTRY for statistics and debug info.
- *
- * Revision 1.45 2000/10/13 17:10:59 pkj
- * Do not flush DMAs while flipping TTY buffers.
- *
- * Revision 1.44 2000/10/13 16:34:29 pkj
- * Added a delay in ser_interrupt() for 2.3ms when an error is detected.
- * We do not know why this delay is required yet, but without it the
- * irmaflash program does not work (this was the program that needed
- * the ser_interrupt() to be needed in the first place). This should not
- * affect normal use of the serial ports.
- *
- * Revision 1.43 2000/10/13 16:30:44 pkj
- * New version of the fast flush of serial buffers code. This time
- * it is localized to the serial driver and uses a fast timer to
- * do the work.
- *
- * Revision 1.42 2000/10/13 14:54:26 bennyo
- * Fix for switching RTS when using rs485
- *
- * Revision 1.41 2000/10/12 11:43:44 pkj
- * Cleaned up a number of comments.
- *
- * Revision 1.40 2000/10/10 11:58:39 johana
- * Made RS485 support generic for all ports.
- * Toggle rts in interrupt if no delay wanted.
- * WARNING: No true transmitter empty check??
- * Set d_wait bit when sending data so interrupt is delayed until
- * fifo flushed. (Fix tcdrain() problem)
- *
- * Revision 1.39 2000/10/04 16:08:02 bjornw
- * * Use virt_to_phys etc. for DMA addresses
- * * Removed CONFIG_FLUSH_DMA_FAST hacks
- * * Indentation fix
- *
- * Revision 1.38 2000/10/02 12:27:10 mattias
- * * added variable used when using fast flush on serial dma.
- * (CONFIG_FLUSH_DMA_FAST)
- *
- * Revision 1.37 2000/09/27 09:44:24 pkj
- * Uncomment definition of SERIAL_HANDLE_EARLY_ERRORS.
- *
- * Revision 1.36 2000/09/20 13:12:52 johana
- * Support for CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS:
- * Number of timer ticks between flush of receive fifo (1 tick = 10ms).
- * Try 0-3 for low latency applications. Approx 5 for high load
- * applications (e.g. PPP). Maybe this should be more adaptive some day...
- *
- * Revision 1.35 2000/09/20 10:36:08 johana
- * Typo in get_lsr_info()
- *
- * Revision 1.34 2000/09/20 10:29:59 johana
- * Let rs_chars_in_buffer() check fifo content as well.
- * get_lsr_info() might work now (not tested).
- * Easier to change the port to debug.
- *
- * Revision 1.33 2000/09/13 07:52:11 torbjore
- * Support RS485
- *
- * Revision 1.32 2000/08/31 14:45:37 bjornw
- * After sending a break we need to reset the transmit DMA channel
- *
- * Revision 1.31 2000/06/21 12:13:29 johana
- * Fixed wait for all chars sent when closing port.
- * (Used to always take 1 second!)
- * Added shadows for directions of status/ctrl signals.
- *
- * Revision 1.30 2000/05/29 16:27:55 bjornw
- * Simulator ifdef moved a bit
- *
- * Revision 1.29 2000/05/09 09:40:30 mattias
- * * Added description of dma registers used in timeout_interrupt
- * * Removed old code
- *
- * Revision 1.28 2000/05/08 16:38:58 mattias
- * * Bugfix for flushing fifo in timeout_interrupt
- * Problem occurs when bluetooth stack waits for a small number of bytes
- * containing an event acknowledging free buffers in bluetooth HW
- * As before, data was stuck in fifo until more data came on uart and
- * flushed it up to the stack.
- *
- * Revision 1.27 2000/05/02 09:52:28 jonasd
- * Added fix for peculiar etrax behaviour when eop is forced on an empty
- * fifo. This is used when flashing the IRMA chip. Disabled by default.
- *
- * Revision 1.26 2000/03/29 15:32:02 bjornw
- * 2.0.34 updates
- *
- * Revision 1.25 2000/02/16 16:59:36 bjornw
- * * Receive DMA directly into the flip-buffer, eliminating an intermediary
- * receive buffer and a memcpy. Will avoid some overruns.
- * * Error message on debug port if an overrun or flip buffer overrun occurs.
- * * Just use the first byte in the flag flip buffer for errors.
- * * Check for timeout on the serial ports only each 5/100 s, not 1/100.
- *
- * Revision 1.24 2000/02/09 18:02:28 bjornw
- * * Clear serial errors (overrun, framing, parity) correctly. Before, the
- * receiver would get stuck if an error occurred and we did not restart
- * the input DMA.
- * * Cosmetics (indentation, some code made into inlines)
- * * Some more debug options
- * * Actually shut down the serial port (DMA irq, DMA reset, receiver stop)
- * when the last open is closed. Corresponding fixes in startup().
- * * rs_close() "tx FIFO wait" code moved into right place, bug & -> && fixed
- * and make a special case out of port 1 (R_DMA_CHx_STATUS is broken for that)
- * * e100_disable_rx/enable_rx just disables/enables the receiver, not RTS
- *
- * Revision 1.23 2000/01/24 17:46:19 johana
- * Wait for flush of DMA/FIFO when closing port.
- *
- * Revision 1.22 2000/01/20 18:10:23 johana
- * Added TIOCMGET ioctl to return modem status.
- * Implemented modem status/control that works with the extra signals
- * (DTR, DSR, RI,CD) as well.
- * 3 different modes supported:
- * ser0 on PB (Bundy), ser1 on PB (Lisa) and ser2 on PA (Bundy)
- * Fixed DEF_TX value that caused the serial transmitter pin (txd) to go to 0 when
- * closing the last filehandle, NASTY!.
- * Added break generation, not tested though!
- * Use IRQF_SHARED when request_irq() for ser2 and ser3 (shared with) par0 and par1.
- * You can't use them at the same time (yet..), but you can hopefully switch
- * between ser2/par0, ser3/par1 with the same kernel config.
- * Replaced some magic constants with defines
- *
- *
*/
static char *serial_version = "$Revision: 1.25 $";
@@ -446,6 +30,7 @@ static char *serial_version = "$Revision: 1.25 $";
#include <asm/io.h>
#include <asm/irq.h>
+#include <asm/dma.h>
#include <asm/system.h>
#include <linux/delay.h>
@@ -454,8 +39,9 @@ static char *serial_version = "$Revision: 1.25 $";
/* non-arch dependent serial structures are in linux/serial.h */
#include <linux/serial.h>
/* while we keep our own stuff (struct e100_serial) in a local .h file */
-#include "serial.h"
+#include "crisv10.h"
#include <asm/fasttimer.h>
+#include <asm/arch/io_interface_mux.h>
#ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER
#ifndef CONFIG_ETRAX_FAST_TIMER
@@ -504,18 +90,6 @@ struct tty_driver *serial_driver;
from eLinux */
#define SERIAL_HANDLE_EARLY_ERRORS
-/* Defined and used in n_tty.c, but we need it here as well */
-#define TTY_THRESHOLD_THROTTLE 128
-
-/* Due to buffersizes and threshold values, our SERIAL_DESCR_BUF_SIZE
- * must not be to high or flow control won't work if we leave it to the tty
- * layer so we have our own throttling in flush_to_flip
- * TTY_FLIPBUF_SIZE=512,
- * TTY_THRESHOLD_THROTTLE/UNTHROTTLE=128
- * BUF_SIZE can't be > 128
- */
-#define CRIS_BUF_SIZE 512
-
/* Currently 16 descriptors x 128 bytes = 2048 bytes */
#define SERIAL_DESCR_BUF_SIZE 256
@@ -588,13 +162,13 @@ unsigned long timer_data_to_ns(unsigned long timer_data);
static void change_speed(struct e100_serial *info);
static void rs_throttle(struct tty_struct * tty);
static void rs_wait_until_sent(struct tty_struct *tty, int timeout);
-static int rs_write(struct tty_struct * tty, int from_user,
- const unsigned char *buf, int count);
+static int rs_write(struct tty_struct *tty,
+ const unsigned char *buf, int count);
#ifdef CONFIG_ETRAX_RS485
-static int e100_write_rs485(struct tty_struct * tty, int from_user,
- const unsigned char *buf, int count);
+static int e100_write_rs485(struct tty_struct *tty,
+ const unsigned char *buf, int count);
#endif
-static int get_lsr_info(struct e100_serial * info, unsigned int *value);
+static int get_lsr_info(struct e100_serial *info, unsigned int *value);
#define DEF_BAUD 115200 /* 115.2 kbit/s */
@@ -679,20 +253,39 @@ static struct e100_serial rs_table[] = {
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 2,
+ .dma_owner = dma_ser0,
+ .io_if = if_serial_0,
#ifdef CONFIG_ETRAX_SERIAL_PORT0
.enabled = 1,
#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
.dma_out_enabled = 1,
+ .dma_out_nbr = SER0_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 0 dma tr",
#else
.dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
.dma_in_enabled = 1,
+ .dma_in_nbr = SER0_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 0 dma rec",
#else
- .dma_in_enabled = 0
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
+ .io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
@@ -714,20 +307,42 @@ static struct e100_serial rs_table[] = {
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 3,
+ .dma_owner = dma_ser1,
+ .io_if = if_serial_1,
#ifdef CONFIG_ETRAX_SERIAL_PORT1
.enabled = 1,
+ .io_if_description = "ser1",
#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
.dma_out_enabled = 1,
+ .dma_out_nbr = SER1_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 1 dma tr",
#else
.dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
.dma_in_enabled = 1,
+ .dma_in_nbr = SER1_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 1 dma rec",
#else
- .dma_in_enabled = 0
+			.dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
+ .io_if_description = NULL,
+ .dma_in_irq_nbr = 0,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
@@ -748,20 +363,40 @@ static struct e100_serial rs_table[] = {
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 0,
+ .dma_owner = dma_ser2,
+ .io_if = if_serial_2,
#ifdef CONFIG_ETRAX_SERIAL_PORT2
.enabled = 1,
+ .io_if_description = "ser2",
#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
.dma_out_enabled = 1,
+ .dma_out_nbr = SER2_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 2 dma tr",
#else
.dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
.dma_in_enabled = 1,
+ .dma_in_nbr = SER2_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 2 dma rec",
#else
- .dma_in_enabled = 0
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL,
#endif
#else
.enabled = 0,
+ .io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
@@ -782,20 +417,40 @@ static struct e100_serial rs_table[] = {
.rx_ctrl = DEF_RX,
.tx_ctrl = DEF_TX,
.iseteop = 1,
+ .dma_owner = dma_ser3,
+ .io_if = if_serial_3,
#ifdef CONFIG_ETRAX_SERIAL_PORT3
.enabled = 1,
+ .io_if_description = "ser3",
#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
.dma_out_enabled = 1,
+ .dma_out_nbr = SER3_TX_DMA_NBR,
+ .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR,
+ .dma_out_irq_flags = IRQF_DISABLED,
+ .dma_out_irq_description = "serial 3 dma tr",
#else
.dma_out_enabled = 0,
+ .dma_out_nbr = UINT_MAX,
+ .dma_out_irq_nbr = 0,
+ .dma_out_irq_flags = 0,
+ .dma_out_irq_description = NULL,
#endif
#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
.dma_in_enabled = 1,
+ .dma_in_nbr = SER3_RX_DMA_NBR,
+ .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR,
+ .dma_in_irq_flags = IRQF_DISABLED,
+ .dma_in_irq_description = "serial 3 dma rec",
#else
- .dma_in_enabled = 0
+ .dma_in_enabled = 0,
+ .dma_in_nbr = UINT_MAX,
+ .dma_in_irq_nbr = 0,
+ .dma_in_irq_flags = 0,
+ .dma_in_irq_description = NULL
#endif
#else
.enabled = 0,
+ .io_if_description = NULL,
.dma_out_enabled = 0,
.dma_in_enabled = 0
#endif
@@ -1416,12 +1071,11 @@ e100_dtr(struct e100_serial *info, int set)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
*e100_modem_pins[info->line].dtr_shadow &= ~mask;
*e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
#ifdef SERIAL_DEBUG_IO
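From this point on, the crisv10 driver consistently replaces the deprecated save_flags()/cli()/restore_flags() trio with local_irq_save()/local_irq_restore(); the read-modify-write of the pin shadow registers still runs with interrupts disabled, just through the current API and only on the local CPU. A minimal kernel-style sketch of the pattern, with invented names:

#include <linux/irqflags.h>

static unsigned char ctrl_shadow;

static void update_ctrl_shadow(volatile unsigned char *port,
			       unsigned char mask, int set)
{
	unsigned long flags;

	local_irq_save(flags);		/* interrupts off on this CPU only */
	ctrl_shadow &= ~mask;
	if (set)
		ctrl_shadow |= mask;
	*port = ctrl_shadow;
	local_irq_restore(flags);	/* back to the previous interrupt state */
}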
@@ -1440,12 +1094,11 @@ e100_rts(struct e100_serial *info, int set)
{
#ifndef CONFIG_SVINTO_SIM
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
info->rx_ctrl &= ~E100_RTS_MASK;
info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */
info->port[REG_REC_CTRL] = info->rx_ctrl;
- restore_flags(flags);
+ local_irq_restore(flags);
#ifdef SERIAL_DEBUG_IO
printk("ser%i rts %i\n", info->line, set);
#endif
@@ -1463,12 +1116,11 @@ e100_ri_out(struct e100_serial *info, int set)
unsigned char mask = e100_modem_pins[info->line].ri_mask;
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
*e100_modem_pins[info->line].ri_shadow &= ~mask;
*e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
#endif
}
@@ -1481,12 +1133,11 @@ e100_cd_out(struct e100_serial *info, int set)
unsigned char mask = e100_modem_pins[info->line].cd_mask;
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
*e100_modem_pins[info->line].cd_shadow &= ~mask;
*e100_modem_pins[info->line].cd_shadow |= (set ? 0 : mask);
*e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
#endif
}
@@ -1560,8 +1211,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info)
/* Disable output DMA channel for the serial port in question
* ( set to something other then serialX)
*/
- save_flags(flags);
- cli();
+ local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line));
if (info->line == 0) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) ==
@@ -1589,7 +1239,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info)
}
}
*R_GEN_CONFIG = genconfig_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
@@ -1597,8 +1247,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line));
/* Enable output DMA channel for the serial port in question */
if (info->line == 0) {
@@ -1615,7 +1264,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info)
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3);
}
*R_GEN_CONFIG = genconfig_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
static void e100_disable_rxdma_channel(struct e100_serial *info)
@@ -1625,8 +1274,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info)
/* Disable input DMA channel for the serial port in question
* ( set to something other then serialX)
*/
- save_flags(flags);
- cli();
+ local_irq_save(flags);
if (info->line == 0) {
if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) ==
IO_STATE(R_GEN_CONFIG, dma7, serial0)) {
@@ -1653,7 +1301,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info)
}
}
*R_GEN_CONFIG = genconfig_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
@@ -1661,8 +1309,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* Enable input DMA channel for the serial port in question */
if (info->line == 0) {
genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7);
@@ -1678,7 +1325,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info)
genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3);
}
*R_GEN_CONFIG = genconfig_shadow;
- restore_flags(flags);
+ local_irq_restore(flags);
}
#ifdef SERIAL_HANDLE_EARLY_ERRORS
@@ -1785,7 +1432,7 @@ e100_enable_rs485(struct tty_struct *tty,struct rs485_control *r)
}
static int
-e100_write_rs485(struct tty_struct *tty, int from_user,
+e100_write_rs485(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct e100_serial * info = (struct e100_serial *)tty->driver_data;
@@ -1798,7 +1445,7 @@ e100_write_rs485(struct tty_struct *tty, int from_user,
*/
info->rs485.enabled = 1;
/* rs_write now deals with RS485 if enabled */
- count = rs_write(tty, from_user, buf, count);
+ count = rs_write(tty, buf, count);
info->rs485.enabled = old_enabled;
return count;
}
@@ -1836,7 +1483,7 @@ rs_stop(struct tty_struct *tty)
unsigned long flags;
unsigned long xoff;
- save_flags(flags); cli();
+ local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n",
CIRC_CNT(info->xmit.head,
info->xmit.tail,SERIAL_XMIT_SIZE)));
@@ -1848,7 +1495,7 @@ rs_stop(struct tty_struct *tty)
}
*((unsigned long *)&info->port[REG_XOFF]) = xoff;
- restore_flags(flags);
+ local_irq_restore(flags);
}
}
@@ -1860,7 +1507,7 @@ rs_start(struct tty_struct *tty)
unsigned long flags;
unsigned long xoff;
- save_flags(flags); cli();
+ local_irq_save(flags);
DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n",
CIRC_CNT(info->xmit.head,
info->xmit.tail,SERIAL_XMIT_SIZE)));
@@ -1875,7 +1522,7 @@ rs_start(struct tty_struct *tty)
info->xmit.head != info->xmit.tail && info->xmit.buf)
e100_enable_serial_tx_ready_irq(info);
- restore_flags(flags);
+ local_irq_restore(flags);
}
}
@@ -2055,8 +1702,7 @@ static int serial_fast_timer_expired = 0;
static void flush_timeout_function(unsigned long data);
#define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\
unsigned long timer_flags; \
- save_flags(timer_flags); \
- cli(); \
+ local_irq_save(timer_flags); \
if (fast_timers[info->line].function == NULL) { \
serial_fast_timer_started++; \
TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \
@@ -2070,7 +1716,7 @@ static void flush_timeout_function(unsigned long data);
else { \
TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \
} \
- restore_flags(timer_flags); \
+ local_irq_restore(timer_flags); \
}
#define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec)
@@ -2099,8 +1745,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
if (!info->first_recv_buffer)
info->first_recv_buffer = buffer;
@@ -2113,7 +1758,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer)
if (info->recv_cnt > info->max_recv_cnt)
info->max_recv_cnt = info->recv_cnt;
- restore_flags(flags);
+ local_irq_restore(flags);
}
static int
@@ -2133,11 +1778,7 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
info->icount.rx++;
} else {
struct tty_struct *tty = info->tty;
- *tty->flip.char_buf_ptr = data;
- *tty->flip.flag_buf_ptr = flag;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ tty_insert_flip_char(tty, data, flag);
info->icount.rx++;
}
@@ -2322,7 +1963,6 @@ start_receive(struct e100_serial *info)
*/
return;
#endif
- info->tty->flip.count = 0;
if (info->uses_dma_in) {
/* reset the input dma channel to be sure it works */
@@ -2484,32 +2124,20 @@ static void flush_to_flip_buffer(struct e100_serial *info)
{
struct tty_struct *tty;
struct etrax_recv_buffer *buffer;
- unsigned int length;
unsigned long flags;
- int max_flip_size;
-
- if (!info->first_recv_buffer)
- return;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
+ tty = info->tty;
- if (!(tty = info->tty)) {
- restore_flags(flags);
+ if (!tty) {
+ local_irq_restore(flags);
return;
}
while ((buffer = info->first_recv_buffer) != NULL) {
unsigned int count = buffer->length;
- count = tty_buffer_request_room(tty, count);
- if (count == 0) /* Throttle ?? */
- break;
-
- if (count > 1)
- tty_insert_flip_strings(tty, buffer->buffer, count - 1);
- tty_insert_flip_char(tty, buffer->buffer[count-1], buffer->error);
-
+ tty_insert_flip_string(tty, buffer->buffer, count);
info->recv_cnt -= count;
if (count == buffer->length) {
@@ -2525,18 +2153,9 @@ static void flush_to_flip_buffer(struct e100_serial *info)
if (!info->first_recv_buffer)
info->last_recv_buffer = NULL;
- restore_flags(flags);
-
- DFLIP(
- if (1) {
- DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx);
- DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty));
- DEBUG_LOG(info->line, "room %lu\n", tty->ldisc.receive_room(tty));
- }
+ local_irq_restore(flags);
- );
-
- /* this includes a check for low-latency */
+ /* This includes a check for low-latency */
tty_flip_buffer_push(tty);
}
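The receive path now uses the generic tty flip-buffer helpers instead of poking tty->flip.char_buf_ptr, flag_buf_ptr and count directly and doing its own throttling; the tty core sizes the buffers and pushes them to the line discipline. A brief sketch of those 2.6.24-era calls (push_rx() itself is an invented helper, not part of the driver):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void push_rx(struct tty_struct *tty, const unsigned char *data,
		    int len, char flag)
{
	if (len <= 0)
		return;
	/* a single flagged character (TTY_PARITY, TTY_FRAME, ...) */
	tty_insert_flip_char(tty, data[0], flag);
	/* ...or a whole run of normal characters */
	if (len > 1)
		tty_insert_flip_string(tty, data + 1, len - 1);
	tty_flip_buffer_push(tty);	/* hand the data to the line discipline */
}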
@@ -2679,21 +2298,7 @@ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info)
printk("!NO TTY!\n");
return info;
}
- if (tty->flip.count >= CRIS_BUF_SIZE - TTY_THRESHOLD_THROTTLE) {
- /* check TTY_THROTTLED first so it indicates our state */
- if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) {
- DFLOW(DEBUG_LOG(info->line, "rs_throttle flip.count: %i\n", tty->flip.count));
- rs_throttle(tty);
- }
- }
- if (tty->flip.count >= CRIS_BUF_SIZE) {
- DEBUG_LOG(info->line, "force FLIP! %i\n", tty->flip.count);
- tty->flip.work.func((void *) tty);
- if (tty->flip.count >= CRIS_BUF_SIZE) {
- DEBUG_LOG(info->line, "FLIP FULL! %i\n", tty->flip.count);
- return info; /* if TTY_DONT_FLIP is set */
- }
- }
+
/* Read data and status at the same time */
data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
more_data:
@@ -2746,27 +2351,26 @@ more_data:
DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt);
info->errorcode = ERRCODE_INSERT_BREAK;
} else {
+ unsigned char data = IO_EXTRACT(R_SERIAL0_READ,
+ data_in, data_read);
+ char flag = TTY_NORMAL;
if (info->errorcode == ERRCODE_INSERT_BREAK) {
- info->icount.brk++;
- *tty->flip.char_buf_ptr = 0;
- *tty->flip.flag_buf_ptr = TTY_BREAK;
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
+ struct tty_struct *tty = info->tty;
+ tty_insert_flip_char(tty, 0, flag);
info->icount.rx++;
}
- *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) {
info->icount.parity++;
- *tty->flip.flag_buf_ptr = TTY_PARITY;
+ flag = TTY_PARITY;
} else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) {
info->icount.overrun++;
- *tty->flip.flag_buf_ptr = TTY_OVERRUN;
+ flag = TTY_OVERRUN;
} else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) {
info->icount.frame++;
- *tty->flip.flag_buf_ptr = TTY_FRAME;
+ flag = TTY_FRAME;
}
+ tty_insert_flip_char(tty, data, flag);
info->errorcode = 0;
}
info->break_detected_cnt = 0;
@@ -2782,16 +2386,14 @@ more_data:
log_int(rdpc(), 0, 0);
}
);
- *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read);
- *tty->flip.flag_buf_ptr = 0;
+ tty_insert_flip_char(tty,
+ IO_EXTRACT(R_SERIAL0_READ, data_in, data_read),
+ TTY_NORMAL);
} else {
DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read);
}
- tty->flip.flag_buf_ptr++;
- tty->flip.char_buf_ptr++;
- tty->flip.count++;
info->icount.rx++;
data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]);
if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) {
@@ -2929,7 +2531,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
if (info->x_char) {
unsigned char rstat;
DFLOW(DEBUG_LOG(info->line, "tx_int: xchar 0x%02X\n", info->x_char));
- save_flags(flags); cli();
+ local_irq_save(flags);
rstat = info->port[REG_STATUS];
DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
@@ -2938,7 +2540,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
info->x_char = 0;
/* We must enable since it is disabled in ser_interrupt */
e100_enable_serial_tx_ready_irq(info);
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
if (info->uses_dma_out) {
@@ -2946,7 +2548,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
int i;
/* We only use normal tx interrupt when sending x_char */
DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0));
- save_flags(flags); cli();
+ local_irq_save(flags);
rstat = info->port[REG_STATUS];
DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat));
e100_disable_serial_tx_ready_irq(info);
@@ -2959,7 +2561,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
nop();
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue);
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
/* Normal char-by-char interrupt */
@@ -2973,7 +2575,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
}
DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail]));
/* Send a byte, rs485 timing is critical so turn of ints */
- save_flags(flags); cli();
+ local_irq_save(flags);
info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail];
info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1);
info->icount.tx++;
@@ -2997,7 +2599,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info)
/* We must enable since it is disabled in ser_interrupt */
e100_enable_serial_tx_ready_irq(info);
}
- restore_flags(flags);
+ local_irq_restore(flags);
if (CIRC_CNT(info->xmit.head,
info->xmit.tail,
@@ -3022,7 +2624,7 @@ ser_interrupt(int irq, void *dev_id)
int handled = 0;
static volatile unsigned long reentered_ready_mask = 0;
- save_flags(flags); cli();
+ local_irq_save(flags);
irq_mask1_rd = *R_IRQ_MASK1_RD;
/* First handle all rx interrupts with ints disabled */
info = rs_table;
@@ -3067,7 +2669,7 @@ ser_interrupt(int irq, void *dev_id)
/* Unblock the serial interrupt */
*R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set);
- sti();
+ local_irq_enable();
ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */
info = rs_table;
for (i = 0; i < NR_PORTS; i++) {
@@ -3080,11 +2682,11 @@ ser_interrupt(int irq, void *dev_id)
ready_mask <<= 2;
}
/* handle_ser_tx_interrupt enables tr_ready interrupts */
- cli();
+ local_irq_disable();
/* Handle reentered TX interrupt */
irq_mask1_rd = reentered_ready_mask;
}
- cli();
+ local_irq_disable();
tx_started = 0;
} else {
unsigned long ready_mask;
@@ -3100,7 +2702,7 @@ ser_interrupt(int irq, void *dev_id)
}
}
- restore_flags(flags);
+ local_irq_restore(flags);
return IRQ_RETVAL(handled);
} /* ser_interrupt */
#endif
@@ -3121,11 +2723,13 @@ ser_interrupt(int irq, void *dev_id)
* them using rs_sched_event(), and they get done here.
*/
static void
-do_softint(void *private_)
+do_softint(struct work_struct *work)
{
- struct e100_serial *info = (struct e100_serial *) private_;
+ struct e100_serial *info;
struct tty_struct *tty;
+ info = container_of(work, struct e100_serial, work);
+
tty = info->tty;
if (!tty)
return;
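do_softint() is converted to the current workqueue convention: the handler receives a struct work_struct pointer and recovers its e100_serial with container_of(), instead of getting a void * stored at INIT_WORK time. A compact sketch of that idiom, with invented names:

#include <linux/workqueue.h>

struct demo_port {
	int line;
	struct work_struct work;
};

static void demo_softint(struct work_struct *work)
{
	struct demo_port *port = container_of(work, struct demo_port, work);

	/* ... act on port->line ... */
	(void)port;
}

static void demo_init(struct demo_port *port)
{
	INIT_WORK(&port->work, demo_softint);	/* no separate data argument */
}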
@@ -3145,13 +2749,12 @@ startup(struct e100_serial * info)
if (!xmit_page)
return -ENOMEM;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* if it was already initialized, skip this */
if (info->flags & ASYNC_INITIALIZED) {
- restore_flags(flags);
+ local_irq_restore(flags);
free_page(xmit_page);
return 0;
}
@@ -3277,7 +2880,7 @@ startup(struct e100_serial * info)
info->flags |= ASYNC_INITIALIZED;
- restore_flags(flags);
+ local_irq_restore(flags);
return 0;
}
@@ -3328,8 +2931,7 @@ shutdown(struct e100_serial * info)
info->irq);
#endif
- save_flags(flags);
- cli(); /* Disable interrupts */
+ local_irq_save(flags);
if (info->xmit.buf) {
free_page((unsigned long)info->xmit.buf);
@@ -3353,7 +2955,7 @@ shutdown(struct e100_serial * info)
set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
- restore_flags(flags);
+ local_irq_restore(flags);
}
@@ -3411,7 +3013,6 @@ change_speed(struct e100_serial *info)
DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
}
- }
#endif
else
{
@@ -3445,8 +3046,7 @@ change_speed(struct e100_serial *info)
#ifndef CONFIG_SVINTO_SIM
/* start with default settings and then fill in changes */
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* 8 bit, no/even parity */
info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) |
IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) |
@@ -3510,7 +3110,7 @@ change_speed(struct e100_serial *info)
}
*((unsigned long *)&info->port[REG_XOFF]) = xoff;
- restore_flags(flags);
+ local_irq_restore(flags);
#endif /* !CONFIG_SVINTO_SIM */
update_char_time(info);
@@ -3538,13 +3138,12 @@ rs_flush_chars(struct tty_struct *tty)
/* this protection might not exactly be necessary here */
- save_flags(flags);
- cli();
+ local_irq_save(flags);
start_transmit(info);
- restore_flags(flags);
+ local_irq_restore(flags);
}
-static int rs_raw_write(struct tty_struct * tty, int from_user,
+static int rs_raw_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
int c, ret = 0;
@@ -3567,53 +3166,19 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
SIMCOUT(buf, count);
return count;
#endif
- save_flags(flags);
+ local_save_flags(flags);
DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
- /* the cli/restore_flags pairs below are needed because the
- * DMA interrupt handler moves the info->xmit values. the memcpy
- * needs to be in the critical region unfortunately, because we
- * need to read xmit values, memcpy, write xmit values in one
- * atomic operation... this could perhaps be avoided by more clever
- * design.
+ /* The local_irq_disable/restore_flags pairs below are needed
+ * because the DMA interrupt handler moves the info->xmit values.
+ * the memcpy needs to be in the critical region unfortunately,
+ * because we need to read xmit values, memcpy, write xmit values
+ * in one atomic operation... this could perhaps be avoided by
+ * more clever design.
*/
- if (from_user) {
- mutex_lock(&tmp_buf_mutex);
- while (1) {
- int c1;
- c = CIRC_SPACE_TO_END(info->xmit.head,
- info->xmit.tail,
- SERIAL_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
-
- c -= copy_from_user(tmp_buf, buf, c);
- if (!c) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
- cli();
- c1 = CIRC_SPACE_TO_END(info->xmit.head,
- info->xmit.tail,
- SERIAL_XMIT_SIZE);
- if (c1 < c)
- c = c1;
- memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
- info->xmit.head = ((info->xmit.head + c) &
- (SERIAL_XMIT_SIZE-1));
- restore_flags(flags);
- buf += c;
- count -= c;
- ret += c;
- }
- mutex_unlock(&tmp_buf_mutex);
- } else {
- cli();
+ local_irq_disable();
while (count) {
c = CIRC_SPACE_TO_END(info->xmit.head,
info->xmit.tail,
@@ -3631,8 +3196,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
count -= c;
ret += c;
}
- restore_flags(flags);
- }
+ local_irq_restore(flags);
/* enable transmitter if not running, unless the tty is stopped
* this does not need IRQ protection since if tr_running == 0
@@ -3651,7 +3215,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user,
} /* raw_raw_write() */
static int
-rs_write(struct tty_struct * tty, int from_user,
+rs_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
#if defined(CONFIG_ETRAX_RS485)
@@ -3678,7 +3242,7 @@ rs_write(struct tty_struct * tty, int from_user,
}
#endif /* CONFIG_ETRAX_RS485 */
- count = rs_raw_write(tty, from_user, buf, count);
+ count = rs_raw_write(tty, buf, count);
#if defined(CONFIG_ETRAX_RS485)
if (info->rs485.enabled)
@@ -3746,10 +3310,9 @@ rs_flush_buffer(struct tty_struct *tty)
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
- save_flags(flags);
- cli();
+ local_irq_save(flags);
info->xmit.head = info->xmit.tail = 0;
- restore_flags(flags);
+ local_irq_restore(flags);
tty_wakeup(tty);
}
@@ -3767,7 +3330,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
unsigned long flags;
- save_flags(flags); cli();
+ local_irq_save(flags);
if (info->uses_dma_out) {
/* Put the DMA on hold and disable the channel */
*info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold);
@@ -3784,7 +3347,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch)
DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch));
info->x_char = ch;
e100_enable_serial_tx_ready_irq(info);
- restore_flags(flags);
+ local_irq_restore(flags);
}
/*
@@ -3996,21 +3559,61 @@ char *get_control_state_str(int MLines, char *s)
}
#endif
+static void
+rs_break(struct tty_struct *tty, int break_state)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned long flags;
+
+ if (!info->port)
+ return;
+
+ local_irq_save(flags);
+ if (break_state == -1) {
+ /* Go to manual mode and set the txd pin to 0 */
+ /* Clear bit 7 (txd) and 6 (tr_enable) */
+ info->tx_ctrl &= 0x3F;
+ } else {
+ /* Set bit 7 (txd) and 6 (tr_enable) */
+ info->tx_ctrl |= (0x80 | 0x40);
+ }
+ info->port[REG_TR_CTRL] = info->tx_ctrl;
+ local_irq_restore(flags);
+}
+
static int
-get_modem_info(struct e100_serial * info, unsigned int *value)
+rs_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
{
- unsigned int result;
- /* Polarity isn't verified */
-#if 0 /*def SERIAL_DEBUG_IO */
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
- printk("get_modem_info: RTS: %i DTR: %i CD: %i RI: %i DSR: %i CTS: %i\n",
- E100_RTS_GET(info),
- E100_DTR_GET(info),
- E100_CD_GET(info),
- E100_RI_GET(info),
- E100_DSR_GET(info),
- E100_CTS_GET(info));
-#endif
+ if (clear & TIOCM_RTS)
+ e100_rts(info, 0);
+ if (clear & TIOCM_DTR)
+ e100_dtr(info, 0);
+ /* Handle FEMALE behaviour */
+ if (clear & TIOCM_RI)
+ e100_ri_out(info, 0);
+ if (clear & TIOCM_CD)
+ e100_cd_out(info, 0);
+
+ if (set & TIOCM_RTS)
+ e100_rts(info, 1);
+ if (set & TIOCM_DTR)
+ e100_dtr(info, 1);
+ /* Handle FEMALE behaviour */
+ if (set & TIOCM_RI)
+ e100_ri_out(info, 1);
+ if (set & TIOCM_CD)
+ e100_cd_out(info, 1);
+ return 0;
+}
+
+static int
+rs_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+ unsigned int result;
result =
(!E100_RTS_GET(info) ? TIOCM_RTS : 0)
@@ -4021,95 +3624,20 @@ get_modem_info(struct e100_serial * info, unsigned int *value)
| (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
#ifdef SERIAL_DEBUG_IO
- printk("e100ser: modem state: %i 0x%08X\n", result, result);
+ printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
+ info->line, result, result);
{
char s[100];
get_control_state_str(result, s);
- printk("state: %s\n", s);
+ printk(KERN_DEBUG "state: %s\n", s);
}
#endif
- if (copy_to_user(value, &result, sizeof(int)))
- return -EFAULT;
- return 0;
-}
+ return result;
-
-static int
-set_modem_info(struct e100_serial * info, unsigned int cmd,
- unsigned int *value)
-{
- unsigned int arg;
-
- if (copy_from_user(&arg, value, sizeof(int)))
- return -EFAULT;
-
- switch (cmd) {
- case TIOCMBIS:
- if (arg & TIOCM_RTS) {
- e100_rts(info, 1);
- }
- if (arg & TIOCM_DTR) {
- e100_dtr(info, 1);
- }
- /* Handle FEMALE behaviour */
- if (arg & TIOCM_RI) {
- e100_ri_out(info, 1);
- }
- if (arg & TIOCM_CD) {
- e100_cd_out(info, 1);
- }
- break;
- case TIOCMBIC:
- if (arg & TIOCM_RTS) {
- e100_rts(info, 0);
- }
- if (arg & TIOCM_DTR) {
- e100_dtr(info, 0);
- }
- /* Handle FEMALE behaviour */
- if (arg & TIOCM_RI) {
- e100_ri_out(info, 0);
- }
- if (arg & TIOCM_CD) {
- e100_cd_out(info, 0);
- }
- break;
- case TIOCMSET:
- e100_rts(info, arg & TIOCM_RTS);
- e100_dtr(info, arg & TIOCM_DTR);
- /* Handle FEMALE behaviour */
- e100_ri_out(info, arg & TIOCM_RI);
- e100_cd_out(info, arg & TIOCM_CD);
- break;
- default:
- return -EINVAL;
- }
- return 0;
}
-static void
-rs_break(struct tty_struct *tty, int break_state)
-{
- struct e100_serial * info = (struct e100_serial *)tty->driver_data;
- unsigned long flags;
-
- if (!info->port)
- return;
-
- save_flags(flags);
- cli();
- if (break_state == -1) {
- /* Go to manual mode and set the txd pin to 0 */
- info->tx_ctrl &= 0x3F; /* Clear bit 7 (txd) and 6 (tr_enable) */
- } else {
- info->tx_ctrl |= (0x80 | 0x40); /* Set bit 7 (txd) and 6 (tr_enable) */
- }
- info->port[REG_TR_CTRL] = info->tx_ctrl;
- restore_flags(flags);
-}
-
static int
rs_ioctl(struct tty_struct *tty, struct file * file,
unsigned int cmd, unsigned long arg)
@@ -4124,49 +3652,45 @@ rs_ioctl(struct tty_struct *tty, struct file * file,
}
switch (cmd) {
- case TIOCMGET:
- return get_modem_info(info, (unsigned int *) arg);
- case TIOCMBIS:
- case TIOCMBIC:
- case TIOCMSET:
- return set_modem_info(info, cmd, (unsigned int *) arg);
- case TIOCGSERIAL:
- return get_serial_info(info,
- (struct serial_struct *) arg);
- case TIOCSSERIAL:
- return set_serial_info(info,
- (struct serial_struct *) arg);
- case TIOCSERGETLSR: /* Get line status register */
- return get_lsr_info(info, (unsigned int *) arg);
-
- case TIOCSERGSTRUCT:
- if (copy_to_user((struct e100_serial *) arg,
- info, sizeof(struct e100_serial)))
- return -EFAULT;
- return 0;
+ case TIOCGSERIAL:
+ return get_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSSERIAL:
+ return set_serial_info(info,
+ (struct serial_struct *) arg);
+ case TIOCSERGETLSR: /* Get line status register */
+ return get_lsr_info(info, (unsigned int *) arg);
+
+ case TIOCSERGSTRUCT:
+ if (copy_to_user((struct e100_serial *) arg,
+ info, sizeof(struct e100_serial)))
+ return -EFAULT;
+ return 0;
#if defined(CONFIG_ETRAX_RS485)
- case TIOCSERSETRS485:
- {
- struct rs485_control rs485ctrl;
- if (copy_from_user(&rs485ctrl, (struct rs485_control*)arg, sizeof(rs485ctrl)))
- return -EFAULT;
+ case TIOCSERSETRS485:
+ {
+ struct rs485_control rs485ctrl;
+ if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg,
+ sizeof(rs485ctrl)))
+ return -EFAULT;
- return e100_enable_rs485(tty, &rs485ctrl);
- }
+ return e100_enable_rs485(tty, &rs485ctrl);
+ }
- case TIOCSERWRRS485:
- {
- struct rs485_write rs485wr;
- if (copy_from_user(&rs485wr, (struct rs485_write*)arg, sizeof(rs485wr)))
- return -EFAULT;
+ case TIOCSERWRRS485:
+ {
+ struct rs485_write rs485wr;
+ if (copy_from_user(&rs485wr, (struct rs485_write *)arg,
+ sizeof(rs485wr)))
+ return -EFAULT;
- return e100_write_rs485(tty, 1, rs485wr.outc, rs485wr.outc_size);
- }
+ return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size);
+ }
#endif
- default:
- return -ENOIOCTLCMD;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
@@ -4191,46 +3715,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
}
-/* In debugport.c - register a console write function that uses the normal
- * serial driver
- */
-typedef int (*debugport_write_function)(int i, const char *buf, unsigned int len);
-
-extern debugport_write_function debug_write_function;
-
-static int rs_debug_write_function(int i, const char *buf, unsigned int len)
-{
- int cnt;
- int written = 0;
- struct tty_struct *tty;
- static int recurse_cnt = 0;
-
- tty = rs_table[i].tty;
- if (tty) {
- unsigned long flags;
- if (recurse_cnt > 5) /* We skip this debug output */
- return 1;
-
- local_irq_save(flags);
- recurse_cnt++;
- local_irq_restore(flags);
- do {
- cnt = rs_write(tty, 0, buf + written, len);
- if (cnt >= 0) {
- written += cnt;
- buf += cnt;
- len -= cnt;
- } else
- len = cnt;
- } while(len > 0);
- local_irq_save(flags);
- recurse_cnt--;
- local_irq_restore(flags);
- return 1;
- }
- return 0;
-}
-
/*
* ------------------------------------------------------------
* rs_close()
@@ -4252,11 +3736,10 @@ rs_close(struct tty_struct *tty, struct file * filp)
/* interrupts are disabled for this entire function */
- save_flags(flags);
- cli();
+ local_irq_save(flags);
if (tty_hung_up_p(filp)) {
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
@@ -4283,7 +3766,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
info->count = 0;
}
if (info->count) {
- restore_flags(flags);
+ local_irq_restore(flags);
return;
}
info->flags |= ASYNC_CLOSING;
@@ -4337,7 +3820,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&info->close_wait);
- restore_flags(flags);
+ local_irq_restore(flags);
/* port closed */
@@ -4359,6 +3842,28 @@ rs_close(struct tty_struct *tty, struct file * filp)
#endif
}
#endif
+
+ /*
+ * Release any allocated DMA irq's.
+ */
+ if (info->dma_in_enabled) {
+ free_irq(info->dma_in_irq_nbr, info);
+ cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
+ info->uses_dma_in = 0;
+#ifdef SERIAL_DEBUG_OPEN
+ printk(KERN_DEBUG "DMA irq '%s' freed\n",
+ info->dma_in_irq_description);
+#endif
+ }
+ if (info->dma_out_enabled) {
+ free_irq(info->dma_out_irq_nbr, info);
+ cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
+ info->uses_dma_out = 0;
+#ifdef SERIAL_DEBUG_OPEN
+ printk(KERN_DEBUG "DMA irq '%s' freed\n",
+ info->dma_out_irq_description);
+#endif
+ }
}
/*
@@ -4433,8 +3938,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- if (info->flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->close_wait);
+ wait_event_interruptible(info->close_wait,
+ !(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
@@ -4472,21 +3977,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready before block: ttyS%d, count = %d\n",
info->line, info->count);
#endif
- save_flags(flags);
- cli();
+ local_irq_save(flags);
if (!tty_hung_up_p(filp)) {
extra_count++;
info->count--;
}
- restore_flags(flags);
+ local_irq_restore(flags);
info->blocked_open++;
while (1) {
- save_flags(flags);
- cli();
+ local_irq_save(flags);
/* assert RTS and DTR */
e100_rts(info, 1);
e100_dtr(info, 1);
- restore_flags(flags);
+ local_irq_restore(flags);
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!(info->flags & ASYNC_INITIALIZED)) {
@@ -4528,6 +4031,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
return 0;
}
+static void
+deinit_port(struct e100_serial *info)
+{
+ if (info->dma_out_enabled) {
+ cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description);
+ free_irq(info->dma_out_irq_nbr, info);
+ }
+ if (info->dma_in_enabled) {
+ cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description);
+ free_irq(info->dma_in_irq_nbr, info);
+ }
+}
+
/*
* This routine is called whenever a serial port is opened.
* It performs the serial-specific initialization for the tty structure.
@@ -4538,9 +4054,9 @@ rs_open(struct tty_struct *tty, struct file * filp)
struct e100_serial *info;
int retval, line;
unsigned long page;
+ int allocated_resources = 0;
/* find which port we want to open */
-
line = tty->index;
if (line < 0 || line >= NR_PORTS)
@@ -4580,8 +4096,8 @@ rs_open(struct tty_struct *tty, struct file * filp)
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- if (info->flags & ASYNC_CLOSING)
- interruptible_sleep_on(&info->close_wait);
+ wait_event_interruptible(info->close_wait,
+ !(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
@@ -4591,12 +4107,85 @@ rs_open(struct tty_struct *tty, struct file * filp)
}
/*
+ * If DMA is enabled try to allocate the irq's.
+ */
+ if (info->count == 1) {
+ allocated_resources = 1;
+ if (info->dma_in_enabled) {
+ if (request_irq(info->dma_in_irq_nbr,
+ rec_interrupt,
+ info->dma_in_irq_flags,
+ info->dma_in_irq_description,
+ info)) {
+ printk(KERN_WARNING "DMA irq '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_in_irq_description);
+ /* Make sure we never try to use DMA in */
+ /* for the port again. */
+ info->dma_in_enabled = 0;
+ } else if (cris_request_dma(info->dma_in_nbr,
+ info->dma_in_irq_description,
+ DMA_VERBOSE_ON_ERROR,
+ info->dma_owner)) {
+ free_irq(info->dma_in_irq_nbr, info);
+ printk(KERN_WARNING "DMA '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_in_irq_description);
+ /* Make sure we never try to use DMA in */
+ /* for the port again. */
+ info->dma_in_enabled = 0;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ else
+ printk(KERN_DEBUG "DMA irq '%s' allocated\n",
+ info->dma_in_irq_description);
+#endif
+ }
+ if (info->dma_out_enabled) {
+ if (request_irq(info->dma_out_irq_nbr,
+ tr_interrupt,
+ info->dma_out_irq_flags,
+ info->dma_out_irq_description,
+ info)) {
+ printk(KERN_WARNING "DMA irq '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_out_irq_description);
+ /* Make sure we never try to use DMA out */
+ /* for the port again. */
+ info->dma_out_enabled = 0;
+ } else if (cris_request_dma(info->dma_out_nbr,
+ info->dma_out_irq_description,
+ DMA_VERBOSE_ON_ERROR,
+ info->dma_owner)) {
+ free_irq(info->dma_out_irq_nbr, info);
+ printk(KERN_WARNING "DMA '%s' busy; "
+ "falling back to non-DMA mode\n",
+ info->dma_out_irq_description);
+ /* Make sure we never try to use DMA out */
+ /* for the port again. */
+ info->dma_out_enabled = 0;
+ }
+#ifdef SERIAL_DEBUG_OPEN
+ else
+ printk(KERN_DEBUG "DMA irq '%s' allocated\n",
+ info->dma_out_irq_description);
+#endif
+ }
+ }
+
+ /*
* Start up the serial port
*/
retval = startup(info);
- if (retval)
+ if (retval) {
+ if (allocated_resources)
+ deinit_port(info);
+
+ /* FIXME: Decrease info->count here too? */
return retval;
+ }
+
retval = block_til_ready(tty, filp, info);
if (retval) {
@@ -4604,6 +4193,9 @@ rs_open(struct tty_struct *tty, struct file * filp)
printk("rs_open returning after block_til_ready with %d\n",
retval);
#endif
+ if (allocated_resources)
+ deinit_port(info);
+
return retval;
}
@@ -4793,6 +4385,8 @@ static const struct tty_operations rs_ops = {
.send_xchar = rs_send_xchar,
.wait_until_sent = rs_wait_until_sent,
.read_proc = rs_read_proc,
+ .tiocmget = rs_tiocmget,
+ .tiocmset = rs_tiocmset
};
static int __init
@@ -4810,9 +4404,27 @@ rs_init(void)
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
- init_timer(&flush_timer);
- flush_timer.function = timed_flush_handler;
- mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS);
+ setup_timer(&flush_timer, timed_flush_handler, 0);
+ mod_timer(&flush_timer, jiffies + 5);
+#endif
+
+#if defined(CONFIG_ETRAX_RS485)
+#if defined(CONFIG_ETRAX_RS485_ON_PA)
+ if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit,
+ rs485_pa_bit)) {
+ printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ "RS485 pin\n");
+ return -EBUSY;
+ }
+#endif
+#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
+ if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit,
+ rs485_port_g_bit)) {
+ printk(KERN_CRIT "ETRAX100LX serial: Could not allocate "
+ "RS485 pin\n");
+ return -EBUSY;
+ }
+#endif
#endif
/* Initialize the tty_driver structure */
@@ -4839,6 +4451,16 @@ rs_init(void)
/* do some initializing for the separate ports */
for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) {
+ if (info->enabled) {
+ if (cris_request_io_interface(info->io_if,
+ info->io_if_description)) {
+ printk(KERN_CRIT "ETRAX100LX async serial: "
+ "Could not allocate IO pins for "
+ "%s, port %d\n",
+ info->io_if_description, i);
+ info->enabled = 0;
+ }
+ }
info->uses_dma_in = 0;
info->uses_dma_out = 0;
info->line = i;
@@ -4872,7 +4494,7 @@ rs_init(void)
info->rs485.delay_rts_before_send = 0;
info->rs485.enabled = 0;
#endif
- INIT_WORK(&info->work, do_softint, info);
+ INIT_WORK(&info->work, do_softint);
if (info->enabled) {
printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n",
@@ -4890,64 +4512,17 @@ rs_init(void)
#endif
#ifndef CONFIG_SVINTO_SIM
+#ifndef CONFIG_ETRAX_KGDB
/* Not needed in simulator. May only complicate stuff. */
/* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */
- if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", NULL))
- panic("irq8");
-
-#ifdef CONFIG_ETRAX_SERIAL_PORT0
-#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT
- if (request_irq(SER0_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 0 dma tr", NULL))
- panic("irq22");
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN
- if (request_irq(SER0_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 0 dma rec", NULL))
- panic("irq23");
-#endif
-#endif
-
-#ifdef CONFIG_ETRAX_SERIAL_PORT1
-#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT
- if (request_irq(SER1_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 1 dma tr", NULL))
- panic("irq24");
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN
- if (request_irq(SER1_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 1 dma rec", NULL))
- panic("irq25");
-#endif
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT2
- /* DMA Shared with par0 (and SCSI0 and ATA) */
-#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT
- if (request_irq(SER2_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma tr", NULL))
- panic("irq18");
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN
- if (request_irq(SER2_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma rec", NULL))
- panic("irq19");
-#endif
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT3
- /* DMA Shared with par1 (and SCSI1 and Extern DMA 0) */
-#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT
- if (request_irq(SER3_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma tr", NULL))
- panic("irq20");
-#endif
-#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN
- if (request_irq(SER3_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma rec", NULL))
- panic("irq21");
-#endif
-#endif
+ if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
+ IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
+ panic("%s: Failed to request irq8", __FUNCTION__);
-#ifdef CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST
- if (request_irq(TIMER1_IRQ_NBR, timeout_interrupt, IRQF_SHARED | IRQF_DISABLED,
- "fast serial dma timeout", NULL)) {
- printk(KERN_CRIT "err: timer1 irq\n");
- }
#endif
#endif /* CONFIG_SVINTO_SIM */
- debug_write_function = rs_debug_write_function;
+
return 0;
}
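Most of the crisv10.c hunks above swap the long-deprecated save_flags()/cli()/restore_flags() sequence for local_irq_save()/local_irq_restore(). A minimal sketch of that pattern, using a hypothetical shadow-register update as the critical section (struct demo_port and its fields are placeholders, not the driver's own types):

    #include <linux/irqflags.h>

    /* Hypothetical port state; stands in for struct e100_serial. */
    struct demo_port {
        volatile unsigned char *ctrl_reg;   /* memory-mapped control register */
        unsigned char ctrl_shadow;          /* software copy of its contents */
    };

    static void demo_set_ctrl_bits(struct demo_port *p, unsigned char bits)
    {
        unsigned long flags;

        /* Save the local interrupt state and disable interrupts on this
         * CPU; this replaces the deprecated save_flags()/cli() pair. */
        local_irq_save(flags);
        p->ctrl_shadow |= bits;
        *p->ctrl_reg = p->ctrl_shadow;
        local_irq_restore(flags);           /* was restore_flags(flags) */
    }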
diff --git a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h
new file mode 100644
index 000000000000..ccd0f32b7372
--- /dev/null
+++ b/drivers/serial/crisv10.h
@@ -0,0 +1,146 @@
+/*
+ * serial.h: Arch-dep definitions for the Etrax100 serial driver.
+ *
+ * Copyright (C) 1998-2007 Axis Communications AB
+ */
+
+#ifndef _ETRAX_SERIAL_H
+#define _ETRAX_SERIAL_H
+
+#include <linux/circ_buf.h>
+#include <asm/termios.h>
+#include <asm/dma.h>
+#include <asm/arch/io_interface_mux.h>
+
+/* Software state per channel */
+
+#ifdef __KERNEL__
+/*
+ * This is our internal structure for each serial port's state.
+ *
+ * Many fields are paralleled by the structure used by the serial_struct
+ * structure.
+ *
+ * For definitions of the flags field, see tty.h
+ */
+
+#define SERIAL_RECV_DESCRIPTORS 8
+
+struct etrax_recv_buffer {
+ struct etrax_recv_buffer *next;
+ unsigned short length;
+ unsigned char error;
+ unsigned char pad;
+
+ unsigned char buffer[0];
+};
+
+struct e100_serial {
+ int baud;
+ volatile u8 *port; /* R_SERIALx_CTRL */
+ u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */
+
+ /* Output registers */
+ volatile u8 *oclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
+ volatile u32 *ofirstadr; /* adr to R_DMA_CHx_FIRST */
+ volatile u8 *ocmdadr; /* adr to R_DMA_CHx_CMD */
+ const volatile u8 *ostatusadr; /* adr to R_DMA_CHx_STATUS */
+
+ /* Input registers */
+ volatile u8 *iclrintradr; /* adr to R_DMA_CHx_CLR_INTR */
+ volatile u32 *ifirstadr; /* adr to R_DMA_CHx_FIRST */
+ volatile u8 *icmdadr; /* adr to R_DMA_CHx_CMD */
+ volatile u32 *idescradr; /* adr to R_DMA_CHx_DESCR */
+
+ int flags; /* defined in tty.h */
+
+ u8 rx_ctrl; /* shadow for R_SERIALx_REC_CTRL */
+ u8 tx_ctrl; /* shadow for R_SERIALx_TR_CTRL */
+ u8 iseteop; /* bit number for R_SET_EOP for the input dma */
+ int enabled; /* Set to 1 if the port is enabled in HW config */
+
+ u8 dma_out_enabled; /* Set to 1 if DMA should be used */
+ u8 dma_in_enabled; /* Set to 1 if DMA should be used */
+
+ /* end of fields defined in rs_table[] in .c-file */
+ int dma_owner;
+ unsigned int dma_in_nbr;
+ unsigned int dma_out_nbr;
+ unsigned int dma_in_irq_nbr;
+ unsigned int dma_out_irq_nbr;
+ unsigned long dma_in_irq_flags;
+ unsigned long dma_out_irq_flags;
+ char *dma_in_irq_description;
+ char *dma_out_irq_description;
+
+ enum cris_io_interface io_if;
+ char *io_if_description;
+
+ u8 uses_dma_in; /* Set to 1 if DMA is used */
+ u8 uses_dma_out; /* Set to 1 if DMA is used */
+ u8 forced_eop; /* a fifo eop has been forced */
+ int baud_base; /* For special baudrates */
+ int custom_divisor; /* For special baudrates */
+ struct etrax_dma_descr tr_descr;
+ struct etrax_dma_descr rec_descr[SERIAL_RECV_DESCRIPTORS];
+ int cur_rec_descr;
+
+ volatile int tr_running; /* 1 if output is running */
+
+ struct tty_struct *tty;
+ int read_status_mask;
+ int ignore_status_mask;
+ int x_char; /* xon/xoff character */
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned short closing_wait2;
+ unsigned long event;
+ unsigned long last_active;
+ int line;
+ int type; /* PORT_ETRAX */
+ int count; /* # of fd on device */
+ int blocked_open; /* # of blocked opens */
+ struct circ_buf xmit;
+ struct etrax_recv_buffer *first_recv_buffer;
+ struct etrax_recv_buffer *last_recv_buffer;
+ unsigned int recv_cnt;
+ unsigned int max_recv_cnt;
+
+ struct work_struct work;
+ struct async_icount icount; /* error-statistics etc.*/
+ struct ktermios normal_termios;
+ struct ktermios callout_termios;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t close_wait;
+
+ unsigned long char_time_usec; /* The time for 1 char, in usecs */
+ unsigned long flush_time_usec; /* How often we should flush */
+ unsigned long last_tx_active_usec; /* Last tx usec in the jiffies */
+ unsigned long last_tx_active; /* Last tx time in jiffies */
+ unsigned long last_rx_active_usec; /* Last rx usec in the jiffies */
+ unsigned long last_rx_active; /* Last rx time in jiffies */
+
+ int break_detected_cnt;
+ int errorcode;
+
+#ifdef CONFIG_ETRAX_RS485
+ struct rs485_control rs485; /* RS-485 support */
+#endif
+};
+
+/* this PORT is not in the standard serial.h. it's not actually used for
+ * anything since we only have one type of async serial-port anyway in this
+ * system.
+ */
+
+#define PORT_ETRAX 1
+
+/*
+ * Events are used to schedule things to happen at timer-interrupt
+ * time, instead of at rs interrupt time.
+ */
+#define RS_EVENT_WRITE_WAKEUP 0
+
+#endif /* __KERNEL__ */
+
+#endif /* !_ETRAX_SERIAL_H */
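The new header declares the transmit ring as a struct circ_buf, and the rs_raw_write() hunks earlier fill it with CIRC_SPACE_TO_END() plus a power-of-two mask. A small sketch of that producer-side idiom, assuming a DEMO_XMIT_SIZE analogous to SERIAL_XMIT_SIZE (the demo_* names are placeholders):

    #include <linux/circ_buf.h>
    #include <linux/string.h>

    #define DEMO_XMIT_SIZE 4096     /* must be a power of two */

    /* Copy up to count bytes into the ring without wrapping; returns how
     * many bytes were actually stored. */
    static int demo_xmit_put(struct circ_buf *xmit, const unsigned char *src,
                             int count)
    {
        /* Contiguous free space from head up to the end of the buffer. */
        int c = CIRC_SPACE_TO_END(xmit->head, xmit->tail, DEMO_XMIT_SIZE);

        if (c > count)
            c = count;
        if (c <= 0)
            return 0;

        memcpy(xmit->buf + xmit->head, src, c);
        /* The mask works because the size is a power of two. */
        xmit->head = (xmit->head + c) & (DEMO_XMIT_SIZE - 1);
        return c;
    }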
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 89769ce16f88..b31f4431849b 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -457,10 +457,11 @@ done:
EXPORT_SYMBOL_GPL(spi_register_master);
-static int __unregister(struct device *dev, void *unused)
+static int __unregister(struct device *dev, void *master_dev)
{
/* note: before about 2.6.14-rc1 this would corrupt memory: */
- spi_unregister_device(to_spi_device(dev));
+ if (dev != master_dev)
+ spi_unregister_device(to_spi_device(dev));
return 0;
}
@@ -478,7 +479,8 @@ void spi_unregister_master(struct spi_master *master)
{
int dummy;
- dummy = device_for_each_child(master->dev.parent, NULL, __unregister);
+ dummy = device_for_each_child(master->dev.parent, &master->dev,
+ __unregister);
device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
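The spi.c hunk above passes the controller's own struct device as the data cookie so the per-child callback can skip it rather than unregister it. A brief sketch of that device_for_each_child() idiom, with hypothetical demo_* names:

    #include <linux/device.h>

    /* Called once per child device; 'skip' is the device to leave alone. */
    static int demo_unregister_child(struct device *dev, void *skip)
    {
        if (dev != skip)
            device_unregister(dev);
        return 0;   /* non-zero would stop the iteration early */
    }

    static void demo_remove_children(struct device *parent, struct device *skip)
    {
        device_for_each_child(parent, skip, demo_unregister_child);
    }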
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index cc5094f37dd3..363ac8e68821 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -24,6 +24,7 @@
#include <linux/spi/spi.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/io.h>
#include <asm/gpio.h>
@@ -74,7 +75,6 @@ struct txx9spi {
struct list_head queue;
wait_queue_head_t waitq;
void __iomem *membase;
- int irq;
int baseclk;
struct clk *clk;
u32 max_speed_hz, min_speed_hz;
@@ -350,12 +350,12 @@ static int __init txx9spi_probe(struct platform_device *dev)
struct resource *res;
int ret = -ENODEV;
u32 mcr;
+ int irq;
master = spi_alloc_master(&dev->dev, sizeof(*c));
if (!master)
return ret;
c = spi_master_get_devdata(master);
- c->irq = -1;
platform_set_drvdata(dev, master);
INIT_WORK(&c->work, txx9spi_work);
@@ -381,32 +381,36 @@ static int __init txx9spi_probe(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res)
- goto exit;
- c->membase = ioremap(res->start, res->end - res->start + 1);
+ goto exit_busy;
+ if (!devm_request_mem_region(&dev->dev,
+ res->start, res->end - res->start + 1,
+ "spi_txx9"))
+ goto exit_busy;
+ c->membase = devm_ioremap(&dev->dev,
+ res->start, res->end - res->start + 1);
if (!c->membase)
- goto exit;
+ goto exit_busy;
/* enter config mode */
mcr = txx9spi_rd(c, TXx9_SPMCR);
mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
- c->irq = platform_get_irq(dev, 0);
- if (c->irq < 0)
- goto exit;
- ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c);
- if (ret) {
- c->irq = -1;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ goto exit_busy;
+ ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0,
+ "spi_txx9", c);
+ if (ret)
goto exit;
- }
c->workqueue = create_singlethread_workqueue(master->dev.parent->bus_id);
if (!c->workqueue)
- goto exit;
+ goto exit_busy;
c->last_chipselect = -1;
dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
- (unsigned long long)res->start, c->irq,
+ (unsigned long long)res->start, irq,
(c->baseclk + 500000) / 1000000);
master->bus_num = dev->id;
@@ -418,13 +422,11 @@ static int __init txx9spi_probe(struct platform_device *dev)
if (ret)
goto exit;
return 0;
+exit_busy:
+ ret = -EBUSY;
exit:
if (c->workqueue)
destroy_workqueue(c->workqueue);
- if (c->irq >= 0)
- free_irq(c->irq, c);
- if (c->membase)
- iounmap(c->membase);
if (c->clk) {
clk_disable(c->clk);
clk_put(c->clk);
@@ -442,8 +444,6 @@ static int __exit txx9spi_remove(struct platform_device *dev)
spi_unregister_master(master);
platform_set_drvdata(dev, NULL);
destroy_workqueue(c->workqueue);
- free_irq(c->irq, c);
- iounmap(c->membase);
clk_disable(c->clk);
clk_put(c->clk);
spi_master_put(master);
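The spi_txx9.c probe above is converted to the managed devm_* helpers, which the driver core releases automatically on probe failure or device removal, so the hand-rolled iounmap()/free_irq() unwinding can go away. A condensed sketch of that probe pattern under the same assumptions (the demo_* names and the trivial interrupt handler are placeholders):

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>

    static irqreturn_t demo_interrupt(int irq, void *dev_id)
    {
        return IRQ_HANDLED;             /* placeholder handler */
    }

    static int demo_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;
        int irq;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
            return -ENODEV;
        /* Managed claim and map: released automatically on failure or
         * removal, so no iounmap()/release_mem_region() on error paths. */
        if (!devm_request_mem_region(&pdev->dev, res->start,
                                     resource_size(res), "demo"))
            return -EBUSY;
        base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
        if (!base)
            return -EBUSY;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;
        /* Managed IRQ: freed automatically as well. */
        return devm_request_irq(&pdev->dev, irq, demo_interrupt, 0,
                                "demo", base);
    }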
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index 6da58ca48b33..455991fbe28f 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -107,8 +107,11 @@ static ssize_t tle62x0_status_show(struct device *dev,
mutex_lock(&st->lock);
ret = tle62x0_read(st);
-
dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
fault <<= 8;
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 6bfdba6a213f..1f7ab15df36d 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -1215,20 +1215,18 @@ static int keyspan_chars_in_buffer (struct usb_serial_port *port)
static int keyspan_open (struct usb_serial_port *port, struct file *filp)
{
- struct keyspan_port_private *p_priv;
- struct keyspan_serial_private *s_priv;
- struct usb_serial *serial = port->serial;
+ struct keyspan_port_private *p_priv;
+ struct keyspan_serial_private *s_priv;
+ struct usb_serial *serial = port->serial;
const struct keyspan_device_details *d_details;
int i, err;
- int baud_rate, device_port;
struct urb *urb;
- unsigned int cflag;
s_priv = usb_get_serial_data(serial);
p_priv = usb_get_serial_port_data(port);
d_details = p_priv->device_details;
-
- dbg("%s - port%d.", __FUNCTION__, port->number);
+
+ dbg("%s - port%d.", __FUNCTION__, port->number);
/* Set some sane defaults */
p_priv->rts_state = 1;
@@ -1249,7 +1247,7 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp)
urb->dev = serial->dev;
/* make sure endpoint data toggle is synchronized with the device */
-
+
usb_clear_halt(urb->dev, urb->pipe);
if ((err = usb_submit_urb(urb, GFP_KERNEL)) != 0) {
@@ -1265,30 +1263,6 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp)
/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), 0); */
}
- /* get the terminal config for the setup message now so we don't
- * need to send 2 of them */
-
- cflag = port->tty->termios->c_cflag;
- device_port = port->number - port->serial->minor;
-
- /* Baud rate calculation takes baud rate as an integer
- so other rates can be generated if desired. */
- baud_rate = tty_get_baud_rate(port->tty);
- /* If no match or invalid, leave as default */
- if (baud_rate >= 0
- && d_details->calculate_baud_rate(baud_rate, d_details->baudclk,
- NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) {
- p_priv->baud = baud_rate;
- }
-
- /* set CTS/RTS handshake etc. */
- p_priv->cflag = cflag;
- p_priv->flow_control = (cflag & CRTSCTS)? flow_cts: flow_none;
-
- keyspan_send_setup(port, 1);
- //mdelay(100);
- //keyspan_set_termios(port, NULL);
-
return (0);
}
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index cc4b60f899ca..7d86e9eae915 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -503,7 +503,7 @@ config FB_VALKYRIE
config FB_CT65550
bool "Chips 65550 display support"
- depends on (FB = y) && PPC32
+ depends on (FB = y) && PPC32 && PCI
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
index b9b572b293d4..2e552d5bbb5d 100644
--- a/drivers/video/gbefb.c
+++ b/drivers/video/gbefb.c
@@ -183,8 +183,8 @@ static struct fb_videomode default_mode_LCD __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-struct fb_videomode *default_mode = &default_mode_CRT;
-struct fb_var_screeninfo *default_var = &default_var_CRT;
+struct fb_videomode *default_mode __initdata = &default_mode_CRT;
+struct fb_var_screeninfo *default_var __initdata = &default_var_CRT;
static int flat_panel_enabled = 0;
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index 6c227f9592a5..ca13c48d19b0 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -33,7 +33,7 @@ void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
#define MSR_LX_GLD_CONFIG 0x48002001
#define MSR_LX_GLCP_DOTPLL 0x4c000015
-#define MSR_LX_DF_PADSEL 0x48000011
+#define MSR_LX_DF_PADSEL 0x48002011
#define MSR_LX_DC_SPARE 0x80000011
#define MSR_LX_DF_GLCONFIG 0x48002001
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index b3463ddcfd60..75836aa83191 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -727,7 +727,7 @@ static int ps3fb_blank(int blank, struct fb_info *info)
static int ps3fb_get_vblank(struct fb_vblank *vblank)
{
- memset(vblank, 0, sizeof(&vblank));
+ memset(vblank, 0, sizeof(*vblank));
vblank->flags = FB_VBLANK_HAVE_VSYNC;
return 0;
}
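The ps3fb hunk above, and the ds2490 hunk further down, fix the same classic slip: passing sizeof(&ptr) or sizeof(ptr), the size of a pointer, to memset() instead of sizeof(*ptr), the size of the pointed-to object. A standalone plain-C illustration (not code from either driver):

    #include <stdio.h>
    #include <string.h>

    struct vblank_demo {
        unsigned int flags;
        unsigned int count;
        char reserved[32];
    };

    static void clear_wrong(struct vblank_demo *v)
    {
        /* sizeof(&v) is the size of a pointer (4 or 8 bytes), so most of
         * the structure is left uninitialized. */
        memset(v, 0, sizeof(&v));
    }

    static void clear_right(struct vblank_demo *v)
    {
        /* sizeof(*v) is the size of the whole structure. */
        memset(v, 0, sizeof(*v));
    }

    int main(void)
    {
        struct vblank_demo v;

        clear_wrong(&v);
        clear_right(&v);
        printf("pointer: %zu bytes, struct: %zu bytes\n",
               sizeof(&v), sizeof(v));
        return 0;
    }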
diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
index a5333c190789..b829dc7c5edf 100644
--- a/drivers/video/s1d13xxxfb.c
+++ b/drivers/video/s1d13xxxfb.c
@@ -540,7 +540,7 @@ s1d13xxxfb_probe(struct platform_device *pdev)
int ret = 0;
u8 revision;
- dbg("probe called: device is %p\n", dev);
+ dbg("probe called: device is %p\n", pdev);
printk(KERN_INFO "Epson S1D13XXX FB Driver\n");
@@ -753,8 +753,11 @@ static struct platform_driver s1d13xxxfb_driver = {
static int __init
s1d13xxxfb_init(void)
{
+
+#ifndef MODULE
if (fb_get_options("s1d13xxxfb", NULL))
return -ENODEV;
+#endif
return platform_driver_register(&s1d13xxxfb_driver);
}
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index bc7d23683735..37bd24b8d83b 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -1248,7 +1248,6 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in
if(found_mode) {
ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo,
ivideo->sisfb_mode_idx, ivideo->currentvbflags);
- ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];
} else {
ivideo->sisfb_mode_idx = -1;
}
@@ -1260,6 +1259,8 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in
return -EINVAL;
}
+ ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni];
+
if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) {
ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx;
ivideo->refresh_rate = 60;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b983d262ab78..d1d6c0facd54 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -926,8 +926,10 @@ static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count,
int start, struct fb_info *info)
{
struct uvesafb_ktask *task;
+#ifdef CONFIG_X86
struct uvesafb_par *par = info->par;
int i = par->mode_idx;
+#endif
int err = 0;
/*
@@ -1103,11 +1105,11 @@ static int uvesafb_pan_display(struct fb_var_screeninfo *var,
static int uvesafb_blank(int blank, struct fb_info *info)
{
- struct uvesafb_par *par = info->par;
struct uvesafb_ktask *task;
int err = 1;
-
#ifdef CONFIG_X86
+ struct uvesafb_par *par = info->par;
+
if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) {
int loop = 10000;
u8 seq = 0, crtc17 = 0;
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 299e274d241a..b63b5e044a4c 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -233,7 +233,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
{
int count, err;
- memset(st, 0, sizeof(st));
+ memset(st, 0, sizeof(*st));
count = 0;
err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);