author    Dmitry Torokhov    2005-06-01 02:37:23 -0500
committer Dmitry Torokhov    2005-06-01 02:37:23 -0500
commit    9d5432979951c8761c2b4517007039b9bcc1c110 (patch)
tree      9441cb1cd4a7674a2292936bfb42514f33d24cd4 /drivers
parent    a913829e90e2af7a6e98f5aadcc9fec4dcf1ef64 (diff)
parent    21e3024cbddb712f6a078bf4132d7682d3c4e35e (diff)
Automatic merge of rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cdrom/viocd.c                  |  14
-rw-r--r--  drivers/cpufreq/Kconfig                |  24
-rw-r--r--  drivers/cpufreq/Makefile               |   1
-rw-r--r--  drivers/cpufreq/cpufreq.c              |   8
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 586
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 180
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c        |  47
-rw-r--r--  drivers/firmware/pcdp.c                |   1
-rw-r--r--  drivers/ide/ide-cd.c                   |   2
-rw-r--r--  drivers/input/mousedev.c               |  15
-rw-r--r--  drivers/macintosh/via-pmu.c            |   8
-rw-r--r--  drivers/media/dvb/bt8xx/dst.c          | 122
-rw-r--r--  drivers/net/tg3.c                      | 571
-rw-r--r--  drivers/scsi/ahci.c                    |   3
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c     |   2
-rw-r--r--  drivers/scsi/ata_piix.c                |   2
-rw-r--r--  drivers/scsi/libata-core.c             |  15
-rw-r--r--  drivers/scsi/libata.h                  |   2
-rw-r--r--  drivers/scsi/sata_nv.c                 |   2
-rw-r--r--  drivers/scsi/sata_promise.c            |   1
-rw-r--r--  drivers/scsi/sata_qstor.c              |   2
-rw-r--r--  drivers/scsi/sata_sil.c                |   1
-rw-r--r--  drivers/scsi/sata_sis.c                |   1
-rw-r--r--  drivers/scsi/sata_svw.c                |   1
-rw-r--r--  drivers/scsi/sata_sx4.c                |   2
-rw-r--r--  drivers/scsi/sata_uli.c                |   1
-rw-r--r--  drivers/scsi/sata_via.c                |   1
-rw-r--r--  drivers/scsi/sata_vsc.c                |   2
-rw-r--r--  drivers/usb/media/pwc/pwc-ctrl.c       |   2
-rw-r--r--  drivers/usb/media/pwc/pwc-uncompress.c |   2
-rw-r--r--  drivers/video/intelfb/intelfbdrv.c     |  22
31 files changed, 1427 insertions(+), 216 deletions(-)
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index fcca26c89bbc..38dd9ffbe8bc 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -488,6 +488,20 @@ static int viocd_packet(struct cdrom_device_info *cdi,
& (CDC_DVD_RAM | CDC_RAM)) != 0;
}
break;
+ case GPCMD_GET_CONFIGURATION:
+ if (cgc->cmd[3] == CDF_RWRT) {
+ struct rwrt_feature_desc *rfd = (struct rwrt_feature_desc *)(cgc->buffer + sizeof(struct feature_header));
+
+ if ((buflen >=
+ (sizeof(struct feature_header) + sizeof(*rfd))) &&
+ (cdi->ops->capability & ~cdi->mask
+ & (CDC_DVD_RAM | CDC_RAM))) {
+ rfd->feature_code = cpu_to_be16(CDF_RWRT);
+ rfd->curr = 1;
+ ret = 0;
+ }
+ }
+ break;
default:
if (cgc->sense) {
/* indicate Unknown code */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 95882bb1950e..60c9be99c6d9 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -46,6 +46,10 @@ config CPU_FREQ_STAT_DETAILS
This will show detail CPU frequency translation table in sysfs file
system
+# Note that it is not currently possible to set the other governors (such as ondemand)
+# as the default, since if they fail to initialise, cpufreq will be
+# left in an undefined state.
+
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
@@ -115,4 +119,24 @@ config CPU_FREQ_GOV_ONDEMAND
If in doubt, say N.
+config CPU_FREQ_GOV_CONSERVATIVE
+ tristate "'conservative' cpufreq governor"
+ depends on CPU_FREQ
+ help
+ 'conservative' - this driver is rather similar to the 'ondemand'
+ governor both in its source code and its purpose, the difference is
+ its optimisation for better suitability in a battery powered
+ environment. The frequency is gracefully increased and decreased
+ rather than jumping to 100% when speed is required.
+
+ If you have a desktop machine then you should really be considering
+ the 'ondemand' governor instead, however if you are using a laptop,
+ PDA or even an AMD64 based computer (due to the unacceptable
+ step-by-step latency issues between the minimum and maximum frequency
+ transitions in the CPU) you will probably want to use this governor.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
endif # CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 67b16e5a41a7..71fc3b4173f1 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
+obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8e561313d094..03b5fb2ddcf4 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -258,7 +258,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
(likely(cpufreq_cpu_data[freqs->cpu]->cur)) &&
(unlikely(freqs->old != cpufreq_cpu_data[freqs->cpu]->cur)))
{
- printk(KERN_WARNING "Warning: CPU frequency is %u, "
+ dprintk(KERN_WARNING "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n", freqs->old, cpufreq_cpu_data[freqs->cpu]->cur);
freqs->old = cpufreq_cpu_data[freqs->cpu]->cur;
}
@@ -814,7 +814,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, unsigne
{
struct cpufreq_freqs freqs;
- printk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
+ dprintk(KERN_WARNING "Warning: CPU frequency out of sync: cpufreq and timing "
"core thinks of %u, is %u kHz.\n", old_freq, new_freq);
freqs.cpu = cpu;
@@ -923,7 +923,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, u32 state)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- printk(KERN_DEBUG "Warning: CPU frequency is %u, "
+ dprintk(KERN_DEBUG "Warning: CPU frequency is %u, "
"cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
@@ -1004,7 +1004,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- printk(KERN_WARNING "Warning: CPU frequency"
+ dprintk(KERN_WARNING "Warning: CPU frequency"
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
new file mode 100644
index 000000000000..e1df376e709e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -0,0 +1,586 @@
+/*
+ * drivers/cpufreq/cpufreq_conservative.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/cpufreq.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/kmod.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+
+/*
+ * dbs is used in this file as a shortform for demandbased switching
+ * It helps to keep variable names smaller, simpler
+ */
+
+#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define MIN_FREQUENCY_UP_THRESHOLD (0)
+#define MAX_FREQUENCY_UP_THRESHOLD (100)
+
+#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
+#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
+#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
+
+/*
+ * The polling frequency of this governor depends on the capability of
+ * the processor. Default polling frequency is 1000 times the transition
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
+ * rate.
+ * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work.
+ * All times here are in uS.
+ */
+static unsigned int def_sampling_rate;
+#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
+#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
+#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (100000)
+#define DEF_SAMPLING_DOWN_FACTOR (5)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000)
+
+static void do_dbs_timer(void *data);
+
+struct cpu_dbs_info_s {
+ struct cpufreq_policy *cur_policy;
+ unsigned int prev_cpu_idle_up;
+ unsigned int prev_cpu_idle_down;
+ unsigned int enable;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+
+static unsigned int dbs_enable; /* number of CPUs using this policy */
+
+static DECLARE_MUTEX (dbs_sem);
+static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+
+struct dbs_tuners {
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int ignore_nice;
+ unsigned int freq_step;
+};
+
+static struct dbs_tuners dbs_tuners_ins = {
+ .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+};
+
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
+/************************** sysfs interface ************************/
+static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+}
+
+static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+}
+
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(sampling_rate_max);
+define_one_ro(sampling_rate_min);
+
+/* cpufreq_conservative Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct cpufreq_policy *unused, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(sampling_down_factor, sampling_down_factor);
+show_one(up_threshold, up_threshold);
+show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
+show_one(freq_step, freq_step);
+
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+ if (ret != 1 )
+ return -EINVAL;
+
+ down(&dbs_sem);
+ dbs_tuners_ins.sampling_down_factor = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.sampling_rate = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
+ input < MIN_FREQUENCY_UP_THRESHOLD ||
+ input <= dbs_tuners_ins.down_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.up_threshold = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ down(&dbs_sem);
+ if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
+ input < MIN_FREQUENCY_DOWN_THRESHOLD ||
+ input >= dbs_tuners_ins.up_threshold) {
+ up(&dbs_sem);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.down_threshold = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ unsigned int j;
+
+ ret = sscanf (buf, "%u", &input);
+ if ( ret != 1 )
+ return -EINVAL;
+
+ if ( input > 1 )
+ input = 1;
+
+ down(&dbs_sem);
+ if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
+ up(&dbs_sem);
+ return count;
+ }
+ dbs_tuners_ins.ignore_nice = input;
+
+ /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ }
+ up(&dbs_sem);
+
+ return count;
+}
+
+static ssize_t store_freq_step(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf (buf, "%u", &input);
+
+ if ( ret != 1 )
+ return -EINVAL;
+
+ if ( input > 100 )
+ input = 100;
+
+ /* no need to test here if freq_step is zero as the user might actually
+ * want this, they would be crazy though :) */
+ down(&dbs_sem);
+ dbs_tuners_ins.freq_step = input;
+ up(&dbs_sem);
+
+ return count;
+}
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(sampling_rate);
+define_one_rw(sampling_down_factor);
+define_one_rw(up_threshold);
+define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
+define_one_rw(freq_step);
+
+static struct attribute * dbs_attributes[] = {
+ &sampling_rate_max.attr,
+ &sampling_rate_min.attr,
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice.attr,
+ &freq_step.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "conservative",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_check_cpu(int cpu)
+{
+ unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+ unsigned int freq_step;
+ unsigned int freq_down_sampling_rate;
+ static int down_skip[NR_CPUS];
+ static int requested_freq[NR_CPUS];
+ static unsigned short init_flag = 0;
+ struct cpu_dbs_info_s *this_dbs_info;
+ struct cpu_dbs_info_s *dbs_info;
+
+ struct cpufreq_policy *policy;
+ unsigned int j;
+
+ this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ if (!this_dbs_info->enable)
+ return;
+
+ policy = this_dbs_info->cur_policy;
+
+ if ( init_flag == 0 ) {
+ for ( /* NULL */; init_flag < NR_CPUS; init_flag++ ) {
+ dbs_info = &per_cpu(cpu_dbs_info, init_flag);
+ requested_freq[cpu] = dbs_info->cur_policy->cur;
+ }
+ init_flag = 1;
+ }
+
+ /*
+ * The default safe range is 20% to 80%
+ * Every sampling_rate, we check
+ * - If current idle time is less than 20%, then we try to
+ * increase frequency
+ * Every sampling_rate*sampling_down_factor, we check
+ * - If current idle time is more than 80%, then we try to
+ * decrease frequency
+ *
+ * Any frequency increase takes it to the maximum frequency.
+ * Frequency reduction happens at minimum steps of
+ * 5% (default) of max_frequency
+ */
+
+ /* Check for frequency increase */
+
+ idle_ticks = UINT_MAX;
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks, total_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ /* Check for frequency increase */
+ total_idle_ticks = get_cpu_idle_time(j);
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_up;
+ j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_down =
+ j_dbs_info->prev_cpu_idle_up;
+ }
+ /* if we are already at full speed then break out early */
+ if (requested_freq[cpu] == policy->max)
+ return;
+
+ freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_step == 0))
+ freq_step = 5;
+
+ requested_freq[cpu] += freq_step;
+ if (requested_freq[cpu] > policy->max)
+ requested_freq[cpu] = policy->max;
+
+ __cpufreq_driver_target(policy, requested_freq[cpu],
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ down_skip[cpu]++;
+ if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
+ return;
+
+ idle_ticks = UINT_MAX;
+ for_each_cpu_mask(j, policy->cpus) {
+ unsigned int tmp_idle_ticks, total_idle_ticks;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
+ tmp_idle_ticks = total_idle_ticks -
+ j_dbs_info->prev_cpu_idle_down;
+ j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+ }
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ down_skip[cpu] = 0;
+
+ freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
+ dbs_tuners_ins.sampling_down_factor;
+ down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
+ usecs_to_jiffies(freq_down_sampling_rate);
+
+ if (idle_ticks > down_idle_ticks) {
+ /* if we are already at the lowest speed then break out early
+ * or if we 'cannot' reduce the speed as the user might want
+ * freq_step to be zero */
+ if (requested_freq[cpu] == policy->min
+ || dbs_tuners_ins.freq_step == 0)
+ return;
+
+ freq_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_step == 0))
+ freq_step = 5;
+
+ requested_freq[cpu] -= freq_step;
+ if (requested_freq[cpu] < policy->min)
+ requested_freq[cpu] = policy->min;
+
+ __cpufreq_driver_target(policy,
+ requested_freq[cpu],
+ CPUFREQ_RELATION_H);
+ return;
+ }
+}
+
+static void do_dbs_timer(void *data)
+{
+ int i;
+ down(&dbs_sem);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ up(&dbs_sem);
+}
+
+static inline void dbs_timer_init(void)
+{
+ INIT_WORK(&dbs_work, do_dbs_timer, NULL);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ return;
+}
+
+static inline void dbs_timer_exit(void)
+{
+ cancel_delayed_work(&dbs_work);
+ return;
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int j;
+
+ this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) ||
+ (!policy->cur))
+ return -EINVAL;
+
+ if (policy->cpuinfo.transition_latency >
+ (TRANSITION_LATENCY_LIMIT * 1000))
+ return -EINVAL;
+ if (this_dbs_info->enable) /* Already enabled */
+ break;
+
+ down(&dbs_sem);
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
+
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down
+ = j_dbs_info->prev_cpu_idle_up;
+ }
+ this_dbs_info->enable = 1;
+ sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ dbs_enable++;
+ /*
+ * Start the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (dbs_enable == 1) {
+ unsigned int latency;
+ /* policy latency is in nS. Convert it to uS first */
+
+ latency = policy->cpuinfo.transition_latency;
+ if (latency < 1000)
+ latency = 1000;
+
+ def_sampling_rate = (latency / 1000) *
+ DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
+ dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ dbs_tuners_ins.ignore_nice = 0;
+ dbs_tuners_ins.freq_step = 5;
+
+ dbs_timer_init();
+ }
+
+ up(&dbs_sem);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ down(&dbs_sem);
+ this_dbs_info->enable = 0;
+ sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ dbs_enable--;
+ /*
+ * Stop the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (dbs_enable == 0)
+ dbs_timer_exit();
+
+ up(&dbs_sem);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ down(&dbs_sem);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ up(&dbs_sem);
+ break;
+ }
+ return 0;
+}
+
+static struct cpufreq_governor cpufreq_gov_dbs = {
+ .name = "conservative",
+ .governor = cpufreq_governor_dbs,
+ .owner = THIS_MODULE,
+};
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ return cpufreq_register_governor(&cpufreq_gov_dbs);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ /* Make sure that the scheduled work is indeed not running */
+ flush_scheduled_work();
+
+ cpufreq_unregister_governor(&cpufreq_gov_dbs);
+}
+
+
+MODULE_AUTHOR ("Alexander Clouter <alex-kernel@digriz.org.uk>");
+MODULE_DESCRIPTION ("'cpufreq_conservative' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors "
+ "optimised for use in a battery environment");
+MODULE_LICENSE ("GPL");
+
+module_init(cpufreq_gov_dbs_init);
+module_exit(cpufreq_gov_dbs_exit);
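As an aside to the new governor above: its core up/down decision can be illustrated with a small standalone C program. This is not part of the patch; the policy struct, the main() driver and the sampled idle percentages are made up, and the per-CPU bookkeeping, sampling_down_factor and jiffies accounting of the real code are omitted. Only the 20%/80% defaults and the 5%-of-max step mirror cpufreq_conservative.c.

/*
 * Standalone sketch of the conservative governor's step logic.
 * Thresholds and freq_step mirror the defaults in cpufreq_conservative.c;
 * the policy struct and the sampled idle percentages are illustrative.
 */
#include <stdio.h>

struct policy { unsigned int min, max, cur; };

static unsigned int conservative_step(struct policy *p, unsigned int idle_pct)
{
	unsigned int up_threshold = 80;              /* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int down_threshold = 20;            /* DEF_FREQUENCY_DOWN_THRESHOLD */
	unsigned int freq_step = (5 * p->max) / 100; /* 5% of max, as in the patch */

	if (idle_pct < 100 - up_threshold) {          /* busy: load above 80% */
		p->cur += freq_step;
		if (p->cur > p->max)
			p->cur = p->max;
	} else if (idle_pct > 100 - down_threshold) { /* idle: load below 20% */
		p->cur -= freq_step;
		if (p->cur < p->min || p->cur > p->max) /* guard against underflow */
			p->cur = p->min;
	}
	return p->cur;
}

int main(void)
{
	struct policy p = { .min = 600000, .max = 1800000, .cur = 1200000 };
	unsigned int samples[] = { 5, 10, 40, 90, 95, 95 }; /* idle% per sample */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("idle %3u%% -> %u kHz\n", samples[i],
		       conservative_step(&p, samples[i]));
	return 0;
}

Unlike ondemand, the frequency ramps up in 5% increments rather than jumping straight to policy->max, which is the "graceful" behaviour the Kconfig help text describes.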
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8d83a21c6477..c1fc9c62bb51 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
*/
#define DEF_FREQUENCY_UP_THRESHOLD (80)
-#define MIN_FREQUENCY_UP_THRESHOLD (0)
+#define MIN_FREQUENCY_UP_THRESHOLD (11)
#define MAX_FREQUENCY_UP_THRESHOLD (100)
-#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD (0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD (100)
-
/*
* The polling frequency of this governor depends on the capability of
* the processor. Default polling frequency is 1000 times the transition
@@ -55,9 +51,9 @@ static unsigned int def_sampling_rate;
#define MIN_SAMPLING_RATE (def_sampling_rate / 2)
#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
-#define DEF_SAMPLING_DOWN_FACTOR (10)
+#define DEF_SAMPLING_DOWN_FACTOR (1)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-#define sampling_rate_in_HZ(x) (((x * HZ) < (1000 * 1000))?1:((x * HZ) / (1000 * 1000)))
static void do_dbs_timer(void *data);
@@ -78,15 +74,23 @@ struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int down_threshold;
+ unsigned int ignore_nice;
};
static struct dbs_tuners dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
- .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
};
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ return kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ ( !dbs_tuners_ins.ignore_nice ?
+ kstat_cpu(cpu).cpustat.nice :
+ 0);
+}
+
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
{
@@ -115,7 +119,7 @@ static ssize_t show_##file_name \
show_one(sampling_rate, sampling_rate);
show_one(sampling_down_factor, sampling_down_factor);
show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
+show_one(ignore_nice, ignore_nice);
static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
const char *buf, size_t count)
@@ -126,6 +130,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
if (ret != 1 )
return -EINVAL;
+ if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
down(&dbs_sem);
dbs_tuners_ins.sampling_down_factor = input;
up(&dbs_sem);
@@ -161,8 +168,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
down(&dbs_sem);
if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
- input < MIN_FREQUENCY_UP_THRESHOLD ||
- input <= dbs_tuners_ins.down_threshold) {
+ input < MIN_FREQUENCY_UP_THRESHOLD) {
up(&dbs_sem);
return -EINVAL;
}
@@ -173,22 +179,35 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
return count;
}
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int input;
int ret;
+
+ unsigned int j;
+
ret = sscanf (buf, "%u", &input);
+ if ( ret != 1 )
+ return -EINVAL;
+ if ( input > 1 )
+ input = 1;
+
down(&dbs_sem);
- if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
- input < MIN_FREQUENCY_DOWN_THRESHOLD ||
- input >= dbs_tuners_ins.up_threshold) {
+ if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
up(&dbs_sem);
- return -EINVAL;
+ return count;
}
+ dbs_tuners_ins.ignore_nice = input;
- dbs_tuners_ins.down_threshold = input;
+ /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ }
up(&dbs_sem);
return count;
@@ -201,7 +220,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
define_one_rw(sampling_rate);
define_one_rw(sampling_down_factor);
define_one_rw(up_threshold);
-define_one_rw(down_threshold);
+define_one_rw(ignore_nice);
static struct attribute * dbs_attributes[] = {
&sampling_rate_max.attr,
@@ -209,7 +228,7 @@ static struct attribute * dbs_attributes[] = {
&sampling_rate.attr,
&sampling_down_factor.attr,
&up_threshold.attr,
- &down_threshold.attr,
+ &ignore_nice.attr,
NULL
};
@@ -222,9 +241,8 @@ static struct attribute_group dbs_attr_group = {
static void dbs_check_cpu(int cpu)
{
- unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
- unsigned int total_idle_ticks;
- unsigned int freq_down_step;
+ unsigned int idle_ticks, up_idle_ticks, total_ticks;
+ unsigned int freq_next;
unsigned int freq_down_sampling_rate;
static int down_skip[NR_CPUS];
struct cpu_dbs_info_s *this_dbs_info;
@@ -238,38 +256,25 @@ static void dbs_check_cpu(int cpu)
policy = this_dbs_info->cur_policy;
/*
- * The default safe range is 20% to 80%
- * Every sampling_rate, we check
- * - If current idle time is less than 20%, then we try to
- * increase frequency
- * Every sampling_rate*sampling_down_factor, we check
- * - If current idle time is more than 80%, then we try to
- * decrease frequency
+ * Every sampling_rate, we check, if current idle time is less
+ * than 20% (default), then we try to increase frequency
+ * Every sampling_rate*sampling_down_factor, we look for a the lowest
+ * frequency which can sustain the load while keeping idle time over
+ * 30%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
- * 5% of max_frequency
+ * 5% (default) of current frequency
*/
/* Check for frequency increase */
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_up;
- this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
-
-
+ idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
- unsigned int tmp_idle_ticks;
+ unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
- if (j == cpu)
- continue;
-
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- /* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ total_idle_ticks = get_cpu_idle_time(j);
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_up;
j_dbs_info->prev_cpu_idle_up = total_idle_ticks;
@@ -281,13 +286,23 @@ static void dbs_check_cpu(int cpu)
/* Scale idle ticks by 100 and compare with up and down ticks */
idle_ticks *= 100;
up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate);
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
if (idle_ticks < up_idle_ticks) {
+ down_skip[cpu] = 0;
+ for_each_cpu_mask(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_down =
+ j_dbs_info->prev_cpu_idle_up;
+ }
+ /* if we are already at full speed then break out early */
+ if (policy->cur == policy->max)
+ return;
+
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- down_skip[cpu] = 0;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
return;
}
@@ -296,23 +311,14 @@ static void dbs_check_cpu(int cpu)
if (down_skip[cpu] < dbs_tuners_ins.sampling_down_factor)
return;
- total_idle_ticks = kstat_cpu(cpu).cpustat.idle +
- kstat_cpu(cpu).cpustat.iowait;
- idle_ticks = total_idle_ticks -
- this_dbs_info->prev_cpu_idle_down;
- this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
+ idle_ticks = UINT_MAX;
for_each_cpu_mask(j, policy->cpus) {
- unsigned int tmp_idle_ticks;
+ unsigned int tmp_idle_ticks, total_idle_ticks;
struct cpu_dbs_info_s *j_dbs_info;
- if (j == cpu)
- continue;
-
j_dbs_info = &per_cpu(cpu_dbs_info, j);
- /* Check for frequency increase */
- total_idle_ticks = kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ /* Check for frequency decrease */
+ total_idle_ticks = j_dbs_info->prev_cpu_idle_up;
tmp_idle_ticks = total_idle_ticks -
j_dbs_info->prev_cpu_idle_down;
j_dbs_info->prev_cpu_idle_down = total_idle_ticks;
@@ -321,38 +327,37 @@ static void dbs_check_cpu(int cpu)
idle_ticks = tmp_idle_ticks;
}
- /* Scale idle ticks by 100 and compare with up and down ticks */
- idle_ticks *= 100;
down_skip[cpu] = 0;
+ /* if we cannot reduce the frequency anymore, break out early */
+ if (policy->cur == policy->min)
+ return;
+ /* Compute how many ticks there are between two measurements */
freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
dbs_tuners_ins.sampling_down_factor;
- down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
- sampling_rate_in_HZ(freq_down_sampling_rate);
+ total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
- if (idle_ticks > down_idle_ticks ) {
- freq_down_step = (5 * policy->max) / 100;
-
- /* max freq cannot be less than 100. But who knows.... */
- if (unlikely(freq_down_step == 0))
- freq_down_step = 5;
+ /*
+ * The optimal frequency is the frequency that is the lowest that
+ * can support the current CPU usage without triggering the up
+ * policy. To be safe, we focus 10 points under the threshold.
+ */
+ freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+ freq_next = (freq_next * policy->cur) /
+ (dbs_tuners_ins.up_threshold - 10);
- __cpufreq_driver_target(policy,
- policy->cur - freq_down_step,
- CPUFREQ_RELATION_H);
- return;
- }
+ if (freq_next <= ((policy->cur * 95) / 100))
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
}
static void do_dbs_timer(void *data)
{
int i;
down(&dbs_sem);
- for (i = 0; i < NR_CPUS; i++)
- if (cpu_online(i))
- dbs_check_cpu(i);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
schedule_delayed_work(&dbs_work,
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
up(&dbs_sem);
}
@@ -360,7 +365,7 @@ static inline void dbs_timer_init(void)
{
INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
- sampling_rate_in_HZ(dbs_tuners_ins.sampling_rate));
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
}
@@ -397,12 +402,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info = &per_cpu(cpu_dbs_info, j);
j_dbs_info->cur_policy = policy;
- j_dbs_info->prev_cpu_idle_up =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
- j_dbs_info->prev_cpu_idle_down =
- kstat_cpu(j).cpustat.idle +
- kstat_cpu(j).cpustat.iowait;
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down
+ = j_dbs_info->prev_cpu_idle_up;
}
this_dbs_info->enable = 1;
sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -422,6 +424,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
def_sampling_rate = (latency / 1000) *
DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
dbs_tuners_ins.sampling_rate = def_sampling_rate;
+ dbs_tuners_ins.ignore_nice = 0;
dbs_timer_init();
}
@@ -461,12 +464,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
return 0;
}
-struct cpufreq_governor cpufreq_gov_dbs = {
+static struct cpufreq_governor cpufreq_gov_dbs = {
.name = "ondemand",
.governor = cpufreq_governor_dbs,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(cpufreq_gov_dbs);
static int __init cpufreq_gov_dbs_init(void)
{
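The most visible behavioural change in the ondemand diff above is the down path: instead of stepping down by a fixed 5%, it jumps to the lowest frequency expected to keep load about 10 points under up_threshold. A standalone C sketch of that arithmetic follows; the tick counts and frequencies are assumptions, and in the kernel the result is rounded to a real table frequency via CPUFREQ_RELATION_L.

/*
 * Sketch of the ondemand governor's new "jump to optimal frequency" down path.
 * The tick counts and the current frequency are made up; the formula matches
 * dbs_check_cpu() in the hunk above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int up_threshold = 80;   /* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int cur = 2000000;       /* current frequency, kHz */
	unsigned int total_ticks = 100;   /* ticks between two down-checks */
	unsigned int idle_ticks = 70;     /* measured idle ticks in that window */
	unsigned int load, freq_next;

	load = ((total_ticks - idle_ticks) * 100) / total_ticks;  /* 30% here */

	/* lowest frequency that keeps load roughly 10 points under up_threshold */
	freq_next = (load * cur) / (up_threshold - 10);           /* ~857142 kHz */

	if (freq_next <= (cur * 95) / 100)
		printf("load %u%%: scale down from %u to %u kHz\n",
		       load, cur, freq_next);
	else
		printf("load %u%%: stay at %u kHz\n", load, cur);
	return 0;
}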
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 2084593937c6..741b6b191e6a 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -19,6 +19,7 @@
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
+#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -29,20 +30,14 @@ static struct freq_attr _attr_##_name = {\
.show = _show,\
};
-static unsigned long
-delta_time(unsigned long old, unsigned long new)
-{
- return (old > new) ? (old - new): (new + ~old + 1);
-}
-
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
- unsigned long long last_time;
+ unsigned long long last_time;
unsigned int max_state;
unsigned int state_num;
unsigned int last_index;
- unsigned long long *time_in_state;
+ cputime64_t *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
@@ -60,12 +55,16 @@ static int
cpufreq_stats_update (unsigned int cpu)
{
struct cpufreq_stats *stat;
+ unsigned long long cur_time;
+
+ cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
stat = cpufreq_stats_table[cpu];
if (stat->time_in_state)
- stat->time_in_state[stat->last_index] +=
- delta_time(stat->last_time, jiffies);
- stat->last_time = jiffies;
+ stat->time_in_state[stat->last_index] =
+ cputime64_add(stat->time_in_state[stat->last_index],
+ cputime_sub(cur_time, stat->last_time));
+ stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
@@ -90,8 +89,8 @@ show_time_in_state(struct cpufreq_policy *policy, char *buf)
return 0;
cpufreq_stats_update(stat->cpu);
for (i = 0; i < stat->state_num; i++) {
- len += sprintf(buf + len, "%u %llu\n",
- stat->freq_table[i], stat->time_in_state[i]);
+ len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
+ (unsigned long long)cputime64_to_clock_t(stat->time_in_state[i]));
}
return len;
}
@@ -107,16 +106,30 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
if(!stat)
return 0;
cpufreq_stats_update(stat->cpu);
+ len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
+ len += snprintf(buf + len, PAGE_SIZE - len, " : ");
+ for (i = 0; i < stat->state_num; i++) {
+ if (len >= PAGE_SIZE)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
+ stat->freq_table[i]);
+ }
+ if (len >= PAGE_SIZE)
+ return len;
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+
for (i = 0; i < stat->state_num; i++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u:\t",
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
stat->freq_table[i]);
for (j = 0; j < stat->state_num; j++) {
if (len >= PAGE_SIZE)
break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%u\t",
+ len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
stat->trans_table[i*stat->max_state+j]);
}
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -197,7 +210,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
count++;
}
- alloc_size = count * sizeof(int) + count * sizeof(long long);
+ alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
@@ -224,7 +237,7 @@ cpufreq_stats_create_table (struct cpufreq_policy *policy,
}
stat->state_num = j;
spin_lock(&cpufreq_stats_lock);
- stat->last_time = jiffies;
+ stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(data);
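One plausible reading of the cpufreq_stats change above: moving from raw jiffies plus the hand-rolled delta_time() to get_jiffies_64() and cputime64_t keeps both the per-sample deltas and the accumulated time_in_state totals safe from 32-bit wrap. A small standalone demo follows; HZ and the sample tick values are assumptions for illustration only.

/*
 * Why a 64-bit tick counter helps: a 32-bit jiffies value wraps after
 * roughly 2^32/HZ seconds, so long-running totals cannot be kept in it,
 * even though modular subtraction still yields correct short deltas.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hz = 1000;
	/* a 32-bit tick counter just before and just after wrapping */
	uint32_t last32 = 0xfffffff0u, now32 = 0x00000010u;
	uint64_t last64 = 0x00000000fffffff0ull, now64 = 0x0000000100000010ull;

	/* unsigned 32-bit subtraction still gives the right delta (32 ticks)... */
	printf("32-bit delta: %u ticks\n", now32 - last32);

	/* ...but the running counter itself wraps after ~2^32/HZ seconds */
	printf("32-bit counter wraps after ~%.1f days at HZ=%u\n",
	       4294967296.0 / hz / 86400.0, hz);

	/* with 64-bit time, deltas and accumulated totals are both safe */
	printf("64-bit delta: %llu ticks\n",
	       (unsigned long long)(now64 - last64));
	return 0;
}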
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 6d5df6c2efa2..df1b721154d2 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -11,6 +11,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/efi.h>
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 78e3e7b24d7d..39f3e9101ed4 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1936,7 +1936,7 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
* NOTE! The "len" and "addr" checks should possibly have
* separate masks.
*/
- if ((rq->data_len & mask) || (addr & mask))
+ if ((rq->data_len & 15) || (addr & mask))
info->dma = 0;
}
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 564974ce5793..96fb9870834a 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -101,6 +101,7 @@ struct mousedev_list {
unsigned char ready, buffer, bufsiz;
unsigned char imexseq, impsseq;
enum mousedev_emul mode;
+ unsigned long last_buttons;
};
#define MOUSEDEV_SEQ_LEN 6
@@ -224,7 +225,7 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
spin_lock_irqsave(&list->packet_lock, flags);
p = &list->packets[list->head];
- if (list->ready && p->buttons != packet->buttons) {
+ if (list->ready && p->buttons != mousedev->packet.buttons) {
unsigned int new_head = (list->head + 1) % PACKET_QUEUE_LEN;
if (new_head != list->tail) {
p = &list->packets[list->head = new_head];
@@ -249,10 +250,13 @@ static void mousedev_notify_readers(struct mousedev *mousedev, struct mousedev_h
p->dz += packet->dz;
p->buttons = mousedev->packet.buttons;
- list->ready = 1;
+ if (p->dx || p->dy || p->dz || p->buttons != list->last_buttons)
+ list->ready = 1;
spin_unlock_irqrestore(&list->packet_lock, flags);
- kill_fasync(&list->fasync, SIGIO, POLL_IN);
+
+ if (list->ready)
+ kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
wake_up_interruptible(&mousedev->wait);
@@ -477,9 +481,10 @@ static void mousedev_packet(struct mousedev_list *list, signed char *ps2_data)
}
if (!p->dx && !p->dy && !p->dz) {
- if (list->tail == list->head)
+ if (list->tail == list->head) {
list->ready = 0;
- else
+ list->last_buttons = p->buttons;
+ } else
list->tail = (list->tail + 1) % PACKET_QUEUE_LEN;
}
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index e654aa5eecd4..bb9f4044c74d 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2421,7 +2421,7 @@ pmac_wakeup_devices(void)
/* Re-enable local CPU interrupts */
local_irq_enable();
- mdelay(100);
+ mdelay(10);
preempt_enable();
/* Re-enable clock spreading on some machines */
@@ -2549,7 +2549,9 @@ powerbook_sleep_Core99(void)
return ret;
}
- printk(KERN_DEBUG "HID1, before: %x\n", mfspr(SPRN_HID1));
+ /* Stop environment and ADB interrupts */
+ pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0);
+ pmu_wait_complete(&req);
/* Tell PMU what events will wake us up */
pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS,
@@ -2611,8 +2613,6 @@ powerbook_sleep_Core99(void)
pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
pmu_wait_complete(&req);
- printk(KERN_DEBUG "HID1, after: %x\n", mfspr(SPRN_HID1));
-
pmac_wakeup_devices();
return 0;
diff --git a/drivers/media/dvb/bt8xx/dst.c b/drivers/media/dvb/bt8xx/dst.c
index d047e349d706..1339912c308b 100644
--- a/drivers/media/dvb/bt8xx/dst.c
+++ b/drivers/media/dvb/bt8xx/dst.c
@@ -906,22 +906,12 @@ static int dst_tone_power_cmd(struct dst_state* state)
if (state->dst_type == DST_TYPE_IS_TERR)
return 0;
- if (state->voltage == SEC_VOLTAGE_OFF)
- paket[4] = 0;
- else
- paket[4] = 1;
-
- if (state->tone == SEC_TONE_ON)
- paket[2] = 0x02;
- else
- paket[2] = 0;
- if (state->minicmd == SEC_MINI_A)
- paket[3] = 0x02;
- else
- paket[3] = 0;
-
+ paket[4] = state->tx_tuna[4];
+ paket[2] = state->tx_tuna[2];
+ paket[3] = state->tx_tuna[3];
paket[7] = dst_check_sum (paket, 7);
dst_command(state, paket, 8);
+
return 0;
}
@@ -980,7 +970,7 @@ static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage);
static int dst_write_tuna(struct dvb_frontend* fe)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
int retval;
u8 reply;
@@ -1048,10 +1038,10 @@ static int dst_write_tuna(struct dvb_frontend* fe)
static int dst_set_diseqc(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
u8 paket[8] = { 0x00, 0x08, 0x04, 0xe0, 0x10, 0x38, 0xf0, 0xec };
- if (state->dst_type == DST_TYPE_IS_TERR)
+ if (state->dst_type != DST_TYPE_IS_SAT)
return 0;
if (cmd->msg_len == 0 || cmd->msg_len > 4)
@@ -1064,39 +1054,32 @@ static int dst_set_diseqc(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd*
static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
{
- u8 *val;
int need_cmd;
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
state->voltage = voltage;
- if (state->dst_type == DST_TYPE_IS_TERR)
+ if (state->dst_type != DST_TYPE_IS_SAT)
return 0;
need_cmd = 0;
- val = &state->tx_tuna[0];
- val[8] &= ~0x40;
switch (voltage) {
- case SEC_VOLTAGE_13:
- if ((state->diseq_flags & HAS_POWER) == 0)
- need_cmd = 1;
- state->diseq_flags |= HAS_POWER;
- break;
+ case SEC_VOLTAGE_13:
+ case SEC_VOLTAGE_18:
+ if ((state->diseq_flags & HAS_POWER) == 0)
+ need_cmd = 1;
+ state->diseq_flags |= HAS_POWER;
+ state->tx_tuna[4] = 0x01;
+ break;
- case SEC_VOLTAGE_18:
- if ((state->diseq_flags & HAS_POWER) == 0)
+ case SEC_VOLTAGE_OFF:
need_cmd = 1;
- state->diseq_flags |= HAS_POWER;
- val[8] |= 0x40;
- break;
-
- case SEC_VOLTAGE_OFF:
- need_cmd = 1;
- state->diseq_flags &= ~(HAS_POWER | HAS_LOCK | ATTEMPT_TUNE);
- break;
+ state->diseq_flags &= ~(HAS_POWER | HAS_LOCK | ATTEMPT_TUNE);
+ state->tx_tuna[4] = 0x00;
+ break;
- default:
- return -EINVAL;
+ default:
+ return -EINVAL;
}
if (need_cmd)
dst_tone_power_cmd(state);
@@ -1106,37 +1089,56 @@ static int dst_set_voltage(struct dvb_frontend* fe, fe_sec_voltage_t voltage)
static int dst_set_tone(struct dvb_frontend* fe, fe_sec_tone_mode_t tone)
{
- u8 *val;
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
state->tone = tone;
- if (state->dst_type == DST_TYPE_IS_TERR)
+ if (state->dst_type != DST_TYPE_IS_SAT)
return 0;
- val = &state->tx_tuna[0];
+ switch (tone) {
+ case SEC_TONE_OFF:
+ state->tx_tuna[2] = 0xff;
+ break;
- val[8] &= ~0x1;
+ case SEC_TONE_ON:
+ state->tx_tuna[2] = 0x02;
+ break;
- switch (tone) {
- case SEC_TONE_OFF:
- break;
+ default:
+ return -EINVAL;
+ }
+ dst_tone_power_cmd(state);
- case SEC_TONE_ON:
- val[8] |= 1;
- break;
+ return 0;
+}
- default:
- return -EINVAL;
+static int dst_send_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t minicmd)
+{
+ struct dst_state *state = fe->demodulator_priv;
+
+ if (state->dst_type != DST_TYPE_IS_SAT)
+ return 0;
+
+ state->minicmd = minicmd;
+
+ switch (minicmd) {
+ case SEC_MINI_A:
+ state->tx_tuna[3] = 0x02;
+ break;
+ case SEC_MINI_B:
+ state->tx_tuna[3] = 0xff;
+ break;
}
dst_tone_power_cmd(state);
return 0;
}
+
static int dst_init(struct dvb_frontend* fe)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
static u8 ini_satci_tuna[] = { 9, 0, 3, 0xb6, 1, 0, 0x73, 0x21, 0, 0 };
static u8 ini_satfta_tuna[] = { 0, 0, 3, 0xb6, 1, 0x55, 0xbd, 0x50, 0, 0 };
static u8 ini_tvfta_tuna[] = { 0, 0, 3, 0xb6, 1, 7, 0x0, 0x0, 0, 0 };
@@ -1168,7 +1170,7 @@ static int dst_init(struct dvb_frontend* fe)
static int dst_read_status(struct dvb_frontend* fe, fe_status_t* status)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
*status = 0;
if (state->diseq_flags & HAS_LOCK) {
@@ -1182,7 +1184,7 @@ static int dst_read_status(struct dvb_frontend* fe, fe_status_t* status)
static int dst_read_signal_strength(struct dvb_frontend* fe, u16* strength)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
dst_get_signal(state);
*strength = state->decode_strength;
@@ -1192,7 +1194,7 @@ static int dst_read_signal_strength(struct dvb_frontend* fe, u16* strength)
static int dst_read_snr(struct dvb_frontend* fe, u16* snr)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
dst_get_signal(state);
*snr = state->decode_snr;
@@ -1202,7 +1204,7 @@ static int dst_read_snr(struct dvb_frontend* fe, u16* snr)
static int dst_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
dst_set_freq(state, p->frequency);
if (verbose > 4)
@@ -1228,7 +1230,7 @@ static int dst_set_frontend(struct dvb_frontend* fe, struct dvb_frontend_paramet
static int dst_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_parameters *p)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
p->frequency = state->decode_freq;
p->inversion = state->inversion;
@@ -1248,7 +1250,7 @@ static int dst_get_frontend(struct dvb_frontend* fe, struct dvb_frontend_paramet
static void dst_release(struct dvb_frontend* fe)
{
- struct dst_state* state = (struct dst_state*) fe->demodulator_priv;
+ struct dst_state* state = fe->demodulator_priv;
kfree(state);
}
@@ -1346,7 +1348,7 @@ static struct dvb_frontend_ops dst_dvbs_ops = {
.read_signal_strength = dst_read_signal_strength,
.read_snr = dst_read_snr,
- .diseqc_send_burst = dst_set_tone,
+ .diseqc_send_burst = dst_send_burst,
.diseqc_send_master_cmd = dst_set_diseqc,
.set_voltage = dst_set_voltage,
.set_tone = dst_set_tone,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index f10dd74988c4..fc9b5cd957aa 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -133,6 +133,8 @@
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
+#define TG3_NUM_TEST 6
+
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -316,6 +318,17 @@ static struct {
{ "nic_tx_threshold_hit" }
};
+static struct {
+ const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[TG3_NUM_TEST] = {
+ { "nvram test (online) " },
+ { "link test (online) " },
+ { "register test (offline)" },
+ { "memory test (offline)" },
+ { "loopback test (offline)" },
+ { "interrupt test (offline)" },
+};
+
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
@@ -3070,7 +3083,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
}
static int tg3_init_hw(struct tg3 *);
-static int tg3_halt(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
@@ -3094,7 +3107,7 @@ static void tg3_reset_task(void *_data)
restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
- tg3_halt(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
tg3_init_hw(tp);
tg3_netif_start(tp);
@@ -3440,7 +3453,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_set_mtu(dev, tp, new_mtu);
@@ -4131,19 +4144,19 @@ static void tg3_stop_fw(struct tg3 *tp)
}
/* tp->lock is held. */
-static int tg3_halt(struct tg3 *tp, int silent)
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
int err;
tg3_stop_fw(tp);
- tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
+ tg3_write_sig_pre_reset(tp, kind);
tg3_abort_hw(tp, silent);
err = tg3_chip_reset(tp);
- tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
- tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+ tg3_write_sig_legacy(tp, kind);
+ tg3_write_sig_post_reset(tp, kind);
if (err)
return err;
@@ -4357,7 +4370,12 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
*/
tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+ /* It is possible that bootcode is still loading at this point.
+ * Get the nvram lock first before halting the cpu.
+ */
+ tg3_nvram_lock(tp);
err = tg3_halt_cpu(tp, cpu_base);
+ tg3_nvram_unlock(tp);
if (err)
goto out;
@@ -5881,6 +5899,9 @@ static int tg3_test_interrupt(struct tg3 *tp)
int err, i;
u32 int_mbox = 0;
+ if (!netif_running(dev))
+ return -ENODEV;
+
tg3_disable_ints(tp);
free_irq(tp->pdev->irq, dev);
@@ -5984,7 +6005,7 @@ static int tg3_test_msi(struct tg3 *tp)
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_init_hw(tp);
spin_unlock(&tp->tx_lock);
@@ -6060,7 +6081,7 @@ static int tg3_open(struct net_device *dev)
err = tg3_init_hw(tp);
if (err) {
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
} else {
if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
@@ -6104,7 +6125,7 @@ static int tg3_open(struct net_device *dev)
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
}
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
tg3_free_consistent(tp);
@@ -6377,7 +6398,7 @@ static int tg3_close(struct net_device *dev)
tg3_disable_ints(tp);
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
tp->tg3_flags &=
~(TG3_FLAG_INIT_COMPLETE |
@@ -7097,7 +7118,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tp->tx_pending = ering->tx_pending;
if (netif_running(dev)) {
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_init_hw(tp);
tg3_netif_start(tp);
}
@@ -7140,7 +7161,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
if (netif_running(dev)) {
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_init_hw(tp);
tg3_netif_start(tp);
}
@@ -7199,12 +7220,20 @@ static int tg3_get_stats_count (struct net_device *dev)
return TG3_NUM_STATS;
}
+static int tg3_get_test_count (struct net_device *dev)
+{
+ return TG3_NUM_TEST;
+}
+
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
+ case ETH_SS_TEST:
+ memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+ break;
default:
WARN_ON(1); /* we need a WARN() */
break;
@@ -7218,6 +7247,516 @@ static void tg3_get_ethtool_stats (struct net_device *dev,
memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
+#define NVRAM_TEST_SIZE 0x100
+
+static int tg3_test_nvram(struct tg3 *tp)
+{
+ u32 *buf, csum;
+ int i, j, err = 0;
+
+ buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) {
+ u32 val;
+
+ if ((err = tg3_nvram_read(tp, i, &val)) != 0)
+ break;
+ buf[j] = cpu_to_le32(val);
+ }
+ if (i < NVRAM_TEST_SIZE)
+ goto out;
+
+ err = -EIO;
+ if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC)
+ goto out;
+
+ /* Bootstrap checksum at offset 0x10 */
+ csum = calc_crc((unsigned char *) buf, 0x10);
+ if(csum != cpu_to_le32(buf[0x10/4]))
+ goto out;
+
+ /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+ csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+ if (csum != cpu_to_le32(buf[0xfc/4]))
+ goto out;
+
+ err = 0;
+
+out:
+ kfree(buf);
+ return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC 2
+#define TG3_COPPER_TIMEOUT_SEC 6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+ int i, max;
+
+ if (!netif_running(tp->dev))
+ return -ENODEV;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
+ max = TG3_SERDES_TIMEOUT_SEC;
+ else
+ max = TG3_COPPER_TIMEOUT_SEC;
+
+ for (i = 0; i < max; i++) {
+ if (netif_carrier_ok(tp->dev))
+ return 0;
+
+ if (msleep_interruptible(1000))
+ break;
+ }
+
+ return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+ int i, is_5705;
+ u32 offset, read_mask, write_mask, val, save_val, read_val;
+ static struct {
+ u16 offset;
+ u16 flags;
+#define TG3_FL_5705 0x1
+#define TG3_FL_NOT_5705 0x2
+#define TG3_FL_NOT_5788 0x4
+ u32 read_mask;
+ u32 write_mask;
+ } reg_tbl[] = {
+ /* MAC Control Registers */
+ { MAC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00ef6f8c },
+ { MAC_MODE, TG3_FL_5705,
+ 0x00000000, 0x01ef6b8c },
+ { MAC_STATUS, TG3_FL_NOT_5705,
+ 0x03800107, 0x00000000 },
+ { MAC_STATUS, TG3_FL_5705,
+ 0x03800100, 0x00000000 },
+ { MAC_ADDR_0_HIGH, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_ADDR_0_LOW, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_RX_MTU_SIZE, 0x0000,
+ 0x00000000, 0x0000ffff },
+ { MAC_TX_MODE, 0x0000,
+ 0x00000000, 0x00000070 },
+ { MAC_TX_LENGTHS, 0x0000,
+ 0x00000000, 0x00003fff },
+ { MAC_RX_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x000007fc },
+ { MAC_RX_MODE, TG3_FL_5705,
+ 0x00000000, 0x000007dc },
+ { MAC_HASH_REG_0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_1, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_2, 0x0000,
+ 0x00000000, 0xffffffff },
+ { MAC_HASH_REG_3, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive Data and Receive BD Initiator Control Registers. */
+ { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000003 },
+ { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+0, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { RCVDBDI_STD_BD+8, 0x0000,
+ 0x00000000, 0xffff0002 },
+ { RCVDBDI_STD_BD+0xc, 0x0000,
+ 0x00000000, 0xffffffff },
+
+ /* Receive BD Initiator Control Registers. */
+ { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { RCVBDI_STD_THRESH, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+
+ /* Host Coalescing Control Registers. */
+ { HOSTCC_MODE, TG3_FL_NOT_5705,
+ 0x00000000, 0x00000004 },
+ { HOSTCC_MODE, TG3_FL_5705,
+ 0x00000000, 0x000000f6 },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+ 0x00000000, 0x000003ff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+ 0x00000000, 0x000000ff },
+ { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+ 0x00000000, 0xffffffff },
+ { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+ { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+ 0xffffffff, 0x00000000 },
+
+ /* Buffer Manager Control Registers. */
+ { BUFMGR_MB_POOL_ADDR, 0x0000,
+ 0x00000000, 0x007fff80 },
+ { BUFMGR_MB_POOL_SIZE, 0x0000,
+ 0x00000000, 0x007fffff },
+ { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+ 0x00000000, 0x0000003f },
+ { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_MB_HIGH_WATER, 0x0000,
+ 0x00000000, 0x000001ff },
+ { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+ { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+ 0xffffffff, 0x00000000 },
+
+ /* Mailbox Registers */
+ { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+ 0x00000000, 0x000001ff },
+ { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+ 0x00000000, 0x000007ff },
+ { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+ 0x00000000, 0x000001ff },
+
+ { 0xffff, 0x0000, 0x00000000, 0x00000000 },
+ };
+
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ is_5705 = 1;
+ else
+ is_5705 = 0;
+
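+	/* Walk the table, skipping entries that do not apply to this chip,
+	 * and exercise each register with all-zeros and then with every
+	 * testable bit set.
+	 */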
+ for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+ if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+ continue;
+
+ if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+ continue;
+
+ if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+ (reg_tbl[i].flags & TG3_FL_NOT_5788))
+ continue;
+
+ offset = (u32) reg_tbl[i].offset;
+ read_mask = reg_tbl[i].read_mask;
+ write_mask = reg_tbl[i].write_mask;
+
+ /* Save the original register content */
+ save_val = tr32(offset);
+
+ /* Determine the read-only value. */
+ read_val = save_val & read_mask;
+
+ /* Write zero to the register, then make sure the read-only bits
+ * are not changed and the read/write bits are all zeros.
+ */
+ tw32(offset, 0);
+
+ val = tr32(offset);
+
+ /* Test the read-only and read/write bits. */
+ if (((val & read_mask) != read_val) || (val & write_mask))
+ goto out;
+
+		/* Write ones to all the bits defined by read_mask and
+		 * write_mask, then make sure the read-only bits are not
+		 * changed and the read/write bits are all ones.
+		 */
+ tw32(offset, read_mask | write_mask);
+
+ val = tr32(offset);
+
+ /* Test the read-only bits. */
+ if ((val & read_mask) != read_val)
+ goto out;
+
+ /* Test the read/write bits. */
+ if ((val & write_mask) != write_mask)
+ goto out;
+
+ tw32(offset, save_val);
+ }
+
+ return 0;
+
+out:
+ printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
+ tw32(offset, save_val);
+ return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+ static u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+ int i;
+ u32 j;
+
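+	/* Write each pattern across the whole window, reading back and
+	 * comparing one 32-bit word at a time.
+	 */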
+ for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+ for (j = 0; j < len; j += 4) {
+ u32 val;
+
+ tg3_write_mem(tp, offset + j, test_pattern[i]);
+ tg3_read_mem(tp, offset + j, &val);
+ if (val != test_pattern[i])
+ return -EIO;
+ }
+ }
+ return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+ static struct mem_entry {
+ u32 offset;
+ u32 len;
+ } mem_tbl_570x[] = {
+ { 0x00000000, 0x01000},
+ { 0x00002000, 0x1c000},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_5705[] = {
+ { 0x00000100, 0x0000c},
+ { 0x00000200, 0x00008},
+ { 0x00000b50, 0x00400},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x01000},
+ { 0x00008000, 0x02000},
+ { 0x00010000, 0x0e000},
+ { 0xffffffff, 0x00000}
+ };
+ struct mem_entry *mem_tbl;
+ int err = 0;
+ int i;
+
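+	/* 5705-class chips expose a different internal memory layout, so
+	 * pick the matching table of regions to test.
+	 */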
+ if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ mem_tbl = mem_tbl_5705;
+ else
+ mem_tbl = mem_tbl_570x;
+
+ for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+ if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
+ mem_tbl[i].len)) != 0)
+ break;
+ }
+
+ return err;
+}
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+ u32 mac_mode, send_idx, rx_start_idx, rx_idx, tx_idx, opaque_key;
+ u32 desc_idx;
+ struct sk_buff *skb, *rx_skb;
+ u8 *tx_data;
+ dma_addr_t map;
+ int num_pkts, tx_len, rx_len, i, err;
+ struct tg3_rx_buffer_desc *desc;
+
+ if (!netif_running(tp->dev))
+ return -ENODEV;
+
+ err = -EIO;
+
+ tg3_abort_hw(tp, 1);
+
+	/* Clear this flag to keep interrupts disabled across the reset */
+ tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+ tg3_reset_hw(tp);
+
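+	/* Put the MAC into internal GMII loopback so the transmitted test
+	 * frame is routed straight back to the receiver.
+	 */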
+ mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
+ MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
+ MAC_MODE_PORT_MODE_GMII;
+ tw32(MAC_MODE, mac_mode);
+
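+	/* Build a maximum-size test frame: our own address as destination,
+	 * zeroed source/type bytes, then an incrementing byte pattern.
+	 */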
+ tx_len = 1514;
+	skb = dev_alloc_skb(tx_len);
+	if (!skb)
+		return -ENOMEM;
+ tx_data = skb_put(skb, tx_len);
+ memcpy(tx_data, tp->dev->dev_addr, 6);
+ memset(tx_data + 6, 0x0, 8);
+
+ tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+
+ for (i = 14; i < tx_len; i++)
+ tx_data[i] = (u8) (i & 0xff);
+
+ map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ HOSTCC_MODE_NOW);
+
+ udelay(10);
+
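+	/* Remember the RX producer index so we can tell when the
+	 * looped-back frame arrives.
+	 */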
+ rx_start_idx = tp->hw_status->idx[0].rx_producer;
+
+ send_idx = 0;
+ num_pkts = 0;
+
+ tg3_set_txd(tp, send_idx, map, tx_len, 0, 1);
+
+ send_idx++;
+ num_pkts++;
+
+ tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, send_idx);
+ tr32(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
+
+ udelay(10);
+
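+	/* Poll briefly for the TX completion and the looped-back frame to
+	 * show up in the status block.
+	 */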
+ for (i = 0; i < 10; i++) {
+ tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+ HOSTCC_MODE_NOW);
+
+ udelay(10);
+
+ tx_idx = tp->hw_status->idx[0].tx_consumer;
+ rx_idx = tp->hw_status->idx[0].rx_producer;
+ if ((tx_idx == send_idx) &&
+ (rx_idx == (rx_start_idx + num_pkts)))
+ break;
+ }
+
+ pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+ dev_kfree_skb(skb);
+
+ if (tx_idx != send_idx)
+ goto out;
+
+ if (rx_idx != rx_start_idx + num_pkts)
+ goto out;
+
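+	/* Check the RX descriptor: it must come from the standard ring,
+	 * carry no fatal error bits and match the transmitted length.
+	 */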
+ desc = &tp->rx_rcb[rx_start_idx];
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+ if (opaque_key != RXD_OPAQUE_RING_STD)
+ goto out;
+
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+ goto out;
+
+ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
+ if (rx_len != tx_len)
+ goto out;
+
+ rx_skb = tp->rx_std_buffers[desc_idx].skb;
+
+ map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
+ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+
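+	/* Verify the payload pattern byte for byte. */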
+ for (i = 14; i < tx_len; i++) {
+ if (*(rx_skb->data + i) != (u8) (i & 0xff))
+ goto out;
+ }
+ err = 0;
+
+ /* tg3_free_rings will unmap and free the rx_skb */
+out:
+ return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+ u64 *data)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
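+	/* The NVRAM and link tests are non-disruptive and always run. */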
+ if (tg3_test_nvram(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[0] = 1;
+ }
+ if (tg3_test_link(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[1] = 1;
+ }
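+	/* The remaining tests halt the chip, so they only run when an
+	 * offline test was requested.
+	 */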
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (netif_running(dev))
+ tg3_netif_stop(tp);
+
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
+ tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+ tg3_nvram_lock(tp);
+ tg3_halt_cpu(tp, RX_CPU_BASE);
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ tg3_halt_cpu(tp, TX_CPU_BASE);
+ tg3_nvram_unlock(tp);
+
+ if (tg3_test_registers(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[2] = 1;
+ }
+ if (tg3_test_memory(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[3] = 1;
+ }
+ if (tg3_test_loopback(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[4] = 1;
+ }
+
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
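+		/* The interrupt test needs interrupts enabled, so run it
+		 * with the locks dropped.
+		 */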
+ if (tg3_test_interrupt(tp) != 0) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ data[5] = 1;
+ }
+ spin_lock_irq(&tp->lock);
+ spin_lock(&tp->tx_lock);
+
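+		/* Reset the chip once more and, if the interface was running,
+		 * reinitialise the hardware and restart the queue.
+		 */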
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ if (netif_running(dev)) {
+ tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_init_hw(tp);
+ tg3_netif_start(tp);
+ }
+ spin_unlock(&tp->tx_lock);
+ spin_unlock_irq(&tp->lock);
+ }
+}
+
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mii_ioctl_data *data = if_mii(ifr);
@@ -7331,6 +7870,8 @@ static struct ethtool_ops tg3_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = tg3_set_tso,
#endif
+ .self_test_count = tg3_get_test_count,
+ .self_test = tg3_self_test,
.get_strings = tg3_get_strings,
.get_stats_count = tg3_get_stats_count,
.get_ethtool_stats = tg3_get_ethtool_stats,
@@ -9478,7 +10019,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
pci_save_state(tp->pdev);
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
}
err = tg3_test_dma(tp);
@@ -9605,7 +10146,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
spin_lock_irq(&tp->lock);
spin_lock(&tp->tx_lock);
- tg3_halt(tp, 1);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
spin_unlock(&tp->tx_lock);
spin_unlock_irq(&tp->lock);
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index da5bd33d982d..fc5263c6b102 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -32,6 +32,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/dma-mapping.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -289,6 +290,8 @@ static void ahci_host_stop(struct ata_host_set *host_set)
{
struct ahci_host_priv *hpriv = host_set->private_data;
kfree(hpriv);
+
+ ata_host_stop(host_set);
}
static int ahci_port_start(struct ata_port *ap)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 550c9921691a..7c02b7dc7098 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -2488,7 +2488,7 @@ ahd_linux_dv_thread(void *data)
sprintf(current->comm, "ahd_dv_%d", ahd->unit);
#else
daemonize("ahd_dv_%d", ahd->unit);
- current->flags |= PF_FREEZE;
+ current->flags |= PF_NOFREEZE;
#endif
unlock_kernel();
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 3867f91ef8c7..54c52349adc5 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -153,6 +153,7 @@ static struct ata_port_operations piix_pata_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_operations piix_sata_ops = {
@@ -180,6 +181,7 @@ static struct ata_port_operations piix_sata_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info piix_port_info[] = {
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 63d3f70d06e1..30a88f0e7bd6 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -3322,6 +3322,13 @@ void ata_port_stop (struct ata_port *ap)
dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
}
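+/* Generic host_stop: unmap the host set's MMIO base, if any. */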
+void ata_host_stop (struct ata_host_set *host_set)
+{
+ if (host_set->mmio_base)
+ iounmap(host_set->mmio_base);
+}
+
+
/**
* ata_host_remove - Unregister SCSI host structure with upper layers
* @ap: Port to unregister
@@ -3878,10 +3885,6 @@ void ata_pci_remove_one (struct pci_dev *pdev)
}
free_irq(host_set->irq, host_set);
- if (host_set->ops->host_stop)
- host_set->ops->host_stop(host_set);
- if (host_set->mmio_base)
- iounmap(host_set->mmio_base);
for (i = 0; i < host_set->n_ports; i++) {
ap = host_set->ports[i];
@@ -3900,6 +3903,9 @@ void ata_pci_remove_one (struct pci_dev *pdev)
scsi_host_put(ap->host);
}
+ if (host_set->ops->host_stop)
+ host_set->ops->host_stop(host_set);
+
kfree(host_set);
pci_release_regions(pdev);
@@ -3997,6 +4003,7 @@ EXPORT_SYMBOL_GPL(ata_chk_err);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
+EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 6518226b8f87..d90430bbb0de 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -26,7 +26,7 @@
#define __LIBATA_H__
#define DRV_NAME "libata"
-#define DRV_VERSION "1.10" /* must be exactly four chars */
+#define DRV_VERSION "1.11" /* must be exactly four chars */
struct ata_scsi_args {
u16 *id;
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 69009f853a49..b0403ccd8a25 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -329,6 +329,8 @@ static void nv_host_stop (struct ata_host_set *host_set)
host->host_desc->disable_hotplug(host_set);
kfree(host);
+
+ ata_host_stop(host_set);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index c4e9e0298122..b18c90582e67 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -122,6 +122,7 @@ static struct ata_port_operations pdc_ata_ops = {
.scr_write = pdc_sata_scr_write,
.port_start = pdc_port_start,
.port_stop = pdc_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index dfd362104717..1383e8a28d72 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -536,6 +536,8 @@ static void qs_host_stop(struct ata_host_set *host_set)
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
+
+ ata_host_stop(host_set);
}
static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 2b2ff48be396..238580d244e6 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -161,6 +161,7 @@ static struct ata_port_operations sil_ops = {
.scr_write = sil_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info sil_port_info[] = {
diff --git a/drivers/scsi/sata_sis.c b/drivers/scsi/sata_sis.c
index 5105ddd08447..e418b89c6b9d 100644
--- a/drivers/scsi/sata_sis.c
+++ b/drivers/scsi/sata_sis.c
@@ -114,6 +114,7 @@ static struct ata_port_operations sis_ops = {
.scr_write = sis_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info sis_port_info = {
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 05075bd3a893..edef1fa969fc 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -313,6 +313,7 @@ static struct ata_port_operations k2_sata_ops = {
.scr_write = k2_sata_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 70118650c461..140cea05de3f 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -245,6 +245,8 @@ static void pdc20621_host_stop(struct ata_host_set *host_set)
iounmap(dimm_mmio);
kfree(hpriv);
+
+ ata_host_stop(host_set);
}
static int pdc_port_start(struct ata_port *ap)
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index 0bff4f475f26..a71fb54eebd3 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -113,6 +113,7 @@ static struct ata_port_operations uli_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info uli_port_info = {
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 3a7830667277..f43183c19a12 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -134,6 +134,7 @@ static struct ata_port_operations svia_sata_ops = {
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static struct ata_port_info svia_port_info = {
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 2c28f0ad73c2..c5e09dc6f3de 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -230,6 +231,7 @@ static struct ata_port_operations vsc_sata_ops = {
.scr_write = vsc_sata_scr_write,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
+ .host_stop = ata_host_stop,
};
static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
diff --git a/drivers/usb/media/pwc/pwc-ctrl.c b/drivers/usb/media/pwc/pwc-ctrl.c
index 3e1e4fe20d85..53099190952c 100644
--- a/drivers/usb/media/pwc/pwc-ctrl.c
+++ b/drivers/usb/media/pwc/pwc-ctrl.c
@@ -48,8 +48,6 @@
#include "pwc-uncompress.h"
#include "pwc-kiara.h"
#include "pwc-timon.h"
-#include "pwc-dec1.h"
-#include "pwc-dec23.h"
/* Request types: video */
#define SET_LUM_CTL 0x01
diff --git a/drivers/usb/media/pwc/pwc-uncompress.c b/drivers/usb/media/pwc/pwc-uncompress.c
index c596083f06ba..bc3b1635eab0 100644
--- a/drivers/usb/media/pwc/pwc-uncompress.c
+++ b/drivers/usb/media/pwc/pwc-uncompress.c
@@ -29,8 +29,6 @@
#include "pwc.h"
#include "pwc-uncompress.h"
-#include "pwc-dec1.h"
-#include "pwc-dec23.h"
int pwc_decompress(struct pwc_device *pdev)
{
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 549e22939260..25f9a9a65c24 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -228,17 +228,17 @@ MODULE_DESCRIPTION(
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
-static int accel __initdata = 1;
-static int vram __initdata = 4;
-static int hwcursor __initdata = 1;
-static int mtrr __initdata = 1;
-static int fixed __initdata = 0;
-static int noinit __initdata = 0;
-static int noregister __initdata = 0;
-static int probeonly __initdata = 0;
-static int idonly __initdata = 0;
-static int bailearly __initdata = 0;
-static char *mode __initdata = NULL;
+static int accel = 1;
+static int vram = 4;
+static int hwcursor = 1;
+static int mtrr = 1;
+static int fixed = 0;
+static int noinit = 0;
+static int noregister = 0;
+static int probeonly = 0;
+static int idonly = 0;
+static int bailearly = 0;
+static char *mode = NULL;
module_param(accel, bool, S_IRUGO);
MODULE_PARM_DESC(accel, "Enable console acceleration");