| author | Linus Torvalds | 2008-01-31 00:40:09 +1100 |
|---|---|---|
| committer | Linus Torvalds | 2008-01-31 00:40:09 +1100 |
| commit | dd430ca20c40ecccd6954a7efd13d4398f507728 (patch) | |
| tree | b65089436d17b2bcc6054ede2e335a821b50007f /drivers | |
| parent | 60e233172eabdd1f831bd48631b9626ce2279d9b (diff) | |
| parent | afadcd788f37bfa62d92662e54a720c26c91becf (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86: (890 commits)
x86: fix nodemap_size according to nodeid bits
x86: fix overlap between pagetable with bss section
x86: add PCI IDs to k8topology_64.c
x86: fix early_ioremap pagetable ops
x86: use the same pgd_list for PAE and 64-bit
x86: defer cr3 reload when doing pud_clear()
x86: early boot debugging via FireWire (ohci1394_dma=early)
x86: don't special-case pmd allocations as much
x86: shrink some ifdefs in fault.c
x86: ignore spurious faults
x86: remove nx_enabled from fault.c
x86: unify fault_32|64.c
x86: unify fault_32|64.c with ifdefs
x86: unify fault_32|64.c by ifdef'd function bodies
x86: arch/x86/mm/init_32.c printk fixes
x86: arch/x86/mm/init_32.c cleanup
x86: arch/x86/mm/init_64.c printk fixes
x86: unify ioremap
x86: fixes some bugs about EFI memory map handling
x86: use reboot_type on EFI 32
...
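
One entry above, "x86: early boot debugging via FireWire (ohci1394_dma=early)", is backed on the drivers side by the new drivers/ieee1394/init_ohci1394_dma.c added in the diff below. As a hedged sketch only (the x86 call site is not part of this drivers diff), arch setup code is expected to consume the exported flag roughly as follows; the helper name example_early_firewire_debug_setup() is made up for illustration:

```c
/*
 * Illustrative only: how early arch setup code could consume the flag
 * exported by drivers/ieee1394/init_ohci1394_dma.c.  The actual x86 call
 * site is outside this drivers-only diff, so treat this as a sketch, not
 * the merged implementation.
 */
#include <linux/init_ohci1394_dma.h>	/* init_ohci1394_dma_early,
					   init_ohci1394_dma_on_all_controllers() */

static void __init example_early_firewire_debug_setup(void)
{
	/* The flag is set by passing ohci1394_dma=early on the command line. */
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
}
```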
Diffstat (limited to 'drivers')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | drivers/Makefile | 2 |
| -rw-r--r-- | drivers/acpi/processor_idle.c | 34 |
| -rw-r--r-- | drivers/char/agp/ali-agp.c | 2 |
| -rw-r--r-- | drivers/char/agp/backend.c | 3 |
| -rw-r--r-- | drivers/char/agp/generic.c | 3 |
| -rw-r--r-- | drivers/char/agp/i460-agp.c | 2 |
| -rw-r--r-- | drivers/char/agp/intel-agp.c | 11 |
| -rw-r--r-- | drivers/char/hpet.c | 126 |
| -rw-r--r-- | drivers/char/rtc.c | 253 |
| -rw-r--r-- | drivers/cpufreq/cpufreq.c | 2 |
| -rw-r--r-- | drivers/firmware/dmi_scan.c | 26 |
| -rw-r--r-- | drivers/ieee1394/Makefile | 1 |
| -rw-r--r-- | drivers/ieee1394/init_ohci1394_dma.c | 285 |
| -rw-r--r-- | drivers/input/mouse/pc110pad.c | 7 |
| -rw-r--r-- | drivers/kvm/svm.c | 2 |
| -rw-r--r-- | drivers/kvm/vmx.c | 8 |
| -rw-r--r-- | drivers/lguest/x86/core.c | 4 |
| -rw-r--r-- | drivers/pnp/pnpbios/bioscalls.c | 5 |
| -rw-r--r-- | drivers/video/vermilion/vermilion.c | 15 |
19 files changed, 550 insertions, 241 deletions
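
Several of the hunks below (intel-agp.c, vermilion.c, and the removed global_flush_tlb() calls in the other AGP drivers) follow one conversion pattern: the old change_page_attr() plus global_flush_tlb() pair is replaced by set_pages_uc()/set_pages_wb(), which perform the required flushing internally. A minimal standalone sketch of that pattern follows; the helper names example_alloc_uncached()/example_free_uncached() are hypothetical and not code from this merge:

```c
/*
 * Sketch of the change_page_attr()+global_flush_tlb() -> set_pages_uc()/
 * set_pages_wb() conversion pattern used by the driver hunks below.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>	/* set_pages_uc(), set_pages_wb() (x86) */

static void *example_alloc_uncached(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;

	/* Mark the pages uncacheable; no explicit global_flush_tlb() needed. */
	if (set_pages_uc(page, 1 << order) < 0) {
		__free_pages(page, order);
		return NULL;
	}
	return page_address(page);
}

static void example_free_uncached(void *addr, unsigned int order)
{
	struct page *page = virt_to_page(addr);

	/* Restore the default write-back caching before freeing the pages. */
	set_pages_wb(page, 1 << order);
	__free_pages(page, order);
}
```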
diff --git a/drivers/Makefile b/drivers/Makefile index 8cb37e3557d4..d92d4d82d001 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -38,7 +38,7 @@ obj-$(CONFIG_SCSI) += scsi/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_FUSION) += message/ obj-$(CONFIG_FIREWIRE) += firewire/ -obj-$(CONFIG_IEEE1394) += ieee1394/ +obj-y += ieee1394/ obj-$(CONFIG_UIO) += uio/ obj-y += cdrom/ obj-y += auxdisplay/ diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 2235f4e02d26..eb1f82f79153 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -357,6 +357,26 @@ int acpi_processor_resume(struct acpi_device * device) return 0; } +#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) +static int tsc_halts_in_c(int state) +{ + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + /* + * AMD Fam10h TSC will tick in all + * C/P/S0/S1 states when this bit is set. + */ + if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) + return 0; + /*FALL THROUGH*/ + case X86_VENDOR_INTEL: + /* Several cases known where TSC halts in C2 too */ + default: + return state > ACPI_STATE_C1; + } +} +#endif + #ifndef CONFIG_CPU_IDLE static void acpi_processor_idle(void) { @@ -516,7 +536,8 @@ static void acpi_processor_idle(void) #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) /* TSC halts in C2, so notify users */ - mark_tsc_unstable("possible TSC halt in C2"); + if (tsc_halts_in_c(ACPI_STATE_C2)) + mark_tsc_unstable("possible TSC halt in C2"); #endif /* Compute time (ticks) that we were actually asleep */ sleep_ticks = ticks_elapsed(t1, t2); @@ -534,6 +555,7 @@ static void acpi_processor_idle(void) break; case ACPI_STATE_C3: + acpi_unlazy_tlb(smp_processor_id()); /* * Must be done before busmaster disable as we might * need to access HPET ! @@ -579,7 +601,8 @@ static void acpi_processor_idle(void) #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) /* TSC halts in C3, so notify users */ - mark_tsc_unstable("TSC halts in C3"); + if (tsc_halts_in_c(ACPI_STATE_C3)) + mark_tsc_unstable("TSC halts in C3"); #endif /* Compute time (ticks) that we were actually asleep */ sleep_ticks = ticks_elapsed(t1, t2); @@ -1423,6 +1446,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, return 0; } + acpi_unlazy_tlb(smp_processor_id()); /* * Must be done before busmaster disable as we might need to * access HPET ! 
@@ -1443,7 +1467,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) /* TSC could halt in idle, so notify users */ - mark_tsc_unstable("TSC halts in idle");; + if (tsc_halts_in_c(cx->type)) + mark_tsc_unstable("TSC halts in idle");; #endif sleep_ticks = ticks_elapsed(t1, t2); @@ -1554,7 +1579,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) /* TSC could halt in idle, so notify users */ - mark_tsc_unstable("TSC halts in idle"); + if (tsc_halts_in_c(ACPI_STATE_C3)) + mark_tsc_unstable("TSC halts in idle"); #endif sleep_ticks = ticks_elapsed(t1, t2); /* Tell the scheduler how much we idled: */ diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c index aa5ddb716ffb..1ffb381130c3 100644 --- a/drivers/char/agp/ali-agp.c +++ b/drivers/char/agp/ali-agp.c @@ -145,7 +145,6 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge) void *addr = agp_generic_alloc_page(agp_bridge); u32 temp; - global_flush_tlb(); if (!addr) return NULL; @@ -162,7 +161,6 @@ static void ali_destroy_page(void * addr, int flags) if (flags & AGP_PAGE_DESTROY_UNMAP) { global_cache_flush(); /* is this really needed? --hch */ agp_generic_destroy_page(addr, flags); - global_flush_tlb(); } else agp_generic_destroy_page(addr, flags); } diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c index 832ded20fe70..2720882e66fe 100644 --- a/drivers/char/agp/backend.c +++ b/drivers/char/agp/backend.c @@ -147,7 +147,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge) printk(KERN_ERR PFX "unable to get memory for scratch page.\n"); return -ENOMEM; } - flush_agp_mappings(); bridge->scratch_page_real = virt_to_gart(addr); bridge->scratch_page = @@ -191,7 +190,6 @@ err_out: if (bridge->driver->needs_scratch_page) { bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), AGP_PAGE_DESTROY_UNMAP); - flush_agp_mappings(); bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), AGP_PAGE_DESTROY_FREE); } @@ -219,7 +217,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge) bridge->driver->needs_scratch_page) { bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), AGP_PAGE_DESTROY_UNMAP); - flush_agp_mappings(); bridge->driver->agp_destroy_page(gart_to_virt(bridge->scratch_page_real), AGP_PAGE_DESTROY_FREE); } diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index 64b2f6d7059d..1a4674ce0c71 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -197,7 +197,6 @@ void agp_free_memory(struct agp_memory *curr) for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP); } - flush_agp_mappings(); for (i = 0; i < curr->page_count; i++) { curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE); } @@ -267,8 +266,6 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge, } new->bridge = bridge; - flush_agp_mappings(); - return new; } EXPORT_SYMBOL(agp_allocate_memory); diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c index e72a83e2bad5..76f581c85a7d 100644 --- a/drivers/char/agp/i460-agp.c +++ b/drivers/char/agp/i460-agp.c @@ -527,7 +527,6 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge) if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { page = agp_generic_alloc_page(agp_bridge); - 
global_flush_tlb(); } else /* Returning NULL would cause problems */ /* AK: really dubious code. */ @@ -539,7 +538,6 @@ static void i460_destroy_page (void *page, int flags) { if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) { agp_generic_destroy_page(page, flags); - global_flush_tlb(); } } diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 03eac1eb8e0f..189efb6ef970 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -210,13 +210,11 @@ static void *i8xx_alloc_pages(void) if (page == NULL) return NULL; - if (change_page_attr(page, 4, PAGE_KERNEL_NOCACHE) < 0) { - change_page_attr(page, 4, PAGE_KERNEL); - global_flush_tlb(); + if (set_pages_uc(page, 4) < 0) { + set_pages_wb(page, 4); __free_pages(page, 2); return NULL; } - global_flush_tlb(); get_page(page); atomic_inc(&agp_bridge->current_memory_agp); return page_address(page); @@ -230,8 +228,7 @@ static void i8xx_destroy_pages(void *addr) return; page = virt_to_page(addr); - change_page_attr(page, 4, PAGE_KERNEL); - global_flush_tlb(); + set_pages_wb(page, 4); put_page(page); __free_pages(page, 2); atomic_dec(&agp_bridge->current_memory_agp); @@ -341,7 +338,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type) switch (pg_count) { case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge); - global_flush_tlb(); break; case 4: /* kludge to get 4 physical pages for ARGB cursor */ @@ -404,7 +400,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr) else { agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), AGP_PAGE_DESTROY_UNMAP); - global_flush_tlb(); agp_bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[0]), AGP_PAGE_DESTROY_FREE); } diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 4c16778e3f84..465ad35ed38f 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -600,63 +600,6 @@ static int hpet_is_known(struct hpet_data *hdp) return 0; } -EXPORT_SYMBOL(hpet_alloc); -EXPORT_SYMBOL(hpet_register); -EXPORT_SYMBOL(hpet_unregister); -EXPORT_SYMBOL(hpet_control); - -int hpet_register(struct hpet_task *tp, int periodic) -{ - unsigned int i; - u64 mask; - struct hpet_timer __iomem *timer; - struct hpet_dev *devp; - struct hpets *hpetp; - - switch (periodic) { - case 1: - mask = Tn_PER_INT_CAP_MASK; - break; - case 0: - mask = 0; - break; - default: - return -EINVAL; - } - - tp->ht_opaque = NULL; - - spin_lock_irq(&hpet_task_lock); - spin_lock(&hpet_lock); - - for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) - for (timer = hpetp->hp_hpet->hpet_timers, i = 0; - i < hpetp->hp_ntimer; i++, timer++) { - if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK) - != mask) - continue; - - devp = &hpetp->hp_dev[i]; - - if (devp->hd_flags & HPET_OPEN || devp->hd_task) { - devp = NULL; - continue; - } - - tp->ht_opaque = devp; - devp->hd_task = tp; - break; - } - - spin_unlock(&hpet_lock); - spin_unlock_irq(&hpet_task_lock); - - if (tp->ht_opaque) - return 0; - else - return -EBUSY; -} - static inline int hpet_tpcheck(struct hpet_task *tp) { struct hpet_dev *devp; @@ -706,24 +649,6 @@ int hpet_unregister(struct hpet_task *tp) return 0; } -int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) -{ - struct hpet_dev *devp; - int err; - - if ((err = hpet_tpcheck(tp))) - return err; - - spin_lock_irq(&hpet_lock); - devp = tp->ht_opaque; - if (devp->hd_task != tp) { - spin_unlock_irq(&hpet_lock); - return -ENXIO; - } - spin_unlock_irq(&hpet_lock); - return hpet_ioctl_common(devp, cmd, arg, 1); 
-} - static ctl_table hpet_table[] = { { .ctl_name = CTL_UNNUMBERED, @@ -806,14 +731,14 @@ static unsigned long hpet_calibrate(struct hpets *hpetp) int hpet_alloc(struct hpet_data *hdp) { - u64 cap, mcfg; + u64 cap, mcfg, hpet_config; struct hpet_dev *devp; - u32 i, ntimer; + u32 i, ntimer, irq; struct hpets *hpetp; size_t siz; struct hpet __iomem *hpet; static struct hpets *last = NULL; - unsigned long period; + unsigned long period, irq_bitmap; unsigned long long temp; /* @@ -840,11 +765,47 @@ int hpet_alloc(struct hpet_data *hdp) hpetp->hp_hpet_phys = hdp->hd_phys_address; hpetp->hp_ntimer = hdp->hd_nirqs; + hpet = hpetp->hp_hpet; - for (i = 0; i < hdp->hd_nirqs; i++) - hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; + /* Assign IRQs statically for legacy devices */ + hpetp->hp_dev[0].hd_hdwirq = hdp->hd_irq[0]; + hpetp->hp_dev[1].hd_hdwirq = hdp->hd_irq[1]; - hpet = hpetp->hp_hpet; + /* Assign IRQs dynamically for the others */ + for (i = 2, devp = &hpetp->hp_dev[2]; i < hdp->hd_nirqs; i++, devp++) { + struct hpet_timer __iomem *timer; + + timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; + + /* Check if there's already an IRQ assigned to the timer */ + if (hdp->hd_irq[i]) { + hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; + continue; + } + + hpet_config = readq(&timer->hpet_config); + irq_bitmap = (hpet_config & Tn_INT_ROUTE_CAP_MASK) + >> Tn_INT_ROUTE_CAP_SHIFT; + if (!irq_bitmap) + irq = 0; /* No valid IRQ Assignable */ + else { + irq = find_first_bit(&irq_bitmap, 32); + do { + hpet_config |= irq << Tn_INT_ROUTE_CNF_SHIFT; + writeq(hpet_config, &timer->hpet_config); + + /* + * Verify whether we have written a valid + * IRQ number by reading it back again + */ + hpet_config = readq(&timer->hpet_config); + if (irq == (hpet_config & Tn_INT_ROUTE_CNF_MASK) + >> Tn_INT_ROUTE_CNF_SHIFT) + break; /* Success */ + } while ((irq = (find_next_bit(&irq_bitmap, 32, irq)))); + } + hpetp->hp_dev[i].hd_hdwirq = irq; + } cap = readq(&hpet->hpet_cap); @@ -875,7 +836,8 @@ int hpet_alloc(struct hpet_data *hdp) hpetp->hp_which, hdp->hd_phys_address, hpetp->hp_ntimer > 1 ? "s" : ""); for (i = 0; i < hpetp->hp_ntimer; i++) - printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); + printk("%s %d", i > 0 ? "," : "", + hpetp->hp_dev[i].hd_hdwirq); printk("\n"); printk(KERN_INFO "hpet%u: %u %d-bit timers, %Lu Hz\n", diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 0c66b802736a..78b151c4d20f 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -1,5 +1,5 @@ /* - * Real Time Clock interface for Linux + * Real Time Clock interface for Linux * * Copyright (C) 1996 Paul Gortmaker * @@ -17,7 +17,7 @@ * has been received. If a RTC interrupt has already happened, * it will output an unsigned long and then block. The output value * contains the interrupt status in the low byte and the number of - * interrupts since the last read in the remaining high bytes. The + * interrupts since the last read in the remaining high bytes. The * /dev/rtc interface can also be used with the select(2) call. 
* * This program is free software; you can redistribute it and/or @@ -104,12 +104,14 @@ static int rtc_has_irq = 1; #ifndef CONFIG_HPET_EMULATE_RTC #define is_hpet_enabled() 0 -#define hpet_set_alarm_time(hrs, min, sec) 0 -#define hpet_set_periodic_freq(arg) 0 -#define hpet_mask_rtc_irq_bit(arg) 0 -#define hpet_set_rtc_irq_bit(arg) 0 -#define hpet_rtc_timer_init() do { } while (0) -#define hpet_rtc_dropped_irq() 0 +#define hpet_set_alarm_time(hrs, min, sec) 0 +#define hpet_set_periodic_freq(arg) 0 +#define hpet_mask_rtc_irq_bit(arg) 0 +#define hpet_set_rtc_irq_bit(arg) 0 +#define hpet_rtc_timer_init() do { } while (0) +#define hpet_rtc_dropped_irq() 0 +#define hpet_register_irq_handler(h) 0 +#define hpet_unregister_irq_handler(h) 0 #ifdef RTC_IRQ static irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id) { @@ -147,7 +149,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, static unsigned int rtc_poll(struct file *file, poll_table *wait); #endif -static void get_rtc_alm_time (struct rtc_time *alm_tm); +static void get_rtc_alm_time(struct rtc_time *alm_tm); #ifdef RTC_IRQ static void set_rtc_irq_bit_locked(unsigned char bit); static void mask_rtc_irq_bit_locked(unsigned char bit); @@ -185,9 +187,9 @@ static int rtc_proc_open(struct inode *inode, struct file *file); * rtc_status but before mod_timer is called, which would then reenable the * timer (but you would need to have an awful timing before you'd trip on it) */ -static unsigned long rtc_status = 0; /* bitmapped status byte. */ -static unsigned long rtc_freq = 0; /* Current periodic IRQ rate */ -static unsigned long rtc_irq_data = 0; /* our output to the world */ +static unsigned long rtc_status; /* bitmapped status byte. */ +static unsigned long rtc_freq; /* Current periodic IRQ rate */ +static unsigned long rtc_irq_data; /* our output to the world */ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ #ifdef RTC_IRQ @@ -195,7 +197,7 @@ static unsigned long rtc_max_user_freq = 64; /* > this, need CAP_SYS_RESOURCE */ * rtc_task_lock nests inside rtc_lock. */ static DEFINE_SPINLOCK(rtc_task_lock); -static rtc_task_t *rtc_callback = NULL; +static rtc_task_t *rtc_callback; #endif /* @@ -205,7 +207,7 @@ static rtc_task_t *rtc_callback = NULL; static unsigned long epoch = 1900; /* year corresponding to 0x00 */ -static const unsigned char days_in_mo[] = +static const unsigned char days_in_mo[] = {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; /* @@ -242,7 +244,7 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id) * the last read in the remainder of rtc_irq_data. 
*/ - spin_lock (&rtc_lock); + spin_lock(&rtc_lock); rtc_irq_data += 0x100; rtc_irq_data &= ~0xff; if (is_hpet_enabled()) { @@ -259,16 +261,16 @@ irqreturn_t rtc_interrupt(int irq, void *dev_id) if (rtc_status & RTC_TIMER_ON) mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); - spin_unlock (&rtc_lock); + spin_unlock(&rtc_lock); /* Now do the rest of the actions */ spin_lock(&rtc_task_lock); if (rtc_callback) rtc_callback->func(rtc_callback->private_data); spin_unlock(&rtc_task_lock); - wake_up_interruptible(&rtc_wait); + wake_up_interruptible(&rtc_wait); - kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); + kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); return IRQ_HANDLED; } @@ -335,7 +337,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf, DECLARE_WAITQUEUE(wait, current); unsigned long data; ssize_t retval; - + if (rtc_has_irq == 0) return -EIO; @@ -358,11 +360,11 @@ static ssize_t rtc_read(struct file *file, char __user *buf, * confusing. And no, xchg() is not the answer. */ __set_current_state(TASK_INTERRUPTIBLE); - - spin_lock_irq (&rtc_lock); + + spin_lock_irq(&rtc_lock); data = rtc_irq_data; rtc_irq_data = 0; - spin_unlock_irq (&rtc_lock); + spin_unlock_irq(&rtc_lock); if (data != 0) break; @@ -378,10 +380,13 @@ static ssize_t rtc_read(struct file *file, char __user *buf, schedule(); } while (1); - if (count == sizeof(unsigned int)) - retval = put_user(data, (unsigned int __user *)buf) ?: sizeof(int); - else - retval = put_user(data, (unsigned long __user *)buf) ?: sizeof(long); + if (count == sizeof(unsigned int)) { + retval = put_user(data, + (unsigned int __user *)buf) ?: sizeof(int); + } else { + retval = put_user(data, + (unsigned long __user *)buf) ?: sizeof(long); + } if (!retval) retval = count; out: @@ -394,7 +399,7 @@ static ssize_t rtc_read(struct file *file, char __user *buf, static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) { - struct rtc_time wtime; + struct rtc_time wtime; #ifdef RTC_IRQ if (rtc_has_irq == 0) { @@ -426,35 +431,41 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) } case RTC_PIE_OFF: /* Mask periodic int. enab. bit */ { - unsigned long flags; /* can be called from isr via rtc_control() */ - spin_lock_irqsave (&rtc_lock, flags); + /* can be called from isr via rtc_control() */ + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); mask_rtc_irq_bit_locked(RTC_PIE); if (rtc_status & RTC_TIMER_ON) { rtc_status &= ~RTC_TIMER_ON; del_timer(&rtc_irq_timer); } - spin_unlock_irqrestore (&rtc_lock, flags); + spin_unlock_irqrestore(&rtc_lock, flags); + return 0; } case RTC_PIE_ON: /* Allow periodic ints */ { - unsigned long flags; /* can be called from isr via rtc_control() */ + /* can be called from isr via rtc_control() */ + unsigned long flags; + /* * We don't really want Joe User enabling more * than 64Hz of interrupts on a multi-user machine. */ if (!kernel && (rtc_freq > rtc_max_user_freq) && - (!capable(CAP_SYS_RESOURCE))) + (!capable(CAP_SYS_RESOURCE))) return -EACCES; - spin_lock_irqsave (&rtc_lock, flags); + spin_lock_irqsave(&rtc_lock, flags); if (!(rtc_status & RTC_TIMER_ON)) { mod_timer(&rtc_irq_timer, jiffies + HZ/rtc_freq + 2*HZ/100); rtc_status |= RTC_TIMER_ON; } set_rtc_irq_bit_locked(RTC_PIE); - spin_unlock_irqrestore (&rtc_lock, flags); + spin_unlock_irqrestore(&rtc_lock, flags); + return 0; } case RTC_UIE_OFF: /* Mask ints from RTC updates. 
*/ @@ -477,7 +488,7 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) */ memset(&wtime, 0, sizeof(struct rtc_time)); get_rtc_alm_time(&wtime); - break; + break; } case RTC_ALM_SET: /* Store a time into the alarm */ { @@ -505,16 +516,21 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) */ } if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || - RTC_ALWAYS_BCD) - { - if (sec < 60) BIN_TO_BCD(sec); - else sec = 0xff; - - if (min < 60) BIN_TO_BCD(min); - else min = 0xff; - - if (hrs < 24) BIN_TO_BCD(hrs); - else hrs = 0xff; + RTC_ALWAYS_BCD) { + if (sec < 60) + BIN_TO_BCD(sec); + else + sec = 0xff; + + if (min < 60) + BIN_TO_BCD(min); + else + min = 0xff; + + if (hrs < 24) + BIN_TO_BCD(hrs); + else + hrs = 0xff; } CMOS_WRITE(hrs, RTC_HOURS_ALARM); CMOS_WRITE(min, RTC_MINUTES_ALARM); @@ -563,11 +579,12 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) return -EINVAL; - + if ((hrs >= 24) || (min >= 60) || (sec >= 60)) return -EINVAL; - if ((yrs -= epoch) > 255) /* They are unsigned */ + yrs -= epoch; + if (yrs > 255) /* They are unsigned */ return -EINVAL; spin_lock_irq(&rtc_lock); @@ -635,9 +652,10 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) { int tmp = 0; unsigned char val; - unsigned long flags; /* can be called from isr via rtc_control() */ + /* can be called from isr via rtc_control() */ + unsigned long flags; - /* + /* * The max we can do is 8192Hz. */ if ((arg < 2) || (arg > 8192)) @@ -646,7 +664,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) * We don't really want Joe User generating more * than 64Hz of interrupts on a multi-user machine. */ - if (!kernel && (arg > rtc_max_user_freq) && (!capable(CAP_SYS_RESOURCE))) + if (!kernel && (arg > rtc_max_user_freq) && + !capable(CAP_SYS_RESOURCE)) return -EACCES; while (arg > (1<<tmp)) @@ -674,11 +693,11 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) #endif case RTC_EPOCH_READ: /* Read the epoch. */ { - return put_user (epoch, (unsigned long __user *)arg); + return put_user(epoch, (unsigned long __user *)arg); } case RTC_EPOCH_SET: /* Set the epoch. */ { - /* + /* * There were no RTC clocks before 1900. */ if (arg < 1900) @@ -693,7 +712,8 @@ static int rtc_do_ioctl(unsigned int cmd, unsigned long arg, int kernel) default: return -ENOTTY; } - return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; + return copy_to_user((void __user *)arg, + &wtime, sizeof wtime) ? -EFAULT : 0; } static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, @@ -712,26 +732,25 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, * needed here. Or anywhere else in this driver. 
*/ static int rtc_open(struct inode *inode, struct file *file) { - spin_lock_irq (&rtc_lock); + spin_lock_irq(&rtc_lock); - if(rtc_status & RTC_IS_OPEN) + if (rtc_status & RTC_IS_OPEN) goto out_busy; rtc_status |= RTC_IS_OPEN; rtc_irq_data = 0; - spin_unlock_irq (&rtc_lock); + spin_unlock_irq(&rtc_lock); return 0; out_busy: - spin_unlock_irq (&rtc_lock); + spin_unlock_irq(&rtc_lock); return -EBUSY; } -static int rtc_fasync (int fd, struct file *filp, int on) - +static int rtc_fasync(int fd, struct file *filp, int on) { - return fasync_helper (fd, filp, on, &rtc_async_queue); + return fasync_helper(fd, filp, on, &rtc_async_queue); } static int rtc_release(struct inode *inode, struct file *file) @@ -762,16 +781,16 @@ static int rtc_release(struct inode *inode, struct file *file) } spin_unlock_irq(&rtc_lock); - if (file->f_flags & FASYNC) { - rtc_fasync (-1, file, 0); - } + if (file->f_flags & FASYNC) + rtc_fasync(-1, file, 0); no_irq: #endif - spin_lock_irq (&rtc_lock); + spin_lock_irq(&rtc_lock); rtc_irq_data = 0; rtc_status &= ~RTC_IS_OPEN; - spin_unlock_irq (&rtc_lock); + spin_unlock_irq(&rtc_lock); + return 0; } @@ -786,9 +805,9 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait) poll_wait(file, &rtc_wait, wait); - spin_lock_irq (&rtc_lock); + spin_lock_irq(&rtc_lock); l = rtc_irq_data; - spin_unlock_irq (&rtc_lock); + spin_unlock_irq(&rtc_lock); if (l != 0) return POLLIN | POLLRDNORM; @@ -796,14 +815,6 @@ static unsigned int rtc_poll(struct file *file, poll_table *wait) } #endif -/* - * exported stuffs - */ - -EXPORT_SYMBOL(rtc_register); -EXPORT_SYMBOL(rtc_unregister); -EXPORT_SYMBOL(rtc_control); - int rtc_register(rtc_task_t *task) { #ifndef RTC_IRQ @@ -829,6 +840,7 @@ int rtc_register(rtc_task_t *task) return 0; #endif } +EXPORT_SYMBOL(rtc_register); int rtc_unregister(rtc_task_t *task) { @@ -845,7 +857,7 @@ int rtc_unregister(rtc_task_t *task) return -ENXIO; } rtc_callback = NULL; - + /* disable controls */ if (!hpet_mask_rtc_irq_bit(RTC_PIE | RTC_AIE | RTC_UIE)) { tmp = CMOS_READ(RTC_CONTROL); @@ -865,6 +877,7 @@ int rtc_unregister(rtc_task_t *task) return 0; #endif } +EXPORT_SYMBOL(rtc_unregister); int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) { @@ -883,7 +896,7 @@ int rtc_control(rtc_task_t *task, unsigned int cmd, unsigned long arg) return rtc_do_ioctl(cmd, arg, 1); #endif } - +EXPORT_SYMBOL(rtc_control); /* * The various file operations we support. @@ -910,11 +923,11 @@ static struct miscdevice rtc_dev = { #ifdef CONFIG_PROC_FS static const struct file_operations rtc_proc_fops = { - .owner = THIS_MODULE, - .open = rtc_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, + .owner = THIS_MODULE, + .open = rtc_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, }; #endif @@ -965,7 +978,7 @@ static int __init rtc_init(void) #ifdef CONFIG_SPARC32 for_each_ebus(ebus) { for_each_ebusdev(edev, ebus) { - if(strcmp(edev->prom_node->name, "rtc") == 0) { + if (strcmp(edev->prom_node->name, "rtc") == 0) { rtc_port = edev->resource[0].start; rtc_irq = edev->irqs[0]; goto found; @@ -986,7 +999,8 @@ found: * XXX Interrupt pin #7 in Espresso is shared between RTC and * PCI Slot 2 INTA# (and some INTx# in Slot 1). 
*/ - if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", (void *)&rtc_port)) { + if (request_irq(rtc_irq, rtc_interrupt, IRQF_SHARED, "rtc", + (void *)&rtc_port)) { rtc_has_irq = 0; printk(KERN_ERR "rtc: cannot register IRQ %d\n", rtc_irq); return -EIO; @@ -1015,16 +1029,26 @@ no_irq: #ifdef RTC_IRQ if (is_hpet_enabled()) { + int err; + rtc_int_handler_ptr = hpet_rtc_interrupt; + err = hpet_register_irq_handler(rtc_interrupt); + if (err != 0) { + printk(KERN_WARNING "hpet_register_irq_handler failed " + "in rtc_init()."); + return err; + } } else { rtc_int_handler_ptr = rtc_interrupt; } - if(request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, "rtc", NULL)) { + if (request_irq(RTC_IRQ, rtc_int_handler_ptr, IRQF_DISABLED, + "rtc", NULL)) { /* Yeah right, seeing as irq 8 doesn't even hit the bus. */ rtc_has_irq = 0; printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); rtc_release_region(); + return -EIO; } hpet_rtc_timer_init(); @@ -1036,6 +1060,7 @@ no_irq: if (misc_register(&rtc_dev)) { #ifdef RTC_IRQ free_irq(RTC_IRQ, NULL); + hpet_unregister_irq_handler(rtc_interrupt); rtc_has_irq = 0; #endif rtc_release_region(); @@ -1052,21 +1077,21 @@ no_irq: #if defined(__alpha__) || defined(__mips__) rtc_freq = HZ; - + /* Each operating system on an Alpha uses its own epoch. Let's try to guess which one we are using now. */ - + if (rtc_is_updating() != 0) msleep(20); - + spin_lock_irq(&rtc_lock); year = CMOS_READ(RTC_YEAR); ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); - + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) BCD_TO_BIN(year); /* This should never happen... */ - + if (year < 20) { epoch = 2000; guess = "SRM (post-2000)"; @@ -1087,7 +1112,8 @@ no_irq: #endif } if (guess) - printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", guess, epoch); + printk(KERN_INFO "rtc: %s epoch (%lu) detected\n", + guess, epoch); #endif #ifdef RTC_IRQ if (rtc_has_irq == 0) @@ -1096,8 +1122,12 @@ no_irq: spin_lock_irq(&rtc_lock); rtc_freq = 1024; if (!hpet_set_periodic_freq(rtc_freq)) { - /* Initialize periodic freq. to CMOS reset default, which is 1024Hz */ - CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), RTC_FREQ_SELECT); + /* + * Initialize periodic frequency to CMOS reset default, + * which is 1024Hz + */ + CMOS_WRITE(((CMOS_READ(RTC_FREQ_SELECT) & 0xF0) | 0x06), + RTC_FREQ_SELECT); } spin_unlock_irq(&rtc_lock); no_irq2: @@ -1110,20 +1140,22 @@ no_irq2: return 0; } -static void __exit rtc_exit (void) +static void __exit rtc_exit(void) { cleanup_sysctl(); - remove_proc_entry ("driver/rtc", NULL); + remove_proc_entry("driver/rtc", NULL); misc_deregister(&rtc_dev); #ifdef CONFIG_SPARC32 if (rtc_has_irq) - free_irq (rtc_irq, &rtc_port); + free_irq(rtc_irq, &rtc_port); #else rtc_release_region(); #ifdef RTC_IRQ - if (rtc_has_irq) - free_irq (RTC_IRQ, NULL); + if (rtc_has_irq) { + free_irq(RTC_IRQ, NULL); + hpet_unregister_irq_handler(hpet_rtc_interrupt); + } #endif #endif /* CONFIG_SPARC32 */ } @@ -1133,14 +1165,14 @@ module_exit(rtc_exit); #ifdef RTC_IRQ /* - * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. + * At IRQ rates >= 4096Hz, an interrupt may get lost altogether. * (usually during an IDE disk interrupt, with IRQ unmasking off) * Since the interrupt handler doesn't get called, the IRQ status * byte doesn't get read, and the RTC stops generating interrupts. * A timer is set, and will call this function if/when that happens. * To get it out of this stalled state, we just read the status. * At least a jiffy of interrupts (rtc_freq/HZ) will have been lost. 
- * (You *really* shouldn't be trying to use a non-realtime system + * (You *really* shouldn't be trying to use a non-realtime system * for something that requires a steady > 1KHz signal anyways.) */ @@ -1148,7 +1180,7 @@ static void rtc_dropped_irq(unsigned long data) { unsigned long freq; - spin_lock_irq (&rtc_lock); + spin_lock_irq(&rtc_lock); if (hpet_rtc_dropped_irq()) { spin_unlock_irq(&rtc_lock); @@ -1167,13 +1199,15 @@ static void rtc_dropped_irq(unsigned long data) spin_unlock_irq(&rtc_lock); - if (printk_ratelimit()) - printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", freq); + if (printk_ratelimit()) { + printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n", + freq); + } /* Now we have new data */ wake_up_interruptible(&rtc_wait); - kill_fasync (&rtc_async_queue, SIGIO, POLL_IN); + kill_fasync(&rtc_async_queue, SIGIO, POLL_IN); } #endif @@ -1277,7 +1311,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm) * can take just over 2ms. We wait 20ms. There is no need to * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. * If you need to know *exactly* when a second has started, enable - * periodic update complete interrupts, (via ioctl) and then + * periodic update complete interrupts, (via ioctl) and then * immediately read /dev/rtc which will block until you get the IRQ. * Once the read clears, read the RTC time (again via ioctl). Easy. */ @@ -1307,8 +1341,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm) ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irqrestore(&rtc_lock, flags); - if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(rtc_tm->tm_sec); BCD_TO_BIN(rtc_tm->tm_min); BCD_TO_BIN(rtc_tm->tm_hour); @@ -1326,7 +1359,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm) * Account for differences between how the RTC uses the values * and how they are defined in a struct rtc_time; */ - if ((rtc_tm->tm_year += (epoch - 1900)) <= 69) + rtc_tm->tm_year += epoch - 1900; + if (rtc_tm->tm_year <= 69) rtc_tm->tm_year += 100; rtc_tm->tm_mon--; @@ -1347,8 +1381,7 @@ static void get_rtc_alm_time(struct rtc_time *alm_tm) ctrl = CMOS_READ(RTC_CONTROL); spin_unlock_irq(&rtc_lock); - if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) - { + if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { BCD_TO_BIN(alm_tm->tm_sec); BCD_TO_BIN(alm_tm->tm_min); BCD_TO_BIN(alm_tm->tm_hour); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5efd5550f4ca..b730d6709529 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1604,7 +1604,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, memcpy(&policy->cpuinfo, &data->cpuinfo, sizeof(struct cpufreq_cpuinfo)); - if (policy->min > data->min && policy->min > policy->max) { + if (policy->min > data->max || policy->max < data->min) { ret = -EINVAL; goto error_out; } diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 5e596a7e3601..9008ed5ef4ce 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -8,6 +8,8 @@ #include <linux/slab.h> #include <asm/dmi.h> +static char dmi_empty_string[] = " "; + static char * __init dmi_string(const struct dmi_header *dm, u8 s) { const u8 *bp = ((u8 *) dm) + dm->length; @@ -21,11 +23,16 @@ static char * __init dmi_string(const struct dmi_header *dm, u8 s) } if (*bp != 0) { - str = dmi_alloc(strlen(bp) + 1); + size_t len = strlen(bp)+1; + size_t cmp_len = len > 8 ? 
8 : len; + + if (!memcmp(bp, dmi_empty_string, cmp_len)) + return dmi_empty_string; + str = dmi_alloc(len); if (str != NULL) strcpy(str, bp); else - printk(KERN_ERR "dmi_string: out of memory.\n"); + printk(KERN_ERR "dmi_string: cannot allocate %Zu bytes.\n", len); } } @@ -175,12 +182,23 @@ static void __init dmi_save_devices(const struct dmi_header *dm) } } +static struct dmi_device empty_oem_string_dev = { + .name = dmi_empty_string, +}; + static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) { int i, count = *(u8 *)(dm + 1); struct dmi_device *dev; for (i = 1; i <= count; i++) { + char *devname = dmi_string(dm, i); + + if (!strcmp(devname, dmi_empty_string)) { + list_add(&empty_oem_string_dev.list, &dmi_devices); + continue; + } + dev = dmi_alloc(sizeof(*dev)); if (!dev) { printk(KERN_ERR @@ -189,7 +207,7 @@ static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) } dev->type = DMI_DEV_TYPE_OEM_STRING; - dev->name = dmi_string(dm, i); + dev->name = devname; dev->device_data = NULL; list_add(&dev->list, &dmi_devices); @@ -331,9 +349,11 @@ void __init dmi_scan_machine(void) rc = dmi_present(q); if (!rc) { dmi_available = 1; + dmi_iounmap(p, 0x10000); return; } } + dmi_iounmap(p, 0x10000); } out: printk(KERN_INFO "DMI not present or invalid.\n"); } diff --git a/drivers/ieee1394/Makefile b/drivers/ieee1394/Makefile index 489c133664d5..1f8153b57503 100644 --- a/drivers/ieee1394/Makefile +++ b/drivers/ieee1394/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_IEEE1394_SBP2) += sbp2.o obj-$(CONFIG_IEEE1394_DV1394) += dv1394.o obj-$(CONFIG_IEEE1394_ETH1394) += eth1394.o +obj-$(CONFIG_PROVIDE_OHCI1394_DMA_INIT) += init_ohci1394_dma.o diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c new file mode 100644 index 000000000000..ddaab6eb8ace --- /dev/null +++ b/drivers/ieee1394/init_ohci1394_dma.c @@ -0,0 +1,285 @@ +/* + * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers + * + * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> + * + * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c + * this file has functions to: + * - scan the PCI very early on boot for all OHCI 1394-compliant controllers + * - reset and initialize them and make them join the IEEE1394 bus and + * - enable physical DMA on them to allow remote debugging + * + * All code and data is marked as __init and __initdata, respective as + * during boot, all OHCI1394 controllers may be claimed by the firewire + * stack and at this point, this code should not touch them anymore. + * + * To use physical DMA after the initialization of the firewire stack, + * be sure that the stack enables it and (re-)attach after the bus reset + * which may be caused by the firewire stack initialization. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/interrupt.h> /* for ohci1394.h */ +#include <linux/delay.h> +#include <linux/pci.h> /* for PCI defines */ +#include <linux/init_ohci1394_dma.h> +#include <asm/pci-direct.h> /* for direct PCI config space access */ +#include <asm/fixmap.h> + +#include "ieee1394_types.h" +#include "ohci1394.h" + +int __initdata init_ohci1394_dma_early; + +/* Reads a PHY register of an OHCI-1394 controller */ +static inline u8 __init get_phy_reg(struct ti_ohci *ohci, u8 addr) +{ + int i; + quadlet_t r; + + reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000) + break; + mdelay(1); + } + r = reg_read(ohci, OHCI1394_PhyControl); + + return (r & 0x00ff0000) >> 16; +} + +/* Writes to a PHY register of an OHCI-1394 controller */ +static inline void __init set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data) +{ + int i; + + reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + u32 r = reg_read(ohci, OHCI1394_PhyControl); + if (!(r & 0x00004000)) + break; + mdelay(1); + } +} + +/* Resets an OHCI-1394 controller (for sane state before initialization) */ +static inline void __init init_ohci1394_soft_reset(struct ti_ohci *ohci) { + int i; + + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); + + for (i = 0; i < OHCI_LOOP_COUNT; i++) { + if (!(reg_read(ohci, OHCI1394_HCControlSet) + & OHCI1394_HCControl_softReset)) + break; + mdelay(1); + } +} + +/* Basic OHCI-1394 register and port inititalization */ +static inline void __init init_ohci1394_initialize(struct ti_ohci *ohci) +{ + quadlet_t bus_options; + int num_ports, i; + + /* Put some defaults to these undefined bus options */ + bus_options = reg_read(ohci, OHCI1394_BusOptions); + bus_options |= 0x60000000; /* Enable CMC and ISC */ + bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */ + bus_options &= ~0x18000000; /* Disable PMC and BMC */ + reg_write(ohci, OHCI1394_BusOptions, bus_options); + + /* Set the bus number */ + reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0); + + /* Enable posted writes */ + reg_write(ohci, OHCI1394_HCControlSet, + OHCI1394_HCControl_postedWriteEnable); + + /* Clear link control register */ + reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff); + + /* enable phys */ + reg_write(ohci, OHCI1394_LinkControlSet, + OHCI1394_LinkControl_RcvPhyPkt); + + /* Don't accept phy packets into AR request context */ + reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400); + + /* Clear the Isochonouys interrupt masks */ + reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); + reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); + + /* Accept asyncronous transfer requests from all nodes for now */ + reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000); + + /* Specify asyncronous transfer retries */ + reg_write(ohci, OHCI1394_ATRetries, + OHCI1394_MAX_AT_REQ_RETRIES | + (OHCI1394_MAX_AT_RESP_RETRIES<<4) | + (OHCI1394_MAX_PHYS_RESP_RETRIES<<8)); + + /* We don't want hardware swapping */ + reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap); + + /* Enable link */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); + + /* If anything is connected to a port, make sure it is enabled */ + num_ports = get_phy_reg(ohci, 2) & 0xf; + for (i = 0; i < num_ports; 
i++) { + unsigned int status; + + set_phy_reg(ohci, 7, i); + status = get_phy_reg(ohci, 8); + + if (status & 0x20) + set_phy_reg(ohci, 8, status & ~1); + } +} + +/** + * init_ohci1394_wait_for_busresets - wait until bus resets are completed + * + * OHCI1394 initialization itself and any device going on- or offline + * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec + * specifies that physical DMA is disabled on each bus reset and it + * has to be enabled after each bus reset when needed. We resort + * to polling here because on early boot, we have no interrupts. + */ +static inline void __init init_ohci1394_wait_for_busresets(struct ti_ohci *ohci) +{ + int i, events; + + for (i=0; i < 9; i++) { + mdelay(200); + events = reg_read(ohci, OHCI1394_IntEventSet); + if (events & OHCI1394_busReset) + reg_write(ohci, OHCI1394_IntEventClear, + OHCI1394_busReset); + } +} + +/** + * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging + * This enables remote DMA access over IEEE1394 from every host for the low + * 4GB of address space. DMA accesses above 4GB are not available currently. + */ +static inline void __init init_ohci1394_enable_physical_dma(struct ti_ohci *hci) +{ + reg_write(hci, OHCI1394_PhyReqFilterHiSet, 0xffffffff); + reg_write(hci, OHCI1394_PhyReqFilterLoSet, 0xffffffff); + reg_write(hci, OHCI1394_PhyUpperBound, 0xffff0000); +} + +/** + * init_ohci1394_reset_and_init_dma - init controller and enable DMA + * This initializes the given controller and enables physical DMA engine in it. + */ +static inline void __init init_ohci1394_reset_and_init_dma(struct ti_ohci *ohci) +{ + /* Start off with a soft reset, clears everything to a sane state. */ + init_ohci1394_soft_reset(ohci); + + /* Accessing some registers without LPS enabled may cause lock up */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS); + + /* Disable and clear interrupts */ + reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff); + reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff); + + mdelay(50); /* Wait 50msec to make sure we have full link enabled */ + + init_ohci1394_initialize(ohci); + /* + * The initialization causes at least one IEEE1394 bus reset. 
Enabling + * physical DMA only works *after* *all* bus resets have calmed down: + */ + init_ohci1394_wait_for_busresets(ohci); + + /* We had to wait and do this now if we want to debug early problems */ + init_ohci1394_enable_physical_dma(ohci); +} + +/** + * init_ohci1394_controller - Map the registers of the controller and init DMA + * This maps the registers of the specified controller and initializes it + */ +static inline void __init init_ohci1394_controller(int num, int slot, int func) +{ + unsigned long ohci_base; + struct ti_ohci ohci; + + printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394" + " at %02x:%02x.%x\n", num, slot, func); + + ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2)) + & PCI_BASE_ADDRESS_MEM_MASK; + + set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base); + + ohci.registers = (void *)fix_to_virt(FIX_OHCI1394_BASE); + + init_ohci1394_reset_and_init_dma(&ohci); +} + +/** + * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them + * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them + */ +void __init init_ohci1394_dma_on_all_controllers(void) +{ + int num, slot, func; + + if (!early_pci_allowed()) + return; + + /* Poor man's PCI discovery, the only thing we can do at early boot */ + for (num = 0; num < 32; num++) { + for (slot = 0; slot < 32; slot++) { + for (func = 0; func < 8; func++) { + u32 class = read_pci_config(num,slot,func, + PCI_CLASS_REVISION); + if ((class == 0xffffffff)) + continue; /* No device at this func */ + + if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) + continue; /* Not an OHCI-1394 device */ + + init_ohci1394_controller(num, slot, func); + break; /* Assume one controller per device */ + } + } + } + printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n"); +} + +/** + * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization + */ +static int __init setup_ohci1394_dma(char *opt) +{ + if (!strcmp(opt, "early")) + init_ohci1394_dma_early = 1; + return 0; +} + +/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */ +early_param("ohci1394_dma", setup_ohci1394_dma); diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c index 8991ab0b4fe3..61cff8374e6c 100644 --- a/drivers/input/mouse/pc110pad.c +++ b/drivers/input/mouse/pc110pad.c @@ -39,6 +39,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> +#include <linux/delay.h> #include <asm/io.h> #include <asm/irq.h> @@ -62,8 +63,10 @@ static irqreturn_t pc110pad_interrupt(int irq, void *ptr) int value = inb_p(pc110pad_io); int handshake = inb_p(pc110pad_io + 2); - outb_p(handshake | 1, pc110pad_io + 2); - outb_p(handshake & ~1, pc110pad_io + 2); + outb(handshake | 1, pc110pad_io + 2); + udelay(2); + outb(handshake & ~1, pc110pad_io + 2); + udelay(2); inb_p(0x64); pc110pad_data[pc110pad_count++] = value; diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c index 4e04e49a2f1c..ced4ac1955db 100644 --- a/drivers/kvm/svm.c +++ b/drivers/kvm/svm.c @@ -290,7 +290,7 @@ static void svm_hardware_enable(void *garbage) #ifdef CONFIG_X86_64 struct desc_ptr gdt_descr; #else - struct Xgt_desc_struct gdt_descr; + struct desc_ptr gdt_descr; #endif struct desc_struct *gdt; int me = raw_smp_processor_id(); diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c index bb56ae3f89b6..5b397b6c9f93 100644 --- a/drivers/kvm/vmx.c +++ b/drivers/kvm/vmx.c @@ -524,7 +524,7 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) static void 
vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) { if (vcpu->rmode.active) - rflags |= IOPL_MASK | X86_EFLAGS_VM; + rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, rflags); } @@ -1050,7 +1050,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu) vmcs_write32(GUEST_TR_AR_BYTES, vcpu->rmode.tr.ar); flags = vmcs_readl(GUEST_RFLAGS); - flags &= ~(IOPL_MASK | X86_EFLAGS_VM); + flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); flags |= (vcpu->rmode.save_iopl << IOPL_SHIFT); vmcs_writel(GUEST_RFLAGS, flags); @@ -1107,9 +1107,9 @@ static void enter_rmode(struct kvm_vcpu *vcpu) vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); flags = vmcs_readl(GUEST_RFLAGS); - vcpu->rmode.save_iopl = (flags & IOPL_MASK) >> IOPL_SHIFT; + vcpu->rmode.save_iopl = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; - flags |= IOPL_MASK | X86_EFLAGS_VM; + flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; vmcs_writel(GUEST_RFLAGS, flags); vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 96d0fd07c57d..44adb00e1490 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -94,7 +94,7 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages) /* Set up the two "TSS" members which tell the CPU what stack to use * for traps which do directly into the Guest (ie. traps at privilege * level 1). */ - pages->state.guest_tss.esp1 = lg->esp1; + pages->state.guest_tss.sp1 = lg->esp1; pages->state.guest_tss.ss1 = lg->ss1; /* Copy direct-to-Guest trap entries. */ @@ -416,7 +416,7 @@ void __init lguest_arch_host_init(void) /* We know where we want the stack to be when the Guest enters * the switcher: in pages->regs. The stack grows upwards, so * we start it at the end of that structure. */ - state->guest_tss.esp0 = (long)(&pages->regs + 1); + state->guest_tss.sp0 = (long)(&pages->regs + 1); /* And this is the GDT entry to use for the stack: we keep a * couple of special LGUEST entries. */ state->guest_tss.ss0 = LGUEST_DS; diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index 5dba68fe33f5..a8364d815222 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -61,7 +61,7 @@ set_base(gdt[(selname) >> 3], (u32)(address)); \ set_limit(gdt[(selname) >> 3], size); \ } while(0) -static struct desc_struct bad_bios_desc = { 0, 0x00409200 }; +static struct desc_struct bad_bios_desc; /* * At some point we want to use this stack frame pointer to unwind @@ -477,6 +477,9 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header) pnp_bios_callpoint.offset = header->fields.pm16offset; pnp_bios_callpoint.segment = PNP_CS16; + bad_bios_desc.a = 0; + bad_bios_desc.b = 0x00409200; + set_base(bad_bios_desc, __va((unsigned long)0x40 << 4)); _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4)); for (i = 0; i < NR_CPUS; i++) { diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c index c31f549ebea0..1c656667b937 100644 --- a/drivers/video/vermilion/vermilion.c +++ b/drivers/video/vermilion/vermilion.c @@ -88,9 +88,7 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order, { gfp_t flags; unsigned long i; - pgprot_t wc_pageprot; - wc_pageprot = PAGE_KERNEL_NOCACHE; max_order++; do { /* @@ -126,14 +124,8 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order, /* * Change caching policy of the linear kernel map to avoid * mapping type conflicts with user-space mappings. 
- * The first global_flush_tlb() is really only there to do a global - * wbinvd(). */ - - global_flush_tlb(); - change_page_attr(virt_to_page(va->logical), va->size >> PAGE_SHIFT, - wc_pageprot); - global_flush_tlb(); + set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT); printk(KERN_DEBUG MODULE_NAME ": Allocated %ld bytes vram area at 0x%08lx\n", @@ -157,9 +149,8 @@ static void vmlfb_free_vram_area(struct vram_area *va) * Reset the linear kernel map caching policy. */ - change_page_attr(virt_to_page(va->logical), - va->size >> PAGE_SHIFT, PAGE_KERNEL); - global_flush_tlb(); + set_pages_wb(virt_to_page(va->logical), + va->size >> PAGE_SHIFT); /* * Decrease the usage count on the pages we've used |