author     Tejun Heo <tj@kernel.org>    2011-11-28 09:46:22 -0800
committer  Tejun Heo <tj@kernel.org>    2011-11-28 09:46:22 -0800
commit     d4bbf7e7759afc172e2bfbc5c416324590049cdd (patch)
tree       7eab5ee5481cd3dcf1162329fec827177640018a /arch/x86/xen
parent     a150439c4a97db379f0ed6faa46fbbb6e7bf3cb2 (diff)
parent     401d0069cb344f401bc9d264c31db55876ff78c0 (diff)
Merge branch 'master' into x86/memblock
Conflicts & resolutions:
* arch/x86/xen/setup.c
dc91c728fd "xen: allow extra memory to be in multiple regions"
24aa07882b "memblock, x86: Replace memblock_x86_reserve/free..."
conflicted on xen_add_extra_mem() updates. The resolution is
trivial as the latter just wants to replace
memblock_x86_reserve_range() with memblock_reserve() (a short
sketch of the replacement follows this list).
* drivers/pci/intel-iommu.c
166e9278a3f "x86/ia64: intel-iommu: move to drivers/iommu/"
5dfe8660a3d "bootmem: Replace work_with_active_regions() with..."
conflicted as the former moved the file under drivers/iommu/.
Resolved by applying the changes from the latter to the moved
file.
* mm/Kconfig
6661672053a "memblock: add NO_BOOTMEM config symbol"
c378ddd53f9 "memblock, x86: Make ARCH_DISCARD_MEMBLOCK a config option"
conflicted trivially. Both added config options. Just
letting both add their own options resolves the conflict.
* mm/memblock.c
d1f0ece6cdc "mm/memblock.c: small function definition fixes"
ed7b56a799c "memblock: Remove memblock_memory_can_coalesce()"
conflicted. The former updates a function removed by the
latter. Resolution is trivial.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/Kconfig                 |  11
-rw-r--r--  arch/x86/xen/Makefile                |   4
-rw-r--r--  arch/x86/xen/enlighten.c             |  36
-rw-r--r--  arch/x86/xen/grant-table.c           |   2
-rw-r--r--  arch/x86/xen/mmu.c                   | 205
-rw-r--r--  arch/x86/xen/multicalls.c            | 169
-rw-r--r--  arch/x86/xen/multicalls.h            |   6
-rw-r--r--  arch/x86/xen/p2m.c                   | 128
-rw-r--r--  arch/x86/xen/platform-pci-unplug.c   |   2
-rw-r--r--  arch/x86/xen/setup.c                 | 294
-rw-r--r--  arch/x86/xen/smp.c                   |  15
-rw-r--r--  arch/x86/xen/time.c                  |  21
-rw-r--r--  arch/x86/xen/trace.c                 |  62
-rw-r--r--  arch/x86/xen/vga.c                   |  67
-rw-r--r--  arch/x86/xen/xen-asm_32.S            |   8
-rw-r--r--  arch/x86/xen/xen-ops.h               |  11
16 files changed, 634 insertions(+), 407 deletions(-)
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 5cc821cb2e09..26c731a106af 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -25,8 +25,7 @@ config XEN_PRIVILEGED_GUEST config XEN_PVHVM def_bool y - depends on XEN - depends on X86_LOCAL_APIC + depends on XEN && PCI && X86_LOCAL_APIC config XEN_MAX_DOMAIN_MEMORY int @@ -49,11 +48,3 @@ config XEN_DEBUG_FS help Enable statistics output and various tuning options in debugfs. Enabling this option may incur a significant performance overhead. - -config XEN_DEBUG - bool "Enable Xen debug checks" - depends on XEN - default n - help - Enable various WARN_ON checks in the Xen MMU code. - Enabling this option WILL incur a significant performance overhead. diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 17c565de3d64..add2c2d729ce 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -15,8 +15,10 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ grant-table.o suspend.o platform-pci-unplug.o \ p2m.o +obj-$(CONFIG_EVENT_TRACING) += trace.o + obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o - +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 5525163a0398..1f928659c338 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(xen_domain_type); unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; EXPORT_SYMBOL(machine_to_phys_mapping); -unsigned int machine_to_phys_order; -EXPORT_SYMBOL(machine_to_phys_order); +unsigned long machine_to_phys_nr; +EXPORT_SYMBOL(machine_to_phys_nr); struct start_info *xen_start_info; EXPORT_SYMBOL_GPL(xen_start_info); @@ -251,6 +251,7 @@ static void __init xen_init_cpuid_mask(void) ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ (1 << X86_FEATURE_ACPI)); /* disable ACPI */ ax = 1; + cx = 0; xen_cpuid(&ax, &bx, &cx, &dx); xsave_mask = @@ -341,6 +342,8 @@ static void xen_set_ldt(const void *addr, unsigned entries) struct mmuext_op *op; struct multicall_space mcs = xen_mc_entry(sizeof(*op)); + trace_xen_cpu_set_ldt(addr, entries); + op = mcs.args; op->cmd = MMUEXT_SET_LDT; op->arg1.linear_addr = (unsigned long)addr; @@ -496,6 +499,8 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); u64 entry = *(u64 *)ptr; + trace_xen_cpu_write_ldt_entry(dt, entrynum, entry); + preempt_disable(); xen_mc_flush(); @@ -565,6 +570,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) unsigned long p = (unsigned long)&dt[entrynum]; unsigned long start, end; + trace_xen_cpu_write_idt_entry(dt, entrynum, g); + preempt_disable(); start = __this_cpu_read(idt_desc.address); @@ -619,6 +626,8 @@ static void xen_load_idt(const struct desc_ptr *desc) static DEFINE_SPINLOCK(lock); static struct trap_info traps[257]; + trace_xen_cpu_load_idt(desc); + spin_lock(&lock); __get_cpu_var(idt_desc) = *desc; @@ -637,6 +646,8 @@ static void xen_load_idt(const struct desc_ptr *desc) static void xen_write_gdt_entry(struct desc_struct *dt, int entry, const void *desc, int type) { + trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); + preempt_disable(); switch (type) { @@ -665,6 +676,8 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, const void *desc, int type) { + 
trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); + switch (type) { case DESC_LDT: case DESC_TSS: @@ -684,7 +697,9 @@ static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, static void xen_load_sp0(struct tss_struct *tss, struct thread_struct *thread) { - struct multicall_space mcs = xen_mc_entry(0); + struct multicall_space mcs; + + mcs = xen_mc_entry(0); MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0); xen_mc_issue(PARAVIRT_LAZY_CPU); } @@ -937,6 +952,10 @@ static const struct pv_info xen_info __initconst = { .paravirt_enabled = 1, .shared_kernel_pmd = 0, +#ifdef CONFIG_X86_64 + .extra_user_64bit_cs = FLAT_USER_CS64, +#endif + .name = "Xen", }; @@ -1248,6 +1267,14 @@ asmlinkage void __init xen_start_kernel(void) if (pci_xen) x86_init.pci.arch_init = pci_xen_init; } else { + const struct dom0_vga_console_info *info = + (void *)((char *)xen_start_info + + xen_start_info->console.dom0.info_off); + + xen_init_vga(info, xen_start_info->console.dom0.info_size); + xen_start_info->console.domU.mfn = 0; + xen_start_info->console.domU.evtchn = 0; + /* Make sure ACS will be enabled */ pci_request_acs(); } @@ -1329,7 +1356,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, int cpu = (long)hcpu; switch (action) { case CPU_UP_PREPARE: - per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; + xen_vcpu_setup(cpu); if (xen_have_vector_callback) xen_init_lock_cpu(cpu); break; @@ -1359,7 +1386,6 @@ static void __init xen_hvm_guest_init(void) xen_hvm_smp_init(); register_cpu_notifier(&xen_hvm_cpu_notifier); xen_unplug_emulated_devices(); - have_vcpu_info_placement = 0; x86_init.irqs.intr_init = xen_init_IRQ; xen_hvm_init_time_ops(); xen_hvm_init_mmu_ops(); diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c index 49ba9b5224d1..5a40d24ba331 100644 --- a/arch/x86/xen/grant-table.c +++ b/arch/x86/xen/grant-table.c @@ -71,7 +71,7 @@ int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, if (shared == NULL) { struct vm_struct *area = - xen_alloc_vm_area(PAGE_SIZE * max_nr_gframes); + alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); BUG_ON(area == NULL); shared = area->addr; *__shared = shared; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index ad54fa10f8a2..f4bf8aa574f4 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -48,6 +48,8 @@ #include <linux/memblock.h> #include <linux/seq_file.h> +#include <trace/events/xen.h> + #include <asm/pgtable.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> @@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) struct multicall_space mcs; struct mmu_update *u; + trace_xen_mmu_set_domain_pte(ptep, pteval, domid); + mcs = xen_mc_entry(sizeof(*u)); u = mcs.args; @@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update) *u = *update; } +static void xen_extend_mmuext_op(const struct mmuext_op *op) +{ + struct multicall_space mcs; + struct mmuext_op *u; + + mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); + + if (mcs.mc != NULL) { + mcs.mc->args[1]++; + } else { + mcs = __xen_mc_entry(sizeof(*u)); + MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); + } + + u = mcs.args; + *u = *op; +} + static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) { struct mmu_update u; @@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) static void xen_set_pmd(pmd_t *ptr, pmd_t val) { + trace_xen_mmu_set_pmd(ptr, val); + /* If page is not pinned, we can just update the entry directly 
*/ if (!xen_page_pinned(ptr)) { @@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) return true; } -static void xen_set_pte(pte_t *ptep, pte_t pteval) +static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) { if (!xen_batched_set_pte(ptep, pteval)) native_set_pte(ptep, pteval); } +static void xen_set_pte(pte_t *ptep, pte_t pteval) +{ + trace_xen_mmu_set_pte(ptep, pteval); + __xen_set_pte(ptep, pteval); +} + static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - xen_set_pte(ptep, pteval); + trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); + __xen_set_pte(ptep, pteval); } pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { /* Just return the pte as-is. We preserve the bits on commit */ + trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); return *ptep; } @@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, { struct mmu_update u; + trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); xen_mc_batch(); u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; @@ -462,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte) } PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); -#ifdef CONFIG_XEN_DEBUG -pte_t xen_make_pte_debug(pteval_t pte) -{ - phys_addr_t addr = (pte & PTE_PFN_MASK); - phys_addr_t other_addr; - bool io_page = false; - pte_t _pte; - - if (pte & _PAGE_IOMAP) - io_page = true; - - _pte = xen_make_pte(pte); - - if (!addr) - return _pte; - - if (io_page && - (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { - other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; - WARN_ONCE(addr != other_addr, - "0x%lx is using VM_IO, but it is 0x%lx!\n", - (unsigned long)addr, (unsigned long)other_addr); - } else { - pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; - other_addr = (_pte.pte & PTE_PFN_MASK); - WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set), - "0x%lx is missing VM_IO (and wasn't fixed)!\n", - (unsigned long)addr); - } - - return _pte; -} -PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); -#endif - static pgd_t xen_make_pgd(pgdval_t pgd) { pgd = pte_pfn_to_mfn(pgd); @@ -530,6 +528,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val) static void xen_set_pud(pud_t *ptr, pud_t val) { + trace_xen_mmu_set_pud(ptr, val); + /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { @@ -543,17 +543,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val) #ifdef CONFIG_X86_PAE static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) { + trace_xen_mmu_set_pte_atomic(ptep, pte); set_64bit((u64 *)ptep, native_pte_val(pte)); } static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + trace_xen_mmu_pte_clear(mm, addr, ptep); if (!xen_batched_set_pte(ptep, native_make_pte(0))) native_pte_clear(mm, addr, ptep); } static void xen_pmd_clear(pmd_t *pmdp) { + trace_xen_mmu_pmd_clear(pmdp); set_pmd(pmdp, __pmd(0)); } #endif /* CONFIG_X86_PAE */ @@ -629,6 +632,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val) { pgd_t *user_ptr = xen_get_user_pgd(ptr); + trace_xen_mmu_set_pgd(ptr, user_ptr, val); + /* If page is not pinned, we can just update the entry directly */ if (!xen_page_pinned(ptr)) { @@ -788,14 +793,12 @@ static void xen_pte_unlock(void *v) static void xen_do_pin(unsigned level, unsigned long pfn) { - struct mmuext_op *op; - struct multicall_space mcs; + struct mmuext_op op; - mcs = __xen_mc_entry(sizeof(*op)); - op = mcs.args; - op->cmd 
= level; - op->arg1.mfn = pfn_to_mfn(pfn); - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + op.cmd = level; + op.arg1.mfn = pfn_to_mfn(pfn); + + xen_extend_mmuext_op(&op); } static int xen_pin_page(struct mm_struct *mm, struct page *page, @@ -863,6 +866,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page, read-only, and can be pinned. */ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) { + trace_xen_mmu_pgd_pin(mm, pgd); + xen_mc_batch(); if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { @@ -988,6 +993,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page, /* Release a pagetables pages back as normal RW */ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) { + trace_xen_mmu_pgd_unpin(mm, pgd); + xen_mc_batch(); xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); @@ -1196,6 +1203,8 @@ static void xen_flush_tlb(void) struct mmuext_op *op; struct multicall_space mcs; + trace_xen_mmu_flush_tlb(0); + preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); @@ -1214,6 +1223,8 @@ static void xen_flush_tlb_single(unsigned long addr) struct mmuext_op *op; struct multicall_space mcs; + trace_xen_mmu_flush_tlb_single(addr); + preempt_disable(); mcs = xen_mc_entry(sizeof(*op)); @@ -1240,6 +1251,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, } *args; struct multicall_space mcs; + trace_xen_mmu_flush_tlb_others(cpus, mm, va); + if (cpumask_empty(cpus)) return; /* nothing to do */ @@ -1275,10 +1288,11 @@ static void set_current_cr3(void *v) static void __xen_write_cr3(bool kernel, unsigned long cr3) { - struct mmuext_op *op; - struct multicall_space mcs; + struct mmuext_op op; unsigned long mfn; + trace_xen_mmu_write_cr3(kernel, cr3); + if (cr3) mfn = pfn_to_mfn(PFN_DOWN(cr3)); else @@ -1286,13 +1300,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3) WARN_ON(mfn == 0 && kernel); - mcs = __xen_mc_entry(sizeof(*op)); - - op = mcs.args; - op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; - op->arg1.mfn = mfn; + op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; + op.arg1.mfn = mfn; - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + xen_extend_mmuext_op(&op); if (kernel) { percpu_write(xen_cr3, cr3); @@ -1451,19 +1462,52 @@ static void __init xen_release_pmd_init(unsigned long pfn) make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); } +static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ + struct multicall_space mcs; + struct mmuext_op *op; + + mcs = __xen_mc_entry(sizeof(*op)); + op = mcs.args; + op->cmd = cmd; + op->arg1.mfn = pfn_to_mfn(pfn); + + MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); +} + +static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) +{ + struct multicall_space mcs; + unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); + + mcs = __xen_mc_entry(0); + MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, + pfn_pte(pfn, prot), 0); +} + /* This needs to make sure the new pte page is pinned iff its being attached to a pinned pagetable. 
*/ -static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) +static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, + unsigned level) { - struct page *page = pfn_to_page(pfn); + bool pinned = PagePinned(virt_to_page(mm->pgd)); + + trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); + + if (pinned) { + struct page *page = pfn_to_page(pfn); - if (PagePinned(virt_to_page(mm->pgd))) { SetPagePinned(page); if (!PageHighMem(page)) { - make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); + xen_mc_batch(); + + __set_pfn_prot(pfn, PAGE_KERNEL_RO); + if (level == PT_PTE && USE_SPLIT_PTLOCKS) - pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + + xen_mc_issue(PARAVIRT_LAZY_MMU); } else { /* make sure there are no stray mappings of this page */ @@ -1483,15 +1527,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) } /* This should never happen until we're OK to use struct page */ -static void xen_release_ptpage(unsigned long pfn, unsigned level) +static inline void xen_release_ptpage(unsigned long pfn, unsigned level) { struct page *page = pfn_to_page(pfn); + bool pinned = PagePinned(page); + + trace_xen_mmu_release_ptpage(pfn, level, pinned); - if (PagePinned(page)) { + if (pinned) { if (!PageHighMem(page)) { + xen_mc_batch(); + if (level == PT_PTE && USE_SPLIT_PTLOCKS) - pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); - make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); + __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + + __set_pfn_prot(pfn, PAGE_KERNEL); + + xen_mc_issue(PARAVIRT_LAZY_MMU); } ClearPagePinned(page); } @@ -1626,15 +1678,17 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) void __init xen_setup_machphys_mapping(void) { struct xen_machphys_mapping mapping; - unsigned long machine_to_phys_nr_ents; if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { machine_to_phys_mapping = (unsigned long *)mapping.v_start; - machine_to_phys_nr_ents = mapping.max_mfn + 1; + machine_to_phys_nr = mapping.max_mfn + 1; } else { - machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; + machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; } - machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); +#ifdef CONFIG_X86_32 + WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) + < machine_to_phys_mapping); +#endif } #ifdef CONFIG_X86_64 @@ -1825,6 +1879,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) # endif #else case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: + case VVAR_PAGE: #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: @@ -1865,7 +1920,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #ifdef CONFIG_X86_64 /* Replicate changes to map the vsyscall page into the user pagetable vsyscall mapping. 
*/ - if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { + if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) || + idx == VVAR_PAGE) { unsigned long vaddr = __fix_to_virt(idx); set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); } @@ -1897,9 +1953,6 @@ void __init xen_ident_map_ISA(void) static void __init xen_post_allocator_init(void) { -#ifdef CONFIG_XEN_DEBUG - pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); -#endif pv_mmu_ops.set_pte = xen_set_pte; pv_mmu_ops.set_pmd = xen_set_pmd; pv_mmu_ops.set_pud = xen_set_pud; @@ -2309,17 +2362,3 @@ out: return err; } EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); - -#ifdef CONFIG_XEN_DEBUG_FS -static int p2m_dump_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, p2m_dump_show, NULL); -} - -static const struct file_operations p2m_dump_fops = { - .open = p2m_dump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; -#endif /* CONFIG_XEN_DEBUG_FS */ diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 1b2b73ff0a6e..0d82003e76ad 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c @@ -30,12 +30,13 @@ #define MC_BATCH 32 -#define MC_DEBUG 1 +#define MC_DEBUG 0 #define MC_ARGS (MC_BATCH * 16) struct mc_buffer { + unsigned mcidx, argidx, cbidx; struct multicall_entry entries[MC_BATCH]; #if MC_DEBUG struct multicall_entry debug[MC_BATCH]; @@ -46,85 +47,15 @@ struct mc_buffer { void (*fn)(void *); void *data; } callbacks[MC_BATCH]; - unsigned mcidx, argidx, cbidx; }; static DEFINE_PER_CPU(struct mc_buffer, mc_buffer); DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); -/* flush reasons 0- slots, 1- args, 2- callbacks */ -enum flush_reasons -{ - FL_SLOTS, - FL_ARGS, - FL_CALLBACKS, - - FL_N_REASONS -}; - -#ifdef CONFIG_XEN_DEBUG_FS -#define NHYPERCALLS 40 /* not really */ - -static struct { - unsigned histo[MC_BATCH+1]; - - unsigned issued; - unsigned arg_total; - unsigned hypercalls; - unsigned histo_hypercalls[NHYPERCALLS]; - - unsigned flush[FL_N_REASONS]; -} mc_stats; - -static u8 zero_stats; - -static inline void check_zero(void) -{ - if (unlikely(zero_stats)) { - memset(&mc_stats, 0, sizeof(mc_stats)); - zero_stats = 0; - } -} - -static void mc_add_stats(const struct mc_buffer *mc) -{ - int i; - - check_zero(); - - mc_stats.issued++; - mc_stats.hypercalls += mc->mcidx; - mc_stats.arg_total += mc->argidx; - - mc_stats.histo[mc->mcidx]++; - for(i = 0; i < mc->mcidx; i++) { - unsigned op = mc->entries[i].op; - if (op < NHYPERCALLS) - mc_stats.histo_hypercalls[op]++; - } -} - -static void mc_stats_flush(enum flush_reasons idx) -{ - check_zero(); - - mc_stats.flush[idx]++; -} - -#else /* !CONFIG_XEN_DEBUG_FS */ - -static inline void mc_add_stats(const struct mc_buffer *mc) -{ -} - -static inline void mc_stats_flush(enum flush_reasons idx) -{ -} -#endif /* CONFIG_XEN_DEBUG_FS */ - void xen_mc_flush(void) { struct mc_buffer *b = &__get_cpu_var(mc_buffer); + struct multicall_entry *mc; int ret = 0; unsigned long flags; int i; @@ -135,9 +66,26 @@ void xen_mc_flush(void) something in the middle */ local_irq_save(flags); - mc_add_stats(b); + trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); + + switch (b->mcidx) { + case 0: + /* no-op */ + BUG_ON(b->argidx != 0); + break; + + case 1: + /* Singleton multicall - bypass multicall machinery + and just do the call directly. 
*/ + mc = &b->entries[0]; + + mc->result = privcmd_call(mc->op, + mc->args[0], mc->args[1], mc->args[2], + mc->args[3], mc->args[4]); + ret = mc->result < 0; + break; - if (b->mcidx) { + default: #if MC_DEBUG memcpy(b->debug, b->entries, b->mcidx * sizeof(struct multicall_entry)); @@ -164,11 +112,10 @@ void xen_mc_flush(void) } } #endif + } - b->mcidx = 0; - b->argidx = 0; - } else - BUG_ON(b->argidx != 0); + b->mcidx = 0; + b->argidx = 0; for (i = 0; i < b->cbidx; i++) { struct callback *cb = &b->callbacks[i]; @@ -188,18 +135,21 @@ struct multicall_space __xen_mc_entry(size_t args) struct multicall_space ret; unsigned argidx = roundup(b->argidx, sizeof(u64)); + trace_xen_mc_entry_alloc(args); + BUG_ON(preemptible()); BUG_ON(b->argidx >= MC_ARGS); - if (b->mcidx == MC_BATCH || - (argidx + args) >= MC_ARGS) { - mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS); + if (unlikely(b->mcidx == MC_BATCH || + (argidx + args) >= MC_ARGS)) { + trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ? + XEN_MC_FL_BATCH : XEN_MC_FL_ARGS); xen_mc_flush(); argidx = roundup(b->argidx, sizeof(u64)); } ret.mc = &b->entries[b->mcidx]; -#ifdef MC_DEBUG +#if MC_DEBUG b->caller[b->mcidx] = __builtin_return_address(0); #endif b->mcidx++; @@ -218,20 +168,25 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) BUG_ON(preemptible()); BUG_ON(b->argidx >= MC_ARGS); - if (b->mcidx == 0) - return ret; - - if (b->entries[b->mcidx - 1].op != op) - return ret; + if (unlikely(b->mcidx == 0 || + b->entries[b->mcidx - 1].op != op)) { + trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP); + goto out; + } - if ((b->argidx + size) >= MC_ARGS) - return ret; + if (unlikely((b->argidx + size) >= MC_ARGS)) { + trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE); + goto out; + } ret.mc = &b->entries[b->mcidx - 1]; ret.args = &b->args[b->argidx]; b->argidx += size; BUG_ON(b->argidx >= MC_ARGS); + + trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK); +out: return ret; } @@ -241,43 +196,13 @@ void xen_mc_callback(void (*fn)(void *), void *data) struct callback *cb; if (b->cbidx == MC_BATCH) { - mc_stats_flush(FL_CALLBACKS); + trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK); xen_mc_flush(); } + trace_xen_mc_callback(fn, data); + cb = &b->callbacks[b->cbidx++]; cb->fn = fn; cb->data = data; } - -#ifdef CONFIG_XEN_DEBUG_FS - -static struct dentry *d_mc_debug; - -static int __init xen_mc_debugfs(void) -{ - struct dentry *d_xen = xen_init_debugfs(); - - if (d_xen == NULL) - return -ENOMEM; - - d_mc_debug = debugfs_create_dir("multicalls", d_xen); - - debugfs_create_u8("zero_stats", 0644, d_mc_debug, &zero_stats); - - debugfs_create_u32("batches", 0444, d_mc_debug, &mc_stats.issued); - debugfs_create_u32("hypercalls", 0444, d_mc_debug, &mc_stats.hypercalls); - debugfs_create_u32("arg_total", 0444, d_mc_debug, &mc_stats.arg_total); - - xen_debugfs_create_u32_array("batch_histo", 0444, d_mc_debug, - mc_stats.histo, MC_BATCH); - xen_debugfs_create_u32_array("hypercall_histo", 0444, d_mc_debug, - mc_stats.histo_hypercalls, NHYPERCALLS); - xen_debugfs_create_u32_array("flush_reasons", 0444, d_mc_debug, - mc_stats.flush, FL_N_REASONS); - - return 0; -} -fs_initcall(xen_mc_debugfs); - -#endif /* CONFIG_XEN_DEBUG_FS */ diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h index 4ec8035e3216..dee79b78a90f 100644 --- a/arch/x86/xen/multicalls.h +++ b/arch/x86/xen/multicalls.h @@ -1,6 +1,8 @@ #ifndef _XEN_MULTICALLS_H #define _XEN_MULTICALLS_H +#include <trace/events/xen.h> + #include "xen-ops.h" /* Multicalls */ @@ 
-20,8 +22,10 @@ DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags); static inline void xen_mc_batch(void) { unsigned long flags; + /* need to disable interrupts until this entry is complete */ local_irq_save(flags); + trace_xen_mc_batch(paravirt_get_lazy_mode()); __this_cpu_write(xen_mc_irq_flags, flags); } @@ -37,6 +41,8 @@ void xen_mc_flush(void); /* Issue a multicall if we're not in a lazy mode */ static inline void xen_mc_issue(unsigned mode) { + trace_xen_mc_issue(mode); + if ((paravirt_get_lazy_mode() & mode) == 0) xen_mc_flush(); diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 58efeb9d5440..1b267e75158d 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -161,7 +161,9 @@ #include <asm/xen/page.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> +#include <xen/grant_table.h> +#include "multicalls.h" #include "xen-ops.h" static void __init m2p_override_init(void); @@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn) } /* Add an MFN override for a particular page */ -int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) +int m2p_add_override(unsigned long mfn, struct page *page, + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long pfn; @@ -692,16 +695,28 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte) "m2p_add_override: pfn %lx not mapped", pfn)) return -EINVAL; } - - page->private = mfn; + WARN_ON(PagePrivate(page)); + SetPagePrivate(page); + set_page_private(page, mfn); page->index = pfn_to_mfn(pfn); if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) return -ENOMEM; - if (clear_pte && !PageHighMem(page)) - /* Just zap old mapping for now */ - pte_clear(&init_mm, address, ptep); + if (kmap_op != NULL) { + if (!PageHighMem(page)) { + struct multicall_space mcs = + xen_mc_entry(sizeof(*kmap_op)); + + MULTI_grant_table_op(mcs.mc, + GNTTABOP_map_grant_ref, kmap_op, 1); + + xen_mc_issue(PARAVIRT_LAZY_MMU); + } + /* let's use dev_bus_addr to record the old mfn instead */ + kmap_op->dev_bus_addr = page->index; + page->index = (unsigned long) kmap_op; + } spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); spin_unlock_irqrestore(&m2p_override_lock, flags); @@ -735,13 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte) spin_lock_irqsave(&m2p_override_lock, flags); list_del(&page->lru); spin_unlock_irqrestore(&m2p_override_lock, flags); - set_phys_to_machine(pfn, page->index); + WARN_ON(!PagePrivate(page)); + ClearPagePrivate(page); - if (clear_pte && !PageHighMem(page)) - set_pte_at(&init_mm, address, ptep, - pfn_pte(pfn, PAGE_KERNEL)); - /* No tlb flush necessary because the caller already - * left the pte unmapped. */ + if (clear_pte) { + struct gnttab_map_grant_ref *map_op = + (struct gnttab_map_grant_ref *) page->index; + set_phys_to_machine(pfn, map_op->dev_bus_addr); + if (!PageHighMem(page)) { + struct multicall_space mcs; + struct gnttab_unmap_grant_ref *unmap_op; + + /* + * It might be that we queued all the m2p grant table + * hypercalls in a multicall, then m2p_remove_override + * get called before the multicall has actually been + * issued. In this case handle is going to -1 because + * it hasn't been modified yet. + */ + if (map_op->handle == -1) + xen_mc_flush(); + /* + * Now if map_op->handle is negative it means that the + * hypercall actually returned an error. 
+ */ + if (map_op->handle == GNTST_general_error) { + printk(KERN_WARNING "m2p_remove_override: " + "pfn %lx mfn %lx, failed to modify kernel mappings", + pfn, mfn); + return -1; + } + + mcs = xen_mc_entry( + sizeof(struct gnttab_unmap_grant_ref)); + unmap_op = mcs.args; + unmap_op->host_addr = map_op->host_addr; + unmap_op->handle = map_op->handle; + unmap_op->dev_bus_addr = 0; + + MULTI_grant_table_op(mcs.mc, + GNTTABOP_unmap_grant_ref, unmap_op, 1); + + xen_mc_issue(PARAVIRT_LAZY_MMU); + + set_pte_at(&init_mm, address, ptep, + pfn_pte(pfn, PAGE_KERNEL)); + __flush_tlb_single(address); + map_op->host_addr = 0; + } + } else + set_phys_to_machine(pfn, page->index); return 0; } @@ -758,7 +816,7 @@ struct page *m2p_find_override(unsigned long mfn) spin_lock_irqsave(&m2p_override_lock, flags); list_for_each_entry(p, bucket, lru) { - if (p->private == mfn) { + if (page_private(p) == mfn) { ret = p; break; } @@ -782,17 +840,21 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) EXPORT_SYMBOL_GPL(m2p_find_override_pfn); #ifdef CONFIG_XEN_DEBUG_FS - -int p2m_dump_show(struct seq_file *m, void *v) +#include <linux/debugfs.h> +#include "debugfs.h" +static int p2m_dump_show(struct seq_file *m, void *v) { static const char * const level_name[] = { "top", "middle", - "entry", "abnormal" }; - static const char * const type_name[] = { "identity", "missing", - "pfn", "abnormal"}; + "entry", "abnormal", "error"}; #define TYPE_IDENTITY 0 #define TYPE_MISSING 1 #define TYPE_PFN 2 #define TYPE_UNKNOWN 3 + static const char * const type_name[] = { + [TYPE_IDENTITY] = "identity", + [TYPE_MISSING] = "missing", + [TYPE_PFN] = "pfn", + [TYPE_UNKNOWN] = "abnormal"}; unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; unsigned int uninitialized_var(prev_level); unsigned int uninitialized_var(prev_type); @@ -856,4 +918,32 @@ int p2m_dump_show(struct seq_file *m, void *v) #undef TYPE_PFN #undef TYPE_UNKNOWN } -#endif + +static int p2m_dump_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, p2m_dump_show, NULL); +} + +static const struct file_operations p2m_dump_fops = { + .open = p2m_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static struct dentry *d_mmu_debug; + +static int __init xen_p2m_debugfs(void) +{ + struct dentry *d_xen = xen_init_debugfs(); + + if (d_xen == NULL) + return -ENOMEM; + + d_mmu_debug = debugfs_create_dir("mmu", d_xen); + + debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); + return 0; +} +fs_initcall(xen_p2m_debugfs); +#endif /* CONFIG_XEN_DEBUG_FS */ diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c index 25c52f94a27c..ffcf2615640b 100644 --- a/arch/x86/xen/platform-pci-unplug.c +++ b/arch/x86/xen/platform-pci-unplug.c @@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug); #ifdef CONFIG_XEN_PVHVM static int xen_emul_unplug; -static int __init check_platform_magic(void) +static int check_platform_magic(void) { short magic; char protocol; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 73daaf75801a..f5e1362550e7 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/pm.h> #include <linux/memblock.h> +#include <linux/cpuidle.h> #include <asm/elf.h> #include <asm/vdso.h> @@ -36,7 +37,10 @@ extern void xen_syscall_target(void); extern void xen_syscall32_target(void); /* Amount of extra memory space we add to the e820 ranges */ -phys_addr_t xen_extra_mem_start, 
xen_extra_mem_size; +struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; + +/* Number of pages released from the initial allocation. */ +unsigned long xen_released_pages; /* * The maximum amount of extra memory compared to the base size. The @@ -50,50 +54,47 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; */ #define EXTRA_MEM_RATIO (10) -static void __init xen_add_extra_mem(unsigned long pages) +static void __init xen_add_extra_mem(u64 start, u64 size) { unsigned long pfn; + int i; - u64 size = (u64)pages * PAGE_SIZE; - u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; - - if (!pages) - return; - - e820_add_region(extra_start, size, E820_RAM); - sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); - - memblock_reserve(extra_start, size); + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + /* Add new region. */ + if (xen_extra_mem[i].size == 0) { + xen_extra_mem[i].start = start; + xen_extra_mem[i].size = size; + break; + } + /* Append to existing region. */ + if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) { + xen_extra_mem[i].size += size; + break; + } + } + if (i == XEN_EXTRA_MEM_MAX_REGIONS) + printk(KERN_WARNING "Warning: not enough extra memory regions\n"); - xen_extra_mem_size += size; + memblock_reserve(start, size); - xen_max_p2m_pfn = PFN_DOWN(extra_start + size); + xen_max_p2m_pfn = PFN_DOWN(start + size); - for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++) + for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++) __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); } -static unsigned long __init xen_release_chunk(phys_addr_t start_addr, - phys_addr_t end_addr) +static unsigned long __init xen_release_chunk(unsigned long start, + unsigned long end) { struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; - unsigned long start, end; unsigned long len = 0; unsigned long pfn; int ret; - start = PFN_UP(start_addr); - end = PFN_DOWN(end_addr); - - if (end <= start) - return 0; - - printk(KERN_INFO "xen_release_chunk: looking at area pfn %lx-%lx: ", - start, end); for(pfn = start; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); @@ -106,100 +107,104 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); - WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", - start, end, ret); + WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); if (ret == 1) { __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); len++; } } - printk(KERN_CONT "%ld pages freed\n", len); + printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n", + start, end, len); return len; } -static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, - const struct e820map *e820) +static unsigned long __init xen_set_identity_and_release( + const struct e820entry *list, size_t map_size, unsigned long nr_pages) { - phys_addr_t max_addr = PFN_PHYS(max_pfn); - phys_addr_t last_end = ISA_END_ADDRESS; + phys_addr_t start = 0; unsigned long released = 0; + unsigned long identity = 0; + const struct e820entry *entry; int i; - /* Free any unused memory above the low 1Mbyte. 
*/ - for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { - phys_addr_t end = e820->map[i].addr; - end = min(max_addr, end); + /* + * Combine non-RAM regions and gaps until a RAM region (or the + * end of the map) is reached, then set the 1:1 map and + * release the pages (if available) in those non-RAM regions. + * + * The combined non-RAM regions are rounded to a whole number + * of pages so any partial pages are accessible via the 1:1 + * mapping. This is needed for some BIOSes that put (for + * example) the DMI tables in a reserved region that begins on + * a non-page boundary. + */ + for (i = 0, entry = list; i < map_size; i++, entry++) { + phys_addr_t end = entry->addr + entry->size; + + if (entry->type == E820_RAM || i == map_size - 1) { + unsigned long start_pfn = PFN_DOWN(start); + unsigned long end_pfn = PFN_UP(end); - if (last_end < end) - released += xen_release_chunk(last_end, end); - last_end = max(last_end, e820->map[i].addr + e820->map[i].size); + if (entry->type == E820_RAM) + end_pfn = PFN_UP(entry->addr); + + if (start_pfn < end_pfn) { + if (start_pfn < nr_pages) + released += xen_release_chunk( + start_pfn, min(end_pfn, nr_pages)); + + identity += set_phys_range_identity( + start_pfn, end_pfn); + } + start = end; + } } - if (last_end < max_addr) - released += xen_release_chunk(last_end, max_addr); + printk(KERN_INFO "Released %lu pages of unused memory\n", released); + printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity); - printk(KERN_INFO "released %ld pages of unused memory\n", released); return released; } -static unsigned long __init xen_set_identity(const struct e820entry *list, - ssize_t map_size) +static unsigned long __init xen_get_max_pages(void) { - phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; - phys_addr_t start_pci = last; - const struct e820entry *entry; - unsigned long identity = 0; - int i; - - for (i = 0, entry = list; i < map_size; i++, entry++) { - phys_addr_t start = entry->addr; - phys_addr_t end = start + entry->size; - - if (start < last) - start = last; - - if (end <= start) - continue; + unsigned long max_pages = MAX_DOMAIN_PAGES; + domid_t domid = DOMID_SELF; + int ret; - /* Skip over the 1MB region. */ - if (last > end) - continue; + ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); + if (ret > 0) + max_pages = ret; + return min(max_pages, MAX_DOMAIN_PAGES); +} - if ((entry->type == E820_RAM) || (entry->type == E820_UNUSABLE)) { - if (start > start_pci) - identity += set_phys_range_identity( - PFN_UP(start_pci), PFN_DOWN(start)); +static void xen_align_and_add_e820_region(u64 start, u64 size, int type) +{ + u64 end = start + size; - /* Without saving 'last' we would gooble RAM too - * at the end of the loop. */ - last = end; - start_pci = end; - continue; - } - start_pci = min(start, start_pci); - last = end; + /* Align RAM regions to page boundaries. */ + if (type == E820_RAM) { + start = PAGE_ALIGN(start); + end &= ~((u64)PAGE_SIZE - 1); } - if (last > start_pci) - identity += set_phys_range_identity( - PFN_UP(start_pci), PFN_DOWN(last)); - return identity; + + e820_add_region(start, end - start, type); } + /** * machine_specific_memory_setup - Hook for machine specific memory setup. 
**/ char * __init xen_memory_setup(void) { static struct e820entry map[E820MAX] __initdata; - static struct e820entry map_raw[E820MAX] __initdata; unsigned long max_pfn = xen_start_info->nr_pages; unsigned long long mem_end; int rc; struct xen_memory_map memmap; + unsigned long max_pages; unsigned long extra_pages = 0; - unsigned long extra_limit; - unsigned long identity_pages = 0; int i; int op; @@ -225,58 +230,65 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); - memcpy(map_raw, map, sizeof(map)); - e820.nr_map = 0; - xen_extra_mem_start = mem_end; - for (i = 0; i < memmap.nr_entries; i++) { - unsigned long long end; - - /* Guard against non-page aligned E820 entries. */ - if (map[i].type == E820_RAM) - map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE; - - end = map[i].addr + map[i].size; - if (map[i].type == E820_RAM && end > mem_end) { - /* RAM off the end - may be partially included */ - u64 delta = min(map[i].size, end - mem_end); - - map[i].size -= delta; - end -= delta; - - extra_pages += PFN_DOWN(delta); - /* - * Set RAM below 4GB that is not for us to be unusable. - * This prevents "System RAM" address space from being - * used as potential resource for I/O address (happens - * when 'allocate_resource' is called). - */ - if (delta && - (xen_initial_domain() && end < 0x100000000ULL)) - e820_add_region(end, delta, E820_UNUSABLE); + /* Make sure the Xen-supplied memory map is well-ordered. */ + sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); + + max_pages = xen_get_max_pages(); + if (max_pages > max_pfn) + extra_pages += max_pages - max_pfn; + + /* + * Set P2M for all non-RAM pages and E820 gaps to be identity + * type PFNs. Any RAM pages that would be made inaccesible by + * this are first released. + */ + xen_released_pages = xen_set_identity_and_release( + map, memmap.nr_entries, max_pfn); + extra_pages += xen_released_pages; + + /* + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO + * factor the base size. On non-highmem systems, the base + * size is the full initial memory allocation; on highmem it + * is limited to the max size of lowmem, so that it doesn't + * get completely filled. + * + * In principle there could be a problem in lowmem systems if + * the initial memory is also very large with respect to + * lowmem, but we won't try to deal with that here. + */ + extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), + extra_pages); + + i = 0; + while (i < memmap.nr_entries) { + u64 addr = map[i].addr; + u64 size = map[i].size; + u32 type = map[i].type; + + if (type == E820_RAM) { + if (addr < mem_end) { + size = min(size, mem_end - addr); + } else if (extra_pages) { + size = min(size, (u64)extra_pages * PAGE_SIZE); + extra_pages -= size / PAGE_SIZE; + xen_add_extra_mem(addr, size); + } else + type = E820_UNUSABLE; } - if (map[i].size > 0 && end > xen_extra_mem_start) - xen_extra_mem_start = end; + xen_align_and_add_e820_region(addr, size, type); - /* Add region if any remains */ - if (map[i].size > 0) - e820_add_region(map[i].addr, map[i].size, map[i].type); + map[i].addr += size; + map[i].size -= size; + if (map[i].size == 0) + i++; } - /* Align the balloon area so that max_low_pfn does not get set - * to be at the _end_ of the PCI gap at the far end (fee01000). - * Note that xen_extra_mem_start gets set in the loop above to be - * past the last E820 region. 
*/ - if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32))) - xen_extra_mem_start = (1ULL<<32); /* * In domU, the ISA region is normal, usable memory, but we * reserve ISA memory anyway because too many things poke * about in there. - * - * In Dom0, the host E820 information can leave gaps in the - * ISA range, which would cause us to release those pages. To - * avoid this, we unconditionally reserve them here. */ e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RESERVED); @@ -292,36 +304,6 @@ char * __init xen_memory_setup(void) sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); - extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820); - - /* - * Clamp the amount of extra memory to a EXTRA_MEM_RATIO - * factor the base size. On non-highmem systems, the base - * size is the full initial memory allocation; on highmem it - * is limited to the max size of lowmem, so that it doesn't - * get completely filled. - * - * In principle there could be a problem in lowmem systems if - * the initial memory is also very large with respect to - * lowmem, but we won't try to deal with that here. - */ - extra_limit = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), - max_pfn + extra_pages); - - if (extra_limit >= max_pfn) - extra_pages = extra_limit - max_pfn; - else - extra_pages = 0; - - xen_add_extra_mem(extra_pages); - - /* - * Set P2M for all non-RAM pages and E820 gaps to be identity - * type PFNs. We supply it with the non-sanitized version - * of the E820. - */ - identity_pages = xen_set_identity(map_raw, memmap.nr_entries); - printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); return "Xen"; } @@ -425,7 +407,7 @@ void __init xen_arch_setup(void) #ifdef CONFIG_X86_32 boot_cpu_data.hlt_works_ok = 1; #endif - pm_idle = default_idle; + disable_cpuidle(); boot_option_idle_override = IDLE_HALT; fiddle_vdso(); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index b4533a86d7e4..041d4fe9dfe4 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -32,6 +32,7 @@ #include <xen/page.h> #include <xen/events.h> +#include <xen/hvc-console.h> #include "xen-ops.h" #include "mmu.h" @@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) unsigned cpu; unsigned int i; + if (skip_ioapic_setup) { + char *m = (max_cpus == 0) ? 
+ "The nosmp parameter is incompatible with Xen; " \ + "use Xen dom0_max_vcpus=1 parameter" : + "The noapic parameter is incompatible with Xen"; + + xen_raw_printk(m); + panic(m); + } xen_init_lock_cpu(0); smp_store_cpu_info(0); @@ -521,10 +531,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) native_smp_prepare_cpus(max_cpus); WARN_ON(xen_smp_intr_init(0)); - if (!xen_have_vector_callback) - return; xen_init_lock_cpu(0); - xen_init_spinlocks(); } static int __cpuinit xen_hvm_cpu_up(unsigned int cpu) @@ -546,6 +553,8 @@ static void xen_hvm_cpu_die(unsigned int cpu) void __init xen_hvm_smp_init(void) { + if (!xen_have_vector_callback) + return; smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; smp_ops.smp_send_reschedule = xen_smp_send_reschedule; smp_ops.cpu_up = xen_hvm_cpu_up; diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5158c505bef9..0296a9522501 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -168,9 +168,10 @@ cycle_t xen_clocksource_read(void) struct pvclock_vcpu_time_info *src; cycle_t ret; - src = &get_cpu_var(xen_vcpu)->time; + preempt_disable_notrace(); + src = &__get_cpu_var(xen_vcpu)->time; ret = pvclock_clocksource_read(src); - put_cpu_var(xen_vcpu); + preempt_enable_notrace(); return ret; } @@ -200,8 +201,22 @@ static unsigned long xen_get_wallclock(void) static int xen_set_wallclock(unsigned long now) { + struct xen_platform_op op; + int rc; + /* do nothing for domU */ - return -1; + if (!xen_initial_domain()) + return -1; + + op.cmd = XENPF_settime; + op.u.settime.secs = now; + op.u.settime.nsecs = 0; + op.u.settime.system_time = xen_clocksource_read(); + + rc = HYPERVISOR_dom0_op(&op); + WARN(rc != 0, "XENPF_settime failed: now=%ld\n", now); + + return rc; } static struct clocksource xen_clocksource __read_mostly = { diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c new file mode 100644 index 000000000000..520022d1a181 --- /dev/null +++ b/arch/x86/xen/trace.c @@ -0,0 +1,62 @@ +#include <linux/ftrace.h> +#include <xen/interface/xen.h> + +#define N(x) [__HYPERVISOR_##x] = "("#x")" +static const char *xen_hypercall_names[] = { + N(set_trap_table), + N(mmu_update), + N(set_gdt), + N(stack_switch), + N(set_callbacks), + N(fpu_taskswitch), + N(sched_op_compat), + N(dom0_op), + N(set_debugreg), + N(get_debugreg), + N(update_descriptor), + N(memory_op), + N(multicall), + N(update_va_mapping), + N(set_timer_op), + N(event_channel_op_compat), + N(xen_version), + N(console_io), + N(physdev_op_compat), + N(grant_table_op), + N(vm_assist), + N(update_va_mapping_otherdomain), + N(iret), + N(vcpu_op), + N(set_segment_base), + N(mmuext_op), + N(acm_op), + N(nmi_op), + N(sched_op), + N(callback_op), + N(xenoprof_op), + N(event_channel_op), + N(physdev_op), + N(hvm_op), + +/* Architecture-specific hypercall definitions. 
*/ + N(arch_0), + N(arch_1), + N(arch_2), + N(arch_3), + N(arch_4), + N(arch_5), + N(arch_6), + N(arch_7), +}; +#undef N + +static const char *xen_hypercall_name(unsigned op) +{ + if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL) + return xen_hypercall_names[op]; + + return ""; +} + +#define CREATE_TRACE_POINTS +#include <trace/events/xen.h> diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c new file mode 100644 index 000000000000..1cd7f4d11e29 --- /dev/null +++ b/arch/x86/xen/vga.c @@ -0,0 +1,67 @@ +#include <linux/screen_info.h> +#include <linux/init.h> + +#include <asm/bootparam.h> +#include <asm/setup.h> + +#include <xen/interface/xen.h> + +#include "xen-ops.h" + +void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) +{ + struct screen_info *screen_info = &boot_params.screen_info; + + /* This is drawn from a dump from vgacon:startup in + * standard Linux. */ + screen_info->orig_video_mode = 3; + screen_info->orig_video_isVGA = 1; + screen_info->orig_video_lines = 25; + screen_info->orig_video_cols = 80; + screen_info->orig_video_ega_bx = 3; + screen_info->orig_video_points = 16; + screen_info->orig_y = screen_info->orig_video_lines - 1; + + switch (info->video_type) { + case XEN_VGATYPE_TEXT_MODE_3: + if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3) + + sizeof(info->u.text_mode_3)) + break; + screen_info->orig_video_lines = info->u.text_mode_3.rows; + screen_info->orig_video_cols = info->u.text_mode_3.columns; + screen_info->orig_x = info->u.text_mode_3.cursor_x; + screen_info->orig_y = info->u.text_mode_3.cursor_y; + screen_info->orig_video_points = + info->u.text_mode_3.font_height; + break; + + case XEN_VGATYPE_VESA_LFB: + if (size < offsetof(struct dom0_vga_console_info, + u.vesa_lfb.gbl_caps)) + break; + screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; + screen_info->lfb_width = info->u.vesa_lfb.width; + screen_info->lfb_height = info->u.vesa_lfb.height; + screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel; + screen_info->lfb_base = info->u.vesa_lfb.lfb_base; + screen_info->lfb_size = info->u.vesa_lfb.lfb_size; + screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line; + screen_info->red_size = info->u.vesa_lfb.red_size; + screen_info->red_pos = info->u.vesa_lfb.red_pos; + screen_info->green_size = info->u.vesa_lfb.green_size; + screen_info->green_pos = info->u.vesa_lfb.green_pos; + screen_info->blue_size = info->u.vesa_lfb.blue_size; + screen_info->blue_pos = info->u.vesa_lfb.blue_pos; + screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size; + screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos; + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.gbl_caps) + + sizeof(info->u.vesa_lfb.gbl_caps)) + screen_info->capabilities = info->u.vesa_lfb.gbl_caps; + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.mode_attrs) + + sizeof(info->u.vesa_lfb.mode_attrs)) + screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; + break; + } +} diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S index 22a2093b5862..b040b0e518ca 100644 --- a/arch/x86/xen/xen-asm_32.S +++ b/arch/x86/xen/xen-asm_32.S @@ -113,11 +113,13 @@ xen_iret_start_crit: /* * If there's something pending, mask events again so we can - * jump back into xen_hypervisor_callback + * jump back into xen_hypervisor_callback. Otherwise do not + * touch XEN_vcpu_info_mask. 
*/ - sete XEN_vcpu_info_mask(%eax) + jne 1f + movb $1, XEN_vcpu_info_mask(%eax) - popl %eax +1: popl %eax /* * From this point on the registers are restored and the stack diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 97dfdc8757b3..b095739ccd4c 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu) } #endif +struct dom0_vga_console_info; + +#ifdef CONFIG_XEN_DOM0 +void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); +#else +static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, + size_t size) +{ +} +#endif + /* Declare an asm function, along with symbols needed to make it inlineable */ #define DECL_ASM(ret, name, ...) \ |