From 74315cccd2104a953f493acca2c6b0519d6f5c6f Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Wed, 8 Jun 2011 17:29:11 -0400
Subject: iommu-api: Add missing header file

If CONFIG_IOMMU_API is not defined some functions will just return
-ENODEV. Add errno.h for the definition of ENODEV.

Signed-off-by: Laura Abbott
Signed-off-by: Joerg Roedel
---
 include/linux/iommu.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 0a2ba4098996..9940319d6f9d 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,6 +19,8 @@
 #ifndef __LINUX_IOMMU_H
 #define __LINUX_IOMMU_H
 
+#include <linux/errno.h>
+
 #define IOMMU_READ	(1)
 #define IOMMU_WRITE	(2)
 #define IOMMU_CACHE	(4) /* DMA cache coherency */
-- 
cgit v1.2.3


From 166e9278a3f98bab29ebb3d685a81cfb11b98be0 Mon Sep 17 00:00:00 2001
From: Ohad Ben-Cohen
Date: Fri, 10 Jun 2011 21:42:27 +0300
Subject: x86/ia64: intel-iommu: move to drivers/iommu/

This should ease finding similarities with different platforms,
with the intention of solving problems once in a generic framework
which everyone can use.

Note: to move intel-iommu.c, the declaration of
pci_find_upstream_pcie_bridge() has to move from drivers/pci/pci.h
to include/linux/pci.h. This is handled in this patch, too.

As suggested, also drop DMAR's EXPERIMENTAL tag while we're at it.

Compile-tested on x86_64.

Signed-off-by: Ohad Ben-Cohen
Signed-off-by: Joerg Roedel
---
 arch/ia64/Kconfig              |   21 -
 arch/x86/Kconfig               |   50 -
 drivers/iommu/Kconfig          |   49 +
 drivers/iommu/Makefile         |    2 +
 drivers/iommu/dmar.c           | 1461 +++++++++++++++
 drivers/iommu/intel-iommu.c    | 4016 +++++++++++++++++++++++++++++++++++++++
 drivers/iommu/intr_remapping.c |  797 ++++++++
 drivers/iommu/intr_remapping.h |   17 +
 drivers/iommu/iova.c           |  435 +++++
 drivers/pci/Makefile           |    5 -
 drivers/pci/dmar.c             | 1461 ---------------
 drivers/pci/intel-iommu.c      | 4017 ----------------------------------------
 drivers/pci/intr_remapping.c   |  798 --------
 drivers/pci/intr_remapping.h   |   17 -
 drivers/pci/iova.c             |  435 -----
 drivers/pci/pci.h              |    2 -
 include/linux/pci.h            |   11 +
 17 files changed, 6788 insertions(+), 6806 deletions(-)
 create mode 100644 drivers/iommu/dmar.c
 create mode 100644 drivers/iommu/intel-iommu.c
 create mode 100644 drivers/iommu/intr_remapping.c
 create mode 100644 drivers/iommu/intr_remapping.h
 create mode 100644 drivers/iommu/iova.c
 delete mode 100644 drivers/pci/dmar.c
 delete mode 100644 drivers/pci/intel-iommu.c
 delete mode 100644 drivers/pci/intr_remapping.c
 delete mode 100644 drivers/pci/intr_remapping.h
 delete mode 100644 drivers/pci/iova.c

diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 9929e4e11ea0..7336ba653b8f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -627,27 +627,6 @@ source "drivers/pci/hotplug/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on IA64_GENERIC && ACPI && EXPERIMENTAL
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
 endmenu
 
 endif

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1b6a2e212db4..a169573c64fc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1912,56 +1912,6 @@ config PCI_CNB20LE_QUIRK
 
 	  You should say N unless you know you need this.
 
-config DMAR
-	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on PCI_MSI && ACPI && EXPERIMENTAL
-	select IOMMU_API
-	help
-	  DMA remapping (DMAR) devices support enables independent address
-	  translations for Direct Memory Access (DMA) from devices.
-	  These DMA remapping devices are reported via ACPI tables
-	  and include PCI device scope covered by these DMA
-	  remapping devices.
-
-config DMAR_DEFAULT_ON
-	def_bool y
-	prompt "Enable DMA Remapping Devices by default"
-	depends on DMAR
-	help
-	  Selecting this option will enable a DMAR device at boot time if
-	  one is found. If this option is not selected, DMAR support can
-	  be enabled by passing intel_iommu=on to the kernel. It is
-	  recommended you say N here while the DMAR code remains
-	  experimental.
-
-config DMAR_BROKEN_GFX_WA
-	bool "Workaround broken graphics drivers (going away soon)"
-	depends on DMAR && BROKEN
-	---help---
-	  Current Graphics drivers tend to use physical address
-	  for DMA and avoid using DMA APIs. Setting this config
-	  option permits the IOMMU driver to set a unity map for
-	  all the OS-visible memory. Hence the driver can continue
-	  to use physical addresses for DMA, at least until this
-	  option is removed in the 2.6.32 kernel.
-
-config DMAR_FLOPPY_WA
-	def_bool y
-	depends on DMAR
-	---help---
-	  Floppy disk drivers are known to bypass DMA API calls
-	  thereby failing to work when IOMMU is enabled. This
-	  workaround will setup a 1:1 mapping for the first
-	  16MiB to make floppy (an ISA device) work.
-
-config INTR_REMAP
-	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
-	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
-	  Supports Interrupt remapping for IO-APIC and MSI devices.
-	  To use x2apic mode in the CPU's which support x2APIC enhancements or
-	  to support platforms with CPU's having > 8 bit APIC ID, say Y.
-
 source "drivers/pci/pcie/Kconfig"
 
 source "drivers/pci/Kconfig"

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 9246c5bf25af..e2a5f141ae2d 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -46,3 +46,52 @@ config AMD_IOMMU_STATS
 	  statistics about whats happening in the driver and exports that
 	  information to userspace via debugfs.
 	  If unsure, say N.
+
+# Intel IOMMU support
+config DMAR
+	bool "Support for DMA Remapping Devices"
+	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+	select IOMMU_API
+	help
+	  DMA remapping (DMAR) devices support enables independent address
+	  translations for Direct Memory Access (DMA) from devices.
+	  These DMA remapping devices are reported via ACPI tables
+	  and include PCI device scope covered by these DMA
+	  remapping devices.
+
+config DMAR_DEFAULT_ON
+	def_bool y
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel.
+
+config DMAR_BROKEN_GFX_WA
+	bool "Workaround broken graphics drivers (going away soon)"
+	depends on DMAR && BROKEN && X86
+	---help---
+	  Current Graphics drivers tend to use physical address
+	  for DMA and avoid using DMA APIs. Setting this config
+	  option permits the IOMMU driver to set a unity map for
+	  all the OS-visible memory. Hence the driver can continue
+	  to use physical addresses for DMA, at least until this
+	  option is removed in the 2.6.32 kernel.
+
+config DMAR_FLOPPY_WA
+	def_bool y
+	depends on DMAR && X86
+	---help---
+	  Floppy disk drivers are known to bypass DMA API calls
+	  thereby failing to work when IOMMU is enabled. This
+	  workaround will setup a 1:1 mapping for the first
+	  16MiB to make floppy (an ISA device) work.
+
+config INTR_REMAP
+	bool "Support for Interrupt Remapping (EXPERIMENTAL)"
+	depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
+	---help---
+	  Supports Interrupt remapping for IO-APIC and MSI devices.
+	  To use x2apic mode in the CPU's which support x2APIC enhancements or
+	  to support platforms with CPU's having > 8 bit APIC ID, say Y.

diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 4237eaf84609..49e9c0f46bd5 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,3 +1,5 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o
+obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
+obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o

diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
new file mode 100644
index 000000000000..3dc9befa5aec
--- /dev/null
+++ b/drivers/iommu/dmar.c
@@ -0,0 +1,1461 @@
+/*
+ * Copyright (c) 2006, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj
+ * Author: Shaohua Li
+ * Author: Anil S Keshavamurthy
+ *
+ * This file implements early detection/parsing of Remapping Devices
+ * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
+ * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
+ */
+
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/iova.h>
+#include <linux/intel-iommu.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <asm/iommu_table.h>
+
+#define PREFIX "DMAR: "
+
+/* No locks are needed as DMA remapping hardware unit
+ * list is constructed at boot time and hotplug of
+ * these units are not supported by the architecture.
+ */
+LIST_HEAD(dmar_drhd_units);
+
+static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
+
+static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
+{
+	/*
+	 * add INCLUDE_ALL at the tail, so scan the list will find it at
+	 * the very end.
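 *
 * [Illustrative note added in editing, not part of the patch.] Keeping
 * INCLUDE_ALL units at the tail means an in-order scan matches specific
 * units first and only then the catch-all entry; unit_covers_dev() is a
 * hypothetical helper standing in for the real matching logic:
 *
 *	struct dmar_drhd_unit *u;
 *
 *	list_for_each_entry(u, &dmar_drhd_units, list)
 *		if (unit_covers_dev(u, dev))
 *			return u;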
+ */ + if (drhd->include_all) + list_add_tail(&drhd->list, &dmar_drhd_units); + else + list_add(&drhd->list, &dmar_drhd_units); +} + +static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, + struct pci_dev **dev, u16 segment) +{ + struct pci_bus *bus; + struct pci_dev *pdev = NULL; + struct acpi_dmar_pci_path *path; + int count; + + bus = pci_find_bus(segment, scope->bus); + path = (struct acpi_dmar_pci_path *)(scope + 1); + count = (scope->length - sizeof(struct acpi_dmar_device_scope)) + / sizeof(struct acpi_dmar_pci_path); + + while (count) { + if (pdev) + pci_dev_put(pdev); + /* + * Some BIOSes list non-exist devices in DMAR table, just + * ignore it + */ + if (!bus) { + printk(KERN_WARNING + PREFIX "Device scope bus [%d] not found\n", + scope->bus); + break; + } + pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn)); + if (!pdev) { + printk(KERN_WARNING PREFIX + "Device scope device [%04x:%02x:%02x.%02x] not found\n", + segment, bus->number, path->dev, path->fn); + break; + } + path ++; + count --; + bus = pdev->subordinate; + } + if (!pdev) { + printk(KERN_WARNING PREFIX + "Device scope device [%04x:%02x:%02x.%02x] not found\n", + segment, scope->bus, path->dev, path->fn); + *dev = NULL; + return 0; + } + if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \ + pdev->subordinate) || (scope->entry_type == \ + ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) { + pci_dev_put(pdev); + printk(KERN_WARNING PREFIX + "Device scope type does not match for %s\n", + pci_name(pdev)); + return -EINVAL; + } + *dev = pdev; + return 0; +} + +static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, + struct pci_dev ***devices, u16 segment) +{ + struct acpi_dmar_device_scope *scope; + void * tmp = start; + int index; + int ret; + + *cnt = 0; + while (start < end) { + scope = start; + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || + scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) + (*cnt)++; + else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) { + printk(KERN_WARNING PREFIX + "Unsupported device scope\n"); + } + start += scope->length; + } + if (*cnt == 0) + return 0; + + *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL); + if (!*devices) + return -ENOMEM; + + start = tmp; + index = 0; + while (start < end) { + scope = start; + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || + scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) { + ret = dmar_parse_one_dev_scope(scope, + &(*devices)[index], segment); + if (ret) { + kfree(*devices); + return ret; + } + index ++; + } + start += scope->length; + } + + return 0; +} + +/** + * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition + * structure which uniquely represent one DMA remapping hardware unit + * present in the platform + */ +static int __init +dmar_parse_one_drhd(struct acpi_dmar_header *header) +{ + struct acpi_dmar_hardware_unit *drhd; + struct dmar_drhd_unit *dmaru; + int ret = 0; + + drhd = (struct acpi_dmar_hardware_unit *)header; + dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); + if (!dmaru) + return -ENOMEM; + + dmaru->hdr = header; + dmaru->reg_base_addr = drhd->address; + dmaru->segment = drhd->segment; + dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ + + ret = alloc_iommu(dmaru); + if (ret) { + kfree(dmaru); + return ret; + } + dmar_register_drhd_unit(dmaru); + return 0; +} + +static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) +{ + struct acpi_dmar_hardware_unit *drhd; + int ret = 0; + + drhd = 
(struct acpi_dmar_hardware_unit *) dmaru->hdr; + + if (dmaru->include_all) + return 0; + + ret = dmar_parse_dev_scope((void *)(drhd + 1), + ((void *)drhd) + drhd->header.length, + &dmaru->devices_cnt, &dmaru->devices, + drhd->segment); + if (ret) { + list_del(&dmaru->list); + kfree(dmaru); + } + return ret; +} + +#ifdef CONFIG_DMAR +LIST_HEAD(dmar_rmrr_units); + +static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) +{ + list_add(&rmrr->list, &dmar_rmrr_units); +} + + +static int __init +dmar_parse_one_rmrr(struct acpi_dmar_header *header) +{ + struct acpi_dmar_reserved_memory *rmrr; + struct dmar_rmrr_unit *rmrru; + + rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); + if (!rmrru) + return -ENOMEM; + + rmrru->hdr = header; + rmrr = (struct acpi_dmar_reserved_memory *)header; + rmrru->base_address = rmrr->base_address; + rmrru->end_address = rmrr->end_address; + + dmar_register_rmrr_unit(rmrru); + return 0; +} + +static int __init +rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) +{ + struct acpi_dmar_reserved_memory *rmrr; + int ret; + + rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; + ret = dmar_parse_dev_scope((void *)(rmrr + 1), + ((void *)rmrr) + rmrr->header.length, + &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); + + if (ret || (rmrru->devices_cnt == 0)) { + list_del(&rmrru->list); + kfree(rmrru); + } + return ret; +} + +static LIST_HEAD(dmar_atsr_units); + +static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) +{ + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + atsr = container_of(hdr, struct acpi_dmar_atsr, header); + atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); + if (!atsru) + return -ENOMEM; + + atsru->hdr = hdr; + atsru->include_all = atsr->flags & 0x1; + + list_add(&atsru->list, &dmar_atsr_units); + + return 0; +} + +static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) +{ + int rc; + struct acpi_dmar_atsr *atsr; + + if (atsru->include_all) + return 0; + + atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); + rc = dmar_parse_dev_scope((void *)(atsr + 1), + (void *)atsr + atsr->header.length, + &atsru->devices_cnt, &atsru->devices, + atsr->segment); + if (rc || !atsru->devices_cnt) { + list_del(&atsru->list); + kfree(atsru); + } + + return rc; +} + +int dmar_find_matched_atsr_unit(struct pci_dev *dev) +{ + int i; + struct pci_bus *bus; + struct acpi_dmar_atsr *atsr; + struct dmar_atsr_unit *atsru; + + dev = pci_physfn(dev); + + list_for_each_entry(atsru, &dmar_atsr_units, list) { + atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); + if (atsr->segment == pci_domain_nr(dev->bus)) + goto found; + } + + return 0; + +found: + for (bus = dev->bus; bus; bus = bus->parent) { + struct pci_dev *bridge = bus->self; + + if (!bridge || !pci_is_pcie(bridge) || + bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + return 0; + + if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { + for (i = 0; i < atsru->devices_cnt; i++) + if (atsru->devices[i] == bridge) + return 1; + break; + } + } + + if (atsru->include_all) + return 1; + + return 0; +} +#endif + +#ifdef CONFIG_ACPI_NUMA +static int __init +dmar_parse_one_rhsa(struct acpi_dmar_header *header) +{ + struct acpi_dmar_rhsa *rhsa; + struct dmar_drhd_unit *drhd; + + rhsa = (struct acpi_dmar_rhsa *)header; + for_each_drhd_unit(drhd) { + if (drhd->reg_base_addr == rhsa->base_address) { + int node = acpi_map_pxm_to_node(rhsa->proximity_domain); + + if (!node_online(node)) + node = -1; + drhd->iommu->node = node; + return 0; + } + } + WARN_TAINT( + 1, 
TAINT_FIRMWARE_WORKAROUND, + "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + drhd->reg_base_addr, + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + + return 0; +} +#endif + +static void __init +dmar_table_print_dmar_entry(struct acpi_dmar_header *header) +{ + struct acpi_dmar_hardware_unit *drhd; + struct acpi_dmar_reserved_memory *rmrr; + struct acpi_dmar_atsr *atsr; + struct acpi_dmar_rhsa *rhsa; + + switch (header->type) { + case ACPI_DMAR_TYPE_HARDWARE_UNIT: + drhd = container_of(header, struct acpi_dmar_hardware_unit, + header); + printk (KERN_INFO PREFIX + "DRHD base: %#016Lx flags: %#x\n", + (unsigned long long)drhd->address, drhd->flags); + break; + case ACPI_DMAR_TYPE_RESERVED_MEMORY: + rmrr = container_of(header, struct acpi_dmar_reserved_memory, + header); + printk (KERN_INFO PREFIX + "RMRR base: %#016Lx end: %#016Lx\n", + (unsigned long long)rmrr->base_address, + (unsigned long long)rmrr->end_address); + break; + case ACPI_DMAR_TYPE_ATSR: + atsr = container_of(header, struct acpi_dmar_atsr, header); + printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); + break; + case ACPI_DMAR_HARDWARE_AFFINITY: + rhsa = container_of(header, struct acpi_dmar_rhsa, header); + printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", + (unsigned long long)rhsa->base_address, + rhsa->proximity_domain); + break; + } +} + +/** + * dmar_table_detect - checks to see if the platform supports DMAR devices + */ +static int __init dmar_table_detect(void) +{ + acpi_status status = AE_OK; + + /* if we could find DMAR table, then there are DMAR devices */ + status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0, + (struct acpi_table_header **)&dmar_tbl, + &dmar_tbl_size); + + if (ACPI_SUCCESS(status) && !dmar_tbl) { + printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); + status = AE_NOT_FOUND; + } + + return (ACPI_SUCCESS(status) ? 1 : 0); +} + +/** + * parse_dmar_table - parses the DMA reporting table + */ +static int __init +parse_dmar_table(void) +{ + struct acpi_table_dmar *dmar; + struct acpi_dmar_header *entry_header; + int ret = 0; + + /* + * Do it again, earlier dmar_tbl mapping could be mapped with + * fixed map. 
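 *
 * [Sketch added in editing, not part of the patch.] The walk performed
 * below treats the DMAR table as a fixed header followed by
 * variable-length entries chained by their length fields, essentially:
 *
 *	struct acpi_dmar_header *h = (struct acpi_dmar_header *)(dmar + 1);
 *	void *end = (void *)dmar + dmar_tbl->length;
 *
 *	while ((void *)h < end) {
 *		dmar_table_print_dmar_entry(h);
 *		h = (void *)h + h->length;
 *	}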
+ */ + dmar_table_detect(); + + /* + * ACPI tables may not be DMA protected by tboot, so use DMAR copy + * SINIT saved in SinitMleData in TXT heap (which is DMA protected) + */ + dmar_tbl = tboot_get_dmar_table(dmar_tbl); + + dmar = (struct acpi_table_dmar *)dmar_tbl; + if (!dmar) + return -ENODEV; + + if (dmar->width < PAGE_SHIFT - 1) { + printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); + return -EINVAL; + } + + printk (KERN_INFO PREFIX "Host address width %d\n", + dmar->width + 1); + + entry_header = (struct acpi_dmar_header *)(dmar + 1); + while (((unsigned long)entry_header) < + (((unsigned long)dmar) + dmar_tbl->length)) { + /* Avoid looping forever on bad ACPI tables */ + if (entry_header->length == 0) { + printk(KERN_WARNING PREFIX + "Invalid 0-length structure\n"); + ret = -EINVAL; + break; + } + + dmar_table_print_dmar_entry(entry_header); + + switch (entry_header->type) { + case ACPI_DMAR_TYPE_HARDWARE_UNIT: + ret = dmar_parse_one_drhd(entry_header); + break; + case ACPI_DMAR_TYPE_RESERVED_MEMORY: +#ifdef CONFIG_DMAR + ret = dmar_parse_one_rmrr(entry_header); +#endif + break; + case ACPI_DMAR_TYPE_ATSR: +#ifdef CONFIG_DMAR + ret = dmar_parse_one_atsr(entry_header); +#endif + break; + case ACPI_DMAR_HARDWARE_AFFINITY: +#ifdef CONFIG_ACPI_NUMA + ret = dmar_parse_one_rhsa(entry_header); +#endif + break; + default: + printk(KERN_WARNING PREFIX + "Unknown DMAR structure type %d\n", + entry_header->type); + ret = 0; /* for forward compatibility */ + break; + } + if (ret) + break; + + entry_header = ((void *)entry_header + entry_header->length); + } + return ret; +} + +static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, + struct pci_dev *dev) +{ + int index; + + while (dev) { + for (index = 0; index < cnt; index++) + if (dev == devices[index]) + return 1; + + /* Check our parent */ + dev = dev->bus->self; + } + + return 0; +} + +struct dmar_drhd_unit * +dmar_find_matched_drhd_unit(struct pci_dev *dev) +{ + struct dmar_drhd_unit *dmaru = NULL; + struct acpi_dmar_hardware_unit *drhd; + + dev = pci_physfn(dev); + + list_for_each_entry(dmaru, &dmar_drhd_units, list) { + drhd = container_of(dmaru->hdr, + struct acpi_dmar_hardware_unit, + header); + + if (dmaru->include_all && + drhd->segment == pci_domain_nr(dev->bus)) + return dmaru; + + if (dmar_pci_device_match(dmaru->devices, + dmaru->devices_cnt, dev)) + return dmaru; + } + + return NULL; +} + +int __init dmar_dev_scope_init(void) +{ + struct dmar_drhd_unit *drhd, *drhd_n; + int ret = -ENODEV; + + list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { + ret = dmar_parse_dev(drhd); + if (ret) + return ret; + } + +#ifdef CONFIG_DMAR + { + struct dmar_rmrr_unit *rmrr, *rmrr_n; + struct dmar_atsr_unit *atsr, *atsr_n; + + list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { + ret = rmrr_parse_dev(rmrr); + if (ret) + return ret; + } + + list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) { + ret = atsr_parse_dev(atsr); + if (ret) + return ret; + } + } +#endif + + return ret; +} + + +int __init dmar_table_init(void) +{ + static int dmar_table_initialized; + int ret; + + if (dmar_table_initialized) + return 0; + + dmar_table_initialized = 1; + + ret = parse_dmar_table(); + if (ret) { + if (ret != -ENODEV) + printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); + return ret; + } + + if (list_empty(&dmar_drhd_units)) { + printk(KERN_INFO PREFIX "No DMAR devices found\n"); + return -ENODEV; + } + +#ifdef CONFIG_DMAR + if (list_empty(&dmar_rmrr_units)) + printk(KERN_INFO PREFIX "No RMRR 
found\n"); + + if (list_empty(&dmar_atsr_units)) + printk(KERN_INFO PREFIX "No ATSR found\n"); +#endif + + return 0; +} + +static void warn_invalid_dmar(u64 addr, const char *message) +{ + WARN_TAINT_ONCE( + 1, TAINT_FIRMWARE_WORKAROUND, + "Your BIOS is broken; DMAR reported at address %llx%s!\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + addr, message, + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); +} + +int __init check_zero_address(void) +{ + struct acpi_table_dmar *dmar; + struct acpi_dmar_header *entry_header; + struct acpi_dmar_hardware_unit *drhd; + + dmar = (struct acpi_table_dmar *)dmar_tbl; + entry_header = (struct acpi_dmar_header *)(dmar + 1); + + while (((unsigned long)entry_header) < + (((unsigned long)dmar) + dmar_tbl->length)) { + /* Avoid looping forever on bad ACPI tables */ + if (entry_header->length == 0) { + printk(KERN_WARNING PREFIX + "Invalid 0-length structure\n"); + return 0; + } + + if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { + void __iomem *addr; + u64 cap, ecap; + + drhd = (void *)entry_header; + if (!drhd->address) { + warn_invalid_dmar(0, ""); + goto failed; + } + + addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); + if (!addr ) { + printk("IOMMU: can't validate: %llx\n", drhd->address); + goto failed; + } + cap = dmar_readq(addr + DMAR_CAP_REG); + ecap = dmar_readq(addr + DMAR_ECAP_REG); + early_iounmap(addr, VTD_PAGE_SIZE); + if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { + warn_invalid_dmar(drhd->address, + " returns all ones"); + goto failed; + } + } + + entry_header = ((void *)entry_header + entry_header->length); + } + return 1; + +failed: +#ifdef CONFIG_DMAR + dmar_disabled = 1; +#endif + return 0; +} + +int __init detect_intel_iommu(void) +{ + int ret; + + ret = dmar_table_detect(); + if (ret) + ret = check_zero_address(); + { +#ifdef CONFIG_INTR_REMAP + struct acpi_table_dmar *dmar; + + dmar = (struct acpi_table_dmar *) dmar_tbl; + if (ret && cpu_has_x2apic && dmar->flags & 0x1) + printk(KERN_INFO + "Queued invalidation will be enabled to support " + "x2apic and Intr-remapping.\n"); +#endif +#ifdef CONFIG_DMAR + if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { + iommu_detected = 1; + /* Make sure ACS will be enabled */ + pci_request_acs(); + } +#endif +#ifdef CONFIG_X86 + if (ret) + x86_init.iommu.iommu_init = intel_iommu_init; +#endif + } + early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); + dmar_tbl = NULL; + + return ret ? 
1 : -ENODEV; +} + + +int alloc_iommu(struct dmar_drhd_unit *drhd) +{ + struct intel_iommu *iommu; + int map_size; + u32 ver; + static int iommu_allocated = 0; + int agaw = 0; + int msagaw = 0; + + if (!drhd->reg_base_addr) { + warn_invalid_dmar(0, ""); + return -EINVAL; + } + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + iommu->seq_id = iommu_allocated++; + sprintf (iommu->name, "dmar%d", iommu->seq_id); + + iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); + iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + + if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { + warn_invalid_dmar(drhd->reg_base_addr, " returns all ones"); + goto err_unmap; + } + +#ifdef CONFIG_DMAR + agaw = iommu_calculate_agaw(iommu); + if (agaw < 0) { + printk(KERN_ERR + "Cannot get a valid agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + goto err_unmap; + } + msagaw = iommu_calculate_max_sagaw(iommu); + if (msagaw < 0) { + printk(KERN_ERR + "Cannot get a valid max agaw for iommu (seq_id = %d)\n", + iommu->seq_id); + goto err_unmap; + } +#endif + iommu->agaw = agaw; + iommu->msagaw = msagaw; + + iommu->node = -1; + + /* the registers might be more than one page */ + map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), + cap_max_fault_reg_offset(iommu->cap)); + map_size = VTD_PAGE_ALIGN(map_size); + if (map_size > VTD_PAGE_SIZE) { + iounmap(iommu->reg); + iommu->reg = ioremap(drhd->reg_base_addr, map_size); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + } + + ver = readl(iommu->reg + DMAR_VER_REG); + pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", + iommu->seq_id, + (unsigned long long)drhd->reg_base_addr, + DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), + (unsigned long long)iommu->cap, + (unsigned long long)iommu->ecap); + + spin_lock_init(&iommu->register_lock); + + drhd->iommu = iommu; + return 0; + + err_unmap: + iounmap(iommu->reg); + error: + kfree(iommu); + return -1; +} + +void free_iommu(struct intel_iommu *iommu) +{ + if (!iommu) + return; + +#ifdef CONFIG_DMAR + free_dmar_iommu(iommu); +#endif + + if (iommu->reg) + iounmap(iommu->reg); + kfree(iommu); +} + +/* + * Reclaim all the submitted descriptors which have completed its work. + */ +static inline void reclaim_free_desc(struct q_inval *qi) +{ + while (qi->desc_status[qi->free_tail] == QI_DONE || + qi->desc_status[qi->free_tail] == QI_ABORT) { + qi->desc_status[qi->free_tail] = QI_FREE; + qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; + qi->free_cnt++; + } +} + +static int qi_check_fault(struct intel_iommu *iommu, int index) +{ + u32 fault; + int head, tail; + struct q_inval *qi = iommu->qi; + int wait_index = (index + 1) % QI_LENGTH; + + if (qi->desc_status[wait_index] == QI_ABORT) + return -EAGAIN; + + fault = readl(iommu->reg + DMAR_FSTS_REG); + + /* + * If IQE happens, the head points to the descriptor associated + * with the error. No new descriptors are fetched until the IQE + * is cleared. 
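 *
 * [Sketch added in editing, not part of the patch.] The IQE recovery
 * below reduces to: read the head index, and if it points at our
 * descriptor, overwrite the bad descriptor with the known-good wait
 * descriptor and acknowledge the fault so hardware can make progress:
 *
 *	head = readl(iommu->reg + DMAR_IQH_REG) >> DMAR_IQ_SHIFT;
 *	if (head == index) {
 *		qi->desc[index] = qi->desc[wait_index];
 *		writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
 *	}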
+ */ + if (fault & DMA_FSTS_IQE) { + head = readl(iommu->reg + DMAR_IQH_REG); + if ((head >> DMAR_IQ_SHIFT) == index) { + printk(KERN_ERR "VT-d detected invalid descriptor: " + "low=%llx, high=%llx\n", + (unsigned long long)qi->desc[index].low, + (unsigned long long)qi->desc[index].high); + memcpy(&qi->desc[index], &qi->desc[wait_index], + sizeof(struct qi_desc)); + __iommu_flush_cache(iommu, &qi->desc[index], + sizeof(struct qi_desc)); + writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); + return -EINVAL; + } + } + + /* + * If ITE happens, all pending wait_desc commands are aborted. + * No new descriptors are fetched until the ITE is cleared. + */ + if (fault & DMA_FSTS_ITE) { + head = readl(iommu->reg + DMAR_IQH_REG); + head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; + head |= 1; + tail = readl(iommu->reg + DMAR_IQT_REG); + tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; + + writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); + + do { + if (qi->desc_status[head] == QI_IN_USE) + qi->desc_status[head] = QI_ABORT; + head = (head - 2 + QI_LENGTH) % QI_LENGTH; + } while (head != tail); + + if (qi->desc_status[wait_index] == QI_ABORT) + return -EAGAIN; + } + + if (fault & DMA_FSTS_ICE) + writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); + + return 0; +} + +/* + * Submit the queued invalidation descriptor to the remapping + * hardware unit and wait for its completion. + */ +int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) +{ + int rc; + struct q_inval *qi = iommu->qi; + struct qi_desc *hw, wait_desc; + int wait_index, index; + unsigned long flags; + + if (!qi) + return 0; + + hw = qi->desc; + +restart: + rc = 0; + + spin_lock_irqsave(&qi->q_lock, flags); + while (qi->free_cnt < 3) { + spin_unlock_irqrestore(&qi->q_lock, flags); + cpu_relax(); + spin_lock_irqsave(&qi->q_lock, flags); + } + + index = qi->free_head; + wait_index = (index + 1) % QI_LENGTH; + + qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; + + hw[index] = *desc; + + wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) | + QI_IWD_STATUS_WRITE | QI_IWD_TYPE; + wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); + + hw[wait_index] = wait_desc; + + __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); + __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); + + qi->free_head = (qi->free_head + 2) % QI_LENGTH; + qi->free_cnt -= 2; + + /* + * update the HW tail register indicating the presence of + * new descriptors. + */ + writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG); + + while (qi->desc_status[wait_index] != QI_DONE) { + /* + * We will leave the interrupts disabled, to prevent interrupt + * context to queue another cmd while a cmd is already submitted + * and waiting for completion on this cpu. This is to avoid + * a deadlock where the interrupt context can wait indefinitely + * for free slots in the queue. + */ + rc = qi_check_fault(iommu, index); + if (rc) + break; + + spin_unlock(&qi->q_lock); + cpu_relax(); + spin_lock(&qi->q_lock); + } + + qi->desc_status[index] = QI_DONE; + + reclaim_free_desc(qi); + spin_unlock_irqrestore(&qi->q_lock, flags); + + if (rc == -EAGAIN) + goto restart; + + return rc; +} + +/* + * Flush the global interrupt entry cache. 
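 *
 * [Usage note added in editing; the caller lives in intr_remapping.c,
 * outside this excerpt, so treat the shape as illustrative.] A typical
 * caller invalidates the whole interrupt entry cache right after
 * pointing hardware at a new remap table (irta being the table address
 * plus size field composed by the caller):
 *
 *	dmar_writeq(iommu->reg + DMAR_IRTA_REG, irta);
 *	...
 *	qi_global_iec(iommu);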
+ */ +void qi_global_iec(struct intel_iommu *iommu) +{ + struct qi_desc desc; + + desc.low = QI_IEC_TYPE; + desc.high = 0; + + /* should never fail */ + qi_submit_sync(&desc, iommu); +} + +void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, + u64 type) +{ + struct qi_desc desc; + + desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) + | QI_CC_GRAN(type) | QI_CC_TYPE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); +} + +void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type) +{ + u8 dw = 0, dr = 0; + + struct qi_desc desc; + int ih = 0; + + if (cap_write_drain(iommu->cap)) + dw = 1; + + if (cap_read_drain(iommu->cap)) + dr = 1; + + desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) + | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; + desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) + | QI_IOTLB_AM(size_order); + + qi_submit_sync(&desc, iommu); +} + +void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, + u64 addr, unsigned mask) +{ + struct qi_desc desc; + + if (mask) { + BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); + addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1; + desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; + } else + desc.high = QI_DEV_IOTLB_ADDR(addr); + + if (qdep >= QI_DEV_IOTLB_MAX_INVS) + qdep = 0; + + desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | + QI_DIOTLB_TYPE; + + qi_submit_sync(&desc, iommu); +} + +/* + * Disable Queued Invalidation interface. + */ +void dmar_disable_qi(struct intel_iommu *iommu) +{ + unsigned long flags; + u32 sts; + cycles_t start_time = get_cycles(); + + if (!ecap_qis(iommu->ecap)) + return; + + spin_lock_irqsave(&iommu->register_lock, flags); + + sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_QIES)) + goto end; + + /* + * Give a chance to HW to complete the pending invalidation requests. + */ + while ((readl(iommu->reg + DMAR_IQT_REG) != + readl(iommu->reg + DMAR_IQH_REG)) && + (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time))) + cpu_relax(); + + iommu->gcmd &= ~DMA_GCMD_QIE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, + !(sts & DMA_GSTS_QIES), sts); +end: + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + +/* + * Enable queued invalidation. + */ +static void __dmar_enable_qi(struct intel_iommu *iommu) +{ + u32 sts; + unsigned long flags; + struct q_inval *qi = iommu->qi; + + qi->free_head = qi->free_tail = 0; + qi->free_cnt = QI_LENGTH; + + spin_lock_irqsave(&iommu->register_lock, flags); + + /* write zero to the tail reg */ + writel(0, iommu->reg + DMAR_IQT_REG); + + dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); + + iommu->gcmd |= DMA_GCMD_QIE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware complete it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + +/* + * Enable Queued Invalidation interface. This is a must to support + * interrupt-remapping. Also used by DMA-remapping, which replaces + * register based IOTLB invalidation. + */ +int dmar_enable_qi(struct intel_iommu *iommu) +{ + struct q_inval *qi; + struct page *desc_page; + + if (!ecap_qis(iommu->ecap)) + return -ENOENT; + + /* + * queued invalidation is already setup and enabled. 
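 *
 * [Caller sketch added in editing; the names are from intel-iommu.c's
 * init path, which is outside this hunk.] Callers typically fall back
 * to register-based invalidation when QI cannot be enabled:
 *
 *	if (dmar_enable_qi(iommu)) {
 *		iommu->flush.flush_context = __iommu_flush_context;
 *		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
 *	} else {
 *		iommu->flush.flush_context = qi_flush_context;
 *		iommu->flush.flush_iotlb = qi_flush_iotlb;
 *	}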
+ */ + if (iommu->qi) + return 0; + + iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); + if (!iommu->qi) + return -ENOMEM; + + qi = iommu->qi; + + + desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!desc_page) { + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->desc = page_address(desc_page); + + qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); + if (!qi->desc_status) { + free_page((unsigned long) qi->desc); + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->free_head = qi->free_tail = 0; + qi->free_cnt = QI_LENGTH; + + spin_lock_init(&qi->q_lock); + + __dmar_enable_qi(iommu); + + return 0; +} + +/* iommu interrupt handling. Most stuff are MSI-like. */ + +enum faulttype { + DMA_REMAP, + INTR_REMAP, + UNKNOWN, +}; + +static const char *dma_remap_fault_reasons[] = +{ + "Software", + "Present bit in root entry is clear", + "Present bit in context entry is clear", + "Invalid context entry", + "Access beyond MGAW", + "PTE Write access is not set", + "PTE Read access is not set", + "Next page table ptr is invalid", + "Root table address invalid", + "Context table ptr is invalid", + "non-zero reserved fields in RTP", + "non-zero reserved fields in CTP", + "non-zero reserved fields in PTE", +}; + +static const char *intr_remap_fault_reasons[] = +{ + "Detected reserved fields in the decoded interrupt-remapped request", + "Interrupt index exceeded the interrupt-remapping table size", + "Present field in the IRTE entry is clear", + "Error accessing interrupt-remapping table pointed by IRTA_REG", + "Detected reserved fields in the IRTE entry", + "Blocked a compatibility format interrupt request", + "Blocked an interrupt request due to source-id verification failure", +}; + +#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) + +const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) +{ + if (fault_reason >= 0x20 && (fault_reason <= 0x20 + + ARRAY_SIZE(intr_remap_fault_reasons))) { + *fault_type = INTR_REMAP; + return intr_remap_fault_reasons[fault_reason - 0x20]; + } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { + *fault_type = DMA_REMAP; + return dma_remap_fault_reasons[fault_reason]; + } else { + *fault_type = UNKNOWN; + return "Unknown"; + } +} + +void dmar_msi_unmask(struct irq_data *data) +{ + struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); + unsigned long flag; + + /* unmask it */ + spin_lock_irqsave(&iommu->register_lock, flag); + writel(0, iommu->reg + DMAR_FECTL_REG); + /* Read a reg to force flush the post write */ + readl(iommu->reg + DMAR_FECTL_REG); + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +void dmar_msi_mask(struct irq_data *data) +{ + unsigned long flag; + struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); + + /* mask it */ + spin_lock_irqsave(&iommu->register_lock, flag); + writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); + /* Read a reg to force flush the post write */ + readl(iommu->reg + DMAR_FECTL_REG); + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +void dmar_msi_write(int irq, struct msi_msg *msg) +{ + struct intel_iommu *iommu = irq_get_handler_data(irq); + unsigned long flag; + + spin_lock_irqsave(&iommu->register_lock, flag); + writel(msg->data, iommu->reg + DMAR_FEDATA_REG); + writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); + writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +void dmar_msi_read(int irq, struct msi_msg *msg) +{ + struct 
intel_iommu *iommu = irq_get_handler_data(irq); + unsigned long flag; + + spin_lock_irqsave(&iommu->register_lock, flag); + msg->data = readl(iommu->reg + DMAR_FEDATA_REG); + msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); + msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +static int dmar_fault_do_one(struct intel_iommu *iommu, int type, + u8 fault_reason, u16 source_id, unsigned long long addr) +{ + const char *reason; + int fault_type; + + reason = dmar_get_fault_reason(fault_reason, &fault_type); + + if (fault_type == INTR_REMAP) + printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] " + "fault index %llx\n" + "INTR-REMAP:[fault reason %02d] %s\n", + (source_id >> 8), PCI_SLOT(source_id & 0xFF), + PCI_FUNC(source_id & 0xFF), addr >> 48, + fault_reason, reason); + else + printk(KERN_ERR + "DMAR:[%s] Request device [%02x:%02x.%d] " + "fault addr %llx \n" + "DMAR:[fault reason %02d] %s\n", + (type ? "DMA Read" : "DMA Write"), + (source_id >> 8), PCI_SLOT(source_id & 0xFF), + PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); + return 0; +} + +#define PRIMARY_FAULT_REG_LEN (16) +irqreturn_t dmar_fault(int irq, void *dev_id) +{ + struct intel_iommu *iommu = dev_id; + int reg, fault_index; + u32 fault_status; + unsigned long flag; + + spin_lock_irqsave(&iommu->register_lock, flag); + fault_status = readl(iommu->reg + DMAR_FSTS_REG); + if (fault_status) + printk(KERN_ERR "DRHD: handling fault status reg %x\n", + fault_status); + + /* TBD: ignore advanced fault log currently */ + if (!(fault_status & DMA_FSTS_PPF)) + goto clear_rest; + + fault_index = dma_fsts_fault_record_index(fault_status); + reg = cap_fault_reg_offset(iommu->cap); + while (1) { + u8 fault_reason; + u16 source_id; + u64 guest_addr; + int type; + u32 data; + + /* highest 32 bits */ + data = readl(iommu->reg + reg + + fault_index * PRIMARY_FAULT_REG_LEN + 12); + if (!(data & DMA_FRCD_F)) + break; + + fault_reason = dma_frcd_fault_reason(data); + type = dma_frcd_type(data); + + data = readl(iommu->reg + reg + + fault_index * PRIMARY_FAULT_REG_LEN + 8); + source_id = dma_frcd_source_id(data); + + guest_addr = dmar_readq(iommu->reg + reg + + fault_index * PRIMARY_FAULT_REG_LEN); + guest_addr = dma_frcd_page_addr(guest_addr); + /* clear the fault */ + writel(DMA_FRCD_F, iommu->reg + reg + + fault_index * PRIMARY_FAULT_REG_LEN + 12); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + + dmar_fault_do_one(iommu, type, fault_reason, + source_id, guest_addr); + + fault_index++; + if (fault_index >= cap_num_fault_regs(iommu->cap)) + fault_index = 0; + spin_lock_irqsave(&iommu->register_lock, flag); + } +clear_rest: + /* clear all the other faults */ + fault_status = readl(iommu->reg + DMAR_FSTS_REG); + writel(fault_status, iommu->reg + DMAR_FSTS_REG); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + return IRQ_HANDLED; +} + +int dmar_set_interrupt(struct intel_iommu *iommu) +{ + int irq, ret; + + /* + * Check if the fault interrupt is already initialized. 
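 *
 * [Sketch added in editing, not part of the patch.] arch_setup_dmar_msi()
 * below is the arch hook that composes the fault-event MSI message and
 * programs it through dmar_msi_write() (defined above); on x86 of this
 * era it is roughly:
 *
 *	struct msi_msg msg;
 *
 *	msi_compose_msg(NULL, irq, &msg, -1);
 *	dmar_msi_write(irq, &msg);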
+ */ + if (iommu->irq) + return 0; + + irq = create_irq(); + if (!irq) { + printk(KERN_ERR "IOMMU: no free vectors\n"); + return -EINVAL; + } + + irq_set_handler_data(irq, iommu); + iommu->irq = irq; + + ret = arch_setup_dmar_msi(irq); + if (ret) { + irq_set_handler_data(irq, NULL); + iommu->irq = 0; + destroy_irq(irq); + return ret; + } + + ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); + if (ret) + printk(KERN_ERR "IOMMU: can't request irq\n"); + return ret; +} + +int __init enable_drhd_fault_handling(void) +{ + struct dmar_drhd_unit *drhd; + + /* + * Enable fault control interrupt. + */ + for_each_drhd_unit(drhd) { + int ret; + struct intel_iommu *iommu = drhd->iommu; + ret = dmar_set_interrupt(iommu); + + if (ret) { + printk(KERN_ERR "DRHD %Lx: failed to enable fault, " + " interrupt, ret %d\n", + (unsigned long long)drhd->reg_base_addr, ret); + return -1; + } + + /* + * Clear any previous faults. + */ + dmar_fault(iommu->irq, iommu); + } + + return 0; +} + +/* + * Re-enable Queued Invalidation interface. + */ +int dmar_reenable_qi(struct intel_iommu *iommu) +{ + if (!ecap_qis(iommu->ecap)) + return -ENOENT; + + if (!iommu->qi) + return -ENOENT; + + /* + * First disable queued invalidation. + */ + dmar_disable_qi(iommu); + /* + * Then enable queued invalidation again. Since there is no pending + * invalidation requests now, it's safe to re-enable queued + * invalidation. + */ + __dmar_enable_qi(iommu); + + return 0; +} + +/* + * Check interrupt remapping support in DMAR table description. + */ +int __init dmar_ir_support(void) +{ + struct acpi_table_dmar *dmar; + dmar = (struct acpi_table_dmar *)dmar_tbl; + if (!dmar) + return 0; + return dmar->flags & 0x1; +} +IOMMU_INIT_POST(detect_intel_iommu); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c new file mode 100644 index 000000000000..c621c98c99da --- /dev/null +++ b/drivers/iommu/intel-iommu.c @@ -0,0 +1,4016 @@ +/* + * Copyright (c) 2006, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. 
+ *
+ * Copyright (C) 2006-2008 Intel Corporation
+ * Author: Ashok Raj
+ * Author: Shaohua Li
+ * Author: Anil S Keshavamurthy
+ * Author: Fenghua Yu
+ */
+
+#include <linux/init.h>
+#include <linux/bitmap.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/dma-mapping.h>
+#include <linux/mempool.h>
+#include <linux/timer.h>
+#include <linux/iova.h>
+#include <linux/iommu.h>
+#include <linux/intel-iommu.h>
+#include <linux/syscore_ops.h>
+#include <linux/tboot.h>
+#include <linux/dmi.h>
+#include <linux/pci-ats.h>
+#include <asm/cacheflush.h>
+#include <asm/iommu.h>
+
+#define ROOT_SIZE		VTD_PAGE_SIZE
+#define CONTEXT_SIZE		VTD_PAGE_SIZE
+
+#define IS_BRIDGE_HOST_DEVICE(pdev) \
+			((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
+#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
+#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
+#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
+
+#define IOAPIC_RANGE_START	(0xfee00000)
+#define IOAPIC_RANGE_END	(0xfeefffff)
+#define IOVA_START_ADDR		(0x1000)
+
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+
+#define MAX_AGAW_WIDTH 64
+
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
+				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
+
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
+#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
+
+/* page table handling */
+#define LEVEL_STRIDE		(9)
+#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
+
+static inline int agaw_to_level(int agaw)
+{
+	return agaw + 2;
+}
+
+static inline int agaw_to_width(int agaw)
+{
+	return 30 + agaw * LEVEL_STRIDE;
+}
+
+static inline int width_to_agaw(int width)
+{
+	return (width - 30) / LEVEL_STRIDE;
+}
+
+static inline unsigned int level_to_offset_bits(int level)
+{
+	return (level - 1) * LEVEL_STRIDE;
+}
+
+static inline int pfn_level_offset(unsigned long pfn, int level)
+{
+	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
+}
+
+static inline unsigned long level_mask(int level)
+{
+	return -1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long level_size(int level)
+{
+	return 1UL << level_to_offset_bits(level);
+}
+
+static inline unsigned long align_to_level(unsigned long pfn, int level)
+{
+	return (pfn + level_size(level) - 1) & level_mask(level);
+}
+
+static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
+{
+	return 1 << ((lvl - 1) * LEVEL_STRIDE);
+}
+
+/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
+   are never going to work.
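
   [Worked example added in editing, not part of the patch.] With 16KiB
   kernel pages (PAGE_SHIFT == 14) and VTD_PAGE_SHIFT == 12, one MM page
   spans four VT-d pages, so the helpers below shift by two:

	mm_to_dma_pfn(pfn)     == pfn << 2;
	dma_to_mm_pfn(dma_pfn) == dma_pfn >> 2;

   With 4KiB kernel pages the shift is zero and the two PFN spaces
   coincide.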
*/ +static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) +{ + return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); +} + +static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) +{ + return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); +} +static inline unsigned long page_to_dma_pfn(struct page *pg) +{ + return mm_to_dma_pfn(page_to_pfn(pg)); +} +static inline unsigned long virt_to_dma_pfn(void *p) +{ + return page_to_dma_pfn(virt_to_page(p)); +} + +/* global iommu list, set NULL for ignored DMAR units */ +static struct intel_iommu **g_iommus; + +static void __init check_tylersburg_isoch(void); +static int rwbf_quirk; + +/* + * set to 1 to panic kernel if can't successfully enable VT-d + * (used when kernel is launched w/ TXT) + */ +static int force_on = 0; + +/* + * 0: Present + * 1-11: Reserved + * 12-63: Context Ptr (12 - (haw-1)) + * 64-127: Reserved + */ +struct root_entry { + u64 val; + u64 rsvd1; +}; +#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) +static inline bool root_present(struct root_entry *root) +{ + return (root->val & 1); +} +static inline void set_root_present(struct root_entry *root) +{ + root->val |= 1; +} +static inline void set_root_value(struct root_entry *root, unsigned long value) +{ + root->val |= value & VTD_PAGE_MASK; +} + +static inline struct context_entry * +get_context_addr_from_root(struct root_entry *root) +{ + return (struct context_entry *) + (root_present(root)?phys_to_virt( + root->val & VTD_PAGE_MASK) : + NULL); +} + +/* + * low 64 bits: + * 0: present + * 1: fault processing disable + * 2-3: translation type + * 12-63: address space root + * high 64 bits: + * 0-2: address width + * 3-6: aval + * 8-23: domain id + */ +struct context_entry { + u64 lo; + u64 hi; +}; + +static inline bool context_present(struct context_entry *context) +{ + return (context->lo & 1); +} +static inline void context_set_present(struct context_entry *context) +{ + context->lo |= 1; +} + +static inline void context_set_fault_enable(struct context_entry *context) +{ + context->lo &= (((u64)-1) << 2) | 1; +} + +static inline void context_set_translation_type(struct context_entry *context, + unsigned long value) +{ + context->lo &= (((u64)-1) << 4) | 3; + context->lo |= (value & 3) << 2; +} + +static inline void context_set_address_root(struct context_entry *context, + unsigned long value) +{ + context->lo |= value & VTD_PAGE_MASK; +} + +static inline void context_set_address_width(struct context_entry *context, + unsigned long value) +{ + context->hi |= value & 7; +} + +static inline void context_set_domain_id(struct context_entry *context, + unsigned long value) +{ + context->hi |= (value & ((1 << 16) - 1)) << 8; +} + +static inline void context_clear_entry(struct context_entry *context) +{ + context->lo = 0; + context->hi = 0; +} + +/* + * 0: readable + * 1: writable + * 2-6: reserved + * 7: super page + * 8-10: available + * 11: snoop behavior + * 12-63: Host physcial address + */ +struct dma_pte { + u64 val; +}; + +static inline void dma_clear_pte(struct dma_pte *pte) +{ + pte->val = 0; +} + +static inline void dma_set_pte_readable(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_READ; +} + +static inline void dma_set_pte_writable(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_WRITE; +} + +static inline void dma_set_pte_snp(struct dma_pte *pte) +{ + pte->val |= DMA_PTE_SNP; +} + +static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) +{ + pte->val = (pte->val & ~3) | (prot & 3); +} + +static inline u64 dma_pte_addr(struct dma_pte 
*pte) +{ +#ifdef CONFIG_64BIT + return pte->val & VTD_PAGE_MASK; +#else + /* Must have a full atomic 64-bit read */ + return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; +#endif +} + +static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn) +{ + pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT; +} + +static inline bool dma_pte_present(struct dma_pte *pte) +{ + return (pte->val & 3) != 0; +} + +static inline int first_pte_in_page(struct dma_pte *pte) +{ + return !((unsigned long)pte & ~VTD_PAGE_MASK); +} + +/* + * This domain is a statically identity mapping domain. + * 1. This domain creats a static 1:1 mapping to all usable memory. + * 2. It maps to each iommu if successful. + * 3. Each iommu mapps to this domain if successful. + */ +static struct dmar_domain *si_domain; +static int hw_pass_through = 1; + +/* devices under the same p2p bridge are owned in one domain */ +#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) + +/* domain represents a virtual machine, more than one devices + * across iommus may be owned in one domain, e.g. kvm guest. + */ +#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) + +/* si_domain contains mulitple devices */ +#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2) + +struct dmar_domain { + int id; /* domain id */ + int nid; /* node id */ + unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ + + struct list_head devices; /* all devices' list */ + struct iova_domain iovad; /* iova's that belong to this domain */ + + struct dma_pte *pgd; /* virtual address */ + int gaw; /* max guest address width */ + + /* adjusted guest address width, 0 is level 2 30-bit */ + int agaw; + + int flags; /* flags to find out type of domain */ + + int iommu_coherency;/* indicate coherency of iommu access */ + int iommu_snooping; /* indicate snooping control feature*/ + int iommu_count; /* reference count of iommu */ + int iommu_superpage;/* Level of superpages supported: + 0 == 4KiB (no superpages), 1 == 2MiB, + 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ + spinlock_t iommu_lock; /* protect iommu set in domain */ + u64 max_addr; /* maximum mapped address */ +}; + +/* PCI domain-device relationship */ +struct device_domain_info { + struct list_head link; /* link to domain siblings */ + struct list_head global; /* link to global list */ + int segment; /* PCI domain */ + u8 bus; /* PCI bus number */ + u8 devfn; /* PCI devfn number */ + struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */ + struct intel_iommu *iommu; /* IOMMU used by this device */ + struct dmar_domain *domain; /* pointer to domain */ +}; + +static void flush_unmaps_timeout(unsigned long data); + +DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); + +#define HIGH_WATER_MARK 250 +struct deferred_flush_tables { + int next; + struct iova *iova[HIGH_WATER_MARK]; + struct dmar_domain *domain[HIGH_WATER_MARK]; +}; + +static struct deferred_flush_tables *deferred_flush; + +/* bitmap for indexing intel_iommus */ +static int g_num_of_iommus; + +static DEFINE_SPINLOCK(async_umap_flush_lock); +static LIST_HEAD(unmaps_to_do); + +static int timer_on; +static long list_size; + +static void domain_remove_dev_info(struct dmar_domain *domain); + +#ifdef CONFIG_DMAR_DEFAULT_ON +int dmar_disabled = 0; +#else +int dmar_disabled = 1; +#endif /*CONFIG_DMAR_DEFAULT_ON*/ + +static int dmar_map_gfx = 1; +static int dmar_forcedac; +static int intel_iommu_strict; +static int intel_iommu_superpage = 1; + +#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) +static DEFINE_SPINLOCK(device_domain_lock); 
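/*
 * [Illustration added in editing, not part of the patch.]
 * device_domain_lock guards the global list below (and each domain's
 * devices list); entries are chained through device_domain_info's
 * 'global' member. handle() is a hypothetical stand-in:
 *
 *	unsigned long flags;
 *	struct device_domain_info *info;
 *
 *	spin_lock_irqsave(&device_domain_lock, flags);
 *	list_for_each_entry(info, &device_domain_list, global)
 *		handle(info);
 *	spin_unlock_irqrestore(&device_domain_lock, flags);
 */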
+static LIST_HEAD(device_domain_list); + +static struct iommu_ops intel_iommu_ops; + +static int __init intel_iommu_setup(char *str) +{ + if (!str) + return -EINVAL; + while (*str) { + if (!strncmp(str, "on", 2)) { + dmar_disabled = 0; + printk(KERN_INFO "Intel-IOMMU: enabled\n"); + } else if (!strncmp(str, "off", 3)) { + dmar_disabled = 1; + printk(KERN_INFO "Intel-IOMMU: disabled\n"); + } else if (!strncmp(str, "igfx_off", 8)) { + dmar_map_gfx = 0; + printk(KERN_INFO + "Intel-IOMMU: disable GFX device mapping\n"); + } else if (!strncmp(str, "forcedac", 8)) { + printk(KERN_INFO + "Intel-IOMMU: Forcing DAC for PCI devices\n"); + dmar_forcedac = 1; + } else if (!strncmp(str, "strict", 6)) { + printk(KERN_INFO + "Intel-IOMMU: disable batched IOTLB flush\n"); + intel_iommu_strict = 1; + } else if (!strncmp(str, "sp_off", 6)) { + printk(KERN_INFO + "Intel-IOMMU: disable supported super page\n"); + intel_iommu_superpage = 0; + } + + str += strcspn(str, ","); + while (*str == ',') + str++; + } + return 0; +} +__setup("intel_iommu=", intel_iommu_setup); + +static struct kmem_cache *iommu_domain_cache; +static struct kmem_cache *iommu_devinfo_cache; +static struct kmem_cache *iommu_iova_cache; + +static inline void *alloc_pgtable_page(int node) +{ + struct page *page; + void *vaddr = NULL; + + page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0); + if (page) + vaddr = page_address(page); + return vaddr; +} + +static inline void free_pgtable_page(void *vaddr) +{ + free_page((unsigned long)vaddr); +} + +static inline void *alloc_domain_mem(void) +{ + return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC); +} + +static void free_domain_mem(void *vaddr) +{ + kmem_cache_free(iommu_domain_cache, vaddr); +} + +static inline void * alloc_devinfo_mem(void) +{ + return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC); +} + +static inline void free_devinfo_mem(void *vaddr) +{ + kmem_cache_free(iommu_devinfo_cache, vaddr); +} + +struct iova *alloc_iova_mem(void) +{ + return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); +} + +void free_iova_mem(struct iova *iova) +{ + kmem_cache_free(iommu_iova_cache, iova); +} + + +static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) +{ + unsigned long sagaw; + int agaw = -1; + + sagaw = cap_sagaw(iommu->cap); + for (agaw = width_to_agaw(max_gaw); + agaw >= 0; agaw--) { + if (test_bit(agaw, &sagaw)) + break; + } + + return agaw; +} + +/* + * Calculate max SAGAW for each iommu. + */ +int iommu_calculate_max_sagaw(struct intel_iommu *iommu) +{ + return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); +} + +/* + * calculate agaw for each iommu. + * "SAGAW" may be different across iommus, use a default agaw, and + * get a supported less agaw for iommus that don't support the default agaw. + */ +int iommu_calculate_agaw(struct intel_iommu *iommu) +{ + return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); +} + +/* This functionin only returns single iommu in a domain */ +static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) +{ + int iommu_id; + + /* si_domain and vm domain should not get here. 
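 *
 * [Example added in editing, not part of the patch.] iommu_bmp holds
 * one bit per IOMMU: a domain attached only to IOMMU 2 has
 * iommu_bmp == 0x4, so the lookup below
 *
 *	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
 *
 * yields 2 and g_iommus[2] is returned. VM and static-identity domains
 * may span several IOMMUs, which is why the BUG_ON checks reject them.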
*/ + BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); + BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); + + iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); + if (iommu_id < 0 || iommu_id >= g_num_of_iommus) + return NULL; + + return g_iommus[iommu_id]; +} + +static void domain_update_iommu_coherency(struct dmar_domain *domain) +{ + int i; + + domain->iommu_coherency = 1; + + for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { + if (!ecap_coherent(g_iommus[i]->ecap)) { + domain->iommu_coherency = 0; + break; + } + } +} + +static void domain_update_iommu_snooping(struct dmar_domain *domain) +{ + int i; + + domain->iommu_snooping = 1; + + for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { + if (!ecap_sc_support(g_iommus[i]->ecap)) { + domain->iommu_snooping = 0; + break; + } + } +} + +static void domain_update_iommu_superpage(struct dmar_domain *domain) +{ + int i, mask = 0xf; + + if (!intel_iommu_superpage) { + domain->iommu_superpage = 0; + return; + } + + domain->iommu_superpage = 4; /* 1TiB */ + + for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { + mask |= cap_super_page_val(g_iommus[i]->cap); + if (!mask) { + break; + } + } + domain->iommu_superpage = fls(mask); +} + +/* Some capabilities may be different across iommus */ +static void domain_update_iommu_cap(struct dmar_domain *domain) +{ + domain_update_iommu_coherency(domain); + domain_update_iommu_snooping(domain); + domain_update_iommu_superpage(domain); +} + +static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) +{ + struct dmar_drhd_unit *drhd = NULL; + int i; + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + if (segment != drhd->segment) + continue; + + for (i = 0; i < drhd->devices_cnt; i++) { + if (drhd->devices[i] && + drhd->devices[i]->bus->number == bus && + drhd->devices[i]->devfn == devfn) + return drhd->iommu; + if (drhd->devices[i] && + drhd->devices[i]->subordinate && + drhd->devices[i]->subordinate->number <= bus && + drhd->devices[i]->subordinate->subordinate >= bus) + return drhd->iommu; + } + + if (drhd->include_all) + return drhd->iommu; + } + + return NULL; +} + +static void domain_flush_cache(struct dmar_domain *domain, + void *addr, int size) +{ + if (!domain->iommu_coherency) + clflush_cache_range(addr, size); +} + +/* Gets context entry for a given bus and devfn */ +static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, + u8 bus, u8 devfn) +{ + struct root_entry *root; + struct context_entry *context; + unsigned long phy_addr; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + root = &iommu->root_entry[bus]; + context = get_context_addr_from_root(root); + if (!context) { + context = (struct context_entry *) + alloc_pgtable_page(iommu->node); + if (!context) { + spin_unlock_irqrestore(&iommu->lock, flags); + return NULL; + } + __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); + phy_addr = virt_to_phys((void *)context); + set_root_value(root, phy_addr); + set_root_present(root); + __iommu_flush_cache(iommu, root, sizeof(*root)); + } + spin_unlock_irqrestore(&iommu->lock, flags); + return &context[devfn]; +} + +static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) +{ + struct root_entry *root; + struct context_entry *context; + int ret; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + root = &iommu->root_entry[bus]; + context = get_context_addr_from_root(root); + if (!context) { + ret = 0; + goto out; + } + ret = 
context_present(&context[devfn]); +out: + spin_unlock_irqrestore(&iommu->lock, flags); + return ret; +} + +static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) +{ + struct root_entry *root; + struct context_entry *context; + unsigned long flags; + + spin_lock_irqsave(&iommu->lock, flags); + root = &iommu->root_entry[bus]; + context = get_context_addr_from_root(root); + if (context) { + context_clear_entry(&context[devfn]); + __iommu_flush_cache(iommu, &context[devfn], \ + sizeof(*context)); + } + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static void free_context_table(struct intel_iommu *iommu) +{ + struct root_entry *root; + int i; + unsigned long flags; + struct context_entry *context; + + spin_lock_irqsave(&iommu->lock, flags); + if (!iommu->root_entry) { + goto out; + } + for (i = 0; i < ROOT_ENTRY_NR; i++) { + root = &iommu->root_entry[i]; + context = get_context_addr_from_root(root); + if (context) + free_pgtable_page(context); + } + free_pgtable_page(iommu->root_entry); + iommu->root_entry = NULL; +out: + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, + unsigned long pfn, int large_level) +{ + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + struct dma_pte *parent, *pte = NULL; + int level = agaw_to_level(domain->agaw); + int offset, target_level; + + BUG_ON(!domain->pgd); + BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); + parent = domain->pgd; + + /* Search pte */ + if (!large_level) + target_level = 1; + else + target_level = large_level; + + while (level > 0) { + void *tmp_page; + + offset = pfn_level_offset(pfn, level); + pte = &parent[offset]; + if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) + break; + if (level == target_level) + break; + + if (!dma_pte_present(pte)) { + uint64_t pteval; + + tmp_page = alloc_pgtable_page(domain->nid); + + if (!tmp_page) + return NULL; + + domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); + pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; + if (cmpxchg64(&pte->val, 0ULL, pteval)) { + /* Someone else set it while we were thinking; use theirs. 
*/ + free_pgtable_page(tmp_page); + } else { + dma_pte_addr(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); + } + } + parent = phys_to_virt(dma_pte_addr(pte)); + level--; + } + + return pte; +} + + +/* return the address's pte at a specific level */ +static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, + unsigned long pfn, + int level, int *large_page) +{ + struct dma_pte *parent, *pte = NULL; + int total = agaw_to_level(domain->agaw); + int offset; + + parent = domain->pgd; + while (level <= total) { + offset = pfn_level_offset(pfn, total); + pte = &parent[offset]; + if (level == total) + return pte; + + if (!dma_pte_present(pte)) { + *large_page = total; + break; + } + + if (pte->val & DMA_PTE_LARGE_PAGE) { + *large_page = total; + return pte; + } + + parent = phys_to_virt(dma_pte_addr(pte)); + total--; + } + return NULL; +} + +/* clear last-level ptes; a tlb flush should follow */ +static void dma_pte_clear_range(struct dmar_domain *domain, + unsigned long start_pfn, + unsigned long last_pfn) +{ + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + unsigned int large_page = 1; + struct dma_pte *first_pte, *pte; + + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(start_pfn > last_pfn); + + /* we don't need lock here; nobody else touches the iova range */ + do { + large_page = 1; + first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); + if (!pte) { + start_pfn = align_to_level(start_pfn + 1, large_page + 1); + continue; + } + do { + dma_clear_pte(pte); + start_pfn += lvl_to_nr_pages(large_page); + pte++; + } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); + + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); + + } while (start_pfn && start_pfn <= last_pfn); +} + +/* free page table pages.
the last-level ptes should already be cleared */ +static void dma_pte_free_pagetable(struct dmar_domain *domain, + unsigned long start_pfn, + unsigned long last_pfn) +{ + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + struct dma_pte *first_pte, *pte; + int total = agaw_to_level(domain->agaw); + int level; + unsigned long tmp; + int large_page = 2; + + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); + BUG_ON(start_pfn > last_pfn); + + /* We don't need lock here; nobody else touches the iova range */ + level = 2; + while (level <= total) { + tmp = align_to_level(start_pfn, level); + + /* If we can't even clear one PTE at this level, we're done */ + if (tmp + level_size(level) - 1 > last_pfn) + return; + + do { + large_page = level; + first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); + if (large_page > level) + level = large_page + 1; + if (!pte) { + tmp = align_to_level(tmp + 1, level + 1); + continue; + } + do { + if (dma_pte_present(pte)) { + free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); + dma_clear_pte(pte); + } + pte++; + tmp += level_size(level); + } while (!first_pte_in_page(pte) && + tmp + level_size(level) - 1 <= last_pfn); + + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); + + } while (tmp && tmp + level_size(level) - 1 <= last_pfn); + level++; + } + /* free pgd */ + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { + free_pgtable_page(domain->pgd); + domain->pgd = NULL; + } +} + +/* iommu handling */ +static int iommu_alloc_root_entry(struct intel_iommu *iommu) +{ + struct root_entry *root; + unsigned long flags; + + root = (struct root_entry *)alloc_pgtable_page(iommu->node); + if (!root) + return -ENOMEM; + + __iommu_flush_cache(iommu, root, ROOT_SIZE); + + spin_lock_irqsave(&iommu->lock, flags); + iommu->root_entry = root; + spin_unlock_irqrestore(&iommu->lock, flags); + + return 0; +} + +static void iommu_set_root_entry(struct intel_iommu *iommu) +{ + void *addr; + u32 sts; + unsigned long flag; + + addr = iommu->root_entry; + + spin_lock_irqsave(&iommu->register_lock, flag); + dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); + + writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_RTPS), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +static void iommu_flush_write_buffer(struct intel_iommu *iommu) +{ + u32 val; + unsigned long flag; + + if (!rwbf_quirk && !cap_rwbf(iommu->cap)) + return; + + spin_lock_irqsave(&iommu->register_lock, flag); + writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (!(val & DMA_GSTS_WBFS)), val); + + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +/* the return value determines if we need a write buffer flush */ +static void __iommu_flush_context(struct intel_iommu *iommu, + u16 did, u16 source_id, u8 function_mask, + u64 type) +{ + u64 val = 0; + unsigned long flag; + + switch (type) { + case DMA_CCMD_GLOBAL_INVL: + val = DMA_CCMD_GLOBAL_INVL; + break; + case DMA_CCMD_DOMAIN_INVL: + val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did); + break; + case DMA_CCMD_DEVICE_INVL: + val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did) + | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask); + break; + default: + BUG(); + } + val |= DMA_CCMD_ICC; +
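+ /* For example, a device-scope invalidation of domain 5, bus 0x03, devfn 0x10 assembles val = DMA_CCMD_DEVICE_INVL | DMA_CCMD_DID(5) | DMA_CCMD_SID(0x0310) | DMA_CCMD_FM(0) | DMA_CCMD_ICC; hardware clears the ICC bit once the flush completes, which is what the wait loop below polls for. */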
spin_lock_irqsave(&iommu->register_lock, flag); + dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, + dmar_readq, (!(val & DMA_CCMD_ICC)), val); + + spin_unlock_irqrestore(&iommu->register_lock, flag); +} + +/* the return value determines if we need a write buffer flush */ +static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, + u64 addr, unsigned int size_order, u64 type) +{ + int tlb_offset = ecap_iotlb_offset(iommu->ecap); + u64 val = 0, val_iva = 0; + unsigned long flag; + + switch (type) { + case DMA_TLB_GLOBAL_FLUSH: + /* a global flush doesn't need to set IVA_REG */ + val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT; + break; + case DMA_TLB_DSI_FLUSH: + val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); + break; + case DMA_TLB_PSI_FLUSH: + val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); + /* Note: always flush non-leaf currently */ + val_iva = size_order | addr; + break; + default: + BUG(); + } + /* Note: set drain read/write */ +#if 0 + /* + * This is probably just to be extra safe; it looks like we can + * ignore it without any impact. + */ + if (cap_read_drain(iommu->cap)) + val |= DMA_TLB_READ_DRAIN; +#endif + if (cap_write_drain(iommu->cap)) + val |= DMA_TLB_WRITE_DRAIN; + + spin_lock_irqsave(&iommu->register_lock, flag); + /* Note: Only uses first TLB reg currently */ + if (val_iva) + dmar_writeq(iommu->reg + tlb_offset, val_iva); + dmar_writeq(iommu->reg + tlb_offset + 8, val); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, tlb_offset + 8, + dmar_readq, (!(val & DMA_TLB_IVT)), val); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + + /* check IOTLB invalidation granularity */ + if (DMA_TLB_IAIG(val) == 0) + printk(KERN_ERR "IOMMU: flush IOTLB failed\n"); + if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) + pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", + (unsigned long long)DMA_TLB_IIRG(type), + (unsigned long long)DMA_TLB_IAIG(val)); +} + +static struct device_domain_info *iommu_support_dev_iotlb( + struct dmar_domain *domain, int segment, u8 bus, u8 devfn) +{ + int found = 0; + unsigned long flags; + struct device_domain_info *info; + struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); + + if (!ecap_dev_iotlb_support(iommu->ecap)) + return NULL; + + if (!iommu->qi) + return NULL; + + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_entry(info, &domain->devices, link) + if (info->bus == bus && info->devfn == devfn) { + found = 1; + break; + } + spin_unlock_irqrestore(&device_domain_lock, flags); + + if (!found || !info->dev) + return NULL; + + if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS)) + return NULL; + + if (!dmar_find_matched_atsr_unit(info->dev)) + return NULL; + + info->iommu = iommu; + + return info; +} + +static void iommu_enable_dev_iotlb(struct device_domain_info *info) +{ + if (!info) + return; + + pci_enable_ats(info->dev, VTD_PAGE_SHIFT); +} + +static void iommu_disable_dev_iotlb(struct device_domain_info *info) +{ + if (!info->dev || !pci_ats_enabled(info->dev)) + return; + + pci_disable_ats(info->dev); +} + +static void iommu_flush_dev_iotlb(struct dmar_domain *domain, + u64 addr, unsigned mask) +{ + u16 sid, qdep; + unsigned long flags; + struct device_domain_info *info; + + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_entry(info, &domain->devices, link) { + if (!info->dev || !pci_ats_enabled(info->dev)) + continue; + + sid = info->bus << 8 | info->devfn; + qdep = pci_ats_queue_depth(info->dev); +
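+ /* e.g. bus 0x03, slot 2, function 1: devfn == (2 << 3) | 1 == 0x11, so sid == (0x03 << 8) | 0x11 == 0x0311; qdep is the device's ATS invalidate queue depth. */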
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); + } + spin_unlock_irqrestore(&device_domain_lock, flags); +} + +static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, + unsigned long pfn, unsigned int pages, int map) +{ + unsigned int mask = ilog2(__roundup_pow_of_two(pages)); + uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; + + BUG_ON(pages == 0); + + /* + * Fall back to domain-selective flush if there is no PSI support or + * the size is too big. + * PSI requires the page size to be 2 ^ x, and the base address to be + * naturally aligned to the size. + */ + if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) + iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH); + else + iommu->flush.flush_iotlb(iommu, did, addr, mask, + DMA_TLB_PSI_FLUSH); + + /* + * In caching mode, changes of pages from non-present to present require + * a flush. However, the device IOTLB doesn't need to be flushed in this + * case. + */ + if (!cap_caching_mode(iommu->cap) || !map) + iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); +} + +static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) +{ + u32 pmen; + unsigned long flags; + + spin_lock_irqsave(&iommu->register_lock, flags); + pmen = readl(iommu->reg + DMAR_PMEN_REG); + pmen &= ~DMA_PMEN_EPM; + writel(pmen, iommu->reg + DMAR_PMEN_REG); + + /* wait for the protected region status bit to clear */ + IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, + readl, !(pmen & DMA_PMEN_PRS), pmen); + + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + +static int iommu_enable_translation(struct intel_iommu *iommu) +{ + u32 sts; + unsigned long flags; + + spin_lock_irqsave(&iommu->register_lock, flags); + iommu->gcmd |= DMA_GCMD_TE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_TES), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); + return 0; +} + +static int iommu_disable_translation(struct intel_iommu *iommu) +{ + u32 sts; + unsigned long flag; + + spin_lock_irqsave(&iommu->register_lock, flag); + iommu->gcmd &= ~DMA_GCMD_TE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware completes it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (!(sts & DMA_GSTS_TES)), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + return 0; +} + + +static int iommu_init_domains(struct intel_iommu *iommu) +{ + unsigned long ndomains; + unsigned long nlongs; + + ndomains = cap_ndoms(iommu->cap); + pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id, + ndomains); + nlongs = BITS_TO_LONGS(ndomains); + + spin_lock_init(&iommu->lock); + + /* TBD: there might be 64K domains, + * consider other allocation for future chips + */ + iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); + if (!iommu->domain_ids) { + printk(KERN_ERR "Allocating domain id array failed\n"); + return -ENOMEM; + } + iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), + GFP_KERNEL); + if (!iommu->domains) { + printk(KERN_ERR "Allocating domain array failed\n"); + return -ENOMEM; + } + + /* + * If Caching mode is set, then invalid translations are tagged + * with domain id 0. Hence we need to pre-allocate it.
+ */ + if (cap_caching_mode(iommu->cap)) + set_bit(0, iommu->domain_ids); + return 0; +} + + +static void domain_exit(struct dmar_domain *domain); +static void vm_domain_exit(struct dmar_domain *domain); + +void free_dmar_iommu(struct intel_iommu *iommu) +{ + struct dmar_domain *domain; + int i; + unsigned long flags; + + if ((iommu->domains) && (iommu->domain_ids)) { + for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { + domain = iommu->domains[i]; + clear_bit(i, iommu->domain_ids); + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (--domain->iommu_count == 0) { + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) + vm_domain_exit(domain); + else + domain_exit(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); + } + } + + if (iommu->gcmd & DMA_GCMD_TE) + iommu_disable_translation(iommu); + + if (iommu->irq) { + irq_set_handler_data(iommu->irq, NULL); + /* This will mask the irq */ + free_irq(iommu->irq, iommu); + destroy_irq(iommu->irq); + } + + kfree(iommu->domains); + kfree(iommu->domain_ids); + + g_iommus[iommu->seq_id] = NULL; + + /* if all iommus are freed, free g_iommus */ + for (i = 0; i < g_num_of_iommus; i++) { + if (g_iommus[i]) + break; + } + + if (i == g_num_of_iommus) + kfree(g_iommus); + + /* free context mapping */ + free_context_table(iommu); +} + +static struct dmar_domain *alloc_domain(void) +{ + struct dmar_domain *domain; + + domain = alloc_domain_mem(); + if (!domain) + return NULL; + + domain->nid = -1; + memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); + domain->flags = 0; + + return domain; +} + +static int iommu_attach_domain(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + int num; + unsigned long ndomains; + unsigned long flags; + + ndomains = cap_ndoms(iommu->cap); + + spin_lock_irqsave(&iommu->lock, flags); + + num = find_first_zero_bit(iommu->domain_ids, ndomains); + if (num >= ndomains) { + spin_unlock_irqrestore(&iommu->lock, flags); + printk(KERN_ERR "IOMMU: no free domain ids\n"); + return -ENOMEM; + } + + domain->id = num; + set_bit(num, iommu->domain_ids); + set_bit(iommu->seq_id, &domain->iommu_bmp); + iommu->domains[num] = domain; + spin_unlock_irqrestore(&iommu->lock, flags); + + return 0; +} + +static void iommu_detach_domain(struct dmar_domain *domain, + struct intel_iommu *iommu) +{ + unsigned long flags; + int num, ndomains; + int found = 0; + + spin_lock_irqsave(&iommu->lock, flags); + ndomains = cap_ndoms(iommu->cap); + for_each_set_bit(num, iommu->domain_ids, ndomains) { + if (iommu->domains[num] == domain) { + found = 1; + break; + } + } + + if (found) { + clear_bit(num, iommu->domain_ids); + clear_bit(iommu->seq_id, &domain->iommu_bmp); + iommu->domains[num] = NULL; + } + spin_unlock_irqrestore(&iommu->lock, flags); +} + +static struct iova_domain reserved_iova_list; +static struct lock_class_key reserved_rbtree_key; + +static int dmar_init_reserved_ranges(void) +{ + struct pci_dev *pdev = NULL; + struct iova *iova; + int i; + + init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); + + lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, + &reserved_rbtree_key); + + /* IOAPIC ranges shouldn't be accessed by DMA */ + iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), + IOVA_PFN(IOAPIC_RANGE_END)); + if (!iova) { + printk(KERN_ERR "Reserve IOAPIC range failed\n"); + return -ENODEV; + } + + /* Reserve all PCI MMIO to avoid peer-to-peer access */ + for_each_pci_dev(pdev) { + struct resource *r; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + r = &pdev->resource[i]; + if 
(!r->flags || !(r->flags & IORESOURCE_MEM)) + continue; + iova = reserve_iova(&reserved_iova_list, + IOVA_PFN(r->start), + IOVA_PFN(r->end)); + if (!iova) { + printk(KERN_ERR "Reserve iova failed\n"); + return -ENODEV; + } + } + } + return 0; +} + +static void domain_reserve_special_ranges(struct dmar_domain *domain) +{ + copy_reserved_iova(&reserved_iova_list, &domain->iovad); +} + +static inline int guestwidth_to_adjustwidth(int gaw) +{ + int agaw; + int r = (gaw - 12) % 9; + + if (r == 0) + agaw = gaw; + else + agaw = gaw + 9 - r; + if (agaw > 64) + agaw = 64; + return agaw; +} + +static int domain_init(struct dmar_domain *domain, int guest_width) +{ + struct intel_iommu *iommu; + int adjust_width, agaw; + unsigned long sagaw; + + init_iova_domain(&domain->iovad, DMA_32BIT_PFN); + spin_lock_init(&domain->iommu_lock); + + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + iommu = domain_get_iommu(domain); + if (guest_width > cap_mgaw(iommu->cap)) + guest_width = cap_mgaw(iommu->cap); + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + agaw = width_to_agaw(adjust_width); + sagaw = cap_sagaw(iommu->cap); + if (!test_bit(agaw, &sagaw)) { + /* hardware doesn't support it, choose a bigger one */ + pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw); + agaw = find_next_bit(&sagaw, 5, agaw); + if (agaw >= 5) + return -ENODEV; + } + domain->agaw = agaw; + INIT_LIST_HEAD(&domain->devices); + + if (ecap_coherent(iommu->ecap)) + domain->iommu_coherency = 1; + else + domain->iommu_coherency = 0; + + if (ecap_sc_support(iommu->ecap)) + domain->iommu_snooping = 1; + else + domain->iommu_snooping = 0; + + domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); + domain->iommu_count = 1; + domain->nid = iommu->node; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); + return 0; +} + +static void domain_exit(struct dmar_domain *domain) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + + /* Domain 0 is reserved, so don't process it */ + if (!domain) + return; + + /* Flush any lazy unmaps that may reference this domain */ + if (!intel_iommu_strict) + flush_unmaps_timeout(0); + + domain_remove_dev_info(domain); + /* destroy iovas */ + put_iova_domain(&domain->iovad); + + /* clear ptes */ + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + + /* free page tables */ + dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + + for_each_active_iommu(iommu, drhd) + if (test_bit(iommu->seq_id, &domain->iommu_bmp)) + iommu_detach_domain(domain, iommu); + + free_domain_mem(domain); +} + +static int domain_context_mapping_one(struct dmar_domain *domain, int segment, + u8 bus, u8 devfn, int translation) +{ + struct context_entry *context; + unsigned long flags; + struct intel_iommu *iommu; + struct dma_pte *pgd; + unsigned long num; + unsigned long ndomains; + int id; + int agaw; + struct device_domain_info *info = NULL; + + pr_debug("Set context mapping for %02x:%02x.%d\n", + bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + BUG_ON(!domain->pgd); + BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && + translation != CONTEXT_TT_MULTI_LEVEL); + + iommu = device_to_iommu(segment, bus, devfn); + if (!iommu) + return -ENODEV; + + context = device_to_context_entry(iommu, bus, devfn); + if (!context) + return -ENOMEM; + spin_lock_irqsave(&iommu->lock, flags); + if
(context_present(context)) { + spin_unlock_irqrestore(&iommu->lock, flags); + return 0; + } + + id = domain->id; + pgd = domain->pgd; + + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || + domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { + int found = 0; + + /* find an available domain id for this device in iommu */ + ndomains = cap_ndoms(iommu->cap); + for_each_set_bit(num, iommu->domain_ids, ndomains) { + if (iommu->domains[num] == domain) { + id = num; + found = 1; + break; + } + } + + if (found == 0) { + num = find_first_zero_bit(iommu->domain_ids, ndomains); + if (num >= ndomains) { + spin_unlock_irqrestore(&iommu->lock, flags); + printk(KERN_ERR "IOMMU: no free domain ids\n"); + return -EFAULT; + } + + set_bit(num, iommu->domain_ids); + iommu->domains[num] = domain; + id = num; + } + + /* Skip top levels of page tables for + * iommu which has less agaw than default. + * Unnecessary for PT mode. + */ + if (translation != CONTEXT_TT_PASS_THROUGH) { + for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { + pgd = phys_to_virt(dma_pte_addr(pgd)); + if (!dma_pte_present(pgd)) { + spin_unlock_irqrestore(&iommu->lock, flags); + return -ENOMEM; + } + } + } + } + + context_set_domain_id(context, id); + + if (translation != CONTEXT_TT_PASS_THROUGH) { + info = iommu_support_dev_iotlb(domain, segment, bus, devfn); + translation = info ? CONTEXT_TT_DEV_IOTLB : + CONTEXT_TT_MULTI_LEVEL; + } + /* + * In pass through mode, AW must be programmed to indicate the largest + * AGAW value supported by hardware. And ASR is ignored by hardware. + */ + if (unlikely(translation == CONTEXT_TT_PASS_THROUGH)) + context_set_address_width(context, iommu->msagaw); + else { + context_set_address_root(context, virt_to_phys(pgd)); + context_set_address_width(context, iommu->agaw); + } + + context_set_translation_type(context, translation); + context_set_fault_enable(context); + context_set_present(context); + domain_flush_cache(domain, context, sizeof(*context)); + + /* + * It's a non-present to present mapping. If hardware doesn't cache + * non-present entry we only need to flush the write-buffer. 
If it + * _does_ cache non-present entries, then it does so in the special + * domain #0, which we have to flush: + */ + if (cap_caching_mode(iommu->cap)) { + iommu->flush.flush_context(iommu, 0, + (((u16)bus) << 8) | devfn, + DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL); + iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); + } else { + iommu_flush_write_buffer(iommu); + } + iommu_enable_dev_iotlb(info); + spin_unlock_irqrestore(&iommu->lock, flags); + + spin_lock_irqsave(&domain->iommu_lock, flags); + if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { + domain->iommu_count++; + if (domain->iommu_count == 1) + domain->nid = iommu->node; + domain_update_iommu_cap(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags); + return 0; +} + +static int +domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, + int translation) +{ + int ret; + struct pci_dev *tmp, *parent; + + ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), + pdev->bus->number, pdev->devfn, + translation); + if (ret) + return ret; + + /* dependent device mapping */ + tmp = pci_find_upstream_pcie_bridge(pdev); + if (!tmp) + return 0; + /* Secondary interface's bus number and devfn 0 */ + parent = pdev->bus->self; + while (parent != tmp) { + ret = domain_context_mapping_one(domain, + pci_domain_nr(parent->bus), + parent->bus->number, + parent->devfn, translation); + if (ret) + return ret; + parent = parent->bus->self; + } + if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ + return domain_context_mapping_one(domain, + pci_domain_nr(tmp->subordinate), + tmp->subordinate->number, 0, + translation); + else /* this is a legacy PCI bridge */ + return domain_context_mapping_one(domain, + pci_domain_nr(tmp->bus), + tmp->bus->number, + tmp->devfn, + translation); +} + +static int domain_context_mapped(struct pci_dev *pdev) +{ + int ret; + struct pci_dev *tmp, *parent; + struct intel_iommu *iommu; + + iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, + pdev->devfn); + if (!iommu) + return -ENODEV; + + ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); + if (!ret) + return ret; + /* dependent device mapping */ + tmp = pci_find_upstream_pcie_bridge(pdev); + if (!tmp) + return ret; + /* Secondary interface's bus number and devfn 0 */ + parent = pdev->bus->self; + while (parent != tmp) { + ret = device_context_mapped(iommu, parent->bus->number, + parent->devfn); + if (!ret) + return ret; + parent = parent->bus->self; + } + if (pci_is_pcie(tmp)) + return device_context_mapped(iommu, tmp->subordinate->number, + 0); + else + return device_context_mapped(iommu, tmp->bus->number, + tmp->devfn); +} + +/* Returns a number of VTD pages, but aligned to MM page size */ +static inline unsigned long aligned_nrpages(unsigned long host_addr, + size_t size) +{ + host_addr &= ~PAGE_MASK; + return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; +} + +/* Return largest possible superpage level for a given mapping */ +static inline int hardware_largepage_caps(struct dmar_domain *domain, + unsigned long iov_pfn, + unsigned long phy_pfn, + unsigned long pages) +{ + int support, level = 1; + unsigned long pfnmerge; + + support = domain->iommu_superpage; + + /* To use a large page, the virtual *and* physical addresses + must be aligned to 2MiB/1GiB/etc. Lower bits set in either + of them will mean we have to use smaller pages. So just + merge them and check both at once.
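+ E.g. if VTD_STRIDE_SHIFT is 9 (512 PTEs per table level), a 2MiB superpage needs the low 9 pfn bits clear: iov_pfn 0x200 merged with phy_pfn 0x80200 gives 0x80200, which still has them clear, so level 2 is usable; phy_pfn 0x80201 would force 4KiB pages.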
*/ + pfnmerge = iov_pfn | phy_pfn; + + while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) { + pages >>= VTD_STRIDE_SHIFT; + if (!pages) + break; + pfnmerge >>= VTD_STRIDE_SHIFT; + level++; + support--; + } + return level; +} + +static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + struct scatterlist *sg, unsigned long phys_pfn, + unsigned long nr_pages, int prot) +{ + struct dma_pte *first_pte = NULL, *pte = NULL; + phys_addr_t uninitialized_var(pteval); + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; + unsigned long sg_res; + unsigned int largepage_lvl = 0; + unsigned long lvl_pages = 0; + + BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); + + if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) + return -EINVAL; + + prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; + + if (sg) + sg_res = 0; + else { + sg_res = nr_pages + 1; + pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; + } + + while (nr_pages > 0) { + uint64_t tmp; + + if (!sg_res) { + sg_res = aligned_nrpages(sg->offset, sg->length); + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; + sg->dma_length = sg->length; + pteval = page_to_phys(sg_page(sg)) | prot; + phys_pfn = pteval >> VTD_PAGE_SHIFT; + } + + if (!pte) { + largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); + + first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); + if (!pte) + return -ENOMEM; + /* It is large page*/ + if (largepage_lvl > 1) + pteval |= DMA_PTE_LARGE_PAGE; + else + pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; + + } + /* We don't need lock here, nobody else + * touches the iova range + */ + tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); + if (tmp) { + static int dumps = 5; + printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", + iov_pfn, tmp, (unsigned long long)pteval); + if (dumps) { + dumps--; + debug_dma_dump_mappings(NULL); + } + WARN_ON(1); + } + + lvl_pages = lvl_to_nr_pages(largepage_lvl); + + BUG_ON(nr_pages < lvl_pages); + BUG_ON(sg_res < lvl_pages); + + nr_pages -= lvl_pages; + iov_pfn += lvl_pages; + phys_pfn += lvl_pages; + pteval += lvl_pages * VTD_PAGE_SIZE; + sg_res -= lvl_pages; + + /* If the next PTE would be the first in a new page, then we + need to flush the cache on the entries we've just written. + And then we'll need to recalculate 'pte', so clear it and + let it get set again in the if (!pte) block above. + + If we're done (!nr_pages) we need to flush the cache too. + + Also if we've been setting superpages, we may need to + recalculate 'pte' and switch back to smaller pages for the + end of the mapping, if the trailing size is not enough to + use another superpage (i.e. sg_res < lvl_pages). 
*/ + pte++; + if (!nr_pages || first_pte_in_page(pte) || + (largepage_lvl > 1 && sg_res < lvl_pages)) { + domain_flush_cache(domain, first_pte, + (void *)pte - (void *)first_pte); + pte = NULL; + } + + if (!sg_res && nr_pages) + sg = sg_next(sg); + } + return 0; +} + +static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + struct scatterlist *sg, unsigned long nr_pages, + int prot) +{ + return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); +} + +static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + unsigned long phys_pfn, unsigned long nr_pages, + int prot) +{ + return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); +} + +static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) +{ + if (!iommu) + return; + + clear_context_table(iommu, bus, devfn); + iommu->flush.flush_context(iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); +} + +static void domain_remove_dev_info(struct dmar_domain *domain) +{ + struct device_domain_info *info; + unsigned long flags; + struct intel_iommu *iommu; + + spin_lock_irqsave(&device_domain_lock, flags); + while (!list_empty(&domain->devices)) { + info = list_entry(domain->devices.next, + struct device_domain_info, link); + list_del(&info->link); + list_del(&info->global); + if (info->dev) + info->dev->dev.archdata.iommu = NULL; + spin_unlock_irqrestore(&device_domain_lock, flags); + + iommu_disable_dev_iotlb(info); + iommu = device_to_iommu(info->segment, info->bus, info->devfn); + iommu_detach_dev(iommu, info->bus, info->devfn); + free_devinfo_mem(info); + + spin_lock_irqsave(&device_domain_lock, flags); + } + spin_unlock_irqrestore(&device_domain_lock, flags); +} + +/* + * find_domain + * Note: struct pci_dev->dev.archdata.iommu stores the domain info + */ +static struct dmar_domain * +find_domain(struct pci_dev *pdev) +{ + struct device_domain_info *info; + + /* No lock here, assumes no domain exit in normal case */ + info = pdev->dev.archdata.iommu; + if (info) + return info->domain; + return NULL; +} + +/* domain is initialized */ +static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) +{ + struct dmar_domain *domain, *found = NULL; + struct intel_iommu *iommu; + struct dmar_drhd_unit *drhd; + struct device_domain_info *info, *tmp; + struct pci_dev *dev_tmp; + unsigned long flags; + int bus = 0, devfn = 0; + int segment; + int ret; + + domain = find_domain(pdev); + if (domain) + return domain; + + segment = pci_domain_nr(pdev->bus); + + dev_tmp = pci_find_upstream_pcie_bridge(pdev); + if (dev_tmp) { + if (pci_is_pcie(dev_tmp)) { + bus = dev_tmp->subordinate->number; + devfn = 0; + } else { + bus = dev_tmp->bus->number; + devfn = dev_tmp->devfn; + } + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_entry(info, &device_domain_list, global) { + if (info->segment == segment && + info->bus == bus && info->devfn == devfn) { + found = info->domain; + break; + } + } + spin_unlock_irqrestore(&device_domain_lock, flags); + /* the pcie-pci bridge already has a domain, use it */ + if (found) { + domain = found; + goto found_domain; + } + } + + domain = alloc_domain(); + if (!domain) + goto error; + + /* Allocate new domain for the device */ + drhd = dmar_find_matched_drhd_unit(pdev); + if (!drhd) { + printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", + pci_name(pdev)); + return NULL; + } + iommu = drhd->iommu; + + ret = iommu_attach_domain(domain, iommu); +
if (ret) { + free_domain_mem(domain); + goto error; + } + + if (domain_init(domain, gaw)) { + domain_exit(domain); + goto error; + } + + /* register pcie-to-pci device */ + if (dev_tmp) { + info = alloc_devinfo_mem(); + if (!info) { + domain_exit(domain); + goto error; + } + info->segment = segment; + info->bus = bus; + info->devfn = devfn; + info->dev = NULL; + info->domain = domain; + /* This domain is shared by devices under the p2p bridge */ + domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; + + /* the pcie-to-pci bridge already has a domain, use it */ + found = NULL; + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_entry(tmp, &device_domain_list, global) { + if (tmp->segment == segment && + tmp->bus == bus && tmp->devfn == devfn) { + found = tmp->domain; + break; + } + } + if (found) { + spin_unlock_irqrestore(&device_domain_lock, flags); + free_devinfo_mem(info); + domain_exit(domain); + domain = found; + } else { + list_add(&info->link, &domain->devices); + list_add(&info->global, &device_domain_list); + spin_unlock_irqrestore(&device_domain_lock, flags); + } + } + +found_domain: + info = alloc_devinfo_mem(); + if (!info) + goto error; + info->segment = segment; + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; + info->dev = pdev; + info->domain = domain; + spin_lock_irqsave(&device_domain_lock, flags); + /* somebody else was faster */ + found = find_domain(pdev); + if (found != NULL) { + spin_unlock_irqrestore(&device_domain_lock, flags); + if (found != domain) { + domain_exit(domain); + domain = found; + } + free_devinfo_mem(info); + return domain; + } + list_add(&info->link, &domain->devices); + list_add(&info->global, &device_domain_list); + pdev->dev.archdata.iommu = info; + spin_unlock_irqrestore(&device_domain_lock, flags); + return domain; +error: + /* recheck it here, maybe others set it */ + return find_domain(pdev); +} + +static int iommu_identity_mapping; +#define IDENTMAP_ALL 1 +#define IDENTMAP_GFX 2 +#define IDENTMAP_AZALIA 4 + +static int iommu_domain_identity_map(struct dmar_domain *domain, + unsigned long long start, + unsigned long long end) +{ + unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; + unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; + + if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), + dma_to_mm_pfn(last_vpfn))) { + printk(KERN_ERR "IOMMU: reserve iova failed\n"); + return -ENOMEM; + } + + pr_debug("Mapping reserved region %llx-%llx for domain %d\n", + start, end, domain->id); + /* + * The RMRR range might overlap with the physical memory range; + * clear it first. + */ + dma_pte_clear_range(domain, first_vpfn, last_vpfn); + + return domain_pfn_mapping(domain, first_vpfn, first_vpfn, + last_vpfn - first_vpfn + 1, + DMA_PTE_READ|DMA_PTE_WRITE); +} + +static int iommu_prepare_identity_map(struct pci_dev *pdev, + unsigned long long start, + unsigned long long end) +{ + struct dmar_domain *domain; + int ret; + + domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); + if (!domain) + return -ENOMEM; + + /* For _hardware_ passthrough, don't bother.
But for software + passthrough, we do it anyway; it may indicate a memory + range which is reserved in E820, and so didn't get set + up to start with in si_domain */ + if (domain == si_domain && hw_pass_through) { + printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + return 0; + } + + printk(KERN_INFO + "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", + pci_name(pdev), start, end); + + if (end < start) { + WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + ret = -EIO; + goto error; + } + + if (end >> agaw_to_width(domain->agaw)) { + WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + agaw_to_width(domain->agaw), + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + ret = -EIO; + goto error; + } + + ret = iommu_domain_identity_map(domain, start, end); + if (ret) + goto error; + + /* context entry init */ + ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); + if (ret) + goto error; + + return 0; + + error: + domain_exit(domain); + return ret; +} + +static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, + struct pci_dev *pdev) +{ + if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) + return 0; + return iommu_prepare_identity_map(pdev, rmrr->base_address, + rmrr->end_address); +} + +#ifdef CONFIG_DMAR_FLOPPY_WA +static inline void iommu_prepare_isa(void) +{ + struct pci_dev *pdev; + int ret; + + pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); + if (!pdev) + return; + + printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); + ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1); + + if (ret) + printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " + "floppy might not work\n"); + +} +#else +static inline void iommu_prepare_isa(void) +{ + return; +} +#endif /* !CONFIG_DMAR_FLOPPY_WA */ + +static int md_domain_init(struct dmar_domain *domain, int guest_width); + +static int __init si_domain_work_fn(unsigned long start_pfn, + unsigned long end_pfn, void *datax) +{ + int *ret = datax; + + *ret = iommu_domain_identity_map(si_domain, + (uint64_t)start_pfn << PAGE_SHIFT, + (uint64_t)end_pfn << PAGE_SHIFT); + return *ret; + +} + +static int __init si_domain_init(int hw) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + int nid, ret = 0; + + si_domain = alloc_domain(); + if (!si_domain) + return -EFAULT; + + pr_debug("Identity mapping domain is domain %d\n", si_domain->id); + + for_each_active_iommu(iommu, drhd) { + ret = iommu_attach_domain(si_domain, iommu); + if (ret) { + domain_exit(si_domain); + return -EFAULT; + } + } + + if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + domain_exit(si_domain); + return -EFAULT; + } + + si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; + + if (hw) + return 0; + + for_each_online_node(nid) { + work_with_active_regions(nid, si_domain_work_fn, &ret); + if (ret) + return ret; + } + + return 0; +} + +static void domain_remove_one_dev_info(struct dmar_domain *domain, + struct pci_dev *pdev); +static int identity_mapping(struct pci_dev *pdev) +{ + struct device_domain_info *info; + + if (likely(!iommu_identity_mapping)) + return 0; + + info =
pdev->dev.archdata.iommu; + if (info && info != DUMMY_DEVICE_DOMAIN_INFO) + return (info->domain == si_domain); + + return 0; +} + +static int domain_add_dev_info(struct dmar_domain *domain, + struct pci_dev *pdev, + int translation) +{ + struct device_domain_info *info; + unsigned long flags; + int ret; + + info = alloc_devinfo_mem(); + if (!info) + return -ENOMEM; + + ret = domain_context_mapping(domain, pdev, translation); + if (ret) { + free_devinfo_mem(info); + return ret; + } + + info->segment = pci_domain_nr(pdev->bus); + info->bus = pdev->bus->number; + info->devfn = pdev->devfn; + info->dev = pdev; + info->domain = domain; + + spin_lock_irqsave(&device_domain_lock, flags); + list_add(&info->link, &domain->devices); + list_add(&info->global, &device_domain_list); + pdev->dev.archdata.iommu = info; + spin_unlock_irqrestore(&device_domain_lock, flags); + + return 0; +} + +static int iommu_should_identity_map(struct pci_dev *pdev, int startup) +{ + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) + return 1; + + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) + return 1; + + if (!(iommu_identity_mapping & IDENTMAP_ALL)) + return 0; + + /* + * We want to start off with all devices in the 1:1 domain, and + * take them out later if we find they can't access all of memory. + * + * However, we can't do this for PCI devices behind bridges, + * because all PCI devices behind the same bridge will end up + * with the same source-id on their transactions. + * + * Practically speaking, we can't change things around for these + * devices at run-time, because we can't be sure there'll be no + * DMA transactions in flight for any of their siblings. + * + * So PCI devices (unless they're on the root bus) as well as + * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of + * the 1:1 domain, just in _case_ one of their siblings turns out + * not to be able to map all of memory. + */ + if (!pci_is_pcie(pdev)) { + if (!pci_is_root_bus(pdev->bus)) + return 0; + if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) + return 0; + } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) + return 0; + + /* + * At boot time, we don't yet know if devices will be 64-bit capable. + * Assume that they will -- if they turn out not to be, then we can + * take them out of the 1:1 domain later. + */ + if (!startup) { + /* + * If the device's dma_mask is less than the system's memory + * size then this is not a candidate for identity mapping. + */ + u64 dma_mask = pdev->dma_mask; + + if (pdev->dev.coherent_dma_mask && + pdev->dev.coherent_dma_mask < dma_mask) + dma_mask = pdev->dev.coherent_dma_mask; + + return dma_mask >= dma_get_required_mask(&pdev->dev); + } + + return 1; +} + +static int __init iommu_prepare_static_identity_mapping(int hw) +{ + struct pci_dev *pdev = NULL; + int ret; + + ret = si_domain_init(hw); + if (ret) + return -EFAULT; + + for_each_pci_dev(pdev) { + /* Skip Host/PCI Bridge devices */ + if (IS_BRIDGE_HOST_DEVICE(pdev)) + continue; + if (iommu_should_identity_map(pdev, 1)) { + printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", + hw ? "hardware" : "software", pci_name(pdev)); + + ret = domain_add_dev_info(si_domain, pdev, + hw ? 
CONTEXT_TT_PASS_THROUGH : + CONTEXT_TT_MULTI_LEVEL); + if (ret) + return ret; + } + } + + return 0; +} + +static int __init init_dmars(void) +{ + struct dmar_drhd_unit *drhd; + struct dmar_rmrr_unit *rmrr; + struct pci_dev *pdev; + struct intel_iommu *iommu; + int i, ret; + + /* + * for each drhd + * allocate root + * initialize and program root entry to not present + * endfor + */ + for_each_drhd_unit(drhd) { + g_num_of_iommus++; + /* + * lock not needed as this is only incremented in the + * single-threaded kernel __init code path; all other + * accesses are read-only + */ + } + + g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), + GFP_KERNEL); + if (!g_iommus) { + printk(KERN_ERR "Allocating global iommu array failed\n"); + ret = -ENOMEM; + goto error; + } + + deferred_flush = kzalloc(g_num_of_iommus * + sizeof(struct deferred_flush_tables), GFP_KERNEL); + if (!deferred_flush) { + ret = -ENOMEM; + goto error; + } + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + g_iommus[iommu->seq_id] = iommu; + + ret = iommu_init_domains(iommu); + if (ret) + goto error; + + /* + * TBD: + * we could share the same root & context tables + * among all IOMMUs. Need to split it later. + */ + ret = iommu_alloc_root_entry(iommu); + if (ret) { + printk(KERN_ERR "IOMMU: allocate root entry failed\n"); + goto error; + } + if (!ecap_pass_through(iommu->ecap)) + hw_pass_through = 0; + } + + /* + * Start from a sane iommu hardware state. + */ + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + + /* + * If the queued invalidation was already initialized by us + * (for example, while enabling interrupt-remapping) then + * things are already rolling from a sane state. + */ + if (iommu->qi) + continue; + + /* + * Clear any previous faults. + */ + dmar_fault(-1, iommu); + /* + * Disable queued invalidation if supported and already enabled + * before OS handover. + */ + dmar_disable_qi(iommu); + } + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based + * Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " + "invalidation\n", + iommu->seq_id, + (unsigned long long)drhd->reg_base_addr); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " + "invalidation\n", + iommu->seq_id, + (unsigned long long)drhd->reg_base_addr); + } + } + + if (iommu_pass_through) + iommu_identity_mapping |= IDENTMAP_ALL; + +#ifdef CONFIG_DMAR_BROKEN_GFX_WA + iommu_identity_mapping |= IDENTMAP_GFX; +#endif + + check_tylersburg_isoch(); + + /* + * If pass-through is not set or not enabled, set up context entries + * for identity mappings for rmrr, gfx, and isa, and possibly fall + * back to static identity mapping if iommu_identity_mapping is set.
+ */ + if (iommu_identity_mapping) { + ret = iommu_prepare_static_identity_mapping(hw_pass_through); + if (ret) { + printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); + goto error; + } + } + /* + * For each rmrr + * for each dev attached to rmrr + * do + * locate drhd for dev, alloc domain for dev + * allocate free domain + * allocate page table entries for rmrr + * if context not allocated for bus + * allocate and init context + * set present in root table for this bus + * init context with domain, translation etc + * endfor + * endfor + */ + printk(KERN_INFO "IOMMU: Setting RMRR:\n"); + for_each_rmrr_units(rmrr) { + for (i = 0; i < rmrr->devices_cnt; i++) { + pdev = rmrr->devices[i]; + /* + * some BIOS lists non-exist devices in DMAR + * table. + */ + if (!pdev) + continue; + ret = iommu_prepare_rmrr_dev(rmrr, pdev); + if (ret) + printk(KERN_ERR + "IOMMU: mapping reserved region failed\n"); + } + } + + iommu_prepare_isa(); + + /* + * for each drhd + * enable fault log + * global invalidate context cache + * global invalidate iotlb + * enable translation + */ + for_each_drhd_unit(drhd) { + if (drhd->ignored) { + /* + * we always have to disable PMRs or DMA may fail on + * this device + */ + if (force_on) + iommu_disable_protect_mem_regions(drhd->iommu); + continue; + } + iommu = drhd->iommu; + + iommu_flush_write_buffer(iommu); + + ret = dmar_set_interrupt(iommu); + if (ret) + goto error; + + iommu_set_root_entry(iommu); + + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); + + ret = iommu_enable_translation(iommu); + if (ret) + goto error; + + iommu_disable_protect_mem_regions(iommu); + } + + return 0; +error: + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + iommu = drhd->iommu; + free_iommu(iommu); + } + kfree(g_iommus); + return ret; +} + +/* This takes a number of _MM_ pages, not VTD pages */ +static struct iova *intel_alloc_iova(struct device *dev, + struct dmar_domain *domain, + unsigned long nrpages, uint64_t dma_mask) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct iova *iova = NULL; + + /* Restrict dma_mask to the width that the iommu can handle */ + dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + + if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { + /* + * First try to allocate an io virtual address in + * DMA_BIT_MASK(32) and if that fails then try allocating + * from higher range + */ + iova = alloc_iova(&domain->iovad, nrpages, + IOVA_PFN(DMA_BIT_MASK(32)), 1); + if (iova) + return iova; + } + iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); + if (unlikely(!iova)) { + printk(KERN_ERR "Allocating %ld-page iova for %s failed", + nrpages, pci_name(pdev)); + return NULL; + } + + return iova; +} + +static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) +{ + struct dmar_domain *domain; + int ret; + + domain = get_domain_for_dev(pdev, + DEFAULT_DOMAIN_ADDRESS_WIDTH); + if (!domain) { + printk(KERN_ERR + "Allocating domain for %s failed", pci_name(pdev)); + return NULL; + } + + /* make sure context mapping is ok */ + if (unlikely(!domain_context_mapped(pdev))) { + ret = domain_context_mapping(domain, pdev, + CONTEXT_TT_MULTI_LEVEL); + if (ret) { + printk(KERN_ERR + "Domain context map for %s failed", + pci_name(pdev)); + return NULL; + } + } + + return domain; +} + +static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) +{ + struct device_domain_info *info; + + /* No lock here, assumes no 
domain exit in normal case */ + info = dev->dev.archdata.iommu; + if (likely(info)) + return info->domain; + + return __get_valid_domain_for_dev(dev); +} + +static int iommu_dummy(struct pci_dev *pdev) +{ + return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; +} + +/* Check if the pdev needs to go through the non-identity map and unmap process. */ +static int iommu_no_mapping(struct device *dev) +{ + struct pci_dev *pdev; + int found; + + if (unlikely(dev->bus != &pci_bus_type)) + return 1; + + pdev = to_pci_dev(dev); + if (iommu_dummy(pdev)) + return 1; + + if (!iommu_identity_mapping) + return 0; + + found = identity_mapping(pdev); + if (found) { + if (iommu_should_identity_map(pdev, 0)) + return 1; + else { + /* + * The 32-bit device is removed from si_domain and + * falls back to non-identity mapping. + */ + domain_remove_one_dev_info(si_domain, pdev); + printk(KERN_INFO "32bit %s uses non-identity mapping\n", + pci_name(pdev)); + return 0; + } + } else { + /* + * In case of a 64-bit DMA device detached from a vm, the + * device is put into si_domain for identity mapping. + */ + if (iommu_should_identity_map(pdev, 0)) { + int ret; + ret = domain_add_dev_info(si_domain, pdev, + hw_pass_through ? + CONTEXT_TT_PASS_THROUGH : + CONTEXT_TT_MULTI_LEVEL); + if (!ret) { + printk(KERN_INFO "64bit %s uses identity mapping\n", + pci_name(pdev)); + return 1; + } + } + } + + return 0; +} + +static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir, u64 dma_mask) +{ + struct pci_dev *pdev = to_pci_dev(hwdev); + struct dmar_domain *domain; + phys_addr_t start_paddr; + struct iova *iova; + int prot = 0; + int ret; + struct intel_iommu *iommu; + unsigned long paddr_pfn = paddr >> PAGE_SHIFT; + + BUG_ON(dir == DMA_NONE); + + if (iommu_no_mapping(hwdev)) + return paddr; + + domain = get_valid_domain_for_dev(pdev); + if (!domain) + return 0; + + iommu = domain_get_iommu(domain); + size = aligned_nrpages(paddr, size); + + iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); + if (!iova) + goto error; + + /* + * Check if DMAR supports zero-length reads on write-only + * mappings. + */ + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ + !cap_zlr(iommu->cap)) + prot |= DMA_PTE_READ; + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + prot |= DMA_PTE_WRITE; + /* + * paddr to (paddr + size) might be a partial page; we should map the + * whole page. Note: if two parts of one page are separately mapped, we + * might have two guest_addr mappings to the same host paddr, but this + * is not a big problem + */ + ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), + mm_to_dma_pfn(paddr_pfn), size, prot); + if (ret) + goto error; + + /* it's a non-present to present mapping.
Only flush if caching mode */ + if (cap_caching_mode(iommu->cap)) + iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); + else + iommu_flush_write_buffer(iommu); + + start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; + start_paddr += paddr & ~PAGE_MASK; + return start_paddr; + +error: + if (iova) + __free_iova(&domain->iovad, iova); + printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", + pci_name(pdev), size, (unsigned long long)paddr, dir); + return 0; +} + +static dma_addr_t intel_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + return __intel_map_single(dev, page_to_phys(page) + offset, size, + dir, to_pci_dev(dev)->dma_mask); +} + +static void flush_unmaps(void) +{ + int i, j; + + timer_on = 0; + + /* just flush them all */ + for (i = 0; i < g_num_of_iommus; i++) { + struct intel_iommu *iommu = g_iommus[i]; + if (!iommu) + continue; + + if (!deferred_flush[i].next) + continue; + + /* In caching mode, global flushes turn emulation expensive */ + if (!cap_caching_mode(iommu->cap)) + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH); + for (j = 0; j < deferred_flush[i].next; j++) { + unsigned long mask; + struct iova *iova = deferred_flush[i].iova[j]; + struct dmar_domain *domain = deferred_flush[i].domain[j]; + + /* On real hardware multiple invalidations are expensive */ + if (cap_caching_mode(iommu->cap)) + iommu_flush_iotlb_psi(iommu, domain->id, + iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); + else { + mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); + iommu_flush_dev_iotlb(deferred_flush[i].domain[j], + (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); + } + __free_iova(&deferred_flush[i].domain[j]->iovad, iova); + } + deferred_flush[i].next = 0; + } + + list_size = 0; +} + +static void flush_unmaps_timeout(unsigned long data) +{ + unsigned long flags; + + spin_lock_irqsave(&async_umap_flush_lock, flags); + flush_unmaps(); + spin_unlock_irqrestore(&async_umap_flush_lock, flags); +} + +static void add_unmap(struct dmar_domain *dom, struct iova *iova) +{ + unsigned long flags; + int next, iommu_id; + struct intel_iommu *iommu; + + spin_lock_irqsave(&async_umap_flush_lock, flags); + if (list_size == HIGH_WATER_MARK) + flush_unmaps(); + + iommu = domain_get_iommu(dom); + iommu_id = iommu->seq_id; + + next = deferred_flush[iommu_id].next; + deferred_flush[iommu_id].domain[next] = dom; + deferred_flush[iommu_id].iova[next] = iova; + deferred_flush[iommu_id].next++; + + if (!timer_on) { + mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); + timer_on = 1; + } + list_size++; + spin_unlock_irqrestore(&async_umap_flush_lock, flags); +} + +static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct dmar_domain *domain; + unsigned long start_pfn, last_pfn; + struct iova *iova; + struct intel_iommu *iommu; + + if (iommu_no_mapping(dev)) + return; + + domain = find_domain(pdev); + BUG_ON(!domain); + + iommu = domain_get_iommu(domain); + + iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); + if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", + (unsigned long long)dev_addr)) + return; + + start_pfn = mm_to_dma_pfn(iova->pfn_lo); + last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; + + pr_debug("Device %s unmapping: pfn %lx-%lx\n", + pci_name(pdev), start_pfn, last_pfn); + + 
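+ /* iova pfns are MM pages; mm_to_dma_pfn() converts them to VTD pfns by the MM/VTD page-size ratio. E.g. with 16KiB MM pages and 4KiB VTD pages that is a factor of four, so an iova with pfn_lo == pfn_hi == 0x100 covers VTD pfns 0x400-0x403 (note the pfn_hi + 1 conversion above); with 4KiB MM pages the conversion is the identity. */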
/* clear the whole page */ + dma_pte_clear_range(domain, start_pfn, last_pfn); + + /* free page tables */ + dma_pte_free_pagetable(domain, start_pfn, last_pfn); + + if (intel_iommu_strict) { + iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, + last_pfn - start_pfn + 1, 0); + /* free iova */ + __free_iova(&domain->iovad, iova); + } else { + add_unmap(domain, iova); + /* + * queue up the release of the unmap to save the 1/6th of the + * cpu used up by the iotlb flush operation... + */ + } +} + +static void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + void *vaddr; + int order; + + size = PAGE_ALIGN(size); + order = get_order(size); + + if (!iommu_no_mapping(hwdev)) + flags &= ~(GFP_DMA | GFP_DMA32); + else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { + if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) + flags |= GFP_DMA; + else + flags |= GFP_DMA32; + } + + vaddr = (void *)__get_free_pages(flags, order); + if (!vaddr) + return NULL; + memset(vaddr, 0, size); + + *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, + DMA_BIDIRECTIONAL, + hwdev->coherent_dma_mask); + if (*dma_handle) + return vaddr; + free_pages((unsigned long)vaddr, order); + return NULL; +} + +static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) +{ + int order; + + size = PAGE_ALIGN(size); + order = get_order(size); + + intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); + free_pages((unsigned long)vaddr, order); +} + +static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + struct pci_dev *pdev = to_pci_dev(hwdev); + struct dmar_domain *domain; + unsigned long start_pfn, last_pfn; + struct iova *iova; + struct intel_iommu *iommu; + + if (iommu_no_mapping(hwdev)) + return; + + domain = find_domain(pdev); + BUG_ON(!domain); + + iommu = domain_get_iommu(domain); + + iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); + if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", + (unsigned long long)sglist[0].dma_address)) + return; + + start_pfn = mm_to_dma_pfn(iova->pfn_lo); + last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; + + /* clear the whole page */ + dma_pte_clear_range(domain, start_pfn, last_pfn); + + /* free page tables */ + dma_pte_free_pagetable(domain, start_pfn, last_pfn); + + if (intel_iommu_strict) { + iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, + last_pfn - start_pfn + 1, 0); + /* free iova */ + __free_iova(&domain->iovad, iova); + } else { + add_unmap(domain, iova); + /* + * queue up the release of the unmap to save the 1/6th of the + * cpu used up by the iotlb flush operation... 
+ */ + } +} + +static int intel_nontranslate_map_sg(struct device *hddev, + struct scatterlist *sglist, int nelems, int dir) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sglist, sg, nelems, i) { + BUG_ON(!sg_page(sg)); + sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; + sg->dma_length = sg->length; + } + return nelems; +} + +static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + int i; + struct pci_dev *pdev = to_pci_dev(hwdev); + struct dmar_domain *domain; + size_t size = 0; + int prot = 0; + struct iova *iova = NULL; + int ret; + struct scatterlist *sg; + unsigned long start_vpfn; + struct intel_iommu *iommu; + + BUG_ON(dir == DMA_NONE); + if (iommu_no_mapping(hwdev)) + return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); + + domain = get_valid_domain_for_dev(pdev); + if (!domain) + return 0; + + iommu = domain_get_iommu(domain); + + for_each_sg(sglist, sg, nelems, i) + size += aligned_nrpages(sg->offset, sg->length); + + iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), + pdev->dma_mask); + if (!iova) { + sglist->dma_length = 0; + return 0; + } + + /* + * Check if DMAR supports zero-length reads on write only + * mappings.. + */ + if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ + !cap_zlr(iommu->cap)) + prot |= DMA_PTE_READ; + if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) + prot |= DMA_PTE_WRITE; + + start_vpfn = mm_to_dma_pfn(iova->pfn_lo); + + ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); + if (unlikely(ret)) { + /* clear the page */ + dma_pte_clear_range(domain, start_vpfn, + start_vpfn + size - 1); + /* free page tables */ + dma_pte_free_pagetable(domain, start_vpfn, + start_vpfn + size - 1); + /* free iova */ + __free_iova(&domain->iovad, iova); + return 0; + } + + /* it's a non-present to present mapping. 
Only flush if caching mode */ + if (cap_caching_mode(iommu->cap)) + iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); + else + iommu_flush_write_buffer(iommu); + + return nelems; +} + +static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return !dma_addr; +} + +struct dma_map_ops intel_dma_ops = { + .alloc_coherent = intel_alloc_coherent, + .free_coherent = intel_free_coherent, + .map_sg = intel_map_sg, + .unmap_sg = intel_unmap_sg, + .map_page = intel_map_page, + .unmap_page = intel_unmap_page, + .mapping_error = intel_mapping_error, +}; + +static inline int iommu_domain_cache_init(void) +{ + int ret = 0; + + iommu_domain_cache = kmem_cache_create("iommu_domain", + sizeof(struct dmar_domain), + 0, + SLAB_HWCACHE_ALIGN, + + NULL); + if (!iommu_domain_cache) { + printk(KERN_ERR "Couldn't create iommu_domain cache\n"); + ret = -ENOMEM; + } + + return ret; +} + +static inline int iommu_devinfo_cache_init(void) +{ + int ret = 0; + + iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", + sizeof(struct device_domain_info), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!iommu_devinfo_cache) { + printk(KERN_ERR "Couldn't create devinfo cache\n"); + ret = -ENOMEM; + } + + return ret; +} + +static inline int iommu_iova_cache_init(void) +{ + int ret = 0; + + iommu_iova_cache = kmem_cache_create("iommu_iova", + sizeof(struct iova), + 0, + SLAB_HWCACHE_ALIGN, + NULL); + if (!iommu_iova_cache) { + printk(KERN_ERR "Couldn't create iova cache\n"); + ret = -ENOMEM; + } + + return ret; +} + +static int __init iommu_init_mempool(void) +{ + int ret; + ret = iommu_iova_cache_init(); + if (ret) + return ret; + + ret = iommu_domain_cache_init(); + if (ret) + goto domain_error; + + ret = iommu_devinfo_cache_init(); + if (!ret) + return ret; + + kmem_cache_destroy(iommu_domain_cache); +domain_error: + kmem_cache_destroy(iommu_iova_cache); + + return -ENOMEM; +} + +static void __init iommu_exit_mempool(void) +{ + kmem_cache_destroy(iommu_devinfo_cache); + kmem_cache_destroy(iommu_domain_cache); + kmem_cache_destroy(iommu_iova_cache); + +} + +static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + u32 vtbar; + int rc; + + /* We know that this device on this chipset has its own IOMMU. + * If we find it under a different IOMMU, then the BIOS is lying + * to us. Hope that the IOMMU for this device is actually + * disabled, and it needs no translation... 
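struct dma_map_ops is the dispatch table that makes this driver transparent to device drivers: once dma_ops points at intel_dma_ops, generic DMA API calls funnel into the functions above. A simplified userspace model of that indirection (types and names are illustrative stand-ins, not the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    typedef unsigned long dma_addr_t;

    struct my_dma_ops {                       /* stand-in for struct dma_map_ops */
        dma_addr_t (*map_page)(void *page, size_t offset, size_t size);
        int (*mapping_error)(dma_addr_t addr);
    };

    static dma_addr_t map_page_sketch(void *page, size_t offset, size_t size)
    {
        (void)size;                           /* the real code allocates an IOVA here */
        return (dma_addr_t)page + offset;
    }

    static int mapping_error_sketch(dma_addr_t addr)
    {
        return !addr;                         /* 0 is the failure cookie, as above */
    }

    static const struct my_dma_ops intel_ops_sketch = {
        .map_page      = map_page_sketch,
        .mapping_error = mapping_error_sketch,
    };

    /* analogous to "dma_ops = &intel_dma_ops" done later in intel_iommu_init() */
    static const struct my_dma_ops *dma_ops_sketch = &intel_ops_sketch;

    int main(void)
    {
        char buf[64];
        dma_addr_t handle = dma_ops_sketch->map_page(buf, 0, sizeof(buf));
        printf("mapping ok: %d\n", !dma_ops_sketch->mapping_error(handle));
        return 0;
    }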
+ */ + rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); + if (rc) { + /* "can't" happen */ + dev_info(&pdev->dev, "failed to run vt-d quirk\n"); + return; + } + vtbar &= 0xffff0000; + + /* we know that the this iommu should be at offset 0xa000 from vtbar */ + drhd = dmar_find_matched_drhd_unit(pdev); + if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, + TAINT_FIRMWARE_WORKAROUND, + "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) + pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); + +static void __init init_no_remapping_devices(void) +{ + struct dmar_drhd_unit *drhd; + + for_each_drhd_unit(drhd) { + if (!drhd->include_all) { + int i; + for (i = 0; i < drhd->devices_cnt; i++) + if (drhd->devices[i] != NULL) + break; + /* ignore DMAR unit if no pci devices exist */ + if (i == drhd->devices_cnt) + drhd->ignored = 1; + } + } + + if (dmar_map_gfx) + return; + + for_each_drhd_unit(drhd) { + int i; + if (drhd->ignored || drhd->include_all) + continue; + + for (i = 0; i < drhd->devices_cnt; i++) + if (drhd->devices[i] && + !IS_GFX_DEVICE(drhd->devices[i])) + break; + + if (i < drhd->devices_cnt) + continue; + + /* bypass IOMMU if it is just for gfx devices */ + drhd->ignored = 1; + for (i = 0; i < drhd->devices_cnt; i++) { + if (!drhd->devices[i]) + continue; + drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + } + } +} + +#ifdef CONFIG_SUSPEND +static int init_iommu_hw(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + + for_each_active_iommu(iommu, drhd) + if (iommu->qi) + dmar_reenable_qi(iommu); + + for_each_iommu(iommu, drhd) { + if (drhd->ignored) { + /* + * we always have to disable PMRs or DMA may fail on + * this device + */ + if (force_on) + iommu_disable_protect_mem_regions(iommu); + continue; + } + + iommu_flush_write_buffer(iommu); + + iommu_set_root_entry(iommu); + + iommu->flush.flush_context(iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH); + if (iommu_enable_translation(iommu)) + return 1; + iommu_disable_protect_mem_regions(iommu); + } + + return 0; +} + +static void iommu_flush_all(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + + for_each_active_iommu(iommu, drhd) { + iommu->flush.flush_context(iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH); + } +} + +static int iommu_suspend(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + unsigned long flag; + + for_each_active_iommu(iommu, drhd) { + iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, + GFP_ATOMIC); + if (!iommu->iommu_state) + goto nomem; + } + + iommu_flush_all(); + + for_each_active_iommu(iommu, drhd) { + iommu_disable_translation(iommu); + + spin_lock_irqsave(&iommu->register_lock, flag); + + iommu->iommu_state[SR_DMAR_FECTL_REG] = + readl(iommu->reg + DMAR_FECTL_REG); + iommu->iommu_state[SR_DMAR_FEDATA_REG] = + readl(iommu->reg + DMAR_FEDATA_REG); + iommu->iommu_state[SR_DMAR_FEADDR_REG] = + readl(iommu->reg + DMAR_FEADDR_REG); + iommu->iommu_state[SR_DMAR_FEUADDR_REG] = + readl(iommu->reg + DMAR_FEUADDR_REG); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + } + return 0; + +nomem: + for_each_active_iommu(iommu, drhd) + kfree(iommu->iommu_state); + + return -ENOMEM; +} + +static void iommu_resume(void) +{ + 
struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu = NULL; + unsigned long flag; + + if (init_iommu_hw()) { + if (force_on) + panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); + else + WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); + return; + } + + for_each_active_iommu(iommu, drhd) { + + spin_lock_irqsave(&iommu->register_lock, flag); + + writel(iommu->iommu_state[SR_DMAR_FECTL_REG], + iommu->reg + DMAR_FECTL_REG); + writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], + iommu->reg + DMAR_FEDATA_REG); + writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], + iommu->reg + DMAR_FEADDR_REG); + writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], + iommu->reg + DMAR_FEUADDR_REG); + + spin_unlock_irqrestore(&iommu->register_lock, flag); + } + + for_each_active_iommu(iommu, drhd) + kfree(iommu->iommu_state); +} + +static struct syscore_ops iommu_syscore_ops = { + .resume = iommu_resume, + .suspend = iommu_suspend, +}; + +static void __init init_iommu_pm_ops(void) +{ + register_syscore_ops(&iommu_syscore_ops); +} + +#else +static inline void init_iommu_pm_ops(void) {} +#endif /* CONFIG_PM */ + +/* + * Here we only respond to action of unbound device from driver. + * + * Added device is not attached to its DMAR domain here yet. That will happen + * when mapping the device to iova. + */ +static int device_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dmar_domain *domain; + + if (iommu_no_mapping(dev)) + return 0; + + domain = find_domain(pdev); + if (!domain) + return 0; + + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { + domain_remove_one_dev_info(domain, pdev); + + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && + list_empty(&domain->devices)) + domain_exit(domain); + } + + return 0; +} + +static struct notifier_block device_nb = { + .notifier_call = device_notifier, +}; + +int __init intel_iommu_init(void) +{ + int ret = 0; + + /* VT-d is required for a TXT/tboot launch, so enforce that */ + force_on = tboot_force_iommu(); + + if (dmar_table_init()) { + if (force_on) + panic("tboot: Failed to initialize DMAR table\n"); + return -ENODEV; + } + + if (dmar_dev_scope_init()) { + if (force_on) + panic("tboot: Failed to initialize DMAR device scope\n"); + return -ENODEV; + } + + /* + * Check the need for DMA-remapping initialization now. + * Above initialization will also be used by Interrupt-remapping. 
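device_notifier() above hooks the PCI bus notifier chain so that a device's domain info can be removed, and an emptied domain destroyed, when its driver unbinds. A toy model of the callback-registration pattern (the action value and names are illustrative):

    #include <stdio.h>

    #define BUS_NOTIFY_UNBOUND_DRIVER 1       /* illustrative value */

    struct nb_sketch {
        int (*notifier_call)(struct nb_sketch *nb, unsigned long action,
                             void *data);
    };

    static int device_notifier_sketch(struct nb_sketch *nb,
                                      unsigned long action, void *data)
    {
        (void)nb;
        if (action == BUS_NOTIFY_UNBOUND_DRIVER)
            printf("unbound: %s -> remove dev info, maybe exit domain\n",
                   (const char *)data);
        return 0;
    }

    static struct nb_sketch device_nb_sketch = {
        .notifier_call = device_notifier_sketch,
    };

    int main(void)
    {
        /* the bus core would invoke every registered notifier like this */
        device_nb_sketch.notifier_call(&device_nb_sketch,
                                       BUS_NOTIFY_UNBOUND_DRIVER,
                                       "0000:00:1f.2");
        return 0;
    }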
+ */ + if (no_iommu || dmar_disabled) + return -ENODEV; + + if (iommu_init_mempool()) { + if (force_on) + panic("tboot: Failed to initialize iommu memory\n"); + return -ENODEV; + } + + if (dmar_init_reserved_ranges()) { + if (force_on) + panic("tboot: Failed to reserve iommu ranges\n"); + return -ENODEV; + } + + init_no_remapping_devices(); + + ret = init_dmars(); + if (ret) { + if (force_on) + panic("tboot: Failed to initialize DMARs\n"); + printk(KERN_ERR "IOMMU: dmar init failed\n"); + put_iova_domain(&reserved_iova_list); + iommu_exit_mempool(); + return ret; + } + printk(KERN_INFO + "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); + + init_timer(&unmap_timer); +#ifdef CONFIG_SWIOTLB + swiotlb = 0; +#endif + dma_ops = &intel_dma_ops; + + init_iommu_pm_ops(); + + register_iommu(&intel_iommu_ops); + + bus_register_notifier(&pci_bus_type, &device_nb); + + return 0; +} + +static void iommu_detach_dependent_devices(struct intel_iommu *iommu, + struct pci_dev *pdev) +{ + struct pci_dev *tmp, *parent; + + if (!iommu || !pdev) + return; + + /* dependent device detach */ + tmp = pci_find_upstream_pcie_bridge(pdev); + /* Secondary interface's bus number and devfn 0 */ + if (tmp) { + parent = pdev->bus->self; + while (parent != tmp) { + iommu_detach_dev(iommu, parent->bus->number, + parent->devfn); + parent = parent->bus->self; + } + if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ + iommu_detach_dev(iommu, + tmp->subordinate->number, 0); + else /* this is a legacy PCI bridge */ + iommu_detach_dev(iommu, tmp->bus->number, + tmp->devfn); + } +} + +static void domain_remove_one_dev_info(struct dmar_domain *domain, + struct pci_dev *pdev) +{ + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags; + int found = 0; + struct list_head *entry, *tmp; + + iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, + pdev->devfn); + if (!iommu) + return; + + spin_lock_irqsave(&device_domain_lock, flags); + list_for_each_safe(entry, tmp, &domain->devices) { + info = list_entry(entry, struct device_domain_info, link); + if (info->segment == pci_domain_nr(pdev->bus) && + info->bus == pdev->bus->number && + info->devfn == pdev->devfn) { + list_del(&info->link); + list_del(&info->global); + if (info->dev) + info->dev->dev.archdata.iommu = NULL; + spin_unlock_irqrestore(&device_domain_lock, flags); + + iommu_disable_dev_iotlb(info); + iommu_detach_dev(iommu, info->bus, info->devfn); + iommu_detach_dependent_devices(iommu, pdev); + free_devinfo_mem(info); + + spin_lock_irqsave(&device_domain_lock, flags); + + if (found) + break; + else + continue; + } + + /* if there is no other devices under the same iommu + * owned by this domain, clear this iommu in iommu_bmp + * update iommu count and coherency + */ + if (iommu == device_to_iommu(info->segment, info->bus, + info->devfn)) + found = 1; + } + + if (found == 0) { + unsigned long tmp_flags; + spin_lock_irqsave(&domain->iommu_lock, tmp_flags); + clear_bit(iommu->seq_id, &domain->iommu_bmp); + domain->iommu_count--; + domain_update_iommu_cap(domain); + spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); + + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { + spin_lock_irqsave(&iommu->lock, tmp_flags); + clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; + spin_unlock_irqrestore(&iommu->lock, tmp_flags); + } + } + + spin_unlock_irqrestore(&device_domain_lock, flags); +} + +static void 
vm_domain_remove_all_dev_info(struct dmar_domain *domain) +{ + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags1, flags2; + + spin_lock_irqsave(&device_domain_lock, flags1); + while (!list_empty(&domain->devices)) { + info = list_entry(domain->devices.next, + struct device_domain_info, link); + list_del(&info->link); + list_del(&info->global); + if (info->dev) + info->dev->dev.archdata.iommu = NULL; + + spin_unlock_irqrestore(&device_domain_lock, flags1); + + iommu_disable_dev_iotlb(info); + iommu = device_to_iommu(info->segment, info->bus, info->devfn); + iommu_detach_dev(iommu, info->bus, info->devfn); + iommu_detach_dependent_devices(iommu, info->dev); + + /* clear this iommu in iommu_bmp, update iommu count + * and capabilities + */ + spin_lock_irqsave(&domain->iommu_lock, flags2); + if (test_and_clear_bit(iommu->seq_id, + &domain->iommu_bmp)) { + domain->iommu_count--; + domain_update_iommu_cap(domain); + } + spin_unlock_irqrestore(&domain->iommu_lock, flags2); + + free_devinfo_mem(info); + spin_lock_irqsave(&device_domain_lock, flags1); + } + spin_unlock_irqrestore(&device_domain_lock, flags1); +} + +/* domain id for virtual machine, it won't be set in context */ +static unsigned long vm_domid; + +static struct dmar_domain *iommu_alloc_vm_domain(void) +{ + struct dmar_domain *domain; + + domain = alloc_domain_mem(); + if (!domain) + return NULL; + + domain->id = vm_domid++; + domain->nid = -1; + memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); + domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; + + return domain; +} + +static int md_domain_init(struct dmar_domain *domain, int guest_width) +{ + int adjust_width; + + init_iova_domain(&domain->iovad, DMA_32BIT_PFN); + spin_lock_init(&domain->iommu_lock); + + domain_reserve_special_ranges(domain); + + /* calculate AGAW */ + domain->gaw = guest_width; + adjust_width = guestwidth_to_adjustwidth(guest_width); + domain->agaw = width_to_agaw(adjust_width); + + INIT_LIST_HEAD(&domain->devices); + + domain->iommu_count = 0; + domain->iommu_coherency = 0; + domain->iommu_snooping = 0; + domain->iommu_superpage = 0; + domain->max_addr = 0; + domain->nid = -1; + + /* always allocate the top pgd */ + domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); + if (!domain->pgd) + return -ENOMEM; + domain_flush_cache(domain, domain->pgd, PAGE_SIZE); + return 0; +} + +static void iommu_free_vm_domain(struct dmar_domain *domain) +{ + unsigned long flags; + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + unsigned long i; + unsigned long ndomains; + + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + iommu = drhd->iommu; + + ndomains = cap_ndoms(iommu->cap); + for_each_set_bit(i, iommu->domain_ids, ndomains) { + if (iommu->domains[i] == domain) { + spin_lock_irqsave(&iommu->lock, flags); + clear_bit(i, iommu->domain_ids); + iommu->domains[i] = NULL; + spin_unlock_irqrestore(&iommu->lock, flags); + break; + } + } + } +} + +static void vm_domain_exit(struct dmar_domain *domain) +{ + /* Domain 0 is reserved, so dont process it */ + if (!domain) + return; + + vm_domain_remove_all_dev_info(domain); + /* destroy iovas */ + put_iova_domain(&domain->iovad); + + /* clear ptes */ + dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + + /* free page tables */ + dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + + iommu_free_vm_domain(domain); + free_domain_mem(domain); +} + +static int intel_iommu_domain_init(struct iommu_domain *domain) +{ + struct dmar_domain *dmar_domain; 
+ + dmar_domain = iommu_alloc_vm_domain(); + if (!dmar_domain) { + printk(KERN_ERR + "intel_iommu_domain_init: dmar_domain == NULL\n"); + return -ENOMEM; + } + if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + printk(KERN_ERR + "intel_iommu_domain_init() failed\n"); + vm_domain_exit(dmar_domain); + return -ENOMEM; + } + domain->priv = dmar_domain; + + return 0; +} + +static void intel_iommu_domain_destroy(struct iommu_domain *domain) +{ + struct dmar_domain *dmar_domain = domain->priv; + + domain->priv = NULL; + vm_domain_exit(dmar_domain); +} + +static int intel_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct dmar_domain *dmar_domain = domain->priv; + struct pci_dev *pdev = to_pci_dev(dev); + struct intel_iommu *iommu; + int addr_width; + + /* normally pdev is not mapped */ + if (unlikely(domain_context_mapped(pdev))) { + struct dmar_domain *old_domain; + + old_domain = find_domain(pdev); + if (old_domain) { + if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || + dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) + domain_remove_one_dev_info(old_domain, pdev); + else + domain_remove_dev_info(old_domain); + } + } + + iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, + pdev->devfn); + if (!iommu) + return -ENODEV; + + /* check if this iommu agaw is sufficient for max mapped address */ + addr_width = agaw_to_width(iommu->agaw); + if (addr_width > cap_mgaw(iommu->cap)) + addr_width = cap_mgaw(iommu->cap); + + if (dmar_domain->max_addr > (1LL << addr_width)) { + printk(KERN_ERR "%s: iommu width (%d) is not " + "sufficient for the mapped address (%llx)\n", + __func__, addr_width, dmar_domain->max_addr); + return -EFAULT; + } + dmar_domain->gaw = addr_width; + + /* + * Knock out extra levels of page tables if necessary + */ + while (iommu->agaw < dmar_domain->agaw) { + struct dma_pte *pte; + + pte = dmar_domain->pgd; + if (dma_pte_present(pte)) { + dmar_domain->pgd = (struct dma_pte *) + phys_to_virt(dma_pte_addr(pte)); + free_pgtable_page(pte); + } + dmar_domain->agaw--; + } + + return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); +} + +static void intel_iommu_detach_device(struct iommu_domain *domain, + struct device *dev) +{ + struct dmar_domain *dmar_domain = domain->priv; + struct pci_dev *pdev = to_pci_dev(dev); + + domain_remove_one_dev_info(dmar_domain, pdev); +} + +static int intel_iommu_map(struct iommu_domain *domain, + unsigned long iova, phys_addr_t hpa, + int gfp_order, int iommu_prot) +{ + struct dmar_domain *dmar_domain = domain->priv; + u64 max_addr; + int prot = 0; + size_t size; + int ret; + + if (iommu_prot & IOMMU_READ) + prot |= DMA_PTE_READ; + if (iommu_prot & IOMMU_WRITE) + prot |= DMA_PTE_WRITE; + if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) + prot |= DMA_PTE_SNP; + + size = PAGE_SIZE << gfp_order; + max_addr = iova + size; + if (dmar_domain->max_addr < max_addr) { + u64 end; + + /* check if minimum agaw is sufficient for mapped address */ + end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; + if (end < max_addr) { + printk(KERN_ERR "%s: iommu width (%d) is not " + "sufficient for the mapped address (%llx)\n", + __func__, dmar_domain->gaw, max_addr); + return -EFAULT; + } + dmar_domain->max_addr = max_addr; + } + /* Round up size to next multiple of PAGE_SIZE, if it and + the low bits of hpa would take us onto the next page */ + size = aligned_nrpages(hpa, size); + ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, + hpa >> VTD_PAGE_SHIFT, size, prot); + 
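The size handed to domain_pfn_mapping() follows the rounding rule the comment above describes: if the low bits of hpa plus the size spill onto another page, one extra page is mapped. A standalone sketch of that arithmetic (the helper is an illustrative re-implementation; a 4 KiB page, PAGE_SHIFT of 12, is assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1UL << PAGE_SHIFT)

    static unsigned long aligned_nrpages_sketch(unsigned long long hpa, size_t size)
    {
        /* round up to whole pages if hpa's low bits spill onto the next page */
        unsigned long long host_addr = hpa & (PAGE_SIZE - 1);
        return (host_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        int gfp_order = 1;                       /* caller maps 2^1 pages */
        size_t size = PAGE_SIZE << gfp_order;    /* 8 KiB */
        /* an hpa 0x1234 bytes into a page needs 3 pages to cover 8 KiB */
        printf("npages = %llu\n",
               (unsigned long long)aligned_nrpages_sketch(0x1234, size));
        return 0;
    }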
return ret; +} + +static int intel_iommu_unmap(struct iommu_domain *domain, + unsigned long iova, int gfp_order) +{ + struct dmar_domain *dmar_domain = domain->priv; + size_t size = PAGE_SIZE << gfp_order; + + dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, + (iova + size - 1) >> VTD_PAGE_SHIFT); + + if (dmar_domain->max_addr == iova + size) + dmar_domain->max_addr = iova; + + return gfp_order; +} + +static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, + unsigned long iova) +{ + struct dmar_domain *dmar_domain = domain->priv; + struct dma_pte *pte; + u64 phys = 0; + + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); + if (pte) + phys = dma_pte_addr(pte); + + return phys; +} + +static int intel_iommu_domain_has_cap(struct iommu_domain *domain, + unsigned long cap) +{ + struct dmar_domain *dmar_domain = domain->priv; + + if (cap == IOMMU_CAP_CACHE_COHERENCY) + return dmar_domain->iommu_snooping; + if (cap == IOMMU_CAP_INTR_REMAP) + return intr_remapping_enabled; + + return 0; +} + +static struct iommu_ops intel_iommu_ops = { + .domain_init = intel_iommu_domain_init, + .domain_destroy = intel_iommu_domain_destroy, + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map = intel_iommu_map, + .unmap = intel_iommu_unmap, + .iova_to_phys = intel_iommu_iova_to_phys, + .domain_has_cap = intel_iommu_domain_has_cap, +}; + +static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) +{ + /* + * Mobile 4 Series Chipset neglects to set RWBF capability, + * but needs it: + */ + printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); + rwbf_quirk = 1; + + /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ + if (dev->revision == 0x07) { + printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); + dmar_map_gfx = 0; + } +} + +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); + +#define GGC 0x52 +#define GGC_MEMORY_SIZE_MASK (0xf << 8) +#define GGC_MEMORY_SIZE_NONE (0x0 << 8) +#define GGC_MEMORY_SIZE_1M (0x1 << 8) +#define GGC_MEMORY_SIZE_2M (0x3 << 8) +#define GGC_MEMORY_VT_ENABLED (0x8 << 8) +#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) +#define GGC_MEMORY_SIZE_3M_VT (0xa << 8) +#define GGC_MEMORY_SIZE_4M_VT (0xb << 8) + +static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) +{ + unsigned short ggc; + + if (pci_read_config_word(dev, GGC, &ggc)) + return; + + if (!(ggc & GGC_MEMORY_VT_ENABLED)) { + printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); + dmar_map_gfx = 0; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); + +/* On Tylersburg chipsets, some BIOSes have been known to enable the + ISOCH DMAR unit for the Azalia sound device, but not give it any + TLB entries, which causes it to deadlock. Check for that. We do + this in a function called from init_dmars(), instead of in a PCI + quirk, because we don't want to print the obnoxious "BIOS broken" + message if VT-d is actually disabled. +*/ +static void __init check_tylersburg_isoch(void) +{ + struct pci_dev *pdev; + uint32_t vtisochctrl; + + /* If there's no Azalia in the system anyway, forget it. 
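quirk_calpella_no_shadow_gtt() needs only one bit of the GGC word: whether the BIOS carved out VT-enabled stolen memory for the GPU's shadow GTT. A tiny sketch of that decode using the masks defined above (the sample register value is invented):

    #include <stdio.h>

    #define GGC_MEMORY_VT_ENABLED  (0x8 << 8)
    #define GGC_MEMORY_SIZE_2M_VT  (0x9 << 8)

    int main(void)
    {
        unsigned short ggc = GGC_MEMORY_SIZE_2M_VT;  /* example: 2M, VT enabled */

        if (!(ggc & GGC_MEMORY_VT_ENABLED))
            printf("no shadow GTT: would disable IOMMU for graphics\n");
        else
            printf("BIOS allocated VT-enabled stolen memory\n");
        return 0;
    }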
*/ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); + if (!pdev) + return; + pci_dev_put(pdev); + + /* System Management Registers. Might be hidden, in which case + we can't do the sanity check. But that's OK, because the + known-broken BIOSes _don't_ actually hide it, so far. */ + pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); + if (!pdev) + return; + + if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { + pci_dev_put(pdev); + return; + } + + pci_dev_put(pdev); + + /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ + if (vtisochctrl & 1) + return; + + /* Drop all bits other than the number of TLB entries */ + vtisochctrl &= 0x1c; + + /* If we have the recommended number of TLB entries (16), fine. */ + if (vtisochctrl == 0x10) + return; + + /* Zero TLB entries? You get to ride the short bus to school. */ + if (!vtisochctrl) { + WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + iommu_identity_mapping |= IDENTMAP_AZALIA; + return; + } + + printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", + vtisochctrl); +} diff --git a/drivers/iommu/intr_remapping.c b/drivers/iommu/intr_remapping.c new file mode 100644 index 000000000000..1a89d4a2cadf --- /dev/null +++ b/drivers/iommu/intr_remapping.c @@ -0,0 +1,797 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "intr_remapping.h" +#include +#include + +static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; +static struct hpet_scope ir_hpet[MAX_HPET_TBS]; +static int ir_ioapic_num, ir_hpet_num; +int intr_remapping_enabled; + +static int disable_intremap; +static int disable_sourceid_checking; + +static __init int setup_nointremap(char *str) +{ + disable_intremap = 1; + return 0; +} +early_param("nointremap", setup_nointremap); + +static __init int setup_intremap(char *str) +{ + if (!str) + return -EINVAL; + + if (!strncmp(str, "on", 2)) + disable_intremap = 0; + else if (!strncmp(str, "off", 3)) + disable_intremap = 1; + else if (!strncmp(str, "nosid", 5)) + disable_sourceid_checking = 1; + + return 0; +} +early_param("intremap", setup_intremap); + +static DEFINE_SPINLOCK(irq_2_ir_lock); + +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) +{ + struct irq_cfg *cfg = irq_get_chip_data(irq); + return cfg ? &cfg->irq_2_iommu : NULL; +} + +int get_irte(int irq, struct irte *entry) +{ + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + unsigned long flags; + int index; + + if (!entry || !irq_iommu) + return -1; + + spin_lock_irqsave(&irq_2_ir_lock, flags); + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + *entry = *(irq_iommu->iommu->ir_table->base + index); + + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + return 0; +} + +int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + struct ir_table *table = iommu->ir_table; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + u16 index, start_index; + unsigned int mask = 0; + unsigned long flags; + int i; + + if (!count || !irq_iommu) + return -1; + + /* + * start the IRTE search from index 0. 
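alloc_irte(), continued below, rounds a multi-IRTE request up to a power of two so the whole block can be addressed by a base index plus a mask; the mask is then checked against ecap_max_handle_mask(). A standalone sketch of the count/mask arithmetic (the helpers are illustrative re-implementations of the kernel's roundup_pow_of_two/ilog2):

    #include <stdio.h>

    static unsigned int roundup_pow_of_two_sketch(unsigned int n)
    {
        unsigned int v = 1;
        while (v < n)
            v <<= 1;
        return v;
    }

    static unsigned int ilog2_sketch(unsigned int n)
    {
        unsigned int l = 0;
        while (n >>= 1)
            l++;
        return l;
    }

    int main(void)
    {
        unsigned int count = 5;                      /* e.g. a 5-vector MSI request */
        count = roundup_pow_of_two_sketch(count);    /* -> 8 contiguous IRTEs */
        unsigned int mask = ilog2_sketch(count);     /* -> 3, stored in irte_mask */
        printf("count=%u mask=%u\n", count, mask);
        return 0;
    }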
+ */ + index = start_index = 0; + + if (count > 1) { + count = __roundup_pow_of_two(count); + mask = ilog2(count); + } + + if (mask > ecap_max_handle_mask(iommu->ecap)) { + printk(KERN_ERR + "Requested mask %x exceeds the max invalidation handle" + " mask value %Lx\n", mask, + ecap_max_handle_mask(iommu->ecap)); + return -1; + } + + spin_lock_irqsave(&irq_2_ir_lock, flags); + do { + for (i = index; i < index + count; i++) + if (table->base[i].present) + break; + /* empty index found */ + if (i == index + count) + break; + + index = (index + count) % INTR_REMAP_TABLE_ENTRIES; + + if (index == start_index) { + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + printk(KERN_ERR "can't allocate an IRTE\n"); + return -1; + } + } while (1); + + for (i = index; i < index + count; i++) + table->base[i].present = 1; + + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = mask; + + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + + return index; +} + +static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) +{ + struct qi_desc desc; + + desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) + | QI_IEC_SELECTIVE; + desc.high = 0; + + return qi_submit_sync(&desc, iommu); +} + +int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + unsigned long flags; + int index; + + if (!irq_iommu) + return -1; + + spin_lock_irqsave(&irq_2_ir_lock, flags); + *sub_handle = irq_iommu->sub_handle; + index = irq_iommu->irte_index; + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + return index; +} + +int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) +{ + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + unsigned long flags; + + if (!irq_iommu) + return -1; + + spin_lock_irqsave(&irq_2_ir_lock, flags); + + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = subhandle; + irq_iommu->irte_mask = 0; + + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + + return 0; +} + +int modify_irte(int irq, struct irte *irte_modified) +{ + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + struct intel_iommu *iommu; + unsigned long flags; + struct irte *irte; + int rc, index; + + if (!irq_iommu) + return -1; + + spin_lock_irqsave(&irq_2_ir_lock, flags); + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + irte = &iommu->ir_table->base[index]; + + set_64bit(&irte->low, irte_modified->low); + set_64bit(&irte->high, irte_modified->high); + __iommu_flush_cache(iommu, irte, sizeof(*irte)); + + rc = qi_flush_iec(iommu, index, 0); + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + + return rc; +} + +struct intel_iommu *map_hpet_to_ir(u8 hpet_id) +{ + int i; + + for (i = 0; i < MAX_HPET_TBS; i++) + if (ir_hpet[i].id == hpet_id) + return ir_hpet[i].iommu; + return NULL; +} + +struct intel_iommu *map_ioapic_to_ir(int apic) +{ + int i; + + for (i = 0; i < MAX_IO_APICS; i++) + if (ir_ioapic[i].id == apic) + return ir_ioapic[i].iommu; + return NULL; +} + +struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + struct dmar_drhd_unit *drhd; + + drhd = dmar_find_matched_drhd_unit(dev); + if (!drhd) + return NULL; + + return drhd->iommu; +} + +static int clear_entries(struct irq_2_iommu *irq_iommu) +{ + struct irte *start, *entry, *end; + struct intel_iommu *iommu; + int index; + + if (irq_iommu->sub_handle) + return 0; + + iommu = irq_iommu->iommu; + index = irq_iommu->irte_index + irq_iommu->sub_handle; + + start = 
iommu->ir_table->base + index; + end = start + (1 << irq_iommu->irte_mask); + + for (entry = start; entry < end; entry++) { + set_64bit(&entry->low, 0); + set_64bit(&entry->high, 0); + } + + return qi_flush_iec(iommu, index, irq_iommu->irte_mask); +} + +int free_irte(int irq) +{ + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); + unsigned long flags; + int rc; + + if (!irq_iommu) + return -1; + + spin_lock_irqsave(&irq_2_ir_lock, flags); + + rc = clear_entries(irq_iommu); + + irq_iommu->iommu = NULL; + irq_iommu->irte_index = 0; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = 0; + + spin_unlock_irqrestore(&irq_2_ir_lock, flags); + + return rc; +} + +/* + * source validation type + */ +#define SVT_NO_VERIFY 0x0 /* no verification is required */ +#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ +#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ + +/* + * source-id qualifier + */ +#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ +#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore + * the third least significant bit + */ +#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore + * the second and third least significant bits + */ +#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore + * the least three significant bits + */ + +/* + * set SVT, SQ and SID fields of irte to verify + * source ids of interrupt requests + */ +static void set_irte_sid(struct irte *irte, unsigned int svt, + unsigned int sq, unsigned int sid) +{ + if (disable_sourceid_checking) + svt = SVT_NO_VERIFY; + irte->svt = svt; + irte->sq = sq; + irte->sid = sid; +} + +int set_ioapic_sid(struct irte *irte, int apic) +{ + int i; + u16 sid = 0; + + if (!irte) + return -1; + + for (i = 0; i < MAX_IO_APICS; i++) { + if (ir_ioapic[i].id == apic) { + sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; + break; + } + } + + if (sid == 0) { + pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); + return -1; + } + + set_irte_sid(irte, 1, 0, sid); + + return 0; +} + +int set_hpet_sid(struct irte *irte, u8 id) +{ + int i; + u16 sid = 0; + + if (!irte) + return -1; + + for (i = 0; i < MAX_HPET_TBS; i++) { + if (ir_hpet[i].id == id) { + sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; + break; + } + } + + if (sid == 0) { + pr_warning("Failed to set source-id of HPET block (%d)\n", id); + return -1; + } + + /* + * Should really use SQ_ALL_16. Some platforms are broken. + * While we figure out the right quirks for these broken platforms, use + * SQ_13_IGNORE_3 for now. 
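The set_*_sid() helpers here and below all build the same 16-bit source-id: requester bus number in the high byte, devfn in the low byte. A minimal illustration (the example bus/slot/function values are invented):

    #include <stdio.h>

    #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))

    int main(void)
    {
        unsigned int bus = 0x3a;                    /* invented example values */
        unsigned int devfn = PCI_DEVFN(2, 1);
        unsigned short sid = (bus << 8) | devfn;    /* as in set_ioapic_sid() */
        printf("sid = %#06x\n", sid);               /* prints sid = 0x3a11 */
        return 0;
    }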
+ */ + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); + + return 0; +} + +int set_msi_sid(struct irte *irte, struct pci_dev *dev) +{ + struct pci_dev *bridge; + + if (!irte || !dev) + return -1; + + /* PCIe device or Root Complex integrated PCI device */ + if (pci_is_pcie(dev) || !dev->bus->parent) { + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, + (dev->bus->number << 8) | dev->devfn); + return 0; + } + + bridge = pci_find_upstream_pcie_bridge(dev); + if (bridge) { + if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ + set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, + (bridge->bus->number << 8) | dev->bus->number); + else /* this is a legacy PCI bridge */ + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, + (bridge->bus->number << 8) | bridge->devfn); + } + + return 0; +} + +static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) +{ + u64 addr; + u32 sts; + unsigned long flags; + + addr = virt_to_phys((void *)iommu->ir_table->base); + + spin_lock_irqsave(&iommu->register_lock, flags); + + dmar_writeq(iommu->reg + DMAR_IRTA_REG, + (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); + + /* Set interrupt-remapping table pointer */ + iommu->gcmd |= DMA_GCMD_SIRTP; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRTPS), sts); + spin_unlock_irqrestore(&iommu->register_lock, flags); + + /* + * global invalidation of interrupt entry cache before enabling + * interrupt-remapping. + */ + qi_global_iec(iommu); + + spin_lock_irqsave(&iommu->register_lock, flags); + + /* Enable interrupt-remapping */ + iommu->gcmd |= DMA_GCMD_IRE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRES), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + + +static int setup_intr_remapping(struct intel_iommu *iommu, int mode) +{ + struct ir_table *ir_table; + struct page *pages; + + ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), + GFP_ATOMIC); + + if (!iommu->ir_table) + return -ENOMEM; + + pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, + INTR_REMAP_PAGE_ORDER); + + if (!pages) { + printk(KERN_ERR "failed to allocate pages of order %d\n", + INTR_REMAP_PAGE_ORDER); + kfree(iommu->ir_table); + return -ENOMEM; + } + + ir_table->base = page_address(pages); + + iommu_set_intr_remapping(iommu, mode); + return 0; +} + +/* + * Disable Interrupt Remapping. + */ +static void iommu_disable_intr_remapping(struct intel_iommu *iommu) +{ + unsigned long flags; + u32 sts; + + if (!ecap_ir_support(iommu->ecap)) + return; + + /* + * global invalidation of interrupt entry cache before disabling + * interrupt-remapping. 
+ */ + qi_global_iec(iommu); + + spin_lock_irqsave(&iommu->register_lock, flags); + + sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); + if (!(sts & DMA_GSTS_IRES)) + goto end; + + iommu->gcmd &= ~DMA_GCMD_IRE; + writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, !(sts & DMA_GSTS_IRES), sts); + +end: + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + +int __init intr_remapping_supported(void) +{ + struct dmar_drhd_unit *drhd; + + if (disable_intremap) + return 0; + + if (!dmar_ir_support()) + return 0; + + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + return 0; + } + + return 1; +} + +int __init enable_intr_remapping(int eim) +{ + struct dmar_drhd_unit *drhd; + int setup = 0; + + if (parse_ioapics_under_ir() != 1) { + printk(KERN_INFO "Not enable interrupt remapping\n"); + return -1; + } + + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + /* + * If the queued invalidation is already initialized, + * shouldn't disable it. + */ + if (iommu->qi) + continue; + + /* + * Clear previous faults. + */ + dmar_fault(-1, iommu); + + /* + * Disable intr remapping and queued invalidation, if already + * enabled prior to OS handover. + */ + iommu_disable_intr_remapping(iommu); + + dmar_disable_qi(iommu); + } + + /* + * check for the Interrupt-remapping support + */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (eim && !ecap_eim_support(iommu->ecap)) { + printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " + " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); + return -1; + } + } + + /* + * Enable queued invalidation for all the DRHD's. + */ + for_each_drhd_unit(drhd) { + int ret; + struct intel_iommu *iommu = drhd->iommu; + ret = dmar_enable_qi(iommu); + + if (ret) { + printk(KERN_ERR "DRHD %Lx: failed to enable queued, " + " invalidation, ecap %Lx, ret %d\n", + drhd->reg_base_addr, iommu->ecap, ret); + return -1; + } + } + + /* + * Setup Interrupt-remapping for all the DRHD's now. + */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (setup_intr_remapping(iommu, eim)) + goto error; + + setup = 1; + } + + if (!setup) + goto error; + + intr_remapping_enabled = 1; + + return 0; + +error: + /* + * handle error condition gracefully here! + */ + return -1; +} + +static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, + struct intel_iommu *iommu) +{ + struct acpi_dmar_pci_path *path; + u8 bus; + int count; + + bus = scope->bus; + path = (struct acpi_dmar_pci_path *)(scope + 1); + count = (scope->length - sizeof(struct acpi_dmar_device_scope)) + / sizeof(struct acpi_dmar_pci_path); + + while (--count > 0) { + /* + * Access PCI directly due to the PCI + * subsystem isn't initialized yet. 
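iommu_disable_intr_remapping() above and its enable counterpart follow the same hardware handshake: update the shadow gcmd, write it to DMAR_GCMD_REG, then spin via IOMMU_WAIT_OP until the status register reflects the change. A toy model of that write-then-poll handshake (the status register is simulated and the bit position is illustrative):

    #include <stdio.h>

    #define GCMD_IRE_SKETCH (1u << 25)   /* illustrative bit position */

    static unsigned int fake_gsts;       /* simulated DMAR_GSTS_REG */

    static void writel_sketch(unsigned int v)
    {
        fake_gsts = v;                   /* model: hardware acks immediately */
    }

    int main(void)
    {
        unsigned int gcmd = GCMD_IRE_SKETCH, sts;

        gcmd &= ~GCMD_IRE_SKETCH;        /* request: disable remapping */
        writel_sketch(gcmd);
        do {                             /* IOMMU_WAIT_OP analogue */
            sts = fake_gsts;
        } while (sts & GCMD_IRE_SKETCH); /* wait for the status bit to clear */
        printf("remapping disabled\n");
        return 0;
    }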
+ */
+		bus = read_pci_config_byte(bus, path->dev, path->fn,
+					   PCI_SECONDARY_BUS);
+		path++;
+	}
+	ir_hpet[ir_hpet_num].bus = bus;
+	ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_hpet[ir_hpet_num].iommu = iommu;
+	ir_hpet[ir_hpet_num].id = scope->enumeration_id;
+	ir_hpet_num++;
+}
+
+static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
+				      struct intel_iommu *iommu)
+{
+	struct acpi_dmar_pci_path *path;
+	u8 bus;
+	int count;
+
+	bus = scope->bus;
+	path = (struct acpi_dmar_pci_path *)(scope + 1);
+	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
+		/ sizeof(struct acpi_dmar_pci_path);
+
+	while (--count > 0) {
+		/*
+		 * Access PCI config space directly; the PCI
+		 * subsystem isn't initialized yet.
+		 */
+		bus = read_pci_config_byte(bus, path->dev, path->fn,
+					   PCI_SECONDARY_BUS);
+		path++;
+	}
+
+	ir_ioapic[ir_ioapic_num].bus = bus;
+	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
+	ir_ioapic[ir_ioapic_num].iommu = iommu;
+	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
+	ir_ioapic_num++;
+}
+
+static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
+				      struct intel_iommu *iommu)
+{
+	struct acpi_dmar_hardware_unit *drhd;
+	struct acpi_dmar_device_scope *scope;
+	void *start, *end;
+
+	drhd = (struct acpi_dmar_hardware_unit *)header;
+
+	start = (void *)(drhd + 1);
+	end = ((void *)drhd) + header->length;
+
+	while (start < end) {
+		scope = start;
+		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
+			if (ir_ioapic_num == MAX_IO_APICS) {
+				printk(KERN_WARNING "Exceeded Max IO APICS\n");
+				return -1;
+			}
+
+			printk(KERN_INFO "IOAPIC id %d under DRHD base "
+			       " 0x%Lx IOMMU %d\n", scope->enumeration_id,
+			       drhd->address, iommu->seq_id);
+
+			ir_parse_one_ioapic_scope(scope, iommu);
+		} else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
+			if (ir_hpet_num == MAX_HPET_TBS) {
+				printk(KERN_WARNING "Exceeded Max HPET blocks\n");
+				return -1;
+			}
+
+			printk(KERN_INFO "HPET id %d under DRHD base"
+			       " 0x%Lx\n", scope->enumeration_id,
+			       drhd->address);
+
+			ir_parse_one_hpet_scope(scope, iommu);
+		}
+		start += scope->length;
+	}
+
+	return 0;
+}
+
+/*
+ * Finds the association between IOAPICs and their Interrupt-remapping
+ * hardware units.
+ */
+int __init parse_ioapics_under_ir(void)
+{
+	struct dmar_drhd_unit *drhd;
+	int ir_supported = 0;
+
+	for_each_drhd_unit(drhd) {
+		struct intel_iommu *iommu = drhd->iommu;
+
+		if (ecap_ir_support(iommu->ecap)) {
+			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
+				return -1;
+
+			ir_supported = 1;
+		}
+	}
+
+	if (ir_supported && ir_ioapic_num != nr_ioapics) {
+		printk(KERN_WARNING
+		       "Not all IO-APICs listed under remapping hardware\n");
+		return -1;
+	}
+
+	return ir_supported;
+}
+
+void disable_intr_remapping(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+
+	/*
+	 * Disable Interrupt-remapping for all the DRHD's now.
+	 */
+	for_each_iommu(iommu, drhd) {
+		if (!ecap_ir_support(iommu->ecap))
+			continue;
+
+		iommu_disable_intr_remapping(iommu);
+	}
+}
+
+int reenable_intr_remapping(int eim)
+{
+	struct dmar_drhd_unit *drhd;
+	int setup = 0;
+	struct intel_iommu *iommu = NULL;
+
+	for_each_iommu(iommu, drhd)
+		if (iommu->qi)
+			dmar_reenable_qi(iommu);
+
+	/*
+	 * Setup Interrupt-remapping for all the DRHD's now.
+ */ + for_each_iommu(iommu, drhd) { + if (!ecap_ir_support(iommu->ecap)) + continue; + + /* Set up interrupt remapping for iommu.*/ + iommu_set_intr_remapping(iommu, eim); + setup = 1; + } + + if (!setup) + goto error; + + return 0; + +error: + /* + * handle error condition gracefully here! + */ + return -1; +} + diff --git a/drivers/iommu/intr_remapping.h b/drivers/iommu/intr_remapping.h new file mode 100644 index 000000000000..5662fecfee60 --- /dev/null +++ b/drivers/iommu/intr_remapping.h @@ -0,0 +1,17 @@ +#include + +struct ioapic_scope { + struct intel_iommu *iommu; + unsigned int id; + unsigned int bus; /* PCI bus number */ + unsigned int devfn; /* PCI devfn number */ +}; + +struct hpet_scope { + struct intel_iommu *iommu; + u8 id; + unsigned int bus; + unsigned int devfn; +}; + +#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c new file mode 100644 index 000000000000..c5c274ab5c5a --- /dev/null +++ b/drivers/iommu/iova.c @@ -0,0 +1,435 @@ +/* + * Copyright © 2006-2009, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + * Author: Anil S Keshavamurthy + */ + +#include + +void +init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) +{ + spin_lock_init(&iovad->iova_rbtree_lock); + iovad->rbroot = RB_ROOT; + iovad->cached32_node = NULL; + iovad->dma_32bit_pfn = pfn_32bit; +} + +static struct rb_node * +__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) +{ + if ((*limit_pfn != iovad->dma_32bit_pfn) || + (iovad->cached32_node == NULL)) + return rb_last(&iovad->rbroot); + else { + struct rb_node *prev_node = rb_prev(iovad->cached32_node); + struct iova *curr_iova = + container_of(iovad->cached32_node, struct iova, node); + *limit_pfn = curr_iova->pfn_lo - 1; + return prev_node; + } +} + +static void +__cached_rbnode_insert_update(struct iova_domain *iovad, + unsigned long limit_pfn, struct iova *new) +{ + if (limit_pfn != iovad->dma_32bit_pfn) + return; + iovad->cached32_node = &new->node; +} + +static void +__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) +{ + struct iova *cached_iova; + struct rb_node *curr; + + if (!iovad->cached32_node) + return; + curr = iovad->cached32_node; + cached_iova = container_of(curr, struct iova, node); + + if (free->pfn_lo >= cached_iova->pfn_lo) { + struct rb_node *node = rb_next(&free->node); + struct iova *iova = container_of(node, struct iova, node); + + /* only cache if it's below 32bit pfn */ + if (node && iova->pfn_lo < iovad->dma_32bit_pfn) + iovad->cached32_node = node; + else + iovad->cached32_node = NULL; + } +} + +/* Computes the padding size required, to make the + * the start address naturally aligned on its size + */ +static int +iova_get_pad_size(int size, unsigned int limit_pfn) +{ + unsigned int pad_size = 0; + unsigned int order = ilog2(size); + + if (order) + pad_size = (limit_pfn 
+ 1) % (1 << order); + + return pad_size; +} + +static int __alloc_and_insert_iova_range(struct iova_domain *iovad, + unsigned long size, unsigned long limit_pfn, + struct iova *new, bool size_aligned) +{ + struct rb_node *prev, *curr = NULL; + unsigned long flags; + unsigned long saved_pfn; + unsigned int pad_size = 0; + + /* Walk the tree backwards */ + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); + saved_pfn = limit_pfn; + curr = __get_cached_rbnode(iovad, &limit_pfn); + prev = curr; + while (curr) { + struct iova *curr_iova = container_of(curr, struct iova, node); + + if (limit_pfn < curr_iova->pfn_lo) + goto move_left; + else if (limit_pfn < curr_iova->pfn_hi) + goto adjust_limit_pfn; + else { + if (size_aligned) + pad_size = iova_get_pad_size(size, limit_pfn); + if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) + break; /* found a free slot */ + } +adjust_limit_pfn: + limit_pfn = curr_iova->pfn_lo - 1; +move_left: + prev = curr; + curr = rb_prev(curr); + } + + if (!curr) { + if (size_aligned) + pad_size = iova_get_pad_size(size, limit_pfn); + if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + return -ENOMEM; + } + } + + /* pfn_lo will point to size aligned address if size_aligned is set */ + new->pfn_lo = limit_pfn - (size + pad_size) + 1; + new->pfn_hi = new->pfn_lo + size - 1; + + /* Insert the new_iova into domain rbtree by holding writer lock */ + /* Add new node and rebalance tree. */ + { + struct rb_node **entry, *parent = NULL; + + /* If we have 'prev', it's a valid place to start the + insertion. Otherwise, start from the root. */ + if (prev) + entry = &prev; + else + entry = &iovad->rbroot.rb_node; + + /* Figure out where to put new node */ + while (*entry) { + struct iova *this = container_of(*entry, + struct iova, node); + parent = *entry; + + if (new->pfn_lo < this->pfn_lo) + entry = &((*entry)->rb_left); + else if (new->pfn_lo > this->pfn_lo) + entry = &((*entry)->rb_right); + else + BUG(); /* this should not happen */ + } + + /* Add new node and rebalance tree. */ + rb_link_node(&new->node, parent, entry); + rb_insert_color(&new->node, &iovad->rbroot); + } + __cached_rbnode_insert_update(iovad, saved_pfn, new); + + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); + + + return 0; +} + +static void +iova_insert_rbtree(struct rb_root *root, struct iova *iova) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + /* Figure out where to put new node */ + while (*new) { + struct iova *this = container_of(*new, struct iova, node); + parent = *new; + + if (iova->pfn_lo < this->pfn_lo) + new = &((*new)->rb_left); + else if (iova->pfn_lo > this->pfn_lo) + new = &((*new)->rb_right); + else + BUG(); /* this should not happen */ + } + /* Add new node and rebalance tree. */ + rb_link_node(&iova->node, parent, new); + rb_insert_color(&iova->node, root); +} + +/** + * alloc_iova - allocates an iova + * @iovad - iova domain in question + * @size - size of page frames to allocate + * @limit_pfn - max limit address + * @size_aligned - set if size_aligned address range is required + * This function allocates an iova in the range limit_pfn to IOVA_START_PFN + * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned + * flag is set then the allocated address iova->pfn_lo will be naturally + * aligned on roundup_power_of_two(size). 
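When size_aligned is set, __alloc_and_insert_iova_range() places pfn_lo on a multiple of the power-of-two-rounded size by padding down from limit_pfn, using the iova_get_pad_size() modulus above. A standalone check of that arithmetic (values chosen purely for illustration):

    #include <stdio.h>

    static unsigned int pad_for_alignment(unsigned int size, unsigned int limit_pfn)
    {
        unsigned int order = 0, pad = 0;

        while ((1u << (order + 1)) <= size)   /* order = ilog2(size) */
            order++;
        if (order)
            pad = (limit_pfn + 1) % (1u << order);
        return pad;
    }

    int main(void)
    {
        /* allocating 8 pfns below limit 1029: pad 6 so that
           pfn_lo = limit - (size + pad) + 1 = 1016, a multiple of 8 */
        unsigned int size = 8, limit = 1029;
        unsigned int pad = pad_for_alignment(size, limit);

        printf("pad=%u pfn_lo=%u\n", pad, limit - (size + pad) + 1);
        return 0;
    }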
+ */
+struct iova *
+alloc_iova(struct iova_domain *iovad, unsigned long size,
+	unsigned long limit_pfn,
+	bool size_aligned)
+{
+	struct iova *new_iova;
+	int ret;
+
+	new_iova = alloc_iova_mem();
+	if (!new_iova)
+		return NULL;
+
+	/* If size aligned is set then round the size up
+	 * to the next power of two.
+	 */
+	if (size_aligned)
+		size = __roundup_pow_of_two(size);
+
+	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
+			new_iova, size_aligned);
+
+	if (ret) {
+		free_iova_mem(new_iova);
+		return NULL;
+	}
+
+	return new_iova;
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad - iova domain in question.
+ * @pfn - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+	unsigned long flags;
+	struct rb_node *node;
+
+	/* Take the lock so that no other thread is manipulating the rbtree */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	node = iovad->rbroot.rb_node;
+	while (node) {
+		struct iova *iova = container_of(node, struct iova, node);
+
+		/* If pfn falls within iova's range, return iova */
+		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
+			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+			/* We are not holding the lock while this iova
+			 * is referenced by the caller as the same thread
+			 * which called this function also calls __free_iova()
+			 * and it is by design that only one thread can possibly
+			 * reference a particular iova and hence no conflict.
+			 */
+			return iova;
+		}
+
+		if (pfn < iova->pfn_lo)
+			node = node->rb_left;
+		else if (pfn > iova->pfn_lo)
+			node = node->rb_right;
+	}
+
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return NULL;
+}
+
+/**
+ * __free_iova - frees the given iova
+ * @iovad: iova domain in question.
+ * @iova: iova in question.
+ * Frees the given iova belonging to the given domain
+ */
+void
+__free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	__cached_rbnode_delete_update(iovad, iova);
+	rb_erase(&iova->node, &iovad->rbroot);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	free_iova_mem(iova);
+}
+
+/**
+ * free_iova - finds and frees the iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * This function finds an iova for a given pfn and then
+ * frees the iova from that domain.
+ */
+void
+free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+	struct iova *iova = find_iova(iovad, pfn);
+	if (iova)
+		__free_iova(iovad, iova);
+
+}
+
+/**
+ * put_iova_domain - destroys the iova domain
+ * @iovad: - iova domain in question.
+ * All the iova's in that domain are destroyed.
+ */
+void put_iova_domain(struct iova_domain *iovad)
+{
+	struct rb_node *node;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	node = rb_first(&iovad->rbroot);
+	while (node) {
+		struct iova *iova = container_of(node, struct iova, node);
+		rb_erase(node, &iovad->rbroot);
+		free_iova_mem(iova);
+		node = rb_first(&iovad->rbroot);
+	}
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+}
+
+static int
+__is_range_overlap(struct rb_node *node,
+	unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova = container_of(node, struct iova, node);
+
+	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
+		return 1;
+	return 0;
+}
+
+static struct iova *
+__insert_new_range(struct iova_domain *iovad,
+	unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct iova *iova;
+
+	iova = alloc_iova_mem();
+	if (!iova)
+		return iova;
+
+	iova->pfn_hi = pfn_hi;
+	iova->pfn_lo = pfn_lo;
+	iova_insert_rbtree(&iovad->rbroot, iova);
+	return iova;
+}
+
+static void
+__adjust_overlap_range(struct iova *iova,
+	unsigned long *pfn_lo, unsigned long *pfn_hi)
+{
+	if (*pfn_lo < iova->pfn_lo)
+		iova->pfn_lo = *pfn_lo;
+	if (*pfn_hi > iova->pfn_hi)
+		*pfn_lo = iova->pfn_hi + 1;
+}
+
+/**
+ * reserve_iova - reserves an iova in the given range
+ * @iovad: - iova domain pointer
+ * @pfn_lo: - lower page frame address
+ * @pfn_hi: - higher pfn address
+ * This function reserves the address range from pfn_lo to pfn_hi so
+ * that this address is not dished out as part of alloc_iova.
+ */
+struct iova *
+reserve_iova(struct iova_domain *iovad,
+	unsigned long pfn_lo, unsigned long pfn_hi)
+{
+	struct rb_node *node;
+	unsigned long flags;
+	struct iova *iova;
+	unsigned int overlap = 0;
+
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
+		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
+			iova = container_of(node, struct iova, node);
+			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
+			if ((pfn_lo >= iova->pfn_lo) &&
+				(pfn_hi <= iova->pfn_hi))
+				goto finish;
+			overlap = 1;
+
+		} else if (overlap)
+			break;
+	}
+
+	/* We are here either because this is the first reserved node
+	 * or because we need to insert the remaining non-overlapping
+	 * address range
+	 */
+	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
+finish:
+
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return iova;
+}
+
+/**
+ * copy_reserved_iova - copies the reserved ranges between domains
+ * @from: - source domain from where to copy
+ * @to: - destination domain where to copy
+ * This function copies reserved iova's from one domain to
+ * the other.
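reserve_iova() walks the whole rbtree and relies on the closed-interval test in __is_range_overlap(): two ranges touch when each one's low end is at or below the other's high end. A quick standalone check of that predicate (the pfn values are invented):

    #include <stdio.h>

    static int ranges_overlap(unsigned long lo1, unsigned long hi1,
                              unsigned long lo2, unsigned long hi2)
    {
        return lo1 <= hi2 && hi1 >= lo2;   /* same test as __is_range_overlap */
    }

    int main(void)
    {
        printf("overlap=%d\n", ranges_overlap(0xa0, 0xbf, 0xb8, 0xbf)); /* 1 */
        printf("overlap=%d\n", ranges_overlap(0xa0, 0xbf, 0xc0, 0xff)); /* 0 */
        return 0;
    }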
+
+/**
+ * copy_reserved_iova - copies the reserved iovas between domains
+ * @from: source domain from where to copy
+ * @to: destination domain where to copy
+ * This function copies reserved iovas from one domain to
+ * another.
+ */
+void
+copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
+{
+	unsigned long flags;
+	struct rb_node *node;
+
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
+	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
+		struct iova *iova = container_of(node, struct iova, node);
+		struct iova *new_iova;
+		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
+		if (!new_iova)
+			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
+				iova->pfn_lo, iova->pfn_hi);
+	}
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
+}
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 094308e41be5..825c02b40daa 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -29,11 +29,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
 # Build the Hypertransport interrupt support
 obj-$(CONFIG_HT_IRQ) += htirq.o
 
-# Build Intel IOMMU support
-obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o
-
-obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o
-
 obj-$(CONFIG_PCI_IOV) += iov.o
 
 #
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
deleted file mode 100644
index 3dc9befa5aec..000000000000
--- a/drivers/pci/dmar.c
+++ /dev/null
@@ -1,1461 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj
- * Author: Shaohua Li
- * Author: Anil S Keshavamurthy
- *
- * This file implements early detection/parsing of Remapping Devices
- * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
- * tables.
- *
- * These routines are used by both DMA-remapping and Interrupt-remapping
- */
-
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/iova.h>
-#include <linux/intel-iommu.h>
-#include <linux/timer.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
-#include <asm/iommu_table.h>
-
-#define PREFIX "DMAR: "
-
-/* No locks are needed as DMA remapping hardware unit
- * list is constructed at boot time and hotplug of
- * these units is not supported by the architecture.
- */
-LIST_HEAD(dmar_drhd_units);
-
-static struct acpi_table_header * __initdata dmar_tbl;
-static acpi_size dmar_tbl_size;
-
-static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
-{
-	/*
-	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
-	 * at the very end.
-	 */
-	if (drhd->include_all)
-		list_add_tail(&drhd->list, &dmar_drhd_units);
-	else
-		list_add(&drhd->list, &dmar_drhd_units);
-}
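The head-vs-tail insertion above is load-bearing: lookups walk dmar_drhd_units front to back and take the first hit, so a catch-all INCLUDE_ALL unit must come last. A condensed sketch of that dependency (illustrative only; the real walk, including the PCI segment check elided here, is dmar_find_matched_drhd_unit() later in this file):

	/* Illustrative only: first match wins, so the INCLUDE_ALL unit
	 * must sit at the tail of dmar_drhd_units. */
	static struct dmar_drhd_unit *example_find_drhd(struct pci_dev *dev)
	{
		struct dmar_drhd_unit *dmaru;

		list_for_each_entry(dmaru, &dmar_drhd_units, list) {
			if (dmaru->include_all ||
			    dmar_pci_device_match(dmaru->devices,
						  dmaru->devices_cnt, dev))
				return dmaru;
		}
		return NULL;
	}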
-
-static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
-					   struct pci_dev **dev, u16 segment)
-{
-	struct pci_bus *bus;
-	struct pci_dev *pdev = NULL;
-	struct acpi_dmar_pci_path *path;
-	int count;
-
-	bus = pci_find_bus(segment, scope->bus);
-	path = (struct acpi_dmar_pci_path *)(scope + 1);
-	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-		/ sizeof(struct acpi_dmar_pci_path);
-
-	while (count) {
-		if (pdev)
-			pci_dev_put(pdev);
-		/*
-		 * Some BIOSes list non-existent devices in the DMAR table;
-		 * just ignore them.
-		 */
-		if (!bus) {
-			printk(KERN_WARNING
-			PREFIX "Device scope bus [%d] not found\n",
-			scope->bus);
-			break;
-		}
-		pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
-		if (!pdev) {
-			printk(KERN_WARNING PREFIX
-			"Device scope device [%04x:%02x:%02x.%02x] not found\n",
-				segment, bus->number, path->dev, path->fn);
-			break;
-		}
-		path++;
-		count--;
-		bus = pdev->subordinate;
-	}
-	if (!pdev) {
-		printk(KERN_WARNING PREFIX
-		"Device scope device [%04x:%02x:%02x.%02x] not found\n",
-			segment, scope->bus, path->dev, path->fn);
-		*dev = NULL;
-		return 0;
-	}
-	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
-	     pdev->subordinate) || (scope->entry_type ==
-	     ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
-		pci_dev_put(pdev);
-		printk(KERN_WARNING PREFIX
-			"Device scope type does not match for %s\n",
-			pci_name(pdev));
-		return -EINVAL;
-	}
-	*dev = pdev;
-	return 0;
-}
-
-static int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
-				       struct pci_dev ***devices, u16 segment)
-{
-	struct acpi_dmar_device_scope *scope;
-	void *tmp = start;
-	int index;
-	int ret;
-
-	*cnt = 0;
-	while (start < end) {
-		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
-			(*cnt)++;
-		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-			printk(KERN_WARNING PREFIX
-			       "Unsupported device scope\n");
-		}
-		start += scope->length;
-	}
-	if (*cnt == 0)
-		return 0;
-
-	*devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL);
-	if (!*devices)
-		return -ENOMEM;
-
-	start = tmp;
-	index = 0;
-	while (start < end) {
-		scope = start;
-		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
-		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) {
-			ret = dmar_parse_one_dev_scope(scope,
-				&(*devices)[index], segment);
-			if (ret) {
-				kfree(*devices);
-				return ret;
-			}
-			index++;
-		}
-		start += scope->length;
-	}
-
-	return 0;
-}
-
-/**
- * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
- * structure which uniquely represents one DMA remapping hardware unit
- * present in the platform
- */
-static int __init
-dmar_parse_one_drhd(struct acpi_dmar_header *header)
-{
-	struct acpi_dmar_hardware_unit *drhd;
-	struct dmar_drhd_unit *dmaru;
-	int ret = 0;
-
-	drhd = (struct acpi_dmar_hardware_unit *)header;
-	dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL);
-	if (!dmaru)
-		return -ENOMEM;
-
-	dmaru->hdr = header;
-	dmaru->reg_base_addr = drhd->address;
-	dmaru->segment = drhd->segment;
-	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
-
-	ret = alloc_iommu(dmaru);
-	if (ret) {
-		kfree(dmaru);
-		return ret;
-	}
-	dmar_register_drhd_unit(dmaru);
-	return 0;
-}
-
-static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
-{
-	struct acpi_dmar_hardware_unit *drhd;
-	int ret = 0;
-
-	drhd = 
(struct acpi_dmar_hardware_unit *) dmaru->hdr; - - if (dmaru->include_all) - return 0; - - ret = dmar_parse_dev_scope((void *)(drhd + 1), - ((void *)drhd) + drhd->header.length, - &dmaru->devices_cnt, &dmaru->devices, - drhd->segment); - if (ret) { - list_del(&dmaru->list); - kfree(dmaru); - } - return ret; -} - -#ifdef CONFIG_DMAR -LIST_HEAD(dmar_rmrr_units); - -static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) -{ - list_add(&rmrr->list, &dmar_rmrr_units); -} - - -static int __init -dmar_parse_one_rmrr(struct acpi_dmar_header *header) -{ - struct acpi_dmar_reserved_memory *rmrr; - struct dmar_rmrr_unit *rmrru; - - rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); - if (!rmrru) - return -ENOMEM; - - rmrru->hdr = header; - rmrr = (struct acpi_dmar_reserved_memory *)header; - rmrru->base_address = rmrr->base_address; - rmrru->end_address = rmrr->end_address; - - dmar_register_rmrr_unit(rmrru); - return 0; -} - -static int __init -rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) -{ - struct acpi_dmar_reserved_memory *rmrr; - int ret; - - rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; - ret = dmar_parse_dev_scope((void *)(rmrr + 1), - ((void *)rmrr) + rmrr->header.length, - &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); - - if (ret || (rmrru->devices_cnt == 0)) { - list_del(&rmrru->list); - kfree(rmrru); - } - return ret; -} - -static LIST_HEAD(dmar_atsr_units); - -static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) -{ - struct acpi_dmar_atsr *atsr; - struct dmar_atsr_unit *atsru; - - atsr = container_of(hdr, struct acpi_dmar_atsr, header); - atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); - if (!atsru) - return -ENOMEM; - - atsru->hdr = hdr; - atsru->include_all = atsr->flags & 0x1; - - list_add(&atsru->list, &dmar_atsr_units); - - return 0; -} - -static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) -{ - int rc; - struct acpi_dmar_atsr *atsr; - - if (atsru->include_all) - return 0; - - atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); - rc = dmar_parse_dev_scope((void *)(atsr + 1), - (void *)atsr + atsr->header.length, - &atsru->devices_cnt, &atsru->devices, - atsr->segment); - if (rc || !atsru->devices_cnt) { - list_del(&atsru->list); - kfree(atsru); - } - - return rc; -} - -int dmar_find_matched_atsr_unit(struct pci_dev *dev) -{ - int i; - struct pci_bus *bus; - struct acpi_dmar_atsr *atsr; - struct dmar_atsr_unit *atsru; - - dev = pci_physfn(dev); - - list_for_each_entry(atsru, &dmar_atsr_units, list) { - atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); - if (atsr->segment == pci_domain_nr(dev->bus)) - goto found; - } - - return 0; - -found: - for (bus = dev->bus; bus; bus = bus->parent) { - struct pci_dev *bridge = bus->self; - - if (!bridge || !pci_is_pcie(bridge) || - bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) - return 0; - - if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { - for (i = 0; i < atsru->devices_cnt; i++) - if (atsru->devices[i] == bridge) - return 1; - break; - } - } - - if (atsru->include_all) - return 1; - - return 0; -} -#endif - -#ifdef CONFIG_ACPI_NUMA -static int __init -dmar_parse_one_rhsa(struct acpi_dmar_header *header) -{ - struct acpi_dmar_rhsa *rhsa; - struct dmar_drhd_unit *drhd; - - rhsa = (struct acpi_dmar_rhsa *)header; - for_each_drhd_unit(drhd) { - if (drhd->reg_base_addr == rhsa->base_address) { - int node = acpi_map_pxm_to_node(rhsa->proximity_domain); - - if (!node_online(node)) - node = -1; - drhd->iommu->node = node; - return 0; - } - } - WARN_TAINT( - 1, 
TAINT_FIRMWARE_WORKAROUND, - "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - drhd->reg_base_addr, - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); - - return 0; -} -#endif - -static void __init -dmar_table_print_dmar_entry(struct acpi_dmar_header *header) -{ - struct acpi_dmar_hardware_unit *drhd; - struct acpi_dmar_reserved_memory *rmrr; - struct acpi_dmar_atsr *atsr; - struct acpi_dmar_rhsa *rhsa; - - switch (header->type) { - case ACPI_DMAR_TYPE_HARDWARE_UNIT: - drhd = container_of(header, struct acpi_dmar_hardware_unit, - header); - printk (KERN_INFO PREFIX - "DRHD base: %#016Lx flags: %#x\n", - (unsigned long long)drhd->address, drhd->flags); - break; - case ACPI_DMAR_TYPE_RESERVED_MEMORY: - rmrr = container_of(header, struct acpi_dmar_reserved_memory, - header); - printk (KERN_INFO PREFIX - "RMRR base: %#016Lx end: %#016Lx\n", - (unsigned long long)rmrr->base_address, - (unsigned long long)rmrr->end_address); - break; - case ACPI_DMAR_TYPE_ATSR: - atsr = container_of(header, struct acpi_dmar_atsr, header); - printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags); - break; - case ACPI_DMAR_HARDWARE_AFFINITY: - rhsa = container_of(header, struct acpi_dmar_rhsa, header); - printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n", - (unsigned long long)rhsa->base_address, - rhsa->proximity_domain); - break; - } -} - -/** - * dmar_table_detect - checks to see if the platform supports DMAR devices - */ -static int __init dmar_table_detect(void) -{ - acpi_status status = AE_OK; - - /* if we could find DMAR table, then there are DMAR devices */ - status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0, - (struct acpi_table_header **)&dmar_tbl, - &dmar_tbl_size); - - if (ACPI_SUCCESS(status) && !dmar_tbl) { - printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); - status = AE_NOT_FOUND; - } - - return (ACPI_SUCCESS(status) ? 1 : 0); -} - -/** - * parse_dmar_table - parses the DMA reporting table - */ -static int __init -parse_dmar_table(void) -{ - struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; - int ret = 0; - - /* - * Do it again, earlier dmar_tbl mapping could be mapped with - * fixed map. 
- */ - dmar_table_detect(); - - /* - * ACPI tables may not be DMA protected by tboot, so use DMAR copy - * SINIT saved in SinitMleData in TXT heap (which is DMA protected) - */ - dmar_tbl = tboot_get_dmar_table(dmar_tbl); - - dmar = (struct acpi_table_dmar *)dmar_tbl; - if (!dmar) - return -ENODEV; - - if (dmar->width < PAGE_SHIFT - 1) { - printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); - return -EINVAL; - } - - printk (KERN_INFO PREFIX "Host address width %d\n", - dmar->width + 1); - - entry_header = (struct acpi_dmar_header *)(dmar + 1); - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - printk(KERN_WARNING PREFIX - "Invalid 0-length structure\n"); - ret = -EINVAL; - break; - } - - dmar_table_print_dmar_entry(entry_header); - - switch (entry_header->type) { - case ACPI_DMAR_TYPE_HARDWARE_UNIT: - ret = dmar_parse_one_drhd(entry_header); - break; - case ACPI_DMAR_TYPE_RESERVED_MEMORY: -#ifdef CONFIG_DMAR - ret = dmar_parse_one_rmrr(entry_header); -#endif - break; - case ACPI_DMAR_TYPE_ATSR: -#ifdef CONFIG_DMAR - ret = dmar_parse_one_atsr(entry_header); -#endif - break; - case ACPI_DMAR_HARDWARE_AFFINITY: -#ifdef CONFIG_ACPI_NUMA - ret = dmar_parse_one_rhsa(entry_header); -#endif - break; - default: - printk(KERN_WARNING PREFIX - "Unknown DMAR structure type %d\n", - entry_header->type); - ret = 0; /* for forward compatibility */ - break; - } - if (ret) - break; - - entry_header = ((void *)entry_header + entry_header->length); - } - return ret; -} - -static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, - struct pci_dev *dev) -{ - int index; - - while (dev) { - for (index = 0; index < cnt; index++) - if (dev == devices[index]) - return 1; - - /* Check our parent */ - dev = dev->bus->self; - } - - return 0; -} - -struct dmar_drhd_unit * -dmar_find_matched_drhd_unit(struct pci_dev *dev) -{ - struct dmar_drhd_unit *dmaru = NULL; - struct acpi_dmar_hardware_unit *drhd; - - dev = pci_physfn(dev); - - list_for_each_entry(dmaru, &dmar_drhd_units, list) { - drhd = container_of(dmaru->hdr, - struct acpi_dmar_hardware_unit, - header); - - if (dmaru->include_all && - drhd->segment == pci_domain_nr(dev->bus)) - return dmaru; - - if (dmar_pci_device_match(dmaru->devices, - dmaru->devices_cnt, dev)) - return dmaru; - } - - return NULL; -} - -int __init dmar_dev_scope_init(void) -{ - struct dmar_drhd_unit *drhd, *drhd_n; - int ret = -ENODEV; - - list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { - ret = dmar_parse_dev(drhd); - if (ret) - return ret; - } - -#ifdef CONFIG_DMAR - { - struct dmar_rmrr_unit *rmrr, *rmrr_n; - struct dmar_atsr_unit *atsr, *atsr_n; - - list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { - ret = rmrr_parse_dev(rmrr); - if (ret) - return ret; - } - - list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) { - ret = atsr_parse_dev(atsr); - if (ret) - return ret; - } - } -#endif - - return ret; -} - - -int __init dmar_table_init(void) -{ - static int dmar_table_initialized; - int ret; - - if (dmar_table_initialized) - return 0; - - dmar_table_initialized = 1; - - ret = parse_dmar_table(); - if (ret) { - if (ret != -ENODEV) - printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); - return ret; - } - - if (list_empty(&dmar_drhd_units)) { - printk(KERN_INFO PREFIX "No DMAR devices found\n"); - return -ENODEV; - } - -#ifdef CONFIG_DMAR - if (list_empty(&dmar_rmrr_units)) - printk(KERN_INFO PREFIX "No RMRR 
found\n"); - - if (list_empty(&dmar_atsr_units)) - printk(KERN_INFO PREFIX "No ATSR found\n"); -#endif - - return 0; -} - -static void warn_invalid_dmar(u64 addr, const char *message) -{ - WARN_TAINT_ONCE( - 1, TAINT_FIRMWARE_WORKAROUND, - "Your BIOS is broken; DMAR reported at address %llx%s!\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - addr, message, - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); -} - -int __init check_zero_address(void) -{ - struct acpi_table_dmar *dmar; - struct acpi_dmar_header *entry_header; - struct acpi_dmar_hardware_unit *drhd; - - dmar = (struct acpi_table_dmar *)dmar_tbl; - entry_header = (struct acpi_dmar_header *)(dmar + 1); - - while (((unsigned long)entry_header) < - (((unsigned long)dmar) + dmar_tbl->length)) { - /* Avoid looping forever on bad ACPI tables */ - if (entry_header->length == 0) { - printk(KERN_WARNING PREFIX - "Invalid 0-length structure\n"); - return 0; - } - - if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { - void __iomem *addr; - u64 cap, ecap; - - drhd = (void *)entry_header; - if (!drhd->address) { - warn_invalid_dmar(0, ""); - goto failed; - } - - addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); - if (!addr ) { - printk("IOMMU: can't validate: %llx\n", drhd->address); - goto failed; - } - cap = dmar_readq(addr + DMAR_CAP_REG); - ecap = dmar_readq(addr + DMAR_ECAP_REG); - early_iounmap(addr, VTD_PAGE_SIZE); - if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) { - warn_invalid_dmar(drhd->address, - " returns all ones"); - goto failed; - } - } - - entry_header = ((void *)entry_header + entry_header->length); - } - return 1; - -failed: -#ifdef CONFIG_DMAR - dmar_disabled = 1; -#endif - return 0; -} - -int __init detect_intel_iommu(void) -{ - int ret; - - ret = dmar_table_detect(); - if (ret) - ret = check_zero_address(); - { -#ifdef CONFIG_INTR_REMAP - struct acpi_table_dmar *dmar; - - dmar = (struct acpi_table_dmar *) dmar_tbl; - if (ret && cpu_has_x2apic && dmar->flags & 0x1) - printk(KERN_INFO - "Queued invalidation will be enabled to support " - "x2apic and Intr-remapping.\n"); -#endif -#ifdef CONFIG_DMAR - if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { - iommu_detected = 1; - /* Make sure ACS will be enabled */ - pci_request_acs(); - } -#endif -#ifdef CONFIG_X86 - if (ret) - x86_init.iommu.iommu_init = intel_iommu_init; -#endif - } - early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); - dmar_tbl = NULL; - - return ret ? 
1 : -ENODEV; -} - - -int alloc_iommu(struct dmar_drhd_unit *drhd) -{ - struct intel_iommu *iommu; - int map_size; - u32 ver; - static int iommu_allocated = 0; - int agaw = 0; - int msagaw = 0; - - if (!drhd->reg_base_addr) { - warn_invalid_dmar(0, ""); - return -EINVAL; - } - - iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - if (!iommu) - return -ENOMEM; - - iommu->seq_id = iommu_allocated++; - sprintf (iommu->name, "dmar%d", iommu->seq_id); - - iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); - iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); - - if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) { - warn_invalid_dmar(drhd->reg_base_addr, " returns all ones"); - goto err_unmap; - } - -#ifdef CONFIG_DMAR - agaw = iommu_calculate_agaw(iommu); - if (agaw < 0) { - printk(KERN_ERR - "Cannot get a valid agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - goto err_unmap; - } - msagaw = iommu_calculate_max_sagaw(iommu); - if (msagaw < 0) { - printk(KERN_ERR - "Cannot get a valid max agaw for iommu (seq_id = %d)\n", - iommu->seq_id); - goto err_unmap; - } -#endif - iommu->agaw = agaw; - iommu->msagaw = msagaw; - - iommu->node = -1; - - /* the registers might be more than one page */ - map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), - cap_max_fault_reg_offset(iommu->cap)); - map_size = VTD_PAGE_ALIGN(map_size); - if (map_size > VTD_PAGE_SIZE) { - iounmap(iommu->reg); - iommu->reg = ioremap(drhd->reg_base_addr, map_size); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - } - - ver = readl(iommu->reg + DMAR_VER_REG); - pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr, - DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), - (unsigned long long)iommu->cap, - (unsigned long long)iommu->ecap); - - spin_lock_init(&iommu->register_lock); - - drhd->iommu = iommu; - return 0; - - err_unmap: - iounmap(iommu->reg); - error: - kfree(iommu); - return -1; -} - -void free_iommu(struct intel_iommu *iommu) -{ - if (!iommu) - return; - -#ifdef CONFIG_DMAR - free_dmar_iommu(iommu); -#endif - - if (iommu->reg) - iounmap(iommu->reg); - kfree(iommu); -} - -/* - * Reclaim all the submitted descriptors which have completed its work. - */ -static inline void reclaim_free_desc(struct q_inval *qi) -{ - while (qi->desc_status[qi->free_tail] == QI_DONE || - qi->desc_status[qi->free_tail] == QI_ABORT) { - qi->desc_status[qi->free_tail] = QI_FREE; - qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; - qi->free_cnt++; - } -} - -static int qi_check_fault(struct intel_iommu *iommu, int index) -{ - u32 fault; - int head, tail; - struct q_inval *qi = iommu->qi; - int wait_index = (index + 1) % QI_LENGTH; - - if (qi->desc_status[wait_index] == QI_ABORT) - return -EAGAIN; - - fault = readl(iommu->reg + DMAR_FSTS_REG); - - /* - * If IQE happens, the head points to the descriptor associated - * with the error. No new descriptors are fetched until the IQE - * is cleared. 
- */ - if (fault & DMA_FSTS_IQE) { - head = readl(iommu->reg + DMAR_IQH_REG); - if ((head >> DMAR_IQ_SHIFT) == index) { - printk(KERN_ERR "VT-d detected invalid descriptor: " - "low=%llx, high=%llx\n", - (unsigned long long)qi->desc[index].low, - (unsigned long long)qi->desc[index].high); - memcpy(&qi->desc[index], &qi->desc[wait_index], - sizeof(struct qi_desc)); - __iommu_flush_cache(iommu, &qi->desc[index], - sizeof(struct qi_desc)); - writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG); - return -EINVAL; - } - } - - /* - * If ITE happens, all pending wait_desc commands are aborted. - * No new descriptors are fetched until the ITE is cleared. - */ - if (fault & DMA_FSTS_ITE) { - head = readl(iommu->reg + DMAR_IQH_REG); - head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; - head |= 1; - tail = readl(iommu->reg + DMAR_IQT_REG); - tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH; - - writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG); - - do { - if (qi->desc_status[head] == QI_IN_USE) - qi->desc_status[head] = QI_ABORT; - head = (head - 2 + QI_LENGTH) % QI_LENGTH; - } while (head != tail); - - if (qi->desc_status[wait_index] == QI_ABORT) - return -EAGAIN; - } - - if (fault & DMA_FSTS_ICE) - writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG); - - return 0; -} - -/* - * Submit the queued invalidation descriptor to the remapping - * hardware unit and wait for its completion. - */ -int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) -{ - int rc; - struct q_inval *qi = iommu->qi; - struct qi_desc *hw, wait_desc; - int wait_index, index; - unsigned long flags; - - if (!qi) - return 0; - - hw = qi->desc; - -restart: - rc = 0; - - spin_lock_irqsave(&qi->q_lock, flags); - while (qi->free_cnt < 3) { - spin_unlock_irqrestore(&qi->q_lock, flags); - cpu_relax(); - spin_lock_irqsave(&qi->q_lock, flags); - } - - index = qi->free_head; - wait_index = (index + 1) % QI_LENGTH; - - qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; - - hw[index] = *desc; - - wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) | - QI_IWD_STATUS_WRITE | QI_IWD_TYPE; - wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); - - hw[wait_index] = wait_desc; - - __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); - __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); - - qi->free_head = (qi->free_head + 2) % QI_LENGTH; - qi->free_cnt -= 2; - - /* - * update the HW tail register indicating the presence of - * new descriptors. - */ - writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG); - - while (qi->desc_status[wait_index] != QI_DONE) { - /* - * We will leave the interrupts disabled, to prevent interrupt - * context to queue another cmd while a cmd is already submitted - * and waiting for completion on this cpu. This is to avoid - * a deadlock where the interrupt context can wait indefinitely - * for free slots in the queue. - */ - rc = qi_check_fault(iommu, index); - if (rc) - break; - - spin_unlock(&qi->q_lock); - cpu_relax(); - spin_lock(&qi->q_lock); - } - - qi->desc_status[index] = QI_DONE; - - reclaim_free_desc(qi); - spin_unlock_irqrestore(&qi->q_lock, flags); - - if (rc == -EAGAIN) - goto restart; - - return rc; -} - -/* - * Flush the global interrupt entry cache. 
- */ -void qi_global_iec(struct intel_iommu *iommu) -{ - struct qi_desc desc; - - desc.low = QI_IEC_TYPE; - desc.high = 0; - - /* should never fail */ - qi_submit_sync(&desc, iommu); -} - -void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, - u64 type) -{ - struct qi_desc desc; - - desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) - | QI_CC_GRAN(type) | QI_CC_TYPE; - desc.high = 0; - - qi_submit_sync(&desc, iommu); -} - -void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type) -{ - u8 dw = 0, dr = 0; - - struct qi_desc desc; - int ih = 0; - - if (cap_write_drain(iommu->cap)) - dw = 1; - - if (cap_read_drain(iommu->cap)) - dr = 1; - - desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) - | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; - desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) - | QI_IOTLB_AM(size_order); - - qi_submit_sync(&desc, iommu); -} - -void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, - u64 addr, unsigned mask) -{ - struct qi_desc desc; - - if (mask) { - BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1)); - addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1; - desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; - } else - desc.high = QI_DEV_IOTLB_ADDR(addr); - - if (qdep >= QI_DEV_IOTLB_MAX_INVS) - qdep = 0; - - desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) | - QI_DIOTLB_TYPE; - - qi_submit_sync(&desc, iommu); -} - -/* - * Disable Queued Invalidation interface. - */ -void dmar_disable_qi(struct intel_iommu *iommu) -{ - unsigned long flags; - u32 sts; - cycles_t start_time = get_cycles(); - - if (!ecap_qis(iommu->ecap)) - return; - - spin_lock_irqsave(&iommu->register_lock, flags); - - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); - if (!(sts & DMA_GSTS_QIES)) - goto end; - - /* - * Give a chance to HW to complete the pending invalidation requests. - */ - while ((readl(iommu->reg + DMAR_IQT_REG) != - readl(iommu->reg + DMAR_IQH_REG)) && - (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time))) - cpu_relax(); - - iommu->gcmd &= ~DMA_GCMD_QIE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, - !(sts & DMA_GSTS_QIES), sts); -end: - spin_unlock_irqrestore(&iommu->register_lock, flags); -} - -/* - * Enable queued invalidation. - */ -static void __dmar_enable_qi(struct intel_iommu *iommu) -{ - u32 sts; - unsigned long flags; - struct q_inval *qi = iommu->qi; - - qi->free_head = qi->free_tail = 0; - qi->free_cnt = QI_LENGTH; - - spin_lock_irqsave(&iommu->register_lock, flags); - - /* write zero to the tail reg */ - writel(0, iommu->reg + DMAR_IQT_REG); - - dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); - - iommu->gcmd |= DMA_GCMD_QIE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); - - spin_unlock_irqrestore(&iommu->register_lock, flags); -} - -/* - * Enable Queued Invalidation interface. This is a must to support - * interrupt-remapping. Also used by DMA-remapping, which replaces - * register based IOTLB invalidation. - */ -int dmar_enable_qi(struct intel_iommu *iommu) -{ - struct q_inval *qi; - struct page *desc_page; - - if (!ecap_qis(iommu->ecap)) - return -ENOENT; - - /* - * queued invalidation is already setup and enabled. 
- */ - if (iommu->qi) - return 0; - - iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); - if (!iommu->qi) - return -ENOMEM; - - qi = iommu->qi; - - - desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); - if (!desc_page) { - kfree(qi); - iommu->qi = 0; - return -ENOMEM; - } - - qi->desc = page_address(desc_page); - - qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC); - if (!qi->desc_status) { - free_page((unsigned long) qi->desc); - kfree(qi); - iommu->qi = 0; - return -ENOMEM; - } - - qi->free_head = qi->free_tail = 0; - qi->free_cnt = QI_LENGTH; - - spin_lock_init(&qi->q_lock); - - __dmar_enable_qi(iommu); - - return 0; -} - -/* iommu interrupt handling. Most stuff are MSI-like. */ - -enum faulttype { - DMA_REMAP, - INTR_REMAP, - UNKNOWN, -}; - -static const char *dma_remap_fault_reasons[] = -{ - "Software", - "Present bit in root entry is clear", - "Present bit in context entry is clear", - "Invalid context entry", - "Access beyond MGAW", - "PTE Write access is not set", - "PTE Read access is not set", - "Next page table ptr is invalid", - "Root table address invalid", - "Context table ptr is invalid", - "non-zero reserved fields in RTP", - "non-zero reserved fields in CTP", - "non-zero reserved fields in PTE", -}; - -static const char *intr_remap_fault_reasons[] = -{ - "Detected reserved fields in the decoded interrupt-remapped request", - "Interrupt index exceeded the interrupt-remapping table size", - "Present field in the IRTE entry is clear", - "Error accessing interrupt-remapping table pointed by IRTA_REG", - "Detected reserved fields in the IRTE entry", - "Blocked a compatibility format interrupt request", - "Blocked an interrupt request due to source-id verification failure", -}; - -#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) - -const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) -{ - if (fault_reason >= 0x20 && (fault_reason <= 0x20 + - ARRAY_SIZE(intr_remap_fault_reasons))) { - *fault_type = INTR_REMAP; - return intr_remap_fault_reasons[fault_reason - 0x20]; - } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { - *fault_type = DMA_REMAP; - return dma_remap_fault_reasons[fault_reason]; - } else { - *fault_type = UNKNOWN; - return "Unknown"; - } -} - -void dmar_msi_unmask(struct irq_data *data) -{ - struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); - unsigned long flag; - - /* unmask it */ - spin_lock_irqsave(&iommu->register_lock, flag); - writel(0, iommu->reg + DMAR_FECTL_REG); - /* Read a reg to force flush the post write */ - readl(iommu->reg + DMAR_FECTL_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_mask(struct irq_data *data) -{ - unsigned long flag; - struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); - - /* mask it */ - spin_lock_irqsave(&iommu->register_lock, flag); - writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); - /* Read a reg to force flush the post write */ - readl(iommu->reg + DMAR_FECTL_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_write(int irq, struct msi_msg *msg) -{ - struct intel_iommu *iommu = irq_get_handler_data(irq); - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - writel(msg->data, iommu->reg + DMAR_FEDATA_REG); - writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); - writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -void dmar_msi_read(int irq, struct msi_msg *msg) -{ - struct 
intel_iommu *iommu = irq_get_handler_data(irq); - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - msg->data = readl(iommu->reg + DMAR_FEDATA_REG); - msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); - msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -static int dmar_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, unsigned long long addr) -{ - const char *reason; - int fault_type; - - reason = dmar_get_fault_reason(fault_reason, &fault_type); - - if (fault_type == INTR_REMAP) - printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] " - "fault index %llx\n" - "INTR-REMAP:[fault reason %02d] %s\n", - (source_id >> 8), PCI_SLOT(source_id & 0xFF), - PCI_FUNC(source_id & 0xFF), addr >> 48, - fault_reason, reason); - else - printk(KERN_ERR - "DMAR:[%s] Request device [%02x:%02x.%d] " - "fault addr %llx \n" - "DMAR:[fault reason %02d] %s\n", - (type ? "DMA Read" : "DMA Write"), - (source_id >> 8), PCI_SLOT(source_id & 0xFF), - PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason); - return 0; -} - -#define PRIMARY_FAULT_REG_LEN (16) -irqreturn_t dmar_fault(int irq, void *dev_id) -{ - struct intel_iommu *iommu = dev_id; - int reg, fault_index; - u32 fault_status; - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - fault_status = readl(iommu->reg + DMAR_FSTS_REG); - if (fault_status) - printk(KERN_ERR "DRHD: handling fault status reg %x\n", - fault_status); - - /* TBD: ignore advanced fault log currently */ - if (!(fault_status & DMA_FSTS_PPF)) - goto clear_rest; - - fault_index = dma_fsts_fault_record_index(fault_status); - reg = cap_fault_reg_offset(iommu->cap); - while (1) { - u8 fault_reason; - u16 source_id; - u64 guest_addr; - int type; - u32 data; - - /* highest 32 bits */ - data = readl(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12); - if (!(data & DMA_FRCD_F)) - break; - - fault_reason = dma_frcd_fault_reason(data); - type = dma_frcd_type(data); - - data = readl(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 8); - source_id = dma_frcd_source_id(data); - - guest_addr = dmar_readq(iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN); - guest_addr = dma_frcd_page_addr(guest_addr); - /* clear the fault */ - writel(DMA_FRCD_F, iommu->reg + reg + - fault_index * PRIMARY_FAULT_REG_LEN + 12); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - - dmar_fault_do_one(iommu, type, fault_reason, - source_id, guest_addr); - - fault_index++; - if (fault_index >= cap_num_fault_regs(iommu->cap)) - fault_index = 0; - spin_lock_irqsave(&iommu->register_lock, flag); - } -clear_rest: - /* clear all the other faults */ - fault_status = readl(iommu->reg + DMAR_FSTS_REG); - writel(fault_status, iommu->reg + DMAR_FSTS_REG); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - return IRQ_HANDLED; -} - -int dmar_set_interrupt(struct intel_iommu *iommu) -{ - int irq, ret; - - /* - * Check if the fault interrupt is already initialized. 
- */ - if (iommu->irq) - return 0; - - irq = create_irq(); - if (!irq) { - printk(KERN_ERR "IOMMU: no free vectors\n"); - return -EINVAL; - } - - irq_set_handler_data(irq, iommu); - iommu->irq = irq; - - ret = arch_setup_dmar_msi(irq); - if (ret) { - irq_set_handler_data(irq, NULL); - iommu->irq = 0; - destroy_irq(irq); - return ret; - } - - ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu); - if (ret) - printk(KERN_ERR "IOMMU: can't request irq\n"); - return ret; -} - -int __init enable_drhd_fault_handling(void) -{ - struct dmar_drhd_unit *drhd; - - /* - * Enable fault control interrupt. - */ - for_each_drhd_unit(drhd) { - int ret; - struct intel_iommu *iommu = drhd->iommu; - ret = dmar_set_interrupt(iommu); - - if (ret) { - printk(KERN_ERR "DRHD %Lx: failed to enable fault, " - " interrupt, ret %d\n", - (unsigned long long)drhd->reg_base_addr, ret); - return -1; - } - - /* - * Clear any previous faults. - */ - dmar_fault(iommu->irq, iommu); - } - - return 0; -} - -/* - * Re-enable Queued Invalidation interface. - */ -int dmar_reenable_qi(struct intel_iommu *iommu) -{ - if (!ecap_qis(iommu->ecap)) - return -ENOENT; - - if (!iommu->qi) - return -ENOENT; - - /* - * First disable queued invalidation. - */ - dmar_disable_qi(iommu); - /* - * Then enable queued invalidation again. Since there is no pending - * invalidation requests now, it's safe to re-enable queued - * invalidation. - */ - __dmar_enable_qi(iommu); - - return 0; -} - -/* - * Check interrupt remapping support in DMAR table description. - */ -int __init dmar_ir_support(void) -{ - struct acpi_table_dmar *dmar; - dmar = (struct acpi_table_dmar *)dmar_tbl; - if (!dmar) - return 0; - return dmar->flags & 0x1; -} -IOMMU_INIT_POST(detect_intel_iommu); diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c deleted file mode 100644 index f02c34d26d1b..000000000000 --- a/drivers/pci/intel-iommu.c +++ /dev/null @@ -1,4017 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Author: Ashok Raj
- * Author: Shaohua Li
- * Author: Anil S Keshavamurthy
- * Author: Fenghua Yu
- */
-
-#include <linux/init.h>
-#include <linux/bitmap.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/dma-mapping.h>
-#include <linux/mempool.h>
-#include <linux/timer.h>
-#include <linux/iova.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-#include <linux/syscore_ops.h>
-#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/pci-ats.h>
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
-#include "pci.h"
-
-#define ROOT_SIZE		VTD_PAGE_SIZE
-#define CONTEXT_SIZE		VTD_PAGE_SIZE
-
-#define IS_BRIDGE_HOST_DEVICE(pdev) \
-			    ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
-#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
-#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
-#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
-
-#define IOAPIC_RANGE_START	(0xfee00000)
-#define IOAPIC_RANGE_END	(0xfeefffff)
-#define IOVA_START_ADDR		(0x1000)
-
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
-
-#define MAX_AGAW_WIDTH 64
-
-#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
-#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
-
-/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
-   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
-#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
-				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
-#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
-
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
-
-/* page table handling */
-#define LEVEL_STRIDE		(9)
-#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
-
-static inline int agaw_to_level(int agaw)
-{
-	return agaw + 2;
-}
-
-static inline int agaw_to_width(int agaw)
-{
-	return 30 + agaw * LEVEL_STRIDE;
-}
-
-static inline int width_to_agaw(int width)
-{
-	return (width - 30) / LEVEL_STRIDE;
-}
-
-static inline unsigned int level_to_offset_bits(int level)
-{
-	return (level - 1) * LEVEL_STRIDE;
-}
-
-static inline int pfn_level_offset(unsigned long pfn, int level)
-{
-	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
-}
-
-static inline unsigned long level_mask(int level)
-{
-	return -1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long level_size(int level)
-{
-	return 1UL << level_to_offset_bits(level);
-}
-
-static inline unsigned long align_to_level(unsigned long pfn, int level)
-{
-	return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
-	return 1 << ((lvl - 1) * LEVEL_STRIDE);
-}
-
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
-   are never going to work.
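-   For example, with 4KiB MM pages PAGE_SHIFT equals VTD_PAGE_SHIFT and
-   the two pfn conversions below are the identity; with 64KiB MM pages
-   each MM pfn covers sixteen 4KiB VT-d pfns.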
*/ -static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) -{ - return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); -} - -static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) -{ - return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); -} -static inline unsigned long page_to_dma_pfn(struct page *pg) -{ - return mm_to_dma_pfn(page_to_pfn(pg)); -} -static inline unsigned long virt_to_dma_pfn(void *p) -{ - return page_to_dma_pfn(virt_to_page(p)); -} - -/* global iommu list, set NULL for ignored DMAR units */ -static struct intel_iommu **g_iommus; - -static void __init check_tylersburg_isoch(void); -static int rwbf_quirk; - -/* - * set to 1 to panic kernel if can't successfully enable VT-d - * (used when kernel is launched w/ TXT) - */ -static int force_on = 0; - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 val; - u64 rsvd1; -}; -#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry)) -static inline bool root_present(struct root_entry *root) -{ - return (root->val & 1); -} -static inline void set_root_present(struct root_entry *root) -{ - root->val |= 1; -} -static inline void set_root_value(struct root_entry *root, unsigned long value) -{ - root->val |= value & VTD_PAGE_MASK; -} - -static inline struct context_entry * -get_context_addr_from_root(struct root_entry *root) -{ - return (struct context_entry *) - (root_present(root)?phys_to_virt( - root->val & VTD_PAGE_MASK) : - NULL); -} - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; - -static inline bool context_present(struct context_entry *context) -{ - return (context->lo & 1); -} -static inline void context_set_present(struct context_entry *context) -{ - context->lo |= 1; -} - -static inline void context_set_fault_enable(struct context_entry *context) -{ - context->lo &= (((u64)-1) << 2) | 1; -} - -static inline void context_set_translation_type(struct context_entry *context, - unsigned long value) -{ - context->lo &= (((u64)-1) << 4) | 3; - context->lo |= (value & 3) << 2; -} - -static inline void context_set_address_root(struct context_entry *context, - unsigned long value) -{ - context->lo |= value & VTD_PAGE_MASK; -} - -static inline void context_set_address_width(struct context_entry *context, - unsigned long value) -{ - context->hi |= value & 7; -} - -static inline void context_set_domain_id(struct context_entry *context, - unsigned long value) -{ - context->hi |= (value & ((1 << 16) - 1)) << 8; -} - -static inline void context_clear_entry(struct context_entry *context) -{ - context->lo = 0; - context->hi = 0; -} - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-10: available - * 11: snoop behavior - * 12-63: Host physcial address - */ -struct dma_pte { - u64 val; -}; - -static inline void dma_clear_pte(struct dma_pte *pte) -{ - pte->val = 0; -} - -static inline void dma_set_pte_readable(struct dma_pte *pte) -{ - pte->val |= DMA_PTE_READ; -} - -static inline void dma_set_pte_writable(struct dma_pte *pte) -{ - pte->val |= DMA_PTE_WRITE; -} - -static inline void dma_set_pte_snp(struct dma_pte *pte) -{ - pte->val |= DMA_PTE_SNP; -} - -static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) -{ - pte->val = (pte->val & ~3) | (prot & 3); -} - -static inline u64 dma_pte_addr(struct dma_pte 
*pte)
-{
-#ifdef CONFIG_64BIT
-	return pte->val & VTD_PAGE_MASK;
-#else
-	/* Must have a full atomic 64-bit read */
-	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
-#endif
-}
-
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
-static inline bool dma_pte_present(struct dma_pte *pte)
-{
-	return (pte->val & 3) != 0;
-}
-
-static inline int first_pte_in_page(struct dma_pte *pte)
-{
-	return !((unsigned long)pte & ~VTD_PAGE_MASK);
-}
-
-/*
- * This domain is a statically identity mapping domain.
- *	1. This domain creates a static 1:1 mapping to all usable memory.
- *	2. It maps to each iommu if successful.
- *	3. Each iommu maps to this domain if successful.
- */
-static struct dmar_domain *si_domain;
-static int hw_pass_through = 1;
-
-/* devices under the same p2p bridge are owned in one domain */
-#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
-
-/* domain represents a virtual machine; more than one device
- * across iommus may be owned by one domain, e.g. kvm guest.
- */
-#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 1)
-
-/* si_domain contains multiple devices */
-#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 2)
-
-struct dmar_domain {
-	int	id;			/* domain id */
-	int	nid;			/* node id */
-	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses */
-
-	struct list_head devices; 	/* all devices' list */
-	struct iova_domain iovad;	/* iovas that belong to this domain */
-
-	struct dma_pte	*pgd;		/* virtual address */
-	int		gaw;		/* max guest address width */
-
-	/* adjusted guest address width, 0 is level 2 30-bit */
-	int		agaw;
-
-	int		flags;		/* flags to find out type of domain */
-
-	int		iommu_coherency;/* indicate coherency of iommu access */
-	int		iommu_snooping; /* indicate snooping control feature */
-	int		iommu_count;	/* reference count of iommu */
-	int		iommu_superpage;/* Level of superpages supported:
-					   0 == 4KiB (no superpages), 1 == 2MiB,
-					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-	spinlock_t	iommu_lock;	/* protect iommu set in domain */
-	u64		max_addr;	/* maximum mapped address */
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
-	struct list_head link;	/* link to domain siblings */
-	struct list_head global; /* link to global list */
-	int segment;		/* PCI domain */
-	u8 bus;			/* PCI bus number */
-	u8 devfn;		/* PCI devfn number */
-	struct pci_dev *dev;	/* it's NULL for PCIe-to-PCI bridge */
-	struct intel_iommu *iommu; /* IOMMU used by this device */
-	struct dmar_domain *domain; /* pointer to domain */
-};
-
-static void flush_unmaps_timeout(unsigned long data);
-
-DEFINE_TIMER(unmap_timer,  flush_unmaps_timeout, 0, 0);
-
-#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
-	int next;
-	struct iova *iova[HIGH_WATER_MARK];
-	struct dmar_domain *domain[HIGH_WATER_MARK];
-};
-
-static struct deferred_flush_tables *deferred_flush;
-
-/* bitmap for indexing intel_iommus */
-static int g_num_of_iommus;
-
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
-static void domain_remove_dev_info(struct dmar_domain *domain);
-
-#ifdef CONFIG_DMAR_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /*CONFIG_DMAR_DEFAULT_ON*/
-
-static int dmar_map_gfx = 1;
-static int dmar_forcedac;
-static int intel_iommu_strict;
-static int intel_iommu_superpage = 1;
-
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
-static DEFINE_SPINLOCK(device_domain_lock);
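The three domain flavours declared above are distinguished only by their flag bits; a small sketch makes the convention explicit (illustrative only, example_domain_kind is not in the patch):

	/* Illustrative only: classify a dmar_domain by the flag bits above. */
	static const char *example_domain_kind(struct dmar_domain *domain)
	{
		if (domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
			return "si_domain (static 1:1 identity map)";
		if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
			return "virtual machine (may span several iommus)";
		return "DMA-API domain (owned by a single iommu)";
	}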
-static LIST_HEAD(device_domain_list); - -static struct iommu_ops intel_iommu_ops; - -static int __init intel_iommu_setup(char *str) -{ - if (!str) - return -EINVAL; - while (*str) { - if (!strncmp(str, "on", 2)) { - dmar_disabled = 0; - printk(KERN_INFO "Intel-IOMMU: enabled\n"); - } else if (!strncmp(str, "off", 3)) { - dmar_disabled = 1; - printk(KERN_INFO "Intel-IOMMU: disabled\n"); - } else if (!strncmp(str, "igfx_off", 8)) { - dmar_map_gfx = 0; - printk(KERN_INFO - "Intel-IOMMU: disable GFX device mapping\n"); - } else if (!strncmp(str, "forcedac", 8)) { - printk(KERN_INFO - "Intel-IOMMU: Forcing DAC for PCI devices\n"); - dmar_forcedac = 1; - } else if (!strncmp(str, "strict", 6)) { - printk(KERN_INFO - "Intel-IOMMU: disable batched IOTLB flush\n"); - intel_iommu_strict = 1; - } else if (!strncmp(str, "sp_off", 6)) { - printk(KERN_INFO - "Intel-IOMMU: disable supported super page\n"); - intel_iommu_superpage = 0; - } - - str += strcspn(str, ","); - while (*str == ',') - str++; - } - return 0; -} -__setup("intel_iommu=", intel_iommu_setup); - -static struct kmem_cache *iommu_domain_cache; -static struct kmem_cache *iommu_devinfo_cache; -static struct kmem_cache *iommu_iova_cache; - -static inline void *alloc_pgtable_page(int node) -{ - struct page *page; - void *vaddr = NULL; - - page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0); - if (page) - vaddr = page_address(page); - return vaddr; -} - -static inline void free_pgtable_page(void *vaddr) -{ - free_page((unsigned long)vaddr); -} - -static inline void *alloc_domain_mem(void) -{ - return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC); -} - -static void free_domain_mem(void *vaddr) -{ - kmem_cache_free(iommu_domain_cache, vaddr); -} - -static inline void * alloc_devinfo_mem(void) -{ - return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC); -} - -static inline void free_devinfo_mem(void *vaddr) -{ - kmem_cache_free(iommu_devinfo_cache, vaddr); -} - -struct iova *alloc_iova_mem(void) -{ - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); -} - -void free_iova_mem(struct iova *iova) -{ - kmem_cache_free(iommu_iova_cache, iova); -} - - -static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) -{ - unsigned long sagaw; - int agaw = -1; - - sagaw = cap_sagaw(iommu->cap); - for (agaw = width_to_agaw(max_gaw); - agaw >= 0; agaw--) { - if (test_bit(agaw, &sagaw)) - break; - } - - return agaw; -} - -/* - * Calculate max SAGAW for each iommu. - */ -int iommu_calculate_max_sagaw(struct intel_iommu *iommu) -{ - return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); -} - -/* - * calculate agaw for each iommu. - * "SAGAW" may be different across iommus, use a default agaw, and - * get a supported less agaw for iommus that don't support the default agaw. - */ -int iommu_calculate_agaw(struct intel_iommu *iommu) -{ - return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); -} - -/* This functionin only returns single iommu in a domain */ -static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) -{ - int iommu_id; - - /* si_domain and vm domain should not get here. 
*/ - BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); - BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY); - - iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); - if (iommu_id < 0 || iommu_id >= g_num_of_iommus) - return NULL; - - return g_iommus[iommu_id]; -} - -static void domain_update_iommu_coherency(struct dmar_domain *domain) -{ - int i; - - domain->iommu_coherency = 1; - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - if (!ecap_coherent(g_iommus[i]->ecap)) { - domain->iommu_coherency = 0; - break; - } - } -} - -static void domain_update_iommu_snooping(struct dmar_domain *domain) -{ - int i; - - domain->iommu_snooping = 1; - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - if (!ecap_sc_support(g_iommus[i]->ecap)) { - domain->iommu_snooping = 0; - break; - } - } -} - -static void domain_update_iommu_superpage(struct dmar_domain *domain) -{ - int i, mask = 0xf; - - if (!intel_iommu_superpage) { - domain->iommu_superpage = 0; - return; - } - - domain->iommu_superpage = 4; /* 1TiB */ - - for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { - mask |= cap_super_page_val(g_iommus[i]->cap); - if (!mask) { - break; - } - } - domain->iommu_superpage = fls(mask); -} - -/* Some capabilities may be different across iommus */ -static void domain_update_iommu_cap(struct dmar_domain *domain) -{ - domain_update_iommu_coherency(domain); - domain_update_iommu_snooping(domain); - domain_update_iommu_superpage(domain); -} - -static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) -{ - struct dmar_drhd_unit *drhd = NULL; - int i; - - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - if (segment != drhd->segment) - continue; - - for (i = 0; i < drhd->devices_cnt; i++) { - if (drhd->devices[i] && - drhd->devices[i]->bus->number == bus && - drhd->devices[i]->devfn == devfn) - return drhd->iommu; - if (drhd->devices[i] && - drhd->devices[i]->subordinate && - drhd->devices[i]->subordinate->number <= bus && - drhd->devices[i]->subordinate->subordinate >= bus) - return drhd->iommu; - } - - if (drhd->include_all) - return drhd->iommu; - } - - return NULL; -} - -static void domain_flush_cache(struct dmar_domain *domain, - void *addr, int size) -{ - if (!domain->iommu_coherency) - clflush_cache_range(addr, size); -} - -/* Gets context entry for a given bus and devfn */ -static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, - u8 bus, u8 devfn) -{ - struct root_entry *root; - struct context_entry *context; - unsigned long phy_addr; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - root = &iommu->root_entry[bus]; - context = get_context_addr_from_root(root); - if (!context) { - context = (struct context_entry *) - alloc_pgtable_page(iommu->node); - if (!context) { - spin_unlock_irqrestore(&iommu->lock, flags); - return NULL; - } - __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); - phy_addr = virt_to_phys((void *)context); - set_root_value(root, phy_addr); - set_root_present(root); - __iommu_flush_cache(iommu, root, sizeof(*root)); - } - spin_unlock_irqrestore(&iommu->lock, flags); - return &context[devfn]; -} - -static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) -{ - struct root_entry *root; - struct context_entry *context; - int ret; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - root = &iommu->root_entry[bus]; - context = get_context_addr_from_root(root); - if (!context) { - ret = 0; - goto out; - } - ret = 
context_present(&context[devfn]); -out: - spin_unlock_irqrestore(&iommu->lock, flags); - return ret; -} - -static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn) -{ - struct root_entry *root; - struct context_entry *context; - unsigned long flags; - - spin_lock_irqsave(&iommu->lock, flags); - root = &iommu->root_entry[bus]; - context = get_context_addr_from_root(root); - if (context) { - context_clear_entry(&context[devfn]); - __iommu_flush_cache(iommu, &context[devfn], \ - sizeof(*context)); - } - spin_unlock_irqrestore(&iommu->lock, flags); -} - -static void free_context_table(struct intel_iommu *iommu) -{ - struct root_entry *root; - int i; - unsigned long flags; - struct context_entry *context; - - spin_lock_irqsave(&iommu->lock, flags); - if (!iommu->root_entry) { - goto out; - } - for (i = 0; i < ROOT_ENTRY_NR; i++) { - root = &iommu->root_entry[i]; - context = get_context_addr_from_root(root); - if (context) - free_pgtable_page(context); - } - free_pgtable_page(iommu->root_entry); - iommu->root_entry = NULL; -out: - spin_unlock_irqrestore(&iommu->lock, flags); -} - -static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, - unsigned long pfn, int large_level) -{ - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - struct dma_pte *parent, *pte = NULL; - int level = agaw_to_level(domain->agaw); - int offset, target_level; - - BUG_ON(!domain->pgd); - BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); - parent = domain->pgd; - - /* Search pte */ - if (!large_level) - target_level = 1; - else - target_level = large_level; - - while (level > 0) { - void *tmp_page; - - offset = pfn_level_offset(pfn, level); - pte = &parent[offset]; - if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) - break; - if (level == target_level) - break; - - if (!dma_pte_present(pte)) { - uint64_t pteval; - - tmp_page = alloc_pgtable_page(domain->nid); - - if (!tmp_page) - return NULL; - - domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); - pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; - if (cmpxchg64(&pte->val, 0ULL, pteval)) { - /* Someone else set it while we were thinking; use theirs. 
*/ - free_pgtable_page(tmp_page); - } else { - dma_pte_addr(pte); - domain_flush_cache(domain, pte, sizeof(*pte)); - } - } - parent = phys_to_virt(dma_pte_addr(pte)); - level--; - } - - return pte; -} - - -/* return address's pte at specific level */ -static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, - unsigned long pfn, - int level, int *large_page) -{ - struct dma_pte *parent, *pte = NULL; - int total = agaw_to_level(domain->agaw); - int offset; - - parent = domain->pgd; - while (level <= total) { - offset = pfn_level_offset(pfn, total); - pte = &parent[offset]; - if (level == total) - return pte; - - if (!dma_pte_present(pte)) { - *large_page = total; - break; - } - - if (pte->val & DMA_PTE_LARGE_PAGE) { - *large_page = total; - return pte; - } - - parent = phys_to_virt(dma_pte_addr(pte)); - total--; - } - return NULL; -} - -/* clear last level pte, a tlb flush should be followed */ -static void dma_pte_clear_range(struct dmar_domain *domain, - unsigned long start_pfn, - unsigned long last_pfn) -{ - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - unsigned int large_page = 1; - struct dma_pte *first_pte, *pte; - - BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); - BUG_ON(start_pfn > last_pfn); - - /* we don't need lock here; nobody else touches the iova range */ - do { - large_page = 1; - first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); - if (!pte) { - start_pfn = align_to_level(start_pfn + 1, large_page + 1); - continue; - } - do { - dma_clear_pte(pte); - start_pfn += lvl_to_nr_pages(large_page); - pte++; - } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); - - domain_flush_cache(domain, first_pte, - (void *)pte - (void *)first_pte); - - } while (start_pfn && start_pfn <= last_pfn); -} - -/* free page table pages. 
last level pte should already be cleared */ -static void dma_pte_free_pagetable(struct dmar_domain *domain, - unsigned long start_pfn, - unsigned long last_pfn) -{ - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - struct dma_pte *first_pte, *pte; - int total = agaw_to_level(domain->agaw); - int level; - unsigned long tmp; - int large_page = 2; - - BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); - BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); - BUG_ON(start_pfn > last_pfn); - - /* We don't need lock here; nobody else touches the iova range */ - level = 2; - while (level <= total) { - tmp = align_to_level(start_pfn, level); - - /* If we can't even clear one PTE at this level, we're done */ - if (tmp + level_size(level) - 1 > last_pfn) - return; - - do { - large_page = level; - first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); - if (large_page > level) - level = large_page + 1; - if (!pte) { - tmp = align_to_level(tmp + 1, level + 1); - continue; - } - do { - if (dma_pte_present(pte)) { - free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); - dma_clear_pte(pte); - } - pte++; - tmp += level_size(level); - } while (!first_pte_in_page(pte) && - tmp + level_size(level) - 1 <= last_pfn); - - domain_flush_cache(domain, first_pte, - (void *)pte - (void *)first_pte); - - } while (tmp && tmp + level_size(level) - 1 <= last_pfn); - level++; - } - /* free pgd */ - if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { - free_pgtable_page(domain->pgd); - domain->pgd = NULL; - } -} - -/* iommu handling */ -static int iommu_alloc_root_entry(struct intel_iommu *iommu) -{ - struct root_entry *root; - unsigned long flags; - - root = (struct root_entry *)alloc_pgtable_page(iommu->node); - if (!root) - return -ENOMEM; - - __iommu_flush_cache(iommu, root, ROOT_SIZE); - - spin_lock_irqsave(&iommu->lock, flags); - iommu->root_entry = root; - spin_unlock_irqrestore(&iommu->lock, flags); - - return 0; -} - -static void iommu_set_root_entry(struct intel_iommu *iommu) -{ - void *addr; - u32 sts; - unsigned long flag; - - addr = iommu->root_entry; - - spin_lock_irqsave(&iommu->register_lock, flag); - dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); - - writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (sts & DMA_GSTS_RTPS), sts); - - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -static void iommu_flush_write_buffer(struct intel_iommu *iommu) -{ - u32 val; - unsigned long flag; - - if (!rwbf_quirk && !cap_rwbf(iommu->cap)) - return; - - spin_lock_irqsave(&iommu->register_lock, flag); - writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (!(val & DMA_GSTS_WBFS)), val); - - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -/* return value determine if we need a write buffer flush */ -static void __iommu_flush_context(struct intel_iommu *iommu, - u16 did, u16 source_id, u8 function_mask, - u64 type) -{ - u64 val = 0; - unsigned long flag; - - switch (type) { - case DMA_CCMD_GLOBAL_INVL: - val = DMA_CCMD_GLOBAL_INVL; - break; - case DMA_CCMD_DOMAIN_INVL: - val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did); - break; - case DMA_CCMD_DEVICE_INVL: - val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did) - | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask); - break; - default: - BUG(); - } - val |= DMA_CCMD_ICC; - - 
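/*
 * Editor's note: the register sequence below is the same write-then-poll
 * idiom used by iommu_set_root_entry() and iommu_flush_write_buffer()
 * above. IOMMU_WAIT_OP() expands to roughly the loop sketched here; the
 * helper name is hypothetical, and the real macro panic()s on timeout
 * instead of returning an error, so read this as an illustration only:
 *
 *	static int vtd_poll_status(struct intel_iommu *iommu, int reg, u32 mask)
 *	{
 *		cycles_t start = get_cycles();
 *		u32 sts;
 *
 *		for (;;) {
 *			sts = readl(iommu->reg + reg);	// re-sample status
 *			if (sts & mask)			// hardware completed
 *				return 0;
 *			if (get_cycles() - start > DMAR_OPERATION_TIMEOUT)
 *				return -ETIMEDOUT;	// real code panics here
 *			cpu_relax();			// polite busy-wait
 *		}
 *	}
 */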
spin_lock_irqsave(&iommu->register_lock, flag); - dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, - dmar_readq, (!(val & DMA_CCMD_ICC)), val); - - spin_unlock_irqrestore(&iommu->register_lock, flag); -} - -/* return value determine if we need a write buffer flush */ -static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, - u64 addr, unsigned int size_order, u64 type) -{ - int tlb_offset = ecap_iotlb_offset(iommu->ecap); - u64 val = 0, val_iva = 0; - unsigned long flag; - - switch (type) { - case DMA_TLB_GLOBAL_FLUSH: - /* global flush doesn't need set IVA_REG */ - val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT; - break; - case DMA_TLB_DSI_FLUSH: - val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); - break; - case DMA_TLB_PSI_FLUSH: - val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); - /* Note: always flush non-leaf currently */ - val_iva = size_order | addr; - break; - default: - BUG(); - } - /* Note: set drain read/write */ -#if 0 - /* - * This is probably to be super secure.. Looks like we can - * ignore it without any impact. - */ - if (cap_read_drain(iommu->cap)) - val |= DMA_TLB_READ_DRAIN; -#endif - if (cap_write_drain(iommu->cap)) - val |= DMA_TLB_WRITE_DRAIN; - - spin_lock_irqsave(&iommu->register_lock, flag); - /* Note: Only uses first TLB reg currently */ - if (val_iva) - dmar_writeq(iommu->reg + tlb_offset, val_iva); - dmar_writeq(iommu->reg + tlb_offset + 8, val); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, tlb_offset + 8, - dmar_readq, (!(val & DMA_TLB_IVT)), val); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - - /* check IOTLB invalidation granularity */ - if (DMA_TLB_IAIG(val) == 0) - printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); - if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) - pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", - (unsigned long long)DMA_TLB_IIRG(type), - (unsigned long long)DMA_TLB_IAIG(val)); -} - -static struct device_domain_info *iommu_support_dev_iotlb( - struct dmar_domain *domain, int segment, u8 bus, u8 devfn) -{ - int found = 0; - unsigned long flags; - struct device_domain_info *info; - struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); - - if (!ecap_dev_iotlb_support(iommu->ecap)) - return NULL; - - if (!iommu->qi) - return NULL; - - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_entry(info, &domain->devices, link) - if (info->bus == bus && info->devfn == devfn) { - found = 1; - break; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - - if (!found || !info->dev) - return NULL; - - if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS)) - return NULL; - - if (!dmar_find_matched_atsr_unit(info->dev)) - return NULL; - - info->iommu = iommu; - - return info; -} - -static void iommu_enable_dev_iotlb(struct device_domain_info *info) -{ - if (!info) - return; - - pci_enable_ats(info->dev, VTD_PAGE_SHIFT); -} - -static void iommu_disable_dev_iotlb(struct device_domain_info *info) -{ - if (!info->dev || !pci_ats_enabled(info->dev)) - return; - - pci_disable_ats(info->dev); -} - -static void iommu_flush_dev_iotlb(struct dmar_domain *domain, - u64 addr, unsigned mask) -{ - u16 sid, qdep; - unsigned long flags; - struct device_domain_info *info; - - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_entry(info, &domain->devices, link) { - if (!info->dev || !pci_ats_enabled(info->dev)) - continue; - - sid = info->bus << 8 | info->devfn; - qdep = pci_ats_queue_depth(info->dev); - 
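/*
 * Editor's note: the device-IOTLB flush below and iommu_flush_iotlb_psi()
 * just after it both encode the flush size as an "address mask": 2^mask
 * pages, naturally aligned, with the address bits under the mask treated
 * as don't-care by hardware. A hedged worked example of the rounding this
 * forces when flushing 3 pages starting at pfn 5:
 *
 *	unsigned int mask = ilog2(__roundup_pow_of_two(3));	// mask == 2
 *	u64 addr = (u64)5 << VTD_PAGE_SHIFT;
 *	// hardware ignores address bits below VTD_PAGE_SHIFT + mask, so the
 *	// effective flush covers pfns 4..7 -- a superset of the request,
 *	// which is always safe for an invalidation.
 */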
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); - } - spin_unlock_irqrestore(&device_domain_lock, flags); -} - -static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, - unsigned long pfn, unsigned int pages, int map) -{ - unsigned int mask = ilog2(__roundup_pow_of_two(pages)); - uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; - - BUG_ON(pages == 0); - - /* - * Fallback to domain selective flush if no PSI support or the size is - * too big. - * PSI requires page size to be 2 ^ x, and the base address is naturally - * aligned to the size - */ - if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) - iommu->flush.flush_iotlb(iommu, did, 0, 0, - DMA_TLB_DSI_FLUSH); - else - iommu->flush.flush_iotlb(iommu, did, addr, mask, - DMA_TLB_PSI_FLUSH); - - /* - * In caching mode, changes of pages from non-present to present require - * flush. However, device IOTLB doesn't need to be flushed in this case. - */ - if (!cap_caching_mode(iommu->cap) || !map) - iommu_flush_dev_iotlb(iommu->domains[did], addr, mask); -} - -static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) -{ - u32 pmen; - unsigned long flags; - - spin_lock_irqsave(&iommu->register_lock, flags); - pmen = readl(iommu->reg + DMAR_PMEN_REG); - pmen &= ~DMA_PMEN_EPM; - writel(pmen, iommu->reg + DMAR_PMEN_REG); - - /* wait for the protected region status bit to clear */ - IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, - readl, !(pmen & DMA_PMEN_PRS), pmen); - - spin_unlock_irqrestore(&iommu->register_lock, flags); -} - -static int iommu_enable_translation(struct intel_iommu *iommu) -{ - u32 sts; - unsigned long flags; - - spin_lock_irqsave(&iommu->register_lock, flags); - iommu->gcmd |= DMA_GCMD_TE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (sts & DMA_GSTS_TES), sts); - - spin_unlock_irqrestore(&iommu->register_lock, flags); - return 0; -} - -static int iommu_disable_translation(struct intel_iommu *iommu) -{ - u32 sts; - unsigned long flag; - - spin_lock_irqsave(&iommu->register_lock, flag); - iommu->gcmd &= ~DMA_GCMD_TE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - /* Make sure hardware complete it */ - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (!(sts & DMA_GSTS_TES)), sts); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - return 0; -} - - -static int iommu_init_domains(struct intel_iommu *iommu) -{ - unsigned long ndomains; - unsigned long nlongs; - - ndomains = cap_ndoms(iommu->cap); - pr_debug("IOMMU %d: Number of Domains supportd <%ld>\n", iommu->seq_id, - ndomains); - nlongs = BITS_TO_LONGS(ndomains); - - spin_lock_init(&iommu->lock); - - /* TBD: there might be 64K domains, - * consider other allocation for future chip - */ - iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); - if (!iommu->domain_ids) { - printk(KERN_ERR "Allocating domain id array failed\n"); - return -ENOMEM; - } - iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), - GFP_KERNEL); - if (!iommu->domains) { - printk(KERN_ERR "Allocating domain array failed\n"); - return -ENOMEM; - } - - /* - * if Caching mode is set, then invalid translations are tagged - * with domainid 0. Hence we need to pre-allocate it. 
- */ - if (cap_caching_mode(iommu->cap)) - set_bit(0, iommu->domain_ids); - return 0; -} - - -static void domain_exit(struct dmar_domain *domain); -static void vm_domain_exit(struct dmar_domain *domain); - -void free_dmar_iommu(struct intel_iommu *iommu) -{ - struct dmar_domain *domain; - int i; - unsigned long flags; - - if ((iommu->domains) && (iommu->domain_ids)) { - for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { - domain = iommu->domains[i]; - clear_bit(i, iommu->domain_ids); - - spin_lock_irqsave(&domain->iommu_lock, flags); - if (--domain->iommu_count == 0) { - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) - vm_domain_exit(domain); - else - domain_exit(domain); - } - spin_unlock_irqrestore(&domain->iommu_lock, flags); - } - } - - if (iommu->gcmd & DMA_GCMD_TE) - iommu_disable_translation(iommu); - - if (iommu->irq) { - irq_set_handler_data(iommu->irq, NULL); - /* This will mask the irq */ - free_irq(iommu->irq, iommu); - destroy_irq(iommu->irq); - } - - kfree(iommu->domains); - kfree(iommu->domain_ids); - - g_iommus[iommu->seq_id] = NULL; - - /* if all iommus are freed, free g_iommus */ - for (i = 0; i < g_num_of_iommus; i++) { - if (g_iommus[i]) - break; - } - - if (i == g_num_of_iommus) - kfree(g_iommus); - - /* free context mapping */ - free_context_table(iommu); -} - -static struct dmar_domain *alloc_domain(void) -{ - struct dmar_domain *domain; - - domain = alloc_domain_mem(); - if (!domain) - return NULL; - - domain->nid = -1; - memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); - domain->flags = 0; - - return domain; -} - -static int iommu_attach_domain(struct dmar_domain *domain, - struct intel_iommu *iommu) -{ - int num; - unsigned long ndomains; - unsigned long flags; - - ndomains = cap_ndoms(iommu->cap); - - spin_lock_irqsave(&iommu->lock, flags); - - num = find_first_zero_bit(iommu->domain_ids, ndomains); - if (num >= ndomains) { - spin_unlock_irqrestore(&iommu->lock, flags); - printk(KERN_ERR "IOMMU: no free domain ids\n"); - return -ENOMEM; - } - - domain->id = num; - set_bit(num, iommu->domain_ids); - set_bit(iommu->seq_id, &domain->iommu_bmp); - iommu->domains[num] = domain; - spin_unlock_irqrestore(&iommu->lock, flags); - - return 0; -} - -static void iommu_detach_domain(struct dmar_domain *domain, - struct intel_iommu *iommu) -{ - unsigned long flags; - int num, ndomains; - int found = 0; - - spin_lock_irqsave(&iommu->lock, flags); - ndomains = cap_ndoms(iommu->cap); - for_each_set_bit(num, iommu->domain_ids, ndomains) { - if (iommu->domains[num] == domain) { - found = 1; - break; - } - } - - if (found) { - clear_bit(num, iommu->domain_ids); - clear_bit(iommu->seq_id, &domain->iommu_bmp); - iommu->domains[num] = NULL; - } - spin_unlock_irqrestore(&iommu->lock, flags); -} - -static struct iova_domain reserved_iova_list; -static struct lock_class_key reserved_rbtree_key; - -static int dmar_init_reserved_ranges(void) -{ - struct pci_dev *pdev = NULL; - struct iova *iova; - int i; - - init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN); - - lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, - &reserved_rbtree_key); - - /* IOAPIC ranges shouldn't be accessed by DMA */ - iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), - IOVA_PFN(IOAPIC_RANGE_END)); - if (!iova) { - printk(KERN_ERR "Reserve IOAPIC range failed\n"); - return -ENODEV; - } - - /* Reserve all PCI MMIO to avoid peer-to-peer access */ - for_each_pci_dev(pdev) { - struct resource *r; - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - r = &pdev->resource[i]; - if 
(!r->flags || !(r->flags & IORESOURCE_MEM)) - continue; - iova = reserve_iova(&reserved_iova_list, - IOVA_PFN(r->start), - IOVA_PFN(r->end)); - if (!iova) { - printk(KERN_ERR "Reserve iova failed\n"); - return -ENODEV; - } - } - } - return 0; -} - -static void domain_reserve_special_ranges(struct dmar_domain *domain) -{ - copy_reserved_iova(&reserved_iova_list, &domain->iovad); -} - -static inline int guestwidth_to_adjustwidth(int gaw) -{ - int agaw; - int r = (gaw - 12) % 9; - - if (r == 0) - agaw = gaw; - else - agaw = gaw + 9 - r; - if (agaw > 64) - agaw = 64; - return agaw; -} - -static int domain_init(struct dmar_domain *domain, int guest_width) -{ - struct intel_iommu *iommu; - int adjust_width, agaw; - unsigned long sagaw; - - init_iova_domain(&domain->iovad, DMA_32BIT_PFN); - spin_lock_init(&domain->iommu_lock); - - domain_reserve_special_ranges(domain); - - /* calculate AGAW */ - iommu = domain_get_iommu(domain); - if (guest_width > cap_mgaw(iommu->cap)) - guest_width = cap_mgaw(iommu->cap); - domain->gaw = guest_width; - adjust_width = guestwidth_to_adjustwidth(guest_width); - agaw = width_to_agaw(adjust_width); - sagaw = cap_sagaw(iommu->cap); - if (!test_bit(agaw, &sagaw)) { - /* hardware doesn't support it, choose a bigger one */ - pr_debug("IOMMU: hardware doesn't support agaw %d\n", agaw); - agaw = find_next_bit(&sagaw, 5, agaw); - if (agaw >= 5) - return -ENODEV; - } - domain->agaw = agaw; - INIT_LIST_HEAD(&domain->devices); - - if (ecap_coherent(iommu->ecap)) - domain->iommu_coherency = 1; - else - domain->iommu_coherency = 0; - - if (ecap_sc_support(iommu->ecap)) - domain->iommu_snooping = 1; - else - domain->iommu_snooping = 0; - - domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); - domain->iommu_count = 1; - domain->nid = iommu->node; - - /* always allocate the top pgd */ - domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); - if (!domain->pgd) - return -ENOMEM; - __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); - return 0; -} - -static void domain_exit(struct dmar_domain *domain) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - - /* Domain 0 is reserved, so dont process it */ - if (!domain) - return; - - /* Flush any lazy unmaps that may reference this domain */ - if (!intel_iommu_strict) - flush_unmaps_timeout(0); - - domain_remove_dev_info(domain); - /* destroy iovas */ - put_iova_domain(&domain->iovad); - - /* clear ptes */ - dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); - - /* free page tables */ - dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); - - for_each_active_iommu(iommu, drhd) - if (test_bit(iommu->seq_id, &domain->iommu_bmp)) - iommu_detach_domain(domain, iommu); - - free_domain_mem(domain); -} - -static int domain_context_mapping_one(struct dmar_domain *domain, int segment, - u8 bus, u8 devfn, int translation) -{ - struct context_entry *context; - unsigned long flags; - struct intel_iommu *iommu; - struct dma_pte *pgd; - unsigned long num; - unsigned long ndomains; - int id; - int agaw; - struct device_domain_info *info = NULL; - - pr_debug("Set context mapping for %02x:%02x.%d\n", - bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); - - BUG_ON(!domain->pgd); - BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && - translation != CONTEXT_TT_MULTI_LEVEL); - - iommu = device_to_iommu(segment, bus, devfn); - if (!iommu) - return -ENODEV; - - context = device_to_context_entry(iommu, bus, devfn); - if (!context) - return -ENOMEM; - spin_lock_irqsave(&iommu->lock, flags); - if 
(context_present(context)) { - spin_unlock_irqrestore(&iommu->lock, flags); - return 0; - } - - id = domain->id; - pgd = domain->pgd; - - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || - domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) { - int found = 0; - - /* find an available domain id for this device in iommu */ - ndomains = cap_ndoms(iommu->cap); - for_each_set_bit(num, iommu->domain_ids, ndomains) { - if (iommu->domains[num] == domain) { - id = num; - found = 1; - break; - } - } - - if (found == 0) { - num = find_first_zero_bit(iommu->domain_ids, ndomains); - if (num >= ndomains) { - spin_unlock_irqrestore(&iommu->lock, flags); - printk(KERN_ERR "IOMMU: no free domain ids\n"); - return -EFAULT; - } - - set_bit(num, iommu->domain_ids); - iommu->domains[num] = domain; - id = num; - } - - /* Skip top levels of page tables for - * iommu which has less agaw than default. - * Unnecessary for PT mode. - */ - if (translation != CONTEXT_TT_PASS_THROUGH) { - for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { - pgd = phys_to_virt(dma_pte_addr(pgd)); - if (!dma_pte_present(pgd)) { - spin_unlock_irqrestore(&iommu->lock, flags); - return -ENOMEM; - } - } - } - } - - context_set_domain_id(context, id); - - if (translation != CONTEXT_TT_PASS_THROUGH) { - info = iommu_support_dev_iotlb(domain, segment, bus, devfn); - translation = info ? CONTEXT_TT_DEV_IOTLB : - CONTEXT_TT_MULTI_LEVEL; - } - /* - * In pass through mode, AW must be programmed to indicate the largest - * AGAW value supported by hardware. And ASR is ignored by hardware. - */ - if (unlikely(translation == CONTEXT_TT_PASS_THROUGH)) - context_set_address_width(context, iommu->msagaw); - else { - context_set_address_root(context, virt_to_phys(pgd)); - context_set_address_width(context, iommu->agaw); - } - - context_set_translation_type(context, translation); - context_set_fault_enable(context); - context_set_present(context); - domain_flush_cache(domain, context, sizeof(*context)); - - /* - * It's a non-present to present mapping. If hardware doesn't cache - * non-present entry we only need to flush the write-buffer. 
If the - * _does_ cache non-present entries, then it does so in the special - * domain #0, which we have to flush: - */ - if (cap_caching_mode(iommu->cap)) { - iommu->flush.flush_context(iommu, 0, - (((u16)bus) << 8) | devfn, - DMA_CCMD_MASK_NOBIT, - DMA_CCMD_DEVICE_INVL); - iommu->flush.flush_iotlb(iommu, domain->id, 0, 0, DMA_TLB_DSI_FLUSH); - } else { - iommu_flush_write_buffer(iommu); - } - iommu_enable_dev_iotlb(info); - spin_unlock_irqrestore(&iommu->lock, flags); - - spin_lock_irqsave(&domain->iommu_lock, flags); - if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) { - domain->iommu_count++; - if (domain->iommu_count == 1) - domain->nid = iommu->node; - domain_update_iommu_cap(domain); - } - spin_unlock_irqrestore(&domain->iommu_lock, flags); - return 0; -} - -static int -domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, - int translation) -{ - int ret; - struct pci_dev *tmp, *parent; - - ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), - pdev->bus->number, pdev->devfn, - translation); - if (ret) - return ret; - - /* dependent device mapping */ - tmp = pci_find_upstream_pcie_bridge(pdev); - if (!tmp) - return 0; - /* Secondary interface's bus number and devfn 0 */ - parent = pdev->bus->self; - while (parent != tmp) { - ret = domain_context_mapping_one(domain, - pci_domain_nr(parent->bus), - parent->bus->number, - parent->devfn, translation); - if (ret) - return ret; - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ - return domain_context_mapping_one(domain, - pci_domain_nr(tmp->subordinate), - tmp->subordinate->number, 0, - translation); - else /* this is a legacy PCI bridge */ - return domain_context_mapping_one(domain, - pci_domain_nr(tmp->bus), - tmp->bus->number, - tmp->devfn, - translation); -} - -static int domain_context_mapped(struct pci_dev *pdev) -{ - int ret; - struct pci_dev *tmp, *parent; - struct intel_iommu *iommu; - - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, - pdev->devfn); - if (!iommu) - return -ENODEV; - - ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); - if (!ret) - return ret; - /* dependent device mapping */ - tmp = pci_find_upstream_pcie_bridge(pdev); - if (!tmp) - return ret; - /* Secondary interface's bus number and devfn 0 */ - parent = pdev->bus->self; - while (parent != tmp) { - ret = device_context_mapped(iommu, parent->bus->number, - parent->devfn); - if (!ret) - return ret; - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) - return device_context_mapped(iommu, tmp->subordinate->number, - 0); - else - return device_context_mapped(iommu, tmp->bus->number, - tmp->devfn); -} - -/* Returns a number of VTD pages, but aligned to MM page size */ -static inline unsigned long aligned_nrpages(unsigned long host_addr, - size_t size) -{ - host_addr &= ~PAGE_MASK; - return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; -} - -/* Return largest possible superpage level for a given mapping */ -static inline int hardware_largepage_caps(struct dmar_domain *domain, - unsigned long iov_pfn, - unsigned long phy_pfn, - unsigned long pages) -{ - int support, level = 1; - unsigned long pfnmerge; - - support = domain->iommu_superpage; - - /* To use a large page, the virtual *and* physical addresses - must be aligned to 2MiB/1GiB/etc. Lower bits set in either - of them will mean we have to use smaller pages. So just - merge them and check both at once. 
*/ - pfnmerge = iov_pfn | phy_pfn; - - while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) { - pages >>= VTD_STRIDE_SHIFT; - if (!pages) - break; - pfnmerge >>= VTD_STRIDE_SHIFT; - level++; - support--; - } - return level; -} - -static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long phys_pfn, - unsigned long nr_pages, int prot) -{ - struct dma_pte *first_pte = NULL, *pte = NULL; - phys_addr_t uninitialized_var(pteval); - int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - unsigned long sg_res; - unsigned int largepage_lvl = 0; - unsigned long lvl_pages = 0; - - BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width); - - if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) - return -EINVAL; - - prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; - - if (sg) - sg_res = 0; - else { - sg_res = nr_pages + 1; - pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; - } - - while (nr_pages > 0) { - uint64_t tmp; - - if (!sg_res) { - sg_res = aligned_nrpages(sg->offset, sg->length); - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; - sg->dma_length = sg->length; - pteval = page_to_phys(sg_page(sg)) | prot; - phys_pfn = pteval >> VTD_PAGE_SHIFT; - } - - if (!pte) { - largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); - - first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); - if (!pte) - return -ENOMEM; - /* It is large page*/ - if (largepage_lvl > 1) - pteval |= DMA_PTE_LARGE_PAGE; - else - pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; - - } - /* We don't need lock here, nobody else - * touches the iova range - */ - tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); - if (tmp) { - static int dumps = 5; - printk(KERN_CRIT "ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", - iov_pfn, tmp, (unsigned long long)pteval); - if (dumps) { - dumps--; - debug_dma_dump_mappings(NULL); - } - WARN_ON(1); - } - - lvl_pages = lvl_to_nr_pages(largepage_lvl); - - BUG_ON(nr_pages < lvl_pages); - BUG_ON(sg_res < lvl_pages); - - nr_pages -= lvl_pages; - iov_pfn += lvl_pages; - phys_pfn += lvl_pages; - pteval += lvl_pages * VTD_PAGE_SIZE; - sg_res -= lvl_pages; - - /* If the next PTE would be the first in a new page, then we - need to flush the cache on the entries we've just written. - And then we'll need to recalculate 'pte', so clear it and - let it get set again in the if (!pte) block above. - - If we're done (!nr_pages) we need to flush the cache too. - - Also if we've been setting superpages, we may need to - recalculate 'pte' and switch back to smaller pages for the - end of the mapping, if the trailing size is not enough to - use another superpage (i.e. sg_res < lvl_pages). 
*/ - pte++; - if (!nr_pages || first_pte_in_page(pte) || - (largepage_lvl > 1 && sg_res < lvl_pages)) { - domain_flush_cache(domain, first_pte, - (void *)pte - (void *)first_pte); - pte = NULL; - } - - if (!sg_res && nr_pages) - sg = sg_next(sg); - } - return 0; -} - -static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long nr_pages, - int prot) -{ - return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); -} - -static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - unsigned long phys_pfn, unsigned long nr_pages, - int prot) -{ - return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); -} - -static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) -{ - if (!iommu) - return; - - clear_context_table(iommu, bus, devfn); - iommu->flush.flush_context(iommu, 0, 0, 0, - DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); -} - -static void domain_remove_dev_info(struct dmar_domain *domain) -{ - struct device_domain_info *info; - unsigned long flags; - struct intel_iommu *iommu; - - spin_lock_irqsave(&device_domain_lock, flags); - while (!list_empty(&domain->devices)) { - info = list_entry(domain->devices.next, - struct device_domain_info, link); - list_del(&info->link); - list_del(&info->global); - if (info->dev) - info->dev->dev.archdata.iommu = NULL; - spin_unlock_irqrestore(&device_domain_lock, flags); - - iommu_disable_dev_iotlb(info); - iommu = device_to_iommu(info->segment, info->bus, info->devfn); - iommu_detach_dev(iommu, info->bus, info->devfn); - free_devinfo_mem(info); - - spin_lock_irqsave(&device_domain_lock, flags); - } - spin_unlock_irqrestore(&device_domain_lock, flags); -} - -/* - * find_domain - * Note: we use struct pci_dev->dev.archdata.iommu stores the info - */ -static struct dmar_domain * -find_domain(struct pci_dev *pdev) -{ - struct device_domain_info *info; - - /* No lock here, assumes no domain exit in normal case */ - info = pdev->dev.archdata.iommu; - if (info) - return info->domain; - return NULL; -} - -/* domain is initialized */ -static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) -{ - struct dmar_domain *domain, *found = NULL; - struct intel_iommu *iommu; - struct dmar_drhd_unit *drhd; - struct device_domain_info *info, *tmp; - struct pci_dev *dev_tmp; - unsigned long flags; - int bus = 0, devfn = 0; - int segment; - int ret; - - domain = find_domain(pdev); - if (domain) - return domain; - - segment = pci_domain_nr(pdev->bus); - - dev_tmp = pci_find_upstream_pcie_bridge(pdev); - if (dev_tmp) { - if (pci_is_pcie(dev_tmp)) { - bus = dev_tmp->subordinate->number; - devfn = 0; - } else { - bus = dev_tmp->bus->number; - devfn = dev_tmp->devfn; - } - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_entry(info, &device_domain_list, global) { - if (info->segment == segment && - info->bus == bus && info->devfn == devfn) { - found = info->domain; - break; - } - } - spin_unlock_irqrestore(&device_domain_lock, flags); - /* pcie-pci bridge already has a domain, uses it */ - if (found) { - domain = found; - goto found_domain; - } - } - - domain = alloc_domain(); - if (!domain) - goto error; - - /* Allocate new domain for the device */ - drhd = dmar_find_matched_drhd_unit(pdev); - if (!drhd) { - printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", - pci_name(pdev)); - return NULL; - } - iommu = drhd->iommu; - - ret = iommu_attach_domain(domain, iommu); - 
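/*
 * Editor's note: the mapping paths above (pfn_to_dma_pte() and
 * __domain_mapping()) install page-table entries locklessly. A live PTE
 * is only ever written once, from zero to its final value, so a
 * compare-and-swap against zero both publishes the entry and detects a
 * competing writer in one step. In the table walk a nonzero result is a
 * benign race (another CPU installed the same intermediate table, so we
 * free our spare page and use theirs); in __domain_mapping() it indicates
 * a double mapping and is WARN()ed about. A minimal sketch of the idiom,
 * with pr_err() as a stand-in for the driver's fuller diagnostics:
 *
 *	u64 old = cmpxchg64_local(&pte->val, 0ULL, pteval);
 *	if (old)	// slot already taken
 *		pr_err("PTE already set to %llx (wanted %llx)\n", old, pteval);
 */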
if (ret) { - free_domain_mem(domain); - goto error; - } - - if (domain_init(domain, gaw)) { - domain_exit(domain); - goto error; - } - - /* register pcie-to-pci device */ - if (dev_tmp) { - info = alloc_devinfo_mem(); - if (!info) { - domain_exit(domain); - goto error; - } - info->segment = segment; - info->bus = bus; - info->devfn = devfn; - info->dev = NULL; - info->domain = domain; - /* This domain is shared by devices under p2p bridge */ - domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; - - /* pcie-to-pci bridge already has a domain, uses it */ - found = NULL; - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_entry(tmp, &device_domain_list, global) { - if (tmp->segment == segment && - tmp->bus == bus && tmp->devfn == devfn) { - found = tmp->domain; - break; - } - } - if (found) { - spin_unlock_irqrestore(&device_domain_lock, flags); - free_devinfo_mem(info); - domain_exit(domain); - domain = found; - } else { - list_add(&info->link, &domain->devices); - list_add(&info->global, &device_domain_list); - spin_unlock_irqrestore(&device_domain_lock, flags); - } - } - -found_domain: - info = alloc_devinfo_mem(); - if (!info) - goto error; - info->segment = segment; - info->bus = pdev->bus->number; - info->devfn = pdev->devfn; - info->dev = pdev; - info->domain = domain; - spin_lock_irqsave(&device_domain_lock, flags); - /* somebody is fast */ - found = find_domain(pdev); - if (found != NULL) { - spin_unlock_irqrestore(&device_domain_lock, flags); - if (found != domain) { - domain_exit(domain); - domain = found; - } - free_devinfo_mem(info); - return domain; - } - list_add(&info->link, &domain->devices); - list_add(&info->global, &device_domain_list); - pdev->dev.archdata.iommu = info; - spin_unlock_irqrestore(&device_domain_lock, flags); - return domain; -error: - /* recheck it here, maybe others set it */ - return find_domain(pdev); -} - -static int iommu_identity_mapping; -#define IDENTMAP_ALL 1 -#define IDENTMAP_GFX 2 -#define IDENTMAP_AZALIA 4 - -static int iommu_domain_identity_map(struct dmar_domain *domain, - unsigned long long start, - unsigned long long end) -{ - unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; - unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; - - if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), - dma_to_mm_pfn(last_vpfn))) { - printk(KERN_ERR "IOMMU: reserve iova failed\n"); - return -ENOMEM; - } - - pr_debug("Mapping reserved region %llx-%llx for domain %d\n", - start, end, domain->id); - /* - * RMRR range might have overlap with physical memory range, - * clear it first - */ - dma_pte_clear_range(domain, first_vpfn, last_vpfn); - - return domain_pfn_mapping(domain, first_vpfn, first_vpfn, - last_vpfn - first_vpfn + 1, - DMA_PTE_READ|DMA_PTE_WRITE); -} - -static int iommu_prepare_identity_map(struct pci_dev *pdev, - unsigned long long start, - unsigned long long end) -{ - struct dmar_domain *domain; - int ret; - - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); - if (!domain) - return -ENOMEM; - - /* For _hardware_ passthrough, don't bother. 
But for software - passthrough, we do it anyway -- it may indicate a memory - range which is reserved in E820, so which didn't get set - up to start with in si_domain */ - if (domain == si_domain && hw_pass_through) { - printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - return 0; - } - - printk(KERN_INFO - "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", - pci_name(pdev), start, end); - - if (end < start) { - WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); - ret = -EIO; - goto error; - } - - if (end >> agaw_to_width(domain->agaw)) { - WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - agaw_to_width(domain->agaw), - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); - ret = -EIO; - goto error; - } - - ret = iommu_domain_identity_map(domain, start, end); - if (ret) - goto error; - - /* context entry init */ - ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); - if (ret) - goto error; - - return 0; - - error: - domain_exit(domain); - return ret; -} - -static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, - struct pci_dev *pdev) -{ - if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) - return 0; - return iommu_prepare_identity_map(pdev, rmrr->base_address, - rmrr->end_address); -} - -#ifdef CONFIG_DMAR_FLOPPY_WA -static inline void iommu_prepare_isa(void) -{ - struct pci_dev *pdev; - int ret; - - pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); - if (!pdev) - return; - - printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); - ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1); - - if (ret) - printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " - "floppy might not work\n"); - -} -#else -static inline void iommu_prepare_isa(void) -{ - return; -} -#endif /* !CONFIG_DMAR_FLPY_WA */ - -static int md_domain_init(struct dmar_domain *domain, int guest_width); - -static int __init si_domain_work_fn(unsigned long start_pfn, - unsigned long end_pfn, void *datax) -{ - int *ret = datax; - - *ret = iommu_domain_identity_map(si_domain, - (uint64_t)start_pfn << PAGE_SHIFT, - (uint64_t)end_pfn << PAGE_SHIFT); - return *ret; - -} - -static int __init si_domain_init(int hw) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - int nid, ret = 0; - - si_domain = alloc_domain(); - if (!si_domain) - return -EFAULT; - - pr_debug("Identity mapping domain is domain %d\n", si_domain->id); - - for_each_active_iommu(iommu, drhd) { - ret = iommu_attach_domain(si_domain, iommu); - if (ret) { - domain_exit(si_domain); - return -EFAULT; - } - } - - if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { - domain_exit(si_domain); - return -EFAULT; - } - - si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; - - if (hw) - return 0; - - for_each_online_node(nid) { - work_with_active_regions(nid, si_domain_work_fn, &ret); - if (ret) - return ret; - } - - return 0; -} - -static void domain_remove_one_dev_info(struct dmar_domain *domain, - struct pci_dev *pdev); -static int identity_mapping(struct pci_dev *pdev) -{ - struct device_domain_info *info; - - if (likely(!iommu_identity_mapping)) - return 0; - - info = 
pdev->dev.archdata.iommu; - if (info && info != DUMMY_DEVICE_DOMAIN_INFO) - return (info->domain == si_domain); - - return 0; -} - -static int domain_add_dev_info(struct dmar_domain *domain, - struct pci_dev *pdev, - int translation) -{ - struct device_domain_info *info; - unsigned long flags; - int ret; - - info = alloc_devinfo_mem(); - if (!info) - return -ENOMEM; - - ret = domain_context_mapping(domain, pdev, translation); - if (ret) { - free_devinfo_mem(info); - return ret; - } - - info->segment = pci_domain_nr(pdev->bus); - info->bus = pdev->bus->number; - info->devfn = pdev->devfn; - info->dev = pdev; - info->domain = domain; - - spin_lock_irqsave(&device_domain_lock, flags); - list_add(&info->link, &domain->devices); - list_add(&info->global, &device_domain_list); - pdev->dev.archdata.iommu = info; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - -static int iommu_should_identity_map(struct pci_dev *pdev, int startup) -{ - if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) - return 1; - - if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) - return 1; - - if (!(iommu_identity_mapping & IDENTMAP_ALL)) - return 0; - - /* - * We want to start off with all devices in the 1:1 domain, and - * take them out later if we find they can't access all of memory. - * - * However, we can't do this for PCI devices behind bridges, - * because all PCI devices behind the same bridge will end up - * with the same source-id on their transactions. - * - * Practically speaking, we can't change things around for these - * devices at run-time, because we can't be sure there'll be no - * DMA transactions in flight for any of their siblings. - * - * So PCI devices (unless they're on the root bus) as well as - * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of - * the 1:1 domain, just in _case_ one of their siblings turns out - * not to be able to map all of memory. - */ - if (!pci_is_pcie(pdev)) { - if (!pci_is_root_bus(pdev->bus)) - return 0; - if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) - return 0; - } else if (pdev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) - return 0; - - /* - * At boot time, we don't yet know if devices will be 64-bit capable. - * Assume that they will -- if they turn out not to be, then we can - * take them out of the 1:1 domain later. - */ - if (!startup) { - /* - * If the device's dma_mask is less than the system's memory - * size then this is not a candidate for identity mapping. - */ - u64 dma_mask = pdev->dma_mask; - - if (pdev->dev.coherent_dma_mask && - pdev->dev.coherent_dma_mask < dma_mask) - dma_mask = pdev->dev.coherent_dma_mask; - - return dma_mask >= dma_get_required_mask(&pdev->dev); - } - - return 1; -} - -static int __init iommu_prepare_static_identity_mapping(int hw) -{ - struct pci_dev *pdev = NULL; - int ret; - - ret = si_domain_init(hw); - if (ret) - return -EFAULT; - - for_each_pci_dev(pdev) { - /* Skip Host/PCI Bridge devices */ - if (IS_BRIDGE_HOST_DEVICE(pdev)) - continue; - if (iommu_should_identity_map(pdev, 1)) { - printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n", - hw ? "hardware" : "software", pci_name(pdev)); - - ret = domain_add_dev_info(si_domain, pdev, - hw ? 
CONTEXT_TT_PASS_THROUGH : - CONTEXT_TT_MULTI_LEVEL); - if (ret) - return ret; - } - } - - return 0; -} - -static int __init init_dmars(void) -{ - struct dmar_drhd_unit *drhd; - struct dmar_rmrr_unit *rmrr; - struct pci_dev *pdev; - struct intel_iommu *iommu; - int i, ret; - - /* - * for each drhd - * allocate root - * initialize and program root entry to not present - * endfor - */ - for_each_drhd_unit(drhd) { - g_num_of_iommus++; - /* - * lock not needed as this is only incremented in the single - * threaded kernel __init code path all other access are read - * only - */ - } - - g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), - GFP_KERNEL); - if (!g_iommus) { - printk(KERN_ERR "Allocating global iommu array failed\n"); - ret = -ENOMEM; - goto error; - } - - deferred_flush = kzalloc(g_num_of_iommus * - sizeof(struct deferred_flush_tables), GFP_KERNEL); - if (!deferred_flush) { - ret = -ENOMEM; - goto error; - } - - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - - iommu = drhd->iommu; - g_iommus[iommu->seq_id] = iommu; - - ret = iommu_init_domains(iommu); - if (ret) - goto error; - - /* - * TBD: - * we could share the same root & context tables - * among all IOMMU's. Need to Split it later. - */ - ret = iommu_alloc_root_entry(iommu); - if (ret) { - printk(KERN_ERR "IOMMU: allocate root entry failed\n"); - goto error; - } - if (!ecap_pass_through(iommu->ecap)) - hw_pass_through = 0; - } - - /* - * Start from the sane iommu hardware state. - */ - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - - iommu = drhd->iommu; - - /* - * If the queued invalidation is already initialized by us - * (for example, while enabling interrupt-remapping) then - * we got the things already rolling from a sane state. - */ - if (iommu->qi) - continue; - - /* - * Clear any previous faults. - */ - dmar_fault(-1, iommu); - /* - * Disable queued invalidation if supported and already enabled - * before OS handover. - */ - dmar_disable_qi(iommu); - } - - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - - iommu = drhd->iommu; - - if (dmar_enable_qi(iommu)) { - /* - * Queued Invalidate not enabled, use Register Based - * Invalidate - */ - iommu->flush.flush_context = __iommu_flush_context; - iommu->flush.flush_iotlb = __iommu_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } else { - iommu->flush.flush_context = qi_flush_context; - iommu->flush.flush_iotlb = qi_flush_iotlb; - printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued " - "invalidation\n", - iommu->seq_id, - (unsigned long long)drhd->reg_base_addr); - } - } - - if (iommu_pass_through) - iommu_identity_mapping |= IDENTMAP_ALL; - -#ifdef CONFIG_DMAR_BROKEN_GFX_WA - iommu_identity_mapping |= IDENTMAP_GFX; -#endif - - check_tylersburg_isoch(); - - /* - * If pass through is not set or not enabled, setup context entries for - * identity mappings for rmrr, gfx, and isa and may fall back to static - * identity mapping if iommu_identity_mapping is set. 
- */ - if (iommu_identity_mapping) { - ret = iommu_prepare_static_identity_mapping(hw_pass_through); - if (ret) { - printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); - goto error; - } - } - /* - * For each rmrr - * for each dev attached to rmrr - * do - * locate drhd for dev, alloc domain for dev - * allocate free domain - * allocate page table entries for rmrr - * if context not allocated for bus - * allocate and init context - * set present in root table for this bus - * init context with domain, translation etc - * endfor - * endfor - */ - printk(KERN_INFO "IOMMU: Setting RMRR:\n"); - for_each_rmrr_units(rmrr) { - for (i = 0; i < rmrr->devices_cnt; i++) { - pdev = rmrr->devices[i]; - /* - * some BIOS lists non-exist devices in DMAR - * table. - */ - if (!pdev) - continue; - ret = iommu_prepare_rmrr_dev(rmrr, pdev); - if (ret) - printk(KERN_ERR - "IOMMU: mapping reserved region failed\n"); - } - } - - iommu_prepare_isa(); - - /* - * for each drhd - * enable fault log - * global invalidate context cache - * global invalidate iotlb - * enable translation - */ - for_each_drhd_unit(drhd) { - if (drhd->ignored) { - /* - * we always have to disable PMRs or DMA may fail on - * this device - */ - if (force_on) - iommu_disable_protect_mem_regions(drhd->iommu); - continue; - } - iommu = drhd->iommu; - - iommu_flush_write_buffer(iommu); - - ret = dmar_set_interrupt(iommu); - if (ret) - goto error; - - iommu_set_root_entry(iommu); - - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); - - ret = iommu_enable_translation(iommu); - if (ret) - goto error; - - iommu_disable_protect_mem_regions(iommu); - } - - return 0; -error: - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - iommu = drhd->iommu; - free_iommu(iommu); - } - kfree(g_iommus); - return ret; -} - -/* This takes a number of _MM_ pages, not VTD pages */ -static struct iova *intel_alloc_iova(struct device *dev, - struct dmar_domain *domain, - unsigned long nrpages, uint64_t dma_mask) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct iova *iova = NULL; - - /* Restrict dma_mask to the width that the iommu can handle */ - dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); - - if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { - /* - * First try to allocate an io virtual address in - * DMA_BIT_MASK(32) and if that fails then try allocating - * from higher range - */ - iova = alloc_iova(&domain->iovad, nrpages, - IOVA_PFN(DMA_BIT_MASK(32)), 1); - if (iova) - return iova; - } - iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); - if (unlikely(!iova)) { - printk(KERN_ERR "Allocating %ld-page iova for %s failed", - nrpages, pci_name(pdev)); - return NULL; - } - - return iova; -} - -static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) -{ - struct dmar_domain *domain; - int ret; - - domain = get_domain_for_dev(pdev, - DEFAULT_DOMAIN_ADDRESS_WIDTH); - if (!domain) { - printk(KERN_ERR - "Allocating domain for %s failed", pci_name(pdev)); - return NULL; - } - - /* make sure context mapping is ok */ - if (unlikely(!domain_context_mapped(pdev))) { - ret = domain_context_mapping(domain, pdev, - CONTEXT_TT_MULTI_LEVEL); - if (ret) { - printk(KERN_ERR - "Domain context map for %s failed", - pci_name(pdev)); - return NULL; - } - } - - return domain; -} - -static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) -{ - struct device_domain_info *info; - - /* No lock here, assumes no 
domain exit in normal case */ - info = dev->dev.archdata.iommu; - if (likely(info)) - return info->domain; - - return __get_valid_domain_for_dev(dev); -} - -static int iommu_dummy(struct pci_dev *pdev) -{ - return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; -} - -/* Check if the pdev needs to go through non-identity map and unmap process.*/ -static int iommu_no_mapping(struct device *dev) -{ - struct pci_dev *pdev; - int found; - - if (unlikely(dev->bus != &pci_bus_type)) - return 1; - - pdev = to_pci_dev(dev); - if (iommu_dummy(pdev)) - return 1; - - if (!iommu_identity_mapping) - return 0; - - found = identity_mapping(pdev); - if (found) { - if (iommu_should_identity_map(pdev, 0)) - return 1; - else { - /* - * 32 bit DMA is removed from si_domain and fall back - * to non-identity mapping. - */ - domain_remove_one_dev_info(si_domain, pdev); - printk(KERN_INFO "32bit %s uses non-identity mapping\n", - pci_name(pdev)); - return 0; - } - } else { - /* - * In case of a detached 64 bit DMA device from vm, the device - * is put into si_domain for identity mapping. - */ - if (iommu_should_identity_map(pdev, 0)) { - int ret; - ret = domain_add_dev_info(si_domain, pdev, - hw_pass_through ? - CONTEXT_TT_PASS_THROUGH : - CONTEXT_TT_MULTI_LEVEL); - if (!ret) { - printk(KERN_INFO "64bit %s uses identity mapping\n", - pci_name(pdev)); - return 1; - } - } - } - - return 0; -} - -static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, - size_t size, int dir, u64 dma_mask) -{ - struct pci_dev *pdev = to_pci_dev(hwdev); - struct dmar_domain *domain; - phys_addr_t start_paddr; - struct iova *iova; - int prot = 0; - int ret; - struct intel_iommu *iommu; - unsigned long paddr_pfn = paddr >> PAGE_SHIFT; - - BUG_ON(dir == DMA_NONE); - - if (iommu_no_mapping(hwdev)) - return paddr; - - domain = get_valid_domain_for_dev(pdev); - if (!domain) - return 0; - - iommu = domain_get_iommu(domain); - size = aligned_nrpages(paddr, size); - - iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); - if (!iova) - goto error; - - /* - * Check if DMAR supports zero-length reads on write only - * mappings.. - */ - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(iommu->cap)) - prot |= DMA_PTE_READ; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - prot |= DMA_PTE_WRITE; - /* - * paddr - (paddr + size) might be partial page, we should map the whole - * page. Note: if two part of one page are separately mapped, we - * might have two guest_addr mapping to the same host paddr, but this - * is not a big problem - */ - ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), - mm_to_dma_pfn(paddr_pfn), size, prot); - if (ret) - goto error; - - /* it's a non-present to present mapping. 
Only flush if caching mode */ - if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); - else - iommu_flush_write_buffer(iommu); - - start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; - start_paddr += paddr & ~PAGE_MASK; - return start_paddr; - -error: - if (iova) - __free_iova(&domain->iovad, iova); - printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", - pci_name(pdev), size, (unsigned long long)paddr, dir); - return 0; -} - -static dma_addr_t intel_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - return __intel_map_single(dev, page_to_phys(page) + offset, size, - dir, to_pci_dev(dev)->dma_mask); -} - -static void flush_unmaps(void) -{ - int i, j; - - timer_on = 0; - - /* just flush them all */ - for (i = 0; i < g_num_of_iommus; i++) { - struct intel_iommu *iommu = g_iommus[i]; - if (!iommu) - continue; - - if (!deferred_flush[i].next) - continue; - - /* In caching mode, global flushes turn emulation expensive */ - if (!cap_caching_mode(iommu->cap)) - iommu->flush.flush_iotlb(iommu, 0, 0, 0, - DMA_TLB_GLOBAL_FLUSH); - for (j = 0; j < deferred_flush[i].next; j++) { - unsigned long mask; - struct iova *iova = deferred_flush[i].iova[j]; - struct dmar_domain *domain = deferred_flush[i].domain[j]; - - /* On real hardware multiple invalidations are expensive */ - if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, domain->id, - iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); - else { - mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); - iommu_flush_dev_iotlb(deferred_flush[i].domain[j], - (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); - } - __free_iova(&deferred_flush[i].domain[j]->iovad, iova); - } - deferred_flush[i].next = 0; - } - - list_size = 0; -} - -static void flush_unmaps_timeout(unsigned long data) -{ - unsigned long flags; - - spin_lock_irqsave(&async_umap_flush_lock, flags); - flush_unmaps(); - spin_unlock_irqrestore(&async_umap_flush_lock, flags); -} - -static void add_unmap(struct dmar_domain *dom, struct iova *iova) -{ - unsigned long flags; - int next, iommu_id; - struct intel_iommu *iommu; - - spin_lock_irqsave(&async_umap_flush_lock, flags); - if (list_size == HIGH_WATER_MARK) - flush_unmaps(); - - iommu = domain_get_iommu(dom); - iommu_id = iommu->seq_id; - - next = deferred_flush[iommu_id].next; - deferred_flush[iommu_id].domain[next] = dom; - deferred_flush[iommu_id].iova[next] = iova; - deferred_flush[iommu_id].next++; - - if (!timer_on) { - mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10)); - timer_on = 1; - } - list_size++; - spin_unlock_irqrestore(&async_umap_flush_lock, flags); -} - -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct dmar_domain *domain; - unsigned long start_pfn, last_pfn; - struct iova *iova; - struct intel_iommu *iommu; - - if (iommu_no_mapping(dev)) - return; - - domain = find_domain(pdev); - BUG_ON(!domain); - - iommu = domain_get_iommu(domain); - - iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); - if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n", - (unsigned long long)dev_addr)) - return; - - start_pfn = mm_to_dma_pfn(iova->pfn_lo); - last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; - - pr_debug("Device %s unmapping: pfn %lx-%lx\n", - pci_name(pdev), start_pfn, last_pfn); - - 
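/*
 * Editor's note: the ordering in the unmap path below is what makes the
 * lazy (non-strict) mode tolerable: an IOVA is only returned to the
 * allocator together with its IOTLB flush, so a range can never be
 * re-allocated while a stale translation for it may still be cached.
 * A hypothetical condensation of the two paths:
 *
 *	dma_pte_clear_range(domain, start_pfn, last_pfn);	// 1. kill PTEs
 *	if (intel_iommu_strict) {
 *		iommu_flush_iotlb_psi(...);		// 2. flush immediately...
 *		__free_iova(&domain->iovad, iova);	// 3. ...then allow reuse
 *	} else {
 *		add_unmap(domain, iova);	// flush and free happen together
 *						// later, from the batched
 *						// timer / high-water-mark path
 *	}
 */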
/* clear the whole page */ - dma_pte_clear_range(domain, start_pfn, last_pfn); - - /* free page tables */ - dma_pte_free_pagetable(domain, start_pfn, last_pfn); - - if (intel_iommu_strict) { - iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, - last_pfn - start_pfn + 1, 0); - /* free iova */ - __free_iova(&domain->iovad, iova); - } else { - add_unmap(domain, iova); - /* - * queue up the release of the unmap to save the 1/6th of the - * cpu used up by the iotlb flush operation... - */ - } -} - -static void *intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) -{ - void *vaddr; - int order; - - size = PAGE_ALIGN(size); - order = get_order(size); - - if (!iommu_no_mapping(hwdev)) - flags &= ~(GFP_DMA | GFP_DMA32); - else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { - if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) - flags |= GFP_DMA; - else - flags |= GFP_DMA32; - } - - vaddr = (void *)__get_free_pages(flags, order); - if (!vaddr) - return NULL; - memset(vaddr, 0, size); - - *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, - DMA_BIDIRECTIONAL, - hwdev->coherent_dma_mask); - if (*dma_handle) - return vaddr; - free_pages((unsigned long)vaddr, order); - return NULL; -} - -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, - dma_addr_t dma_handle) -{ - int order; - - size = PAGE_ALIGN(size); - order = get_order(size); - - intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); - free_pages((unsigned long)vaddr, order); -} - -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - struct pci_dev *pdev = to_pci_dev(hwdev); - struct dmar_domain *domain; - unsigned long start_pfn, last_pfn; - struct iova *iova; - struct intel_iommu *iommu; - - if (iommu_no_mapping(hwdev)) - return; - - domain = find_domain(pdev); - BUG_ON(!domain); - - iommu = domain_get_iommu(domain); - - iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address)); - if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n", - (unsigned long long)sglist[0].dma_address)) - return; - - start_pfn = mm_to_dma_pfn(iova->pfn_lo); - last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; - - /* clear the whole page */ - dma_pte_clear_range(domain, start_pfn, last_pfn); - - /* free page tables */ - dma_pte_free_pagetable(domain, start_pfn, last_pfn); - - if (intel_iommu_strict) { - iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, - last_pfn - start_pfn + 1, 0); - /* free iova */ - __free_iova(&domain->iovad, iova); - } else { - add_unmap(domain, iova); - /* - * queue up the release of the unmap to save the 1/6th of the - * cpu used up by the iotlb flush operation... 
- */ - } -} - -static int intel_nontranslate_map_sg(struct device *hddev, - struct scatterlist *sglist, int nelems, int dir) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) { - BUG_ON(!sg_page(sg)); - sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; - sg->dma_length = sg->length; - } - return nelems; -} - -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, struct dma_attrs *attrs) -{ - int i; - struct pci_dev *pdev = to_pci_dev(hwdev); - struct dmar_domain *domain; - size_t size = 0; - int prot = 0; - struct iova *iova = NULL; - int ret; - struct scatterlist *sg; - unsigned long start_vpfn; - struct intel_iommu *iommu; - - BUG_ON(dir == DMA_NONE); - if (iommu_no_mapping(hwdev)) - return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); - - domain = get_valid_domain_for_dev(pdev); - if (!domain) - return 0; - - iommu = domain_get_iommu(domain); - - for_each_sg(sglist, sg, nelems, i) - size += aligned_nrpages(sg->offset, sg->length); - - iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), - pdev->dma_mask); - if (!iova) { - sglist->dma_length = 0; - return 0; - } - - /* - * Check if DMAR supports zero-length reads on write only - * mappings.. - */ - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(iommu->cap)) - prot |= DMA_PTE_READ; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - prot |= DMA_PTE_WRITE; - - start_vpfn = mm_to_dma_pfn(iova->pfn_lo); - - ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); - if (unlikely(ret)) { - /* clear the page */ - dma_pte_clear_range(domain, start_vpfn, - start_vpfn + size - 1); - /* free page tables */ - dma_pte_free_pagetable(domain, start_vpfn, - start_vpfn + size - 1); - /* free iova */ - __free_iova(&domain->iovad, iova); - return 0; - } - - /* it's a non-present to present mapping. 
Only flush if caching mode */ - if (cap_caching_mode(iommu->cap)) - iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); - else - iommu_flush_write_buffer(iommu); - - return nelems; -} - -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return !dma_addr; -} - -struct dma_map_ops intel_dma_ops = { - .alloc_coherent = intel_alloc_coherent, - .free_coherent = intel_free_coherent, - .map_sg = intel_map_sg, - .unmap_sg = intel_unmap_sg, - .map_page = intel_map_page, - .unmap_page = intel_unmap_page, - .mapping_error = intel_mapping_error, -}; - -static inline int iommu_domain_cache_init(void) -{ - int ret = 0; - - iommu_domain_cache = kmem_cache_create("iommu_domain", - sizeof(struct dmar_domain), - 0, - SLAB_HWCACHE_ALIGN, - - NULL); - if (!iommu_domain_cache) { - printk(KERN_ERR "Couldn't create iommu_domain cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -static inline int iommu_devinfo_cache_init(void) -{ - int ret = 0; - - iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", - sizeof(struct device_domain_info), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_devinfo_cache) { - printk(KERN_ERR "Couldn't create devinfo cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -static inline int iommu_iova_cache_init(void) -{ - int ret = 0; - - iommu_iova_cache = kmem_cache_create("iommu_iova", - sizeof(struct iova), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_iova_cache) { - printk(KERN_ERR "Couldn't create iova cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -static int __init iommu_init_mempool(void) -{ - int ret; - ret = iommu_iova_cache_init(); - if (ret) - return ret; - - ret = iommu_domain_cache_init(); - if (ret) - goto domain_error; - - ret = iommu_devinfo_cache_init(); - if (!ret) - return ret; - - kmem_cache_destroy(iommu_domain_cache); -domain_error: - kmem_cache_destroy(iommu_iova_cache); - - return -ENOMEM; -} - -static void __init iommu_exit_mempool(void) -{ - kmem_cache_destroy(iommu_devinfo_cache); - kmem_cache_destroy(iommu_domain_cache); - kmem_cache_destroy(iommu_iova_cache); - -} - -static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) -{ - struct dmar_drhd_unit *drhd; - u32 vtbar; - int rc; - - /* We know that this device on this chipset has its own IOMMU. - * If we find it under a different IOMMU, then the BIOS is lying - * to us. Hope that the IOMMU for this device is actually - * disabled, and it needs no translation... 
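
The three cache-init helpers above all follow the same kmem_cache_create() pattern. A stripped-down sketch of that create/alloc/free lifecycle (the object type and all "my_" names are placeholders):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct my_obj { unsigned long lo, hi; };    /* placeholder payload */

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
    my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                 0, SLAB_HWCACHE_ALIGN, NULL);
    return my_cache ? 0 : -ENOMEM;
}

static void my_cache_demo(void)
{
    struct my_obj *o = kmem_cache_alloc(my_cache, GFP_KERNEL);

    if (o)
        kmem_cache_free(my_cache, o);   /* back to the slab */
}
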
- */ - rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); - if (rc) { - /* "can't" happen */ - dev_info(&pdev->dev, "failed to run vt-d quirk\n"); - return; - } - vtbar &= 0xffff0000; - - /* we know that the this iommu should be at offset 0xa000 from vtbar */ - drhd = dmar_find_matched_drhd_unit(pdev); - if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, - TAINT_FIRMWARE_WORKAROUND, - "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) - pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; -} -DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); - -static void __init init_no_remapping_devices(void) -{ - struct dmar_drhd_unit *drhd; - - for_each_drhd_unit(drhd) { - if (!drhd->include_all) { - int i; - for (i = 0; i < drhd->devices_cnt; i++) - if (drhd->devices[i] != NULL) - break; - /* ignore DMAR unit if no pci devices exist */ - if (i == drhd->devices_cnt) - drhd->ignored = 1; - } - } - - if (dmar_map_gfx) - return; - - for_each_drhd_unit(drhd) { - int i; - if (drhd->ignored || drhd->include_all) - continue; - - for (i = 0; i < drhd->devices_cnt; i++) - if (drhd->devices[i] && - !IS_GFX_DEVICE(drhd->devices[i])) - break; - - if (i < drhd->devices_cnt) - continue; - - /* bypass IOMMU if it is just for gfx devices */ - drhd->ignored = 1; - for (i = 0; i < drhd->devices_cnt; i++) { - if (!drhd->devices[i]) - continue; - drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; - } - } -} - -#ifdef CONFIG_SUSPEND -static int init_iommu_hw(void) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu = NULL; - - for_each_active_iommu(iommu, drhd) - if (iommu->qi) - dmar_reenable_qi(iommu); - - for_each_iommu(iommu, drhd) { - if (drhd->ignored) { - /* - * we always have to disable PMRs or DMA may fail on - * this device - */ - if (force_on) - iommu_disable_protect_mem_regions(iommu); - continue; - } - - iommu_flush_write_buffer(iommu); - - iommu_set_root_entry(iommu); - - iommu->flush.flush_context(iommu, 0, 0, 0, - DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, - DMA_TLB_GLOBAL_FLUSH); - if (iommu_enable_translation(iommu)) - return 1; - iommu_disable_protect_mem_regions(iommu); - } - - return 0; -} - -static void iommu_flush_all(void) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - - for_each_active_iommu(iommu, drhd) { - iommu->flush.flush_context(iommu, 0, 0, 0, - DMA_CCMD_GLOBAL_INVL); - iommu->flush.flush_iotlb(iommu, 0, 0, 0, - DMA_TLB_GLOBAL_FLUSH); - } -} - -static int iommu_suspend(void) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu = NULL; - unsigned long flag; - - for_each_active_iommu(iommu, drhd) { - iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, - GFP_ATOMIC); - if (!iommu->iommu_state) - goto nomem; - } - - iommu_flush_all(); - - for_each_active_iommu(iommu, drhd) { - iommu_disable_translation(iommu); - - spin_lock_irqsave(&iommu->register_lock, flag); - - iommu->iommu_state[SR_DMAR_FECTL_REG] = - readl(iommu->reg + DMAR_FECTL_REG); - iommu->iommu_state[SR_DMAR_FEDATA_REG] = - readl(iommu->reg + DMAR_FEDATA_REG); - iommu->iommu_state[SR_DMAR_FEADDR_REG] = - readl(iommu->reg + DMAR_FEADDR_REG); - iommu->iommu_state[SR_DMAR_FEUADDR_REG] = - readl(iommu->reg + DMAR_FEUADDR_REG); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - } - return 0; - -nomem: - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); - - return -ENOMEM; -} - -static void iommu_resume(void) -{ - 
struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu = NULL; - unsigned long flag; - - if (init_iommu_hw()) { - if (force_on) - panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); - else - WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); - return; - } - - for_each_active_iommu(iommu, drhd) { - - spin_lock_irqsave(&iommu->register_lock, flag); - - writel(iommu->iommu_state[SR_DMAR_FECTL_REG], - iommu->reg + DMAR_FECTL_REG); - writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], - iommu->reg + DMAR_FEDATA_REG); - writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], - iommu->reg + DMAR_FEADDR_REG); - writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], - iommu->reg + DMAR_FEUADDR_REG); - - spin_unlock_irqrestore(&iommu->register_lock, flag); - } - - for_each_active_iommu(iommu, drhd) - kfree(iommu->iommu_state); -} - -static struct syscore_ops iommu_syscore_ops = { - .resume = iommu_resume, - .suspend = iommu_suspend, -}; - -static void __init init_iommu_pm_ops(void) -{ - register_syscore_ops(&iommu_syscore_ops); -} - -#else -static inline void init_iommu_pm_ops(void) {} -#endif /* CONFIG_PM */ - -/* - * Here we only respond to action of unbound device from driver. - * - * Added device is not attached to its DMAR domain here yet. That will happen - * when mapping the device to iova. - */ -static int device_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct device *dev = data; - struct pci_dev *pdev = to_pci_dev(dev); - struct dmar_domain *domain; - - if (iommu_no_mapping(dev)) - return 0; - - domain = find_domain(pdev); - if (!domain) - return 0; - - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { - domain_remove_one_dev_info(domain, pdev); - - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && - list_empty(&domain->devices)) - domain_exit(domain); - } - - return 0; -} - -static struct notifier_block device_nb = { - .notifier_call = device_notifier, -}; - -int __init intel_iommu_init(void) -{ - int ret = 0; - - /* VT-d is required for a TXT/tboot launch, so enforce that */ - force_on = tboot_force_iommu(); - - if (dmar_table_init()) { - if (force_on) - panic("tboot: Failed to initialize DMAR table\n"); - return -ENODEV; - } - - if (dmar_dev_scope_init()) { - if (force_on) - panic("tboot: Failed to initialize DMAR device scope\n"); - return -ENODEV; - } - - /* - * Check the need for DMA-remapping initialization now. - * Above initialization will also be used by Interrupt-remapping. 
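
device_notifier above is plugged into the PCI bus notifier chain by intel_iommu_init() further down. A minimal sketch of that registration pattern (the callback body here is a stub):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/pci.h>

static int my_notifier(struct notifier_block *nb,
                       unsigned long action, void *data)
{
    struct device *dev = data;

    if (action == BUS_NOTIFY_UNBOUND_DRIVER)
        dev_info(dev, "driver unbound\n");  /* cleanup would go here */
    return 0;
}

static struct notifier_block my_nb = {
    .notifier_call = my_notifier,
};

static int __init my_init(void)
{
    return bus_register_notifier(&pci_bus_type, &my_nb);
}
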
- */ - if (no_iommu || dmar_disabled) - return -ENODEV; - - if (iommu_init_mempool()) { - if (force_on) - panic("tboot: Failed to initialize iommu memory\n"); - return -ENODEV; - } - - if (dmar_init_reserved_ranges()) { - if (force_on) - panic("tboot: Failed to reserve iommu ranges\n"); - return -ENODEV; - } - - init_no_remapping_devices(); - - ret = init_dmars(); - if (ret) { - if (force_on) - panic("tboot: Failed to initialize DMARs\n"); - printk(KERN_ERR "IOMMU: dmar init failed\n"); - put_iova_domain(&reserved_iova_list); - iommu_exit_mempool(); - return ret; - } - printk(KERN_INFO - "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); - - init_timer(&unmap_timer); -#ifdef CONFIG_SWIOTLB - swiotlb = 0; -#endif - dma_ops = &intel_dma_ops; - - init_iommu_pm_ops(); - - register_iommu(&intel_iommu_ops); - - bus_register_notifier(&pci_bus_type, &device_nb); - - return 0; -} - -static void iommu_detach_dependent_devices(struct intel_iommu *iommu, - struct pci_dev *pdev) -{ - struct pci_dev *tmp, *parent; - - if (!iommu || !pdev) - return; - - /* dependent device detach */ - tmp = pci_find_upstream_pcie_bridge(pdev); - /* Secondary interface's bus number and devfn 0 */ - if (tmp) { - parent = pdev->bus->self; - while (parent != tmp) { - iommu_detach_dev(iommu, parent->bus->number, - parent->devfn); - parent = parent->bus->self; - } - if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ - iommu_detach_dev(iommu, - tmp->subordinate->number, 0); - else /* this is a legacy PCI bridge */ - iommu_detach_dev(iommu, tmp->bus->number, - tmp->devfn); - } -} - -static void domain_remove_one_dev_info(struct dmar_domain *domain, - struct pci_dev *pdev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int found = 0; - struct list_head *entry, *tmp; - - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, - pdev->devfn); - if (!iommu) - return; - - spin_lock_irqsave(&device_domain_lock, flags); - list_for_each_safe(entry, tmp, &domain->devices) { - info = list_entry(entry, struct device_domain_info, link); - if (info->segment == pci_domain_nr(pdev->bus) && - info->bus == pdev->bus->number && - info->devfn == pdev->devfn) { - list_del(&info->link); - list_del(&info->global); - if (info->dev) - info->dev->dev.archdata.iommu = NULL; - spin_unlock_irqrestore(&device_domain_lock, flags); - - iommu_disable_dev_iotlb(info); - iommu_detach_dev(iommu, info->bus, info->devfn); - iommu_detach_dependent_devices(iommu, pdev); - free_devinfo_mem(info); - - spin_lock_irqsave(&device_domain_lock, flags); - - if (found) - break; - else - continue; - } - - /* if there is no other devices under the same iommu - * owned by this domain, clear this iommu in iommu_bmp - * update iommu count and coherency - */ - if (iommu == device_to_iommu(info->segment, info->bus, - info->devfn)) - found = 1; - } - - if (found == 0) { - unsigned long tmp_flags; - spin_lock_irqsave(&domain->iommu_lock, tmp_flags); - clear_bit(iommu->seq_id, &domain->iommu_bmp); - domain->iommu_count--; - domain_update_iommu_cap(domain); - spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); - - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) { - spin_lock_irqsave(&iommu->lock, tmp_flags); - clear_bit(domain->id, iommu->domain_ids); - iommu->domains[domain->id] = NULL; - spin_unlock_irqrestore(&iommu->lock, tmp_flags); - } - } - - spin_unlock_irqrestore(&device_domain_lock, flags); -} - -static void 
vm_domain_remove_all_dev_info(struct dmar_domain *domain) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags1, flags2; - - spin_lock_irqsave(&device_domain_lock, flags1); - while (!list_empty(&domain->devices)) { - info = list_entry(domain->devices.next, - struct device_domain_info, link); - list_del(&info->link); - list_del(&info->global); - if (info->dev) - info->dev->dev.archdata.iommu = NULL; - - spin_unlock_irqrestore(&device_domain_lock, flags1); - - iommu_disable_dev_iotlb(info); - iommu = device_to_iommu(info->segment, info->bus, info->devfn); - iommu_detach_dev(iommu, info->bus, info->devfn); - iommu_detach_dependent_devices(iommu, info->dev); - - /* clear this iommu in iommu_bmp, update iommu count - * and capabilities - */ - spin_lock_irqsave(&domain->iommu_lock, flags2); - if (test_and_clear_bit(iommu->seq_id, - &domain->iommu_bmp)) { - domain->iommu_count--; - domain_update_iommu_cap(domain); - } - spin_unlock_irqrestore(&domain->iommu_lock, flags2); - - free_devinfo_mem(info); - spin_lock_irqsave(&device_domain_lock, flags1); - } - spin_unlock_irqrestore(&device_domain_lock, flags1); -} - -/* domain id for virtual machine, it won't be set in context */ -static unsigned long vm_domid; - -static struct dmar_domain *iommu_alloc_vm_domain(void) -{ - struct dmar_domain *domain; - - domain = alloc_domain_mem(); - if (!domain) - return NULL; - - domain->id = vm_domid++; - domain->nid = -1; - memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); - domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; - - return domain; -} - -static int md_domain_init(struct dmar_domain *domain, int guest_width) -{ - int adjust_width; - - init_iova_domain(&domain->iovad, DMA_32BIT_PFN); - spin_lock_init(&domain->iommu_lock); - - domain_reserve_special_ranges(domain); - - /* calculate AGAW */ - domain->gaw = guest_width; - adjust_width = guestwidth_to_adjustwidth(guest_width); - domain->agaw = width_to_agaw(adjust_width); - - INIT_LIST_HEAD(&domain->devices); - - domain->iommu_count = 0; - domain->iommu_coherency = 0; - domain->iommu_snooping = 0; - domain->iommu_superpage = 0; - domain->max_addr = 0; - domain->nid = -1; - - /* always allocate the top pgd */ - domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); - if (!domain->pgd) - return -ENOMEM; - domain_flush_cache(domain, domain->pgd, PAGE_SIZE); - return 0; -} - -static void iommu_free_vm_domain(struct dmar_domain *domain) -{ - unsigned long flags; - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - unsigned long i; - unsigned long ndomains; - - for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; - iommu = drhd->iommu; - - ndomains = cap_ndoms(iommu->cap); - for_each_set_bit(i, iommu->domain_ids, ndomains) { - if (iommu->domains[i] == domain) { - spin_lock_irqsave(&iommu->lock, flags); - clear_bit(i, iommu->domain_ids); - iommu->domains[i] = NULL; - spin_unlock_irqrestore(&iommu->lock, flags); - break; - } - } - } -} - -static void vm_domain_exit(struct dmar_domain *domain) -{ - /* Domain 0 is reserved, so dont process it */ - if (!domain) - return; - - vm_domain_remove_all_dev_info(domain); - /* destroy iovas */ - put_iova_domain(&domain->iovad); - - /* clear ptes */ - dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); - - /* free page tables */ - dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); - - iommu_free_vm_domain(domain); - free_domain_mem(domain); -} - -static int intel_iommu_domain_init(struct iommu_domain *domain) -{ - struct dmar_domain *dmar_domain; 
- - dmar_domain = iommu_alloc_vm_domain(); - if (!dmar_domain) { - printk(KERN_ERR - "intel_iommu_domain_init: dmar_domain == NULL\n"); - return -ENOMEM; - } - if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { - printk(KERN_ERR - "intel_iommu_domain_init() failed\n"); - vm_domain_exit(dmar_domain); - return -ENOMEM; - } - domain->priv = dmar_domain; - - return 0; -} - -static void intel_iommu_domain_destroy(struct iommu_domain *domain) -{ - struct dmar_domain *dmar_domain = domain->priv; - - domain->priv = NULL; - vm_domain_exit(dmar_domain); -} - -static int intel_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) -{ - struct dmar_domain *dmar_domain = domain->priv; - struct pci_dev *pdev = to_pci_dev(dev); - struct intel_iommu *iommu; - int addr_width; - - /* normally pdev is not mapped */ - if (unlikely(domain_context_mapped(pdev))) { - struct dmar_domain *old_domain; - - old_domain = find_domain(pdev); - if (old_domain) { - if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || - dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) - domain_remove_one_dev_info(old_domain, pdev); - else - domain_remove_dev_info(old_domain); - } - } - - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, - pdev->devfn); - if (!iommu) - return -ENODEV; - - /* check if this iommu agaw is sufficient for max mapped address */ - addr_width = agaw_to_width(iommu->agaw); - if (addr_width > cap_mgaw(iommu->cap)) - addr_width = cap_mgaw(iommu->cap); - - if (dmar_domain->max_addr > (1LL << addr_width)) { - printk(KERN_ERR "%s: iommu width (%d) is not " - "sufficient for the mapped address (%llx)\n", - __func__, addr_width, dmar_domain->max_addr); - return -EFAULT; - } - dmar_domain->gaw = addr_width; - - /* - * Knock out extra levels of page tables if necessary - */ - while (iommu->agaw < dmar_domain->agaw) { - struct dma_pte *pte; - - pte = dmar_domain->pgd; - if (dma_pte_present(pte)) { - dmar_domain->pgd = (struct dma_pte *) - phys_to_virt(dma_pte_addr(pte)); - free_pgtable_page(pte); - } - dmar_domain->agaw--; - } - - return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); -} - -static void intel_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - struct dmar_domain *dmar_domain = domain->priv; - struct pci_dev *pdev = to_pci_dev(dev); - - domain_remove_one_dev_info(dmar_domain, pdev); -} - -static int intel_iommu_map(struct iommu_domain *domain, - unsigned long iova, phys_addr_t hpa, - int gfp_order, int iommu_prot) -{ - struct dmar_domain *dmar_domain = domain->priv; - u64 max_addr; - int prot = 0; - size_t size; - int ret; - - if (iommu_prot & IOMMU_READ) - prot |= DMA_PTE_READ; - if (iommu_prot & IOMMU_WRITE) - prot |= DMA_PTE_WRITE; - if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) - prot |= DMA_PTE_SNP; - - size = PAGE_SIZE << gfp_order; - max_addr = iova + size; - if (dmar_domain->max_addr < max_addr) { - u64 end; - - /* check if minimum agaw is sufficient for mapped address */ - end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; - if (end < max_addr) { - printk(KERN_ERR "%s: iommu width (%d) is not " - "sufficient for the mapped address (%llx)\n", - __func__, dmar_domain->gaw, max_addr); - return -EFAULT; - } - dmar_domain->max_addr = max_addr; - } - /* Round up size to next multiple of PAGE_SIZE, if it and - the low bits of hpa would take us onto the next page */ - size = aligned_nrpages(hpa, size); - ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, - hpa >> VTD_PAGE_SHIFT, size, prot); - 
return ret; -} - -static int intel_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, int gfp_order) -{ - struct dmar_domain *dmar_domain = domain->priv; - size_t size = PAGE_SIZE << gfp_order; - - dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, - (iova + size - 1) >> VTD_PAGE_SHIFT); - - if (dmar_domain->max_addr == iova + size) - dmar_domain->max_addr = iova; - - return gfp_order; -} - -static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, - unsigned long iova) -{ - struct dmar_domain *dmar_domain = domain->priv; - struct dma_pte *pte; - u64 phys = 0; - - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); - if (pte) - phys = dma_pte_addr(pte); - - return phys; -} - -static int intel_iommu_domain_has_cap(struct iommu_domain *domain, - unsigned long cap) -{ - struct dmar_domain *dmar_domain = domain->priv; - - if (cap == IOMMU_CAP_CACHE_COHERENCY) - return dmar_domain->iommu_snooping; - if (cap == IOMMU_CAP_INTR_REMAP) - return intr_remapping_enabled; - - return 0; -} - -static struct iommu_ops intel_iommu_ops = { - .domain_init = intel_iommu_domain_init, - .domain_destroy = intel_iommu_domain_destroy, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map, - .unmap = intel_iommu_unmap, - .iova_to_phys = intel_iommu_iova_to_phys, - .domain_has_cap = intel_iommu_domain_has_cap, -}; - -static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) -{ - /* - * Mobile 4 Series Chipset neglects to set RWBF capability, - * but needs it: - */ - printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); - rwbf_quirk = 1; - - /* https://bugzilla.redhat.com/show_bug.cgi?id=538163 */ - if (dev->revision == 0x07) { - printk(KERN_INFO "DMAR: Disabling IOMMU for graphics on this chipset\n"); - dmar_map_gfx = 0; - } -} - -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); - -#define GGC 0x52 -#define GGC_MEMORY_SIZE_MASK (0xf << 8) -#define GGC_MEMORY_SIZE_NONE (0x0 << 8) -#define GGC_MEMORY_SIZE_1M (0x1 << 8) -#define GGC_MEMORY_SIZE_2M (0x3 << 8) -#define GGC_MEMORY_VT_ENABLED (0x8 << 8) -#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) -#define GGC_MEMORY_SIZE_3M_VT (0xa << 8) -#define GGC_MEMORY_SIZE_4M_VT (0xb << 8) - -static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) -{ - unsigned short ggc; - - if (pci_read_config_word(dev, GGC, &ggc)) - return; - - if (!(ggc & GGC_MEMORY_VT_ENABLED)) { - printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); - dmar_map_gfx = 0; - } -} -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); - -/* On Tylersburg chipsets, some BIOSes have been known to enable the - ISOCH DMAR unit for the Azalia sound device, but not give it any - TLB entries, which causes it to deadlock. Check for that. We do - this in a function called from init_dmars(), instead of in a PCI - quirk, because we don't want to print the obnoxious "BIOS broken" - message if VT-d is actually disabled. -*/ -static void __init check_tylersburg_isoch(void) -{ - struct pci_dev *pdev; - uint32_t vtisochctrl; - - /* If there's no Azalia in the system anyway, forget it. 
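
The Calpella quirk above only tests one bit of the GGC config word. A worked decode of those bit-fields, using the masks defined above and a made-up register value:

#include <stdio.h>

#define GGC_MEMORY_SIZE_MASK    (0xf << 8)
#define GGC_MEMORY_VT_ENABLED   (0x8 << 8)

int main(void)
{
    unsigned short ggc = 0x0900;    /* hypothetical: 2M stolen + VT on */

    if (!(ggc & GGC_MEMORY_VT_ENABLED))
        printf("no shadow GTT; would clear dmar_map_gfx\n");
    else
        printf("VT enabled, size field %#x\n",
               ggc & GGC_MEMORY_SIZE_MASK);
    return 0;
}
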
*/ - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); - if (!pdev) - return; - pci_dev_put(pdev); - - /* System Management Registers. Might be hidden, in which case - we can't do the sanity check. But that's OK, because the - known-broken BIOSes _don't_ actually hide it, so far. */ - pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); - if (!pdev) - return; - - if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { - pci_dev_put(pdev); - return; - } - - pci_dev_put(pdev); - - /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ - if (vtisochctrl & 1) - return; - - /* Drop all bits other than the number of TLB entries */ - vtisochctrl &= 0x1c; - - /* If we have the recommended number of TLB entries (16), fine. */ - if (vtisochctrl == 0x10) - return; - - /* Zero TLB entries? You get to ride the short bus to school. */ - if (!vtisochctrl) { - WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" - "BIOS vendor: %s; Ver: %s; Product Version: %s\n", - dmi_get_system_info(DMI_BIOS_VENDOR), - dmi_get_system_info(DMI_BIOS_VERSION), - dmi_get_system_info(DMI_PRODUCT_VERSION)); - iommu_identity_mapping |= IDENTMAP_AZALIA; - return; - } - - printk(KERN_WARNING "DMAR: Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", - vtisochctrl); -} diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c deleted file mode 100644 index 3607faf28a4d..000000000000 --- a/drivers/pci/intr_remapping.c +++ /dev/null @@ -1,798 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "intr_remapping.h" -#include -#include -#include "pci.h" - -static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; -static struct hpet_scope ir_hpet[MAX_HPET_TBS]; -static int ir_ioapic_num, ir_hpet_num; -int intr_remapping_enabled; - -static int disable_intremap; -static int disable_sourceid_checking; - -static __init int setup_nointremap(char *str) -{ - disable_intremap = 1; - return 0; -} -early_param("nointremap", setup_nointremap); - -static __init int setup_intremap(char *str) -{ - if (!str) - return -EINVAL; - - if (!strncmp(str, "on", 2)) - disable_intremap = 0; - else if (!strncmp(str, "off", 3)) - disable_intremap = 1; - else if (!strncmp(str, "nosid", 5)) - disable_sourceid_checking = 1; - - return 0; -} -early_param("intremap", setup_intremap); - -static DEFINE_SPINLOCK(irq_2_ir_lock); - -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - struct irq_cfg *cfg = irq_get_chip_data(irq); - return cfg ? &cfg->irq_2_iommu : NULL; -} - -int get_irte(int irq, struct irte *entry) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int index; - - if (!entry || !irq_iommu) - return -1; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - - index = irq_iommu->irte_index + irq_iommu->sub_handle; - *entry = *(irq_iommu->iommu->ir_table->base + index); - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return 0; -} - -int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) -{ - struct ir_table *table = iommu->ir_table; - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - u16 index, start_index; - unsigned int mask = 0; - unsigned long flags; - int i; - - if (!count || !irq_iommu) - return -1; - - /* - * start the IRTE search from index 0. 
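
alloc_irte() below rounds a multi-IRTE request up to a power of two and derives the sub-handle mask from it. A standalone demonstration of that rounding/mask arithmetic:

#include <stdio.h>

static unsigned int roundup_pow2(unsigned int n)
{
    unsigned int p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int count;

    for (count = 1; count <= 8; count++) {
        unsigned int r = roundup_pow2(count);
        /* __builtin_ctz == ilog2 for powers of two */
        printf("count %u -> %u entries, mask %u\n",
               count, r, __builtin_ctz(r));
    }
    return 0;
}
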
- */ - index = start_index = 0; - - if (count > 1) { - count = __roundup_pow_of_two(count); - mask = ilog2(count); - } - - if (mask > ecap_max_handle_mask(iommu->ecap)) { - printk(KERN_ERR - "Requested mask %x exceeds the max invalidation handle" - " mask value %Lx\n", mask, - ecap_max_handle_mask(iommu->ecap)); - return -1; - } - - spin_lock_irqsave(&irq_2_ir_lock, flags); - do { - for (i = index; i < index + count; i++) - if (table->base[i].present) - break; - /* empty index found */ - if (i == index + count) - break; - - index = (index + count) % INTR_REMAP_TABLE_ENTRIES; - - if (index == start_index) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - printk(KERN_ERR "can't allocate an IRTE\n"); - return -1; - } - } while (1); - - for (i = index; i < index + count; i++) - table->base[i].present = 1; - - irq_iommu->iommu = iommu; - irq_iommu->irte_index = index; - irq_iommu->sub_handle = 0; - irq_iommu->irte_mask = mask; - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return index; -} - -static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) -{ - struct qi_desc desc; - - desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) - | QI_IEC_SELECTIVE; - desc.high = 0; - - return qi_submit_sync(&desc, iommu); -} - -int map_irq_to_irte_handle(int irq, u16 *sub_handle) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int index; - - if (!irq_iommu) - return -1; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - *sub_handle = irq_iommu->sub_handle; - index = irq_iommu->irte_index; - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return index; -} - -int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - - if (!irq_iommu) - return -1; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - - irq_iommu->iommu = iommu; - irq_iommu->irte_index = index; - irq_iommu->sub_handle = subhandle; - irq_iommu->irte_mask = 0; - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return 0; -} - -int modify_irte(int irq, struct irte *irte_modified) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - struct intel_iommu *iommu; - unsigned long flags; - struct irte *irte; - int rc, index; - - if (!irq_iommu) - return -1; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - - iommu = irq_iommu->iommu; - - index = irq_iommu->irte_index + irq_iommu->sub_handle; - irte = &iommu->ir_table->base[index]; - - set_64bit(&irte->low, irte_modified->low); - set_64bit(&irte->high, irte_modified->high); - __iommu_flush_cache(iommu, irte, sizeof(*irte)); - - rc = qi_flush_iec(iommu, index, 0); - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return rc; -} - -struct intel_iommu *map_hpet_to_ir(u8 hpet_id) -{ - int i; - - for (i = 0; i < MAX_HPET_TBS; i++) - if (ir_hpet[i].id == hpet_id) - return ir_hpet[i].iommu; - return NULL; -} - -struct intel_iommu *map_ioapic_to_ir(int apic) -{ - int i; - - for (i = 0; i < MAX_IO_APICS; i++) - if (ir_ioapic[i].id == apic) - return ir_ioapic[i].iommu; - return NULL; -} - -struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) -{ - struct dmar_drhd_unit *drhd; - - drhd = dmar_find_matched_drhd_unit(dev); - if (!drhd) - return NULL; - - return drhd->iommu; -} - -static int clear_entries(struct irq_2_iommu *irq_iommu) -{ - struct irte *start, *entry, *end; - struct intel_iommu *iommu; - int index; - - if (irq_iommu->sub_handle) - return 0; - - iommu = irq_iommu->iommu; - index = irq_iommu->irte_index + irq_iommu->sub_handle; - - start = 
iommu->ir_table->base + index; - end = start + (1 << irq_iommu->irte_mask); - - for (entry = start; entry < end; entry++) { - set_64bit(&entry->low, 0); - set_64bit(&entry->high, 0); - } - - return qi_flush_iec(iommu, index, irq_iommu->irte_mask); -} - -int free_irte(int irq) -{ - struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); - unsigned long flags; - int rc; - - if (!irq_iommu) - return -1; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - - rc = clear_entries(irq_iommu); - - irq_iommu->iommu = NULL; - irq_iommu->irte_index = 0; - irq_iommu->sub_handle = 0; - irq_iommu->irte_mask = 0; - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return rc; -} - -/* - * source validation type - */ -#define SVT_NO_VERIFY 0x0 /* no verification is required */ -#define SVT_VERIFY_SID_SQ 0x1 /* verify using SID and SQ fields */ -#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */ - -/* - * source-id qualifier - */ -#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */ -#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore - * the third least significant bit - */ -#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore - * the second and third least significant bits - */ -#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore - * the least three significant bits - */ - -/* - * set SVT, SQ and SID fields of irte to verify - * source ids of interrupt requests - */ -static void set_irte_sid(struct irte *irte, unsigned int svt, - unsigned int sq, unsigned int sid) -{ - if (disable_sourceid_checking) - svt = SVT_NO_VERIFY; - irte->svt = svt; - irte->sq = sq; - irte->sid = sid; -} - -int set_ioapic_sid(struct irte *irte, int apic) -{ - int i; - u16 sid = 0; - - if (!irte) - return -1; - - for (i = 0; i < MAX_IO_APICS; i++) { - if (ir_ioapic[i].id == apic) { - sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; - break; - } - } - - if (sid == 0) { - pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); - return -1; - } - - set_irte_sid(irte, 1, 0, sid); - - return 0; -} - -int set_hpet_sid(struct irte *irte, u8 id) -{ - int i; - u16 sid = 0; - - if (!irte) - return -1; - - for (i = 0; i < MAX_HPET_TBS; i++) { - if (ir_hpet[i].id == id) { - sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; - break; - } - } - - if (sid == 0) { - pr_warning("Failed to set source-id of HPET block (%d)\n", id); - return -1; - } - - /* - * Should really use SQ_ALL_16. Some platforms are broken. - * While we figure out the right quirks for these broken platforms, use - * SQ_13_IGNORE_3 for now. 
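
set_ioapic_sid() and set_hpet_sid() below both compose the 16-bit source-id with the bus number in the high byte and devfn in the low byte. A tiny worked example with arbitrary values:

#include <stdio.h>

int main(void)
{
    unsigned int bus = 0x3f, dev = 0x1d, fn = 0x7;
    unsigned int devfn = (dev << 3) | fn;   /* PCI_DEVFN(dev, fn) */
    unsigned int sid = (bus << 8) | devfn;

    printf("sid = %#06x (bus %#x, devfn %#x)\n", sid, bus, devfn);
    return 0;
}
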
- */ - set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid); - - return 0; -} - -int set_msi_sid(struct irte *irte, struct pci_dev *dev) -{ - struct pci_dev *bridge; - - if (!irte || !dev) - return -1; - - /* PCIe device or Root Complex integrated PCI device */ - if (pci_is_pcie(dev) || !dev->bus->parent) { - set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, - (dev->bus->number << 8) | dev->devfn); - return 0; - } - - bridge = pci_find_upstream_pcie_bridge(dev); - if (bridge) { - if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */ - set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, - (bridge->bus->number << 8) | dev->bus->number); - else /* this is a legacy PCI bridge */ - set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, - (bridge->bus->number << 8) | bridge->devfn); - } - - return 0; -} - -static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) -{ - u64 addr; - u32 sts; - unsigned long flags; - - addr = virt_to_phys((void *)iommu->ir_table->base); - - spin_lock_irqsave(&iommu->register_lock, flags); - - dmar_writeq(iommu->reg + DMAR_IRTA_REG, - (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); - - /* Set interrupt-remapping table pointer */ - iommu->gcmd |= DMA_GCMD_SIRTP; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (sts & DMA_GSTS_IRTPS), sts); - spin_unlock_irqrestore(&iommu->register_lock, flags); - - /* - * global invalidation of interrupt entry cache before enabling - * interrupt-remapping. - */ - qi_global_iec(iommu); - - spin_lock_irqsave(&iommu->register_lock, flags); - - /* Enable interrupt-remapping */ - iommu->gcmd |= DMA_GCMD_IRE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, (sts & DMA_GSTS_IRES), sts); - - spin_unlock_irqrestore(&iommu->register_lock, flags); -} - - -static int setup_intr_remapping(struct intel_iommu *iommu, int mode) -{ - struct ir_table *ir_table; - struct page *pages; - - ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), - GFP_ATOMIC); - - if (!iommu->ir_table) - return -ENOMEM; - - pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, - INTR_REMAP_PAGE_ORDER); - - if (!pages) { - printk(KERN_ERR "failed to allocate pages of order %d\n", - INTR_REMAP_PAGE_ORDER); - kfree(iommu->ir_table); - return -ENOMEM; - } - - ir_table->base = page_address(pages); - - iommu_set_intr_remapping(iommu, mode); - return 0; -} - -/* - * Disable Interrupt Remapping. - */ -static void iommu_disable_intr_remapping(struct intel_iommu *iommu) -{ - unsigned long flags; - u32 sts; - - if (!ecap_ir_support(iommu->ecap)) - return; - - /* - * global invalidation of interrupt entry cache before disabling - * interrupt-remapping. 
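
iommu_set_intr_remapping() above follows a write-command-then-poll-status sequence (the IOMMU_WAIT_OP calls). A generic sketch of that MMIO pattern; the registers and bits here are invented, and real code would bound the wait with a proper timeout and cpu_relax():

#include <stdbool.h>
#include <stdint.h>

#define CMD_ENABLE  (1u << 31)  /* cf. DMA_GCMD_IRE */
#define STS_ENABLED (1u << 31)  /* cf. DMA_GSTS_IRES */

static bool enable_unit(volatile uint32_t *cmd, volatile uint32_t *sts,
                        unsigned int max_polls)
{
    *cmd |= CMD_ENABLE;             /* kick the hardware */
    while (max_polls--) {
        if (*sts & STS_ENABLED)     /* hardware acked the command */
            return true;
    }
    return false;                   /* timed out */
}
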
- */ - qi_global_iec(iommu); - - spin_lock_irqsave(&iommu->register_lock, flags); - - sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); - if (!(sts & DMA_GSTS_IRES)) - goto end; - - iommu->gcmd &= ~DMA_GCMD_IRE; - writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); - - IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, - readl, !(sts & DMA_GSTS_IRES), sts); - -end: - spin_unlock_irqrestore(&iommu->register_lock, flags); -} - -int __init intr_remapping_supported(void) -{ - struct dmar_drhd_unit *drhd; - - if (disable_intremap) - return 0; - - if (!dmar_ir_support()) - return 0; - - for_each_drhd_unit(drhd) { - struct intel_iommu *iommu = drhd->iommu; - - if (!ecap_ir_support(iommu->ecap)) - return 0; - } - - return 1; -} - -int __init enable_intr_remapping(int eim) -{ - struct dmar_drhd_unit *drhd; - int setup = 0; - - if (parse_ioapics_under_ir() != 1) { - printk(KERN_INFO "Not enable interrupt remapping\n"); - return -1; - } - - for_each_drhd_unit(drhd) { - struct intel_iommu *iommu = drhd->iommu; - - /* - * If the queued invalidation is already initialized, - * shouldn't disable it. - */ - if (iommu->qi) - continue; - - /* - * Clear previous faults. - */ - dmar_fault(-1, iommu); - - /* - * Disable intr remapping and queued invalidation, if already - * enabled prior to OS handover. - */ - iommu_disable_intr_remapping(iommu); - - dmar_disable_qi(iommu); - } - - /* - * check for the Interrupt-remapping support - */ - for_each_drhd_unit(drhd) { - struct intel_iommu *iommu = drhd->iommu; - - if (!ecap_ir_support(iommu->ecap)) - continue; - - if (eim && !ecap_eim_support(iommu->ecap)) { - printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " - " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); - return -1; - } - } - - /* - * Enable queued invalidation for all the DRHD's. - */ - for_each_drhd_unit(drhd) { - int ret; - struct intel_iommu *iommu = drhd->iommu; - ret = dmar_enable_qi(iommu); - - if (ret) { - printk(KERN_ERR "DRHD %Lx: failed to enable queued, " - " invalidation, ecap %Lx, ret %d\n", - drhd->reg_base_addr, iommu->ecap, ret); - return -1; - } - } - - /* - * Setup Interrupt-remapping for all the DRHD's now. - */ - for_each_drhd_unit(drhd) { - struct intel_iommu *iommu = drhd->iommu; - - if (!ecap_ir_support(iommu->ecap)) - continue; - - if (setup_intr_remapping(iommu, eim)) - goto error; - - setup = 1; - } - - if (!setup) - goto error; - - intr_remapping_enabled = 1; - - return 0; - -error: - /* - * handle error condition gracefully here! - */ - return -1; -} - -static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, - struct intel_iommu *iommu) -{ - struct acpi_dmar_pci_path *path; - u8 bus; - int count; - - bus = scope->bus; - path = (struct acpi_dmar_pci_path *)(scope + 1); - count = (scope->length - sizeof(struct acpi_dmar_device_scope)) - / sizeof(struct acpi_dmar_pci_path); - - while (--count > 0) { - /* - * Access PCI directly due to the PCI - * subsystem isn't initialized yet. 
- */
-        bus = read_pci_config_byte(bus, path->dev, path->fn,
-                                   PCI_SECONDARY_BUS);
-        path++;
-    }
-    ir_hpet[ir_hpet_num].bus = bus;
-    ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
-    ir_hpet[ir_hpet_num].iommu = iommu;
-    ir_hpet[ir_hpet_num].id = scope->enumeration_id;
-    ir_hpet_num++;
-}
-
-static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
-                                      struct intel_iommu *iommu)
-{
-    struct acpi_dmar_pci_path *path;
-    u8 bus;
-    int count;
-
-    bus = scope->bus;
-    path = (struct acpi_dmar_pci_path *)(scope + 1);
-    count = (scope->length - sizeof(struct acpi_dmar_device_scope))
-        / sizeof(struct acpi_dmar_pci_path);
-
-    while (--count > 0) {
-        /*
-         * Access PCI directly because the PCI
-         * subsystem isn't initialized yet.
-         */
-        bus = read_pci_config_byte(bus, path->dev, path->fn,
-                                   PCI_SECONDARY_BUS);
-        path++;
-    }
-
-    ir_ioapic[ir_ioapic_num].bus = bus;
-    ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
-    ir_ioapic[ir_ioapic_num].iommu = iommu;
-    ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
-    ir_ioapic_num++;
-}
-
-static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
-                                      struct intel_iommu *iommu)
-{
-    struct acpi_dmar_hardware_unit *drhd;
-    struct acpi_dmar_device_scope *scope;
-    void *start, *end;
-
-    drhd = (struct acpi_dmar_hardware_unit *)header;
-
-    start = (void *)(drhd + 1);
-    end = ((void *)drhd) + header->length;
-
-    while (start < end) {
-        scope = start;
-        if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
-            if (ir_ioapic_num == MAX_IO_APICS) {
-                printk(KERN_WARNING "Exceeded Max IO APICS\n");
-                return -1;
-            }
-
-            printk(KERN_INFO "IOAPIC id %d under DRHD base "
-                   " 0x%Lx IOMMU %d\n", scope->enumeration_id,
-                   drhd->address, iommu->seq_id);
-
-            ir_parse_one_ioapic_scope(scope, iommu);
-        } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
-            if (ir_hpet_num == MAX_HPET_TBS) {
-                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
-                return -1;
-            }
-
-            printk(KERN_INFO "HPET id %d under DRHD base"
-                   " 0x%Lx\n", scope->enumeration_id,
-                   drhd->address);
-
-            ir_parse_one_hpet_scope(scope, iommu);
-        }
-        start += scope->length;
-    }
-
-    return 0;
-}
-
-/*
- * Finds the association between IOAPICs and their interrupt-remapping
- * hardware units.
- */
-int __init parse_ioapics_under_ir(void)
-{
-    struct dmar_drhd_unit *drhd;
-    int ir_supported = 0;
-
-    for_each_drhd_unit(drhd) {
-        struct intel_iommu *iommu = drhd->iommu;
-
-        if (ecap_ir_support(iommu->ecap)) {
-            if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
-                return -1;
-
-            ir_supported = 1;
-        }
-    }
-
-    if (ir_supported && ir_ioapic_num != nr_ioapics) {
-        printk(KERN_WARNING
-               "Not all IO-APIC's listed under remapping hardware\n");
-        return -1;
-    }
-
-    return ir_supported;
-}
-
-void disable_intr_remapping(void)
-{
-    struct dmar_drhd_unit *drhd;
-    struct intel_iommu *iommu = NULL;
-
-    /*
-     * Disable Interrupt-remapping for all the DRHD's now.
-     */
-    for_each_iommu(iommu, drhd) {
-        if (!ecap_ir_support(iommu->ecap))
-            continue;
-
-        iommu_disable_intr_remapping(iommu);
-    }
-}
-
-int reenable_intr_remapping(int eim)
-{
-    struct dmar_drhd_unit *drhd;
-    int setup = 0;
-    struct intel_iommu *iommu = NULL;
-
-    for_each_iommu(iommu, drhd)
-        if (iommu->qi)
-            dmar_reenable_qi(iommu);
-
-    /*
-     * Setup Interrupt-remapping for all the DRHD's now.
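
ir_parse_ioapic_hpet_scope() above walks length-prefixed ACPI records by advancing start by scope->length on each iteration. The same pattern in a self-contained form (the record layout is invented):

#include <stdint.h>
#include <stdio.h>

struct rec {
    uint8_t type;
    uint8_t length;     /* total size of this record */
};

static void walk(const uint8_t *start, const uint8_t *end)
{
    while (start < end) {
        const struct rec *r = (const struct rec *)start;

        printf("record type %u, length %u\n", r->type, r->length);
        start += r->length;     /* jump to the next record */
    }
}

int main(void)
{
    uint8_t buf[] = { 1, 4, 0, 0, 2, 6, 0, 0, 0, 0 };

    walk(buf, buf + sizeof(buf));
    return 0;
}
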
- */ - for_each_iommu(iommu, drhd) { - if (!ecap_ir_support(iommu->ecap)) - continue; - - /* Set up interrupt remapping for iommu.*/ - iommu_set_intr_remapping(iommu, eim); - setup = 1; - } - - if (!setup) - goto error; - - return 0; - -error: - /* - * handle error condition gracefully here! - */ - return -1; -} - diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h deleted file mode 100644 index 5662fecfee60..000000000000 --- a/drivers/pci/intr_remapping.h +++ /dev/null @@ -1,17 +0,0 @@ -#include - -struct ioapic_scope { - struct intel_iommu *iommu; - unsigned int id; - unsigned int bus; /* PCI bus number */ - unsigned int devfn; /* PCI devfn number */ -}; - -struct hpet_scope { - struct intel_iommu *iommu; - u8 id; - unsigned int bus; - unsigned int devfn; -}; - -#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c deleted file mode 100644 index c5c274ab5c5a..000000000000 --- a/drivers/pci/iova.c +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Copyright © 2006-2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. - * - * Author: Anil S Keshavamurthy - */ - -#include - -void -init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) -{ - spin_lock_init(&iovad->iova_rbtree_lock); - iovad->rbroot = RB_ROOT; - iovad->cached32_node = NULL; - iovad->dma_32bit_pfn = pfn_32bit; -} - -static struct rb_node * -__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) -{ - if ((*limit_pfn != iovad->dma_32bit_pfn) || - (iovad->cached32_node == NULL)) - return rb_last(&iovad->rbroot); - else { - struct rb_node *prev_node = rb_prev(iovad->cached32_node); - struct iova *curr_iova = - container_of(iovad->cached32_node, struct iova, node); - *limit_pfn = curr_iova->pfn_lo - 1; - return prev_node; - } -} - -static void -__cached_rbnode_insert_update(struct iova_domain *iovad, - unsigned long limit_pfn, struct iova *new) -{ - if (limit_pfn != iovad->dma_32bit_pfn) - return; - iovad->cached32_node = &new->node; -} - -static void -__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) -{ - struct iova *cached_iova; - struct rb_node *curr; - - if (!iovad->cached32_node) - return; - curr = iovad->cached32_node; - cached_iova = container_of(curr, struct iova, node); - - if (free->pfn_lo >= cached_iova->pfn_lo) { - struct rb_node *node = rb_next(&free->node); - struct iova *iova = container_of(node, struct iova, node); - - /* only cache if it's below 32bit pfn */ - if (node && iova->pfn_lo < iovad->dma_32bit_pfn) - iovad->cached32_node = node; - else - iovad->cached32_node = NULL; - } -} - -/* Computes the padding size required, to make the - * the start address naturally aligned on its size - */ -static int -iova_get_pad_size(int size, unsigned int limit_pfn) -{ - unsigned int pad_size = 0; - unsigned int order = ilog2(size); - - if (order) - pad_size = (limit_pfn + 1) 
% (1 << order); - - return pad_size; -} - -static int __alloc_and_insert_iova_range(struct iova_domain *iovad, - unsigned long size, unsigned long limit_pfn, - struct iova *new, bool size_aligned) -{ - struct rb_node *prev, *curr = NULL; - unsigned long flags; - unsigned long saved_pfn; - unsigned int pad_size = 0; - - /* Walk the tree backwards */ - spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); - saved_pfn = limit_pfn; - curr = __get_cached_rbnode(iovad, &limit_pfn); - prev = curr; - while (curr) { - struct iova *curr_iova = container_of(curr, struct iova, node); - - if (limit_pfn < curr_iova->pfn_lo) - goto move_left; - else if (limit_pfn < curr_iova->pfn_hi) - goto adjust_limit_pfn; - else { - if (size_aligned) - pad_size = iova_get_pad_size(size, limit_pfn); - if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) - break; /* found a free slot */ - } -adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo - 1; -move_left: - prev = curr; - curr = rb_prev(curr); - } - - if (!curr) { - if (size_aligned) - pad_size = iova_get_pad_size(size, limit_pfn); - if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { - spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - return -ENOMEM; - } - } - - /* pfn_lo will point to size aligned address if size_aligned is set */ - new->pfn_lo = limit_pfn - (size + pad_size) + 1; - new->pfn_hi = new->pfn_lo + size - 1; - - /* Insert the new_iova into domain rbtree by holding writer lock */ - /* Add new node and rebalance tree. */ - { - struct rb_node **entry, *parent = NULL; - - /* If we have 'prev', it's a valid place to start the - insertion. Otherwise, start from the root. */ - if (prev) - entry = &prev; - else - entry = &iovad->rbroot.rb_node; - - /* Figure out where to put new node */ - while (*entry) { - struct iova *this = container_of(*entry, - struct iova, node); - parent = *entry; - - if (new->pfn_lo < this->pfn_lo) - entry = &((*entry)->rb_left); - else if (new->pfn_lo > this->pfn_lo) - entry = &((*entry)->rb_right); - else - BUG(); /* this should not happen */ - } - - /* Add new node and rebalance tree. */ - rb_link_node(&new->node, parent, entry); - rb_insert_color(&new->node, &iovad->rbroot); - } - __cached_rbnode_insert_update(iovad, saved_pfn, new); - - spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - - - return 0; -} - -static void -iova_insert_rbtree(struct rb_root *root, struct iova *iova) -{ - struct rb_node **new = &(root->rb_node), *parent = NULL; - /* Figure out where to put new node */ - while (*new) { - struct iova *this = container_of(*new, struct iova, node); - parent = *new; - - if (iova->pfn_lo < this->pfn_lo) - new = &((*new)->rb_left); - else if (iova->pfn_lo > this->pfn_lo) - new = &((*new)->rb_right); - else - BUG(); /* this should not happen */ - } - /* Add new node and rebalance tree. */ - rb_link_node(&iova->node, parent, new); - rb_insert_color(&iova->node, root); -} - -/** - * alloc_iova - allocates an iova - * @iovad - iova domain in question - * @size - size of page frames to allocate - * @limit_pfn - max limit address - * @size_aligned - set if size_aligned address range is required - * This function allocates an iova in the range limit_pfn to IOVA_START_PFN - * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned - * flag is set then the allocated address iova->pfn_lo will be naturally - * aligned on roundup_power_of_two(size). 
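
iova_get_pad_size() above computes how much padding makes a block carved downward from limit_pfn come out size-aligned. A worked instance of that arithmetic:

#include <stdio.h>

static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
    unsigned int order = 0;

    while ((1u << (order + 1)) <= size)
        order++;                        /* ilog2(size) */
    return order ? (limit_pfn + 1) % (1u << order) : 0;
}

int main(void)
{
    unsigned int size = 8, limit_pfn = 0x1005;
    unsigned int pad = pad_size(size, limit_pfn);
    unsigned int pfn_lo = limit_pfn - (size + pad) + 1;

    /* pad = 6, pfn_lo = 0xff8, which is 8-aligned */
    printf("pad %u, pfn_lo %#x\n", pad, pfn_lo);
    return 0;
}
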
- */
-struct iova *
-alloc_iova(struct iova_domain *iovad, unsigned long size,
-    unsigned long limit_pfn,
-    bool size_aligned)
-{
-    struct iova *new_iova;
-    int ret;
-
-    new_iova = alloc_iova_mem();
-    if (!new_iova)
-        return NULL;
-
-    /* If size aligned is set then round the size to
-     * the next power of two.
-     */
-    if (size_aligned)
-        size = __roundup_pow_of_two(size);
-
-    ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
-            new_iova, size_aligned);
-
-    if (ret) {
-        free_iova_mem(new_iova);
-        return NULL;
-    }
-
-    return new_iova;
-}
-
-/**
- * find_iova - finds an iova for a given pfn
- * @iovad: iova domain in question.
- * @pfn: page frame number
- * This function finds and returns an iova belonging to the
- * given domain which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
-{
-    unsigned long flags;
-    struct rb_node *node;
-
-    /* Take the lock so that no other thread is manipulating the rbtree */
-    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-    node = iovad->rbroot.rb_node;
-    while (node) {
-        struct iova *iova = container_of(node, struct iova, node);
-
-        /* If pfn falls within iova's range, return iova */
-        if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
-            spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-            /* We are not holding the lock while this iova
-             * is referenced by the caller as the same thread
-             * which called this function also calls __free_iova()
-             * and it is by design that only one thread can possibly
-             * reference a particular iova and hence no conflict.
-             */
-            return iova;
-        }
-
-        if (pfn < iova->pfn_lo)
-            node = node->rb_left;
-        else if (pfn > iova->pfn_lo)
-            node = node->rb_right;
-    }
-
-    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-    return NULL;
-}
-
-/**
- * __free_iova - frees the given iova
- * @iovad: iova domain in question.
- * @iova: iova in question.
- * Frees the given iova belonging to the given domain
- */
-void
-__free_iova(struct iova_domain *iovad, struct iova *iova)
-{
-    unsigned long flags;
-
-    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-    __cached_rbnode_delete_update(iovad, iova);
-    rb_erase(&iova->node, &iovad->rbroot);
-    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-    free_iova_mem(iova);
-}
-
-/**
- * free_iova - finds and frees the iova for a given pfn
- * @iovad: iova domain in question.
- * @pfn: pfn that is allocated previously
- * This function finds an iova for a given pfn and then
- * frees the iova from that domain.
- */
-void
-free_iova(struct iova_domain *iovad, unsigned long pfn)
-{
-    struct iova *iova = find_iova(iovad, pfn);
-    if (iova)
-        __free_iova(iovad, iova);
-
-}
-
-/**
- * put_iova_domain - destroys the iova domain
- * @iovad: iova domain in question.
- * All the iova's in that domain are destroyed.
- */
-void put_iova_domain(struct iova_domain *iovad)
-{
-    struct rb_node *node;
-    unsigned long flags;
-
-    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-    node = rb_first(&iovad->rbroot);
-    while (node) {
-        struct iova *iova = container_of(node, struct iova, node);
-        rb_erase(node, &iovad->rbroot);
-        free_iova_mem(iova);
-        node = rb_first(&iovad->rbroot);
-    }
-    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-}
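
Taken together, the allocator functions above compose as follows. A hedged usage sketch (the pfn limit constant is an assumption for 4K pages; error handling abbreviated):

#include <linux/iova.h>

/* assumption: 4K pages, so the 32-bit boundary is pfn (1 << 20) - 1 */
#define MY_DMA_32BIT_PFN    ((1UL << (32 - 12)) - 1)

static void iova_demo(struct iova_domain *iovad)
{
    struct iova *iova;

    init_iova_domain(iovad, MY_DMA_32BIT_PFN);
    reserve_iova(iovad, 0, 0xff);       /* keep low pfns out of play */

    /* 16 pfns, size-aligned, below the 32-bit boundary */
    iova = alloc_iova(iovad, 16, MY_DMA_32BIT_PFN, true);
    if (!iova)
        return;

    if (find_iova(iovad, iova->pfn_lo) == iova)
        __free_iova(iovad, iova);       /* erase from rbtree and free */

    put_iova_domain(iovad);             /* destroys whatever remains */
}
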
-
-static int
-__is_range_overlap(struct rb_node *node,
-    unsigned long pfn_lo, unsigned long pfn_hi)
-{
-    struct iova *iova = container_of(node, struct iova, node);
-
-    if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
-        return 1;
-    return 0;
-}
-
-static struct iova *
-__insert_new_range(struct iova_domain *iovad,
-    unsigned long pfn_lo, unsigned long pfn_hi)
-{
-    struct iova *iova;
-
-    iova = alloc_iova_mem();
-    if (!iova)
-        return iova;
-
-    iova->pfn_hi = pfn_hi;
-    iova->pfn_lo = pfn_lo;
-    iova_insert_rbtree(&iovad->rbroot, iova);
-    return iova;
-}
-
-static void
-__adjust_overlap_range(struct iova *iova,
-    unsigned long *pfn_lo, unsigned long *pfn_hi)
-{
-    if (*pfn_lo < iova->pfn_lo)
-        iova->pfn_lo = *pfn_lo;
-    if (*pfn_hi > iova->pfn_hi)
-        *pfn_lo = iova->pfn_hi + 1;
-}
-
-/**
- * reserve_iova - reserves an iova in the given range
- * @iovad: iova domain pointer
- * @pfn_lo: lower page frame address
- * @pfn_hi: higher pfn address
- * This function reserves the address range from pfn_lo to pfn_hi so
- * that this address is not dished out as part of alloc_iova.
- */
-struct iova *
-reserve_iova(struct iova_domain *iovad,
-    unsigned long pfn_lo, unsigned long pfn_hi)
-{
-    struct rb_node *node;
-    unsigned long flags;
-    struct iova *iova;
-    unsigned int overlap = 0;
-
-    spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
-    for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
-        if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-            iova = container_of(node, struct iova, node);
-            __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
-            if ((pfn_lo >= iova->pfn_lo) &&
-                (pfn_hi <= iova->pfn_hi))
-                goto finish;
-            overlap = 1;
-
-        } else if (overlap)
-                break;
-    }
-
-    /* We are here either because this is the first reserver node
-     * or need to insert remaining non overlap addr range
-     */
-    iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
-finish:
-
-    spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-    return iova;
-}
-
-/**
- * copy_reserved_iova - copies the reserved iovas between domains
- * @from: source domain from where to copy
- * @to: destination domain where to copy
- * This function copies reserved iovas from one domain to
- * the other.
- */ -void -copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) -{ - unsigned long flags; - struct rb_node *node; - - spin_lock_irqsave(&from->iova_rbtree_lock, flags); - for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { - struct iova *iova = container_of(node, struct iova, node); - struct iova *new_iova; - new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); - if (!new_iova) - printk(KERN_ERR "Reserve iova range %lx@%lx failed\n", - iova->pfn_lo, iova->pfn_lo); - } - spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); -} diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 731e20265ace..b7bf11dd546a 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -184,8 +184,6 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev) return NULL; } -struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); - /* PCI slot sysfs helper code */ #define to_pci_slot(s) container_of(s, struct pci_slot, kobj) diff --git a/include/linux/pci.h b/include/linux/pci.h index c446b5ca2d38..970bfe0941c3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1589,5 +1589,16 @@ int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt); int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw); +/** + * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device + * @pdev: the PCI device + * + * if the device is PCIE, return NULL + * if the device isn't connected to a PCIe bridge (that is its parent is a + * legacy PCI bridge and the bridge is directly connected to bus 0), return its + * parent + */ +struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); + #endif /* __KERNEL__ */ #endif /* LINUX_PCI_H */ -- cgit v1.2.3 From 403f81d8ee532c976d50a5e1051f14ec78ae8db3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 14 Jun 2011 16:44:25 +0200 Subject: iommu/amd: Move missing parts to drivers/iommu A few parts of the driver were missing in drivers/iommu. Move them there to have the complete driver in that directory. Signed-off-by: Joerg Roedel --- arch/x86/include/asm/amd_iommu.h | 35 - arch/x86/include/asm/amd_iommu_proto.h | 54 -- arch/x86/include/asm/amd_iommu_types.h | 580 ------------ arch/x86/kernel/Makefile | 1 - arch/x86/kernel/amd_iommu_init.c | 1572 ------------------------------- drivers/iommu/Makefile | 2 +- drivers/iommu/amd_iommu.c | 7 +- drivers/iommu/amd_iommu_init.c | 1574 ++++++++++++++++++++++++++++++++ drivers/iommu/amd_iommu_proto.h | 54 ++ drivers/iommu/amd_iommu_types.h | 580 ++++++++++++ include/linux/amd-iommu.h | 35 + 11 files changed, 2248 insertions(+), 2246 deletions(-) delete mode 100644 arch/x86/include/asm/amd_iommu.h delete mode 100644 arch/x86/include/asm/amd_iommu_proto.h delete mode 100644 arch/x86/include/asm/amd_iommu_types.h delete mode 100644 arch/x86/kernel/amd_iommu_init.c create mode 100644 drivers/iommu/amd_iommu_init.c create mode 100644 drivers/iommu/amd_iommu_proto.h create mode 100644 drivers/iommu/amd_iommu_types.h create mode 100644 include/linux/amd-iommu.h (limited to 'include/linux') diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h deleted file mode 100644 index a6863a2dec1f..000000000000 --- a/arch/x86/include/asm/amd_iommu.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 
- * Author: Joerg Roedel - * Leo Duran - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_H -#define _ASM_X86_AMD_IOMMU_H - -#include - -#ifdef CONFIG_AMD_IOMMU - -extern int amd_iommu_detect(void); - -#else - -static inline int amd_iommu_detect(void) { return -ENODEV; } - -#endif - -#endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h deleted file mode 100644 index 55d95eb789b3..000000000000 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_PROTO_H -#define _ASM_X86_AMD_IOMMU_PROTO_H - -#include - -extern int amd_iommu_init_dma_ops(void); -extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); -extern irqreturn_t amd_iommu_int_handler(int irq, void *data); -extern void amd_iommu_apply_erratum_63(u16 devid); -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); -extern int amd_iommu_init_devices(void); -extern void amd_iommu_uninit_devices(void); -extern void amd_iommu_init_notifier(void); -extern void amd_iommu_init_api(void); -#ifndef CONFIG_AMD_IOMMU_STATS - -static inline void amd_iommu_stats_init(void) { } - -#endif /* !CONFIG_AMD_IOMMU_STATS */ - -static inline bool is_rd890_iommu(struct pci_dev *pdev) -{ - return (pdev->vendor == PCI_VENDOR_ID_ATI) && - (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); -} - -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - -#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h deleted file mode 100644 index 4c9982995414..000000000000 --- a/arch/x86/include/asm/amd_iommu_types.h +++ /dev/null @@ -1,580 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel - * Leo Duran - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _ASM_X86_AMD_IOMMU_TYPES_H -#define _ASM_X86_AMD_IOMMU_TYPES_H - -#include -#include -#include -#include - -/* - * Maximum number of IOMMUs supported - */ -#define MAX_IOMMUS 32 - -/* - * some size calculation constants - */ -#define DEV_TABLE_ENTRY_SIZE 32 -#define ALIAS_TABLE_ENTRY_SIZE 2 -#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) - -/* Length of the MMIO region for the AMD IOMMU */ -#define MMIO_REGION_LENGTH 0x4000 - -/* Capability offsets used by the driver */ -#define MMIO_CAP_HDR_OFFSET 0x00 -#define MMIO_RANGE_OFFSET 0x0c -#define MMIO_MISC_OFFSET 0x10 - -/* Masks, shifts and macros to parse the device range capability */ -#define MMIO_RANGE_LD_MASK 0xff000000 -#define MMIO_RANGE_FD_MASK 0x00ff0000 -#define MMIO_RANGE_BUS_MASK 0x0000ff00 -#define MMIO_RANGE_LD_SHIFT 24 -#define MMIO_RANGE_FD_SHIFT 16 -#define MMIO_RANGE_BUS_SHIFT 8 -#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT) -#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT) -#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT) -#define MMIO_MSI_NUM(x) ((x) & 0x1f) - -/* Flag masks for the AMD IOMMU exclusion range */ -#define MMIO_EXCL_ENABLE_MASK 0x01ULL -#define MMIO_EXCL_ALLOW_MASK 0x02ULL - -/* Used offsets into the MMIO space */ -#define MMIO_DEV_TABLE_OFFSET 0x0000 -#define MMIO_CMD_BUF_OFFSET 0x0008 -#define MMIO_EVT_BUF_OFFSET 0x0010 -#define MMIO_CONTROL_OFFSET 0x0018 -#define MMIO_EXCL_BASE_OFFSET 0x0020 -#define MMIO_EXCL_LIMIT_OFFSET 0x0028 -#define MMIO_EXT_FEATURES 0x0030 -#define MMIO_CMD_HEAD_OFFSET 0x2000 -#define MMIO_CMD_TAIL_OFFSET 0x2008 -#define MMIO_EVT_HEAD_OFFSET 0x2010 -#define MMIO_EVT_TAIL_OFFSET 0x2018 -#define MMIO_STATUS_OFFSET 0x2020 - - -/* Extended Feature Bits */ -#define FEATURE_PREFETCH (1ULL<<0) -#define FEATURE_PPR (1ULL<<1) -#define FEATURE_X2APIC (1ULL<<2) -#define FEATURE_NX (1ULL<<3) -#define FEATURE_GT (1ULL<<4) -#define FEATURE_IA (1ULL<<6) -#define FEATURE_GA (1ULL<<7) -#define FEATURE_HE (1ULL<<8) -#define FEATURE_PC (1ULL<<9) - -/* MMIO status bits */ -#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 - -/* event logging constants */ -#define EVENT_ENTRY_SIZE 0x10 -#define EVENT_TYPE_SHIFT 28 -#define EVENT_TYPE_MASK 0xf -#define EVENT_TYPE_ILL_DEV 0x1 -#define EVENT_TYPE_IO_FAULT 0x2 -#define EVENT_TYPE_DEV_TAB_ERR 0x3 -#define EVENT_TYPE_PAGE_TAB_ERR 0x4 -#define EVENT_TYPE_ILL_CMD 0x5 -#define EVENT_TYPE_CMD_HARD_ERR 0x6 -#define EVENT_TYPE_IOTLB_INV_TO 0x7 -#define EVENT_TYPE_INV_DEV_REQ 0x8 -#define EVENT_DEVID_MASK 0xffff -#define EVENT_DEVID_SHIFT 0 -#define EVENT_DOMID_MASK 0xffff -#define EVENT_DOMID_SHIFT 0 -#define EVENT_FLAGS_MASK 0xfff -#define EVENT_FLAGS_SHIFT 0x10 - -/* feature control bits */ -#define CONTROL_IOMMU_EN 0x00ULL -#define CONTROL_HT_TUN_EN 0x01ULL -#define CONTROL_EVT_LOG_EN 0x02ULL -#define CONTROL_EVT_INT_EN 0x03ULL -#define CONTROL_COMWAIT_EN 0x04ULL -#define CONTROL_PASSPW_EN 0x08ULL -#define CONTROL_RESPASSPW_EN 0x09ULL -#define CONTROL_COHERENT_EN 0x0aULL -#define CONTROL_ISOC_EN 0x0bULL 
-#define CONTROL_CMDBUF_EN	0x0cULL
-#define CONTROL_PPFLOG_EN	0x0dULL
-#define CONTROL_PPFINT_EN	0x0eULL
-
-/* command specific defines */
-#define CMD_COMPL_WAIT		0x01
-#define CMD_INV_DEV_ENTRY	0x02
-#define CMD_INV_IOMMU_PAGES	0x03
-#define CMD_INV_IOTLB_PAGES	0x04
-#define CMD_INV_ALL		0x08
-
-#define CMD_COMPL_WAIT_STORE_MASK	0x01
-#define CMD_COMPL_WAIT_INT_MASK		0x02
-#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
-#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
-
-#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL
-
-/* macros and definitions for device table entries */
-#define DEV_ENTRY_VALID		0x00
-#define DEV_ENTRY_TRANSLATION	0x01
-#define DEV_ENTRY_IR		0x3d
-#define DEV_ENTRY_IW		0x3e
-#define DEV_ENTRY_NO_PAGE_FAULT	0x62
-#define DEV_ENTRY_EX		0x67
-#define DEV_ENTRY_SYSMGT1	0x68
-#define DEV_ENTRY_SYSMGT2	0x69
-#define DEV_ENTRY_INIT_PASS	0xb8
-#define DEV_ENTRY_EINT_PASS	0xb9
-#define DEV_ENTRY_NMI_PASS	0xba
-#define DEV_ENTRY_LINT0_PASS	0xbe
-#define DEV_ENTRY_LINT1_PASS	0xbf
-#define DEV_ENTRY_MODE_MASK	0x07
-#define DEV_ENTRY_MODE_SHIFT	0x09
-
-/* constants to configure the command buffer */
-#define CMD_BUFFER_SIZE		8192
-#define CMD_BUFFER_UNINITIALIZED 1
-#define CMD_BUFFER_ENTRIES	512
-#define MMIO_CMD_SIZE_SHIFT	56
-#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)
-
-/* constants for event buffer handling */
-#define EVT_BUFFER_SIZE		8192 /* 512 entries */
-#define EVT_LEN_MASK		(0x9ULL << 56)
-
-#define PAGE_MODE_NONE		0x00
-#define PAGE_MODE_1_LEVEL	0x01
-#define PAGE_MODE_2_LEVEL	0x02
-#define PAGE_MODE_3_LEVEL	0x03
-#define PAGE_MODE_4_LEVEL	0x04
-#define PAGE_MODE_5_LEVEL	0x05
-#define PAGE_MODE_6_LEVEL	0x06
-
-#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
-#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
-				 ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
-				 (0xffffffffffffffffULL))
-#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
-#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
-#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
-				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)
-
-#define PM_MAP_4k		0
-#define PM_ADDR_MASK		0x000ffffffffff000ULL
-#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
-				 (~((1ULL << (12 + ((lvl) * 9))) - 1)))
-#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))
-
-/*
- * Returns the page table level to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_LEVEL(pagesize) \
-		((__ffs(pagesize) - 12) / 9)
-/*
- * Returns the number of ptes to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_PTE_COUNT(pagesize) \
-		(1ULL << ((__ffs(pagesize) - 12) % 9))
-
-/*
- * Aligns a given io-virtual address to a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_ALIGN(address, pagesize) \
-		((address) & ~((pagesize) - 1))
-/*
- * Creates an IOMMU PTE for an address and a given pagesize
- * The PTE has no permission bits set
- * Pagesize is expected to be a power-of-two larger than 4096
- */
-#define PAGE_SIZE_PTE(address, pagesize)		\
-		(((address) | ((pagesize) - 1)) &	\
-		 (~(pagesize >> 1)) & PM_ADDR_MASK)
-
-/*
- * Takes a PTE value with mode=0x07 and returns the page size it maps
- */
-#define PTE_PAGE_SIZE(pte) \
-		(1ULL << (1 + ffz(((pte) | 0xfffULL))))
-
-#define IOMMU_PTE_P  (1ULL << 0)
-#define IOMMU_PTE_TV (1ULL << 1)
-#define IOMMU_PTE_U  (1ULL << 59)
-#define IOMMU_PTE_FC (1ULL << 60)
-#define IOMMU_PTE_IR (1ULL << 61)
-#define IOMMU_PTE_IW (1ULL << 62)
-
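The PAGE_SIZE_LEVEL/PAGE_SIZE_PTE_COUNT/PAGE_SIZE_PTE/PTE_PAGE_SIZE macros above carry the whole variable-size leaf-PTE encoding, and they are easy to sanity-check outside the kernel. The following standalone userspace sketch (not part of the patch) copies those four macros, models the kernel's __ffs()/ffz() with GCC builtins, and round-trips a 2 MiB mapping; the address and size are illustrative:

	#include <stdio.h>

	/* userspace stand-ins for the kernel's __ffs()/ffz() (inputs never 0/~0 here) */
	#define __ffs(x) ((unsigned long long)__builtin_ctzll((unsigned long long)(x)))
	#define ffz(x)   ((unsigned long long)__builtin_ctzll(~(unsigned long long)(x)))

	#define PM_ADDR_MASK 0x000ffffffffff000ULL
	#define PAGE_SIZE_LEVEL(pagesize)     ((__ffs(pagesize) - 12) / 9)
	#define PAGE_SIZE_PTE_COUNT(pagesize) (1ULL << ((__ffs(pagesize) - 12) % 9))
	#define PAGE_SIZE_PTE(address, pagesize) \
		(((address) | ((pagesize) - 1)) & (~((pagesize) >> 1)) & PM_ADDR_MASK)
	#define PTE_PAGE_SIZE(pte) (1ULL << (1 + ffz((pte) | 0xfffULL)))

	int main(void)
	{
		unsigned long long addr  = 0x12400000ULL; /* 2 MiB aligned, illustrative */
		unsigned long long psize = 2ULL << 20;    /* 2 MiB page */
		unsigned long long pte   = PAGE_SIZE_PTE(addr, psize);

		/* expect level 1, one PTE, and a round-trip back to 2 MiB */
		printf("level=%llu ptes=%llu pte=%#llx size=%#llx\n",
		       PAGE_SIZE_LEVEL(psize), PAGE_SIZE_PTE_COUNT(psize),
		       pte, PTE_PAGE_SIZE(pte));
		return 0;
	}

Compiled with gcc this prints level=1 ptes=1 pte=0x124ff000 size=0x200000: the encoding sets the address bits below the page size while forcing bit 20 clear, and PTE_PAGE_SIZE() recovers the size from the position of that lowest zero bit.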
-#define DTE_FLAG_IOTLB 0x01 - -#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) -#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) -#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) -#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07) - -#define IOMMU_PROT_MASK 0x03 -#define IOMMU_PROT_IR 0x01 -#define IOMMU_PROT_IW 0x02 - -/* IOMMU capabilities */ -#define IOMMU_CAP_IOTLB 24 -#define IOMMU_CAP_NPCACHE 26 -#define IOMMU_CAP_EFR 27 - -#define MAX_DOMAIN_ID 65536 - -/* FIXME: move this macro to */ -#define PCI_BUS(x) (((x) >> 8) & 0xff) - -/* Protection domain flags */ -#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ -#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops - domain for an IOMMU */ -#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page - translation */ - -extern bool amd_iommu_dump; -#define DUMP_printk(format, arg...) \ - do { \ - if (amd_iommu_dump) \ - printk(KERN_INFO "AMD-Vi: " format, ## arg); \ - } while(0); - -/* global flag if IOMMUs cache non-present entries */ -extern bool amd_iommu_np_cache; -/* Only true if all IOMMUs support device IOTLBs */ -extern bool amd_iommu_iotlb_sup; - -/* - * Make iterating over all IOMMUs easier - */ -#define for_each_iommu(iommu) \ - list_for_each_entry((iommu), &amd_iommu_list, list) -#define for_each_iommu_safe(iommu, next) \ - list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) - -#define APERTURE_RANGE_SHIFT 27 /* 128 MB */ -#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT) -#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT) -#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */ -#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) -#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) - -/* - * This structure contains generic data for IOMMU protection domains - * independent of their use. - */ -struct protection_domain { - struct list_head list; /* for list of all protection domains */ - struct list_head dev_list; /* List of all devices in this domain */ - spinlock_t lock; /* mostly used to lock the page table*/ - struct mutex api_lock; /* protect page tables in the iommu-api path */ - u16 id; /* the domain id written to the device table */ - int mode; /* paging mode (0-6 levels) */ - u64 *pt_root; /* page table root pointer */ - unsigned long flags; /* flags to find out type of domain */ - bool updated; /* complete domain flush required */ - unsigned dev_cnt; /* devices assigned to this domain */ - unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ - void *priv; /* private data */ - -}; - -/* - * This struct contains device specific data for the IOMMU - */ -struct iommu_dev_data { - struct list_head list; /* For domain->dev_list */ - struct device *dev; /* Device this data belong to */ - struct device *alias; /* The Alias Device */ - struct protection_domain *domain; /* Domain the device is bound to */ - atomic_t bind; /* Domain attach reverent count */ -}; - -/* - * For dynamic growth the aperture size is split into ranges of 128MB of - * DMA address space each. This struct represents one such range. - */ -struct aperture_range { - - /* address allocation bitmap */ - unsigned long *bitmap; - - /* - * Array of PTE pages for the aperture. In this array we save all the - * leaf pages of the domain page table used for the aperture. This way - * we don't need to walk the page table to find a specific PTE. We can - * just calculate its address in constant time. 
- */ - u64 *pte_pages[64]; - - unsigned long offset; -}; - -/* - * Data container for a dma_ops specific protection domain - */ -struct dma_ops_domain { - struct list_head list; - - /* generic protection domain information */ - struct protection_domain domain; - - /* size of the aperture for the mappings */ - unsigned long aperture_size; - - /* address we start to search for free addresses */ - unsigned long next_address; - - /* address space relevant data */ - struct aperture_range *aperture[APERTURE_MAX_RANGES]; - - /* This will be set to true when TLB needs to be flushed */ - bool need_flush; - - /* - * if this is a preallocated domain, keep the device for which it was - * preallocated in this variable - */ - u16 target_dev; -}; - -/* - * Structure where we save information about one hardware AMD IOMMU in the - * system. - */ -struct amd_iommu { - struct list_head list; - - /* Index within the IOMMU array */ - int index; - - /* locks the accesses to the hardware */ - spinlock_t lock; - - /* Pointer to PCI device of this IOMMU */ - struct pci_dev *dev; - - /* physical address of MMIO space */ - u64 mmio_phys; - /* virtual address of MMIO space */ - u8 *mmio_base; - - /* capabilities of that IOMMU read from ACPI */ - u32 cap; - - /* flags read from acpi table */ - u8 acpi_flags; - - /* Extended features */ - u64 features; - - /* - * Capability pointer. There could be more than one IOMMU per PCI - * device function if there are more than one AMD IOMMU capability - * pointers. - */ - u16 cap_ptr; - - /* pci domain of this IOMMU */ - u16 pci_seg; - - /* first device this IOMMU handles. read from PCI */ - u16 first_device; - /* last device this IOMMU handles. read from PCI */ - u16 last_device; - - /* start of exclusion range of that IOMMU */ - u64 exclusion_start; - /* length of exclusion range of that IOMMU */ - u64 exclusion_length; - - /* command buffer virtual address */ - u8 *cmd_buf; - /* size of command buffer */ - u32 cmd_buf_size; - - /* size of event buffer */ - u32 evt_buf_size; - /* event buffer virtual address */ - u8 *evt_buf; - /* MSI number for event interrupt */ - u16 evt_msi_num; - - /* true if interrupts for this IOMMU are already enabled */ - bool int_enabled; - - /* if one, we need to send a completion wait command */ - bool need_sync; - - /* default dma_ops domain for that IOMMU */ - struct dma_ops_domain *default_dom; - - /* - * We can't rely on the BIOS to restore all values on reinit, so we - * need to stash them - */ - - /* The iommu BAR */ - u32 stored_addr_lo; - u32 stored_addr_hi; - - /* - * Each iommu has 6 l1s, each of which is documented as having 0x12 - * registers - */ - u32 stored_l1[6][0x12]; - - /* The l2 indirect registers */ - u32 stored_l2[0x83]; -}; - -/* - * List with all IOMMUs in the system. This list is not locked because it is - * only written and read at driver initialization or suspend time - */ -extern struct list_head amd_iommu_list; - -/* - * Array with pointers to each IOMMU struct - * The indices are referenced in the protection domains - */ -extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; - -/* Number of IOMMUs present in the system */ -extern int amd_iommus_present; - -/* - * Declarations for the global list of all protection domains - */ -extern spinlock_t amd_iommu_pd_lock; -extern struct list_head amd_iommu_pd_list; - -/* - * Structure defining one entry in the device table - */ -struct dev_table_entry { - u32 data[8]; -}; - -/* - * One entry for unity mappings parsed out of the ACPI table. 
- */ -struct unity_map_entry { - struct list_head list; - - /* starting device id this entry is used for (including) */ - u16 devid_start; - /* end device id this entry is used for (including) */ - u16 devid_end; - - /* start address to unity map (including) */ - u64 address_start; - /* end address to unity map (including) */ - u64 address_end; - - /* required protection */ - int prot; -}; - -/* - * List of all unity mappings. It is not locked because as runtime it is only - * read. It is created at ACPI table parsing time. - */ -extern struct list_head amd_iommu_unity_map; - -/* - * Data structures for device handling - */ - -/* - * Device table used by hardware. Read and write accesses by software are - * locked with the amd_iommu_pd_table lock. - */ -extern struct dev_table_entry *amd_iommu_dev_table; - -/* - * Alias table to find requestor ids to device ids. Not locked because only - * read on runtime. - */ -extern u16 *amd_iommu_alias_table; - -/* - * Reverse lookup table to find the IOMMU which translates a specific device. - */ -extern struct amd_iommu **amd_iommu_rlookup_table; - -/* size of the dma_ops aperture as power of 2 */ -extern unsigned amd_iommu_aperture_order; - -/* largest PCI device id we expect translation requests for */ -extern u16 amd_iommu_last_bdf; - -/* allocation bitmap for domain ids */ -extern unsigned long *amd_iommu_pd_alloc_bitmap; - -/* - * If true, the addresses will be flushed on unmap time, not when - * they are reused - */ -extern bool amd_iommu_unmap_flush; - -/* takes bus and device/function and returns the device id - * FIXME: should that be in generic PCI code? */ -static inline u16 calc_devid(u8 bus, u8 devfn) -{ - return (((u16)bus) << 8) | devfn; -} - -#ifdef CONFIG_AMD_IOMMU_STATS - -struct __iommu_counter { - char *name; - struct dentry *dent; - u64 value; -}; - -#define DECLARE_STATS_COUNTER(nm) \ - static struct __iommu_counter nm = { \ - .name = #nm, \ - } - -#define INC_STATS_COUNTER(name) name.value += 1 -#define ADD_STATS_COUNTER(name, x) name.value += (x) -#define SUB_STATS_COUNTER(name, x) name.value -= (x) - -#else /* CONFIG_AMD_IOMMU_STATS */ - -#define DECLARE_STATS_COUNTER(name) -#define INC_STATS_COUNTER(name) -#define ADD_STATS_COUNTER(name, x) -#define SUB_STATS_COUNTER(name, x) - -#endif /* CONFIG_AMD_IOMMU_STATS */ - -#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index aef02989c930..11817ff85399 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -123,7 +123,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c deleted file mode 100644 index bfc8453bd98d..000000000000 --- a/arch/x86/kernel/amd_iommu_init.c +++ /dev/null @@ -1,1572 +0,0 @@ -/* - * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. - * Author: Joerg Roedel - * Leo Duran - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -/* - * definitions for the ACPI scanning code - */ -#define IVRS_HEADER_LENGTH 48 - -#define ACPI_IVHD_TYPE 0x10 -#define ACPI_IVMD_TYPE_ALL 0x20 -#define ACPI_IVMD_TYPE 0x21 -#define ACPI_IVMD_TYPE_RANGE 0x22 - -#define IVHD_DEV_ALL 0x01 -#define IVHD_DEV_SELECT 0x02 -#define IVHD_DEV_SELECT_RANGE_START 0x03 -#define IVHD_DEV_RANGE_END 0x04 -#define IVHD_DEV_ALIAS 0x42 -#define IVHD_DEV_ALIAS_RANGE 0x43 -#define IVHD_DEV_EXT_SELECT 0x46 -#define IVHD_DEV_EXT_SELECT_RANGE 0x47 - -#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 -#define IVHD_FLAG_PASSPW_EN_MASK 0x02 -#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 -#define IVHD_FLAG_ISOC_EN_MASK 0x08 - -#define IVMD_FLAG_EXCL_RANGE 0x08 -#define IVMD_FLAG_UNITY_MAP 0x01 - -#define ACPI_DEVFLAG_INITPASS 0x01 -#define ACPI_DEVFLAG_EXTINT 0x02 -#define ACPI_DEVFLAG_NMI 0x04 -#define ACPI_DEVFLAG_SYSMGT1 0x10 -#define ACPI_DEVFLAG_SYSMGT2 0x20 -#define ACPI_DEVFLAG_LINT0 0x40 -#define ACPI_DEVFLAG_LINT1 0x80 -#define ACPI_DEVFLAG_ATSDIS 0x10000000 - -/* - * ACPI table definitions - * - * These data structures are laid over the table to parse the important values - * out of it. - */ - -/* - * structure describing one IOMMU in the ACPI table. Typically followed by one - * or more ivhd_entrys. - */ -struct ivhd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 cap_ptr; - u64 mmio_phys; - u16 pci_seg; - u16 info; - u32 reserved; -} __attribute__((packed)); - -/* - * A device entry describing which devices a specific IOMMU translates and - * which requestor ids they use. - */ -struct ivhd_entry { - u8 type; - u16 devid; - u8 flags; - u32 ext; -} __attribute__((packed)); - -/* - * An AMD IOMMU memory definition structure. It defines things like exclusion - * ranges for devices and regions that should be unity mapped. - */ -struct ivmd_header { - u8 type; - u8 flags; - u16 length; - u16 devid; - u16 aux; - u64 resv; - u64 range_start; - u64 range_length; -} __attribute__((packed)); - -bool amd_iommu_dump; - -static int __initdata amd_iommu_detected; -static bool __initdata amd_iommu_disabled; - -u16 amd_iommu_last_bdf; /* largest PCI device id we have - to handle */ -LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings - we find in ACPI */ -bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ - -LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the - system */ - -/* Array to assign indices to IOMMUs*/ -struct amd_iommu *amd_iommus[MAX_IOMMUS]; -int amd_iommus_present; - -/* IOMMUs have a non-present cache? */ -bool amd_iommu_np_cache __read_mostly; -bool amd_iommu_iotlb_sup __read_mostly = true; - -/* - * The ACPI table parsing functions set this variable on an error - */ -static int __initdata amd_iommu_init_err; - -/* - * List of protection domains - used during resume - */ -LIST_HEAD(amd_iommu_pd_list); -spinlock_t amd_iommu_pd_lock; - -/* - * Pointer to the device table which is shared by all AMD IOMMUs - * it is indexed by the PCI device id or the HT unit id and contains - * information about the domain the device belongs to as well as the - * page table root pointer. 
- */ -struct dev_table_entry *amd_iommu_dev_table; - -/* - * The alias table is a driver specific data structure which contains the - * mappings of the PCI device ids to the actual requestor ids on the IOMMU. - * More than one device can share the same requestor id. - */ -u16 *amd_iommu_alias_table; - -/* - * The rlookup table is used to find the IOMMU which is responsible - * for a specific device. It is also indexed by the PCI device id. - */ -struct amd_iommu **amd_iommu_rlookup_table; - -/* - * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap - * to know which ones are already in use. - */ -unsigned long *amd_iommu_pd_alloc_bitmap; - -static u32 dev_table_size; /* size of the device table */ -static u32 alias_table_size; /* size of the alias table */ -static u32 rlookup_table_size; /* size if the rlookup table */ - -/* - * This function flushes all internal caches of - * the IOMMU used by this driver. - */ -extern void iommu_flush_all_caches(struct amd_iommu *iommu); - -static inline void update_last_devid(u16 devid) -{ - if (devid > amd_iommu_last_bdf) - amd_iommu_last_bdf = devid; -} - -static inline unsigned long tbl_size(int entry_size) -{ - unsigned shift = PAGE_SHIFT + - get_order(((int)amd_iommu_last_bdf + 1) * entry_size); - - return 1UL << shift; -} - -/* Access to l1 and l2 indexed register spaces */ - -static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); - pci_read_config_dword(iommu->dev, 0xfc, &val); - return val; -} - -static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); - pci_write_config_dword(iommu->dev, 0xfc, val); - pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); -} - -static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) -{ - u32 val; - - pci_write_config_dword(iommu->dev, 0xf0, address); - pci_read_config_dword(iommu->dev, 0xf4, &val); - return val; -} - -static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) -{ - pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); - pci_write_config_dword(iommu->dev, 0xf4, val); -} - -/**************************************************************************** - * - * AMD IOMMU MMIO register space handling functions - * - * These functions are used to program the IOMMU device registers in - * MMIO space required for that driver. - * - ****************************************************************************/ - -/* - * This function set the exclusion range in the IOMMU. 
DMA accesses to the - * exclusion range are passed through untranslated - */ -static void iommu_set_exclusion_range(struct amd_iommu *iommu) -{ - u64 start = iommu->exclusion_start & PAGE_MASK; - u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; - u64 entry; - - if (!iommu->exclusion_start) - return; - - entry = start | MMIO_EXCL_ENABLE_MASK; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, - &entry, sizeof(entry)); - - entry = limit; - memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, - &entry, sizeof(entry)); -} - -/* Programs the physical address of the device table into the IOMMU hardware */ -static void __init iommu_set_device_table(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->mmio_base == NULL); - - entry = virt_to_phys(amd_iommu_dev_table); - entry |= (dev_table_size >> 12) - 1; - memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, - &entry, sizeof(entry)); -} - -/* Generic functions to enable/disable certain features of the IOMMU. */ -static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl |= (1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) -{ - u32 ctrl; - - ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); - ctrl &= ~(1 << bit); - writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); -} - -/* Function to enable the hardware */ -static void iommu_enable(struct amd_iommu *iommu) -{ - static const char * const feat_str[] = { - "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", - "IA", "GA", "HE", "PC", NULL - }; - int i; - - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", - dev_name(&iommu->dev->dev), iommu->cap_ptr); - - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - printk(KERN_CONT " extended features: "); - for (i = 0; feat_str[i]; ++i) - if (iommu_feature(iommu, (1ULL << i))) - printk(KERN_CONT " %s", feat_str[i]); - } - printk(KERN_CONT "\n"); - - iommu_feature_enable(iommu, CONTROL_IOMMU_EN); -} - -static void iommu_disable(struct amd_iommu *iommu) -{ - /* Disable command buffer */ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - /* Disable event logging and event interrupts */ - iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); - iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); - - /* Disable IOMMU hardware itself */ - iommu_feature_disable(iommu, CONTROL_IOMMU_EN); -} - -/* - * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in - * the system has one. - */ -static u8 * __init iommu_map_mmio_space(u64 address) -{ - u8 *ret; - - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { - pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", - address); - pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); - return NULL; - } - - ret = ioremap_nocache(address, MMIO_REGION_LENGTH); - if (ret != NULL) - return ret; - - release_mem_region(address, MMIO_REGION_LENGTH); - - return NULL; -} - -static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) -{ - if (iommu->mmio_base) - iounmap(iommu->mmio_base); - release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); -} - -/**************************************************************************** - * - * The functions below belong to the first pass of AMD IOMMU ACPI table - * parsing. In this pass we try to find out the highest device id this - * code has to handle. 
Upon this information the size of the shared data - * structures is determined later. - * - ****************************************************************************/ - -/* - * This function calculates the length of a given IVHD entry - */ -static inline int ivhd_entry_length(u8 *ivhd) -{ - return 0x04 << (*ivhd >> 6); -} - -/* - * This function reads the last device id the IOMMU has to handle from the PCI - * capability header for this IOMMU - */ -static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) -{ - u32 cap; - - cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); - update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); - - return 0; -} - -/* - * After reading the highest device id from the IOMMU PCI capability header - * this function looks if there is a higher device id defined in the ACPI table - */ -static int __init find_last_devid_from_ivhd(struct ivhd_header *h) -{ - u8 *p = (void *)h, *end = (void *)h; - struct ivhd_entry *dev; - - p += sizeof(*h); - end += h->length; - - find_last_devid_on_pci(PCI_BUS(h->devid), - PCI_SLOT(h->devid), - PCI_FUNC(h->devid), - h->cap_ptr); - - while (p < end) { - dev = (struct ivhd_entry *)p; - switch (dev->type) { - case IVHD_DEV_SELECT: - case IVHD_DEV_RANGE_END: - case IVHD_DEV_ALIAS: - case IVHD_DEV_EXT_SELECT: - /* all the above subfield types refer to device ids */ - update_last_devid(dev->devid); - break; - default: - break; - } - p += ivhd_entry_length(p); - } - - WARN_ON(p != end); - - return 0; -} - -/* - * Iterate over all IVHD entries in the ACPI table and find the highest device - * id which we need to handle. This is the first of three functions which parse - * the ACPI table. So we check the checksum here. - */ -static int __init find_last_devid_acpi(struct acpi_table_header *table) -{ - int i; - u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; - struct ivhd_header *h; - - /* - * Validate checksum here so we don't need to do it when - * we actually parse the table - */ - for (i = 0; i < table->length; ++i) - checksum += p[i]; - if (checksum != 0) { - /* ACPI table corrupt */ - amd_iommu_init_err = -ENODEV; - return 0; - } - - p += IVRS_HEADER_LENGTH; - - end += table->length; - while (p < end) { - h = (struct ivhd_header *)p; - switch (h->type) { - case ACPI_IVHD_TYPE: - find_last_devid_from_ivhd(h); - break; - default: - break; - } - p += h->length; - } - WARN_ON(p != end); - - return 0; -} - -/**************************************************************************** - * - * The following functions belong the the code path which parses the ACPI table - * the second time. In this ACPI parsing iteration we allocate IOMMU specific - * data structures, initialize the device/alias/rlookup table and also - * basically initialize the hardware. - * - ****************************************************************************/ - -/* - * Allocates the command buffer. This buffer is per AMD IOMMU. We can - * write commands to that buffer later and the IOMMU will execute them - * asynchronously - */ -static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) -{ - u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(CMD_BUFFER_SIZE)); - - if (cmd_buf == NULL) - return NULL; - - iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; - - return cmd_buf; -} - -/* - * This function resets the command buffer if the IOMMU stopped fetching - * commands from it. 
- */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) -{ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); -} - -/* - * This function writes the command buffer address to the hardware and - * enables it. - */ -static void iommu_enable_command_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->cmd_buf == NULL); - - entry = (u64)virt_to_phys(iommu->cmd_buf); - entry |= MMIO_CMD_SIZE_512; - - memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, - &entry, sizeof(entry)); - - amd_iommu_reset_cmd_buffer(iommu); - iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); -} - -static void __init free_command_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->cmd_buf, - get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); -} - -/* allocates the memory where the IOMMU will log its events to */ -static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) -{ - iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(EVT_BUFFER_SIZE)); - - if (iommu->evt_buf == NULL) - return NULL; - - iommu->evt_buf_size = EVT_BUFFER_SIZE; - - return iommu->evt_buf; -} - -static void iommu_enable_event_buffer(struct amd_iommu *iommu) -{ - u64 entry; - - BUG_ON(iommu->evt_buf == NULL); - - entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; - - memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, - &entry, sizeof(entry)); - - /* set head and tail to zero manually */ - writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); -} - -static void __init free_event_buffer(struct amd_iommu *iommu) -{ - free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); -} - -/* sets a specific bit in the device table entry. 
*/ -static void set_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - amd_iommu_dev_table[devid].data[i] |= (1 << _bit); -} - -static int get_dev_entry_bit(u16 devid, u8 bit) -{ - int i = (bit >> 5) & 0x07; - int _bit = bit & 0x1f; - - return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; -} - - -void amd_iommu_apply_erratum_63(u16 devid) -{ - int sysmgt; - - sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | - (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); - - if (sysmgt == 0x01) - set_dev_entry_bit(devid, DEV_ENTRY_IW); -} - -/* Writes the specific IOMMU for a device into the rlookup table */ -static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) -{ - amd_iommu_rlookup_table[devid] = iommu; -} - -/* - * This function takes the device specific flags read from the ACPI - * table and sets up the device table entry with that information - */ -static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, - u16 devid, u32 flags, u32 ext_flags) -{ - if (flags & ACPI_DEVFLAG_INITPASS) - set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); - if (flags & ACPI_DEVFLAG_EXTINT) - set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); - if (flags & ACPI_DEVFLAG_NMI) - set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); - if (flags & ACPI_DEVFLAG_SYSMGT1) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); - if (flags & ACPI_DEVFLAG_SYSMGT2) - set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); - if (flags & ACPI_DEVFLAG_LINT0) - set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); - if (flags & ACPI_DEVFLAG_LINT1) - set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); - - amd_iommu_apply_erratum_63(devid); - - set_iommu_for_device(iommu, devid); -} - -/* - * Reads the device exclusion range from ACPI and initialize IOMMU with - * it - */ -static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) -{ - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - - if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) - return; - - if (iommu) { - /* - * We only can configure exclusion ranges per IOMMU, not - * per device. But we can enable the exclusion range per - * device. This is done here - */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); - iommu->exclusion_start = m->range_start; - iommu->exclusion_length = m->range_length; - } -} - -/* - * This function reads some important data from the IOMMU PCI space and - * initializes the driver data structure with it. 
It reads the hardware - * capabilities and the first/last device entries - */ -static void __init init_iommu_from_pci(struct amd_iommu *iommu) -{ - int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; - int i, j; - - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, - &iommu->cap); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, - &range); - pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, - &misc); - - iommu->first_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_FD(range)); - iommu->last_device = calc_devid(MMIO_GET_BUS(range), - MMIO_GET_LD(range)); - iommu->evt_msi_num = MMIO_MSI_NUM(misc); - - if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) - amd_iommu_iotlb_sup = false; - - /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - - iommu->features = ((u64)high << 32) | low; - - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * Some rd890 systems may not be fully reconfigured by the BIOS, so - * it's necessary for us to store this information so it can be - * reprogrammed on resume - */ - - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, - &iommu->stored_addr_lo); - pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, - &iommu->stored_addr_hi); - - /* Low bit locks writes to configuration space */ - iommu->stored_addr_lo &= ~1; - - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); - - for (i = 0; i < 0x83; i++) - iommu->stored_l2[i] = iommu_read_l2(iommu, i); -} - -/* - * Takes a pointer to an AMD IOMMU entry in the ACPI table and - * initializes the hardware and our data structures with it. - */ -static void __init init_iommu_from_acpi(struct amd_iommu *iommu, - struct ivhd_header *h) -{ - u8 *p = (u8 *)h; - u8 *end = p, flags = 0; - u16 devid = 0, devid_start = 0, devid_to = 0; - u32 dev_i, ext_flags = 0; - bool alias = false; - struct ivhd_entry *e; - - /* - * First save the recommended feature enable bits from ACPI - */ - iommu->acpi_flags = h->flags; - - /* - * Done. 
Now parse the device entries - */ - p += sizeof(struct ivhd_header); - end += h->length; - - - while (p < end) { - e = (struct ivhd_entry *)p; - switch (e->type) { - case IVHD_DEV_ALL: - - DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" - " last device %02x:%02x.%x flags: %02x\n", - PCI_BUS(iommu->first_device), - PCI_SLOT(iommu->first_device), - PCI_FUNC(iommu->first_device), - PCI_BUS(iommu->last_device), - PCI_SLOT(iommu->last_device), - PCI_FUNC(iommu->last_device), - e->flags); - - for (dev_i = iommu->first_device; - dev_i <= iommu->last_device; ++dev_i) - set_dev_entry_from_acpi(iommu, dev_i, - e->flags, 0); - break; - case IVHD_DEV_SELECT: - - DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " - "flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, 0); - break; - case IVHD_DEV_SELECT_RANGE_START: - - DUMP_printk(" DEV_SELECT_RANGE_START\t " - "devid: %02x:%02x.%x flags: %02x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags); - - devid_start = e->devid; - flags = e->flags; - ext_flags = 0; - alias = false; - break; - case IVHD_DEV_ALIAS: - - DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " - "flags: %02x devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid = e->devid; - devid_to = e->ext >> 8; - set_dev_entry_from_acpi(iommu, devid , e->flags, 0); - set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); - amd_iommu_alias_table[devid] = devid_to; - break; - case IVHD_DEV_ALIAS_RANGE: - - DUMP_printk(" DEV_ALIAS_RANGE\t\t " - "devid: %02x:%02x.%x flags: %02x " - "devid_to: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, - PCI_BUS(e->ext >> 8), - PCI_SLOT(e->ext >> 8), - PCI_FUNC(e->ext >> 8)); - - devid_start = e->devid; - flags = e->flags; - devid_to = e->ext >> 8; - ext_flags = 0; - alias = true; - break; - case IVHD_DEV_EXT_SELECT: - - DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " - "flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid = e->devid; - set_dev_entry_from_acpi(iommu, devid, e->flags, - e->ext); - break; - case IVHD_DEV_EXT_SELECT_RANGE: - - DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " - "%02x:%02x.%x flags: %02x ext: %08x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid), - e->flags, e->ext); - - devid_start = e->devid; - flags = e->flags; - ext_flags = e->ext; - alias = false; - break; - case IVHD_DEV_RANGE_END: - - DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", - PCI_BUS(e->devid), - PCI_SLOT(e->devid), - PCI_FUNC(e->devid)); - - devid = e->devid; - for (dev_i = devid_start; dev_i <= devid; ++dev_i) { - if (alias) { - amd_iommu_alias_table[dev_i] = devid_to; - set_dev_entry_from_acpi(iommu, - devid_to, flags, ext_flags); - } - set_dev_entry_from_acpi(iommu, dev_i, - flags, ext_flags); - } - break; - default: - break; - } - - p += ivhd_entry_length(p); - } -} - -/* Initializes the device->iommu mapping for the driver */ -static int __init init_iommu_devices(struct amd_iommu *iommu) -{ - u32 i; - - for (i = iommu->first_device; i <= iommu->last_device; ++i) - set_iommu_for_device(iommu, i); - - return 0; -} - -static void __init free_iommu_one(struct amd_iommu *iommu) -{ - free_command_buffer(iommu); - free_event_buffer(iommu); - 
	iommu_unmap_mmio_space(iommu);
-}
-
-static void __init free_iommu_all(void)
-{
-	struct amd_iommu *iommu, *next;
-
-	for_each_iommu_safe(iommu, next) {
-		list_del(&iommu->list);
-		free_iommu_one(iommu);
-		kfree(iommu);
-	}
-}
-
-/*
- * This function glues the initialization functions for one IOMMU
- * together and also allocates the command buffer and programs the
- * hardware. It does NOT enable the IOMMU. This is done afterwards.
- */
-static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
-{
-	spin_lock_init(&iommu->lock);
-
-	/* Add IOMMU to internal data structures */
-	list_add_tail(&iommu->list, &amd_iommu_list);
-	iommu->index = amd_iommus_present++;
-
-	if (unlikely(iommu->index >= MAX_IOMMUS)) {
-		WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n");
-		return -ENOSYS;
-	}
-
-	/* Index is fine - add IOMMU to the array */
-	amd_iommus[iommu->index] = iommu;
-
-	/*
-	 * Copy data from ACPI table entry to the iommu struct
-	 */
-	iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
-	if (!iommu->dev)
-		return 1;
-
-	iommu->cap_ptr = h->cap_ptr;
-	iommu->pci_seg = h->pci_seg;
-	iommu->mmio_phys = h->mmio_phys;
-	iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys);
-	if (!iommu->mmio_base)
-		return -ENOMEM;
-
-	iommu->cmd_buf = alloc_command_buffer(iommu);
-	if (!iommu->cmd_buf)
-		return -ENOMEM;
-
-	iommu->evt_buf = alloc_event_buffer(iommu);
-	if (!iommu->evt_buf)
-		return -ENOMEM;
-
-	iommu->int_enabled = false;
-
-	init_iommu_from_pci(iommu);
-	init_iommu_from_acpi(iommu, h);
-	init_iommu_devices(iommu);
-
-	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
-		amd_iommu_np_cache = true;
-
-	return pci_enable_device(iommu->dev);
-}
-
-/*
- * Iterates over all IOMMU entries in the ACPI table, allocates the
- * IOMMU structure and initializes it with init_iommu_one()
- */
-static int __init init_iommu_all(struct acpi_table_header *table)
-{
-	u8 *p = (u8 *)table, *end = (u8 *)table;
-	struct ivhd_header *h;
-	struct amd_iommu *iommu;
-	int ret;
-
-	end += table->length;
-	p += IVRS_HEADER_LENGTH;
-
-	while (p < end) {
-		h = (struct ivhd_header *)p;
-		switch (*p) {
-		case ACPI_IVHD_TYPE:
-
-			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
-				    "seg: %d flags: %01x info %04x\n",
-				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
-				    PCI_FUNC(h->devid), h->cap_ptr,
-				    h->pci_seg, h->flags, h->info);
-			DUMP_printk("       mmio-addr: %016llx\n",
-				    h->mmio_phys);
-
-			iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-			if (iommu == NULL) {
-				amd_iommu_init_err = -ENOMEM;
-				return 0;
-			}
-
-			ret = init_iommu_one(iommu, h);
-			if (ret) {
-				amd_iommu_init_err = ret;
-				return 0;
-			}
-			break;
-		default:
-			break;
-		}
-		p += h->length;
-
-	}
-	WARN_ON(p != end);
-
-	return 0;
-}
-
-/****************************************************************************
- *
- * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. It's a bit challenging because there could be multiple
- * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
- * pci_dev.
- * - ****************************************************************************/ - -static int iommu_setup_msi(struct amd_iommu *iommu) -{ - int r; - - if (pci_enable_msi(iommu->dev)) - return 1; - - r = request_threaded_irq(iommu->dev->irq, - amd_iommu_int_handler, - amd_iommu_int_thread, - 0, "AMD-Vi", - iommu->dev); - - if (r) { - pci_disable_msi(iommu->dev); - return 1; - } - - iommu->int_enabled = true; - iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); - - return 0; -} - -static int iommu_init_msi(struct amd_iommu *iommu) -{ - if (iommu->int_enabled) - return 0; - - if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) - return iommu_setup_msi(iommu); - - return 1; -} - -/**************************************************************************** - * - * The next functions belong to the third pass of parsing the ACPI - * table. In this last pass the memory mapping requirements are - * gathered (like exclusion and unity mapping reanges). - * - ****************************************************************************/ - -static void __init free_unity_maps(void) -{ - struct unity_map_entry *entry, *next; - - list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { - list_del(&entry->list); - kfree(entry); - } -} - -/* called when we find an exclusion range definition in ACPI */ -static int __init init_exclusion_range(struct ivmd_header *m) -{ - int i; - - switch (m->type) { - case ACPI_IVMD_TYPE: - set_device_exclusion_range(m->devid, m); - break; - case ACPI_IVMD_TYPE_ALL: - for (i = 0; i <= amd_iommu_last_bdf; ++i) - set_device_exclusion_range(i, m); - break; - case ACPI_IVMD_TYPE_RANGE: - for (i = m->devid; i <= m->aux; ++i) - set_device_exclusion_range(i, m); - break; - default: - break; - } - - return 0; -} - -/* called for unity map ACPI definition */ -static int __init init_unity_map_range(struct ivmd_header *m) -{ - struct unity_map_entry *e = 0; - char *s; - - e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) - return -ENOMEM; - - switch (m->type) { - default: - kfree(e); - return 0; - case ACPI_IVMD_TYPE: - s = "IVMD_TYPEi\t\t\t"; - e->devid_start = e->devid_end = m->devid; - break; - case ACPI_IVMD_TYPE_ALL: - s = "IVMD_TYPE_ALL\t\t"; - e->devid_start = 0; - e->devid_end = amd_iommu_last_bdf; - break; - case ACPI_IVMD_TYPE_RANGE: - s = "IVMD_TYPE_RANGE\t\t"; - e->devid_start = m->devid; - e->devid_end = m->aux; - break; - } - e->address_start = PAGE_ALIGN(m->range_start); - e->address_end = e->address_start + PAGE_ALIGN(m->range_length); - e->prot = m->flags >> 1; - - DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" - " range_start: %016llx range_end: %016llx flags: %x\n", s, - PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start), - PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end), - PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), - e->address_start, e->address_end, m->flags); - - list_add_tail(&e->list, &amd_iommu_unity_map); - - return 0; -} - -/* iterates over all memory definitions we find in the ACPI table */ -static int __init init_memory_definitions(struct acpi_table_header *table) -{ - u8 *p = (u8 *)table, *end = (u8 *)table; - struct ivmd_header *m; - - end += table->length; - p += IVRS_HEADER_LENGTH; - - while (p < end) { - m = (struct ivmd_header *)p; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - else if (m->flags & IVMD_FLAG_UNITY_MAP) - init_unity_map_range(m); - - p += m->length; - } - - return 0; -} - -/* - * Init the device table to not allow DMA access for devices and - * suppress all page faults - */ 
-static void init_device_table(void) -{ - u32 devid; - - for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { - set_dev_entry_bit(devid, DEV_ENTRY_VALID); - set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); - } -} - -static void iommu_init_flags(struct amd_iommu *iommu) -{ - iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); - - iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); - - iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : - iommu_feature_disable(iommu, CONTROL_ISOC_EN); - - /* - * make IOMMU memory accesses cache coherent - */ - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); -} - -static void iommu_apply_resume_quirks(struct amd_iommu *iommu) -{ - int i, j; - u32 ioc_feature_control; - struct pci_dev *pdev = NULL; - - /* RD890 BIOSes may not have completely reconfigured the iommu */ - if (!is_rd890_iommu(iommu->dev)) - return; - - /* - * First, we need to ensure that the iommu is enabled. This is - * controlled by a register in the northbridge - */ - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); - - if (!pdev) - return; - - /* Select Northbridge indirect register 0x75 and enable writing */ - pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); - pci_read_config_dword(pdev, 0x64, &ioc_feature_control); - - /* Enable the iommu */ - if (!(ioc_feature_control & 0x1)) - pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); - - pci_dev_put(pdev); - - /* Restore the iommu BAR */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo); - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, - iommu->stored_addr_hi); - - /* Restore the l1 indirect regs for each of the 6 l1s */ - for (i = 0; i < 6; i++) - for (j = 0; j < 0x12; j++) - iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); - - /* Restore the l2 indirect regs */ - for (i = 0; i < 0x83; i++) - iommu_write_l2(iommu, i, iommu->stored_l2[i]); - - /* Lock PCI setup registers */ - pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, - iommu->stored_addr_lo | 1); -} - -/* - * This function finally enables all IOMMUs found in the system after - * they have been initialized - */ -static void enable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) { - iommu_disable(iommu); - iommu_init_flags(iommu); - iommu_set_device_table(iommu); - iommu_enable_command_buffer(iommu); - iommu_enable_event_buffer(iommu); - iommu_set_exclusion_range(iommu); - iommu_init_msi(iommu); - iommu_enable(iommu); - iommu_flush_all_caches(iommu); - } -} - -static void disable_iommus(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_disable(iommu); -} - -/* - * Suspend/Resume support - * disable suspend until real resume implemented - */ - -static void amd_iommu_resume(void) -{ - struct amd_iommu *iommu; - - for_each_iommu(iommu) - iommu_apply_resume_quirks(iommu); - - /* re-load the hardware */ - enable_iommus(); - - /* - * we have to flush after the IOMMUs are enabled because a - * disabled IOMMU will never execute the commands we send - */ - for_each_iommu(iommu) - iommu_flush_all_caches(iommu); -} - -static int amd_iommu_suspend(void) -{ - /* disable IOMMUs to go 
out of the way for BIOS */ - disable_iommus(); - - return 0; -} - -static struct syscore_ops amd_iommu_syscore_ops = { - .suspend = amd_iommu_suspend, - .resume = amd_iommu_resume, -}; - -/* - * This is the core init function for AMD IOMMU hardware in the system. - * This function is called from the generic x86 DMA layer initialization - * code. - * - * This function basically parses the ACPI table for AMD IOMMU (IVRS) - * three times: - * - * 1 pass) Find the highest PCI device id the driver has to handle. - * Upon this information the size of the data structures is - * determined that needs to be allocated. - * - * 2 pass) Initialize the data structures just allocated with the - * information in the ACPI table about available AMD IOMMUs - * in the system. It also maps the PCI devices in the - * system to specific IOMMUs - * - * 3 pass) After the basic data structures are allocated and - * initialized we update them with information about memory - * remapping requirements parsed out of the ACPI table in - * this last pass. - * - * After that the hardware is initialized and ready to go. In the last - * step we do some Linux specific things like registering the driver in - * the dma_ops interface and initializing the suspend/resume support - * functions. Finally it prints some information about AMD IOMMUs and - * the driver state and enables the hardware. - */ -static int __init amd_iommu_init(void) -{ - int i, ret = 0; - - /* - * First parse ACPI tables to find the largest Bus/Dev/Func - * we need to handle. Upon this information the shared data - * structures for the IOMMUs in the system will be allocated - */ - if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) - return -ENODEV; - - ret = amd_iommu_init_err; - if (ret) - goto out; - - dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); - alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); - rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); - - ret = -ENOMEM; - - /* Device table - directly used by all IOMMUs */ - amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, - get_order(dev_table_size)); - if (amd_iommu_dev_table == NULL) - goto out; - - /* - * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the - * IOMMU see for that device - */ - amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, - get_order(alias_table_size)); - if (amd_iommu_alias_table == NULL) - goto free; - - /* IOMMU rlookup table - find the IOMMU for a specific device */ - amd_iommu_rlookup_table = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(rlookup_table_size)); - if (amd_iommu_rlookup_table == NULL) - goto free; - - amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( - GFP_KERNEL | __GFP_ZERO, - get_order(MAX_DOMAIN_ID/8)); - if (amd_iommu_pd_alloc_bitmap == NULL) - goto free; - - /* init the device table */ - init_device_table(); - - /* - * let all alias entries point to itself - */ - for (i = 0; i <= amd_iommu_last_bdf; ++i) - amd_iommu_alias_table[i] = i; - - /* - * never allocate domain 0 because its used as the non-allocated and - * error value placeholder - */ - amd_iommu_pd_alloc_bitmap[0] = 1; - - spin_lock_init(&amd_iommu_pd_lock); - - /* - * now the data structures are allocated and basically initialized - * start the real acpi table scan - */ - ret = -ENODEV; - if (acpi_table_parse("IVRS", init_iommu_all) != 0) - goto free; - - if (amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - if (acpi_table_parse("IVRS", init_memory_definitions) != 0) - goto free; - - if 
(amd_iommu_init_err) { - ret = amd_iommu_init_err; - goto free; - } - - ret = amd_iommu_init_devices(); - if (ret) - goto free; - - enable_iommus(); - - if (iommu_pass_through) - ret = amd_iommu_init_passthrough(); - else - ret = amd_iommu_init_dma_ops(); - - if (ret) - goto free_disable; - - amd_iommu_init_api(); - - amd_iommu_init_notifier(); - - register_syscore_ops(&amd_iommu_syscore_ops); - - if (iommu_pass_through) - goto out; - - if (amd_iommu_unmap_flush) - printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); - else - printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); - - x86_platform.iommu_shutdown = disable_iommus; -out: - return ret; - -free_disable: - disable_iommus(); - -free: - amd_iommu_uninit_devices(); - - free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID/8)); - - free_pages((unsigned long)amd_iommu_rlookup_table, - get_order(rlookup_table_size)); - - free_pages((unsigned long)amd_iommu_alias_table, - get_order(alias_table_size)); - - free_pages((unsigned long)amd_iommu_dev_table, - get_order(dev_table_size)); - - free_iommu_all(); - - free_unity_maps(); - -#ifdef CONFIG_GART_IOMMU - /* - * We failed to initialize the AMD IOMMU - try fallback to GART - * if possible. - */ - gart_iommu_init(); - -#endif - - goto out; -} - -/**************************************************************************** - * - * Early detect code. This code runs at IOMMU detection time in the DMA - * layer. It just looks if there is an IVRS ACPI table to detect AMD - * IOMMUs - * - ****************************************************************************/ -static int __init early_amd_iommu_detect(struct acpi_table_header *table) -{ - return 0; -} - -int __init amd_iommu_detect(void) -{ - if (no_iommu || (iommu_detected && !gart_iommu_aperture)) - return -ENODEV; - - if (amd_iommu_disabled) - return -ENODEV; - - if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { - iommu_detected = 1; - amd_iommu_detected = 1; - x86_init.iommu.iommu_init = amd_iommu_init; - - /* Make sure ACS will be enabled */ - pci_request_acs(); - return 1; - } - return -ENODEV; -} - -/**************************************************************************** - * - * Parsing functions for the AMD IOMMU specific kernel command line - * options. 
- * - ****************************************************************************/ - -static int __init parse_amd_iommu_dump(char *str) -{ - amd_iommu_dump = true; - - return 1; -} - -static int __init parse_amd_iommu_options(char *str) -{ - for (; *str; ++str) { - if (strncmp(str, "fullflush", 9) == 0) - amd_iommu_unmap_flush = true; - if (strncmp(str, "off", 3) == 0) - amd_iommu_disabled = true; - } - - return 1; -} - -__setup("amd_iommu_dump", parse_amd_iommu_dump); -__setup("amd_iommu=", parse_amd_iommu_options); - -IOMMU_INIT_FINISH(amd_iommu_detect, - gart_iommu_hole_init, - 0, - 0); diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 49e9c0f46bd5..4d4d77df7cac 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 7c3a95e54ec5..5aa12eaabd21 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -27,13 +27,14 @@ #include #include #include +#include #include #include #include #include -#include -#include -#include + +#include "amd_iommu_proto.h" +#include "amd_iommu_types.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c new file mode 100644 index 000000000000..82d2410f4205 --- /dev/null +++ b/drivers/iommu/amd_iommu_init.c @@ -0,0 +1,1574 @@ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * Leo Duran + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "amd_iommu_proto.h" +#include "amd_iommu_types.h" + +/* + * definitions for the ACPI scanning code + */ +#define IVRS_HEADER_LENGTH 48 + +#define ACPI_IVHD_TYPE 0x10 +#define ACPI_IVMD_TYPE_ALL 0x20 +#define ACPI_IVMD_TYPE 0x21 +#define ACPI_IVMD_TYPE_RANGE 0x22 + +#define IVHD_DEV_ALL 0x01 +#define IVHD_DEV_SELECT 0x02 +#define IVHD_DEV_SELECT_RANGE_START 0x03 +#define IVHD_DEV_RANGE_END 0x04 +#define IVHD_DEV_ALIAS 0x42 +#define IVHD_DEV_ALIAS_RANGE 0x43 +#define IVHD_DEV_EXT_SELECT 0x46 +#define IVHD_DEV_EXT_SELECT_RANGE 0x47 + +#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 +#define IVHD_FLAG_PASSPW_EN_MASK 0x02 +#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 +#define IVHD_FLAG_ISOC_EN_MASK 0x08 + +#define IVMD_FLAG_EXCL_RANGE 0x08 +#define IVMD_FLAG_UNITY_MAP 0x01 + +#define ACPI_DEVFLAG_INITPASS 0x01 +#define ACPI_DEVFLAG_EXTINT 0x02 +#define ACPI_DEVFLAG_NMI 0x04 +#define ACPI_DEVFLAG_SYSMGT1 0x10 +#define ACPI_DEVFLAG_SYSMGT2 0x20 +#define ACPI_DEVFLAG_LINT0 0x40 +#define ACPI_DEVFLAG_LINT1 0x80 +#define ACPI_DEVFLAG_ATSDIS 0x10000000 + +/* + * ACPI table definitions + * + * These data structures are laid over the table to parse the important values + * out of it. + */ + +/* + * structure describing one IOMMU in the ACPI table. Typically followed by one + * or more ivhd_entrys. + */ +struct ivhd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 cap_ptr; + u64 mmio_phys; + u16 pci_seg; + u16 info; + u32 reserved; +} __attribute__((packed)); + +/* + * A device entry describing which devices a specific IOMMU translates and + * which requestor ids they use. + */ +struct ivhd_entry { + u8 type; + u16 devid; + u8 flags; + u32 ext; +} __attribute__((packed)); + +/* + * An AMD IOMMU memory definition structure. It defines things like exclusion + * ranges for devices and regions that should be unity mapped. + */ +struct ivmd_header { + u8 type; + u8 flags; + u16 length; + u16 devid; + u16 aux; + u64 resv; + u64 range_start; + u64 range_length; +} __attribute__((packed)); + +bool amd_iommu_dump; + +static int __initdata amd_iommu_detected; +static bool __initdata amd_iommu_disabled; + +u16 amd_iommu_last_bdf; /* largest PCI device id we have + to handle */ +LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings + we find in ACPI */ +bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ + +LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the + system */ + +/* Array to assign indices to IOMMUs*/ +struct amd_iommu *amd_iommus[MAX_IOMMUS]; +int amd_iommus_present; + +/* IOMMUs have a non-present cache? */ +bool amd_iommu_np_cache __read_mostly; +bool amd_iommu_iotlb_sup __read_mostly = true; + +/* + * The ACPI table parsing functions set this variable on an error + */ +static int __initdata amd_iommu_init_err; + +/* + * List of protection domains - used during resume + */ +LIST_HEAD(amd_iommu_pd_list); +spinlock_t amd_iommu_pd_lock; + +/* + * Pointer to the device table which is shared by all AMD IOMMUs + * it is indexed by the PCI device id or the HT unit id and contains + * information about the domain the device belongs to as well as the + * page table root pointer. 
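(Aside, not part of the patch: all of the shared tables declared below are indexed by the same 16-bit requestor ID, bus number in the high byte and devfn in the low byte, which is why the single amd_iommu_last_bdf value found in the first ACPI pass is enough to size every one of them. A self-contained sketch of that indexing; the helper mirrors calc_devid() from amd_iommu_types.h later in this patch, and the sample bus/devfn values are invented for the example:

#include <stdio.h>
#include <stdint.h>

/* bus in bits 15..8, devfn in bits 7..0, as in calc_devid() */
static inline uint16_t devid_of(uint8_t bus, uint8_t devfn)
{
	return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
	/* invented example: device 00:14.0 aliased to 00:14.4 */
	uint16_t devid    = devid_of(0x00, (0x14 << 3) | 0);
	uint16_t devid_to = devid_of(0x00, (0x14 << 3) | 4);

	static uint16_t alias_table[65536];	/* models amd_iommu_alias_table */

	alias_table[devid] = devid_to;
	printf("devid 0x%04x -> alias 0x%04x\n", devid, alias_table[devid]);
	return 0;
}

The device, alias and rlookup tables are then plain arrays indexed by this devid, which is what makes the lookups in the fast path constant time.)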
+ */ +struct dev_table_entry *amd_iommu_dev_table; + +/* + * The alias table is a driver specific data structure which contains the + * mappings of the PCI device ids to the actual requestor ids on the IOMMU. + * More than one device can share the same requestor id. + */ +u16 *amd_iommu_alias_table; + +/* + * The rlookup table is used to find the IOMMU which is responsible + * for a specific device. It is also indexed by the PCI device id. + */ +struct amd_iommu **amd_iommu_rlookup_table; + +/* + * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap + * to know which ones are already in use. + */ +unsigned long *amd_iommu_pd_alloc_bitmap; + +static u32 dev_table_size; /* size of the device table */ +static u32 alias_table_size; /* size of the alias table */ +static u32 rlookup_table_size; /* size if the rlookup table */ + +/* + * This function flushes all internal caches of + * the IOMMU used by this driver. + */ +extern void iommu_flush_all_caches(struct amd_iommu *iommu); + +static inline void update_last_devid(u16 devid) +{ + if (devid > amd_iommu_last_bdf) + amd_iommu_last_bdf = devid; +} + +static inline unsigned long tbl_size(int entry_size) +{ + unsigned shift = PAGE_SHIFT + + get_order(((int)amd_iommu_last_bdf + 1) * entry_size); + + return 1UL << shift; +} + +/* Access to l1 and l2 indexed register spaces */ + +static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) +{ + u32 val; + + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); + pci_read_config_dword(iommu->dev, 0xfc, &val); + return val; +} + +static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) +{ + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); + pci_write_config_dword(iommu->dev, 0xfc, val); + pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); +} + +static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) +{ + u32 val; + + pci_write_config_dword(iommu->dev, 0xf0, address); + pci_read_config_dword(iommu->dev, 0xf4, &val); + return val; +} + +static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) +{ + pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); + pci_write_config_dword(iommu->dev, 0xf4, val); +} + +/**************************************************************************** + * + * AMD IOMMU MMIO register space handling functions + * + * These functions are used to program the IOMMU device registers in + * MMIO space required for that driver. + * + ****************************************************************************/ + +/* + * This function set the exclusion range in the IOMMU. 
DMA accesses to the + * exclusion range are passed through untranslated + */ +static void iommu_set_exclusion_range(struct amd_iommu *iommu) +{ + u64 start = iommu->exclusion_start & PAGE_MASK; + u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; + u64 entry; + + if (!iommu->exclusion_start) + return; + + entry = start | MMIO_EXCL_ENABLE_MASK; + memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, + &entry, sizeof(entry)); + + entry = limit; + memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, + &entry, sizeof(entry)); +} + +/* Programs the physical address of the device table into the IOMMU hardware */ +static void __init iommu_set_device_table(struct amd_iommu *iommu) +{ + u64 entry; + + BUG_ON(iommu->mmio_base == NULL); + + entry = virt_to_phys(amd_iommu_dev_table); + entry |= (dev_table_size >> 12) - 1; + memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, + &entry, sizeof(entry)); +} + +/* Generic functions to enable/disable certain features of the IOMMU. */ +static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) +{ + u32 ctrl; + + ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl |= (1 << bit); + writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); +} + +static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) +{ + u32 ctrl; + + ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); + ctrl &= ~(1 << bit); + writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); +} + +/* Function to enable the hardware */ +static void iommu_enable(struct amd_iommu *iommu) +{ + static const char * const feat_str[] = { + "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", + "IA", "GA", "HE", "PC", NULL + }; + int i; + + printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", + dev_name(&iommu->dev->dev), iommu->cap_ptr); + + if (iommu->cap & (1 << IOMMU_CAP_EFR)) { + printk(KERN_CONT " extended features: "); + for (i = 0; feat_str[i]; ++i) + if (iommu_feature(iommu, (1ULL << i))) + printk(KERN_CONT " %s", feat_str[i]); + } + printk(KERN_CONT "\n"); + + iommu_feature_enable(iommu, CONTROL_IOMMU_EN); +} + +static void iommu_disable(struct amd_iommu *iommu) +{ + /* Disable command buffer */ + iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); + + /* Disable event logging and event interrupts */ + iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); + iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); + + /* Disable IOMMU hardware itself */ + iommu_feature_disable(iommu, CONTROL_IOMMU_EN); +} + +/* + * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in + * the system has one. + */ +static u8 * __init iommu_map_mmio_space(u64 address) +{ + u8 *ret; + + if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { + pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", + address); + pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); + return NULL; + } + + ret = ioremap_nocache(address, MMIO_REGION_LENGTH); + if (ret != NULL) + return ret; + + release_mem_region(address, MMIO_REGION_LENGTH); + + return NULL; +} + +static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) +{ + if (iommu->mmio_base) + iounmap(iommu->mmio_base); + release_mem_region(iommu->mmio_phys, MMIO_REGION_LENGTH); +} + +/**************************************************************************** + * + * The functions below belong to the first pass of AMD IOMMU ACPI table + * parsing. In this pass we try to find out the highest device id this + * code has to handle. 
Upon this information the size of the shared data + * structures is determined later. + * + ****************************************************************************/ + +/* + * This function calculates the length of a given IVHD entry + */ +static inline int ivhd_entry_length(u8 *ivhd) +{ + return 0x04 << (*ivhd >> 6); +} + +/* + * This function reads the last device id the IOMMU has to handle from the PCI + * capability header for this IOMMU + */ +static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) +{ + u32 cap; + + cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); + update_last_devid(calc_devid(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); + + return 0; +} + +/* + * After reading the highest device id from the IOMMU PCI capability header + * this function looks if there is a higher device id defined in the ACPI table + */ +static int __init find_last_devid_from_ivhd(struct ivhd_header *h) +{ + u8 *p = (void *)h, *end = (void *)h; + struct ivhd_entry *dev; + + p += sizeof(*h); + end += h->length; + + find_last_devid_on_pci(PCI_BUS(h->devid), + PCI_SLOT(h->devid), + PCI_FUNC(h->devid), + h->cap_ptr); + + while (p < end) { + dev = (struct ivhd_entry *)p; + switch (dev->type) { + case IVHD_DEV_SELECT: + case IVHD_DEV_RANGE_END: + case IVHD_DEV_ALIAS: + case IVHD_DEV_EXT_SELECT: + /* all the above subfield types refer to device ids */ + update_last_devid(dev->devid); + break; + default: + break; + } + p += ivhd_entry_length(p); + } + + WARN_ON(p != end); + + return 0; +} + +/* + * Iterate over all IVHD entries in the ACPI table and find the highest device + * id which we need to handle. This is the first of three functions which parse + * the ACPI table. So we check the checksum here. + */ +static int __init find_last_devid_acpi(struct acpi_table_header *table) +{ + int i; + u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + + /* + * Validate checksum here so we don't need to do it when + * we actually parse the table + */ + for (i = 0; i < table->length; ++i) + checksum += p[i]; + if (checksum != 0) { + /* ACPI table corrupt */ + amd_iommu_init_err = -ENODEV; + return 0; + } + + p += IVRS_HEADER_LENGTH; + + end += table->length; + while (p < end) { + h = (struct ivhd_header *)p; + switch (h->type) { + case ACPI_IVHD_TYPE: + find_last_devid_from_ivhd(h); + break; + default: + break; + } + p += h->length; + } + WARN_ON(p != end); + + return 0; +} + +/**************************************************************************** + * + * The following functions belong the the code path which parses the ACPI table + * the second time. In this ACPI parsing iteration we allocate IOMMU specific + * data structures, initialize the device/alias/rlookup table and also + * basically initialize the hardware. + * + ****************************************************************************/ + +/* + * Allocates the command buffer. This buffer is per AMD IOMMU. We can + * write commands to that buffer later and the IOMMU will execute them + * asynchronously + */ +static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) +{ + u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(CMD_BUFFER_SIZE)); + + if (cmd_buf == NULL) + return NULL; + + iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; + + return cmd_buf; +} + +/* + * This function resets the command buffer if the IOMMU stopped fetching + * commands from it. 
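(Aside, not part of the patch: the command buffer is a ring. Software advances the tail register as it queues 16-byte commands, the hardware advances the head as it fetches them, and the reset below simply forces both pointers back to zero while the fetcher is disabled. A toy model of the ring arithmetic, with the MMIO registers replaced by plain variables and invented values; the 8192-byte size and 512-entry count come from amd_iommu_types.h later in this patch:

#include <stdint.h>
#include <stdio.h>

#define CMD_BUFFER_SIZE	8192	/* as in amd_iommu_types.h */
#define CMD_ENTRY_SIZE	16	/* 8192 bytes / 512 entries */

int main(void)
{
	uint32_t head = 0x40, tail = 0x1a0;	/* invented register values */

	/* number of commands queued but not yet fetched by the IOMMU */
	uint32_t pending = ((tail - head) & (CMD_BUFFER_SIZE - 1))
			   / CMD_ENTRY_SIZE;

	printf("%u commands queued of %u slots\n",
	       pending, CMD_BUFFER_SIZE / CMD_ENTRY_SIZE);
	return 0;
}
)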
+ */ +void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) +{ + iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); + + writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); +} + +/* + * This function writes the command buffer address to the hardware and + * enables it. + */ +static void iommu_enable_command_buffer(struct amd_iommu *iommu) +{ + u64 entry; + + BUG_ON(iommu->cmd_buf == NULL); + + entry = (u64)virt_to_phys(iommu->cmd_buf); + entry |= MMIO_CMD_SIZE_512; + + memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, + &entry, sizeof(entry)); + + amd_iommu_reset_cmd_buffer(iommu); + iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); +} + +static void __init free_command_buffer(struct amd_iommu *iommu) +{ + free_pages((unsigned long)iommu->cmd_buf, + get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); +} + +/* allocates the memory where the IOMMU will log its events to */ +static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) +{ + iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(EVT_BUFFER_SIZE)); + + if (iommu->evt_buf == NULL) + return NULL; + + iommu->evt_buf_size = EVT_BUFFER_SIZE; + + return iommu->evt_buf; +} + +static void iommu_enable_event_buffer(struct amd_iommu *iommu) +{ + u64 entry; + + BUG_ON(iommu->evt_buf == NULL); + + entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; + + memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, + &entry, sizeof(entry)); + + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); +} + +static void __init free_event_buffer(struct amd_iommu *iommu) +{ + free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); +} + +/* sets a specific bit in the device table entry. 
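(Aside, not part of the patch: each device table entry is 256 bits stored as eight 32-bit words — struct dev_table_entry, defined later in this patch — so the helpers that follow split the bit number into a word index (bit >> 5) and a position inside that word (bit & 0x1f). The same arithmetic as a standalone sketch:

#include <stdint.h>

struct dev_table_entry {
	uint32_t data[8];	/* 256-bit entry, as in the patch */
};

static void set_entry_bit(struct dev_table_entry *e, uint8_t bit)
{
	e->data[(bit >> 5) & 0x07] |= 1u << (bit & 0x1f);
}

static int get_entry_bit(const struct dev_table_entry *e, uint8_t bit)
{
	return (e->data[(bit >> 5) & 0x07] >> (bit & 0x1f)) & 1;
}

int main(void)
{
	struct dev_table_entry e = { { 0 } };

	set_entry_bit(&e, 0x01);	/* DEV_ENTRY_TRANSLATION */
	return get_entry_bit(&e, 0x01) ? 0 : 1;
}

Bit numbers up to 0xbf fit comfortably in the eight words, which covers every DEV_ENTRY_* constant used by the driver.)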
*/ +static void set_dev_entry_bit(u16 devid, u8 bit) +{ + int i = (bit >> 5) & 0x07; + int _bit = bit & 0x1f; + + amd_iommu_dev_table[devid].data[i] |= (1 << _bit); +} + +static int get_dev_entry_bit(u16 devid, u8 bit) +{ + int i = (bit >> 5) & 0x07; + int _bit = bit & 0x1f; + + return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; +} + + +void amd_iommu_apply_erratum_63(u16 devid) +{ + int sysmgt; + + sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | + (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); + + if (sysmgt == 0x01) + set_dev_entry_bit(devid, DEV_ENTRY_IW); +} + +/* Writes the specific IOMMU for a device into the rlookup table */ +static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) +{ + amd_iommu_rlookup_table[devid] = iommu; +} + +/* + * This function takes the device specific flags read from the ACPI + * table and sets up the device table entry with that information + */ +static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, + u16 devid, u32 flags, u32 ext_flags) +{ + if (flags & ACPI_DEVFLAG_INITPASS) + set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); + if (flags & ACPI_DEVFLAG_EXTINT) + set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); + if (flags & ACPI_DEVFLAG_NMI) + set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); + if (flags & ACPI_DEVFLAG_SYSMGT1) + set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); + if (flags & ACPI_DEVFLAG_SYSMGT2) + set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); + if (flags & ACPI_DEVFLAG_LINT0) + set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); + if (flags & ACPI_DEVFLAG_LINT1) + set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); + + amd_iommu_apply_erratum_63(devid); + + set_iommu_for_device(iommu, devid); +} + +/* + * Reads the device exclusion range from ACPI and initialize IOMMU with + * it + */ +static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) +{ + struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; + + if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) + return; + + if (iommu) { + /* + * We only can configure exclusion ranges per IOMMU, not + * per device. But we can enable the exclusion range per + * device. This is done here + */ + set_dev_entry_bit(m->devid, DEV_ENTRY_EX); + iommu->exclusion_start = m->range_start; + iommu->exclusion_length = m->range_length; + } +} + +/* + * This function reads some important data from the IOMMU PCI space and + * initializes the driver data structure with it. 
It reads the hardware + * capabilities and the first/last device entries + */ +static void __init init_iommu_from_pci(struct amd_iommu *iommu) +{ + int cap_ptr = iommu->cap_ptr; + u32 range, misc, low, high; + int i, j; + + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, + &iommu->cap); + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, + &range); + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, + &misc); + + iommu->first_device = calc_devid(MMIO_GET_BUS(range), + MMIO_GET_FD(range)); + iommu->last_device = calc_devid(MMIO_GET_BUS(range), + MMIO_GET_LD(range)); + iommu->evt_msi_num = MMIO_MSI_NUM(misc); + + if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) + amd_iommu_iotlb_sup = false; + + /* read extended feature bits */ + low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); + high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); + + iommu->features = ((u64)high << 32) | low; + + if (!is_rd890_iommu(iommu->dev)) + return; + + /* + * Some rd890 systems may not be fully reconfigured by the BIOS, so + * it's necessary for us to store this information so it can be + * reprogrammed on resume + */ + + pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, + &iommu->stored_addr_lo); + pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, + &iommu->stored_addr_hi); + + /* Low bit locks writes to configuration space */ + iommu->stored_addr_lo &= ~1; + + for (i = 0; i < 6; i++) + for (j = 0; j < 0x12; j++) + iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); + + for (i = 0; i < 0x83; i++) + iommu->stored_l2[i] = iommu_read_l2(iommu, i); +} + +/* + * Takes a pointer to an AMD IOMMU entry in the ACPI table and + * initializes the hardware and our data structures with it. + */ +static void __init init_iommu_from_acpi(struct amd_iommu *iommu, + struct ivhd_header *h) +{ + u8 *p = (u8 *)h; + u8 *end = p, flags = 0; + u16 devid = 0, devid_start = 0, devid_to = 0; + u32 dev_i, ext_flags = 0; + bool alias = false; + struct ivhd_entry *e; + + /* + * First save the recommended feature enable bits from ACPI + */ + iommu->acpi_flags = h->flags; + + /* + * Done. 
Now parse the device entries + */ + p += sizeof(struct ivhd_header); + end += h->length; + + + while (p < end) { + e = (struct ivhd_entry *)p; + switch (e->type) { + case IVHD_DEV_ALL: + + DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" + " last device %02x:%02x.%x flags: %02x\n", + PCI_BUS(iommu->first_device), + PCI_SLOT(iommu->first_device), + PCI_FUNC(iommu->first_device), + PCI_BUS(iommu->last_device), + PCI_SLOT(iommu->last_device), + PCI_FUNC(iommu->last_device), + e->flags); + + for (dev_i = iommu->first_device; + dev_i <= iommu->last_device; ++dev_i) + set_dev_entry_from_acpi(iommu, dev_i, + e->flags, 0); + break; + case IVHD_DEV_SELECT: + + DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " + "flags: %02x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags); + + devid = e->devid; + set_dev_entry_from_acpi(iommu, devid, e->flags, 0); + break; + case IVHD_DEV_SELECT_RANGE_START: + + DUMP_printk(" DEV_SELECT_RANGE_START\t " + "devid: %02x:%02x.%x flags: %02x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags); + + devid_start = e->devid; + flags = e->flags; + ext_flags = 0; + alias = false; + break; + case IVHD_DEV_ALIAS: + + DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " + "flags: %02x devid_to: %02x:%02x.%x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags, + PCI_BUS(e->ext >> 8), + PCI_SLOT(e->ext >> 8), + PCI_FUNC(e->ext >> 8)); + + devid = e->devid; + devid_to = e->ext >> 8; + set_dev_entry_from_acpi(iommu, devid , e->flags, 0); + set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); + amd_iommu_alias_table[devid] = devid_to; + break; + case IVHD_DEV_ALIAS_RANGE: + + DUMP_printk(" DEV_ALIAS_RANGE\t\t " + "devid: %02x:%02x.%x flags: %02x " + "devid_to: %02x:%02x.%x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags, + PCI_BUS(e->ext >> 8), + PCI_SLOT(e->ext >> 8), + PCI_FUNC(e->ext >> 8)); + + devid_start = e->devid; + flags = e->flags; + devid_to = e->ext >> 8; + ext_flags = 0; + alias = true; + break; + case IVHD_DEV_EXT_SELECT: + + DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " + "flags: %02x ext: %08x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags, e->ext); + + devid = e->devid; + set_dev_entry_from_acpi(iommu, devid, e->flags, + e->ext); + break; + case IVHD_DEV_EXT_SELECT_RANGE: + + DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " + "%02x:%02x.%x flags: %02x ext: %08x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid), + e->flags, e->ext); + + devid_start = e->devid; + flags = e->flags; + ext_flags = e->ext; + alias = false; + break; + case IVHD_DEV_RANGE_END: + + DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", + PCI_BUS(e->devid), + PCI_SLOT(e->devid), + PCI_FUNC(e->devid)); + + devid = e->devid; + for (dev_i = devid_start; dev_i <= devid; ++dev_i) { + if (alias) { + amd_iommu_alias_table[dev_i] = devid_to; + set_dev_entry_from_acpi(iommu, + devid_to, flags, ext_flags); + } + set_dev_entry_from_acpi(iommu, dev_i, + flags, ext_flags); + } + break; + default: + break; + } + + p += ivhd_entry_length(p); + } +} + +/* Initializes the device->iommu mapping for the driver */ +static int __init init_iommu_devices(struct amd_iommu *iommu) +{ + u32 i; + + for (i = iommu->first_device; i <= iommu->last_device; ++i) + set_iommu_for_device(iommu, i); + + return 0; +} + +static void __init free_iommu_one(struct amd_iommu *iommu) +{ + free_command_buffer(iommu); + free_event_buffer(iommu); + 
iommu_unmap_mmio_space(iommu); +} + +static void __init free_iommu_all(void) +{ + struct amd_iommu *iommu, *next; + + for_each_iommu_safe(iommu, next) { + list_del(&iommu->list); + free_iommu_one(iommu); + kfree(iommu); + } +} + +/* + * This function clues the initialization function for one IOMMU + * together and also allocates the command buffer and programs the + * hardware. It does NOT enable the IOMMU. This is done afterwards. + */ +static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) +{ + spin_lock_init(&iommu->lock); + + /* Add IOMMU to internal data structures */ + list_add_tail(&iommu->list, &amd_iommu_list); + iommu->index = amd_iommus_present++; + + if (unlikely(iommu->index >= MAX_IOMMUS)) { + WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); + return -ENOSYS; + } + + /* Index is fine - add IOMMU to the array */ + amd_iommus[iommu->index] = iommu; + + /* + * Copy data from ACPI table entry to the iommu struct + */ + iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff); + if (!iommu->dev) + return 1; + + iommu->cap_ptr = h->cap_ptr; + iommu->pci_seg = h->pci_seg; + iommu->mmio_phys = h->mmio_phys; + iommu->mmio_base = iommu_map_mmio_space(h->mmio_phys); + if (!iommu->mmio_base) + return -ENOMEM; + + iommu->cmd_buf = alloc_command_buffer(iommu); + if (!iommu->cmd_buf) + return -ENOMEM; + + iommu->evt_buf = alloc_event_buffer(iommu); + if (!iommu->evt_buf) + return -ENOMEM; + + iommu->int_enabled = false; + + init_iommu_from_pci(iommu); + init_iommu_from_acpi(iommu, h); + init_iommu_devices(iommu); + + if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) + amd_iommu_np_cache = true; + + return pci_enable_device(iommu->dev); +} + +/* + * Iterates over all IOMMU entries in the ACPI table, allocates the + * IOMMU structure and initializes it with init_iommu_one() + */ +static int __init init_iommu_all(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivhd_header *h; + struct amd_iommu *iommu; + int ret; + + end += table->length; + p += IVRS_HEADER_LENGTH; + + while (p < end) { + h = (struct ivhd_header *)p; + switch (*p) { + case ACPI_IVHD_TYPE: + + DUMP_printk("device: %02x:%02x.%01x cap: %04x " + "seg: %d flags: %01x info %04x\n", + PCI_BUS(h->devid), PCI_SLOT(h->devid), + PCI_FUNC(h->devid), h->cap_ptr, + h->pci_seg, h->flags, h->info); + DUMP_printk(" mmio-addr: %016llx\n", + h->mmio_phys); + + iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); + if (iommu == NULL) { + amd_iommu_init_err = -ENOMEM; + return 0; + } + + ret = init_iommu_one(iommu, h); + if (ret) { + amd_iommu_init_err = ret; + return 0; + } + break; + default: + break; + } + p += h->length; + + } + WARN_ON(p != end); + + return 0; +} + +/**************************************************************************** + * + * The following functions initialize the MSI interrupts for all IOMMUs + * in the system. Its a bit challenging because there could be multiple + * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per + * pci_dev. 
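(Aside, not part of the patch: iommu_init_msi() below returns early when interrupts are already enabled for the device, the usual enable-once pattern, and iommu_setup_msi() rolls the MSI enable back if the IRQ request fails. A compact model of that guard; the names are invented for the sketch and the 0/1 return codes follow the convention of the patch:

#include <stdbool.h>
#include <stdio.h>

struct toy_iommu {
	bool int_enabled;	/* models the flag in struct amd_iommu */
};

/* models iommu_init_msi(): 0 = interrupts ready, 1 = no MSI support */
static int init_msi_once(struct toy_iommu *iommu, bool has_msi_cap)
{
	if (iommu->int_enabled)		/* already set up earlier */
		return 0;

	if (!has_msi_cap)
		return 1;

	/* the real code calls pci_enable_msi() + request_threaded_irq()
	 * here, and disables MSI again if the IRQ request fails */
	iommu->int_enabled = true;
	return 0;
}

int main(void)
{
	struct toy_iommu iommu = { .int_enabled = false };

	printf("first call:  %d\n", init_msi_once(&iommu, true));
	printf("second call: %d\n", init_msi_once(&iommu, true));
	return 0;
}
)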
+ * + ****************************************************************************/ + +static int iommu_setup_msi(struct amd_iommu *iommu) +{ + int r; + + if (pci_enable_msi(iommu->dev)) + return 1; + + r = request_threaded_irq(iommu->dev->irq, + amd_iommu_int_handler, + amd_iommu_int_thread, + 0, "AMD-Vi", + iommu->dev); + + if (r) { + pci_disable_msi(iommu->dev); + return 1; + } + + iommu->int_enabled = true; + iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); + + return 0; +} + +static int iommu_init_msi(struct amd_iommu *iommu) +{ + if (iommu->int_enabled) + return 0; + + if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) + return iommu_setup_msi(iommu); + + return 1; +} + +/**************************************************************************** + * + * The next functions belong to the third pass of parsing the ACPI + * table. In this last pass the memory mapping requirements are + * gathered (like exclusion and unity mapping reanges). + * + ****************************************************************************/ + +static void __init free_unity_maps(void) +{ + struct unity_map_entry *entry, *next; + + list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { + list_del(&entry->list); + kfree(entry); + } +} + +/* called when we find an exclusion range definition in ACPI */ +static int __init init_exclusion_range(struct ivmd_header *m) +{ + int i; + + switch (m->type) { + case ACPI_IVMD_TYPE: + set_device_exclusion_range(m->devid, m); + break; + case ACPI_IVMD_TYPE_ALL: + for (i = 0; i <= amd_iommu_last_bdf; ++i) + set_device_exclusion_range(i, m); + break; + case ACPI_IVMD_TYPE_RANGE: + for (i = m->devid; i <= m->aux; ++i) + set_device_exclusion_range(i, m); + break; + default: + break; + } + + return 0; +} + +/* called for unity map ACPI definition */ +static int __init init_unity_map_range(struct ivmd_header *m) +{ + struct unity_map_entry *e = 0; + char *s; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (e == NULL) + return -ENOMEM; + + switch (m->type) { + default: + kfree(e); + return 0; + case ACPI_IVMD_TYPE: + s = "IVMD_TYPEi\t\t\t"; + e->devid_start = e->devid_end = m->devid; + break; + case ACPI_IVMD_TYPE_ALL: + s = "IVMD_TYPE_ALL\t\t"; + e->devid_start = 0; + e->devid_end = amd_iommu_last_bdf; + break; + case ACPI_IVMD_TYPE_RANGE: + s = "IVMD_TYPE_RANGE\t\t"; + e->devid_start = m->devid; + e->devid_end = m->aux; + break; + } + e->address_start = PAGE_ALIGN(m->range_start); + e->address_end = e->address_start + PAGE_ALIGN(m->range_length); + e->prot = m->flags >> 1; + + DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" + " range_start: %016llx range_end: %016llx flags: %x\n", s, + PCI_BUS(e->devid_start), PCI_SLOT(e->devid_start), + PCI_FUNC(e->devid_start), PCI_BUS(e->devid_end), + PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), + e->address_start, e->address_end, m->flags); + + list_add_tail(&e->list, &amd_iommu_unity_map); + + return 0; +} + +/* iterates over all memory definitions we find in the ACPI table */ +static int __init init_memory_definitions(struct acpi_table_header *table) +{ + u8 *p = (u8 *)table, *end = (u8 *)table; + struct ivmd_header *m; + + end += table->length; + p += IVRS_HEADER_LENGTH; + + while (p < end) { + m = (struct ivmd_header *)p; + if (m->flags & IVMD_FLAG_EXCL_RANGE) + init_exclusion_range(m); + else if (m->flags & IVMD_FLAG_UNITY_MAP) + init_unity_map_range(m); + + p += m->length; + } + + return 0; +} + +/* + * Init the device table to not allow DMA access for devices and + * suppress all page faults + */ 
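(A quick aside before the device-table code resumes, not part of the patch: the unity-map arithmetic in init_unity_map_range() above page-aligns both the start and the length of the IVMD window and takes the protection bits from flags >> 1, dropping the unity-map flag bit. The same computation as a standalone sketch with invented IVMD values, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		4096ULL
#define ALIGN_UP(x)	(((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int main(void)
{
	/* invented IVMD record values */
	uint64_t range_start  = 0x12345ULL;
	uint64_t range_length = 0x2100ULL;
	uint8_t  flags        = 0x01 | (0x3 << 1);	/* unity map, prot bits */

	uint64_t address_start = ALIGN_UP(range_start);
	uint64_t address_end   = address_start + ALIGN_UP(range_length);
	int      prot          = flags >> 1;	/* as in init_unity_map_range() */

	printf("unity map %#llx-%#llx prot %x\n",
	       (unsigned long long)address_start,
	       (unsigned long long)address_end, prot);
	return 0;
}
)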
+static void init_device_table(void) +{ + u32 devid; + + for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { + set_dev_entry_bit(devid, DEV_ENTRY_VALID); + set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); + } +} + +static void iommu_init_flags(struct amd_iommu *iommu) +{ + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); + + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_PASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_ISOC_EN) : + iommu_feature_disable(iommu, CONTROL_ISOC_EN); + + /* + * make IOMMU memory accesses cache coherent + */ + iommu_feature_enable(iommu, CONTROL_COHERENT_EN); +} + +static void iommu_apply_resume_quirks(struct amd_iommu *iommu) +{ + int i, j; + u32 ioc_feature_control; + struct pci_dev *pdev = NULL; + + /* RD890 BIOSes may not have completely reconfigured the iommu */ + if (!is_rd890_iommu(iommu->dev)) + return; + + /* + * First, we need to ensure that the iommu is enabled. This is + * controlled by a register in the northbridge + */ + pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0)); + + if (!pdev) + return; + + /* Select Northbridge indirect register 0x75 and enable writing */ + pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); + pci_read_config_dword(pdev, 0x64, &ioc_feature_control); + + /* Enable the iommu */ + if (!(ioc_feature_control & 0x1)) + pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); + + pci_dev_put(pdev); + + /* Restore the iommu BAR */ + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, + iommu->stored_addr_lo); + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, + iommu->stored_addr_hi); + + /* Restore the l1 indirect regs for each of the 6 l1s */ + for (i = 0; i < 6; i++) + for (j = 0; j < 0x12; j++) + iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); + + /* Restore the l2 indirect regs */ + for (i = 0; i < 0x83; i++) + iommu_write_l2(iommu, i, iommu->stored_l2[i]); + + /* Lock PCI setup registers */ + pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, + iommu->stored_addr_lo | 1); +} + +/* + * This function finally enables all IOMMUs found in the system after + * they have been initialized + */ +static void enable_iommus(void) +{ + struct amd_iommu *iommu; + + for_each_iommu(iommu) { + iommu_disable(iommu); + iommu_init_flags(iommu); + iommu_set_device_table(iommu); + iommu_enable_command_buffer(iommu); + iommu_enable_event_buffer(iommu); + iommu_set_exclusion_range(iommu); + iommu_init_msi(iommu); + iommu_enable(iommu); + iommu_flush_all_caches(iommu); + } +} + +static void disable_iommus(void) +{ + struct amd_iommu *iommu; + + for_each_iommu(iommu) + iommu_disable(iommu); +} + +/* + * Suspend/Resume support + * disable suspend until real resume implemented + */ + +static void amd_iommu_resume(void) +{ + struct amd_iommu *iommu; + + for_each_iommu(iommu) + iommu_apply_resume_quirks(iommu); + + /* re-load the hardware */ + enable_iommus(); + + /* + * we have to flush after the IOMMUs are enabled because a + * disabled IOMMU will never execute the commands we send + */ + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); +} + +static int amd_iommu_suspend(void) +{ + /* disable IOMMUs to go 
out of the way for BIOS */ + disable_iommus(); + + return 0; +} + +static struct syscore_ops amd_iommu_syscore_ops = { + .suspend = amd_iommu_suspend, + .resume = amd_iommu_resume, +}; + +/* + * This is the core init function for AMD IOMMU hardware in the system. + * This function is called from the generic x86 DMA layer initialization + * code. + * + * This function basically parses the ACPI table for AMD IOMMU (IVRS) + * three times: + * + * 1 pass) Find the highest PCI device id the driver has to handle. + * Upon this information the size of the data structures is + * determined that needs to be allocated. + * + * 2 pass) Initialize the data structures just allocated with the + * information in the ACPI table about available AMD IOMMUs + * in the system. It also maps the PCI devices in the + * system to specific IOMMUs + * + * 3 pass) After the basic data structures are allocated and + * initialized we update them with information about memory + * remapping requirements parsed out of the ACPI table in + * this last pass. + * + * After that the hardware is initialized and ready to go. In the last + * step we do some Linux specific things like registering the driver in + * the dma_ops interface and initializing the suspend/resume support + * functions. Finally it prints some information about AMD IOMMUs and + * the driver state and enables the hardware. + */ +static int __init amd_iommu_init(void) +{ + int i, ret = 0; + + /* + * First parse ACPI tables to find the largest Bus/Dev/Func + * we need to handle. Upon this information the shared data + * structures for the IOMMUs in the system will be allocated + */ + if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0) + return -ENODEV; + + ret = amd_iommu_init_err; + if (ret) + goto out; + + dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); + alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); + rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); + + ret = -ENOMEM; + + /* Device table - directly used by all IOMMUs */ + amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(dev_table_size)); + if (amd_iommu_dev_table == NULL) + goto out; + + /* + * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the + * IOMMU see for that device + */ + amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, + get_order(alias_table_size)); + if (amd_iommu_alias_table == NULL) + goto free; + + /* IOMMU rlookup table - find the IOMMU for a specific device */ + amd_iommu_rlookup_table = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(rlookup_table_size)); + if (amd_iommu_rlookup_table == NULL) + goto free; + + amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_ID/8)); + if (amd_iommu_pd_alloc_bitmap == NULL) + goto free; + + /* init the device table */ + init_device_table(); + + /* + * let all alias entries point to itself + */ + for (i = 0; i <= amd_iommu_last_bdf; ++i) + amd_iommu_alias_table[i] = i; + + /* + * never allocate domain 0 because its used as the non-allocated and + * error value placeholder + */ + amd_iommu_pd_alloc_bitmap[0] = 1; + + spin_lock_init(&amd_iommu_pd_lock); + + /* + * now the data structures are allocated and basically initialized + * start the real acpi table scan + */ + ret = -ENODEV; + if (acpi_table_parse("IVRS", init_iommu_all) != 0) + goto free; + + if (amd_iommu_init_err) { + ret = amd_iommu_init_err; + goto free; + } + + if (acpi_table_parse("IVRS", init_memory_definitions) != 0) + goto free; + + if 
(amd_iommu_init_err) { + ret = amd_iommu_init_err; + goto free; + } + + ret = amd_iommu_init_devices(); + if (ret) + goto free; + + enable_iommus(); + + if (iommu_pass_through) + ret = amd_iommu_init_passthrough(); + else + ret = amd_iommu_init_dma_ops(); + + if (ret) + goto free_disable; + + amd_iommu_init_api(); + + amd_iommu_init_notifier(); + + register_syscore_ops(&amd_iommu_syscore_ops); + + if (iommu_pass_through) + goto out; + + if (amd_iommu_unmap_flush) + printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); + else + printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); + + x86_platform.iommu_shutdown = disable_iommus; +out: + return ret; + +free_disable: + disable_iommus(); + +free: + amd_iommu_uninit_devices(); + + free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, + get_order(MAX_DOMAIN_ID/8)); + + free_pages((unsigned long)amd_iommu_rlookup_table, + get_order(rlookup_table_size)); + + free_pages((unsigned long)amd_iommu_alias_table, + get_order(alias_table_size)); + + free_pages((unsigned long)amd_iommu_dev_table, + get_order(dev_table_size)); + + free_iommu_all(); + + free_unity_maps(); + +#ifdef CONFIG_GART_IOMMU + /* + * We failed to initialize the AMD IOMMU - try fallback to GART + * if possible. + */ + gart_iommu_init(); + +#endif + + goto out; +} + +/**************************************************************************** + * + * Early detect code. This code runs at IOMMU detection time in the DMA + * layer. It just looks if there is an IVRS ACPI table to detect AMD + * IOMMUs + * + ****************************************************************************/ +static int __init early_amd_iommu_detect(struct acpi_table_header *table) +{ + return 0; +} + +int __init amd_iommu_detect(void) +{ + if (no_iommu || (iommu_detected && !gart_iommu_aperture)) + return -ENODEV; + + if (amd_iommu_disabled) + return -ENODEV; + + if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { + iommu_detected = 1; + amd_iommu_detected = 1; + x86_init.iommu.iommu_init = amd_iommu_init; + + /* Make sure ACS will be enabled */ + pci_request_acs(); + return 1; + } + return -ENODEV; +} + +/**************************************************************************** + * + * Parsing functions for the AMD IOMMU specific kernel command line + * options. + * + ****************************************************************************/ + +static int __init parse_amd_iommu_dump(char *str) +{ + amd_iommu_dump = true; + + return 1; +} + +static int __init parse_amd_iommu_options(char *str) +{ + for (; *str; ++str) { + if (strncmp(str, "fullflush", 9) == 0) + amd_iommu_unmap_flush = true; + if (strncmp(str, "off", 3) == 0) + amd_iommu_disabled = true; + } + + return 1; +} + +__setup("amd_iommu_dump", parse_amd_iommu_dump); +__setup("amd_iommu=", parse_amd_iommu_options); + +IOMMU_INIT_FINISH(amd_iommu_detect, + gart_iommu_hole_init, + 0, + 0); diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h new file mode 100644 index 000000000000..7ffaa64410b0 --- /dev/null +++ b/drivers/iommu/amd_iommu_proto.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2009-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_AMD_IOMMU_PROTO_H +#define _ASM_X86_AMD_IOMMU_PROTO_H + +#include "amd_iommu_types.h" + +extern int amd_iommu_init_dma_ops(void); +extern int amd_iommu_init_passthrough(void); +extern irqreturn_t amd_iommu_int_thread(int irq, void *data); +extern irqreturn_t amd_iommu_int_handler(int irq, void *data); +extern void amd_iommu_apply_erratum_63(u16 devid); +extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); +extern int amd_iommu_init_devices(void); +extern void amd_iommu_uninit_devices(void); +extern void amd_iommu_init_notifier(void); +extern void amd_iommu_init_api(void); +#ifndef CONFIG_AMD_IOMMU_STATS + +static inline void amd_iommu_stats_init(void) { } + +#endif /* !CONFIG_AMD_IOMMU_STATS */ + +static inline bool is_rd890_iommu(struct pci_dev *pdev) +{ + return (pdev->vendor == PCI_VENDOR_ID_ATI) && + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); +} + +static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) +{ + if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) + return false; + + return !!(iommu->features & f); +} + +#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h new file mode 100644 index 000000000000..4c9982995414 --- /dev/null +++ b/drivers/iommu/amd_iommu_types.h @@ -0,0 +1,580 @@ +/* + * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. + * Author: Joerg Roedel + * Leo Duran + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef _ASM_X86_AMD_IOMMU_TYPES_H +#define _ASM_X86_AMD_IOMMU_TYPES_H + +#include +#include +#include +#include + +/* + * Maximum number of IOMMUs supported + */ +#define MAX_IOMMUS 32 + +/* + * some size calculation constants + */ +#define DEV_TABLE_ENTRY_SIZE 32 +#define ALIAS_TABLE_ENTRY_SIZE 2 +#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *)) + +/* Length of the MMIO region for the AMD IOMMU */ +#define MMIO_REGION_LENGTH 0x4000 + +/* Capability offsets used by the driver */ +#define MMIO_CAP_HDR_OFFSET 0x00 +#define MMIO_RANGE_OFFSET 0x0c +#define MMIO_MISC_OFFSET 0x10 + +/* Masks, shifts and macros to parse the device range capability */ +#define MMIO_RANGE_LD_MASK 0xff000000 +#define MMIO_RANGE_FD_MASK 0x00ff0000 +#define MMIO_RANGE_BUS_MASK 0x0000ff00 +#define MMIO_RANGE_LD_SHIFT 24 +#define MMIO_RANGE_FD_SHIFT 16 +#define MMIO_RANGE_BUS_SHIFT 8 +#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT) +#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT) +#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT) +#define MMIO_MSI_NUM(x) ((x) & 0x1f) + +/* Flag masks for the AMD IOMMU exclusion range */ +#define MMIO_EXCL_ENABLE_MASK 0x01ULL +#define MMIO_EXCL_ALLOW_MASK 0x02ULL + +/* Used offsets into the MMIO space */ +#define MMIO_DEV_TABLE_OFFSET 0x0000 +#define MMIO_CMD_BUF_OFFSET 0x0008 +#define MMIO_EVT_BUF_OFFSET 0x0010 +#define MMIO_CONTROL_OFFSET 0x0018 +#define MMIO_EXCL_BASE_OFFSET 0x0020 +#define MMIO_EXCL_LIMIT_OFFSET 0x0028 +#define MMIO_EXT_FEATURES 0x0030 +#define MMIO_CMD_HEAD_OFFSET 0x2000 +#define MMIO_CMD_TAIL_OFFSET 0x2008 +#define MMIO_EVT_HEAD_OFFSET 0x2010 +#define MMIO_EVT_TAIL_OFFSET 0x2018 +#define MMIO_STATUS_OFFSET 0x2020 + + +/* Extended Feature Bits */ +#define FEATURE_PREFETCH (1ULL<<0) +#define FEATURE_PPR (1ULL<<1) +#define FEATURE_X2APIC (1ULL<<2) +#define FEATURE_NX (1ULL<<3) +#define FEATURE_GT (1ULL<<4) +#define FEATURE_IA (1ULL<<6) +#define FEATURE_GA (1ULL<<7) +#define FEATURE_HE (1ULL<<8) +#define FEATURE_PC (1ULL<<9) + +/* MMIO status bits */ +#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 + +/* event logging constants */ +#define EVENT_ENTRY_SIZE 0x10 +#define EVENT_TYPE_SHIFT 28 +#define EVENT_TYPE_MASK 0xf +#define EVENT_TYPE_ILL_DEV 0x1 +#define EVENT_TYPE_IO_FAULT 0x2 +#define EVENT_TYPE_DEV_TAB_ERR 0x3 +#define EVENT_TYPE_PAGE_TAB_ERR 0x4 +#define EVENT_TYPE_ILL_CMD 0x5 +#define EVENT_TYPE_CMD_HARD_ERR 0x6 +#define EVENT_TYPE_IOTLB_INV_TO 0x7 +#define EVENT_TYPE_INV_DEV_REQ 0x8 +#define EVENT_DEVID_MASK 0xffff +#define EVENT_DEVID_SHIFT 0 +#define EVENT_DOMID_MASK 0xffff +#define EVENT_DOMID_SHIFT 0 +#define EVENT_FLAGS_MASK 0xfff +#define EVENT_FLAGS_SHIFT 0x10 + +/* feature control bits */ +#define CONTROL_IOMMU_EN 0x00ULL +#define CONTROL_HT_TUN_EN 0x01ULL +#define CONTROL_EVT_LOG_EN 0x02ULL +#define CONTROL_EVT_INT_EN 0x03ULL +#define CONTROL_COMWAIT_EN 0x04ULL +#define CONTROL_PASSPW_EN 0x08ULL +#define CONTROL_RESPASSPW_EN 0x09ULL +#define CONTROL_COHERENT_EN 0x0aULL +#define CONTROL_ISOC_EN 0x0bULL +#define CONTROL_CMDBUF_EN 0x0cULL +#define CONTROL_PPFLOG_EN 0x0dULL +#define CONTROL_PPFINT_EN 0x0eULL + +/* command specific defines */ +#define CMD_COMPL_WAIT 0x01 +#define CMD_INV_DEV_ENTRY 0x02 +#define CMD_INV_IOMMU_PAGES 0x03 +#define 
CMD_INV_IOTLB_PAGES 0x04 +#define CMD_INV_ALL 0x08 + +#define CMD_COMPL_WAIT_STORE_MASK 0x01 +#define CMD_COMPL_WAIT_INT_MASK 0x02 +#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01 +#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02 + +#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL + +/* macros and definitions for device table entries */ +#define DEV_ENTRY_VALID 0x00 +#define DEV_ENTRY_TRANSLATION 0x01 +#define DEV_ENTRY_IR 0x3d +#define DEV_ENTRY_IW 0x3e +#define DEV_ENTRY_NO_PAGE_FAULT 0x62 +#define DEV_ENTRY_EX 0x67 +#define DEV_ENTRY_SYSMGT1 0x68 +#define DEV_ENTRY_SYSMGT2 0x69 +#define DEV_ENTRY_INIT_PASS 0xb8 +#define DEV_ENTRY_EINT_PASS 0xb9 +#define DEV_ENTRY_NMI_PASS 0xba +#define DEV_ENTRY_LINT0_PASS 0xbe +#define DEV_ENTRY_LINT1_PASS 0xbf +#define DEV_ENTRY_MODE_MASK 0x07 +#define DEV_ENTRY_MODE_SHIFT 0x09 + +/* constants to configure the command buffer */ +#define CMD_BUFFER_SIZE 8192 +#define CMD_BUFFER_UNINITIALIZED 1 +#define CMD_BUFFER_ENTRIES 512 +#define MMIO_CMD_SIZE_SHIFT 56 +#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT) + +/* constants for event buffer handling */ +#define EVT_BUFFER_SIZE 8192 /* 512 entries */ +#define EVT_LEN_MASK (0x9ULL << 56) + +#define PAGE_MODE_NONE 0x00 +#define PAGE_MODE_1_LEVEL 0x01 +#define PAGE_MODE_2_LEVEL 0x02 +#define PAGE_MODE_3_LEVEL 0x03 +#define PAGE_MODE_4_LEVEL 0x04 +#define PAGE_MODE_5_LEVEL 0x05 +#define PAGE_MODE_6_LEVEL 0x06 + +#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) +#define PM_LEVEL_SIZE(x) (((x) < 6) ? \ + ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ + (0xffffffffffffffffULL)) +#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) +#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL) +#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \ + IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW) +#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL) + +#define PM_MAP_4k 0 +#define PM_ADDR_MASK 0x000ffffffffff000ULL +#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \ + (~((1ULL << (12 + ((lvl) * 9))) - 1))) +#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr)) + +/* + * Returns the page table level to use for a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_LEVEL(pagesize) \ + ((__ffs(pagesize) - 12) / 9) +/* + * Returns the number of ptes to use for a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_PTE_COUNT(pagesize) \ + (1ULL << ((__ffs(pagesize) - 12) % 9)) + +/* + * Aligns a given io-virtual address to a given page size + * Pagesize is expected to be a power-of-two + */ +#define PAGE_SIZE_ALIGN(address, pagesize) \ + ((address) & ~((pagesize) - 1)) +/* + * Creates an IOMMU PTE for an address an a given pagesize + * The PTE has no permission bits set + * Pagesize is expected to be a power-of-two larger than 4096 + */ +#define PAGE_SIZE_PTE(address, pagesize) \ + (((address) | ((pagesize) - 1)) & \ + (~(pagesize >> 1)) & PM_ADDR_MASK) + +/* + * Takes a PTE value with mode=0x07 and returns the page size it maps + */ +#define PTE_PAGE_SIZE(pte) \ + (1ULL << (1 + ffz(((pte) | 0xfffULL)))) + +#define IOMMU_PTE_P (1ULL << 0) +#define IOMMU_PTE_TV (1ULL << 1) +#define IOMMU_PTE_U (1ULL << 59) +#define IOMMU_PTE_FC (1ULL << 60) +#define IOMMU_PTE_IR (1ULL << 61) +#define IOMMU_PTE_IW (1ULL << 62) + +#define DTE_FLAG_IOTLB 0x01 + +#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) +#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) +#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) +#define IOMMU_PTE_MODE(pte) 
(((pte) >> 9) & 0x07) + +#define IOMMU_PROT_MASK 0x03 +#define IOMMU_PROT_IR 0x01 +#define IOMMU_PROT_IW 0x02 + +/* IOMMU capabilities */ +#define IOMMU_CAP_IOTLB 24 +#define IOMMU_CAP_NPCACHE 26 +#define IOMMU_CAP_EFR 27 + +#define MAX_DOMAIN_ID 65536 + +/* FIXME: move this macro to */ +#define PCI_BUS(x) (((x) >> 8) & 0xff) + +/* Protection domain flags */ +#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ +#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops + domain for an IOMMU */ +#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page + translation */ + +extern bool amd_iommu_dump; +#define DUMP_printk(format, arg...) \ + do { \ + if (amd_iommu_dump) \ + printk(KERN_INFO "AMD-Vi: " format, ## arg); \ + } while(0); + +/* global flag if IOMMUs cache non-present entries */ +extern bool amd_iommu_np_cache; +/* Only true if all IOMMUs support device IOTLBs */ +extern bool amd_iommu_iotlb_sup; + +/* + * Make iterating over all IOMMUs easier + */ +#define for_each_iommu(iommu) \ + list_for_each_entry((iommu), &amd_iommu_list, list) +#define for_each_iommu_safe(iommu, next) \ + list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list) + +#define APERTURE_RANGE_SHIFT 27 /* 128 MB */ +#define APERTURE_RANGE_SIZE (1ULL << APERTURE_RANGE_SHIFT) +#define APERTURE_RANGE_PAGES (APERTURE_RANGE_SIZE >> PAGE_SHIFT) +#define APERTURE_MAX_RANGES 32 /* allows 4GB of DMA address space */ +#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT) +#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL) + +/* + * This structure contains generic data for IOMMU protection domains + * independent of their use. + */ +struct protection_domain { + struct list_head list; /* for list of all protection domains */ + struct list_head dev_list; /* List of all devices in this domain */ + spinlock_t lock; /* mostly used to lock the page table*/ + struct mutex api_lock; /* protect page tables in the iommu-api path */ + u16 id; /* the domain id written to the device table */ + int mode; /* paging mode (0-6 levels) */ + u64 *pt_root; /* page table root pointer */ + unsigned long flags; /* flags to find out type of domain */ + bool updated; /* complete domain flush required */ + unsigned dev_cnt; /* devices assigned to this domain */ + unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */ + void *priv; /* private data */ + +}; + +/* + * This struct contains device specific data for the IOMMU + */ +struct iommu_dev_data { + struct list_head list; /* For domain->dev_list */ + struct device *dev; /* Device this data belong to */ + struct device *alias; /* The Alias Device */ + struct protection_domain *domain; /* Domain the device is bound to */ + atomic_t bind; /* Domain attach reverent count */ +}; + +/* + * For dynamic growth the aperture size is split into ranges of 128MB of + * DMA address space each. This struct represents one such range. + */ +struct aperture_range { + + /* address allocation bitmap */ + unsigned long *bitmap; + + /* + * Array of PTE pages for the aperture. In this array we save all the + * leaf pages of the domain page table used for the aperture. This way + * we don't need to walk the page table to find a specific PTE. We can + * just calculate its address in constant time. 
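(Aside, not part of the patch: the constant-time calculation works because each 128 MiB aperture range holds exactly 64 leaf page tables, each covering 2 MiB of 4 KiB mappings, so bits 26..21 of a DMA address select the cached leaf page and bits 20..12 select the PTE within it. As arithmetic, using the APERTURE_* macros defined above and an invented address:

#include <stdint.h>
#include <stdio.h>

#define APERTURE_RANGE_SHIFT	27	/* 128 MiB per range */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

int main(void)
{
	uint64_t dma_addr = 0x0980d000ULL;	/* invented DMA address */

	printf("range %llu, pte page %llu, pte index %llu\n",
	       (unsigned long long)APERTURE_RANGE_INDEX(dma_addr),
	       (unsigned long long)APERTURE_PAGE_INDEX(dma_addr),
	       (unsigned long long)((dma_addr >> 12) & 0x1ff));
	return 0;
}
)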
+/*
+ * For dynamic growth the aperture size is split into ranges of 128MB of
+ * DMA address space each. This struct represents one such range.
+ */
+struct aperture_range {
+
+	/* address allocation bitmap */
+	unsigned long *bitmap;
+
+	/*
+	 * Array of PTE pages for the aperture. In this array we save all the
+	 * leaf pages of the domain page table used for the aperture. This way
+	 * we don't need to walk the page table to find a specific PTE. We can
+	 * just calculate its address in constant time.
+	 */
+	u64 *pte_pages[64];
+
+	unsigned long offset;
+};
+
+/*
+ * Data container for a dma_ops specific protection domain
+ */
+struct dma_ops_domain {
+	struct list_head list;
+
+	/* generic protection domain information */
+	struct protection_domain domain;
+
+	/* size of the aperture for the mappings */
+	unsigned long aperture_size;
+
+	/* address we start to search for free addresses */
+	unsigned long next_address;
+
+	/* address space relevant data */
+	struct aperture_range *aperture[APERTURE_MAX_RANGES];
+
+	/* This will be set to true when TLB needs to be flushed */
+	bool need_flush;
+
+	/*
+	 * if this is a preallocated domain, keep the device for which it was
+	 * preallocated in this variable
+	 */
+	u16 target_dev;
+};
+
+/*
+ * Structure where we save information about one hardware AMD IOMMU in the
+ * system.
+ */
+struct amd_iommu {
+	struct list_head list;
+
+	/* Index within the IOMMU array */
+	int index;
+
+	/* locks the accesses to the hardware */
+	spinlock_t lock;
+
+	/* Pointer to PCI device of this IOMMU */
+	struct pci_dev *dev;
+
+	/* physical address of MMIO space */
+	u64 mmio_phys;
+	/* virtual address of MMIO space */
+	u8 *mmio_base;
+
+	/* capabilities of that IOMMU read from ACPI */
+	u32 cap;
+
+	/* flags read from acpi table */
+	u8 acpi_flags;
+
+	/* Extended features */
+	u64 features;
+
+	/*
+	 * Capability pointer. There could be more than one IOMMU per PCI
+	 * device function if there are more than one AMD IOMMU capability
+	 * pointers.
+	 */
+	u16 cap_ptr;
+
+	/* pci domain of this IOMMU */
+	u16 pci_seg;
+
+	/* first device this IOMMU handles. read from PCI */
+	u16 first_device;
+	/* last device this IOMMU handles. read from PCI */
+	u16 last_device;
+
+	/* start of exclusion range of that IOMMU */
+	u64 exclusion_start;
+	/* length of exclusion range of that IOMMU */
+	u64 exclusion_length;
+
+	/* command buffer virtual address */
+	u8 *cmd_buf;
+	/* size of command buffer */
+	u32 cmd_buf_size;
+
+	/* size of event buffer */
+	u32 evt_buf_size;
+	/* event buffer virtual address */
+	u8 *evt_buf;
+	/* MSI number for event interrupt */
+	u16 evt_msi_num;
+
+	/* true if interrupts for this IOMMU are already enabled */
+	bool int_enabled;
+
+	/* if true, we need to send a completion wait command */
+	bool need_sync;
+
+	/* default dma_ops domain for that IOMMU */
+	struct dma_ops_domain *default_dom;
+
+	/*
+	 * We can't rely on the BIOS to restore all values on reinit, so we
+	 * need to stash them
+	 */
+
+	/* The iommu BAR */
+	u32 stored_addr_lo;
+	u32 stored_addr_hi;
+
+	/*
+	 * Each iommu has 6 l1s, each of which is documented as having 0x12
+	 * registers
+	 */
+	u32 stored_l1[6][0x12];
+
+	/* The l2 indirect registers */
+	u32 stored_l2[0x83];
+};
+
+/*
+ * List with all IOMMUs in the system. This list is not locked because it is
+ * only written and read at driver initialization or suspend time
+ */
+extern struct list_head amd_iommu_list;
+
+/*
+ * Array with pointers to each IOMMU struct
+ * The indices are referenced in the protection domains
+ */
+extern struct amd_iommu *amd_iommus[MAX_IOMMUS];
+
+/* Number of IOMMUs present in the system */
+extern int amd_iommus_present;
+
+/*
+ * Declarations for the global list of all protection domains
+ */
+extern spinlock_t amd_iommu_pd_lock;
+extern struct list_head amd_iommu_pd_list;
+
+/*
+ * Structure defining one entry in the device table
+ */
+struct dev_table_entry {
+	u32 data[8];
+};
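[Editor's note: the dev_table_entry layout implies a fixed footprint worth spelling out: one 256-bit entry for every possible 16-bit device id. A standalone back-of-envelope sketch, with the struct mirrored locally:]

#include <stdio.h>
#include <stdint.h>

struct dev_table_entry {
	uint32_t data[8];	/* 32 bytes: one 256-bit DTE */
};

int main(void)
{
	/* a device id is bus(8 bits) | devfn(8 bits): 65536 possible ids */
	unsigned long entries = 1UL << 16;

	printf("device table: %lu KiB\n",
	       (unsigned long)(entries * sizeof(struct dev_table_entry) / 1024));
	return 0;
}

This prints 2048 KiB, i.e. the hardware device table pointed to by amd_iommu_dev_table below is a fixed 2MiB regardless of how many devices actually exist.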
+/*
+ * One entry for unity mappings parsed out of the ACPI table.
+ */
+struct unity_map_entry {
+	struct list_head list;
+
+	/* starting device id this entry is used for (inclusive) */
+	u16 devid_start;
+	/* end device id this entry is used for (inclusive) */
+	u16 devid_end;
+
+	/* start address to unity map (inclusive) */
+	u64 address_start;
+	/* end address to unity map (inclusive) */
+	u64 address_end;
+
+	/* required protection */
+	int prot;
+};
+
+/*
+ * List of all unity mappings. It is not locked because at runtime it is
+ * only read. It is created at ACPI table parsing time.
+ */
+extern struct list_head amd_iommu_unity_map;
+
+/*
+ * Data structures for device handling
+ */
+
+/*
+ * Device table used by hardware. Read and write accesses by software are
+ * locked with the amd_iommu_pd_table lock.
+ */
+extern struct dev_table_entry *amd_iommu_dev_table;
+
+/*
+ * Alias table mapping requestor ids to device ids. Not locked because it
+ * is only read at runtime.
+ */
+extern u16 *amd_iommu_alias_table;
+
+/*
+ * Reverse lookup table to find the IOMMU which translates a specific device.
+ */
+extern struct amd_iommu **amd_iommu_rlookup_table;
+
+/* size of the dma_ops aperture as power of 2 */
+extern unsigned amd_iommu_aperture_order;
+
+/* largest PCI device id we expect translation requests for */
+extern u16 amd_iommu_last_bdf;
+
+/* allocation bitmap for domain ids */
+extern unsigned long *amd_iommu_pd_alloc_bitmap;
+
+/*
+ * If true, the addresses will be flushed at unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
+/* takes bus and device/function and returns the device id
+ * FIXME: should that be in generic PCI code? */
+static inline u16 calc_devid(u8 bus, u8 devfn)
+{
+	return (((u16)bus) << 8) | devfn;
+}
+
+#ifdef CONFIG_AMD_IOMMU_STATS
+
+struct __iommu_counter {
+	char *name;
+	struct dentry *dent;
+	u64 value;
+};
+
+#define DECLARE_STATS_COUNTER(nm) \
+	static struct __iommu_counter nm = {	\
+		.name = #nm,			\
+	}
+
+#define INC_STATS_COUNTER(name)		name.value += 1
+#define ADD_STATS_COUNTER(name, x)	name.value += (x)
+#define SUB_STATS_COUNTER(name, x)	name.value -= (x)
+
+#else /* CONFIG_AMD_IOMMU_STATS */
+
+#define DECLARE_STATS_COUNTER(name)
+#define INC_STATS_COUNTER(name)
+#define ADD_STATS_COUNTER(name, x)
+#define SUB_STATS_COUNTER(name, x)
+
+#endif /* CONFIG_AMD_IOMMU_STATS */
+
+#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
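[Editor's note: the requestor-id helpers are easy to sanity-check: calc_devid() packs bus and devfn into the 16-bit device id that indexes the tables above, and PCI_BUS() recovers the bus number. A standalone round-trip sketch, with the helpers mirrored locally (including a local copy of the standard PCI_DEVFN encoding) and arbitrary sample values:]

#include <stdio.h>
#include <stdint.h>

#define PCI_BUS(x)		(((x) >> 8) & 0xff)
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

static inline uint16_t calc_devid(uint8_t bus, uint8_t devfn)
{
	/* bus in the high byte, devfn in the low byte */
	return (uint16_t)(((uint16_t)bus << 8) | devfn);
}

int main(void)
{
	/* hypothetical device 02.1 on bus 0x3a */
	uint16_t devid = calc_devid(0x3a, PCI_DEVFN(2, 1));

	printf("devid=%#x bus=%#x\n", devid, PCI_BUS(devid));
	return 0;
}

This prints devid=0x3a11 bus=0x3a, which is why the FIXME above suggests the macro belongs in generic PCI code: nothing about it is IOMMU-specific.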
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
new file mode 100644
index 000000000000..a6863a2dec1f
--- /dev/null
+++ b/include/linux/amd-iommu.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel
+ *         Leo Duran
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASM_X86_AMD_IOMMU_H
+#define _ASM_X86_AMD_IOMMU_H
+
+#include
+
+#ifdef CONFIG_AMD_IOMMU
+
+extern int amd_iommu_detect(void);
+
+#else
+
+static inline int amd_iommu_detect(void) { return -ENODEV; }
+
+#endif
+
+#endif /* _ASM_X86_AMD_IOMMU_H */
--
cgit v1.2.3
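[Editor's note: the stub pattern in the new header lets callers probe unconditionally: when CONFIG_AMD_IOMMU is off, amd_iommu_detect() collapses to an inline returning -ENODEV, which is exactly why the definition of ENODEV must be in scope. A standalone sketch of a hypothetical caller, faking the Kconfig-off stub so it runs in userspace:]

#include <stdio.h>
#include <errno.h>

/* mirrors the !CONFIG_AMD_IOMMU inline stub from the header above */
static inline int amd_iommu_detect(void) { return -ENODEV; }

int main(void)
{
	int ret = amd_iommu_detect();

	/* callers need no #ifdef of their own; -ENODEV means "no IOMMU" */
	if (ret == -ENODEV)
		printf("AMD IOMMU not available, falling back\n");
	return 0;
}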