Diffstat (limited to 'drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c')
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 90
1 file changed, 71 insertions(+), 19 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
index 29117444e5a0..01e9b50b10a1 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c
@@ -7,6 +7,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <soc/tegra/mc.h>
+
#include "arm-smmu.h"
/*
@@ -15,18 +17,32 @@
* interleaved IOVA accesses across them and translates accesses from
* non-isochronous HW devices.
* Third one is used for translating accesses from isochronous HW devices.
+ *
+ * In addition, the SMMU driver needs to coordinate with the memory controller
+ * driver to ensure that the right SID override is programmed for any given
+ * memory client. This is necessary to allow for use-cases such as seamlessly
+ * handing over the display controller configuration from the firmware to the
+ * kernel.
+ *
* This implementation supports programming of the two instances that must
- * be programmed identically.
- * The third instance usage is through standard arm-smmu driver itself and
- * is out of scope of this implementation.
+ * be programmed identically and takes care of invoking the memory controller
+ * driver for SID override programming after devices have been attached to an
+ * SMMU instance.
*/
-#define NUM_SMMU_INSTANCES 2
+#define MAX_SMMU_INSTANCES 2
struct nvidia_smmu {
- struct arm_smmu_device smmu;
- void __iomem *bases[NUM_SMMU_INSTANCES];
+ struct arm_smmu_device smmu;
+ void __iomem *bases[MAX_SMMU_INSTANCES];
+ unsigned int num_instances;
+ struct tegra_mc *mc;
};
+static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
+{
+ return container_of(smmu, struct nvidia_smmu, smmu);
+}
+
static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
unsigned int inst, int page)
{
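
A note on the two helpers added above: to_nvidia_smmu() uses container_of() to recover the Tegra-specific wrapper from the struct arm_smmu_device pointer that the core arm-smmu code passes around. Below is a minimal userspace sketch of that pattern; container_of is reimplemented via offsetof() as the kernel does, and base_dev/wrapper are illustrative names, not identifiers from this driver:

#include <stddef.h>
#include <stdio.h>

/* Recover the enclosing structure from a pointer to one of its members,
 * which is exactly what the kernel's container_of() macro does. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_dev {		/* stands in for struct arm_smmu_device */
	int id;
};

struct wrapper {		/* stands in for struct nvidia_smmu */
	struct base_dev base;	/* kept first so the devm_krealloc() trick in
				 * nvidia_smmu_impl_init() stays valid;
				 * container_of() itself works for any member */
	unsigned int num_instances;
};

int main(void)
{
	struct wrapper w = { .base = { .id = 42 }, .num_instances = 2 };
	struct base_dev *dev = &w.base;	/* what the core driver hands back */
	struct wrapper *nv = container_of(dev, struct wrapper, base);

	printf("id=%d, instances=%u\n", nv->base.id, nv->num_instances);
	return 0;
}
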
@@ -47,9 +63,10 @@ static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
int page, int offset, u32 val)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writel_relaxed(val, reg);
@@ -67,9 +84,10 @@ static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
int page, int offset, u64 val)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
writeq_relaxed(val, reg);
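
Both write paths above fan a single register write out to every detected instance, while reads are served from instance 0 alone, which keeps the mirrored SMMUs programmed identically. A rough userspace model of that fan-out, with plain arrays standing in for the ioremapped register pages and mirror_write32() as an invented name:

#include <stdint.h>
#include <stdio.h>

#define MAX_INSTANCES 2

static uint32_t regs[MAX_INSTANCES][16];	/* stand-in for per-instance MMIO */
static unsigned int num_instances = 2;

/* Mirror one 32-bit write to the same offset on every instance,
 * like nvidia_smmu_write_reg() does with writel_relaxed(). */
static void mirror_write32(unsigned int offset, uint32_t val)
{
	for (unsigned int i = 0; i < num_instances; i++)
		regs[i][offset] = val;
}

/* Reads are satisfied by instance 0 only, as in nvidia_smmu_read_reg(). */
static uint32_t read32(unsigned int offset)
{
	return regs[0][offset];
}

int main(void)
{
	mirror_write32(3, 0xdeadbeef);
	printf("inst0=0x%x inst1=0x%x read=0x%x\n",
	       (unsigned)regs[0][3], (unsigned)regs[1][3], (unsigned)read32(3));
	return 0;
}
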
@@ -79,6 +97,7 @@ static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
int sync, int status)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int delay;
arm_smmu_writel(smmu, page, sync, 0);
@@ -90,7 +109,7 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
u32 val = 0;
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
void __iomem *reg;
reg = nvidia_smmu_page(smmu, i, page) + status;
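
The loop above is the heart of nvidia_smmu_tlb_sync(): a TLB sync only counts as complete once every instance's status register reads back idle, so the per-instance status reads are OR-ed together and the handler keeps spinning while any of them still reports an active sync. A compact model of that aggregation, with invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_INSTANCES 2

static uint32_t status[NUM_INSTANCES];	/* stand-in for per-instance status regs */

/* The sync is complete only when no instance reports it active:
 * OR the per-instance reads and test the combined value. */
static bool tlb_sync_complete(void)
{
	uint32_t val = 0;

	for (unsigned int i = 0; i < NUM_INSTANCES; i++)
		val |= status[i];

	return val == 0;
}

int main(void)
{
	status[1] = 1;	/* instance 1 still flushing */
	printf("complete=%d\n", tlb_sync_complete());

	status[1] = 0;
	printf("complete=%d\n", tlb_sync_complete());
	return 0;
}
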
@@ -112,9 +131,10 @@ static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
unsigned int i;
- for (i = 0; i < NUM_SMMU_INSTANCES; i++) {
+ for (i = 0; i < nvidia->num_instances; i++) {
u32 val;
void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
ARM_SMMU_GR0_sGFSR;
@@ -157,8 +177,9 @@ static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
unsigned int inst;
irqreturn_t ret = IRQ_NONE;
struct arm_smmu_device *smmu = dev;
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
- for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
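
nvidia_smmu_global_fault() follows the usual pattern for one interrupt line shared by several hardware units: poll each instance's handler and report IRQ_HANDLED if at least one of them claimed the fault. A self-contained sketch of that aggregation, with handle_inst() invented for illustration:

#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

#define NUM_INSTANCES 2

/* Invented per-instance handler: pretend instance 1 raised the fault. */
static enum irqreturn handle_inst(unsigned int inst)
{
	return inst == 1 ? IRQ_HANDLED : IRQ_NONE;
}

/* Fan the shared IRQ out to every instance; the line counts as handled
 * if any single instance claimed it, IRQ_NONE only if all declined. */
static enum irqreturn shared_fault_handler(void)
{
	enum irqreturn ret = IRQ_NONE;

	for (unsigned int inst = 0; inst < NUM_INSTANCES; inst++)
		if (handle_inst(inst) == IRQ_HANDLED)
			ret = IRQ_HANDLED;

	return ret;
}

int main(void)
{
	printf("ret=%d\n", shared_fault_handler());
	return 0;
}
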
@@ -202,11 +223,13 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
struct arm_smmu_device *smmu;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain;
+ struct nvidia_smmu *nvidia;
smmu_domain = container_of(domain, struct arm_smmu_domain, domain);
smmu = smmu_domain->smmu;
+ nvidia = to_nvidia_smmu(smmu);
- for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) {
+ for (inst = 0; inst < nvidia->num_instances; inst++) {
irqreturn_t irq_ret;
/*
@@ -224,6 +247,17 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
return ret;
}
+static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
+{
+ struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
+ int err;
+
+ err = tegra_mc_probe_device(nvidia->mc, dev);
+ if (err < 0)
+ dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
+ dev_name(dev), err);
+}
+
static const struct arm_smmu_impl nvidia_smmu_impl = {
.read_reg = nvidia_smmu_read_reg,
.write_reg = nvidia_smmu_write_reg,
@@ -233,6 +267,11 @@ static const struct arm_smmu_impl nvidia_smmu_impl = {
.tlb_sync = nvidia_smmu_tlb_sync,
.global_fault = nvidia_smmu_global_fault,
.context_fault = nvidia_smmu_context_fault,
+ .probe_finalize = nvidia_smmu_probe_finalize,
+};
+
+static const struct arm_smmu_impl nvidia_smmu_single_impl = {
+ .probe_finalize = nvidia_smmu_probe_finalize,
};
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
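
The hunk above introduces a second, minimal ops table: when only one SMMU instance is found there is nothing to mirror, so only the probe_finalize hook (which forwards to tegra_mc_probe_device() for SID override programming) is installed. A sketch of selecting between two such ops tables at init time, with all names invented:

#include <stdio.h>

/* Stand-in for struct arm_smmu_impl: a table of optional hooks. */
struct impl_ops {
	const char *name;
	void (*probe_finalize)(void);
};

static void probe_finalize(void)
{
	puts("ask memory controller to program SID overrides");
}

/* Mirrored register I/O plus the MC hook vs. the MC hook alone. */
static const struct impl_ops dual_impl   = { "dual",   probe_finalize };
static const struct impl_ops single_impl = { "single", probe_finalize };

static const struct impl_ops *select_impl(unsigned int num_instances)
{
	return num_instances == 1 ? &single_impl : &dual_impl;
}

int main(void)
{
	printf("%s\n", select_impl(1)->name);	/* single */
	printf("%s\n", select_impl(2)->name);	/* dual */
	select_impl(2)->probe_finalize();
	return 0;
}
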
@@ -241,23 +280,36 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
struct device *dev = smmu->dev;
struct nvidia_smmu *nvidia_smmu;
struct platform_device *pdev = to_platform_device(dev);
+ unsigned int i;
nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
if (!nvidia_smmu)
return ERR_PTR(-ENOMEM);
+ nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
+ if (IS_ERR(nvidia_smmu->mc))
+ return ERR_CAST(nvidia_smmu->mc);
+
/* Instance 0 is ioremapped by arm-smmu.c. */
nvidia_smmu->bases[0] = smmu->base;
+ nvidia_smmu->num_instances++;
+
+ for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return ERR_PTR(-ENODEV);
+ nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
+ if (IS_ERR(nvidia_smmu->bases[i]))
+ return ERR_CAST(nvidia_smmu->bases[i]);
- nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res);
- if (IS_ERR(nvidia_smmu->bases[1]))
- return ERR_CAST(nvidia_smmu->bases[1]);
+ nvidia_smmu->num_instances++;
+ }
- nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
+ if (nvidia_smmu->num_instances == 1)
+ nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
+ else
+ nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
return &nvidia_smmu->smmu;
}
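
Finally, nvidia_smmu_impl_init() now discovers the number of instances at runtime instead of hard-coding two: instance 0 is always present (already mapped by arm-smmu.c), and further MMIO resources are claimed until platform_get_resource() returns NULL. A userspace model of that probing loop, with get_resource() standing in for platform_get_resource():

#include <stdio.h>

#define MAX_INSTANCES 2

/* Pretend platform resources: one MMIO region per instance in the DT. */
static const unsigned long mmio[] = { 0x12000000UL };	/* single instance here */

static const unsigned long *get_resource(unsigned int index)
{
	return index < sizeof(mmio) / sizeof(mmio[0]) ? &mmio[index] : NULL;
}

int main(void)
{
	unsigned int num_instances = 1;	/* instance 0 is always mapped */

	/* Claim additional regions until the next index is absent. */
	for (unsigned int i = 1; i < MAX_INSTANCES; i++) {
		if (!get_resource(i))
			break;
		num_instances++;	/* the driver would ioremap the region here */
	}

	printf("detected %u instance(s)\n", num_instances);
	return 0;
}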