path: root/drivers
author    Linus Torvalds    2018-04-05 21:21:08 -0700
committer Linus Torvalds    2018-04-05 21:21:08 -0700
commit    167569343fac74ec6825a3ab982f795b5880e63e (patch)
tree      965adb59fbe10d9f45a7fb90cb1ec1bc18d4613c /drivers
parent    b240b419db5d624ce7a5a397d6f62a1a686009ec (diff)
parent    cd903711fd9dce808b5cc07e509135886d962b0c (diff)
Merge tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC platform updates from Arnd Bergmann:
 "This release brings up a new platform based on the old ARM9 core: the
  Nuvoton NPCM is used as a baseboard management controller, competing
  with the better known ASpeed AST2xx series.

  Another important change is the addition of ARMv7-A based chips in
  mach-stm32. The older parts in this platform are ARMv7-M based
  microcontrollers, now they are expanding to general-purpose workloads.

  The other changes are the usual defconfig updates to enable additional
  drivers, lesser bugfixes. The largest updates as often are the ongoing
  OMAP cleanups, but we also have a number of changes for the older PXA
  and davinci platforms this time.

  For the Renesas shmobile/r-car platform, some new infrastructure is
  needed to make the watchdog work correctly.

  Supporting Multiprocessing on Allwinner A80 required a significant
  amount of new code, but is not doing anything unexpected"

* tag 'armsoc-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (179 commits)
  arm: npcm: modify configuration for the NPCM7xx BMC.
  MAINTAINERS: update entry for ARM/berlin
  ARM: omap2: fix am43xx build without L2X0
  ARM: davinci: da8xx: simplify CFGCHIP regmap_config
  ARM: davinci: da8xx: fix oops in USB PHY driver due to stack allocated platform_data
  ARM: multi_v7_defconfig: add NXP FlexCAN IP support
  ARM: multi_v7_defconfig: enable thermal driver for i.MX devices
  ARM: multi_v7_defconfig: add RN5T618 PMIC family support
  ARM: multi_v7_defconfig: add NXP graphics drivers
  ARM: multi_v7_defconfig: add GPMI NAND controller support
  ARM: multi_v7_defconfig: add OCOTP driver for NXP SoCs
  ARM: multi_v7_defconfig: configure I2C driver built-in
  arm64: defconfig: add CONFIG_UNIPHIER_THERMAL and CONFIG_SNI_AVE
  ARM: imx: fix imx6sll-only build
  ARM: imx: select ARM_CPU_SUSPEND for CPU_IDLE as well
  ARM: mxs_defconfig: Re-sync defconfig
  ARM: imx_v4_v5_defconfig: Use the generic fsl-asoc-card driver
  ARM: imx_v4_v5_defconfig: Re-sync defconfig
  arm64: defconfig: enable stmmac ethernet to defconfig
  ARM: EXYNOS: Simplify code in coupled CPU idle hot path
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/bus/ti-sysc.c               |  526
-rw-r--r--  drivers/clocksource/Kconfig         |    3
-rw-r--r--  drivers/clocksource/Makefile        |    1
-rw-r--r--  drivers/clocksource/timer-ti-dm.c   | 1000
-rw-r--r--  drivers/mtd/nand/Kconfig            |   11
-rw-r--r--  drivers/mtd/nand/Makefile           |    1
-rw-r--r--  drivers/mtd/nand/marvell_nand.c     |    3
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c      | 2105
-rw-r--r--  drivers/phy/ti/phy-da8xx-usb.c      |   16
-rw-r--r--  drivers/power/avs/smartreflex.c     |   41
-rw-r--r--  drivers/pwm/pwm-omap-dmtimer.c      |   68
-rw-r--r--  drivers/soc/renesas/Kconfig         |   14
-rw-r--r--  drivers/soc/renesas/Makefile        |    2
-rw-r--r--  drivers/soc/renesas/r8a77965-sysc.c |   37
-rw-r--r--  drivers/soc/renesas/r8a77970-sysc.c |   12
-rw-r--r--  drivers/soc/renesas/r8a77980-sysc.c |   52
-rw-r--r--  drivers/soc/renesas/rcar-rst.c      |   37
-rw-r--r--  drivers/soc/renesas/rcar-sysc.c     |    8
-rw-r--r--  drivers/soc/renesas/rcar-sysc.h     |    2
-rw-r--r--  drivers/soc/renesas/renesas-soc.c   |   16
-rw-r--r--  drivers/soc/ti/Kconfig              |    9
-rw-r--r--  drivers/soc/ti/Makefile             |    1
-rw-r--r--  drivers/soc/ti/pm33xx.c             |  349
23 files changed, 2107 insertions, 2207 deletions
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index cdaeeea7999c..7cd2fd04b212 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -13,22 +13,20 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/delay.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
+
#include <linux/platform_data/ti-sysc.h>
#include <dt-bindings/bus/ti-sysc.h>
-enum sysc_registers {
- SYSC_REVISION,
- SYSC_SYSCONFIG,
- SYSC_SYSSTATUS,
- SYSC_MAX_REGS,
-};
-
static const char * const reg_names[] = { "rev", "sysc", "syss", };
enum sysc_clocks {
@@ -55,6 +53,7 @@ static const char * const clock_names[] = { "fck", "ick", };
* @cfg: interconnect target module configuration
* @name: name if available
* @revision: interconnect target module revision
+ * @needs_resume: runtime resume needed on resume from suspend
*/
struct sysc {
struct device *dev;
@@ -66,8 +65,13 @@ struct sysc {
const char *legacy_mode;
const struct sysc_capabilities *cap;
struct sysc_config cfg;
+ struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ bool enabled;
+ bool needs_resume;
+ bool child_needs_resume;
+ struct delayed_work idle_work;
};
static u32 sysc_read(struct sysc *ddata, int offset)
@@ -136,9 +140,6 @@ static int sysc_get_clocks(struct sysc *ddata)
{
int i, error;
- if (ddata->legacy_mode)
- return 0;
-
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
error = sysc_get_one_clock(ddata, i);
if (error && error != -ENOENT)
@@ -197,12 +198,53 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
ddata->module_pa = of_translate_address(np, ranges++);
ddata->module_size = be32_to_cpup(ranges);
- dev_dbg(ddata->dev, "interconnect target 0x%llx size 0x%x for %pOF\n",
- ddata->module_pa, ddata->module_size, np);
-
return 0;
}
+static struct device_node *stdout_path;
+
+static void sysc_init_stdout_path(struct sysc *ddata)
+{
+ struct device_node *np = NULL;
+ const char *uart;
+
+ if (IS_ERR(stdout_path))
+ return;
+
+ if (stdout_path)
+ return;
+
+ np = of_find_node_by_path("/chosen");
+ if (!np)
+ goto err;
+
+ uart = of_get_property(np, "stdout-path", NULL);
+ if (!uart)
+ goto err;
+
+ np = of_find_node_by_path(uart);
+ if (!np)
+ goto err;
+
+ stdout_path = np;
+
+ return;
+
+err:
+ stdout_path = ERR_PTR(-ENODEV);
+}
+
+static void sysc_check_quirk_stdout(struct sysc *ddata,
+ struct device_node *np)
+{
+ sysc_init_stdout_path(ddata);
+ if (np != stdout_path)
+ return;
+
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT;
+}
+
/**
* sysc_check_one_child - check child configuration
* @ddata: device driver data
@@ -221,6 +263,8 @@ static int sysc_check_one_child(struct sysc *ddata,
if (name)
dev_warn(ddata->dev, "really a child ti,hwmods property?");
+ sysc_check_quirk_stdout(ddata, np);
+
return 0;
}
@@ -246,11 +290,8 @@ static int sysc_check_children(struct sysc *ddata)
*/
static void sysc_check_quirk_16bit(struct sysc *ddata, struct resource *res)
{
- if (resource_size(res) == 8) {
- dev_dbg(ddata->dev,
- "enabling 16-bit and clockactivity quirks\n");
+ if (resource_size(res) == 8)
ddata->cfg.quirks |= SYSC_QUIRK_16BIT | SYSC_QUIRK_USE_CLOCKACT;
- }
}
/**
@@ -276,7 +317,6 @@ static int sysc_parse_one(struct sysc *ddata, enum sysc_registers reg)
res = platform_get_resource_byname(to_platform_device(ddata->dev),
IORESOURCE_MEM, name);
if (!res) {
- dev_dbg(ddata->dev, "has no %s register\n", name);
ddata->offsets[reg] = -ENODEV;
return 0;
@@ -437,6 +477,14 @@ static int sysc_show_reg(struct sysc *ddata,
return sprintf(bufp, ":%x", ddata->offsets[reg]);
}
+static int sysc_show_name(char *bufp, struct sysc *ddata)
+{
+ if (!ddata->name)
+ return 0;
+
+ return sprintf(bufp, ":%s", ddata->name);
+}
+
/**
* sysc_show_registers - show information about interconnect target module
* @ddata: device driver data
@@ -451,6 +499,7 @@ static void sysc_show_registers(struct sysc *ddata)
bufp += sysc_show_reg(ddata, bufp, i);
bufp += sysc_show_rev(bufp, ddata);
+ bufp += sysc_show_name(bufp, ddata);
dev_dbg(ddata->dev, "%llx:%x%s\n",
ddata->module_pa, ddata->module_size,
@@ -459,33 +508,70 @@ static void sysc_show_registers(struct sysc *ddata)
static int __maybe_unused sysc_runtime_suspend(struct device *dev)
{
+ struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int i;
+ int error = 0, i;
ddata = dev_get_drvdata(dev);
- if (ddata->legacy_mode)
+ if (!ddata->enabled)
return 0;
+ if (ddata->legacy_mode) {
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->idle_module)
+ return -ENODEV;
+
+ error = pdata->idle_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not idle: %i\n",
+ __func__, error);
+
+ goto idled;
+ }
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (IS_ERR_OR_NULL(ddata->clocks[i]))
continue;
clk_disable(ddata->clocks[i]);
}
- return 0;
+idled:
+ ddata->enabled = false;
+
+ return error;
}
static int __maybe_unused sysc_runtime_resume(struct device *dev)
{
+ struct ti_sysc_platform_data *pdata;
struct sysc *ddata;
- int i, error;
+ int error = 0, i;
ddata = dev_get_drvdata(dev);
- if (ddata->legacy_mode)
+ if (ddata->enabled)
return 0;
+ if (ddata->legacy_mode) {
+ pdata = dev_get_platdata(ddata->dev);
+ if (!pdata)
+ return 0;
+
+ if (!pdata->enable_module)
+ return -ENODEV;
+
+ error = pdata->enable_module(dev, &ddata->cookie);
+ if (error)
+ dev_err(dev, "%s: could not enable: %i\n",
+ __func__, error);
+
+ goto awake;
+ }
+
for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
if (IS_ERR_OR_NULL(ddata->clocks[i]))
continue;
@@ -494,20 +580,136 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
return error;
}
+awake:
+ ddata->enabled = true;
+
+ return error;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+
+ ddata = dev_get_drvdata(dev);
+
+ if (!ddata->enabled)
+ return 0;
+
+ ddata->needs_resume = true;
+
+ return sysc_runtime_suspend(dev);
+}
+
+static int sysc_resume(struct device *dev)
+{
+ struct sysc *ddata;
+
+ ddata = dev_get_drvdata(dev);
+ if (ddata->needs_resume) {
+ ddata->needs_resume = false;
+
+ return sysc_runtime_resume(dev);
+ }
+
return 0;
}
+#endif
static const struct dev_pm_ops sysc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sysc_suspend, sysc_resume)
SET_RUNTIME_PM_OPS(sysc_runtime_suspend,
sysc_runtime_resume,
NULL)
};
+/* Module revision register based quirks */
+struct sysc_revision_quirk {
+ const char *name;
+ u32 base;
+ int rev_offset;
+ int sysc_offset;
+ int syss_offset;
+ u32 revision;
+ u32 revision_mask;
+ u32 quirks;
+};
+
+#define SYSC_QUIRK(optname, optbase, optrev, optsysc, optsyss, \
+ optrev_val, optrevmask, optquirkmask) \
+ { \
+ .name = (optname), \
+ .base = (optbase), \
+ .rev_offset = (optrev), \
+ .sysc_offset = (optsysc), \
+ .syss_offset = (optsyss), \
+ .revision = (optrev_val), \
+ .revision_mask = (optrevmask), \
+ .quirks = (optquirkmask), \
+ }
+
+static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ /* These drivers need to be fixed to not use pm_runtime_irq_safe() */
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("mmu", 0, 0, 0x10, 0x14, 0x00000030, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+};
+
+static void sysc_init_revision_quirks(struct sysc *ddata)
+{
+ const struct sysc_revision_quirk *q;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sysc_revision_quirks); i++) {
+ q = &sysc_revision_quirks[i];
+
+ if (q->base && q->base != ddata->module_pa)
+ continue;
+
+ if (q->rev_offset >= 0 &&
+ q->rev_offset != ddata->offsets[SYSC_REVISION])
+ continue;
+
+ if (q->sysc_offset >= 0 &&
+ q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
+ continue;
+
+ if (q->syss_offset >= 0 &&
+ q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
+ continue;
+
+ if (q->revision == ddata->revision ||
+ (q->revision & q->revision_mask) ==
+ (ddata->revision & q->revision_mask)) {
+ ddata->name = q->name;
+ ddata->cfg.quirks |= q->quirks;
+ }
+ }
+}
+
/* At this point the module is configured enough to read the revision */
static int sysc_init_module(struct sysc *ddata)
{
int error;
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE_ON_INIT) {
+ ddata->revision = sysc_read_revision(ddata);
+ goto rev_quirks;
+ }
+
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
@@ -517,6 +719,9 @@ static int sysc_init_module(struct sysc *ddata)
ddata->revision = sysc_read_revision(ddata);
pm_runtime_put_sync(ddata->dev);
+rev_quirks:
+ sysc_init_revision_quirks(ddata);
+
return 0;
}
@@ -605,6 +810,196 @@ static int sysc_init_syss_mask(struct sysc *ddata)
return 0;
}
+/*
+ * Many child device drivers need to have fck available to get the clock
+ * rate for device internal configuration.
+ */
+static int sysc_child_add_fck(struct sysc *ddata,
+ struct device *child)
+{
+ struct clk *fck;
+ struct clk_lookup *l;
+ const char *name = clock_names[SYSC_FCK];
+
+ if (IS_ERR_OR_NULL(ddata->clocks[SYSC_FCK]))
+ return 0;
+
+ fck = clk_get(child, name);
+ if (!IS_ERR(fck)) {
+ clk_put(fck);
+
+ return -EEXIST;
+ }
+
+ l = clkdev_create(ddata->clocks[SYSC_FCK], name, dev_name(child));
+
+ return l ? 0 : -ENODEV;
+}
+
+static struct device_type sysc_device_type = {
+};
+
+static struct sysc *sysc_child_to_parent(struct device *dev)
+{
+ struct device *parent = dev->parent;
+
+ if (!parent || parent->type != &sysc_device_type)
+ return NULL;
+
+ return dev_get_drvdata(parent);
+}
+
+static int __maybe_unused sysc_child_runtime_suspend(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ error = pm_generic_runtime_suspend(dev);
+ if (error)
+ return error;
+
+ if (!ddata->enabled)
+ return 0;
+
+ return sysc_runtime_suspend(ddata->dev);
+}
+
+static int __maybe_unused sysc_child_runtime_resume(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ if (!ddata->enabled) {
+ error = sysc_runtime_resume(ddata->dev);
+ if (error < 0)
+ dev_err(ddata->dev,
+ "%s error: %i\n", __func__, error);
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sysc_child_suspend_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ error = pm_generic_suspend_noirq(dev);
+ if (error)
+ return error;
+
+ if (!pm_runtime_status_suspended(dev)) {
+ error = pm_generic_runtime_suspend(dev);
+ if (error)
+ return error;
+
+ error = sysc_runtime_suspend(ddata->dev);
+ if (error)
+ return error;
+
+ ddata->child_needs_resume = true;
+ }
+
+ return 0;
+}
+
+static int sysc_child_resume_noirq(struct device *dev)
+{
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+
+ if (ddata->child_needs_resume) {
+ ddata->child_needs_resume = false;
+
+ error = sysc_runtime_resume(ddata->dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s runtime resume error: %i\n",
+ __func__, error);
+
+ error = pm_generic_runtime_resume(dev);
+ if (error)
+ dev_err(ddata->dev,
+ "%s generic runtime resume: %i\n",
+ __func__, error);
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+#endif
+
+struct dev_pm_domain sysc_child_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend,
+ sysc_child_runtime_resume,
+ NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq,
+ sysc_child_resume_noirq)
+ }
+};
+
+/**
+ * sysc_legacy_idle_quirk - handle children in omap_device compatible way
+ * @ddata: device driver data
+ * @child: child device driver
+ *
+ * Allow idle for child devices as done with _od_runtime_suspend().
+ * Otherwise many child devices will not idle because of the permanent
+ * parent usecount set in pm_runtime_irq_safe().
+ *
+ * Note that the long term solution is to just modify the child device
+ * drivers to not set pm_runtime_irq_safe() and then this can be just
+ * dropped.
+ */
+static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
+{
+ if (!ddata->legacy_mode)
+ return;
+
+ if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ dev_pm_domain_set(child, &sysc_child_pm_domain);
+}
+
+static int sysc_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *device)
+{
+ struct device *dev = device;
+ struct sysc *ddata;
+ int error;
+
+ ddata = sysc_child_to_parent(dev);
+ if (!ddata)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ error = sysc_child_add_fck(ddata, dev);
+ if (error && error != -EEXIST)
+ dev_warn(ddata->dev, "could not add %s fck: %i\n",
+ dev_name(dev), error);
+ sysc_legacy_idle_quirk(ddata, dev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sysc_nb = {
+ .notifier_call = sysc_notifier_call,
+};
+
/* Device tree configured quirks */
struct sysc_dts_quirk {
const char *name;
@@ -797,7 +1192,8 @@ static const struct sysc_capabilities sysc_34xx_sr = {
.type = TI_SYSC_OMAP34XX_SR,
.sysc_mask = SYSC_OMAP2_CLOCKACTIVITY,
.regbits = &sysc_regbits_omap34xx_sr,
- .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED,
+ .mod_quirks = SYSC_QUIRK_USE_CLOCKACT | SYSC_QUIRK_UNCACHED |
+ SYSC_QUIRK_LEGACY_IDLE,
};
/*
@@ -818,12 +1214,13 @@ static const struct sysc_capabilities sysc_36xx_sr = {
.type = TI_SYSC_OMAP36XX_SR,
.sysc_mask = SYSC_OMAP3_SR_ENAWAKEUP,
.regbits = &sysc_regbits_omap36xx_sr,
- .mod_quirks = SYSC_QUIRK_UNCACHED,
+ .mod_quirks = SYSC_QUIRK_UNCACHED | SYSC_QUIRK_LEGACY_IDLE,
};
static const struct sysc_capabilities sysc_omap4_sr = {
.type = TI_SYSC_OMAP4_SR,
.regbits = &sysc_regbits_omap36xx_sr,
+ .mod_quirks = SYSC_QUIRK_LEGACY_IDLE,
};
/*
@@ -865,6 +1262,33 @@ static const struct sysc_capabilities sysc_omap4_usb_host_fs = {
.regbits = &sysc_regbits_omap4_usb_host_fs,
};
+static int sysc_init_pdata(struct sysc *ddata)
+{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
+ struct ti_sysc_module_data mdata;
+ int error = 0;
+
+ if (!pdata || !ddata->legacy_mode)
+ return 0;
+
+ mdata.name = ddata->legacy_mode;
+ mdata.module_pa = ddata->module_pa;
+ mdata.module_size = ddata->module_size;
+ mdata.offsets = ddata->offsets;
+ mdata.nr_offsets = SYSC_MAX_REGS;
+ mdata.cap = ddata->cap;
+ mdata.cfg = &ddata->cfg;
+
+ if (!pdata->init_module)
+ return -ENODEV;
+
+ error = pdata->init_module(ddata->dev, &mdata, &ddata->cookie);
+ if (error == -EEXIST)
+ error = 0;
+
+ return error;
+}
+
static int sysc_init_match(struct sysc *ddata)
{
const struct sysc_capabilities *cap;
@@ -880,8 +1304,19 @@ static int sysc_init_match(struct sysc *ddata)
return 0;
}
+static void ti_sysc_idle(struct work_struct *work)
+{
+ struct sysc *ddata;
+
+ ddata = container_of(work, struct sysc, idle_work.work);
+
+ if (pm_runtime_active(ddata->dev))
+ pm_runtime_put_sync(ddata->dev);
+}
+
static int sysc_probe(struct platform_device *pdev)
{
+ struct ti_sysc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct sysc *ddata;
int error;
@@ -920,6 +1355,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
goto unprepare;
+ error = sysc_init_pdata(ddata);
+ if (error)
+ goto unprepare;
+
pm_runtime_enable(ddata->dev);
error = sysc_init_module(ddata);
@@ -933,22 +1372,28 @@ static int sysc_probe(struct platform_device *pdev)
goto unprepare;
}
- pm_runtime_use_autosuspend(ddata->dev);
-
sysc_show_registers(ddata);
+ ddata->dev->type = &sysc_device_type;
error = of_platform_populate(ddata->dev->of_node,
- NULL, NULL, ddata->dev);
+ NULL, pdata ? pdata->auxdata : NULL,
+ ddata->dev);
if (error)
goto err;
- pm_runtime_mark_last_busy(ddata->dev);
- pm_runtime_put_autosuspend(ddata->dev);
+ INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
+
+ /* At least earlycon won't survive without deferred idle */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE_ON_INIT |
+ SYSC_QUIRK_NO_RESET_ON_INIT)) {
+ schedule_delayed_work(&ddata->idle_work, 3000);
+ } else {
+ pm_runtime_put(&pdev->dev);
+ }
return 0;
err:
- pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
unprepare:
@@ -962,6 +1407,8 @@ static int sysc_remove(struct platform_device *pdev)
struct sysc *ddata = platform_get_drvdata(pdev);
int error;
+ cancel_delayed_work_sync(&ddata->idle_work);
+
error = pm_runtime_get_sync(ddata->dev);
if (error < 0) {
pm_runtime_put_noidle(ddata->dev);
@@ -971,7 +1418,6 @@ static int sysc_remove(struct platform_device *pdev)
of_platform_depopulate(&pdev->dev);
- pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -1008,7 +1454,21 @@ static struct platform_driver sysc_driver = {
.pm = &sysc_pm_ops,
},
};
-module_platform_driver(sysc_driver);
+
+static int __init sysc_init(void)
+{
+ bus_register_notifier(&platform_bus_type, &sysc_nb);
+
+ return platform_driver_register(&sysc_driver);
+}
+module_init(sysc_init);
+
+static void __exit sysc_exit(void)
+{
+ bus_unregister_notifier(&platform_bus_type, &sysc_nb);
+ platform_driver_unregister(&sysc_driver);
+}
+module_exit(sysc_exit);
MODULE_DESCRIPTION("TI sysc interconnect target driver");
MODULE_LICENSE("GPL v2");
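
The legacy-mode paths above no longer return early; sysc_runtime_suspend() and sysc_runtime_resume() now delegate to callbacks that the platform supplies through ti_sysc_platform_data, with sysc_init_pdata() handing over the module description first. A minimal sketch of what the platform side could look like, assuming the callback signatures implied by the calls above; the my_* names are placeholders, not existing kernel symbols:

#include <linux/platform_data/ti-sysc.h>

static int my_init_module(struct device *dev,
			  const struct ti_sysc_module_data *mdata,
			  struct ti_sysc_cookie *cookie)
{
	/* Look up the legacy module named mdata->name and stash it in the cookie */
	return 0;
}

static int my_enable_module(struct device *dev,
			    const struct ti_sysc_cookie *cookie)
{
	/* Re-enable the interconnect target module before its registers are touched */
	return 0;
}

static int my_idle_module(struct device *dev,
			  const struct ti_sysc_cookie *cookie)
{
	/* Idle the module that init_module() described */
	return 0;
}

static struct ti_sysc_platform_data my_sysc_pdata = {
	.init_module	= my_init_module,
	.enable_module	= my_enable_module,
	.idle_module	= my_idle_module,
};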
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 6021a5af21da..9ee2888275c1 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -21,6 +21,9 @@ config CLKEVT_I8253
config I8253_LOCK
bool
+config OMAP_DM_TIMER
+ bool
+
config CLKBLD_I8253
def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index f0cb07637a65..e8e76dfef00b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DIGICOLOR_TIMER) += timer-digicolor.o
+obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
new file mode 100644
index 000000000000..4cce6b224b87
--- /dev/null
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -0,0 +1,1000 @@
+/*
+ * linux/arch/arm/plat-omap/dmtimer.c
+ *
+ * OMAP Dual-Mode Timers
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * dmtimer adaptation to platform_driver.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * OMAP2 support by Juha Yrjola
+ * API improvements and OMAP2 clock framework support by Timo Teras
+ *
+ * Copyright (C) 2009 Texas Instruments
+ * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dmtimer-omap.h>
+
+#include <clocksource/timer-ti-dm.h>
+
+static u32 omap_reserved_systimers;
+static LIST_HEAD(omap_timer_list);
+static DEFINE_SPINLOCK(dm_timer_lock);
+
+enum {
+ REQUEST_ANY = 0,
+ REQUEST_BY_ID,
+ REQUEST_BY_CAP,
+ REQUEST_BY_NODE,
+};
+
+/**
+ * omap_dm_timer_read_reg - read timer registers in posted and non-posted mode
+ * @timer: timer pointer over which read operation to perform
+ * @reg: lowest byte holds the register offset
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode write
+ * pending bit must be checked. Otherwise a read of a non completed write
+ * will produce an error.
+ */
+static inline u32 omap_dm_timer_read_reg(struct omap_dm_timer *timer, u32 reg)
+{
+ WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
+ return __omap_dm_timer_read(timer, reg, timer->posted);
+}
+
+/**
+ * omap_dm_timer_write_reg - write timer registers in posted and non-posted mode
+ * @timer: timer pointer over which write operation is to perform
+ * @reg: lowest byte holds the register offset
+ * @value: data to write into the register
+ *
+ * The posted mode bit is encoded in reg. Note that in posted mode the write
+ * pending bit must be checked. Otherwise a write on a register which has a
+ * pending write will be lost.
+ */
+static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
+ u32 value)
+{
+ WARN_ON((reg & 0xff) < _OMAP_TIMER_WAKEUP_EN_OFFSET);
+ __omap_dm_timer_write(timer, reg, value, timer->posted);
+}
+
+static void omap_timer_restore_context(struct omap_dm_timer *timer)
+{
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
+ timer->context.twer);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
+ timer->context.tcrr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG,
+ timer->context.tldr);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG,
+ timer->context.tmar);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG,
+ timer->context.tsicr);
+ writel_relaxed(timer->context.tier, timer->irq_ena);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG,
+ timer->context.tclr);
+}
+
+static int omap_dm_timer_reset(struct omap_dm_timer *timer)
+{
+ u32 l, timeout = 100000;
+
+ if (timer->revision != 1)
+ return -EINVAL;
+
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_IF_CTRL_REG, 0x06);
+
+ do {
+ l = __omap_dm_timer_read(timer,
+ OMAP_TIMER_V1_SYS_STAT_OFFSET, 0);
+ } while (!l && timeout--);
+
+ if (!timeout) {
+ dev_err(&timer->pdev->dev, "Timer failed to reset\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Configure timer for smart-idle mode */
+ l = __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+ l |= 0x2 << 0x3;
+ __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET, l, 0);
+
+ timer->posted = 0;
+
+ return 0;
+}
+
+static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer)
+{
+ int ret;
+ struct clk *parent;
+
+ /*
+ * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
+ * do not call clk_get() for these devices.
+ */
+ if (!timer->fclk)
+ return -ENODEV;
+
+ parent = clk_get(&timer->pdev->dev, NULL);
+ if (IS_ERR(parent))
+ return -ENODEV;
+
+ ret = clk_set_parent(timer->fclk, parent);
+ if (ret < 0)
+ pr_err("%s: failed to set parent\n", __func__);
+
+ clk_put(parent);
+
+ return ret;
+}
+
+static int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
+{
+ int ret;
+ const char *parent_name;
+ struct clk *parent;
+ struct dmtimer_platform_data *pdata;
+
+ if (unlikely(!timer) || IS_ERR(timer->fclk))
+ return -EINVAL;
+
+ switch (source) {
+ case OMAP_TIMER_SRC_SYS_CLK:
+ parent_name = "timer_sys_ck";
+ break;
+ case OMAP_TIMER_SRC_32_KHZ:
+ parent_name = "timer_32k_ck";
+ break;
+ case OMAP_TIMER_SRC_EXT_CLK:
+ parent_name = "timer_ext_ck";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pdata = timer->pdev->dev.platform_data;
+
+ /*
+ * FIXME: Used for OMAP1 devices only because they do not currently
+ * use the clock framework to set the parent clock. To be removed
+ * once OMAP1 migrated to using clock framework for dmtimers
+ */
+ if (pdata && pdata->set_timer_src)
+ return pdata->set_timer_src(timer->pdev, source);
+
+#if defined(CONFIG_COMMON_CLK)
+ /* Check if the clock has configurable parents */
+ if (clk_hw_get_num_parents(__clk_get_hw(timer->fclk)) < 2)
+ return 0;
+#endif
+
+ parent = clk_get(&timer->pdev->dev, parent_name);
+ if (IS_ERR(parent)) {
+ pr_err("%s: %s not found\n", __func__, parent_name);
+ return -EINVAL;
+ }
+
+ ret = clk_set_parent(timer->fclk, parent);
+ if (ret < 0)
+ pr_err("%s: failed to set %s as parent\n", __func__,
+ parent_name);
+
+ clk_put(parent);
+
+ return ret;
+}
+
+static void omap_dm_timer_enable(struct omap_dm_timer *timer)
+{
+ int c;
+
+ pm_runtime_get_sync(&timer->pdev->dev);
+
+ if (!(timer->capability & OMAP_TIMER_ALWON)) {
+ if (timer->get_context_loss_count) {
+ c = timer->get_context_loss_count(&timer->pdev->dev);
+ if (c != timer->ctx_loss_count) {
+ omap_timer_restore_context(timer);
+ timer->ctx_loss_count = c;
+ }
+ } else {
+ omap_timer_restore_context(timer);
+ }
+ }
+}
+
+static void omap_dm_timer_disable(struct omap_dm_timer *timer)
+{
+ pm_runtime_put_sync(&timer->pdev->dev);
+}
+
+static int omap_dm_timer_prepare(struct omap_dm_timer *timer)
+{
+ int rc;
+
+ /*
+ * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
+ * do not call clk_get() for these devices.
+ */
+ if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
+ timer->fclk = clk_get(&timer->pdev->dev, "fck");
+ if (WARN_ON_ONCE(IS_ERR(timer->fclk))) {
+ dev_err(&timer->pdev->dev, ": No fclk handle.\n");
+ return -EINVAL;
+ }
+ }
+
+ omap_dm_timer_enable(timer);
+
+ if (timer->capability & OMAP_TIMER_NEEDS_RESET) {
+ rc = omap_dm_timer_reset(timer);
+ if (rc) {
+ omap_dm_timer_disable(timer);
+ return rc;
+ }
+ }
+
+ __omap_dm_timer_enable_posted(timer);
+ omap_dm_timer_disable(timer);
+
+ rc = omap_dm_timer_of_set_source(timer);
+ if (rc == -ENODEV)
+ return omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
+
+ return rc;
+}
+
+static inline u32 omap_dm_timer_reserved_systimer(int id)
+{
+ return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
+}
+
+int omap_dm_timer_reserve_systimer(int id)
+{
+ if (omap_dm_timer_reserved_systimer(id))
+ return -ENODEV;
+
+ omap_reserved_systimers |= (1 << (id - 1));
+
+ return 0;
+}
+
+static struct omap_dm_timer *_omap_dm_timer_request(int req_type, void *data)
+{
+ struct omap_dm_timer *timer = NULL, *t;
+ struct device_node *np = NULL;
+ unsigned long flags;
+ u32 cap = 0;
+ int id = 0;
+
+ switch (req_type) {
+ case REQUEST_BY_ID:
+ id = *(int *)data;
+ break;
+ case REQUEST_BY_CAP:
+ cap = *(u32 *)data;
+ break;
+ case REQUEST_BY_NODE:
+ np = (struct device_node *)data;
+ break;
+ default:
+ /* REQUEST_ANY */
+ break;
+ }
+
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ list_for_each_entry(t, &omap_timer_list, node) {
+ if (t->reserved)
+ continue;
+
+ switch (req_type) {
+ case REQUEST_BY_ID:
+ if (id == t->pdev->id) {
+ timer = t;
+ timer->reserved = 1;
+ goto found;
+ }
+ break;
+ case REQUEST_BY_CAP:
+ if (cap == (t->capability & cap)) {
+ /*
+ * If timer is not NULL, we have already found
+ * one timer. But it was not an exact match
+ * because it had more capabilities than what
+ * was required. Therefore, unreserve the last
+ * timer found and see if this one is a better
+ * match.
+ */
+ if (timer)
+ timer->reserved = 0;
+ timer = t;
+ timer->reserved = 1;
+
+ /* Exit loop early if we find an exact match */
+ if (t->capability == cap)
+ goto found;
+ }
+ break;
+ case REQUEST_BY_NODE:
+ if (np == t->pdev->dev.of_node) {
+ timer = t;
+ timer->reserved = 1;
+ goto found;
+ }
+ break;
+ default:
+ /* REQUEST_ANY */
+ timer = t;
+ timer->reserved = 1;
+ goto found;
+ }
+ }
+found:
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ if (timer && omap_dm_timer_prepare(timer)) {
+ timer->reserved = 0;
+ timer = NULL;
+ }
+
+ if (!timer)
+ pr_debug("%s: timer request failed!\n", __func__);
+
+ return timer;
+}
+
+static struct omap_dm_timer *omap_dm_timer_request(void)
+{
+ return _omap_dm_timer_request(REQUEST_ANY, NULL);
+}
+
+static struct omap_dm_timer *omap_dm_timer_request_specific(int id)
+{
+ /* Requesting timer by ID is not supported when device tree is used */
+ if (of_have_populated_dt()) {
+ pr_warn("%s: Please use omap_dm_timer_request_by_node()\n",
+ __func__);
+ return NULL;
+ }
+
+ return _omap_dm_timer_request(REQUEST_BY_ID, &id);
+}
+
+/**
+ * omap_dm_timer_request_by_cap - Request a timer by capability
+ * @cap: Bit mask of capabilities to match
+ *
+ * Find a timer based upon capabilities bit mask. Callers of this function
+ * should use the definitions found in the plat/dmtimer.h file under the
+ * comment "timer capabilities used in hwmod database". Returns pointer to
+ * timer handle on success and a NULL pointer on failure.
+ */
+struct omap_dm_timer *omap_dm_timer_request_by_cap(u32 cap)
+{
+ return _omap_dm_timer_request(REQUEST_BY_CAP, &cap);
+}
+
+/**
+ * omap_dm_timer_request_by_node - Request a timer by device-tree node
+ * @np: Pointer to device-tree timer node
+ *
+ * Request a timer based upon a device node pointer. Returns pointer to
+ * timer handle on success and a NULL pointer on failure.
+ */
+static struct omap_dm_timer *omap_dm_timer_request_by_node(struct device_node *np)
+{
+ if (!np)
+ return NULL;
+
+ return _omap_dm_timer_request(REQUEST_BY_NODE, np);
+}
+
+static int omap_dm_timer_free(struct omap_dm_timer *timer)
+{
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ clk_put(timer->fclk);
+
+ WARN_ON(!timer->reserved);
+ timer->reserved = 0;
+ return 0;
+}
+
+int omap_dm_timer_get_irq(struct omap_dm_timer *timer)
+{
+ if (timer)
+ return timer->irq;
+ return -EINVAL;
+}
+
+#if defined(CONFIG_ARCH_OMAP1)
+#include <mach/hardware.h>
+
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+ return NULL;
+}
+
+/**
+ * omap_dm_timer_modify_idlect_mask - Check if any running timers use ARMXOR
+ * @inputmask: current value of idlect mask
+ */
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+ int i = 0;
+ struct omap_dm_timer *timer = NULL;
+ unsigned long flags;
+
+ /* If ARMXOR cannot be idled this function call is unnecessary */
+ if (!(inputmask & (1 << 1)))
+ return inputmask;
+
+ /* If any active timer is using ARMXOR return modified mask */
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ list_for_each_entry(timer, &omap_timer_list, node) {
+ u32 l;
+
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (l & OMAP_TIMER_CTRL_ST) {
+ if (((omap_readl(MOD_CONF_CTRL_1) >> (i * 2)) & 0x03) == 0)
+ inputmask &= ~(1 << 1);
+ else
+ inputmask &= ~(1 << 2);
+ }
+ i++;
+ }
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ return inputmask;
+}
+
+#else
+
+static struct clk *omap_dm_timer_get_fclk(struct omap_dm_timer *timer)
+{
+ if (timer && !IS_ERR(timer->fclk))
+ return timer->fclk;
+ return NULL;
+}
+
+__u32 omap_dm_timer_modify_idlect_mask(__u32 inputmask)
+{
+ BUG();
+
+ return 0;
+}
+
+#endif
+
+int omap_dm_timer_trigger(struct omap_dm_timer *timer)
+{
+ if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ pr_err("%s: timer not available or enabled.\n", __func__);
+ return -EINVAL;
+ }
+
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ return 0;
+}
+
+static int omap_dm_timer_start(struct omap_dm_timer *timer)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (!(l & OMAP_TIMER_CTRL_ST)) {
+ l |= OMAP_TIMER_CTRL_ST;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ }
+
+ /* Save the context */
+ timer->context.tclr = l;
+ return 0;
+}
+
+static int omap_dm_timer_stop(struct omap_dm_timer *timer)
+{
+ unsigned long rate = 0;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
+ rate = clk_get_rate(timer->fclk);
+
+ __omap_dm_timer_stop(timer, timer->posted, rate);
+
+ /*
+ * Since the register values are computed and written within
+ * __omap_dm_timer_stop, we need to use read to retrieve the
+ * context.
+ */
+ timer->context.tclr =
+ omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
+ unsigned int load)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+ else
+ l &= ~OMAP_TIMER_CTRL_AR;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_TRIGGER_REG, 0);
+ /* Save the context */
+ timer->context.tclr = l;
+ timer->context.tldr = load;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+/* Optimized set_load which removes costly spin wait in timer_start */
+int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
+ unsigned int load)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (autoreload) {
+ l |= OMAP_TIMER_CTRL_AR;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
+ } else {
+ l &= ~OMAP_TIMER_CTRL_AR;
+ }
+ l |= OMAP_TIMER_CTRL_ST;
+
+ __omap_dm_timer_load_start(timer, l, load, timer->posted);
+
+ /* Save the context */
+ timer->context.tclr = l;
+ timer->context.tldr = load;
+ timer->context.tcrr = load;
+ return 0;
+}
+static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
+ unsigned int match)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ if (enable)
+ l |= OMAP_TIMER_CTRL_CE;
+ else
+ l &= ~OMAP_TIMER_CTRL_CE;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_MATCH_REG, match);
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+ /* Save the context */
+ timer->context.tclr = l;
+ timer->context.tmar = match;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+static int omap_dm_timer_set_pwm(struct omap_dm_timer *timer, int def_on,
+ int toggle, int trigger)
+{
+ u32 l;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l &= ~(OMAP_TIMER_CTRL_GPOCFG | OMAP_TIMER_CTRL_SCPWM |
+ OMAP_TIMER_CTRL_PT | (0x03 << 10));
+ if (def_on)
+ l |= OMAP_TIMER_CTRL_SCPWM;
+ if (toggle)
+ l |= OMAP_TIMER_CTRL_PT;
+ l |= trigger << 10;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+ /* Save the context */
+ timer->context.tclr = l;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+static int omap_dm_timer_set_prescaler(struct omap_dm_timer *timer,
+ int prescaler)
+{
+ u32 l;
+
+ if (unlikely(!timer) || prescaler < -1 || prescaler > 7)
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
+ l &= ~(OMAP_TIMER_CTRL_PRE | (0x07 << 2));
+ if (prescaler >= 0) {
+ l |= OMAP_TIMER_CTRL_PRE;
+ l |= prescaler << 2;
+ }
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_CTRL_REG, l);
+
+ /* Save the context */
+ timer->context.tclr = l;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+static int omap_dm_timer_set_int_enable(struct omap_dm_timer *timer,
+ unsigned int value)
+{
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+ __omap_dm_timer_int_enable(timer, value);
+
+ /* Save the context */
+ timer->context.tier = value;
+ timer->context.twer = value;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+/**
+ * omap_dm_timer_set_int_disable - disable timer interrupts
+ * @timer: pointer to timer handle
+ * @mask: bit mask of interrupts to be disabled
+ *
+ * Disables the specified timer interrupts for a timer.
+ */
+static int omap_dm_timer_set_int_disable(struct omap_dm_timer *timer, u32 mask)
+{
+ u32 l = mask;
+
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ omap_dm_timer_enable(timer);
+
+ if (timer->revision == 1)
+ l = readl_relaxed(timer->irq_ena) & ~mask;
+
+ writel_relaxed(l, timer->irq_dis);
+ l = omap_dm_timer_read_reg(timer, OMAP_TIMER_WAKEUP_EN_REG) & ~mask;
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG, l);
+
+ /* Save the context */
+ timer->context.tier &= ~mask;
+ timer->context.twer &= ~mask;
+ omap_dm_timer_disable(timer);
+ return 0;
+}
+
+static unsigned int omap_dm_timer_read_status(struct omap_dm_timer *timer)
+{
+ unsigned int l;
+
+ if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ pr_err("%s: timer not available or enabled.\n", __func__);
+ return 0;
+ }
+
+ l = readl_relaxed(timer->irq_stat);
+
+ return l;
+}
+
+static int omap_dm_timer_write_status(struct omap_dm_timer *timer, unsigned int value)
+{
+ if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev)))
+ return -EINVAL;
+
+ __omap_dm_timer_write_status(timer, value);
+
+ return 0;
+}
+
+static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *timer)
+{
+ if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ pr_err("%s: timer not available or enabled.\n", __func__);
+ return 0;
+ }
+
+ return __omap_dm_timer_read_counter(timer, timer->posted);
+}
+
+static int omap_dm_timer_write_counter(struct omap_dm_timer *timer, unsigned int value)
+{
+ if (unlikely(!timer || pm_runtime_suspended(&timer->pdev->dev))) {
+ pr_err("%s: timer not available or enabled.\n", __func__);
+ return -EINVAL;
+ }
+
+ omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG, value);
+
+ /* Save the context */
+ timer->context.tcrr = value;
+ return 0;
+}
+
+int omap_dm_timers_active(void)
+{
+ struct omap_dm_timer *timer;
+
+ list_for_each_entry(timer, &omap_timer_list, node) {
+ if (!timer->reserved)
+ continue;
+
+ if (omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG) &
+ OMAP_TIMER_CTRL_ST) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static const struct of_device_id omap_timer_match[];
+
+/**
+ * omap_dm_timer_probe - probe function called for every registered device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by driver framework at the end of device registration for all
+ * timer devices.
+ */
+static int omap_dm_timer_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct omap_dm_timer *timer;
+ struct resource *mem, *irq;
+ struct device *dev = &pdev->dev;
+ const struct dmtimer_platform_data *pdata;
+ int ret;
+
+ pdata = of_device_get_match_data(dev);
+ if (!pdata)
+ pdata = dev_get_platdata(dev);
+ else
+ dev->platform_data = (void *)pdata;
+
+ if (!pdata) {
+ dev_err(dev, "%s: no platform data.\n", __func__);
+ return -ENODEV;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (unlikely(!irq)) {
+ dev_err(dev, "%s: no IRQ resource.\n", __func__);
+ return -ENODEV;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!mem)) {
+ dev_err(dev, "%s: no memory resource.\n", __func__);
+ return -ENODEV;
+ }
+
+ timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL);
+ if (!timer)
+ return -ENOMEM;
+
+ timer->fclk = ERR_PTR(-ENODEV);
+ timer->io_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(timer->io_base))
+ return PTR_ERR(timer->io_base);
+
+ if (dev->of_node) {
+ if (of_find_property(dev->of_node, "ti,timer-alwon", NULL))
+ timer->capability |= OMAP_TIMER_ALWON;
+ if (of_find_property(dev->of_node, "ti,timer-dsp", NULL))
+ timer->capability |= OMAP_TIMER_HAS_DSP_IRQ;
+ if (of_find_property(dev->of_node, "ti,timer-pwm", NULL))
+ timer->capability |= OMAP_TIMER_HAS_PWM;
+ if (of_find_property(dev->of_node, "ti,timer-secure", NULL))
+ timer->capability |= OMAP_TIMER_SECURE;
+ } else {
+ timer->id = pdev->id;
+ timer->capability = pdata->timer_capability;
+ timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
+ timer->get_context_loss_count = pdata->get_context_loss_count;
+ }
+
+ if (pdata)
+ timer->errata = pdata->timer_errata;
+
+ timer->irq = irq->start;
+ timer->pdev = pdev;
+
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
+
+ if (!timer->reserved) {
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+ __func__);
+ goto err_get_sync;
+ }
+ __omap_dm_timer_init_regs(timer);
+ pm_runtime_put(dev);
+ }
+
+ /* add the timer element to the list */
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ list_add_tail(&timer->node, &omap_timer_list);
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ dev_dbg(dev, "Device Probed.\n");
+
+ return 0;
+
+err_get_sync:
+ pm_runtime_put_noidle(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+/**
+ * omap_dm_timer_remove - cleanup a registered timer device
+ * @pdev: pointer to current timer platform device
+ *
+ * Called by driver framework whenever a timer device is unregistered.
+ * In addition to freeing platform resources it also deletes the timer
+ * entry from the local list.
+ */
+static int omap_dm_timer_remove(struct platform_device *pdev)
+{
+ struct omap_dm_timer *timer;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dm_timer_lock, flags);
+ list_for_each_entry(timer, &omap_timer_list, node)
+ if (!strcmp(dev_name(&timer->pdev->dev),
+ dev_name(&pdev->dev))) {
+ list_del(&timer->node);
+ ret = 0;
+ break;
+ }
+ spin_unlock_irqrestore(&dm_timer_lock, flags);
+
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct omap_dm_timer_ops dmtimer_ops = {
+ .request_by_node = omap_dm_timer_request_by_node,
+ .request_specific = omap_dm_timer_request_specific,
+ .request = omap_dm_timer_request,
+ .set_source = omap_dm_timer_set_source,
+ .get_irq = omap_dm_timer_get_irq,
+ .set_int_enable = omap_dm_timer_set_int_enable,
+ .set_int_disable = omap_dm_timer_set_int_disable,
+ .free = omap_dm_timer_free,
+ .enable = omap_dm_timer_enable,
+ .disable = omap_dm_timer_disable,
+ .get_fclk = omap_dm_timer_get_fclk,
+ .start = omap_dm_timer_start,
+ .stop = omap_dm_timer_stop,
+ .set_load = omap_dm_timer_set_load,
+ .set_match = omap_dm_timer_set_match,
+ .set_pwm = omap_dm_timer_set_pwm,
+ .set_prescaler = omap_dm_timer_set_prescaler,
+ .read_counter = omap_dm_timer_read_counter,
+ .write_counter = omap_dm_timer_write_counter,
+ .read_status = omap_dm_timer_read_status,
+ .write_status = omap_dm_timer_write_status,
+};
+
+static const struct dmtimer_platform_data omap3plus_pdata = {
+ .timer_errata = OMAP_TIMER_ERRATA_I103_I767,
+ .timer_ops = &dmtimer_ops,
+};
+
+static const struct of_device_id omap_timer_match[] = {
+ {
+ .compatible = "ti,omap2420-timer",
+ },
+ {
+ .compatible = "ti,omap3430-timer",
+ .data = &omap3plus_pdata,
+ },
+ {
+ .compatible = "ti,omap4430-timer",
+ .data = &omap3plus_pdata,
+ },
+ {
+ .compatible = "ti,omap5430-timer",
+ .data = &omap3plus_pdata,
+ },
+ {
+ .compatible = "ti,am335x-timer",
+ .data = &omap3plus_pdata,
+ },
+ {
+ .compatible = "ti,am335x-timer-1ms",
+ .data = &omap3plus_pdata,
+ },
+ {
+ .compatible = "ti,dm816-timer",
+ .data = &omap3plus_pdata,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_timer_match);
+
+static struct platform_driver omap_dm_timer_driver = {
+ .probe = omap_dm_timer_probe,
+ .remove = omap_dm_timer_remove,
+ .driver = {
+ .name = "omap_timer",
+ .of_match_table = of_match_ptr(omap_timer_match),
+ },
+};
+
+early_platform_init("earlytimer", &omap_dm_timer_driver);
+module_platform_driver(omap_dm_timer_driver);
+
+MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
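
Client drivers no longer call dmtimer symbols directly; they reach the functions above through the omap_dm_timer_ops table published via dmtimer_platform_data (see omap3plus_pdata). A rough usage sketch, assuming a client that already holds the timer's device-tree node and the ops pointer from the timer's platform data; example_setup and the load value are illustrative only:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>

static int example_setup(struct device_node *timer_np,
			 const struct omap_dm_timer_ops *ops)
{
	struct omap_dm_timer *timer;

	timer = ops->request_by_node(timer_np);
	if (!timer)
		return -EPROBE_DEFER;

	/* 32 kHz source, autoreload from 0xffffff00, then run the timer */
	ops->set_source(timer, OMAP_TIMER_SRC_32_KHZ);
	ops->set_load(timer, 1, 0xffffff00);
	ops->start(timer);

	/* ... */

	ops->stop(timer);
	ops->free(timer);

	return 0;
}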
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 736ac887303c..605ec8cce67b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -313,17 +313,6 @@ config MTD_NAND_ATMEL
Enables support for NAND Flash / Smart Media Card interface
on Atmel AT91 processors.
-config MTD_NAND_PXA3xx
- tristate "NAND support on PXA3xx and Armada 370/XP"
- depends on !MTD_NAND_MARVELL
- depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
- help
-
- This enables the driver for the NAND flash device found on
- PXA3xx processors (NFCv1) and also on 32-bit Armada
- platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
- platforms (7K, 8K) (NFCv2).
-
config MTD_NAND_MARVELL
tristate "NAND controller support on Marvell boards"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 921634ba400c..c882d5ef192a 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -31,7 +31,6 @@ omap2_nand-objs := omap2.o
obj-$(CONFIG_MTD_NAND_OMAP2) += omap2_nand.o
obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD) += omap_elm.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
-obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_TMIO) += tmio_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
index 2196f2a233d6..03805f9669da 100644
--- a/drivers/mtd/nand/marvell_nand.c
+++ b/drivers/mtd/nand/marvell_nand.c
@@ -2520,8 +2520,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
if (pdata)
/* Legacy bindings support only one chip */
- ret = mtd_device_register(mtd, pdata->parts[0],
- pdata->nr_parts[0]);
+ ret = mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
else
ret = mtd_device_register(mtd, NULL, 0);
if (ret) {
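
The Marvell hunk above reflects that legacy (non-DT) platform data describes a single chip, so the partition table is passed straight to mtd_device_register() rather than being indexed per chip select. A small illustration of that call with a hypothetical board partition layout; example_parts and example_register are made up for illustration, not from the patch:

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const struct mtd_partition example_parts[] = {
	{ .name = "bootloader", .offset = 0, .size = SZ_1M },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static int example_register(struct mtd_info *mtd)
{
	/* Register mtd with the partitions above, as the driver now does
	 * with pdata->parts / pdata->nr_parts for the single legacy chip. */
	return mtd_device_register(mtd, example_parts, ARRAY_SIZE(example_parts));
}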
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
deleted file mode 100644
index d1979c7dbe7e..000000000000
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ /dev/null
@@ -1,2105 +0,0 @@
-/*
- * drivers/mtd/nand/pxa3xx_nand.c
- *
- * Copyright © 2005 Intel Corporation
- * Copyright © 2006 Marvell International Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_data/mtd-nand-pxa3xx.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-
-#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
-#define NAND_STOP_DELAY msecs_to_jiffies(40)
-#define PAGE_CHUNK_SIZE (2048)
-
-/*
- * Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM.
- * ONFI param page is 256 bytes, and there are three redundant copies
- * to be read. JEDEC param page is 512 bytes, and there are also three
- * redundant copies to be read.
- * Hence this buffer should be at least 512 x 3. Let's pick 2048.
- */
-#define INIT_BUFFER_SIZE 2048
-
-/* System control register and bit to enable NAND on some SoCs */
-#define GENCONF_SOC_DEVICE_MUX 0x208
-#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
-
-/* registers and bit definitions */
-#define NDCR (0x00) /* Control register */
-#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
-#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
-#define NDSR (0x14) /* Status Register */
-#define NDPCR (0x18) /* Page Count Register */
-#define NDBDR0 (0x1C) /* Bad Block Register 0 */
-#define NDBDR1 (0x20) /* Bad Block Register 1 */
-#define NDECCCTRL (0x28) /* ECC control */
-#define NDDB (0x40) /* Data Buffer */
-#define NDCB0 (0x48) /* Command Buffer0 */
-#define NDCB1 (0x4C) /* Command Buffer1 */
-#define NDCB2 (0x50) /* Command Buffer2 */
-
-#define NDCR_SPARE_EN (0x1 << 31)
-#define NDCR_ECC_EN (0x1 << 30)
-#define NDCR_DMA_EN (0x1 << 29)
-#define NDCR_ND_RUN (0x1 << 28)
-#define NDCR_DWIDTH_C (0x1 << 27)
-#define NDCR_DWIDTH_M (0x1 << 26)
-#define NDCR_PAGE_SZ (0x1 << 24)
-#define NDCR_NCSX (0x1 << 23)
-#define NDCR_ND_MODE (0x3 << 21)
-#define NDCR_NAND_MODE (0x0)
-#define NDCR_CLR_PG_CNT (0x1 << 20)
-#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
-#define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
-#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
-#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
-
-#define NDCR_RA_START (0x1 << 15)
-#define NDCR_PG_PER_BLK (0x1 << 14)
-#define NDCR_ND_ARB_EN (0x1 << 12)
-#define NDCR_INT_MASK (0xFFF)
-
-#define NDSR_MASK (0xfff)
-#define NDSR_ERR_CNT_OFF (16)
-#define NDSR_ERR_CNT_MASK (0x1f)
-#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
-#define NDSR_RDY (0x1 << 12)
-#define NDSR_FLASH_RDY (0x1 << 11)
-#define NDSR_CS0_PAGED (0x1 << 10)
-#define NDSR_CS1_PAGED (0x1 << 9)
-#define NDSR_CS0_CMDD (0x1 << 8)
-#define NDSR_CS1_CMDD (0x1 << 7)
-#define NDSR_CS0_BBD (0x1 << 6)
-#define NDSR_CS1_BBD (0x1 << 5)
-#define NDSR_UNCORERR (0x1 << 4)
-#define NDSR_CORERR (0x1 << 3)
-#define NDSR_WRDREQ (0x1 << 2)
-#define NDSR_RDDREQ (0x1 << 1)
-#define NDSR_WRCMDREQ (0x1)
-
-#define NDCB0_LEN_OVRD (0x1 << 28)
-#define NDCB0_ST_ROW_EN (0x1 << 26)
-#define NDCB0_AUTO_RS (0x1 << 25)
-#define NDCB0_CSEL (0x1 << 24)
-#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
-#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
-#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
-#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
-#define NDCB0_NC (0x1 << 20)
-#define NDCB0_DBC (0x1 << 19)
-#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
-#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
-#define NDCB0_CMD2_MASK (0xff << 8)
-#define NDCB0_CMD1_MASK (0xff)
-#define NDCB0_ADDR_CYC_SHIFT (16)
-
-#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
-#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
-#define EXT_CMD_TYPE_READ 4 /* Read */
-#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
-#define EXT_CMD_TYPE_FINAL 3 /* Final command */
-#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
-#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
-
-/*
- * This should be large enough to read 'ONFI' and 'JEDEC'.
- * Let's use 7 bytes, which is the maximum ID count supported
- * by the controller (see NDCR_RD_ID_CNT_MASK).
- */
-#define READ_ID_BYTES 7
-
-/* macros for registers read/write */
-#define nand_writel(info, off, val) \
- do { \
- dev_vdbg(&info->pdev->dev, \
- "%s():%d nand_writel(0x%x, 0x%04x)\n", \
- __func__, __LINE__, (val), (off)); \
- writel_relaxed((val), (info)->mmio_base + (off)); \
- } while (0)
-
-#define nand_readl(info, off) \
- ({ \
- unsigned int _v; \
- _v = readl_relaxed((info)->mmio_base + (off)); \
- dev_vdbg(&info->pdev->dev, \
- "%s():%d nand_readl(0x%04x) = 0x%x\n", \
- __func__, __LINE__, (off), _v); \
- _v; \
- })
-
-/* error code and state */
-enum {
- ERR_NONE = 0,
- ERR_DMABUSERR = -1,
- ERR_SENDCMD = -2,
- ERR_UNCORERR = -3,
- ERR_BBERR = -4,
- ERR_CORERR = -5,
-};
-
-enum {
- STATE_IDLE = 0,
- STATE_PREPARED,
- STATE_CMD_HANDLE,
- STATE_DMA_READING,
- STATE_DMA_WRITING,
- STATE_DMA_DONE,
- STATE_PIO_READING,
- STATE_PIO_WRITING,
- STATE_CMD_DONE,
- STATE_READY,
-};
-
-enum pxa3xx_nand_variant {
- PXA3XX_NAND_VARIANT_PXA,
- PXA3XX_NAND_VARIANT_ARMADA370,
- PXA3XX_NAND_VARIANT_ARMADA_8K,
-};
-
-struct pxa3xx_nand_host {
- struct nand_chip chip;
- void *info_data;
-
- /* page size of attached chip */
- int use_ecc;
- int cs;
-
- /* calculated from pxa3xx_nand_flash data */
- unsigned int col_addr_cycles;
- unsigned int row_addr_cycles;
-};
-
-struct pxa3xx_nand_info {
- struct nand_hw_control controller;
- struct platform_device *pdev;
-
- struct clk *clk;
- void __iomem *mmio_base;
- unsigned long mmio_phys;
- struct completion cmd_complete, dev_ready;
-
- unsigned int buf_start;
- unsigned int buf_count;
- unsigned int buf_size;
- unsigned int data_buff_pos;
- unsigned int oob_buff_pos;
-
- /* DMA information */
- struct scatterlist sg;
- enum dma_data_direction dma_dir;
- struct dma_chan *dma_chan;
- dma_cookie_t dma_cookie;
- int drcmr_dat;
-
- unsigned char *data_buff;
- unsigned char *oob_buff;
- dma_addr_t data_buff_phys;
- int data_dma_ch;
-
- struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
- unsigned int state;
-
- /*
- * This driver supports NFCv1 (as found in PXA SoC)
- * and NFCv2 (as found in Armada 370/XP SoC).
- */
- enum pxa3xx_nand_variant variant;
-
- int cs;
- int use_ecc; /* use HW ECC ? */
- int ecc_bch; /* using BCH ECC? */
- int use_dma; /* use DMA ? */
- int use_spare; /* use spare ? */
- int need_wait;
-
- /* Amount of real data per full chunk */
- unsigned int chunk_size;
-
- /* Amount of spare data per full chunk */
- unsigned int spare_size;
-
- /* Number of full chunks (i.e chunk_size + spare_size) */
- unsigned int nfullchunks;
-
- /*
- * Total number of chunks. If equal to nfullchunks, then there
- * are only full chunks. Otherwise, there is one last chunk of
- * size (last_chunk_size + last_spare_size)
- */
- unsigned int ntotalchunks;
-
- /* Amount of real data in the last chunk */
- unsigned int last_chunk_size;
-
- /* Amount of spare data in the last chunk */
- unsigned int last_spare_size;
-
- unsigned int ecc_size;
- unsigned int ecc_err_cnt;
- unsigned int max_bitflips;
- int retcode;
-
- /*
-	 * Variables only valid during command
-	 * execution. step_chunk_size and step_spare_size are the
-	 * amounts of real data and spare data in the current
-	 * chunk. cur_chunk is the chunk currently being
-	 * read/programmed.
- */
- unsigned int step_chunk_size;
- unsigned int step_spare_size;
- unsigned int cur_chunk;
-
- /* cached register value */
- uint32_t reg_ndcr;
- uint32_t ndtr0cs0;
- uint32_t ndtr1cs0;
-
- /* generated NDCBx register values */
- uint32_t ndcb0;
- uint32_t ndcb1;
- uint32_t ndcb2;
- uint32_t ndcb3;
-};
-
-static bool use_dma = 1;
-module_param(use_dma, bool, 0444);
-MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
-
-struct pxa3xx_nand_timing {
- unsigned int tCH; /* Enable signal hold time */
- unsigned int tCS; /* Enable signal setup time */
- unsigned int tWH; /* ND_nWE high duration */
- unsigned int tWP; /* ND_nWE pulse time */
- unsigned int tRH; /* ND_nRE high duration */
- unsigned int tRP; /* ND_nRE pulse width */
- unsigned int tR; /* ND_nWE high to ND_nRE low for read */
- unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
- unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
-};
-
-struct pxa3xx_nand_flash {
- uint32_t chip_id;
- unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
- unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
- struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
-};
-
-static struct pxa3xx_nand_timing timing[] = {
- { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
- { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
- { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
- { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
-};
-
-static struct pxa3xx_nand_flash builtin_flash_types[] = {
- { 0x46ec, 16, 16, &timing[1] },
- { 0xdaec, 8, 8, &timing[1] },
- { 0xd7ec, 8, 8, &timing[1] },
- { 0xa12c, 8, 8, &timing[2] },
- { 0xb12c, 16, 16, &timing[2] },
- { 0xdc2c, 8, 8, &timing[2] },
- { 0xcc2c, 16, 16, &timing[2] },
- { 0xba20, 16, 16, &timing[3] },
-};
-
-static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int nchunks = mtd->writesize / info->chunk_size;
-
- if (section >= nchunks)
- return -ERANGE;
-
- oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
- info->spare_size;
- oobregion->length = info->ecc_size;
-
- return 0;
-}
-
-static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int nchunks = mtd->writesize / info->chunk_size;
-
- if (section >= nchunks)
- return -ERANGE;
-
- if (!info->spare_size)
- return 0;
-
- oobregion->offset = section * (info->ecc_size + info->spare_size);
- oobregion->length = info->spare_size;
- if (!section) {
- /*
- * Bootrom looks in bytes 0 & 5 for bad blocks for the
- * 4KB page / 4bit BCH combination.
- */
- if (mtd->writesize == 4096 && info->chunk_size == 2048) {
- oobregion->offset += 6;
- oobregion->length -= 6;
- } else {
- oobregion->offset += 2;
- oobregion->length -= 2;
- }
- }
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
- .ecc = pxa3xx_ooblayout_ecc,
- .free = pxa3xx_ooblayout_free,
-};
-
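
A worked instance of the layout arithmetic above may help: the following is a minimal, standalone sketch of the OOB regions the two callbacks report, assuming the 4-bit/512B, 4096-byte-page configuration set up later in pxa_ecc_init() (chunk_size = 2048, spare_size = 32, ecc_size = 32, hence two sections). It is an editor's illustration only, not part of the driver.

/* Userspace illustration of the pxa3xx ooblayout arithmetic (assumed values). */
#include <stdio.h>

int main(void)
{
	unsigned int spare = 32, ecc = 32, section;

	for (section = 0; section < 2; section++) {
		unsigned int ecc_off = (ecc + spare) * section + spare;
		unsigned int free_off = (ecc + spare) * section;
		unsigned int free_len = spare;

		/* Section 0 skips the BootROM bad-block marker bytes;
		 * for the 4KB page / 2KB chunk case that is bytes 0-5. */
		if (section == 0) {
			free_off += 6;
			free_len -= 6;
		}
		printf("section %u: free %u+%u, ECC %u+%u\n",
		       section, free_off, free_len, ecc_off, ecc);
	}
	return 0;	/* prints: 6+26 / 32+32 and 64+32 / 96+32 */
}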
-static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
-static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
-
-static struct nand_bbt_descr bbt_main_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION,
- .offs = 8,
- .len = 6,
- .veroffs = 14,
- .maxblocks = 8, /* Last 8 blocks in each chip */
- .pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
- .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
- | NAND_BBT_2BIT | NAND_BBT_VERSION,
- .offs = 8,
- .len = 6,
- .veroffs = 14,
- .maxblocks = 8, /* Last 8 blocks in each chip */
- .pattern = bbt_mirror_pattern
-};
-
-#define NDTR0_tCH(c) (min((c), 7) << 19)
-#define NDTR0_tCS(c) (min((c), 7) << 16)
-#define NDTR0_tWH(c) (min((c), 7) << 11)
-#define NDTR0_tWP(c) (min((c), 7) << 8)
-#define NDTR0_tRH(c) (min((c), 7) << 3)
-#define NDTR0_tRP(c) (min((c), 7) << 0)
-
-#define NDTR1_tR(c) (min((c), 65535) << 16)
-#define NDTR1_tWHR(c) (min((c), 15) << 4)
-#define NDTR1_tAR(c) (min((c), 15) << 0)
-
-/* convert nanoseconds to NAND flash controller clock cycles */
-#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
-
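
Note that ns2cycle() truncates toward zero. Below is a minimal standalone sketch of the same arithmetic; the 156 MHz controller clock is purely an assumed example value, not something mandated by the driver.

/* Editor's illustration of the ns-to-cycles truncating conversion. */
#include <stdio.h>

static int ns2cycle_example(unsigned int ns, unsigned long clk_hz)
{
	/* same integer arithmetic as the ns2cycle() macro above */
	return (int)(ns * (clk_hz / 1000000) / 1000);
}

int main(void)
{
	/* 25 ns at 156 MHz: 25 * 156 / 1000 = 3 cycles (truncated) */
	printf("%d\n", ns2cycle_example(25, 156000000));
	return 0;
}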
-static const struct of_device_id pxa3xx_nand_dt_ids[] = {
- {
- .compatible = "marvell,pxa3xx-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_PXA,
- },
- {
- .compatible = "marvell,armada370-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
- },
- {
- .compatible = "marvell,armada-8k-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
- },
- {}
-};
-MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
-
-static enum pxa3xx_nand_variant
-pxa3xx_nand_get_variant(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
- if (!of_id)
- return PXA3XX_NAND_VARIANT_PXA;
- return (enum pxa3xx_nand_variant)of_id->data;
-}
-
-static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
- const struct pxa3xx_nand_timing *t)
-{
- struct pxa3xx_nand_info *info = host->info_data;
- unsigned long nand_clk = clk_get_rate(info->clk);
- uint32_t ndtr0, ndtr1;
-
- ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
- NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
- NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
- NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
- NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
- NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
-
- ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
- NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
- NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
-
- info->ndtr0cs0 = ndtr0;
- info->ndtr1cs0 = ndtr1;
- nand_writel(info, NDTR0CS0, ndtr0);
- nand_writel(info, NDTR1CS0, ndtr1);
-}
-
-static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
- const struct nand_sdr_timings *t)
-{
- struct pxa3xx_nand_info *info = host->info_data;
- struct nand_chip *chip = &host->chip;
- unsigned long nand_clk = clk_get_rate(info->clk);
- uint32_t ndtr0, ndtr1;
-
- u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
- u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
- u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
- u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
- u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
- u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
- u32 tR = chip->chip_delay * 1000;
- u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
- u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
-
- /* fallback to a default value if tR = 0 */
- if (!tR)
- tR = 20000;
-
- ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
- NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
- NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
- NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
- NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
- NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
-
- ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
- NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
- NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
-
- info->ndtr0cs0 = ndtr0;
- info->ndtr1cs0 = ndtr1;
- nand_writel(info, NDTR0CS0, ndtr0);
- nand_writel(info, NDTR1CS0, ndtr1);
-}
-
-static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
- unsigned int *flash_width,
- unsigned int *dfc_width)
-{
- struct nand_chip *chip = &host->chip;
- struct pxa3xx_nand_info *info = host->info_data;
- const struct pxa3xx_nand_flash *f = NULL;
- int i, id, ntypes;
- u8 idbuf[2];
-
- ntypes = ARRAY_SIZE(builtin_flash_types);
-
- nand_readid_op(chip, 0, idbuf, sizeof(idbuf));
- id = idbuf[0] | (idbuf[1] << 8);
-
- for (i = 0; i < ntypes; i++) {
- f = &builtin_flash_types[i];
-
- if (f->chip_id == id)
- break;
- }
-
- if (i == ntypes) {
- dev_err(&info->pdev->dev, "Error: timings not found\n");
- return -EINVAL;
- }
-
- pxa3xx_nand_set_timing(host, f->timing);
-
- *flash_width = f->flash_width;
- *dfc_width = f->dfc_width;
-
- return 0;
-}
-
-static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
- int mode)
-{
- const struct nand_sdr_timings *timings;
-
- mode = fls(mode) - 1;
- if (mode < 0)
- mode = 0;
-
- timings = onfi_async_timing_mode_to_sdr_timings(mode);
- if (IS_ERR(timings))
- return PTR_ERR(timings);
-
- pxa3xx_nand_set_sdr_timing(host, timings);
-
- return 0;
-}
-
-static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct pxa3xx_nand_info *info = host->info_data;
- unsigned int flash_width = 0, dfc_width = 0;
- int mode, err;
-
- mode = onfi_get_async_timing_mode(chip);
- if (mode == ONFI_TIMING_MODE_UNKNOWN) {
- err = pxa3xx_nand_init_timings_compat(host, &flash_width,
- &dfc_width);
- if (err)
- return err;
-
- if (flash_width == 16) {
- info->reg_ndcr |= NDCR_DWIDTH_M;
- chip->options |= NAND_BUSWIDTH_16;
- }
-
- info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
- } else {
- err = pxa3xx_nand_init_timings_onfi(host, mode);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/*
- * NOTE: ND_RUN must be set first and the command buffer written
- * afterwards, otherwise the controller does not start.
- * We enable all the interrupts at the same time and let
- * pxa3xx_nand_irq() handle all the logic.
- */
-static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
-{
- uint32_t ndcr;
-
- ndcr = info->reg_ndcr;
-
- if (info->use_ecc) {
- ndcr |= NDCR_ECC_EN;
- if (info->ecc_bch)
- nand_writel(info, NDECCCTRL, 0x1);
- } else {
- ndcr &= ~NDCR_ECC_EN;
- if (info->ecc_bch)
- nand_writel(info, NDECCCTRL, 0x0);
- }
-
- if (info->use_dma)
- ndcr |= NDCR_DMA_EN;
- else
- ndcr &= ~NDCR_DMA_EN;
-
- if (info->use_spare)
- ndcr |= NDCR_SPARE_EN;
- else
- ndcr &= ~NDCR_SPARE_EN;
-
- ndcr |= NDCR_ND_RUN;
-
- /* clear status bits and run */
- nand_writel(info, NDSR, NDSR_MASK);
- nand_writel(info, NDCR, 0);
- nand_writel(info, NDCR, ndcr);
-}
-
-static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
-{
- uint32_t ndcr;
- int timeout = NAND_STOP_DELAY;
-
-	/* wait for the RUN bit in NDCR to become 0 */
- ndcr = nand_readl(info, NDCR);
- while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
- ndcr = nand_readl(info, NDCR);
- udelay(1);
- }
-
- if (timeout <= 0) {
- ndcr &= ~NDCR_ND_RUN;
- nand_writel(info, NDCR, ndcr);
- }
- if (info->dma_chan)
- dmaengine_terminate_all(info->dma_chan);
-
- /* clear status bits */
- nand_writel(info, NDSR, NDSR_MASK);
-}
-
-static void __maybe_unused
-enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
-{
- uint32_t ndcr;
-
- ndcr = nand_readl(info, NDCR);
- nand_writel(info, NDCR, ndcr & ~int_mask);
-}
-
-static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
-{
- uint32_t ndcr;
-
- ndcr = nand_readl(info, NDCR);
- nand_writel(info, NDCR, ndcr | int_mask);
-}
-
-static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
-{
- if (info->ecc_bch) {
- u32 val;
- int ret;
-
- /*
- * According to the datasheet, when reading from NDDB
-		 * with BCH enabled, after each 32-byte read we have
-		 * to make sure that the NDSR.RDDREQ bit is set.
-		 *
-		 * Drain the FIFO eight 32-bit reads at a time, and skip
- * the polling on the last read.
- */
- while (len > 8) {
- ioread32_rep(info->mmio_base + NDDB, data, 8);
-
- ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
- val & NDSR_RDDREQ, 1000, 5000);
- if (ret) {
- dev_err(&info->pdev->dev,
- "Timeout on RDDREQ while draining the FIFO\n");
- return;
- }
-
- data += 32;
- len -= 8;
- }
- }
-
- ioread32_rep(info->mmio_base + NDDB, data, len);
-}
-
-static void handle_data_pio(struct pxa3xx_nand_info *info)
-{
- switch (info->state) {
- case STATE_PIO_WRITING:
- if (info->step_chunk_size)
- writesl(info->mmio_base + NDDB,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(info->step_chunk_size, 4));
-
- if (info->step_spare_size)
- writesl(info->mmio_base + NDDB,
- info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->step_spare_size, 4));
- break;
- case STATE_PIO_READING:
- if (info->step_chunk_size)
- drain_fifo(info,
- info->data_buff + info->data_buff_pos,
- DIV_ROUND_UP(info->step_chunk_size, 4));
-
- if (info->step_spare_size)
- drain_fifo(info,
- info->oob_buff + info->oob_buff_pos,
- DIV_ROUND_UP(info->step_spare_size, 4));
- break;
- default:
- dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
- info->state);
- BUG();
- }
-
- /* Update buffer pointers for multi-page read/write */
- info->data_buff_pos += info->step_chunk_size;
- info->oob_buff_pos += info->step_spare_size;
-}
-
-static void pxa3xx_nand_data_dma_irq(void *data)
-{
- struct pxa3xx_nand_info *info = data;
- struct dma_tx_state state;
- enum dma_status status;
-
- status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
- if (likely(status == DMA_COMPLETE)) {
- info->state = STATE_DMA_DONE;
- } else {
- dev_err(&info->pdev->dev, "DMA error on data channel\n");
- info->retcode = ERR_DMABUSERR;
- }
- dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
-
- nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
- enable_int(info, NDCR_INT_MASK);
-}
-
-static void start_data_dma(struct pxa3xx_nand_info *info)
-{
- enum dma_transfer_direction direction;
- struct dma_async_tx_descriptor *tx;
-
- switch (info->state) {
- case STATE_DMA_WRITING:
- info->dma_dir = DMA_TO_DEVICE;
- direction = DMA_MEM_TO_DEV;
- break;
- case STATE_DMA_READING:
- info->dma_dir = DMA_FROM_DEVICE;
- direction = DMA_DEV_TO_MEM;
- break;
- default:
- dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
- info->state);
- BUG();
- }
- info->sg.length = info->chunk_size;
- if (info->use_spare)
- info->sg.length += info->spare_size + info->ecc_size;
- dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
-
- tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
- DMA_PREP_INTERRUPT);
- if (!tx) {
- dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
- return;
- }
- tx->callback = pxa3xx_nand_data_dma_irq;
- tx->callback_param = info;
- info->dma_cookie = dmaengine_submit(tx);
- dma_async_issue_pending(info->dma_chan);
- dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
- __func__, direction, info->dma_cookie, info->sg.length);
-}
-
-static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
-{
- struct pxa3xx_nand_info *info = data;
-
- handle_data_pio(info);
-
- info->state = STATE_CMD_DONE;
- nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
-{
- struct pxa3xx_nand_info *info = devid;
- unsigned int status, is_completed = 0, is_ready = 0;
- unsigned int ready, cmd_done;
- irqreturn_t ret = IRQ_HANDLED;
-
- if (info->cs == 0) {
- ready = NDSR_FLASH_RDY;
- cmd_done = NDSR_CS0_CMDD;
- } else {
- ready = NDSR_RDY;
- cmd_done = NDSR_CS1_CMDD;
- }
-
- status = nand_readl(info, NDSR);
-
- if (status & NDSR_UNCORERR)
- info->retcode = ERR_UNCORERR;
- if (status & NDSR_CORERR) {
- info->retcode = ERR_CORERR;
- if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
- info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
- info->ecc_bch)
- info->ecc_err_cnt = NDSR_ERR_CNT(status);
- else
- info->ecc_err_cnt = 1;
-
- /*
- * Each chunk composing a page is corrected independently,
- * and we need to store maximum number of corrected bitflips
- * to return it to the MTD layer in ecc.read_page().
- */
- info->max_bitflips = max_t(unsigned int,
- info->max_bitflips,
- info->ecc_err_cnt);
- }
- if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
-		/* decide whether to use DMA to transfer the data */
- if (info->use_dma) {
- disable_int(info, NDCR_INT_MASK);
- info->state = (status & NDSR_RDDREQ) ?
- STATE_DMA_READING : STATE_DMA_WRITING;
- start_data_dma(info);
- goto NORMAL_IRQ_EXIT;
- } else {
- info->state = (status & NDSR_RDDREQ) ?
- STATE_PIO_READING : STATE_PIO_WRITING;
- ret = IRQ_WAKE_THREAD;
- goto NORMAL_IRQ_EXIT;
- }
- }
- if (status & cmd_done) {
- info->state = STATE_CMD_DONE;
- is_completed = 1;
- }
- if (status & ready) {
- info->state = STATE_READY;
- is_ready = 1;
- }
-
- /*
-	 * Clear all status bits before issuing the next command, which
-	 * can and will alter them and will deserve a new
-	 * interrupt on its own. This lets the controller exit the IRQ handler.
- */
- nand_writel(info, NDSR, status);
-
- if (status & NDSR_WRCMDREQ) {
- status &= ~NDSR_WRCMDREQ;
- info->state = STATE_CMD_HANDLE;
-
- /*
- * Command buffer registers NDCB{0-2} (and optionally NDCB3)
-		 * must be loaded by writing either 12 or 16 bytes
-		 * directly to NDCB0, four bytes at a time.
- *
- * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
- * but each NDCBx register can be read.
- */
- nand_writel(info, NDCB0, info->ndcb0);
- nand_writel(info, NDCB0, info->ndcb1);
- nand_writel(info, NDCB0, info->ndcb2);
-
- /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
- info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
- nand_writel(info, NDCB0, info->ndcb3);
- }
-
- if (is_completed)
- complete(&info->cmd_complete);
- if (is_ready)
- complete(&info->dev_ready);
-NORMAL_IRQ_EXIT:
- return ret;
-}
-
-static inline int is_buf_blank(uint8_t *buf, size_t len)
-{
- for (; len > 0; len--)
- if (*buf++ != 0xff)
- return 0;
- return 1;
-}
-
-static void set_command_address(struct pxa3xx_nand_info *info,
- unsigned int page_size, uint16_t column, int page_addr)
-{
-	/* small-page address setting */
- if (page_size < PAGE_CHUNK_SIZE) {
- info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
- | (column & 0xFF);
-
- info->ndcb2 = 0;
- } else {
- info->ndcb1 = ((page_addr & 0xFFFF) << 16)
- | (column & 0xFFFF);
-
- if (page_addr & 0xFF0000)
- info->ndcb2 = (page_addr & 0xFF0000) >> 16;
- else
- info->ndcb2 = 0;
- }
-}
-
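
A quick worked example of the large-page address packing performed by set_command_address() above; the page address 0x12345 is an assumed value chosen only to show how NDCB1/NDCB2 end up filled.

/* Editor's illustration of the NDCB1/NDCB2 packing (hypothetical values). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int page_addr = 0x12345;	/* example page address */
	uint16_t column = 0;
	uint32_t ndcb1, ndcb2;

	ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
	ndcb2 = (page_addr & 0xFF0000) >> 16;

	/* prints NDCB1=0x23450000 NDCB2=0x01 */
	printf("NDCB1=0x%08x NDCB2=0x%02x\n",
	       (unsigned int)ndcb1, (unsigned int)ndcb2);
	return 0;
}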
-static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
-{
- struct pxa3xx_nand_host *host = info->host[info->cs];
- struct mtd_info *mtd = nand_to_mtd(&host->chip);
-
-	/* reset the data and OOB column pointers before handling data */
- info->buf_start = 0;
- info->buf_count = 0;
- info->data_buff_pos = 0;
- info->oob_buff_pos = 0;
- info->step_chunk_size = 0;
- info->step_spare_size = 0;
- info->cur_chunk = 0;
- info->use_ecc = 0;
- info->use_spare = 1;
- info->retcode = ERR_NONE;
- info->ecc_err_cnt = 0;
- info->ndcb3 = 0;
- info->need_wait = 0;
-
- switch (command) {
- case NAND_CMD_READ0:
- case NAND_CMD_READOOB:
- case NAND_CMD_PAGEPROG:
- info->use_ecc = 1;
- break;
- case NAND_CMD_PARAM:
- info->use_spare = 0;
- break;
- default:
- info->ndcb1 = 0;
- info->ndcb2 = 0;
- break;
- }
-
- /*
- * If we are about to issue a read command, or about to set
- * the write address, then clean the data buffer.
- */
- if (command == NAND_CMD_READ0 ||
- command == NAND_CMD_READOOB ||
- command == NAND_CMD_SEQIN) {
-
- info->buf_count = mtd->writesize + mtd->oobsize;
- memset(info->data_buff, 0xFF, info->buf_count);
- }
-
-}
-
-static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
- int ext_cmd_type, uint16_t column, int page_addr)
-{
- int addr_cycle, exec_cmd;
- struct pxa3xx_nand_host *host;
- struct mtd_info *mtd;
-
- host = info->host[info->cs];
- mtd = nand_to_mtd(&host->chip);
- addr_cycle = 0;
- exec_cmd = 1;
-
- if (info->cs != 0)
- info->ndcb0 = NDCB0_CSEL;
- else
- info->ndcb0 = 0;
-
- if (command == NAND_CMD_SEQIN)
- exec_cmd = 0;
-
- addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
- + host->col_addr_cycles);
-
- switch (command) {
- case NAND_CMD_READOOB:
- case NAND_CMD_READ0:
- info->buf_start = column;
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | addr_cycle
- | NAND_CMD_READ0;
-
- if (command == NAND_CMD_READOOB)
- info->buf_start += mtd->writesize;
-
- if (info->cur_chunk < info->nfullchunks) {
- info->step_chunk_size = info->chunk_size;
- info->step_spare_size = info->spare_size;
- } else {
- info->step_chunk_size = info->last_chunk_size;
- info->step_spare_size = info->last_spare_size;
- }
-
- /*
- * Multiple page read needs an 'extended command type' field,
- * which is either naked-read or last-read according to the
- * state.
- */
- if (mtd->writesize == PAGE_CHUNK_SIZE) {
- info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
- } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
- info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
- | NDCB0_LEN_OVRD
- | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->step_chunk_size +
- info->step_spare_size;
- }
-
- set_command_address(info, mtd->writesize, column, page_addr);
- break;
-
- case NAND_CMD_SEQIN:
-
- info->buf_start = column;
- set_command_address(info, mtd->writesize, 0, page_addr);
-
- /*
- * Multiple page programming needs to execute the initial
- * SEQIN command that sets the page address.
- */
- if (mtd->writesize > PAGE_CHUNK_SIZE) {
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
- | addr_cycle
- | command;
- exec_cmd = 1;
- }
- break;
-
- case NAND_CMD_PAGEPROG:
- if (is_buf_blank(info->data_buff,
- (mtd->writesize + mtd->oobsize))) {
- exec_cmd = 0;
- break;
- }
-
- if (info->cur_chunk < info->nfullchunks) {
- info->step_chunk_size = info->chunk_size;
- info->step_spare_size = info->spare_size;
- } else {
- info->step_chunk_size = info->last_chunk_size;
- info->step_spare_size = info->last_spare_size;
- }
-
- /* Second command setting for large pages */
- if (mtd->writesize > PAGE_CHUNK_SIZE) {
- /*
- * Multiple page write uses the 'extended command'
- * field. This can be used to issue a command dispatch
- * or a naked-write depending on the current stage.
- */
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_LEN_OVRD
- | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
- info->ndcb3 = info->step_chunk_size +
- info->step_spare_size;
-
- /*
- * This is the command dispatch that completes a chunked
- * page program operation.
- */
- if (info->cur_chunk == info->ntotalchunks) {
- info->ndcb0 = NDCB0_CMD_TYPE(0x1)
- | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
- | command;
- info->ndcb1 = 0;
- info->ndcb2 = 0;
- info->ndcb3 = 0;
- }
- } else {
- info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
- | NDCB0_AUTO_RS
- | NDCB0_ST_ROW_EN
- | NDCB0_DBC
- | (NAND_CMD_PAGEPROG << 8)
- | NAND_CMD_SEQIN
- | addr_cycle;
- }
- break;
-
- case NAND_CMD_PARAM:
- info->buf_count = INIT_BUFFER_SIZE;
- info->ndcb0 |= NDCB0_CMD_TYPE(0)
- | NDCB0_ADDR_CYC(1)
- | NDCB0_LEN_OVRD
- | command;
- info->ndcb1 = (column & 0xFF);
- info->ndcb3 = INIT_BUFFER_SIZE;
- info->step_chunk_size = INIT_BUFFER_SIZE;
- break;
-
- case NAND_CMD_READID:
- info->buf_count = READ_ID_BYTES;
- info->ndcb0 |= NDCB0_CMD_TYPE(3)
- | NDCB0_ADDR_CYC(1)
- | command;
- info->ndcb1 = (column & 0xFF);
-
- info->step_chunk_size = 8;
- break;
- case NAND_CMD_STATUS:
- info->buf_count = 1;
- info->ndcb0 |= NDCB0_CMD_TYPE(4)
- | NDCB0_ADDR_CYC(1)
- | command;
-
- info->step_chunk_size = 8;
- break;
-
- case NAND_CMD_ERASE1:
- info->ndcb0 |= NDCB0_CMD_TYPE(2)
- | NDCB0_AUTO_RS
- | NDCB0_ADDR_CYC(3)
- | NDCB0_DBC
- | (NAND_CMD_ERASE2 << 8)
- | NAND_CMD_ERASE1;
- info->ndcb1 = page_addr;
- info->ndcb2 = 0;
-
- break;
- case NAND_CMD_RESET:
- info->ndcb0 |= NDCB0_CMD_TYPE(5)
- | command;
-
- break;
-
- case NAND_CMD_ERASE2:
- exec_cmd = 0;
- break;
-
- default:
- exec_cmd = 0;
- dev_err(&info->pdev->dev, "non-supported command %x\n",
- command);
- break;
- }
-
- return exec_cmd;
-}
-
-static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
- int column, int page_addr)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int exec_cmd;
-
- /*
-	 * If this is an x16 device, then convert the input
-	 * "byte" address into a "word" address appropriate
-	 * for indexing a word-oriented device.
- */
- if (info->reg_ndcr & NDCR_DWIDTH_M)
- column /= 2;
-
- /*
-	 * Different NAND chips may be hooked to different chip
-	 * selects, so check whether the chip select has changed;
-	 * if so, reset the timings.
- */
- if (info->cs != host->cs) {
- info->cs = host->cs;
- nand_writel(info, NDTR0CS0, info->ndtr0cs0);
- nand_writel(info, NDTR1CS0, info->ndtr1cs0);
- }
-
- prepare_start_command(info, command);
-
- info->state = STATE_PREPARED;
- exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
-
- if (exec_cmd) {
- init_completion(&info->cmd_complete);
- init_completion(&info->dev_ready);
- info->need_wait = 1;
- pxa3xx_nand_start(info);
-
- if (!wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT)) {
- dev_err(&info->pdev->dev, "Wait time out!!!\n");
- /* Stop State Machine for next command cycle */
- pxa3xx_nand_stop(info);
- }
- }
- info->state = STATE_IDLE;
-}
-
-static void nand_cmdfunc_extended(struct mtd_info *mtd,
- const unsigned command,
- int column, int page_addr)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int exec_cmd, ext_cmd_type;
-
- /*
-	 * If this is an x16 device, then convert the input
-	 * "byte" address into a "word" address appropriate
-	 * for indexing a word-oriented device.
- */
- if (info->reg_ndcr & NDCR_DWIDTH_M)
- column /= 2;
-
- /*
-	 * Different NAND chips may be hooked to different chip
-	 * selects, so check whether the chip select has changed;
-	 * if so, reset the timings.
- */
- if (info->cs != host->cs) {
- info->cs = host->cs;
- nand_writel(info, NDTR0CS0, info->ndtr0cs0);
- nand_writel(info, NDTR1CS0, info->ndtr1cs0);
- }
-
- /* Select the extended command for the first command */
- switch (command) {
- case NAND_CMD_READ0:
- case NAND_CMD_READOOB:
- ext_cmd_type = EXT_CMD_TYPE_MONO;
- break;
- case NAND_CMD_SEQIN:
- ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
- break;
- case NAND_CMD_PAGEPROG:
- ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
- break;
- default:
- ext_cmd_type = 0;
- break;
- }
-
- prepare_start_command(info, command);
-
- /*
-	 * Prepare the "is ready" completion before starting a command
-	 * transaction sequence. If the command is not executed, the
-	 * completion is completed right away, see below.
-	 *
-	 * We can do this once, outside the loop, because the command
-	 * variable is invariant and thus so is exec_cmd.
- */
- info->need_wait = 1;
- init_completion(&info->dev_ready);
- do {
- info->state = STATE_PREPARED;
-
- exec_cmd = prepare_set_command(info, command, ext_cmd_type,
- column, page_addr);
- if (!exec_cmd) {
- info->need_wait = 0;
- complete(&info->dev_ready);
- break;
- }
-
- init_completion(&info->cmd_complete);
- pxa3xx_nand_start(info);
-
- if (!wait_for_completion_timeout(&info->cmd_complete,
- CHIP_DELAY_TIMEOUT)) {
- dev_err(&info->pdev->dev, "Wait time out!!!\n");
- /* Stop State Machine for next command cycle */
- pxa3xx_nand_stop(info);
- break;
- }
-
- /* Only a few commands need several steps */
- if (command != NAND_CMD_PAGEPROG &&
- command != NAND_CMD_READ0 &&
- command != NAND_CMD_READOOB)
- break;
-
- info->cur_chunk++;
-
- /* Check if the sequence is complete */
- if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
- break;
-
- /*
-		 * After a split program command sequence has issued
-		 * the command dispatch, the sequence is complete.
- */
- if (info->cur_chunk == (info->ntotalchunks + 1) &&
- command == NAND_CMD_PAGEPROG &&
- ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
- break;
-
- if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
- /* Last read: issue a 'last naked read' */
- if (info->cur_chunk == info->ntotalchunks - 1)
- ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
- else
- ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
-
- /*
-		 * If a split program command has no more data to transfer,
-		 * the command dispatch must be issued to complete it.
- */
- } else if (command == NAND_CMD_PAGEPROG &&
- info->cur_chunk == info->ntotalchunks) {
- ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
- }
- } while (1);
-
- info->state = STATE_IDLE;
-}
-
-static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf, int oob_required,
- int page)
-{
- nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- return nand_prog_page_end_op(chip);
-}
-
-static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int oob_required,
- int page)
-{
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
-
- nand_read_page_op(chip, page, 0, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
- if (info->retcode == ERR_CORERR && info->use_ecc) {
- mtd->ecc_stats.corrected += info->ecc_err_cnt;
-
- } else if (info->retcode == ERR_UNCORERR) {
- /*
-		 * For a blank page (all 0xff), the HW calculates its ECC as
-		 * 0, which is different from the ECC information within the
-		 * OOB; ignore such uncorrectable errors.
- */
- if (is_buf_blank(buf, mtd->writesize))
- info->retcode = ERR_NONE;
- else
- mtd->ecc_stats.failed++;
- }
-
- return info->max_bitflips;
-}
-
-static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- char retval = 0xFF;
-
- if (info->buf_start < info->buf_count)
-		/* Has a new command just been sent? */
- retval = info->data_buff[info->buf_start++];
-
- return retval;
-}
-
-static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- u16 retval = 0xFFFF;
-
- if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
- retval = *((u16 *)(info->data_buff+info->buf_start));
- info->buf_start += 2;
- }
- return retval;
-}
-
-static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
-
- memcpy(buf, info->data_buff + info->buf_start, real_len);
- info->buf_start += real_len;
-}
-
-static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
- const uint8_t *buf, int len)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
-
- memcpy(info->data_buff + info->buf_start, buf, real_len);
- info->buf_start += real_len;
-}
-
-static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
-{
- return;
-}
-
-static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
-
- if (info->need_wait) {
- info->need_wait = 0;
- if (!wait_for_completion_timeout(&info->dev_ready,
- CHIP_DELAY_TIMEOUT)) {
- dev_err(&info->pdev->dev, "Ready time out!!!\n");
- return NAND_STATUS_FAIL;
- }
- }
-
-	/* The cmdfunc path has already waited for the command to complete */
- if (this->state == FL_WRITING || this->state == FL_ERASING) {
- if (info->retcode == ERR_NONE)
- return 0;
- else
- return NAND_STATUS_FAIL;
- }
-
- return NAND_STATUS_READY;
-}
-
-static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
-{
- struct pxa3xx_nand_host *host = info->host[info->cs];
- struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
- const struct nand_sdr_timings *timings;
-
- /* Configure default flash values */
- info->chunk_size = PAGE_CHUNK_SIZE;
- info->reg_ndcr = 0x0; /* enable all interrupts */
- info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
- info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
- info->reg_ndcr |= NDCR_SPARE_EN;
-
-	/* try the common timings first */
- timings = onfi_async_timing_mode_to_sdr_timings(0);
- if (IS_ERR(timings))
- return PTR_ERR(timings);
-
- pxa3xx_nand_set_sdr_timing(host, timings);
- return 0;
-}
-
-static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
-{
- struct pxa3xx_nand_host *host = info->host[info->cs];
- struct nand_chip *chip = &host->chip;
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
- info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
- info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
-}
-
-static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
-{
- struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
- uint32_t ndcr = nand_readl(info, NDCR);
-
- /* Set an initial chunk size */
- info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
- info->reg_ndcr = ndcr &
- ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
- info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
- info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
- info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-}
-
-static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
-{
- struct platform_device *pdev = info->pdev;
- struct dma_slave_config config;
- dma_cap_mask_t mask;
- struct pxad_param param;
- int ret;
-
- info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
- if (info->data_buff == NULL)
- return -ENOMEM;
- if (use_dma == 0)
- return 0;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- sg_init_one(&info->sg, info->data_buff, info->buf_size);
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- param.prio = PXAD_PRIO_LOWEST;
- param.drcmr = info->drcmr_dat;
- info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
- &param, &pdev->dev,
- "data");
- if (!info->dma_chan) {
- dev_err(&pdev->dev, "unable to request data dma channel\n");
- return -ENODEV;
- }
-
- memset(&config, 0, sizeof(config));
- config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- config.src_addr = info->mmio_phys + NDDB;
- config.dst_addr = info->mmio_phys + NDDB;
- config.src_maxburst = 32;
- config.dst_maxburst = 32;
- ret = dmaengine_slave_config(info->dma_chan, &config);
- if (ret < 0) {
- dev_err(&info->pdev->dev,
- "dma channel configuration failed: %d\n",
- ret);
- return ret;
- }
-
- /*
- * Now that DMA buffers are allocated we turn on
- * DMA proper for I/O operations.
- */
- info->use_dma = 1;
- return 0;
-}
-
-static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
-{
- if (info->use_dma) {
- dmaengine_terminate_all(info->dma_chan);
- dma_release_channel(info->dma_chan);
- }
- kfree(info->data_buff);
-}
-
-static int pxa_ecc_init(struct pxa3xx_nand_info *info,
- struct mtd_info *mtd,
- int strength, int ecc_stepsize, int page_size)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
-
- if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
- info->nfullchunks = 1;
- info->ntotalchunks = 1;
- info->chunk_size = 2048;
- info->spare_size = 40;
- info->ecc_size = 24;
- ecc->mode = NAND_ECC_HW;
- ecc->size = 512;
- ecc->strength = 1;
-
- } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
- info->nfullchunks = 1;
- info->ntotalchunks = 1;
- info->chunk_size = 512;
- info->spare_size = 8;
- info->ecc_size = 8;
- ecc->mode = NAND_ECC_HW;
- ecc->size = 512;
- ecc->strength = 1;
-
- /*
- * Required ECC: 4-bit correction per 512 bytes
- * Select: 16-bit correction per 2048 bytes
- */
- } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
- info->ecc_bch = 1;
- info->nfullchunks = 1;
- info->ntotalchunks = 1;
- info->chunk_size = 2048;
- info->spare_size = 32;
- info->ecc_size = 32;
- ecc->mode = NAND_ECC_HW;
- ecc->size = info->chunk_size;
- mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
- ecc->strength = 16;
-
- } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
- info->ecc_bch = 1;
- info->nfullchunks = 2;
- info->ntotalchunks = 2;
- info->chunk_size = 2048;
- info->spare_size = 32;
- info->ecc_size = 32;
- ecc->mode = NAND_ECC_HW;
- ecc->size = info->chunk_size;
- mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
- ecc->strength = 16;
-
- /*
- * Required ECC: 8-bit correction per 512 bytes
- * Select: 16-bit correction per 1024 bytes
- */
- } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
- info->ecc_bch = 1;
- info->nfullchunks = 4;
- info->ntotalchunks = 5;
- info->chunk_size = 1024;
- info->spare_size = 0;
- info->last_chunk_size = 0;
- info->last_spare_size = 64;
- info->ecc_size = 32;
- ecc->mode = NAND_ECC_HW;
- ecc->size = info->chunk_size;
- mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
- ecc->strength = 16;
- } else {
- dev_err(&info->pdev->dev,
- "ECC strength %d at page size %d is not supported\n",
- strength, page_size);
- return -ENODEV;
- }
-
- dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
- ecc->strength, ecc->size);
- return 0;
-}
-
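
For the 8-bit/512B case above (nfullchunks = 4, ntotalchunks = 5 on a 4096-byte page), the following standalone sketch shows the per-step transfer lengths a chunked page operation would program into NDCB3: four full 1024-byte data steps followed by a final spare-only 64-byte step. Editor's illustration only, not part of the driver.

/* Illustration of the chunk accounting used by the extended cmdfunc path. */
#include <stdio.h>

int main(void)
{
	unsigned int nfullchunks = 4, ntotalchunks = 5;
	unsigned int chunk_size = 1024, spare_size = 0;
	unsigned int last_chunk_size = 0, last_spare_size = 64;
	unsigned int step, len;

	for (step = 0; step < ntotalchunks; step++) {
		if (step < nfullchunks)
			len = chunk_size + spare_size;
		else
			len = last_chunk_size + last_spare_size;
		/* this is the length written to NDCB3 for the naked step */
		printf("step %u: %u bytes\n", step, len);
	}
	return 0;
}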
-static int pxa3xx_nand_scan(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
- struct pxa3xx_nand_info *info = host->info_data;
- struct platform_device *pdev = info->pdev;
- struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret;
- uint16_t ecc_strength, ecc_step;
-
- if (pdata->keep_config) {
- pxa3xx_nand_detect_config(info);
- } else {
- ret = pxa3xx_nand_config_ident(info);
- if (ret)
- return ret;
- }
-
- if (info->reg_ndcr & NDCR_DWIDTH_M)
- chip->options |= NAND_BUSWIDTH_16;
-
- /* Device detection must be done with ECC disabled */
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
- info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
- nand_writel(info, NDECCCTRL, 0x0);
-
- if (pdata->flash_bbt)
- chip->bbt_options |= NAND_BBT_USE_FLASH;
-
- chip->ecc.strength = pdata->ecc_strength;
- chip->ecc.size = pdata->ecc_step_size;
-
- ret = nand_scan_ident(mtd, 1, NULL);
- if (ret)
- return ret;
-
- if (!pdata->keep_config) {
- ret = pxa3xx_nand_init(host);
- if (ret) {
- dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
- ret);
- return ret;
- }
- }
-
- if (chip->bbt_options & NAND_BBT_USE_FLASH) {
- /*
- * We'll use a bad block table stored in-flash and don't
- * allow writing the bad block marker to the flash.
- */
- chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
- chip->bbt_td = &bbt_main_descr;
- chip->bbt_md = &bbt_mirror_descr;
- }
-
- /*
-	 * If the page size is bigger than the FIFO size, let's check
-	 * we are given the right variant and then switch to the extended
-	 * (aka split) command handling.
- */
- if (mtd->writesize > PAGE_CHUNK_SIZE) {
- if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
- info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
- chip->cmdfunc = nand_cmdfunc_extended;
- } else {
- dev_err(&info->pdev->dev,
- "unsupported page size on this variant\n");
- return -ENODEV;
- }
- }
-
- ecc_strength = chip->ecc.strength;
- ecc_step = chip->ecc.size;
- if (!ecc_strength || !ecc_step) {
- ecc_strength = chip->ecc_strength_ds;
- ecc_step = chip->ecc_step_ds;
- }
-
- /* Set default ECC strength requirements on non-ONFI devices */
- if (ecc_strength < 1 && ecc_step < 1) {
- ecc_strength = 1;
- ecc_step = 512;
- }
-
- ret = pxa_ecc_init(info, mtd, ecc_strength,
- ecc_step, mtd->writesize);
- if (ret)
- return ret;
-
- /* calculate addressing information */
- if (mtd->writesize >= 2048)
- host->col_addr_cycles = 2;
- else
- host->col_addr_cycles = 1;
-
- /* release the initial buffer */
- kfree(info->data_buff);
-
- /* allocate the real data + oob buffer */
- info->buf_size = mtd->writesize + mtd->oobsize;
- ret = pxa3xx_nand_init_buff(info);
- if (ret)
- return ret;
- info->oob_buff = info->data_buff + mtd->writesize;
-
- if ((mtd->size >> chip->page_shift) > 65536)
- host->row_addr_cycles = 3;
- else
- host->row_addr_cycles = 2;
-
- if (!pdata->keep_config)
- pxa3xx_nand_config_tail(info);
-
- return nand_scan_tail(mtd);
-}
-
-static int alloc_nand_resource(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct pxa3xx_nand_platform_data *pdata;
- struct pxa3xx_nand_info *info;
- struct pxa3xx_nand_host *host;
- struct nand_chip *chip = NULL;
- struct mtd_info *mtd;
- struct resource *r;
- int ret, irq, cs;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata->num_cs <= 0) {
- dev_err(&pdev->dev, "invalid number of chip selects\n");
- return -ENODEV;
- }
-
- info = devm_kzalloc(&pdev->dev,
- sizeof(*info) + sizeof(*host) * pdata->num_cs,
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->pdev = pdev;
- info->variant = pxa3xx_nand_get_variant(pdev);
- for (cs = 0; cs < pdata->num_cs; cs++) {
- host = (void *)&info[1] + sizeof(*host) * cs;
- chip = &host->chip;
- nand_set_controller_data(chip, host);
- mtd = nand_to_mtd(chip);
- info->host[cs] = host;
- host->cs = cs;
- host->info_data = info;
- mtd->dev.parent = &pdev->dev;
- /* FIXME: all chips use the same device tree partitions */
- nand_set_flash_node(chip, np);
-
- nand_set_controller_data(chip, host);
- chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
- chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
- chip->controller = &info->controller;
- chip->waitfunc = pxa3xx_nand_waitfunc;
- chip->select_chip = pxa3xx_nand_select_chip;
- chip->read_word = pxa3xx_nand_read_word;
- chip->read_byte = pxa3xx_nand_read_byte;
- chip->read_buf = pxa3xx_nand_read_buf;
- chip->write_buf = pxa3xx_nand_write_buf;
- chip->options |= NAND_NO_SUBPAGE_WRITE;
- chip->cmdfunc = nand_cmdfunc;
- chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
- chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
- }
-
- nand_hw_control_init(chip->controller);
- info->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(info->clk)) {
- ret = PTR_ERR(info->clk);
- dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
- return ret;
- }
- ret = clk_prepare_enable(info->clk);
- if (ret < 0)
- return ret;
-
- if (!np && use_dma) {
- r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (r == NULL) {
- dev_err(&pdev->dev,
- "no resource defined for data DMA\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
- info->drcmr_dat = r->start;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource defined\n");
- ret = -ENXIO;
- goto fail_disable_clk;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(info->mmio_base)) {
- ret = PTR_ERR(info->mmio_base);
- dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
- goto fail_disable_clk;
- }
- info->mmio_phys = r->start;
-
- /* Allocate a buffer to allow flash detection */
- info->buf_size = INIT_BUFFER_SIZE;
- info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
- if (info->data_buff == NULL) {
- ret = -ENOMEM;
- goto fail_disable_clk;
- }
-
- /* initialize all interrupts to be disabled */
- disable_int(info, NDSR_MASK);
-
- ret = request_threaded_irq(irq, pxa3xx_nand_irq,
- pxa3xx_nand_irq_thread, IRQF_ONESHOT,
- pdev->name, info);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
- goto fail_free_buf;
- }
-
- platform_set_drvdata(pdev, info);
-
- return 0;
-
-fail_free_buf:
- free_irq(irq, info);
- kfree(info->data_buff);
-fail_disable_clk:
- clk_disable_unprepare(info->clk);
- return ret;
-}
-
-static int pxa3xx_nand_remove(struct platform_device *pdev)
-{
- struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
- struct pxa3xx_nand_platform_data *pdata;
- int irq, cs;
-
- if (!info)
- return 0;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- irq = platform_get_irq(pdev, 0);
- if (irq >= 0)
- free_irq(irq, info);
- pxa3xx_nand_free_buff(info);
-
- /*
- * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
- * In order to prevent a lockup of the system bus, the DFI bus
- * arbitration is granted to SMC upon driver removal. This is done by
-	 * setting the x_ARB_CNTL bit, which also prevents the NAND
-	 * controller from accessing the bus anymore.
- */
- nand_writel(info, NDCR,
- (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
- NFCV1_NDCR_ARB_CNTL);
- clk_disable_unprepare(info->clk);
-
- for (cs = 0; cs < pdata->num_cs; cs++)
- nand_release(nand_to_mtd(&info->host[cs]->chip));
- return 0;
-}
-
-static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
- struct pxa3xx_nand_platform_data *pdata;
- struct device_node *np = pdev->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
-
- if (!of_id)
- return 0;
-
- /*
-	 * Some SoCs like A7k/A8k need to manually enable the NAND
-	 * controller to avoid depending on the bootloader. This is done
-	 * through a single bit in the System Functions registers.
- */
- if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
- struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
- pdev->dev.of_node, "marvell,system-controller");
- u32 reg;
-
- if (IS_ERR(sysctrl_base))
- return PTR_ERR(sysctrl_base);
-
- regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
- reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
- regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
- }
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
- pdata->enable_arbiter = 1;
- if (of_get_property(np, "marvell,nand-keep-config", NULL))
- pdata->keep_config = 1;
- of_property_read_u32(np, "num-cs", &pdata->num_cs);
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
-static int pxa3xx_nand_probe(struct platform_device *pdev)
-{
- struct pxa3xx_nand_platform_data *pdata;
- struct pxa3xx_nand_info *info;
- int ret, cs, probe_success, dma_available;
-
- dma_available = IS_ENABLED(CONFIG_ARM) &&
- (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
- if (use_dma && !dma_available) {
- use_dma = 0;
- dev_warn(&pdev->dev,
- "This platform can't do DMA on this device\n");
- }
-
- ret = pxa3xx_nand_probe_dt(pdev);
- if (ret)
- return ret;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -ENODEV;
- }
-
- ret = alloc_nand_resource(pdev);
- if (ret)
- return ret;
-
- info = platform_get_drvdata(pdev);
- probe_success = 0;
- for (cs = 0; cs < pdata->num_cs; cs++) {
- struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
-
- /*
- * The mtd name matches the one used in 'mtdparts' kernel
-		 * parameter. This name cannot be changed, otherwise the
-		 * user's mtd partition configuration would get broken.
- */
- mtd->name = "pxa3xx_nand-0";
- info->cs = cs;
- ret = pxa3xx_nand_scan(mtd);
- if (ret) {
- dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
- cs);
- continue;
- }
-
- ret = mtd_device_register(mtd, pdata->parts[cs],
- pdata->nr_parts[cs]);
- if (!ret)
- probe_success = 1;
- }
-
- if (!probe_success) {
- pxa3xx_nand_remove(pdev);
- return -ENODEV;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int pxa3xx_nand_suspend(struct device *dev)
-{
- struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
-
- if (info->state) {
- dev_err(dev, "driver busy, state = %d\n", info->state);
- return -EAGAIN;
- }
-
- clk_disable(info->clk);
- return 0;
-}
-
-static int pxa3xx_nand_resume(struct device *dev)
-{
- struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(info->clk);
- if (ret < 0)
- return ret;
-
-	/* We don't want to handle an interrupt without calling the mtd routine */
- disable_int(info, NDCR_INT_MASK);
-
- /*
-	 * Directly set the chip select to an invalid value so that
-	 * the driver resets the timings according to the current
-	 * chip select at the beginning of cmdfunc.
- */
- info->cs = 0xff;
-
- /*
-	 * As the spec says, NDSR is updated to 0x1800 when the
-	 * nand_clk is disabled/enabled.
-	 * To prevent this from damaging the driver's state machine,
-	 * clear all status bits before resuming.
- */
- nand_writel(info, NDSR, NDSR_MASK);
-
- return 0;
-}
-#else
-#define pxa3xx_nand_suspend NULL
-#define pxa3xx_nand_resume NULL
-#endif
-
-static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
- .suspend = pxa3xx_nand_suspend,
- .resume = pxa3xx_nand_resume,
-};
-
-static struct platform_driver pxa3xx_nand_driver = {
- .driver = {
- .name = "pxa3xx-nand",
- .of_match_table = pxa3xx_nand_dt_ids,
- .pm = &pxa3xx_nand_pm_ops,
- },
- .probe = pxa3xx_nand_probe,
- .remove = pxa3xx_nand_remove,
-};
-
-module_platform_driver(pxa3xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/phy/ti/phy-da8xx-usb.c b/drivers/phy/ti/phy-da8xx-usb.c
index 1b82bff6330f..befb886ff121 100644
--- a/drivers/phy/ti/phy-da8xx-usb.c
+++ b/drivers/phy/ti/phy-da8xx-usb.c
@@ -20,6 +20,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
+#include <linux/platform_data/phy-da8xx-usb.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -145,6 +146,7 @@ static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
static int da8xx_usb_phy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct da8xx_usb_phy_platform_data *pdata = dev->platform_data;
struct device_node *node = dev->of_node;
struct da8xx_usb_phy *d_phy;
@@ -152,25 +154,25 @@ static int da8xx_usb_phy_probe(struct platform_device *pdev)
if (!d_phy)
return -ENOMEM;
- if (node)
+ if (pdata)
+ d_phy->regmap = pdata->cfgchip;
+ else
d_phy->regmap = syscon_regmap_lookup_by_compatible(
"ti,da830-cfgchip");
- else
- d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon");
if (IS_ERR(d_phy->regmap)) {
dev_err(dev, "Failed to get syscon\n");
return PTR_ERR(d_phy->regmap);
}
- d_phy->usb11_clk = devm_clk_get(dev, "usb11_phy");
+ d_phy->usb11_clk = devm_clk_get(dev, "usb1_clk48");
if (IS_ERR(d_phy->usb11_clk)) {
- dev_err(dev, "Failed to get usb11_phy clock\n");
+ dev_err(dev, "Failed to get usb1_clk48\n");
return PTR_ERR(d_phy->usb11_clk);
}
- d_phy->usb20_clk = devm_clk_get(dev, "usb20_phy");
+ d_phy->usb20_clk = devm_clk_get(dev, "usb0_clk48");
if (IS_ERR(d_phy->usb20_clk)) {
- dev_err(dev, "Failed to get usb20_phy clock\n");
+ dev_err(dev, "Failed to get usb0_clk48\n");
return PTR_ERR(d_phy->usb20_clk);
}
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 89bf4d6cb486..cb0237143dbe 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -132,12 +132,16 @@ static void sr_set_clk_length(struct omap_sr *sr)
struct clk *fck;
u32 fclk_speed;
- fck = clk_get(&sr->pdev->dev, "fck");
-
+ /* Try interconnect target module fck first if it already exists */
+ fck = clk_get(sr->pdev->dev.parent, "fck");
if (IS_ERR(fck)) {
- dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n",
- __func__, dev_name(&sr->pdev->dev));
- return;
+ fck = clk_get(&sr->pdev->dev, "fck");
+ if (IS_ERR(fck)) {
+ dev_err(&sr->pdev->dev,
+ "%s: unable to get fck for device %s\n",
+ __func__, dev_name(&sr->pdev->dev));
+ return;
+ }
}
fclk_speed = clk_get_rate(fck);
@@ -838,7 +842,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
omap_sr_autocomp_store, "%llu\n");
-static int __init omap_sr_probe(struct platform_device *pdev)
+static int omap_sr_probe(struct platform_device *pdev)
{
struct omap_sr *sr_info;
struct omap_sr_data *pdata = pdev->dev.platform_data;
@@ -898,6 +902,12 @@ static int __init omap_sr_probe(struct platform_device *pdev)
list_add(&sr_info->node, &sr_list);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto err_list_del;
+ }
+
/*
* Call into late init to do initializations that require
	 * both sr driver and sr class driver to be initialized.
@@ -966,12 +976,17 @@ static int __init omap_sr_probe(struct platform_device *pdev)
}
+ pm_runtime_put_sync(&pdev->dev);
+
return ret;
err_debugfs:
debugfs_remove_recursive(sr_info->dbg_dir);
err_list_del:
list_del(&sr_info->node);
+
+ pm_runtime_put_sync(&pdev->dev);
+
return ret;
}
@@ -1025,11 +1040,23 @@ static void omap_sr_shutdown(struct platform_device *pdev)
return;
}
+static const struct of_device_id omap_sr_match[] = {
+ { .compatible = "ti,omap3-smartreflex-core", },
+ { .compatible = "ti,omap3-smartreflex-mpu-iva", },
+ { .compatible = "ti,omap4-smartreflex-core", },
+ { .compatible = "ti,omap4-smartreflex-mpu", },
+ { .compatible = "ti,omap4-smartreflex-iva", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_sr_match);
+
static struct platform_driver smartreflex_driver = {
+ .probe = omap_sr_probe,
.remove = omap_sr_remove,
.shutdown = omap_sr_shutdown,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = omap_sr_match,
},
};
@@ -1048,7 +1075,7 @@ static int __init sr_init(void)
else
pr_warn("%s: No PMIC hook to init smartreflex\n", __func__);
- ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe);
+ ret = platform_driver_register(&smartreflex_driver);
if (ret) {
pr_err("%s: platform driver register failed for SR\n",
__func__);
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 5ad42f33e70c..665da3c8fbce 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_data/dmtimer-omap.h>
#include <linux/platform_data/pwm_omap_dmtimer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -37,7 +38,7 @@ struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
struct mutex mutex;
pwm_omap_dmtimer *dm_timer;
- struct pwm_omap_dmtimer_pdata *pdata;
+ const struct omap_dm_timer_ops *pdata;
struct platform_device *dm_timer_pdev;
};
@@ -242,19 +243,35 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *timer;
+ struct platform_device *timer_pdev;
struct pwm_omap_dmtimer_chip *omap;
- struct pwm_omap_dmtimer_pdata *pdata;
+ struct dmtimer_platform_data *timer_pdata;
+ const struct omap_dm_timer_ops *pdata;
pwm_omap_dmtimer *dm_timer;
u32 v;
- int status;
+ int ret = 0;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "Missing dmtimer platform data\n");
- return -EINVAL;
+ timer = of_parse_phandle(np, "ti,timers", 0);
+ if (!timer)
+ return -ENODEV;
+
+ timer_pdev = of_find_device_by_node(timer);
+ if (!timer_pdev) {
+ dev_err(&pdev->dev, "Unable to find Timer pdev\n");
+ ret = -ENODEV;
+ goto put;
}
- if (!pdata->request_by_node ||
+ timer_pdata = dev_get_platdata(&timer_pdev->dev);
+ if (!timer_pdata) {
+ dev_err(&pdev->dev, "dmtimer pdata structure NULL\n");
+ ret = -EINVAL;
+ goto put;
+ }
+
+ pdata = timer_pdata->timer_ops;
+
+ if (!pdata || !pdata->request_by_node ||
!pdata->free ||
!pdata->enable ||
!pdata->disable ||
@@ -267,21 +284,26 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
!pdata->set_prescaler ||
!pdata->write_counter) {
dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto put;
}
- timer = of_parse_phandle(np, "ti,timers", 0);
- if (!timer)
- return -ENODEV;
-
if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto put;
}
dm_timer = pdata->request_by_node(timer);
- if (!dm_timer)
- return -EPROBE_DEFER;
+ if (!dm_timer) {
+ ret = -EPROBE_DEFER;
+ goto put;
+ }
+
+put:
+ of_node_put(timer);
+ if (ret < 0)
+ return ret;
omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
if (!omap) {
@@ -291,13 +313,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
omap->pdata = pdata;
omap->dm_timer = dm_timer;
-
- omap->dm_timer_pdev = of_find_device_by_node(timer);
- if (!omap->dm_timer_pdev) {
- dev_err(&pdev->dev, "Unable to find timer pdev\n");
- omap->pdata->free(dm_timer);
- return -EINVAL;
- }
+ omap->dm_timer_pdev = timer_pdev;
/*
* Ensure that the timer is stopped before we allow PWM core to call
@@ -322,11 +338,11 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
mutex_init(&omap->mutex);
- status = pwmchip_add(&omap->chip);
- if (status < 0) {
+ ret = pwmchip_add(&omap->chip);
+ if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM\n");
omap->pdata->free(omap->dm_timer);
- return status;
+ return ret;
}
platform_set_drvdata(pdev, omap);
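
The reworked probe above first resolves the consumer's "ti,timers" phandle, takes the dmtimer ops from that timer device's platform data, and only then checks the "ti,timer-pwm" capability on the timer node. As a hedged sketch of the devicetree wiring this expects (the node names, the "ti,omap-dmtimer-pwm" compatible and the #pwm-cells value are assumptions, not taken from this patch):

	timer9: timer {
		/* ... usual timer properties ... */
		ti,timer-pwm;			/* capability checked by the probe */
	};

	dmtimer_pwm {
		compatible = "ti,omap-dmtimer-pwm";	/* assumed binding name */
		ti,timers = <&timer9>;			/* phandle parsed via of_parse_phandle() */
		#pwm-cells = <3>;			/* assumed cell count */
	};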
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 09550b1da56d..3bbe6114a420 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -3,8 +3,8 @@ config SOC_RENESAS
default y if ARCH_RENESAS
select SOC_BUS
select RST_RCAR if ARCH_RCAR_GEN1 || ARCH_RCAR_GEN2 || \
- ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77970 || \
- ARCH_R8A77995
+ ARCH_R8A7795 || ARCH_R8A7796 || ARCH_R8A77965 || \
+ ARCH_R8A77970 || ARCH_R8A77980 || ARCH_R8A77995
select SYSC_R8A7743 if ARCH_R8A7743
select SYSC_R8A7745 if ARCH_R8A7745
select SYSC_R8A7779 if ARCH_R8A7779
@@ -14,7 +14,9 @@ config SOC_RENESAS
select SYSC_R8A7794 if ARCH_R8A7794
select SYSC_R8A7795 if ARCH_R8A7795
select SYSC_R8A7796 if ARCH_R8A7796
+ select SYSC_R8A77965 if ARCH_R8A77965
select SYSC_R8A77970 if ARCH_R8A77970
+ select SYSC_R8A77980 if ARCH_R8A77980
select SYSC_R8A77995 if ARCH_R8A77995
if SOC_RENESAS
@@ -56,10 +58,18 @@ config SYSC_R8A7796
bool "R-Car M3-W System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A77965
+ bool "R-Car M3-N System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A77970
bool "R-Car V3M System Controller support" if COMPILE_TEST
select SYSC_RCAR
+config SYSC_R8A77980
+ bool "R-Car V3H System Controller support" if COMPILE_TEST
+ select SYSC_RCAR
+
config SYSC_R8A77995
bool "R-Car D3 System Controller support" if COMPILE_TEST
select SYSC_RCAR
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 845d62a08ce1..ccb5ec57a262 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -12,7 +12,9 @@ obj-$(CONFIG_SYSC_R8A7792) += r8a7792-sysc.o
obj-$(CONFIG_SYSC_R8A7794) += r8a7794-sysc.o
obj-$(CONFIG_SYSC_R8A7795) += r8a7795-sysc.o
obj-$(CONFIG_SYSC_R8A7796) += r8a7796-sysc.o
+obj-$(CONFIG_SYSC_R8A77965) += r8a77965-sysc.o
obj-$(CONFIG_SYSC_R8A77970) += r8a77970-sysc.o
+obj-$(CONFIG_SYSC_R8A77980) += r8a77980-sysc.o
obj-$(CONFIG_SYSC_R8A77995) += r8a77995-sysc.o
# Family
diff --git a/drivers/soc/renesas/r8a77965-sysc.c b/drivers/soc/renesas/r8a77965-sysc.c
new file mode 100644
index 000000000000..d7f7928e3c07
--- /dev/null
+++ b/drivers/soc/renesas/r8a77965-sysc.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car M3-N System Controller
+ * Copyright (C) 2018 Jacopo Mondi <jacopo+renesas@jmondi.org>
+ *
+ * Based on Renesas R-Car M3-W System Controller
+ * Copyright (C) 2016 Glider bvba
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77965-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77965_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77965_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca57-scu", 0x1c0, 0, R8A77965_PD_CA57_SCU, R8A77965_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca57-cpu0", 0x80, 0, R8A77965_PD_CA57_CPU0, R8A77965_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "ca57-cpu1", 0x80, 1, R8A77965_PD_CA57_CPU1, R8A77965_PD_CA57_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77965_PD_CR7, R8A77965_PD_ALWAYS_ON },
+ { "a3vc", 0x380, 0, R8A77965_PD_A3VC, R8A77965_PD_ALWAYS_ON },
+ { "a3vp", 0x340, 0, R8A77965_PD_A3VP, R8A77965_PD_ALWAYS_ON },
+ { "a2vc1", 0x3c0, 1, R8A77965_PD_A2VC1, R8A77965_PD_A3VC },
+ { "3dg-a", 0x100, 0, R8A77965_PD_3DG_A, R8A77965_PD_ALWAYS_ON },
+ { "3dg-b", 0x100, 1, R8A77965_PD_3DG_B, R8A77965_PD_3DG_A },
+ { "a3ir", 0x180, 0, R8A77965_PD_A3IR, R8A77965_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a77965_sysc_info __initconst = {
+ .areas = r8a77965_areas,
+ .num_areas = ARRAY_SIZE(r8a77965_areas),
+};
diff --git a/drivers/soc/renesas/r8a77970-sysc.c b/drivers/soc/renesas/r8a77970-sysc.c
index 8c614164718e..caf894f193ed 100644
--- a/drivers/soc/renesas/r8a77970-sysc.c
+++ b/drivers/soc/renesas/r8a77970-sysc.c
@@ -25,12 +25,12 @@ static const struct rcar_sysc_area r8a77970_areas[] __initconst = {
PD_CPU_NOCR },
{ "cr7", 0x240, 0, R8A77970_PD_CR7, R8A77970_PD_ALWAYS_ON },
{ "a3ir", 0x180, 0, R8A77970_PD_A3IR, R8A77970_PD_ALWAYS_ON },
- { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_ALWAYS_ON },
- { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A2IR0 },
- { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A2IR0 },
- { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A2IR0 },
- { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_ALWAYS_ON },
- { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A2SC0 },
+ { "a2ir0", 0x400, 0, R8A77970_PD_A2IR0, R8A77970_PD_A3IR },
+ { "a2ir1", 0x400, 1, R8A77970_PD_A2IR1, R8A77970_PD_A3IR },
+ { "a2ir2", 0x400, 2, R8A77970_PD_A2IR2, R8A77970_PD_A3IR },
+ { "a2ir3", 0x400, 3, R8A77970_PD_A2IR3, R8A77970_PD_A3IR },
+ { "a2sc0", 0x400, 4, R8A77970_PD_A2SC0, R8A77970_PD_A3IR },
+ { "a2sc1", 0x400, 5, R8A77970_PD_A2SC1, R8A77970_PD_A3IR },
};
const struct rcar_sysc_info r8a77970_sysc_info __initconst = {
diff --git a/drivers/soc/renesas/r8a77980-sysc.c b/drivers/soc/renesas/r8a77980-sysc.c
new file mode 100644
index 000000000000..9265fb525ef3
--- /dev/null
+++ b/drivers/soc/renesas/r8a77980-sysc.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas R-Car V3H System Controller
+ *
+ * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018 Cogent Embedded, Inc.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a77980-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a77980_areas[] __initconst = {
+ { "always-on", 0, 0, R8A77980_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca53-scu", 0x140, 0, R8A77980_PD_CA53_SCU, R8A77980_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca53-cpu0", 0x200, 0, R8A77980_PD_CA53_CPU0, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu1", 0x200, 1, R8A77980_PD_CA53_CPU1, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu2", 0x200, 2, R8A77980_PD_CA53_CPU2, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "ca53-cpu3", 0x200, 3, R8A77980_PD_CA53_CPU3, R8A77980_PD_CA53_SCU,
+ PD_CPU_NOCR },
+ { "cr7", 0x240, 0, R8A77980_PD_CR7, R8A77980_PD_ALWAYS_ON },
+ { "a3ir", 0x180, 0, R8A77980_PD_A3IR, R8A77980_PD_ALWAYS_ON },
+ { "a2ir0", 0x400, 0, R8A77980_PD_A2IR0, R8A77980_PD_A3IR },
+ { "a2ir1", 0x400, 1, R8A77980_PD_A2IR1, R8A77980_PD_A3IR },
+ { "a2ir2", 0x400, 2, R8A77980_PD_A2IR2, R8A77980_PD_A3IR },
+ { "a2ir3", 0x400, 3, R8A77980_PD_A2IR3, R8A77980_PD_A3IR },
+ { "a2ir4", 0x400, 4, R8A77980_PD_A2IR4, R8A77980_PD_A3IR },
+ { "a2ir5", 0x400, 5, R8A77980_PD_A2IR5, R8A77980_PD_A3IR },
+ { "a2sc0", 0x400, 6, R8A77980_PD_A2SC0, R8A77980_PD_A3IR },
+ { "a2sc1", 0x400, 7, R8A77980_PD_A2SC1, R8A77980_PD_A3IR },
+ { "a2sc2", 0x400, 8, R8A77980_PD_A2SC2, R8A77980_PD_A3IR },
+ { "a2sc3", 0x400, 9, R8A77980_PD_A2SC3, R8A77980_PD_A3IR },
+ { "a2sc4", 0x400, 10, R8A77980_PD_A2SC4, R8A77980_PD_A3IR },
+ { "a2pd0", 0x400, 11, R8A77980_PD_A2PD0, R8A77980_PD_A3IR },
+ { "a2pd1", 0x400, 12, R8A77980_PD_A2PD1, R8A77980_PD_A3IR },
+ { "a2cn", 0x400, 13, R8A77980_PD_A2CN, R8A77980_PD_A3IR },
+ { "a3vip", 0x2c0, 0, R8A77980_PD_A3VIP, R8A77980_PD_ALWAYS_ON },
+ { "a3vip1", 0x300, 0, R8A77980_PD_A3VIP1, R8A77980_PD_A3VIP },
+ { "a3vip2", 0x280, 0, R8A77980_PD_A3VIP2, R8A77980_PD_A3VIP },
+};
+
+const struct rcar_sysc_info r8a77980_sysc_info __initconst = {
+ .areas = r8a77980_areas,
+ .num_areas = ARRAY_SIZE(r8a77980_areas),
+};
diff --git a/drivers/soc/renesas/rcar-rst.c b/drivers/soc/renesas/rcar-rst.c
index 3316b028f231..8e9cb7996ab0 100644
--- a/drivers/soc/renesas/rcar-rst.c
+++ b/drivers/soc/renesas/rcar-rst.c
@@ -13,8 +13,18 @@
#include <linux/of_address.h>
#include <linux/soc/renesas/rcar-rst.h>
+#define WDTRSTCR_RESET 0xA55A0002
+#define WDTRSTCR 0x0054
+
+static int rcar_rst_enable_wdt_reset(void __iomem *base)
+{
+ iowrite32(WDTRSTCR_RESET, base + WDTRSTCR);
+ return 0;
+}
+
struct rst_config {
- unsigned int modemr; /* Mode Monitoring Register Offset */
+ unsigned int modemr; /* Mode Monitoring Register Offset */
+ int (*configure)(void __iomem *base); /* Platform specific configuration */
};
static const struct rst_config rcar_rst_gen1 __initconst = {
@@ -23,6 +33,11 @@ static const struct rst_config rcar_rst_gen1 __initconst = {
static const struct rst_config rcar_rst_gen2 __initconst = {
.modemr = 0x60,
+ .configure = rcar_rst_enable_wdt_reset,
+};
+
+static const struct rst_config rcar_rst_gen3 __initconst = {
+ .modemr = 0x60,
};
static const struct of_device_id rcar_rst_matches[] __initconst = {
@@ -38,11 +53,13 @@ static const struct of_device_id rcar_rst_matches[] __initconst = {
{ .compatible = "renesas,r8a7792-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7793-rst", .data = &rcar_rst_gen2 },
{ .compatible = "renesas,r8a7794-rst", .data = &rcar_rst_gen2 },
- /* R-Car Gen3 is handled like R-Car Gen2 */
- { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen2 },
- { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen2 },
- { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen2 },
- { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen2 },
+ /* R-Car Gen3 */
+ { .compatible = "renesas,r8a7795-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a7796-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77965-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77970-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77980-rst", .data = &rcar_rst_gen3 },
+ { .compatible = "renesas,r8a77995-rst", .data = &rcar_rst_gen3 },
{ /* sentinel */ }
};
@@ -71,6 +88,14 @@ static int __init rcar_rst_init(void)
rcar_rst_base = base;
cfg = match->data;
saved_mode = ioread32(base + cfg->modemr);
+ if (cfg->configure) {
+ error = cfg->configure(base);
+ if (error) {
+ pr_warn("%pOF: Cannot run SoC specific configuration\n",
+ np);
+ goto out_put;
+ }
+ }
pr_debug("%pOF: MODE = 0x%08x\n", np, saved_mode);
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 52c25a5e2646..faf20e719361 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -254,7 +254,7 @@ finalize:
pm_genpd_init(genpd, gov, false);
}
-static const struct of_device_id rcar_sysc_matches[] = {
+static const struct of_device_id rcar_sysc_matches[] __initconst = {
#ifdef CONFIG_SYSC_R8A7743
{ .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
#endif
@@ -284,9 +284,15 @@ static const struct of_device_id rcar_sysc_matches[] = {
#ifdef CONFIG_SYSC_R8A7796
{ .compatible = "renesas,r8a7796-sysc", .data = &r8a7796_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A77965
+ { .compatible = "renesas,r8a77965-sysc", .data = &r8a77965_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A77970
{ .compatible = "renesas,r8a77970-sysc", .data = &r8a77970_sysc_info },
#endif
+#ifdef CONFIG_SYSC_R8A77980
+ { .compatible = "renesas,r8a77980-sysc", .data = &r8a77980_sysc_info },
+#endif
#ifdef CONFIG_SYSC_R8A77995
{ .compatible = "renesas,r8a77995-sysc", .data = &r8a77995_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 9d9daf9eb91b..dcdc9ec8eba7 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -58,7 +58,9 @@ extern const struct rcar_sysc_info r8a7792_sysc_info;
extern const struct rcar_sysc_info r8a7794_sysc_info;
extern const struct rcar_sysc_info r8a7795_sysc_info;
extern const struct rcar_sysc_info r8a7796_sysc_info;
+extern const struct rcar_sysc_info r8a77965_sysc_info;
extern const struct rcar_sysc_info r8a77970_sysc_info;
+extern const struct rcar_sysc_info r8a77980_sysc_info;
extern const struct rcar_sysc_info r8a77995_sysc_info;
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index 926b7fd6db2d..ea71c413c926 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -144,11 +144,21 @@ static const struct renesas_soc soc_rcar_m3_w __initconst __maybe_unused = {
.id = 0x52,
};
+static const struct renesas_soc soc_rcar_m3_n __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x55,
+};
+
static const struct renesas_soc soc_rcar_v3m __initconst __maybe_unused = {
.family = &fam_rcar_gen3,
.id = 0x54,
};
+static const struct renesas_soc soc_rcar_v3h __initconst __maybe_unused = {
+ .family = &fam_rcar_gen3,
+ .id = 0x56,
+};
+
static const struct renesas_soc soc_rcar_d3 __initconst __maybe_unused = {
.family = &fam_rcar_gen3,
.id = 0x58,
@@ -209,9 +219,15 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7796
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
+#ifdef CONFIG_ARCH_R8A77965
+ { .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
+#endif
#ifdef CONFIG_ARCH_R8A77970
{ .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
#endif
+#ifdef CONFIG_ARCH_R8A77980
+ { .compatible = "renesas,r8a77980", .data = &soc_rcar_v3h },
+#endif
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
#endif
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index 39e152abe6b9..92770d84a288 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -28,6 +28,15 @@ config KEYSTONE_NAVIGATOR_DMA
If unsure, say N.
+config AMX3_PM
+ tristate "AMx3 Power Management"
+ depends on SOC_AM33XX || SOC_AM43XX
+ depends on WKUP_M3_IPC && TI_EMIF_SRAM && SRAM
+ help
+ Enable power management on AM335x and AM437x. Required for suspend to mem
+ and standby states on both AM335x and AM437x platforms and for deeper cpuidle
+ c-states on AM335x.
+
config WKUP_M3_IPC
tristate "TI AMx3 Wkup-M3 IPC Driver"
depends on WKUP_M3_RPROC
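
For reference, a minimal defconfig fragment that satisfies the dependency chain declared above (AMX3_PM needs WKUP_M3_IPC, TI_EMIF_SRAM and SRAM, and WKUP_M3_IPC in turn needs WKUP_M3_RPROC) might look like the following sketch; it is illustrative only and not part of this patch:

	# Sketch: enable the new AM335x/AM437x PM driver and its dependencies
	CONFIG_SOC_AM33XX=y
	CONFIG_SRAM=y
	CONFIG_TI_EMIF_SRAM=y
	CONFIG_WKUP_M3_RPROC=y
	CONFIG_WKUP_M3_IPC=y
	CONFIG_AMX3_PM=y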
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
index 8e205287f120..a22edc0b258a 100644
--- a/drivers/soc/ti/Makefile
+++ b/drivers/soc/ti/Makefile
@@ -5,5 +5,6 @@
obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss.o
knav_qmss-y := knav_qmss_queue.o knav_qmss_acc.o
obj-$(CONFIG_KEYSTONE_NAVIGATOR_DMA) += knav_dma.o
+obj-$(CONFIG_AMX3_PM) += pm33xx.o
obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o
obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
new file mode 100644
index 000000000000..652739c7f718
--- /dev/null
+++ b/drivers/soc/ti/pm33xx.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AM33XX Power Management Routines
+ *
+ * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Bedia, Dave Gerlach
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/pm33xx.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/sram.h>
+#include <linux/suspend.h>
+#include <linux/ti-emif-sram.h>
+#include <linux/wkup_m3_ipc.h>
+
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+#include <asm/system_misc.h>
+
+#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
+ (unsigned long)pm_sram->do_wfi)
+
+static int (*am33xx_do_wfi_sram)(unsigned long unused);
+static phys_addr_t am33xx_do_wfi_sram_phys;
+
+static struct gen_pool *sram_pool, *sram_pool_data;
+static unsigned long ocmcram_location, ocmcram_location_data;
+
+static struct am33xx_pm_platform_data *pm_ops;
+static struct am33xx_pm_sram_addr *pm_sram;
+
+static struct device *pm33xx_dev;
+static struct wkup_m3_ipc *m3_ipc;
+
+static u32 sram_suspend_address(unsigned long addr)
+{
+ return ((unsigned long)am33xx_do_wfi_sram +
+ AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
+}
+
+#ifdef CONFIG_SUSPEND
+static int am33xx_pm_suspend(suspend_state_t suspend_state)
+{
+ int i, ret = 0;
+
+ ret = pm_ops->soc_suspend((unsigned long)suspend_state,
+ am33xx_do_wfi_sram);
+
+ if (ret) {
+ dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
+ } else {
+ i = m3_ipc->ops->request_pm_status(m3_ipc);
+
+ switch (i) {
+ case 0:
+ dev_info(pm33xx_dev,
+ "PM: Successfully put all powerdomains to target state\n");
+ break;
+ case 1:
+ dev_err(pm33xx_dev,
+ "PM: Could not transition all powerdomains to target state\n");
+ ret = -1;
+ break;
+ default:
+ dev_err(pm33xx_dev,
+ "PM: CM3 returned unknown result = %d\n", i);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_enter(suspend_state_t suspend_state)
+{
+ int ret = 0;
+
+ switch (suspend_state) {
+ case PM_SUSPEND_MEM:
+ case PM_SUSPEND_STANDBY:
+ ret = am33xx_pm_suspend(suspend_state);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int am33xx_pm_begin(suspend_state_t state)
+{
+ int ret = -EINVAL;
+
+ switch (state) {
+ case PM_SUSPEND_MEM:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
+ break;
+ case PM_SUSPEND_STANDBY:
+ ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
+ break;
+ }
+
+ return ret;
+}
+
+static void am33xx_pm_end(void)
+{
+ m3_ipc->ops->finish_low_power(m3_ipc);
+}
+
+static int am33xx_pm_valid(suspend_state_t state)
+{
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct platform_suspend_ops am33xx_pm_ops = {
+ .begin = am33xx_pm_begin,
+ .end = am33xx_pm_end,
+ .enter = am33xx_pm_enter,
+ .valid = am33xx_pm_valid,
+};
+#endif /* CONFIG_SUSPEND */
+
+static void am33xx_pm_set_ipc_ops(void)
+{
+ u32 resume_address;
+ int temp;
+
+ temp = ti_emif_get_mem_type();
+ if (temp < 0) {
+ dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
+ return;
+ }
+ m3_ipc->ops->set_mem_type(m3_ipc, temp);
+
+ /* Physical resume address to be used by ROM code */
+ resume_address = am33xx_do_wfi_sram_phys +
+ *pm_sram->resume_offset + 0x4;
+
+ m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
+}
+
+static void am33xx_pm_free_sram(void)
+{
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ gen_pool_free(sram_pool_data, ocmcram_location_data,
+ sizeof(struct am33xx_pm_ro_sram_data));
+}
+
+/*
+ * Push the minimal suspend-resume code to SRAM
+ */
+static int am33xx_pm_alloc_sram(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
+ if (!np) {
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ sram_pool = of_gen_pool_get(np, "pm-sram", 0);
+ if (!sram_pool) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
+ if (!sram_pool_data) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
+ __func__);
+ ret = -ENODEV;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
+ if (!ocmcram_location) {
+ dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mpu_put_node;
+ }
+
+ ocmcram_location_data = gen_pool_alloc(sram_pool_data,
+ sizeof(struct emif_regs_amx3));
+ if (!ocmcram_location_data) {
+ dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
+ gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
+ ret = -ENOMEM;
+ }
+
+mpu_put_node:
+ of_node_put(np);
+ return ret;
+}
+
+static int am33xx_push_sram_idle(void)
+{
+ struct am33xx_pm_ro_sram_data ro_sram_data;
+ int ret;
+ u32 table_addr, ro_data_addr;
+ void *copy_addr;
+
+ ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
+ ro_sram_data.amx3_pm_sram_data_phys =
+ gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
+
+ /* Save physical address to calculate resume offset during pm init */
+ am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
+ ocmcram_location);
+
+ am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
+ pm_sram->do_wfi,
+ *pm_sram->do_wfi_sz);
+ if (!am33xx_do_wfi_sram) {
+ dev_err(pm33xx_dev,
+ "PM: %s: am33xx_do_wfi copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ table_addr =
+ sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
+ ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
+ if (ret) {
+ dev_dbg(pm33xx_dev,
+ "PM: %s: EMIF function copy failed\n", __func__);
+ return -EPROBE_DEFER;
+ }
+
+ ro_data_addr =
+ sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
+ copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
+ &ro_sram_data,
+ sizeof(ro_sram_data));
+ if (!copy_addr) {
+ dev_err(pm33xx_dev,
+ "PM: %s: ro_sram_data copy to sram failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int am33xx_pm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (!of_machine_is_compatible("ti,am33xx") &&
+ !of_machine_is_compatible("ti,am43"))
+ return -ENODEV;
+
+ pm_ops = dev->platform_data;
+ if (!pm_ops) {
+ dev_err(dev, "PM: Cannot get core PM ops!\n");
+ return -ENODEV;
+ }
+
+ pm_sram = pm_ops->get_sram_addrs();
+ if (!pm_sram) {
+ dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
+ return -ENODEV;
+ }
+
+ pm33xx_dev = dev;
+
+ ret = am33xx_pm_alloc_sram();
+ if (ret)
+ return ret;
+
+ ret = am33xx_push_sram_idle();
+ if (ret)
+ goto err_free_sram;
+
+ m3_ipc = wkup_m3_ipc_get();
+ if (!m3_ipc) {
+ dev_dbg(dev, "PM: Cannot get wkup_m3_ipc handle\n");
+ ret = -EPROBE_DEFER;
+ goto err_free_sram;
+ }
+
+ am33xx_pm_set_ipc_ops();
+
+#ifdef CONFIG_SUSPEND
+ suspend_set_ops(&am33xx_pm_ops);
+#endif /* CONFIG_SUSPEND */
+
+ ret = pm_ops->init();
+ if (ret) {
+ dev_err(dev, "Unable to call core pm init!\n");
+ ret = -ENODEV;
+ goto err_put_wkup_m3_ipc;
+ }
+
+ return 0;
+
+err_put_wkup_m3_ipc:
+ wkup_m3_ipc_put(m3_ipc);
+err_free_sram:
+ am33xx_pm_free_sram();
+ pm33xx_dev = NULL;
+ return ret;
+}
+
+static int am33xx_pm_remove(struct platform_device *pdev)
+{
+ suspend_set_ops(NULL);
+ wkup_m3_ipc_put(m3_ipc);
+ am33xx_pm_free_sram();
+ return 0;
+}
+
+static struct platform_driver am33xx_pm_driver = {
+ .driver = {
+ .name = "pm33xx",
+ },
+ .probe = am33xx_pm_probe,
+ .remove = am33xx_pm_remove,
+};
+module_platform_driver(am33xx_pm_driver);
+
+MODULE_ALIAS("platform:pm33xx");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("am33xx power management driver");