Diffstat (limited to 'drivers/clk/clk-qoriq.c')
 drivers/clk/clk-qoriq.c | 1354 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 1135 insertions(+), 219 deletions(-)
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index cda90a971e39..1ab0fb81c6a0 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -10,7 +10,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -19,213 +21,1029 @@
#include <linux/of.h>
#include <linux/slab.h>
-struct cmux_clk {
+#define PLL_DIV1 0
+#define PLL_DIV2 1
+#define PLL_DIV3 2
+#define PLL_DIV4 3
+
+#define PLATFORM_PLL 0
+#define CGA_PLL1 1
+#define CGA_PLL2 2
+#define CGA_PLL3 3
+#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */
+#define CGB_PLL1 4
+#define CGB_PLL2 5
+
+struct clockgen_pll_div {
+ struct clk *clk;
+ char name[32];
+};
+
+struct clockgen_pll {
+ struct clockgen_pll_div div[4];
+};
+
+#define CLKSEL_VALID 1
+#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */
+
+struct clockgen_sourceinfo {
+ u32 flags; /* CLKSEL_xxx */
+ int pll; /* CGx_PLLn */
+ int div; /* PLL_DIVn */
+};
+
+#define NUM_MUX_PARENTS 16
+
+struct clockgen_muxinfo {
+ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
+};
+
+#define NUM_HWACCEL 5
+#define NUM_CMUX 8
+
+struct clockgen;
+
+/*
+ * cmux freq must be >= platform pll.
+ * If not set, cmux freq must be >= platform pll/2
+ */
+#define CG_CMUX_GE_PLAT 1
+
+#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */
+#define CG_VER3 4 /* version 3 cg: reg layout different */
+#define CG_LITTLE_ENDIAN 8
+
+struct clockgen_chipinfo {
+ const char *compat, *guts_compat;
+ const struct clockgen_muxinfo *cmux_groups[2];
+ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
+ void (*init_periph)(struct clockgen *cg);
+ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */
+ u32 pll_mask; /* 1 << n bit set if PLL n is valid */
+ u32 flags; /* CG_xxx */
+};
+
+struct clockgen {
+ struct device_node *node;
+ void __iomem *regs;
+ struct clockgen_chipinfo info; /* mutable copy */
+ struct clk *sysclk;
+ struct clockgen_pll pll[6];
+ struct clk *cmux[NUM_CMUX];
+ struct clk *hwaccel[NUM_HWACCEL];
+ struct clk *fman[2];
+ struct ccsr_guts __iomem *guts;
+};
+
+static struct clockgen clockgen;
+
+static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
+{
+ if (cg->info.flags & CG_LITTLE_ENDIAN)
+ iowrite32(val, reg);
+ else
+ iowrite32be(val, reg);
+}
+
+static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
+{
+ u32 val;
+
+ if (cg->info.flags & CG_LITTLE_ENDIAN)
+ val = ioread32(reg);
+ else
+ val = ioread32be(reg);
+
+ return val;
+}
+
+static const struct clockgen_muxinfo p2041_cmux_grp1 = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ }
+};
+
+static const struct clockgen_muxinfo p2041_cmux_grp2 = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo p5020_cmux_grp1 = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
+ }
+};
+
+static const struct clockgen_muxinfo p5020_cmux_grp2 = {
+ {
+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo p5040_cmux_grp1 = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo p5040_cmux_grp2 = {
+ {
+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo p4080_cmux_grp1 = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
+ }
+};
+
+static const struct clockgen_muxinfo p4080_cmux_grp2 = {
+ {
+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
+ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
+ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
+ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
+ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo t1023_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ }
+};
+
+static const struct clockgen_muxinfo t1040_cmux = {
+ {
+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ }
+};
+
+
+static const struct clockgen_muxinfo clockgen2_cmux_cga = {
+ {
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
+ },
+};
+
+static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
+ {
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ },
+};
+
+static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
+ {
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1043a_hwa1 = {
+ {
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ {},
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo ls1043a_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo t1023_hwa1 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo t1023_hwa2 = {
+ {
+ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ },
+};
+
+static const struct clockgen_muxinfo t2080_hwa1 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo t2080_hwa2 = {
+ {
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo t4240_hwa1 = {
+ {
+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
+ {},
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
+ },
+};
+
+static const struct clockgen_muxinfo t4240_hwa4 = {
+ {
+ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
+ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
+ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
+ },
+};
+
+static const struct clockgen_muxinfo t4240_hwa5 = {
+ {
+ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
+ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
+ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
+ [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
+ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
+ },
+};
+
+#define RCWSR7_FM1_CLK_SEL 0x40000000
+#define RCWSR7_FM2_CLK_SEL 0x20000000
+#define RCWSR7_HWA_ASYNC_DIV 0x04000000
+
+static void __init p2041_init_periph(struct clockgen *cg)
+{
+ u32 reg;
+
+ reg = ioread32be(&cg->guts->rcwsr[7]);
+
+ if (reg & RCWSR7_FM1_CLK_SEL)
+ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
+ else
+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+}
+
+static void __init p4080_init_periph(struct clockgen *cg)
+{
+ u32 reg;
+
+ reg = ioread32be(&cg->guts->rcwsr[7]);
+
+ if (reg & RCWSR7_FM1_CLK_SEL)
+ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
+ else
+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+
+ if (reg & RCWSR7_FM2_CLK_SEL)
+ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
+ else
+ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+}
+
+static void __init p5020_init_periph(struct clockgen *cg)
+{
+ u32 reg;
+ int div = PLL_DIV2;
+
+ reg = ioread32be(&cg->guts->rcwsr[7]);
+ if (reg & RCWSR7_HWA_ASYNC_DIV)
+ div = PLL_DIV4;
+
+ if (reg & RCWSR7_FM1_CLK_SEL)
+ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
+ else
+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+}
+
+static void __init p5040_init_periph(struct clockgen *cg)
+{
+ u32 reg;
+ int div = PLL_DIV2;
+
+ reg = ioread32be(&cg->guts->rcwsr[7]);
+ if (reg & RCWSR7_HWA_ASYNC_DIV)
+ div = PLL_DIV4;
+
+ if (reg & RCWSR7_FM1_CLK_SEL)
+ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
+ else
+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+
+ if (reg & RCWSR7_FM2_CLK_SEL)
+ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
+ else
+ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
+}
+
+static void __init t1023_init_periph(struct clockgen *cg)
+{
+ cg->fman[0] = cg->hwaccel[1];
+}
+
+static void __init t1040_init_periph(struct clockgen *cg)
+{
+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
+}
+
+static void __init t2080_init_periph(struct clockgen *cg)
+{
+ cg->fman[0] = cg->hwaccel[0];
+}
+
+static void __init t4240_init_periph(struct clockgen *cg)
+{
+ cg->fman[0] = cg->hwaccel[3];
+ cg->fman[1] = cg->hwaccel[4];
+}
+
+static const struct clockgen_chipinfo chipinfo[] = {
+ {
+ .compat = "fsl,b4420-clockgen",
+ .guts_compat = "fsl,b4860-device-config",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
+ },
+ .hwaccel = {
+ &t2080_hwa1
+ },
+ .cmux_to_group = {
+ 0, 1, 1, 1, -1
+ },
+ .pll_mask = 0x3f,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,b4860-clockgen",
+ .guts_compat = "fsl,b4860-device-config",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
+ },
+ .hwaccel = {
+ &t2080_hwa1
+ },
+ .cmux_to_group = {
+ 0, 1, 1, 1, -1
+ },
+ .pll_mask = 0x3f,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,ls1021a-clockgen",
+ .cmux_groups = {
+ &t1023_cmux
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x03,
+ },
+ {
+ .compat = "fsl,ls1043a-clockgen",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &t1040_cmux
+ },
+ .hwaccel = {
+ &ls1043a_hwa1, &ls1043a_hwa2
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,ls2080a-clockgen",
+ .cmux_groups = {
+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb
+ },
+ .cmux_to_group = {
+ 0, 0, 1, 1, -1
+ },
+ .pll_mask = 0x37,
+ .flags = CG_VER3 | CG_LITTLE_ENDIAN,
+ },
+ {
+ .compat = "fsl,p2041-clockgen",
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p2041_init_periph,
+ .cmux_groups = {
+ &p2041_cmux_grp1, &p2041_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 0, 1, 1, -1
+ },
+ .pll_mask = 0x07,
+ },
+ {
+ .compat = "fsl,p3041-clockgen",
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p2041_init_periph,
+ .cmux_groups = {
+ &p2041_cmux_grp1, &p2041_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 0, 1, 1, -1
+ },
+ .pll_mask = 0x07,
+ },
+ {
+ .compat = "fsl,p4080-clockgen",
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p4080_init_periph,
+ .cmux_groups = {
+ &p4080_cmux_grp1, &p4080_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 0, 0, 0, 1, 1, 1, 1
+ },
+ .pll_mask = 0x1f,
+ },
+ {
+ .compat = "fsl,p5020-clockgen",
+ .guts_compat = "fsl,qoriq-device-config-1.0",
+ .init_periph = p5020_init_periph,
+ .cmux_groups = {
+ &p2041_cmux_grp1, &p2041_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 1, -1
+ },
+ .pll_mask = 0x07,
+ },
+ {
+ .compat = "fsl,p5040-clockgen",
+ .guts_compat = "fsl,p5040-device-config",
+ .init_periph = p5040_init_periph,
+ .cmux_groups = {
+ &p5040_cmux_grp1, &p5040_cmux_grp2
+ },
+ .cmux_to_group = {
+ 0, 0, 1, 1, -1
+ },
+ .pll_mask = 0x0f,
+ },
+ {
+ .compat = "fsl,t1023-clockgen",
+ .guts_compat = "fsl,t1023-device-config",
+ .init_periph = t1023_init_periph,
+ .cmux_groups = {
+ &t1023_cmux
+ },
+ .hwaccel = {
+ &t1023_hwa1, &t1023_hwa2
+ },
+ .cmux_to_group = {
+ 0, 0, -1
+ },
+ .pll_mask = 0x03,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,t1040-clockgen",
+ .guts_compat = "fsl,t1040-device-config",
+ .init_periph = t1040_init_periph,
+ .cmux_groups = {
+ &t1040_cmux
+ },
+ .cmux_to_group = {
+ 0, 0, 0, 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,t2080-clockgen",
+ .guts_compat = "fsl,t2080-device-config",
+ .init_periph = t2080_init_periph,
+ .cmux_groups = {
+ &clockgen2_cmux_cga12
+ },
+ .hwaccel = {
+ &t2080_hwa1, &t2080_hwa2
+ },
+ .cmux_to_group = {
+ 0, -1
+ },
+ .pll_mask = 0x07,
+ .flags = CG_PLL_8BIT,
+ },
+ {
+ .compat = "fsl,t4240-clockgen",
+ .guts_compat = "fsl,t4240-device-config",
+ .init_periph = t4240_init_periph,
+ .cmux_groups = {
+ &clockgen2_cmux_cga, &clockgen2_cmux_cgb
+ },
+ .hwaccel = {
+ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
+ },
+ .cmux_to_group = {
+ 0, 0, 1, -1
+ },
+ .pll_mask = 0x3f,
+ .flags = CG_PLL_8BIT,
+ },
+ {},
+};
+
+struct mux_hwclock {
struct clk_hw hw;
- void __iomem *reg;
- unsigned int clk_per_pll;
- u32 flags;
+ struct clockgen *cg;
+ const struct clockgen_muxinfo *info;
+ u32 __iomem *reg;
+ u8 parent_to_clksel[NUM_MUX_PARENTS];
+ s8 clksel_to_parent[NUM_MUX_PARENTS];
+ int num_parents;
};
-#define PLL_KILL BIT(31)
+#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw)
+#define CLKSEL_MASK 0x78000000
#define CLKSEL_SHIFT 27
-#define CLKSEL_ADJUST BIT(0)
-#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw)
-static int cmux_set_parent(struct clk_hw *hw, u8 idx)
+static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
- struct cmux_clk *clk = to_cmux_clk(hw);
+ struct mux_hwclock *hwc = to_mux_hwclock(hw);
u32 clksel;
- clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
- if (clk->flags & CLKSEL_ADJUST)
- clksel += 8;
- clksel = (clksel & 0xf) << CLKSEL_SHIFT;
- iowrite32be(clksel, clk->reg);
+ if (idx >= hwc->num_parents)
+ return -EINVAL;
+
+ clksel = hwc->parent_to_clksel[idx];
+ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);
return 0;
}
-static u8 cmux_get_parent(struct clk_hw *hw)
+static u8 mux_get_parent(struct clk_hw *hw)
{
- struct cmux_clk *clk = to_cmux_clk(hw);
+ struct mux_hwclock *hwc = to_mux_hwclock(hw);
u32 clksel;
+ s8 ret;
- clksel = ioread32be(clk->reg);
- clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
- if (clk->flags & CLKSEL_ADJUST)
- clksel -= 8;
- clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
+ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
- return clksel;
+ ret = hwc->clksel_to_parent[clksel];
+ if (ret < 0) {
+ pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
+ return 0;
+ }
+
+ return ret;
}
static const struct clk_ops cmux_ops = {
- .get_parent = cmux_get_parent,
- .set_parent = cmux_set_parent,
+ .get_parent = mux_get_parent,
+ .set_parent = mux_set_parent,
};
-static void __init core_mux_init(struct device_node *np)
+/*
+ * Don't allow setting for now, as the clock options haven't been
+ * sanitized for additional restrictions.
+ */
+static const struct clk_ops hwaccel_ops = {
+ .get_parent = mux_get_parent,
+};
+
+static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ int idx)
{
- struct clk *clk;
- struct clk_init_data init;
- struct cmux_clk *cmux_clk;
- struct device_node *node;
- int rc, count, i;
- u32 offset;
- const char *clk_name;
- const char **parent_names;
- struct of_phandle_args clkspec;
+ int pll, div;
- rc = of_property_read_u32(np, "reg", &offset);
- if (rc) {
- pr_err("%s: could not get reg property\n", np->name);
- return;
- }
+ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
+ return NULL;
- /* get the input clock source count */
- count = of_property_count_strings(np, "clock-names");
- if (count < 0) {
- pr_err("%s: get clock count error\n", np->name);
- return;
- }
- parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
- if (!parent_names)
- return;
+ pll = hwc->info->clksel[idx].pll;
+ div = hwc->info->clksel[idx].div;
- for (i = 0; i < count; i++)
- parent_names[i] = of_clk_get_parent_name(np, i);
+ return &cg->pll[pll].div[div];
+}
- cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
- if (!cmux_clk)
- goto err_name;
+static struct clk * __init create_mux_common(struct clockgen *cg,
+ struct mux_hwclock *hwc,
+ const struct clk_ops *ops,
+ unsigned long min_rate,
+ unsigned long pct80_rate,
+ const char *fmt, int idx)
+{
+ struct clk_init_data init = {};
+ struct clk *clk;
+ const struct clockgen_pll_div *div;
+ const char *parent_names[NUM_MUX_PARENTS];
+ char name[32];
+ int i, j;
- cmux_clk->reg = of_iomap(np, 0);
- if (!cmux_clk->reg) {
- pr_err("%s: could not map register\n", __func__);
- goto err_clk;
- }
+ snprintf(name, sizeof(name), fmt, idx);
- rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
- &clkspec);
- if (rc) {
- pr_err("%s: parse clock node error\n", __func__);
- goto err_clk;
- }
+ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
+ unsigned long rate;
- cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
- "clock-output-names");
- of_node_put(clkspec.np);
+ hwc->clksel_to_parent[i] = -1;
- node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
- if (node && (offset >= 0x80))
- cmux_clk->flags = CLKSEL_ADJUST;
+ div = get_pll_div(cg, hwc, i);
+ if (!div)
+ continue;
- rc = of_property_read_string_index(np, "clock-output-names",
- 0, &clk_name);
- if (rc) {
- pr_err("%s: read clock names error\n", np->name);
- goto err_clk;
+ rate = clk_get_rate(div->clk);
+
+ if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
+ rate > pct80_rate)
+ continue;
+ if (rate < min_rate)
+ continue;
+
+ parent_names[j] = div->name;
+ hwc->parent_to_clksel[j] = i;
+ hwc->clksel_to_parent[i] = j;
+ j++;
}
- init.name = clk_name;
- init.ops = &cmux_ops;
+ init.name = name;
+ init.ops = ops;
init.parent_names = parent_names;
- init.num_parents = count;
+ init.num_parents = hwc->num_parents = j;
init.flags = 0;
- cmux_clk->hw.init = &init;
+ hwc->hw.init = &init;
+ hwc->cg = cg;
- clk = clk_register(NULL, &cmux_clk->hw);
+ clk = clk_register(NULL, &hwc->hw);
if (IS_ERR(clk)) {
- pr_err("%s: could not register clock\n", clk_name);
- goto err_clk;
+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
+ PTR_ERR(clk));
+ kfree(hwc);
+ return NULL;
+ }
+
+ return clk;
+}
+
+static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
+{
+ struct mux_hwclock *hwc;
+ const struct clockgen_pll_div *div;
+ unsigned long plat_rate, min_rate;
+ u64 pct80_rate;
+ u32 clksel;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return NULL;
+
+ hwc->reg = cg->regs + 0x20 * idx;
+ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];
+
+ /*
+ * Find the rate for the default clksel, and treat it as the
+ * maximum rated core frequency. If this is an incorrect
+ * assumption, certain clock options (possibly including the
+ * default clksel) may be inappropriately excluded on certain
+ * chips.
+ */
+ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
+ div = get_pll_div(cg, hwc, clksel);
+ if (!div)
+ return NULL;
+
+ pct80_rate = clk_get_rate(div->clk);
+ pct80_rate *= 8;
+ do_div(pct80_rate, 10);
+
+ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);
+
+ if (cg->info.flags & CG_CMUX_GE_PLAT)
+ min_rate = plat_rate;
+ else
+ min_rate = plat_rate / 2;
+
+ return create_mux_common(cg, hwc, &cmux_ops, min_rate,
+ pct80_rate, "cg-cmux%d", idx);
+}
+
+static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
+{
+ struct mux_hwclock *hwc;
+
+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
+ if (!hwc)
+ return NULL;
+
+ hwc->reg = cg->regs + 0x20 * idx + 0x10;
+ hwc->info = cg->info.hwaccel[idx];
+
+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0,
+ "cg-hwaccel%d", idx);
+}
+
+static void __init create_muxes(struct clockgen *cg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
+ if (cg->info.cmux_to_group[i] < 0)
+ break;
+ if (cg->info.cmux_to_group[i] >=
+ ARRAY_SIZE(cg->info.cmux_groups)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ cg->cmux[i] = create_one_cmux(cg, i);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
+ if (!cg->info.hwaccel[i])
+ continue;
+
+ cg->hwaccel[i] = create_one_hwaccel(cg, i);
}
+}
+
+static void __init clockgen_init(struct device_node *np);
+
+/* Legacy nodes may get probed before the parent clockgen node */
+static void __init legacy_init_clockgen(struct device_node *np)
+{
+ if (!clockgen.node)
+ clockgen_init(of_get_parent(np));
+}
+
+/* Legacy node */
+static void __init core_mux_init(struct device_node *np)
+{
+ struct clk *clk;
+ struct resource res;
+ int idx, rc;
+
+ legacy_init_clockgen(np);
+
+ if (of_address_to_resource(np, 0, &res))
+ return;
+
+ idx = (res.start & 0xf0) >> 5;
+ clk = clockgen.cmux[idx];
rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
if (rc) {
- pr_err("Could not register clock provider for node:%s\n",
- np->name);
- goto err_clk;
+ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
+ __func__, np->name, rc);
+ return;
}
- goto err_name;
+}
+
+static struct clk *sysclk_from_fixed(struct device_node *node, const char *name)
+{
+ u32 rate;
+
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return ERR_PTR(-ENODEV);
-err_clk:
- kfree(cmux_clk);
-err_name:
- /* free *_names because they are reallocated when registered */
- kfree(parent_names);
+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
}
-static void __init core_pll_init(struct device_node *np)
+static struct clk *sysclk_from_parent(const char *name)
+{
+ struct clk *clk;
+ const char *parent_name;
+
+ clk = of_clk_get(clockgen.node, 0);
+ if (IS_ERR(clk))
+ return clk;
+
+ /* Register the input clock under the desired name. */
+ parent_name = __clk_get_name(clk);
+ clk = clk_register_fixed_factor(NULL, name, parent_name,
+ 0, 1, 1);
+ if (IS_ERR(clk))
+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
+ PTR_ERR(clk));
+
+ return clk;
+}
+
+static struct clk * __init create_sysclk(const char *name)
+{
+ struct device_node *sysclk;
+ struct clk *clk;
+
+ clk = sysclk_from_fixed(clockgen.node, name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ clk = sysclk_from_parent(name);
+ if (!IS_ERR(clk))
+ return clk;
+
+ sysclk = of_get_child_by_name(clockgen.node, "sysclk");
+ if (sysclk) {
+ clk = sysclk_from_fixed(sysclk, name);
+ if (!IS_ERR(clk))
+ return clk;
+ }
+
+ pr_err("%s: No input clock\n", __func__);
+ return NULL;
+}
+
+/* Legacy node */
+static void __init sysclk_init(struct device_node *node)
{
+ struct clk *clk;
+
+ legacy_init_clockgen(node);
+
+ clk = clockgen.sysclk;
+ if (clk)
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+
+#define PLL_KILL BIT(31)
+
+static void __init create_one_pll(struct clockgen *cg, int idx)
+{
+ u32 __iomem *reg;
u32 mult;
- int i, rc, count;
- const char *clk_name, *parent_name;
- struct clk_onecell_data *onecell_data;
- struct clk **subclks;
- void __iomem *base;
+ struct clockgen_pll *pll = &cg->pll[idx];
+ int i;
- base = of_iomap(np, 0);
- if (!base) {
- pr_err("iomap error\n");
+ if (!(cg->info.pll_mask & (1 << idx)))
return;
+
+ if (cg->info.flags & CG_VER3) {
+ switch (idx) {
+ case PLATFORM_PLL:
+ reg = cg->regs + 0x60080;
+ break;
+ case CGA_PLL1:
+ reg = cg->regs + 0x80;
+ break;
+ case CGA_PLL2:
+ reg = cg->regs + 0xa0;
+ break;
+ case CGB_PLL1:
+ reg = cg->regs + 0x10080;
+ break;
+ case CGB_PLL2:
+ reg = cg->regs + 0x100a0;
+ break;
+ default:
+ WARN_ONCE(1, "index %d\n", idx);
+ return;
+ }
+ } else {
+ if (idx == PLATFORM_PLL)
+ reg = cg->regs + 0xc00;
+ else
+ reg = cg->regs + 0x800 + 0x20 * (idx - 1);
}
- /* get the multiple of PLL */
- mult = ioread32be(base);
+ /* Get the multiple of PLL */
+ mult = cg_in(cg, reg);
- /* check if this PLL is disabled */
+ /* Check if this PLL is disabled */
if (mult & PLL_KILL) {
- pr_debug("PLL:%s is disabled\n", np->name);
- goto err_map;
+ pr_debug("%s(): pll %p disabled\n", __func__, reg);
+ return;
}
- mult = (mult >> 1) & 0x3f;
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name) {
- pr_err("PLL: %s must have a parent\n", np->name);
- goto err_map;
+ if ((cg->info.flags & CG_VER3) ||
+ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
+ mult = (mult & GENMASK(8, 1)) >> 1;
+ else
+ mult = (mult & GENMASK(6, 1)) >> 1;
+
+ for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
+ struct clk *clk;
+
+ snprintf(pll->div[i].name, sizeof(pll->div[i].name),
+ "cg-pll%d-div%d", idx, i + 1);
+
+ clk = clk_register_fixed_factor(NULL,
+ pll->div[i].name, "cg-sysclk", 0, mult, i + 1);
+ if (IS_ERR(clk)) {
+ pr_err("%s: %s: register failed %ld\n",
+ __func__, pll->div[i].name, PTR_ERR(clk));
+ continue;
+ }
+
+ pll->div[i].clk = clk;
}
+}
+static void __init create_plls(struct clockgen *cg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
+ create_one_pll(cg, i);
+}
+
+static void __init legacy_pll_init(struct device_node *np, int idx)
+{
+ struct clockgen_pll *pll;
+ struct clk_onecell_data *onecell_data;
+ struct clk **subclks;
+ int count, rc;
+
+ legacy_init_clockgen(np);
+
+ pll = &clockgen.pll[idx];
count = of_property_count_strings(np, "clock-output-names");
- if (count < 0 || count > 4) {
- pr_err("%s: clock is not supported\n", np->name);
- goto err_map;
- }
- subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
+ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
if (!subclks)
- goto err_map;
+ return;
onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
if (!onecell_data)
goto err_clks;
- for (i = 0; i < count; i++) {
- rc = of_property_read_string_index(np, "clock-output-names",
- i, &clk_name);
- if (rc) {
- pr_err("%s: could not get clock names\n", np->name);
- goto err_cell;
- }
-
- /*
- * when count == 4, there are 4 output clocks:
- * /1, /2, /3, /4 respectively
- * when count < 4, there are at least 2 output clocks:
- * /1, /2, (/4, if count == 3) respectively.
- */
- if (count == 4)
- subclks[i] = clk_register_fixed_factor(NULL, clk_name,
- parent_name, 0, mult, 1 + i);
- else
-
- subclks[i] = clk_register_fixed_factor(NULL, clk_name,
- parent_name, 0, mult, 1 << i);
-
- if (IS_ERR(subclks[i])) {
- pr_err("%s: could not register clock\n", clk_name);
- goto err_cell;
- }
+ if (count <= 3) {
+ subclks[0] = pll->div[0].clk;
+ subclks[1] = pll->div[1].clk;
+ subclks[2] = pll->div[3].clk;
+ } else {
+ subclks[0] = pll->div[0].clk;
+ subclks[1] = pll->div[1].clk;
+ subclks[2] = pll->div[2].clk;
+ subclks[3] = pll->div[3].clk;
}
onecell_data->clks = subclks;
@@ -233,125 +1051,223 @@ static void __init core_pll_init(struct device_node *np)
rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
if (rc) {
- pr_err("Could not register clk provider for node:%s\n",
- np->name);
+ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
+ __func__, np->name, rc);
goto err_cell;
}
- iounmap(base);
return;
err_cell:
kfree(onecell_data);
err_clks:
kfree(subclks);
-err_map:
- iounmap(base);
}
-static void __init sysclk_init(struct device_node *node)
+/* Legacy node */
+static void __init pltfrm_pll_init(struct device_node *np)
{
- struct clk *clk;
- const char *clk_name = node->name;
- struct device_node *np = of_get_parent(node);
- u32 rate;
+ legacy_pll_init(np, PLATFORM_PLL);
+}
- if (!np) {
- pr_err("could not get parent node\n");
+/* Legacy node */
+static void __init core_pll_init(struct device_node *np)
+{
+ struct resource res;
+ int idx;
+
+ if (of_address_to_resource(np, 0, &res))
return;
+
+ if ((res.start & 0xfff) == 0xc00) {
+ /*
+ * ls1021a devtree labels the platform PLL
+ * with the core PLL compatible
+ */
+ pltfrm_pll_init(np);
+ } else {
+ idx = (res.start & 0xf0) >> 5;
+ legacy_pll_init(np, CGA_PLL1 + idx);
}
+}
- if (of_property_read_u32(np, "clock-frequency", &rate)) {
- of_node_put(node);
- return;
+static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ struct clockgen *cg = data;
+ struct clk *clk;
+ struct clockgen_pll *pll;
+ u32 type, idx;
+
+ if (clkspec->args_count < 2) {
+ pr_err("%s: insufficient phandle args\n", __func__);
+ return ERR_PTR(-EINVAL);
}
- of_property_read_string(np, "clock-output-names", &clk_name);
+ type = clkspec->args[0];
+ idx = clkspec->args[1];
- clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
- if (!IS_ERR(clk))
- of_clk_add_provider(np, of_clk_src_simple_get, clk);
+ switch (type) {
+ case 0:
+ if (idx != 0)
+ goto bad_args;
+ clk = cg->sysclk;
+ break;
+ case 1:
+ if (idx >= ARRAY_SIZE(cg->cmux))
+ goto bad_args;
+ clk = cg->cmux[idx];
+ break;
+ case 2:
+ if (idx >= ARRAY_SIZE(cg->hwaccel))
+ goto bad_args;
+ clk = cg->hwaccel[idx];
+ break;
+ case 3:
+ if (idx >= ARRAY_SIZE(cg->fman))
+ goto bad_args;
+ clk = cg->fman[idx];
+ break;
+ case 4:
+ pll = &cg->pll[PLATFORM_PLL];
+ if (idx >= ARRAY_SIZE(pll->div))
+ goto bad_args;
+ clk = pll->div[idx].clk;
+ break;
+ default:
+ goto bad_args;
+ }
+
+ if (!clk)
+ return ERR_PTR(-ENOENT);
+ return clk;
+
+bad_args:
+ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
+ return ERR_PTR(-EINVAL);
}
-static void __init pltfrm_pll_init(struct device_node *np)
+#ifdef CONFIG_PPC
+#include <asm/mpc85xx.h>
+
+static const u32 a4510_svrs[] __initconst = {
+ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */
+ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */
+ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */
+ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */
+ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */
+ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */
+ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */
+ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */
+ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */
+ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */
+ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */
+ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */
+ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */
+};
+
+#define SVR_SECURITY 0x80000 /* The Security (E) bit */
+
+static bool __init has_erratum_a4510(void)
{
- void __iomem *base;
- uint32_t mult;
- const char *parent_name, *clk_name;
- int i, _errno;
- struct clk_onecell_data *cod;
+ u32 svr = mfspr(SPRN_SVR);
+ int i;
- base = of_iomap(np, 0);
- if (!base) {
- pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
- return;
+ svr &= ~SVR_SECURITY;
+
+ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
+ if (svr == a4510_svrs[i])
+ return true;
}
- /* Get the multiple of PLL */
- mult = ioread32be(base);
+ return false;
+}
+#else
+static bool __init has_erratum_a4510(void)
+{
+ return false;
+}
+#endif
- iounmap(base);
+static void __init clockgen_init(struct device_node *np)
+{
+ int i, ret;
+ bool is_old_ls1021a = false;
- /* Check if this PLL is disabled */
- if (mult & PLL_KILL) {
- pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+ /* May have already been called by a legacy probe */
+ if (clockgen.node)
return;
- }
- mult = (mult & GENMASK(6, 1)) >> 1;
- parent_name = of_clk_get_parent_name(np, 0);
- if (!parent_name) {
- pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
- __func__, np->name);
+ clockgen.node = np;
+ clockgen.regs = of_iomap(np, 0);
+ if (!clockgen.regs &&
+ of_device_is_compatible(of_root, "fsl,ls1021a")) {
+ /* Compatibility hack for old, broken device trees */
+ clockgen.regs = ioremap(0x1ee1000, 0x1000);
+ is_old_ls1021a = true;
+ }
+ if (!clockgen.regs) {
+ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
return;
}
- i = of_property_count_strings(np, "clock-output-names");
- if (i < 0) {
- pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
- __func__, np->name, i);
- return;
+ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
+ if (of_device_is_compatible(np, chipinfo[i].compat))
+ break;
+ if (is_old_ls1021a &&
+ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
+ break;
}
- cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
- if (!cod)
- return;
- cod->clks = (struct clk **)(cod + 1);
- cod->clk_num = i;
-
- for (i = 0; i < cod->clk_num; i++) {
- _errno = of_property_read_string_index(np, "clock-output-names",
- i, &clk_name);
- if (_errno < 0) {
- pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
- __func__, np->name, _errno);
- goto return_clk_unregister;
- }
+ if (i == ARRAY_SIZE(chipinfo)) {
+ pr_err("%s: unknown clockgen node %s\n", __func__,
+ np->full_name);
+ goto err;
+ }
+ clockgen.info = chipinfo[i];
+
+ if (clockgen.info.guts_compat) {
+ struct device_node *guts;
- cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
- parent_name, 0, mult, 1 + i);
- if (IS_ERR(cod->clks[i])) {
- pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
- __func__, np->name,
- clk_name, PTR_ERR(cod->clks[i]));
- goto return_clk_unregister;
+ guts = of_find_compatible_node(NULL, NULL,
+ clockgen.info.guts_compat);
+ if (guts) {
+ clockgen.guts = of_iomap(guts, 0);
+ if (!clockgen.guts) {
+ pr_err("%s: Couldn't map %s regs\n", __func__,
+ guts->full_name);
+ }
}
+
}
- _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
- if (_errno < 0) {
- pr_err("%s(): %s: of_clk_add_provider() = %d\n",
- __func__, np->name, _errno);
- goto return_clk_unregister;
+ if (has_erratum_a4510())
+ clockgen.info.flags |= CG_CMUX_GE_PLAT;
+
+ clockgen.sysclk = create_sysclk("cg-sysclk");
+ create_plls(&clockgen);
+ create_muxes(&clockgen);
+
+ if (clockgen.info.init_periph)
+ clockgen.info.init_periph(&clockgen);
+
+ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
+ if (ret) {
+ pr_err("%s: Couldn't register clk provider for node %s: %d\n",
+ __func__, np->name, ret);
}
return;
-
-return_clk_unregister:
- while (--i >= 0)
- clk_unregister(cod->clks[i]);
- kfree(cod);
+err:
+ iounmap(clockgen.regs);
+ clockgen.regs = NULL;
}
+CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
+
+/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);