author     Linus Torvalds    2017-07-03 11:09:38 -0700
committer  Linus Torvalds    2017-07-03 11:09:38 -0700
commit     26d3a77d2cb3cb31bbaa2de37b7a4e6375f204ee (patch)
tree       6d591db96159db3b0e43db7b8095c14db9f92b37
parent     c6b1e36c8fa04a6680c44fe0321d0370400e90b6 (diff)
parent     164c29244d4beb9a105102c42821f4925e4a0c7a (diff)
Merge tag 'edac_for_4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp
Pull EDAC updates from Borislav Petkov:
 "Nothing earth-shattering - just the normal development flow of
  cleanups, improvements, fixes and such.

  Summary:

   - ie31200_edac: Add Kaby Lake support (Jason Baron)

   - sb_edac: resolve memory controller detection issues on asymmetric
     setups with not all DIMM slots being populated (Tony Luck and
     Qiuxu Zhuo)

   - misc cleanups and fixlets all over"

* tag 'edac_for_4.13' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp: (22 commits)
  EDAC, pnd2: Fix Apollo Lake DIMM detection
  EDAC, i5000, i5400: Fix definition of NRECMEMB register
  EDAC, pnd2: Make function sbi_send() static
  EDAC, pnd2: Return proper error value from apl_rd_reg()
  EDAC, altera: Simplify calculation of total memory
  EDAC, sb_edac: Avoid creating SOCK memory controller
  EDAC, mce_amd: Fix typo in SMCA error description
  EDAC, mv64x60: Sanity check edac_op_state before registering
  EDAC, thunderx: Fix a warning during l2c debugfs node creation
  EDAC, mv64x60: Check driver registration success
  EDAC, ie31200: Add Intel Kaby Lake CPU support
  EDAC, mv64x60: Replace in_le32()/out_le32() with readl()/writel()
  EDAC, mv64x60: Fix pdata->name
  EDAC, sb_edac: Bump driver version and do some cleanups
  EDAC, sb_edac: Check if ECC enabled when at least one DIMM is present
  EDAC, sb_edac: Drop NUM_CHANNELS from 8 back to 4
  EDAC, sb_edac: Carve out dimm-populating loop
  EDAC, sb_edac: Fix mod_name
  EDAC, sb_edac: Assign EDAC memory controller per h/w controller
  EDAC, sb_edac: Don't use "Socket#" in the memory controller name
  ...
-rw-r--r--   drivers/edac/altera_edac.c   |  26
-rw-r--r--   drivers/edac/i5000_edac.c    |   6
-rw-r--r--   drivers/edac/i5400_edac.c    |   4
-rw-r--r--   drivers/edac/ie31200_edac.c  |  13
-rw-r--r--   drivers/edac/mce_amd.c       |   2
-rw-r--r--   drivers/edac/mv64x60_edac.c  |  88
-rw-r--r--   drivers/edac/pnd2_edac.c     |  20
-rw-r--r--   drivers/edac/sb_edac.c       | 682
-rw-r--r--   drivers/edac/thunderx_edac.c |   2
9 files changed, 399 insertions, 444 deletions
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 7717b094fabb..db75d4b614f7 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -214,24 +214,16 @@ static void altr_sdr_mc_create_debugfs_nodes(struct mem_ctl_info *mci)
static unsigned long get_total_mem(void)
{
struct device_node *np = NULL;
- const unsigned int *reg, *reg_end;
- int len, sw, aw;
- unsigned long start, size, total_mem = 0;
+ struct resource res;
+ int ret;
+ unsigned long total_mem = 0;
for_each_node_by_type(np, "memory") {
- aw = of_n_addr_cells(np);
- sw = of_n_size_cells(np);
- reg = (const unsigned int *)of_get_property(np, "reg", &len);
- reg_end = reg + (len / sizeof(u32));
-
- total_mem = 0;
- do {
- start = of_read_number(reg, aw);
- reg += aw;
- size = of_read_number(reg, sw);
- reg += sw;
- total_mem += size;
- } while (reg < reg_end);
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret)
+ continue;
+
+ total_mem += resource_size(&res);
}
edac_dbg(0, "total_mem 0x%lx\n", total_mem);
return total_mem;
@@ -1839,7 +1831,7 @@ static int a10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
return 0;
}
-static struct irq_domain_ops a10_eccmgr_ic_ops = {
+static const struct irq_domain_ops a10_eccmgr_ic_ops = {
.map = a10_eccmgr_irqdomain_map,
.xlate = irq_domain_xlate_twocell,
};
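
The altera hunk above trades the open-coded walk over each memory node's "reg" cells for the generic OF helpers. A rough standalone sketch of that pattern follows (standard kernel APIs; as in the new code, only each node's first "reg" entry is counted and unparsable nodes are simply skipped):

    #include <linux/of.h>
    #include <linux/of_address.h>
    #include <linux/ioport.h>

    static unsigned long total_memory_sketch(void)
    {
            struct device_node *np = NULL;
            struct resource res;
            unsigned long total_mem = 0;

            for_each_node_by_type(np, "memory") {
                    /* Translate the node's first "reg" entry into a resource. */
                    if (of_address_to_resource(np, 0, &res))
                            continue;       /* unparsable node: skip it */
                    total_mem += resource_size(&res);
            }
            return total_mem;
    }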
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index f683919981b0..8f5a56e25bd2 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -227,7 +227,7 @@
#define NREC_RDWR(x) (((x)>>11) & 1)
#define NREC_RANK(x) (((x)>>8) & 0x7)
#define NRECMEMB 0xC0
-#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
+#define NREC_CAS(x) (((x)>>16) & 0xFFF)
#define NREC_RAS(x) ((x) & 0x7FFF)
#define NRECFGLOG 0xC4
#define NREEECFBDA 0xC8
@@ -371,7 +371,7 @@ struct i5000_error_info {
/* These registers are input ONLY if there was a
* Non-Recoverable Error */
u16 nrecmema; /* Non-Recoverable Mem log A */
- u16 nrecmemb; /* Non-Recoverable Mem log B */
+ u32 nrecmemb; /* Non-Recoverable Mem log B */
};
@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMA, &info->nrecmema);
- pci_read_config_word(pvt->branchmap_werrors,
+ pci_read_config_dword(pvt->branchmap_werrors,
NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 37a9ba71da44..cd889edc8516 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -368,7 +368,7 @@ struct i5400_error_info {
/* These registers are input ONLY if there was a Non-Rec Error */
u16 nrecmema; /* Non-Recoverable Mem log A */
- u16 nrecmemb; /* Non-Recoverable Mem log B */
+ u32 nrecmemb; /* Non-Recoverable Mem log B */
};
@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
NERR_FAT_FBD, &info->nerr_fat_fbd);
pci_read_config_word(pvt->branchmap_werrors,
NRECMEMA, &info->nrecmema);
- pci_read_config_word(pvt->branchmap_werrors,
+ pci_read_config_dword(pvt->branchmap_werrors,
NRECMEMB, &info->nrecmemb);
/* Clear the error bits, by writing them back */
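
The i5000 and i5400 changes stem from the same problem: NRECMEMB is a 32-bit register whose CAS field sits in bits 27:16, so reading it with pci_read_config_word() truncated the value and the old 0xFFFFFF mask was wider than the field itself. A condensed sketch of the corrected access, using the i5000 offset and masks shown above (a fragment, not the driver's full error-gathering path):

    /* Offset/masks as in i5000_edac.c after this fix. */
    #define NRECMEMB        0xC0
    #define NREC_CAS(x)     (((x) >> 16) & 0xFFF)  /* 12-bit CAS field */
    #define NREC_RAS(x)     ((x) & 0x7FFF)         /* 15-bit RAS field */

    u32 nrecmemb, cas, ras;

    /* A dword read is required; a word read would drop bits 31:16,
     * i.e. the entire CAS field. */
    pci_read_config_dword(pvt->branchmap_werrors, NRECMEMB, &nrecmemb);
    cas = NREC_CAS(nrecmemb);
    ras = NREC_RAS(nrecmemb);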
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 2733fb5938a4..4260579e6901 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -18,10 +18,12 @@
* 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
* 0c08: Xeon E3-1200 v3 Processor DRAM Controller
* 1918: Xeon E3-1200 v5 Skylake Host Bridge/DRAM Registers
+ * 5918: Xeon E3-1200 Xeon E3-1200 v6/7th Gen Core Processor Host Bridge/DRAM Registers
*
* Based on Intel specification:
* http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
* http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ * http://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-h-processor-lines-datasheet-vol-2.html
*
* According to the above datasheet (p.16):
* "
@@ -57,6 +59,7 @@
#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
#define PCI_DEVICE_ID_INTEL_IE31200_HB_8 0x1918
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_9 0x5918
#define IE31200_DIMMS 4
#define IE31200_RANKS 8
@@ -376,7 +379,12 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
void __iomem *window;
struct ie31200_priv *priv;
u32 addr_decode, mad_offset;
- bool skl = (pdev->device == PCI_DEVICE_ID_INTEL_IE31200_HB_8);
+
+ /*
+ * Kaby Lake seems to work like Skylake. Please re-visit this logic
+ * when adding new CPU support.
+ */
+ bool skl = (pdev->device >= PCI_DEVICE_ID_INTEL_IE31200_HB_8);
edac_dbg(0, "MC:\n");
@@ -560,6 +568,9 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
PCI_VEND_DEV(INTEL, IE31200_HB_8), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
IE31200},
{
+ PCI_VEND_DEV(INTEL, IE31200_HB_9), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ IE31200},
+ {
0,
} /* 0 terminated list. */
};
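
The ie31200 change leans on the host-bridge IDs handled by this driver being ordered: the Skylake ID (0x1918) and the new Kaby Lake ID (0x5918) are both numerically above the pre-Skylake ones (e.g. 0x0c04, 0x0c08), so a single comparison selects the Skylake-style register layout; the in-diff comment already warns that this shortcut needs revisiting when further IDs are added. Spelled out:

    bool skl = (pdev->device >= PCI_DEVICE_ID_INTEL_IE31200_HB_8);
    /*
     * 0x0c08 (IE31200_HB_7, Xeon E3-1200 v3) -> skl == false
     * 0x1918 (IE31200_HB_8, Skylake)         -> skl == true
     * 0x5918 (IE31200_HB_9, Kaby Lake)       -> skl == true
     */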
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ba35b7ea3686..9a2658a256a9 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -161,7 +161,7 @@ static const char * const smca_ls_mce_desc[] = {
"Sys Read data error thread 0",
"Sys read data error thread 1",
"DC tag error type 2",
- "DC data error type 1 (poison comsumption)",
+ "DC data error type 1 (poison consumption)",
"DC data error type 2",
"DC data error type 3",
"DC tag error type 4",
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 14b7e7b71eaa..d3650df94fe8 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -32,21 +32,21 @@ static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
struct mv64x60_pci_pdata *pdata = pci->pvt_info;
u32 cause;
- cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
if (!cause)
return;
printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
printk(KERN_ERR "Cause register: 0x%08x\n", cause);
printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+ readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+ readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
printk(KERN_ERR "Attribute: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+ readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
printk(KERN_ERR "Command: 0x%08x\n",
- in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+ readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+ writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
if (cause & MV64X60_PCI_PE_MASK)
edac_pci_handle_pe(pci, pci->ctl_name);
@@ -61,7 +61,7 @@ static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
struct mv64x60_pci_pdata *pdata = pci->pvt_info;
u32 val;
- val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
if (!val)
return IRQ_NONE;
@@ -93,7 +93,7 @@ static int __init mv64x60_pci_fixup(struct platform_device *pdev)
if (!pci_serr)
return -ENOMEM;
- out_le32(pci_serr, in_le32(pci_serr) & ~0x1);
+ writel(readl(pci_serr) & ~0x1, pci_serr);
iounmap(pci_serr);
return 0;
@@ -116,7 +116,7 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
pdata = pci->pvt_info;
pdata->pci_hose = pdev->id;
- pdata->name = "mpc85xx_pci_err";
+ pdata->name = "mv64x60_pci_err";
platform_set_drvdata(pdev, pci);
pci->dev = &pdev->dev;
pci->dev_name = dev_name(&pdev->dev);
@@ -161,10 +161,10 @@ static int mv64x60_pci_err_probe(struct platform_device *pdev)
goto err;
}
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
- out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
- MV64X60_PCIx_ERR_MASK_VAL);
+ writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
+ writel(MV64X60_PCIx_ERR_MASK_VAL,
+ pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
edac_dbg(3, "failed edac_pci_add_device()\n");
@@ -233,23 +233,23 @@ static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
u32 cause;
- cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
if (!cause)
return;
printk(KERN_ERR "Error in internal SRAM\n");
printk(KERN_ERR "Cause register: 0x%08x\n", cause);
printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+ readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+ readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
printk(KERN_ERR "Data Low: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+ readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
printk(KERN_ERR "Data High: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+ readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
printk(KERN_ERR "Parity: 0x%08x\n",
- in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
- out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+ readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+ writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
@@ -260,7 +260,7 @@ static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
u32 cause;
- cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
if (!cause)
return IRQ_NONE;
@@ -322,7 +322,7 @@ static int mv64x60_sram_err_probe(struct platform_device *pdev)
}
/* setup SRAM err registers */
- out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+ writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
edac_dev->mod_name = EDAC_MOD_STR;
edac_dev->ctl_name = pdata->name;
@@ -398,7 +398,7 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
u32 cause;
- cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
MV64x60_CPU_CAUSE_MASK;
if (!cause)
return;
@@ -406,16 +406,16 @@ static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
printk(KERN_ERR "Error on CPU interface\n");
printk(KERN_ERR "Cause register: 0x%08x\n", cause);
printk(KERN_ERR "Address Low: 0x%08x\n",
- in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+ readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
printk(KERN_ERR "Address High: 0x%08x\n",
- in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+ readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
printk(KERN_ERR "Data Low: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+ readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
printk(KERN_ERR "Data High: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+ readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
printk(KERN_ERR "Parity: 0x%08x\n",
- in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+ readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+ writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}
@@ -426,7 +426,7 @@ static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
u32 cause;
- cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
MV64x60_CPU_CAUSE_MASK;
if (!cause)
return IRQ_NONE;
@@ -515,9 +515,9 @@ static int mv64x60_cpu_err_probe(struct platform_device *pdev)
}
/* setup CPU err registers */
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
- out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+ writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
+ writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
+ writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
edac_dev->mod_name = EDAC_MOD_STR;
edac_dev->ctl_name = pdata->name;
@@ -596,13 +596,13 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
u32 comp_ecc;
u32 syndrome;
- reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
if (!reg)
return;
err_addr = reg & ~0x3;
- sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
- comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+ sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+ comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
syndrome = sdram_ecc ^ comp_ecc;
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
@@ -620,7 +620,7 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
mci->ctl_name, "");
/* clear the error */
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+ writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
}
static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
@@ -629,7 +629,7 @@ static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
struct mv64x60_mc_pdata *pdata = mci->pvt_info;
u32 reg;
- reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
if (!reg)
return IRQ_NONE;
@@ -664,7 +664,7 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
get_total_mem(pdata);
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+ ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
csrow = mci->csrows[0];
dimm = csrow->channels[0]->dimm;
@@ -753,7 +753,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
goto err;
}
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+ ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
if (!(ctl & MV64X60_SDRAM_ECC)) {
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
@@ -779,10 +779,10 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
mv64x60_init_csrows(mci, pdata);
/* setup MC registers */
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
- ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
+ writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
ctl = (ctl & 0xff00ffff) | 0x10000;
- out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+ writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
res = edac_mc_add_mc(mci);
if (res) {
@@ -853,10 +853,10 @@ static struct platform_driver * const drivers[] = {
static int __init mv64x60_edac_init(void)
{
- int ret = 0;
printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+
/* make sure error reporting method is sane */
switch (edac_op_state) {
case EDAC_OPSTATE_POLL:
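
Most of the mv64x60 diff swaps the powerpc-only in_le32()/out_le32() accessors for the generic readl()/writel(), which have the same little-endian MMIO semantics but exist on all architectures. Note that the argument order flips: out_le32(addr, val) becomes writel(val, addr). A minimal sketch of the recurring clear-and-unmask sequence, using the generic accessors and the register names from the hunks above:

    u32 cause;

    /* Read the latched error cause and write it back to acknowledge it. */
    cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
    if (cause)
            writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);

    /* Mask everything, then enable only the interesting error sources. */
    writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
    writel(MV64X60_PCIx_ERR_MASK_VAL,
           pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);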
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
index 1cad5a9af8d0..8e599490f6de 100644
--- a/drivers/edac/pnd2_edac.c
+++ b/drivers/edac/pnd2_edac.c
@@ -131,7 +131,7 @@ static struct mem_ctl_info *pnd2_mci;
#ifdef CONFIG_X86_INTEL_SBI_APL
#include "linux/platform_data/sbi_apl.h"
-int sbi_send(int port, int off, int op, u32 *data)
+static int sbi_send(int port, int off, int op, u32 *data)
{
struct sbi_apl_message sbi_arg;
int ret, read = 0;
@@ -160,7 +160,7 @@ int sbi_send(int port, int off, int op, u32 *data)
return ret;
}
#else
-int sbi_send(int port, int off, int op, u32 *data)
+static int sbi_send(int port, int off, int op, u32 *data)
{
return -EUNATCH;
}
@@ -168,14 +168,15 @@ int sbi_send(int port, int off, int op, u32 *data)
static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
{
- int ret = 0;
+ int ret = 0;
edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
switch (sz) {
case 8:
ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
+ /* fall through */
case 4:
- ret = sbi_send(port, off, op, (u32 *)data);
+ ret |= sbi_send(port, off, op, (u32 *)data);
pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
break;
@@ -423,16 +424,21 @@ static void dnv_mk_region(char *name, struct region *rp, void *asym)
static int apl_get_registers(void)
{
+ int ret = -ENODEV;
int i;
if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
return -ENODEV;
+ /*
+ * RD_REGP() will fail for unpopulated or non-existent
+ * DIMM slots. Return success if we find at least one DIMM.
+ */
for (i = 0; i < APL_NUM_CHANNELS; i++)
- if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
- return -ENODEV;
+ if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
+ ret = 0;
- return 0;
+ return ret;
}
static int dnv_get_registers(void)
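
Two separate pnd2 fixes appear above. In apl_rd_reg(), the 8-byte case reads the upper 32 bits and then deliberately falls through to the 4-byte case for the lower half; OR-ing the return values means a failure of either half is no longer silently overwritten by a successful second read (if both fail the exact errno may be mangled, but the result stays non-zero). And apl_get_registers() now treats a d_cr_drp0 read failure as "slot not populated" rather than fatal, succeeding as long as at least one channel responds. A condensed sketch of the first idea:

    int ret = 0;

    switch (sz) {
    case 8:
            /* upper 32 bits first ... */
            ret = sbi_send(port, off + 4, op, (u32 *)(data + 4));
            /* fall through */
    case 4:
            /* ... then the lower 32 bits, keeping any earlier error */
            ret |= sbi_send(port, off, op, (u32 *)data);
            break;
    }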
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index ea21cb651b3c..80d860cb0746 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -35,7 +35,7 @@ static LIST_HEAD(sbridge_edac_list);
/*
* Alter this version for the module when modifications are made
*/
-#define SBRIDGE_REVISION " Ver: 1.1.1 "
+#define SBRIDGE_REVISION " Ver: 1.1.2 "
#define EDAC_MOD_STR "sbridge_edac"
/*
@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
* sbridge structs
*/
-#define NUM_CHANNELS 8 /* 2MC per socket, four chan per MC */
+#define NUM_CHANNELS 4 /* Max channels per MC */
#define MAX_DIMMS 3 /* Max DIMMS per channel */
#define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */
@@ -294,6 +294,12 @@ enum type {
KNIGHTS_LANDING,
};
+enum domain {
+ IMC0 = 0,
+ IMC1,
+ SOCK,
+};
+
struct sbridge_pvt;
struct sbridge_info {
enum type type;
@@ -324,11 +330,14 @@ struct sbridge_channel {
struct pci_id_descr {
int dev_id;
int optional;
+ enum domain dom;
};
struct pci_id_table {
const struct pci_id_descr *descr;
- int n_devs;
+ int n_devs_per_imc;
+ int n_devs_per_sock;
+ int n_imcs_per_sock;
enum type type;
};
@@ -337,7 +346,9 @@ struct sbridge_dev {
u8 bus, mc;
u8 node_id, source_id;
struct pci_dev **pdev;
+ enum domain dom;
int n_devs;
+ int i_devs;
struct mem_ctl_info *mci;
};
@@ -352,11 +363,12 @@ struct knl_pvt {
};
struct sbridge_pvt {
- struct pci_dev *pci_ta, *pci_ddrio, *pci_ras;
+ /* Devices per socket */
+ struct pci_dev *pci_ddrio;
struct pci_dev *pci_sad0, *pci_sad1;
- struct pci_dev *pci_ha0, *pci_ha1;
struct pci_dev *pci_br0, *pci_br1;
- struct pci_dev *pci_ha1_ta;
+ /* Devices per memory controller */
+ struct pci_dev *pci_ha, *pci_ta, *pci_ras;
struct pci_dev *pci_tad[NUM_CHANNELS];
struct sbridge_dev *sbridge_dev;
@@ -373,39 +385,42 @@ struct sbridge_pvt {
struct knl_pvt knl;
};
-#define PCI_DESCR(device_id, opt) \
+#define PCI_DESCR(device_id, opt, domain) \
.dev_id = (device_id), \
- .optional = opt
+ .optional = opt, \
+ .dom = domain
static const struct pci_id_descr pci_dev_descr_sbridge[] = {
/* Processor Home Agent */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },
/* Memory controller */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
/* System Address Decoder */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },
/* Broadcast Registers */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
};
-#define PCI_ID_TABLE_ENTRY(A, T) { \
+#define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
.descr = A, \
- .n_devs = ARRAY_SIZE(A), \
+ .n_devs_per_imc = N, \
+ .n_devs_per_sock = ARRAY_SIZE(A), \
+ .n_imcs_per_sock = M, \
.type = T \
}
static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -439,40 +454,39 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
/* Memory controller */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
+
+ /* Optional, mode 2HA */
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
/* System Address Decoder */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },
/* Broadcast Registers */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },
- /* Optional, mode 2HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
-#if 0
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
-#endif
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
{0,} /* 0 terminated list. */
};
@@ -498,9 +512,9 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
-#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
@@ -517,35 +531,33 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
/* first item must be the HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
};
static const struct pci_id_table pci_dev_descr_haswell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
{0,} /* 0 terminated list. */
};
@@ -559,7 +571,7 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
-#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL 0x7843
+#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
@@ -579,17 +591,17 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
*/
static const struct pci_id_descr pci_dev_descr_knl[] = {
- [0] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0) },
- [1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0) },
- [2 ... 3] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0)},
- [4 ... 41] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0) },
- [42 ... 47] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL, 0) },
- [48] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0) },
- [49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0) },
+ [0 ... 1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
+ [2 ... 7] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
+ [8] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
+ [9] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
+ [10] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
+ [11] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
+ [12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
};
static const struct pci_id_table pci_dev_descr_knl_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
{0,}
};
@@ -615,9 +627,9 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL 0x6f71
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68
-#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL 0x6f79
+#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
@@ -632,32 +644,30 @@ static const struct pci_id_table pci_dev_descr_knl_table[] = {
static const struct pci_id_descr pci_dev_descr_broadwell[] = {
/* first item must be the HA */
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1) },
-
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_THERMAL, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1) },
- { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
};
static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
- PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL),
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
{0,} /* 0 terminated list. */
};
@@ -709,7 +719,8 @@ static inline int numcol(u32 mtr)
return 1 << cols;
}
-static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
+static struct sbridge_dev *get_sbridge_dev(u8 bus, enum domain dom, int multi_bus,
+ struct sbridge_dev *prev)
{
struct sbridge_dev *sbridge_dev;
@@ -722,16 +733,19 @@ static struct sbridge_dev *get_sbridge_dev(u8 bus, int multi_bus)
struct sbridge_dev, list);
}
- list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->bus == bus)
+ sbridge_dev = list_entry(prev ? prev->list.next
+ : sbridge_edac_list.next, struct sbridge_dev, list);
+
+ list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
+ if (sbridge_dev->bus == bus && (dom == SOCK || dom == sbridge_dev->dom))
return sbridge_dev;
}
return NULL;
}
-static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
- const struct pci_id_table *table)
+static struct sbridge_dev *alloc_sbridge_dev(u8 bus, enum domain dom,
+ const struct pci_id_table *table)
{
struct sbridge_dev *sbridge_dev;
@@ -739,15 +753,17 @@ static struct sbridge_dev *alloc_sbridge_dev(u8 bus,
if (!sbridge_dev)
return NULL;
- sbridge_dev->pdev = kzalloc(sizeof(*sbridge_dev->pdev) * table->n_devs,
- GFP_KERNEL);
+ sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
+ sizeof(*sbridge_dev->pdev),
+ GFP_KERNEL);
if (!sbridge_dev->pdev) {
kfree(sbridge_dev);
return NULL;
}
sbridge_dev->bus = bus;
- sbridge_dev->n_devs = table->n_devs;
+ sbridge_dev->dom = dom;
+ sbridge_dev->n_devs = table->n_devs_per_imc;
list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
return sbridge_dev;
@@ -1044,79 +1060,6 @@ static int haswell_chan_hash(int idx, u64 addr)
return idx;
}
-/****************************************************************************
- Memory check routines
- ****************************************************************************/
-static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
-{
- struct pci_dev *pdev = NULL;
-
- do {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
- if (pdev && pdev->bus->number == bus)
- break;
- } while (pdev);
-
- return pdev;
-}
-
-/**
- * check_if_ecc_is_active() - Checks if ECC is active
- * @bus: Device bus
- * @type: Memory controller type
- * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
- * disabled
- */
-static int check_if_ecc_is_active(const u8 bus, enum type type)
-{
- struct pci_dev *pdev = NULL;
- u32 mcmtr, id;
-
- switch (type) {
- case IVY_BRIDGE:
- id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
- break;
- case HASWELL:
- id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
- break;
- case SANDY_BRIDGE:
- id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
- break;
- case BROADWELL:
- id = PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA;
- break;
- case KNIGHTS_LANDING:
- /*
- * KNL doesn't group things by bus the same way
- * SB/IB/Haswell does.
- */
- id = PCI_DEVICE_ID_INTEL_KNL_IMC_TA;
- break;
- default:
- return -ENODEV;
- }
-
- if (type != KNIGHTS_LANDING)
- pdev = get_pdev_same_bus(bus, id);
- else
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, 0);
-
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%04x:%04x! on bus %02d\n",
- PCI_VENDOR_ID_INTEL, id, bus);
- return -ENODEV;
- }
-
- pci_read_config_dword(pdev,
- type == KNIGHTS_LANDING ? KNL_MCMTR : MCMTR, &mcmtr);
- if (!IS_ECC_ENABLED(mcmtr)) {
- sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
- return -ENODEV;
- }
- return 0;
-}
-
/* Low bits of TAD limit, and some metadata. */
static const u32 knl_tad_dram_limit_lo[] = {
0x400, 0x500, 0x600, 0x700,
@@ -1587,25 +1530,13 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
return 0;
}
-static int get_dimm_config(struct mem_ctl_info *mci)
+static void get_source_id(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
- struct dimm_info *dimm;
- unsigned i, j, banks, ranks, rows, cols, npages;
- u64 size;
u32 reg;
- enum edac_type mode;
- enum mem_type mtype;
- int channels = pvt->info.type == KNIGHTS_LANDING ?
- KNL_MAX_CHANNELS : NUM_CHANNELS;
- u64 knl_mc_sizes[KNL_MAX_CHANNELS];
- if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
- pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
- pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
- }
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
- pvt->info.type == KNIGHTS_LANDING)
+ pvt->info.type == KNIGHTS_LANDING)
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
else
pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
@@ -1614,50 +1545,19 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
else
pvt->sbridge_dev->source_id = SOURCE_ID(reg);
+}
- pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
- edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
- pvt->sbridge_dev->mc,
- pvt->sbridge_dev->node_id,
- pvt->sbridge_dev->source_id);
-
- /* KNL doesn't support mirroring or lockstep,
- * and is always closed page
- */
- if (pvt->info.type == KNIGHTS_LANDING) {
- mode = EDAC_S4ECD4ED;
- pvt->is_mirrored = false;
-
- if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
- return -1;
- } else {
- pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
- if (IS_MIRROR_ENABLED(reg)) {
- edac_dbg(0, "Memory mirror is enabled\n");
- pvt->is_mirrored = true;
- } else {
- edac_dbg(0, "Memory mirror is disabled\n");
- pvt->is_mirrored = false;
- }
-
- pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
- if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
- edac_dbg(0, "Lockstep is enabled\n");
- mode = EDAC_S8ECD8ED;
- pvt->is_lockstep = true;
- } else {
- edac_dbg(0, "Lockstep is disabled\n");
- mode = EDAC_S4ECD4ED;
- pvt->is_lockstep = false;
- }
- if (IS_CLOSE_PG(pvt->info.mcmtr)) {
- edac_dbg(0, "address map is on closed page mode\n");
- pvt->is_close_pg = true;
- } else {
- edac_dbg(0, "address map is on open page mode\n");
- pvt->is_close_pg = false;
- }
- }
+static int __populate_dimms(struct mem_ctl_info *mci,
+ u64 knl_mc_sizes[KNL_MAX_CHANNELS],
+ enum edac_type mode)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
+ : NUM_CHANNELS;
+ unsigned int i, j, banks, ranks, rows, cols, npages;
+ struct dimm_info *dimm;
+ enum mem_type mtype;
+ u64 size;
mtype = pvt->info.get_memory_type(pvt);
if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
@@ -1688,8 +1588,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
}
for (j = 0; j < max_dimms_per_channel; j++) {
- dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
- i, j, 0);
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
if (pvt->info.type == KNIGHTS_LANDING) {
pci_read_config_dword(pvt->knl.pci_channel[i],
knl_mtr_reg, &mtr);
@@ -1699,6 +1598,12 @@ static int get_dimm_config(struct mem_ctl_info *mci)
}
edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
if (IS_DIMM_PRESENT(mtr)) {
+ if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
+ sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
+ pvt->sbridge_dev->source_id,
+ pvt->sbridge_dev->dom, i);
+ return -ENODEV;
+ }
pvt->channel[i].dimms++;
ranks = numrank(pvt->info.type, mtr);
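
This hunk is the core of "EDAC, sb_edac: Check if ECC enabled when at least one DIMM is present": the probe-time check_if_ecc_is_active() helper removed earlier in this patch is replaced by caching MCMTR in pvt->info.mcmtr and rejecting a memory controller only when a populated channel turns out to have ECC disabled, which is what lets asymmetric systems with empty controllers load cleanly. Condensed, the new per-DIMM check amounts to:

    /* MCMTR is read once per MC in get_dimm_config(); the check only
     * fires for channels that actually have a DIMM installed. */
    if (IS_DIMM_PRESENT(mtr) && !IS_ECC_ENABLED(pvt->info.mcmtr)) {
            sbridge_printk(KERN_ERR,
                           "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
                           pvt->sbridge_dev->source_id,
                           pvt->sbridge_dev->dom, i);
            return -ENODEV;
    }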
@@ -1717,7 +1622,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
npages = MiB_TO_PAGES(size);
edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
- pvt->sbridge_dev->mc, i/4, i%4, j,
+ pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
size, npages,
banks, ranks, rows, cols);
@@ -1727,8 +1632,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
dimm->mtype = mtype;
dimm->edac_mode = mode;
snprintf(dimm->label, sizeof(dimm->label),
- "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
- pvt->sbridge_dev->source_id, i/4, i%4, j);
+ "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
}
}
}
@@ -1736,6 +1641,65 @@ static int get_dimm_config(struct mem_ctl_info *mci)
return 0;
}
+static int get_dimm_config(struct mem_ctl_info *mci)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ u64 knl_mc_sizes[KNL_MAX_CHANNELS];
+ enum edac_type mode;
+ u32 reg;
+
+ if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+ pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
+ pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+ }
+ pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
+ edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
+ pvt->sbridge_dev->mc,
+ pvt->sbridge_dev->node_id,
+ pvt->sbridge_dev->source_id);
+
+ /* KNL doesn't support mirroring or lockstep,
+ * and is always closed page
+ */
+ if (pvt->info.type == KNIGHTS_LANDING) {
+ mode = EDAC_S4ECD4ED;
+ pvt->is_mirrored = false;
+
+ if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
+ return -1;
+ pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr);
+ } else {
+ pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg);
+ if (IS_MIRROR_ENABLED(reg)) {
+ edac_dbg(0, "Memory mirror is enabled\n");
+ pvt->is_mirrored = true;
+ } else {
+ edac_dbg(0, "Memory mirror is disabled\n");
+ pvt->is_mirrored = false;
+ }
+
+ pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr);
+ if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
+ edac_dbg(0, "Lockstep is enabled\n");
+ mode = EDAC_S8ECD8ED;
+ pvt->is_lockstep = true;
+ } else {
+ edac_dbg(0, "Lockstep is disabled\n");
+ mode = EDAC_S4ECD4ED;
+ pvt->is_lockstep = false;
+ }
+ if (IS_CLOSE_PG(pvt->info.mcmtr)) {
+ edac_dbg(0, "address map is on closed page mode\n");
+ pvt->is_close_pg = true;
+ } else {
+ edac_dbg(0, "address map is on open page mode\n");
+ pvt->is_close_pg = false;
+ }
+ }
+
+ return __populate_dimms(mci, knl_mc_sizes, mode);
+}
+
static void get_memory_layout(const struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
@@ -1816,8 +1780,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
*/
prv = 0;
for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
- pci_read_config_dword(pvt->pci_ha0, tad_dram_rule[n_tads],
- &reg);
+ pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
limit = TAD_LIMIT(reg);
if (limit <= prv)
break;
@@ -1899,12 +1862,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
}
}
-static struct mem_ctl_info *get_mci_for_node_id(u8 node_id)
+static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
{
struct sbridge_dev *sbridge_dev;
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
- if (sbridge_dev->node_id == node_id)
+ if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
return sbridge_dev->mci;
}
return NULL;
@@ -1925,7 +1888,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
int interleave_mode, shiftup = 0;
unsigned sad_interleave[pvt->info.max_interleave];
u32 reg, dram_rule;
- u8 ch_way, sck_way, pkg, sad_ha = 0, ch_add = 0;
+ u8 ch_way, sck_way, pkg, sad_ha = 0;
u32 tad_offset;
u32 rir_way;
u32 mb, gb;
@@ -2038,13 +2001,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
*socket = sad_pkg_socket(pkg);
sad_ha = sad_pkg_ha(pkg);
- if (sad_ha)
- ch_add = 4;
if (a7mode) {
/* MCChanShiftUpEnable */
- pci_read_config_dword(pvt->pci_ha0,
- HASWELL_HASYSDEFEATURE2, &reg);
+ pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
shiftup = GET_BITFIELD(reg, 22, 22);
}
@@ -2056,8 +2016,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
*socket = sad_pkg_socket(pkg);
sad_ha = sad_pkg_ha(pkg);
- if (sad_ha)
- ch_add = 4;
edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
idx, *socket, sad_ha);
}
@@ -2068,7 +2026,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
* Move to the proper node structure, in order to access the
* right PCI registers
*/
- new_mci = get_mci_for_node_id(*socket);
+ new_mci = get_mci_for_node_id(*socket, sad_ha);
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
@@ -2081,14 +2039,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
* Step 2) Get memory channel
*/
prv = 0;
- if (pvt->info.type == SANDY_BRIDGE)
- pci_ha = pvt->pci_ha0;
- else {
- if (sad_ha)
- pci_ha = pvt->pci_ha1;
- else
- pci_ha = pvt->pci_ha0;
- }
+ pci_ha = pvt->pci_ha;
for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
limit = TAD_LIMIT(reg);
@@ -2139,9 +2090,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
*channel_mask = 1 << base_ch;
- pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
- tad_ch_nilv_offset[n_tads],
- &tad_offset);
+ pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
if (pvt->is_mirrored) {
*channel_mask |= 1 << ((base_ch + 2) % 4);
@@ -2192,9 +2141,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
* Step 3) Decode rank
*/
for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
- pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
- rir_way_limit[n_rir],
- &reg);
+ pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
if (!IS_RIR_VALID(reg))
continue;
@@ -2222,9 +2169,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
idx = (ch_addr >> 13); /* FIXME: Datasheet says to shift by 15 */
idx %= 1 << rir_way;
- pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
- rir_offset[n_rir][idx],
- &reg);
+ pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
*rank = RIR_RNK_TGT(pvt->info.type, reg);
edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
@@ -2277,10 +2222,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
const unsigned devno,
const int multi_bus)
{
- struct sbridge_dev *sbridge_dev;
+ struct sbridge_dev *sbridge_dev = NULL;
const struct pci_id_descr *dev_descr = &table->descr[devno];
struct pci_dev *pdev = NULL;
u8 bus = 0;
+ int i = 0;
sbridge_printk(KERN_DEBUG,
"Seeking for: PCI ID %04x:%04x\n",
@@ -2311,9 +2257,14 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
}
bus = pdev->bus->number;
- sbridge_dev = get_sbridge_dev(bus, multi_bus);
+next_imc:
+ sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev);
if (!sbridge_dev) {
- sbridge_dev = alloc_sbridge_dev(bus, table);
+
+ if (dev_descr->dom == SOCK)
+ goto out_imc;
+
+ sbridge_dev = alloc_sbridge_dev(bus, dev_descr->dom, table);
if (!sbridge_dev) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -2321,7 +2272,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
(*num_mc)++;
}
- if (sbridge_dev->pdev[devno]) {
+ if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
sbridge_printk(KERN_ERR,
"Duplicated device for %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
@@ -2329,8 +2280,16 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
return -ENODEV;
}
- sbridge_dev->pdev[devno] = pdev;
+ sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
+
+ /* pdev belongs to more than one IMC, do extra gets */
+ if (++i > 1)
+ pci_dev_get(pdev);
+ if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
+ goto next_imc;
+
+out_imc:
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
sbridge_printk(KERN_ERR,
@@ -2374,7 +2333,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
if (table->type == KNIGHTS_LANDING)
allow_dups = multi_bus = 1;
while (table && table->descr) {
- for (i = 0; i < table->n_devs; i++) {
+ for (i = 0; i < table->n_devs_per_sock; i++) {
if (!allow_dups || i == 0 ||
table->descr[i].dev_id !=
table->descr[i-1].dev_id) {
@@ -2385,7 +2344,7 @@ static int sbridge_get_all_devices(u8 *num_mc,
table, i, multi_bus);
if (rc < 0) {
if (i == 0) {
- i = table->n_devs;
+ i = table->n_devs_per_sock;
break;
}
sbridge_put_all_devices();
@@ -2399,6 +2358,13 @@ static int sbridge_get_all_devices(u8 *num_mc,
return 0;
}
+/*
+ * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
+ * the format: XXXa. So we can convert from a device to the corresponding
+ * channel like this
+ */
+#define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
+
static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
struct sbridge_dev *sbridge_dev)
{
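
The new TAD_DEV_TO_CHAN() macro works because the TAD0..TAD3 device IDs on these platforms end in the hex digits a..d, so taking the low nibble and subtracting 0xa yields the channel number directly, on either home agent. Using IDs defined earlier in this patch as a worked example:

    #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)

    TAD_DEV_TO_CHAN(0x2faa);  /* Haswell HA0_TAD0   -> (0xa - 0xa) = channel 0 */
    TAD_DEV_TO_CHAN(0x6faa);  /* Broadwell HA0_TAD0 -> channel 0 */
    /* A TADn ID ending in 0xd (TAD3) maps to (0xd - 0xa) = channel 3 on
     * either home agent, which is why the HA1 cases below no longer need
     * the old "+ 4" channel offset. */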
@@ -2423,7 +2389,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
pvt->pci_br0 = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
- pvt->pci_ha0 = pdev;
+ pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
pvt->pci_ta = pdev;
@@ -2436,7 +2402,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
{
- int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
+ int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
@@ -2455,7 +2421,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
}
/* Check if everything were registered */
- if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha0 ||
+ if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
!pvt->pci_ras || !pvt->pci_ta)
goto enodev;
@@ -2488,19 +2454,26 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
- pvt->pci_ha0 = pdev;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
+ pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
pvt->pci_ta = pdev;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
{
- int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
+ int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
@@ -2520,19 +2493,6 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
pvt->pci_br1 = pdev;
break;
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
- pvt->pci_ha1 = pdev;
- break;
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
- case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
- {
- int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 4;
- pvt->pci_tad[id] = pdev;
- saw_chan_mask |= 1 << id;
- }
- break;
default:
goto error;
}
@@ -2544,13 +2504,12 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
}
/* Check if everything were registered */
- if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_br0 ||
+ if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
!pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
goto enodev;
- if (saw_chan_mask != 0x0f && /* -EN */
- saw_chan_mask != 0x33 && /* -EP */
- saw_chan_mask != 0xff) /* -EX */
+ if (saw_chan_mask != 0x0f && /* -EN/-EX */
+ saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
@@ -2593,32 +2552,27 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
- pvt->pci_ha0 = pdev;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
+ pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
pvt->pci_ta = pdev;
break;
- case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
- {
- int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0;
-
- pvt->pci_tad[id] = pdev;
- saw_chan_mask |= 1 << id;
- }
- break;
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
{
- int id = pdev->device - PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 + 4;
-
+ int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
@@ -2630,12 +2584,6 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
if (!pvt->pci_ddrio)
pvt->pci_ddrio = pdev;
break;
- case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
- pvt->pci_ha1 = pdev;
- break;
- case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
- pvt->pci_ha1_ta = pdev;
- break;
default:
break;
}
@@ -2647,13 +2595,12 @@ static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
}
/* Check if everything were registered */
- if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+ if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
!pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
goto enodev;
- if (saw_chan_mask != 0x0f && /* -EN */
- saw_chan_mask != 0x33 && /* -EP */
- saw_chan_mask != 0xff) /* -EX */
+ if (saw_chan_mask != 0x0f && /* -EN/-EX */
+ saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
@@ -2690,30 +2637,27 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
pvt->pci_sad1 = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
- pvt->pci_ha0 = pdev;
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
+ pvt->pci_ha = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
pvt->pci_ta = pdev;
break;
- case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_THERMAL:
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
+ case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
pvt->pci_ras = pdev;
break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
- {
- int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0;
- pvt->pci_tad[id] = pdev;
- saw_chan_mask |= 1 << id;
- }
- break;
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
{
- int id = pdev->device - PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 + 4;
+ int id = TAD_DEV_TO_CHAN(pdev->device);
pvt->pci_tad[id] = pdev;
saw_chan_mask |= 1 << id;
}
@@ -2721,12 +2665,6 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
pvt->pci_ddrio = pdev;
break;
- case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
- pvt->pci_ha1 = pdev;
- break;
- case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
- pvt->pci_ha1_ta = pdev;
- break;
default:
break;
}
@@ -2738,13 +2676,12 @@ static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
}
/* Check if everything were registered */
- if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+ if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
!pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
goto enodev;
- if (saw_chan_mask != 0x0f && /* -EN */
- saw_chan_mask != 0x33 && /* -EP */
- saw_chan_mask != 0xff) /* -EX */
+ if (saw_chan_mask != 0x0f && /* -EN/-EX */
+ saw_chan_mask != 0x03) /* -EP */
goto enodev;
return 0;
@@ -2812,7 +2749,7 @@ static int knl_mci_bind_devs(struct mem_ctl_info *mci,
pvt->knl.pci_cha[devidx] = pdev;
break;
- case PCI_DEVICE_ID_INTEL_KNL_IMC_CHANNEL:
+ case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
devidx = -1;
/*
@@ -3006,7 +2943,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
if (rc < 0)
goto err_parsing;
- new_mci = get_mci_for_node_id(socket);
+ new_mci = get_mci_for_node_id(socket, ha);
if (!new_mci) {
strcpy(msg, "Error: socket got corrupted!");
goto err_parsing;
@@ -3053,7 +2990,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
/* Call the helper to output message */
edac_mc_handle_error(tp_event, mci, core_err_cnt,
m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
- 4*ha+channel, dimm, -1,
+ channel, dimm, -1,
optype, msg);
return;
err_parsing:
@@ -3078,7 +3015,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
return NOTIFY_DONE;
- mci = get_mci_for_node_id(mce->socketid);
+ mci = get_mci_for_node_id(mce->socketid, IMC0);
if (!mci)
return NOTIFY_DONE;
pvt = mci->pvt_info;
@@ -3159,11 +3096,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
struct pci_dev *pdev = sbridge_dev->pdev[0];
int rc;
- /* Check the number of active and not disabled channels */
- rc = check_if_ecc_is_active(sbridge_dev->bus, type);
- if (unlikely(rc < 0))
- return rc;
-
/* allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
layers[0].size = type == KNIGHTS_LANDING ?
@@ -3192,7 +3124,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
- mci->mod_name = "sbridge_edac.c";
+ mci->mod_name = "sb_edac.c";
mci->mod_ver = SBRIDGE_REVISION;
mci->dev_name = pci_name(pdev);
mci->ctl_page_to_phys = NULL;
@@ -3215,12 +3147,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = ibridge_get_width;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
rc = ibridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
+ get_source_id(mci);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case SANDY_BRIDGE:
pvt->info.rankcfgr = SB_RANK_CFG_A;
@@ -3238,12 +3172,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
pvt->info.interleave_pkg = sbridge_interleave_pkg;
pvt->info.get_width = sbridge_get_width;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
rc = sbridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
+ get_source_id(mci);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case HASWELL:
/* rankcfgr isn't used */
@@ -3261,12 +3197,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = ibridge_get_width;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
rc = haswell_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
+ get_source_id(mci);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case BROADWELL:
/* rankcfgr isn't used */
@@ -3284,12 +3222,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = broadwell_get_width;
- mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell Socket#%d", mci->mc_idx);
/* Store pci devices at mci for faster access */
rc = broadwell_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
+ get_source_id(mci);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
case KNIGHTS_LANDING:
/* pvt->info.rankcfgr == ??? */
@@ -3307,17 +3247,22 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.max_interleave = ARRAY_SIZE(knl_interleave_list);
pvt->info.interleave_pkg = ibridge_interleave_pkg;
pvt->info.get_width = knl_get_width;
- mci->ctl_name = kasprintf(GFP_KERNEL,
- "Knights Landing Socket#%d", mci->mc_idx);
rc = knl_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
+ get_source_id(mci);
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
+ pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
break;
}
/* Get dimm basic config and the memory layout */
- get_dimm_config(mci);
+ rc = get_dimm_config(mci);
+ if (rc < 0) {
+ edac_dbg(0, "MC: failed to get_dimm_config()\n");
+ goto fail;
+ }
get_memory_layout(mci);
/* record ptr to the generic device */
@@ -3327,13 +3272,14 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
if (unlikely(edac_mc_add_mc(mci))) {
edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
rc = -EINVAL;
- goto fail0;
+ goto fail;
}
return 0;
-fail0:
+fail:
kfree(mci->ctl_name);
+fail0:
edac_mc_free(mci);
sbridge_dev->mci = NULL;
return rc;
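
Not part of the patch above — a minimal standalone sketch (hypothetical IDs and helper names, not the actual sb_edac definitions) of the pattern the ibridge/haswell/broadwell *_mci_bind_devs() hunks rely on: each TAD device ID is mapped to a channel index, a bit is set in a seen-channel mask, and the mask is then compared against the populations accepted by the patch (0x0f for four channels on -EN/-EX, 0x03 for two channels on -EP). The accepted masks shrink here because, per the hunks above, each mem_ctl_info now covers a single home agent (pvt->pci_ha) with at most four channels, so the combined-HA 0x33/0xff layouts no longer occur and errors are reported with a plain channel index instead of 4*ha+channel.

/*
 * Illustration only -- FAKE_* names are invented for this sketch and are
 * not the sb_edac PCI IDs or the real TAD_DEV_TO_CHAN() definition.
 */
#include <stdint.h>
#include <stdio.h>

/* Pretend the per-channel TAD device IDs are contiguous. */
#define FAKE_TAD0_ID		0x0ea0
#define FAKE_TAD_TO_CHAN(dev)	((dev) - FAKE_TAD0_ID)	/* analogous role to TAD_DEV_TO_CHAN() */

static int validate_channels(const uint16_t *tad_devs, int n)
{
	uint32_t saw_chan_mask = 0;
	int i;

	for (i = 0; i < n; i++) {
		int chan = FAKE_TAD_TO_CHAN(tad_devs[i]);

		saw_chan_mask |= 1u << chan;	/* record each channel device found */
	}

	/* Same acceptance test as the patch: 4 channels (-EN/-EX) or 2 (-EP). */
	if (saw_chan_mask != 0x0f && saw_chan_mask != 0x03)
		return -1;			/* the driver would goto enodev here */

	return 0;
}

int main(void)
{
	uint16_t ep_devs[]  = { 0x0ea0, 0x0ea1 };	/* two channels populated: accepted */
	uint16_t odd_devs[] = { 0x0ea0, 0x0ea2 };	/* gap in the mask: rejected */

	printf("-EP layout:  %d\n", validate_channels(ep_devs, 2));
	printf("odd layout:  %d\n", validate_channels(odd_devs, 2));
	return 0;
}
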
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index 86d585cb6d32..2d352b40ae1c 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -2080,7 +2080,7 @@ static int thunderx_l2c_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_EDAC_DEBUG)) {
l2c->debugfs = edac_debugfs_create_dir(pdev->dev.kobj.name);
- thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
+ ret = thunderx_create_debugfs_nodes(l2c->debugfs, l2c_devattr,
l2c, dfs_entries);
if (ret != dfs_entries) {