author     Xu Yilun    2023-01-29 00:31:22 +0800
committer  Xu Yilun    2023-01-29 00:34:59 +0800
commit     9d18515e4a2b9d3828b6d64087feeeed3aa1cf65 (patch)
tree       c7e138716af0701ba1120f5601cca38f9d7e0dd9 /drivers/fpga
parent     48ca6e5fa4ffc40a14fe45bd5499428a5a108a68 (diff)
parent     869b9eddf0b38a22c27a400e2fa849d2ff2aa7e1 (diff)
Merge tag 'ib-mfd-fpga-hwmon-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd into for-fpga-v6.3-rc1
Lee writes:
Immutable branch between MFD, FPGA and HWMON due for the v6.3 merge window
* tag 'ib-mfd-fpga-hwmon-v6.3' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/lee/mfd:
mfd: intel-m10-bmc: Add PMCI driver
fpga: m10bmc-sec: Make rsu status type specific
fpga: m10bmc-sec: Create helpers for rsu status/progress checks
mfd: intel-m10-bmc: Prefix register defines with M10BMC_N3000
fpga: intel-m10-bmc: Rework flash read/write
mfd: intel-m10-bmc: Support multiple CSR register layouts
mfd: intel-m10-bmc: Split into core and spi specific parts
mfd: intel-m10-bmc: Rename the local variables
mfd: intel-m10-bmc: Create m10bmc_platform_info for type specific info
mfd: intel-m10-bmc: Add missing includes to header
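
Editor's note: taken together, the series replaces compile-time M10BMC_* register constants with a per-board csr_map and routes board-specific RSU status handling through a small ops table selected at probe time. The following is a condensed user-space analogue of that dispatch pattern (illustrative only, not driver code; names here are simplified stand-ins for m10bmc_sec_ops, m10sec_n3000_ops and the platform_device_id driver_data shown in the diff below):

/*
 * User-space sketch of the ops-table dispatch the series introduces:
 * board-specific behaviour sits behind a function-pointer table that is
 * chosen by the device-id match, so callers never test the board type.
 */
#include <stdio.h>

struct sec_dev;

struct sec_ops {
	int (*rsu_status)(struct sec_dev *sec);	/* mirrors m10bmc_sec_ops.rsu_status */
};

struct sec_dev {
	const char *name;
	const struct sec_ops *ops;
};

static int n3000_rsu_status(struct sec_dev *sec)
{
	/* The real driver extracts DRBL_RSU_STATUS from the doorbell register. */
	printf("%s: reading RSU status from the doorbell\n", sec->name);
	return 0;
}

static const struct sec_ops n3000_ops = { .rsu_status = n3000_rsu_status };

/* Analogue of platform_device_id.driver_data pointing at m10sec_n3000_ops */
static const struct { const char *name; const struct sec_ops *ops; } id_table[] = {
	{ "n3000bmc-sec-update", &n3000_ops },
	{ "d5005bmc-sec-update", &n3000_ops },
};

int main(void)
{
	/* probe() analogue: stash the matched ops next to the device state */
	struct sec_dev sec = { .name = id_table[0].name, .ops = id_table[0].ops };

	return sec.ops->rsu_status(&sec);
}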
Diffstat (limited to 'drivers/fpga')
-rw-r--r--  drivers/fpga/Kconfig                       2
-rw-r--r--  drivers/fpga/intel-m10-bmc-sec-update.c  364
2 files changed, 228 insertions, 138 deletions
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 6ce143dafd04..0a00763b9f28 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -246,7 +246,7 @@ config FPGA_MGR_VERSAL_FPGA
 
 config FPGA_M10_BMC_SEC_UPDATE
 	tristate "Intel MAX10 BMC Secure Update driver"
-	depends on MFD_INTEL_M10_BMC
+	depends on MFD_INTEL_M10_BMC_CORE
 	select FW_LOADER
 	select FW_UPLOAD
 	help
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..92145ee7333e 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -14,6 +14,12 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
+struct m10bmc_sec;
+
+struct m10bmc_sec_ops {
+	int (*rsu_status)(struct m10bmc_sec *sec);
+};
+
 struct m10bmc_sec {
 	struct device *dev;
 	struct intel_m10bmc *m10bmc;
@@ -21,6 +27,7 @@ struct m10bmc_sec {
 	char *fw_name;
 	u32 fw_name_id;
 	bool cancel_request;
+	const struct m10bmc_sec_ops *ops;
 };
 
 static DEFINE_XARRAY_ALLOC(fw_upload_xa);
@@ -31,6 +38,65 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
 #define REH_MAGIC		GENMASK(15, 0)
 #define REH_SHA_NUM_BYTES	GENMASK(31, 16)
 
+static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
+{
+	struct intel_m10bmc *m10bmc = sec->m10bmc;
+	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+	u32 write_count = size / stride;
+	u32 leftover_offset = write_count * stride;
+	u32 leftover_size = size - leftover_offset;
+	u32 leftover_tmp = 0;
+	int ret;
+
+	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+		return -EINVAL;
+
+	ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
+				buf + offset, write_count);
+	if (ret)
+		return ret;
+
+	/* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
+	if (leftover_size) {
+		memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
+		ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
+				   leftover_tmp);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
+{
+	struct intel_m10bmc *m10bmc = sec->m10bmc;
+	unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+	u32 read_count = size / stride;
+	u32 leftover_offset = read_count * stride;
+	u32 leftover_size = size - leftover_offset;
+	u32 leftover_tmp;
+	int ret;
+
+	if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+		return -EINVAL;
+
+	ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
+	if (ret)
+		return ret;
+
+	/* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
+	if (leftover_size) {
+		ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
+		if (ret)
+			return ret;
+		memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
+	}
+
+	return 0;
+}
+
+
 static ssize_t
 show_root_entry_hash(struct device *dev, u32 exp_magic,
		     u32 prog_addr, u32 reh_addr, char *buf)
@@ -38,11 +104,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
 	struct m10bmc_sec *sec = dev_get_drvdata(dev);
 	int sha_num_bytes, i, ret, cnt = 0;
 	u8 hash[REH_SHA384_SIZE];
-	unsigned int stride;
 	u32 magic;
 
-	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
-	ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
+	ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
 	if (ret)
 		return ret;
 
@@ -50,19 +114,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
 		return sysfs_emit(buf, "hash not programmed\n");
 
 	sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
-	if ((sha_num_bytes % stride) ||
-	    (sha_num_bytes != REH_SHA256_SIZE &&
-	     sha_num_bytes != REH_SHA384_SIZE)) {
+	if (sha_num_bytes != REH_SHA256_SIZE &&
+	    sha_num_bytes != REH_SHA384_SIZE) {
 		dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
 			sha_num_bytes);
 		return -EINVAL;
 	}
 
-	ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
-			       hash, sha_num_bytes / stride);
+	ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
 	if (ret) {
-		dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
-			reh_addr, sha_num_bytes / stride, ret);
+		dev_err(dev, "failed to read root entry hash\n");
 		return ret;
 	}
 
@@ -73,16 +134,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
 	return cnt;
 }
 
-#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
+#define DEVICE_ATTR_SEC_REH_RO(_name) \
 static ssize_t _name##_root_entry_hash_show(struct device *dev, \
 					    struct device_attribute *attr, \
 					    char *buf) \
-{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
+{ \
+	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+	\
+	return show_root_entry_hash(dev, csr_map->_name##_magic, \
+				    csr_map->_name##_prog_addr, \
+				    csr_map->_name##_reh_addr, \
+				    buf); \
+} \
 static DEVICE_ATTR_RO(_name##_root_entry_hash)
 
-DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(bmc);
+DEVICE_ATTR_SEC_REH_RO(sr);
+DEVICE_ATTR_SEC_REH_RO(pr);
 
 #define CSK_BIT_LEN		128U
 #define CSK_32ARRAY_SIZE	DIV_ROUND_UP(CSK_BIT_LEN, 32)
@@ -90,27 +159,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
 static ssize_t
 show_canceled_csk(struct device *dev, u32 addr, char *buf)
 {
-	unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
+	unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
 	struct m10bmc_sec *sec = dev_get_drvdata(dev);
 	DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
 	__le32 csk_le32[CSK_32ARRAY_SIZE];
 	u32 csk32[CSK_32ARRAY_SIZE];
 	int ret;
 
-	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
-	if (size % stride) {
-		dev_err(sec->dev,
-			"CSK vector size (0x%x) not aligned to stride (0x%x)\n",
-			size, stride);
-		WARN_ON_ONCE(1);
-		return -EINVAL;
-	}
-
-	ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
-			       size / stride);
+	ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
 	if (ret) {
-		dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
-			addr, size / stride, ret);
+		dev_err(sec->dev, "failed to read CSK vector\n");
 		return ret;
 	}
 
@@ -122,18 +180,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
 	return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
 }
 
-#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
+#define DEVICE_ATTR_SEC_CSK_RO(_name) \
 static ssize_t _name##_canceled_csks_show(struct device *dev, \
 					  struct device_attribute *attr, \
 					  char *buf) \
-{ return show_canceled_csk(dev, _addr, buf); } \
+{ \
+	struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+	\
+	return show_canceled_csk(dev, \
+				 csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
+				 buf); \
+} \
 static DEVICE_ATTR_RO(_name##_canceled_csks)
 
 #define CSK_VEC_OFFSET 0x34
 
-DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(bmc);
+DEVICE_ATTR_SEC_CSK_RO(sr);
+DEVICE_ATTR_SEC_CSK_RO(pr);
 
 #define FLASH_COUNT_SIZE 4096	/* count stored as inverted bit vector */
 
@@ -141,31 +206,21 @@ static ssize_t flash_count_show(struct device *dev,
 				struct device_attribute *attr, char *buf)
 {
 	struct m10bmc_sec *sec = dev_get_drvdata(dev);
-	unsigned int stride, num_bits;
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	unsigned int num_bits;
 	u8 *flash_buf;
 	int cnt, ret;
 
-	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
 	num_bits = FLASH_COUNT_SIZE * 8;
 
-	if (FLASH_COUNT_SIZE % stride) {
-		dev_err(sec->dev,
-			"FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
-			FLASH_COUNT_SIZE, stride);
-		WARN_ON_ONCE(1);
-		return -EINVAL;
-	}
-
 	flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
 	if (!flash_buf)
 		return -ENOMEM;
 
-	ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
-			       flash_buf, FLASH_COUNT_SIZE / stride);
+	ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
			      FLASH_COUNT_SIZE);
 	if (ret) {
-		dev_err(sec->dev,
-			"failed to read flash count: %x cnt %x: %d\n",
-			STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
+		dev_err(sec->dev, "failed to read flash count\n");
 		goto exit_free;
 	}
 	cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
@@ -200,25 +255,81 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
 
 static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
 {
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
 	u32 auth_result;
 
-	dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
+	dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
 
-	if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
+	if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
 		dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
 }
 
+static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
+{
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	u32 doorbell;
+	int ret;
+
+	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+	if (ret)
+		return ret;
+
+	return FIELD_GET(DRBL_RSU_STATUS, doorbell);
+}
+
+static bool rsu_status_ok(u32 status)
+{
+	return (status == RSU_STAT_NORMAL ||
+		status == RSU_STAT_NIOS_OK ||
+		status == RSU_STAT_USER_OK ||
+		status == RSU_STAT_FACTORY_OK);
+}
+
+static bool rsu_progress_done(u32 progress)
+{
+	return (progress == RSU_PROG_IDLE ||
+		progress == RSU_PROG_RSU_DONE);
+}
+
+static bool rsu_progress_busy(u32 progress)
+{
+	return (progress == RSU_PROG_AUTHENTICATING ||
+		progress == RSU_PROG_COPYING ||
+		progress == RSU_PROG_UPDATE_CANCEL ||
+		progress == RSU_PROG_PROGRAM_KEY_HASH);
+}
+
+static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
+				      u32 *progress, u32 *status)
+{
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	int ret;
+
+	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
+	if (ret)
+		return ret;
+
+	ret = sec->ops->rsu_status(sec);
+	if (ret < 0)
+		return ret;
+
+	*status = ret;
+	*progress = rsu_prog(*doorbell_reg);
+
+	return 0;
+}
+
 static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
 {
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
 	u32 doorbell;
 	int ret;
 
-	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
 	if (ret)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
-	if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
-	    rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
+	if (!rsu_progress_done(rsu_prog(doorbell))) {
 		log_error_regs(sec, doorbell);
 		return FW_UPLOAD_ERR_BUSY;
 	}
@@ -226,19 +337,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
 	return FW_UPLOAD_ERR_NONE;
 }
 
-static inline bool rsu_start_done(u32 doorbell)
+static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
 {
-	u32 status, progress;
-
-	if (doorbell & DRBL_RSU_REQUEST)
+	if (doorbell_reg & DRBL_RSU_REQUEST)
 		return false;
 
-	status = rsu_stat(doorbell);
 	if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
 		return true;
 
-	progress = rsu_prog(doorbell);
-	if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
+	if (!rsu_progress_done(progress))
 		return true;
 
 	return false;
@@ -246,11 +353,12 @@ static inline bool rsu_start_done(u32 doorbell)
 
 static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
 {
-	u32 doorbell, status;
-	int ret;
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	u32 doorbell_reg, progress, status;
+	int ret, err;
 
 	ret = regmap_update_bits(sec->m10bmc->regmap,
-				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
+				 csr_map->base + csr_map->doorbell,
 				 DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
 				 DRBL_RSU_REQUEST |
 				 FIELD_PREP(DRBL_HOST_STATUS,
@@ -258,26 +366,25 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
 	if (ret)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
-	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
-				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
-				       doorbell,
-				       rsu_start_done(doorbell),
-				       NIOS_HANDSHAKE_INTERVAL_US,
-				       NIOS_HANDSHAKE_TIMEOUT_US);
+	ret = read_poll_timeout(m10bmc_sec_progress_status, err,
+				err < 0 || rsu_start_done(doorbell_reg, progress, status),
+				NIOS_HANDSHAKE_INTERVAL_US,
+				NIOS_HANDSHAKE_TIMEOUT_US,
+				false,
+				sec, &doorbell_reg, &progress, &status);
 	if (ret == -ETIMEDOUT) {
-		log_error_regs(sec, doorbell);
+		log_error_regs(sec, doorbell_reg);
 		return FW_UPLOAD_ERR_TIMEOUT;
-	} else if (ret) {
+	} else if (err) {
 		return FW_UPLOAD_ERR_RW_ERROR;
 	}
 
-	status = rsu_stat(doorbell);
 	if (status == RSU_STAT_WEAROUT) {
 		dev_warn(sec->dev, "Excessive flash update count detected\n");
 		return FW_UPLOAD_ERR_WEAROUT;
 	} else if (status == RSU_STAT_ERASE_FAIL) {
-		log_error_regs(sec, doorbell);
+		log_error_regs(sec, doorbell_reg);
 		return FW_UPLOAD_ERR_HW_ERROR;
 	}
 
@@ -286,11 +393,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
 
 static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
 {
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
 	unsigned long poll_timeout;
 	u32 doorbell, progress;
 	int ret;
 
-	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
 	if (ret)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
@@ -300,7 +408,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
 		if (time_after(jiffies, poll_timeout))
 			break;
 
-		ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+		ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
 		if (ret)
 			return FW_UPLOAD_ERR_RW_ERROR;
 	}
@@ -319,11 +427,12 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
 
 static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
 {
-	u32 doorbell;
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	u32 doorbell_reg, status;
 	int ret;
 
 	ret = regmap_update_bits(sec->m10bmc->regmap,
-				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
+				 csr_map->base + csr_map->doorbell,
 				 DRBL_HOST_STATUS,
 				 FIELD_PREP(DRBL_HOST_STATUS,
 					    HOST_STATUS_WRITE_DONE));
@@ -331,68 +440,58 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
 	ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
-				       M10BMC_SYS_BASE + M10BMC_DOORBELL,
-				       doorbell,
-				       rsu_prog(doorbell) != RSU_PROG_READY,
+				       csr_map->base + csr_map->doorbell,
+				       doorbell_reg,
+				       rsu_prog(doorbell_reg) != RSU_PROG_READY,
 				       NIOS_HANDSHAKE_INTERVAL_US,
 				       NIOS_HANDSHAKE_TIMEOUT_US);
 	if (ret == -ETIMEDOUT) {
-		log_error_regs(sec, doorbell);
+		log_error_regs(sec, doorbell_reg);
 		return FW_UPLOAD_ERR_TIMEOUT;
 	} else if (ret) {
 		return FW_UPLOAD_ERR_RW_ERROR;
 	}
 
-	switch (rsu_stat(doorbell)) {
-	case RSU_STAT_NORMAL:
-	case RSU_STAT_NIOS_OK:
-	case RSU_STAT_USER_OK:
-	case RSU_STAT_FACTORY_OK:
-		break;
-	default:
-		log_error_regs(sec, doorbell);
+	ret = sec->ops->rsu_status(sec);
+	if (ret < 0)
+		return ret;
+	status = ret;
+
+	if (!rsu_status_ok(status)) {
+		log_error_regs(sec, doorbell_reg);
 		return FW_UPLOAD_ERR_HW_ERROR;
 	}
 
 	return FW_UPLOAD_ERR_NONE;
 }
 
-static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
 {
-	if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
+	u32 progress, status;
+
+	if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
 		return -EIO;
 
-	switch (rsu_stat(*doorbell)) {
-	case RSU_STAT_NORMAL:
-	case RSU_STAT_NIOS_OK:
-	case RSU_STAT_USER_OK:
-	case RSU_STAT_FACTORY_OK:
-		break;
-	default:
+	if (!rsu_status_ok(status))
 		return -EINVAL;
-	}
 
-	switch (rsu_prog(*doorbell)) {
-	case RSU_PROG_IDLE:
-	case RSU_PROG_RSU_DONE:
+	if (rsu_progress_done(progress))
 		return 0;
-	case RSU_PROG_AUTHENTICATING:
-	case RSU_PROG_COPYING:
-	case RSU_PROG_UPDATE_CANCEL:
-	case RSU_PROG_PROGRAM_KEY_HASH:
+
+	if (rsu_progress_busy(progress))
 		return -EAGAIN;
-	default:
-		return -EINVAL;
-	}
+
+	return -EINVAL;
 }
 
 static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
 {
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
 	u32 doorbell;
 	int ret;
 
-	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+	ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
 	if (ret)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
@@ -400,7 +499,7 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
 		return FW_UPLOAD_ERR_BUSY;
 
 	ret = regmap_update_bits(sec->m10bmc->regmap,
-				 M10BMC_SYS_BASE + M10BMC_DOORBELL,
+				 csr_map->base + csr_map->doorbell,
 				 DRBL_HOST_STATUS,
 				 FIELD_PREP(DRBL_HOST_STATUS,
 					    HOST_STATUS_ABORT_RSU));
@@ -441,19 +540,19 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
 
 #define WRITE_BLOCK_SIZE 0x4000	/* Default write-block size is 0x4000 bytes */
 
-static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
-					   u32 offset, u32 size, u32 *written)
+static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
+					      u32 offset, u32 size, u32 *written)
 {
 	struct m10bmc_sec *sec = fwl->dd_handle;
-	u32 blk_size, doorbell, extra_offset;
-	unsigned int stride, extra = 0;
+	const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+	struct intel_m10bmc *m10bmc = sec->m10bmc;
+	u32 blk_size, doorbell;
 	int ret;
 
-	stride = regmap_get_reg_stride(sec->m10bmc->regmap);
 	if (sec->cancel_request)
 		return rsu_cancel(sec);
 
-	ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+	ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
 	if (ret) {
 		return FW_UPLOAD_ERR_RW_ERROR;
 	} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
@@ -461,28 +560,12 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
 		return FW_UPLOAD_ERR_HW_ERROR;
 	}
 
-	WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
+	WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
 	blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
-	ret = regmap_bulk_write(sec->m10bmc->regmap,
-				M10BMC_STAGING_BASE + offset,
-				(void *)data + offset,
-				blk_size / stride);
+	ret = m10bmc_sec_write(sec, data, offset, blk_size);
 	if (ret)
 		return FW_UPLOAD_ERR_RW_ERROR;
 
-	/*
-	 * If blk_size is not aligned to stride, then handle the extra
-	 * bytes with regmap_write.
-	 */
-	if (blk_size % stride) {
-		extra_offset = offset + ALIGN_DOWN(blk_size, stride);
-		memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
-		ret = regmap_write(sec->m10bmc->regmap,
-				   M10BMC_STAGING_BASE + extra_offset, extra);
-		if (ret)
-			return FW_UPLOAD_ERR_RW_ERROR;
-	}
-
 	*written = blk_size;
 	return FW_UPLOAD_ERR_NONE;
 }
@@ -543,12 +626,16 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
 
 static const struct fw_upload_ops m10bmc_ops = {
 	.prepare = m10bmc_sec_prepare,
-	.write = m10bmc_sec_write,
+	.write = m10bmc_sec_fw_write,
 	.poll_complete = m10bmc_sec_poll_complete,
 	.cancel = m10bmc_sec_cancel,
 	.cleanup = m10bmc_sec_cleanup,
 };
 
+static const struct m10bmc_sec_ops m10sec_n3000_ops = {
+	.rsu_status = m10bmc_sec_n3000_rsu_status,
+};
+
 #define SEC_UPDATE_LEN_MAX 32
 static int m10bmc_sec_probe(struct platform_device *pdev)
 {
@@ -564,6 +651,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
 
 	sec->dev = &pdev->dev;
 	sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+	sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
 	dev_set_drvdata(&pdev->dev, sec);
 
 	ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
@@ -604,9 +692,11 @@ static int m10bmc_sec_remove(struct platform_device *pdev)
 static const struct platform_device_id intel_m10bmc_sec_ids[] = {
 	{
 		.name = "n3000bmc-sec-update",
+		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
 	},
 	{
 		.name = "d5005bmc-sec-update",
+		.driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
 	},
 	{ }
 };
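
Editor's note: the new m10bmc_sec_read()/m10bmc_sec_write() helpers in the diff above centralize the regmap-stride handling that used to be open-coded at each call site: bulk-transfer whole registers, then move the unaligned tail through one register-sized access. A small user-space sketch (not driver code; split_transfer() is a hypothetical helper for illustration) of the same bulk-plus-tail arithmetic:

/*
 * Standalone illustration of the stride/leftover split used by the new
 * helpers: count whole registers for the bulk op, then compute where the
 * remaining tail bytes start and how many there are.
 */
#include <stdint.h>
#include <stdio.h>

static void split_transfer(uint32_t size, uint32_t stride)
{
	uint32_t count = size / stride;              /* whole registers for the bulk op */
	uint32_t leftover_offset = count * stride;   /* where the tail starts */
	uint32_t leftover_size = size - leftover_offset;

	printf("size=%u stride=%u -> bulk count=%u, tail %u byte(s) at offset %u\n",
	       size, stride, count, leftover_size, leftover_offset);
}

int main(void)
{
	split_transfer(4096, 4);   /* FLASH_COUNT_SIZE: aligned, no tail */
	split_transfer(10, 4);     /* unaligned: 2 registers + a 2-byte tail */
	return 0;
}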