Diffstat (limited to 'drivers/firmware')
35 files changed, 5866 insertions, 288 deletions
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index bca172d42c74..6e4ed5a9c6fd 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -8,8 +8,20 @@ menu "Firmware Drivers" config ARM_PSCI_FW bool +config ARM_PSCI_CHECKER + bool "ARM PSCI checker" + depends on ARM_PSCI_FW && HOTPLUG_CPU && !TORTURE_TEST + help + Run the PSCI checker during startup. This checks that hotplug and + suspend operations work correctly when using PSCI. + + The torture tests may interfere with the PSCI checker by turning CPUs + on and off through hotplug, so for now torture tests and PSCI checker + are mutually exclusive. + config ARM_SCPI_PROTOCOL tristate "ARM System Control and Power Interface (SCPI) Message Protocol" + depends on ARM || ARM64 || COMPILE_TEST depends on MAILBOX help System Control and Power Interface (SCPI) Message Protocol is @@ -203,6 +215,21 @@ config QCOM_SCM_64 def_bool y depends on QCOM_SCM && ARM64 +config TI_SCI_PROTOCOL + tristate "TI System Control Interface (TISCI) Message Protocol" + depends on TI_MESSAGE_MANAGER + help + TI System Control Interface (TISCI) Message Protocol is used to manage + compute systems such as ARM, DSP etc with the system controller in + complex System on Chip(SoC) such as those found on certain keystone + generation SoC from TI. + + System controller provides various facilities including power + management function support. + + This protocol library is used by client drivers to use the features + provided by the system controller. + config HAVE_ARM_SMCCC bool @@ -210,5 +237,6 @@ source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" source "drivers/firmware/meson/Kconfig" +source "drivers/firmware/tegra/Kconfig" endmenu diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 898ac41fa8b3..a37f12e8d137 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -2,6 +2,7 @@ # Makefile for the linux kernel. 
# obj-$(CONFIG_ARM_PSCI_FW) += psci.o +obj-$(CONFIG_ARM_PSCI_CHECKER) += psci_checker.o obj-$(CONFIG_ARM_SCPI_PROTOCOL) += arm_scpi.o obj-$(CONFIG_ARM_SCPI_POWER_DOMAIN) += scpi_pm_domain.o obj-$(CONFIG_DMI) += dmi_scan.o @@ -20,9 +21,11 @@ obj-$(CONFIG_QCOM_SCM) += qcom_scm.o obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a +obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o obj-y += broadcom/ obj-y += meson/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_EFI) += efi/ obj-$(CONFIG_UEFI_CPER) += efi/ +obj-y += tegra/ diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index ce2bc2a38101..9ad0b1934be9 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -50,20 +50,27 @@ #define CMD_TOKEN_ID_MASK 0xff #define CMD_DATA_SIZE_SHIFT 16 #define CMD_DATA_SIZE_MASK 0x1ff +#define CMD_LEGACY_DATA_SIZE_SHIFT 20 +#define CMD_LEGACY_DATA_SIZE_MASK 0x1ff #define PACK_SCPI_CMD(cmd_id, tx_sz) \ ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ (((tx_sz) & CMD_DATA_SIZE_MASK) << CMD_DATA_SIZE_SHIFT)) #define ADD_SCPI_TOKEN(cmd, token) \ ((cmd) |= (((token) & CMD_TOKEN_ID_MASK) << CMD_TOKEN_ID_SHIFT)) +#define PACK_LEGACY_SCPI_CMD(cmd_id, tx_sz) \ + ((((cmd_id) & CMD_ID_MASK) << CMD_ID_SHIFT) | \ + (((tx_sz) & CMD_LEGACY_DATA_SIZE_MASK) << CMD_LEGACY_DATA_SIZE_SHIFT)) #define CMD_SIZE(cmd) (((cmd) >> CMD_DATA_SIZE_SHIFT) & CMD_DATA_SIZE_MASK) +#define CMD_LEGACY_SIZE(cmd) (((cmd) >> CMD_LEGACY_DATA_SIZE_SHIFT) & \ + CMD_LEGACY_DATA_SIZE_MASK) #define CMD_UNIQ_MASK (CMD_TOKEN_ID_MASK << CMD_TOKEN_ID_SHIFT | CMD_ID_MASK) #define CMD_XTRACT_UNIQ(cmd) ((cmd) & CMD_UNIQ_MASK) #define SCPI_SLOT 0 #define MAX_DVFS_DOMAINS 8 -#define MAX_DVFS_OPPS 8 +#define MAX_DVFS_OPPS 16 #define DVFS_LATENCY(hdr) (le32_to_cpu(hdr) >> 16) #define DVFS_OPP_COUNT(hdr) ((le32_to_cpu(hdr) >> 8) & 0xff) @@ -99,6 +106,7 @@ enum scpi_error_codes { SCPI_ERR_MAX }; +/* SCPI Standard commands */ enum scpi_std_cmd { SCPI_CMD_INVALID = 0x00, SCPI_CMD_SCPI_READY = 0x01, @@ -132,6 +140,108 @@ enum scpi_std_cmd { SCPI_CMD_COUNT }; +/* SCPI Legacy Commands */ +enum legacy_scpi_std_cmd { + LEGACY_SCPI_CMD_INVALID = 0x00, + LEGACY_SCPI_CMD_SCPI_READY = 0x01, + LEGACY_SCPI_CMD_SCPI_CAPABILITIES = 0x02, + LEGACY_SCPI_CMD_EVENT = 0x03, + LEGACY_SCPI_CMD_SET_CSS_PWR_STATE = 0x04, + LEGACY_SCPI_CMD_GET_CSS_PWR_STATE = 0x05, + LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT = 0x06, + LEGACY_SCPI_CMD_GET_PWR_STATE_STAT = 0x07, + LEGACY_SCPI_CMD_SYS_PWR_STATE = 0x08, + LEGACY_SCPI_CMD_L2_READY = 0x09, + LEGACY_SCPI_CMD_SET_AP_TIMER = 0x0a, + LEGACY_SCPI_CMD_CANCEL_AP_TIME = 0x0b, + LEGACY_SCPI_CMD_DVFS_CAPABILITIES = 0x0c, + LEGACY_SCPI_CMD_GET_DVFS_INFO = 0x0d, + LEGACY_SCPI_CMD_SET_DVFS = 0x0e, + LEGACY_SCPI_CMD_GET_DVFS = 0x0f, + LEGACY_SCPI_CMD_GET_DVFS_STAT = 0x10, + LEGACY_SCPI_CMD_SET_RTC = 0x11, + LEGACY_SCPI_CMD_GET_RTC = 0x12, + LEGACY_SCPI_CMD_CLOCK_CAPABILITIES = 0x13, + LEGACY_SCPI_CMD_SET_CLOCK_INDEX = 0x14, + LEGACY_SCPI_CMD_SET_CLOCK_VALUE = 0x15, + LEGACY_SCPI_CMD_GET_CLOCK_VALUE = 0x16, + LEGACY_SCPI_CMD_PSU_CAPABILITIES = 0x17, + LEGACY_SCPI_CMD_SET_PSU = 0x18, + LEGACY_SCPI_CMD_GET_PSU = 0x19, + LEGACY_SCPI_CMD_SENSOR_CAPABILITIES = 0x1a, + LEGACY_SCPI_CMD_SENSOR_INFO = 0x1b, + LEGACY_SCPI_CMD_SENSOR_VALUE = 0x1c, + LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC = 0x1d, + LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS = 0x1e, + LEGACY_SCPI_CMD_SENSOR_ASYNC_VALUE = 0x1f, + 
LEGACY_SCPI_CMD_COUNT +}; + +/* List all commands that are required to go through the high priority link */ +static int legacy_hpriority_cmds[] = { + LEGACY_SCPI_CMD_GET_CSS_PWR_STATE, + LEGACY_SCPI_CMD_CFG_PWR_STATE_STAT, + LEGACY_SCPI_CMD_GET_PWR_STATE_STAT, + LEGACY_SCPI_CMD_SET_DVFS, + LEGACY_SCPI_CMD_GET_DVFS, + LEGACY_SCPI_CMD_SET_RTC, + LEGACY_SCPI_CMD_GET_RTC, + LEGACY_SCPI_CMD_SET_CLOCK_INDEX, + LEGACY_SCPI_CMD_SET_CLOCK_VALUE, + LEGACY_SCPI_CMD_GET_CLOCK_VALUE, + LEGACY_SCPI_CMD_SET_PSU, + LEGACY_SCPI_CMD_GET_PSU, + LEGACY_SCPI_CMD_SENSOR_CFG_PERIODIC, + LEGACY_SCPI_CMD_SENSOR_CFG_BOUNDS, +}; + +/* List all commands used by this driver, used as indexes */ +enum scpi_drv_cmds { + CMD_SCPI_CAPABILITIES = 0, + CMD_GET_CLOCK_INFO, + CMD_GET_CLOCK_VALUE, + CMD_SET_CLOCK_VALUE, + CMD_GET_DVFS, + CMD_SET_DVFS, + CMD_GET_DVFS_INFO, + CMD_SENSOR_CAPABILITIES, + CMD_SENSOR_INFO, + CMD_SENSOR_VALUE, + CMD_SET_DEVICE_PWR_STATE, + CMD_GET_DEVICE_PWR_STATE, + CMD_MAX_COUNT, +}; + +static int scpi_std_commands[CMD_MAX_COUNT] = { + SCPI_CMD_SCPI_CAPABILITIES, + SCPI_CMD_GET_CLOCK_INFO, + SCPI_CMD_GET_CLOCK_VALUE, + SCPI_CMD_SET_CLOCK_VALUE, + SCPI_CMD_GET_DVFS, + SCPI_CMD_SET_DVFS, + SCPI_CMD_GET_DVFS_INFO, + SCPI_CMD_SENSOR_CAPABILITIES, + SCPI_CMD_SENSOR_INFO, + SCPI_CMD_SENSOR_VALUE, + SCPI_CMD_SET_DEVICE_PWR_STATE, + SCPI_CMD_GET_DEVICE_PWR_STATE, +}; + +static int scpi_legacy_commands[CMD_MAX_COUNT] = { + LEGACY_SCPI_CMD_SCPI_CAPABILITIES, + -1, /* GET_CLOCK_INFO */ + LEGACY_SCPI_CMD_GET_CLOCK_VALUE, + LEGACY_SCPI_CMD_SET_CLOCK_VALUE, + LEGACY_SCPI_CMD_GET_DVFS, + LEGACY_SCPI_CMD_SET_DVFS, + LEGACY_SCPI_CMD_GET_DVFS_INFO, + LEGACY_SCPI_CMD_SENSOR_CAPABILITIES, + LEGACY_SCPI_CMD_SENSOR_INFO, + LEGACY_SCPI_CMD_SENSOR_VALUE, + -1, /* SET_DEVICE_PWR_STATE */ + -1, /* GET_DEVICE_PWR_STATE */ +}; + struct scpi_xfer { u32 slot; /* has to be first element */ u32 cmd; @@ -160,7 +270,10 @@ struct scpi_chan { struct scpi_drvinfo { u32 protocol_version; u32 firmware_version; + bool is_legacy; int num_chans; + int *commands; + DECLARE_BITMAP(cmd_priority, LEGACY_SCPI_CMD_COUNT); atomic_t next_chan; struct scpi_ops *scpi_ops; struct scpi_chan *channels; @@ -177,6 +290,11 @@ struct scpi_shared_mem { u8 payload[0]; } __packed; +struct legacy_scpi_shared_mem { + __le32 status; + u8 payload[0]; +} __packed; + struct scp_capabilities { __le32 protocol_version; __le32 event_version; @@ -202,6 +320,12 @@ struct clk_set_value { __le32 rate; } __packed; +struct legacy_clk_set_value { + __le32 rate; + __le16 id; + __le16 reserved; +} __packed; + struct dvfs_info { __le32 header; struct { @@ -273,19 +397,43 @@ static void scpi_process_cmd(struct scpi_chan *ch, u32 cmd) return; } - list_for_each_entry(t, &ch->rx_pending, node) - if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) { - list_del(&t->node); - match = t; - break; - } + /* Command type is not replied by the SCP Firmware in legacy Mode + * We should consider that command is the head of pending RX commands + * if the list is not empty. In TX only mode, the list would be empty. 
+ */ + if (scpi_info->is_legacy) { + match = list_first_entry(&ch->rx_pending, struct scpi_xfer, + node); + list_del(&match->node); + } else { + list_for_each_entry(t, &ch->rx_pending, node) + if (CMD_XTRACT_UNIQ(t->cmd) == CMD_XTRACT_UNIQ(cmd)) { + list_del(&t->node); + match = t; + break; + } + } /* check if wait_for_completion is in progress or timed-out */ if (match && !completion_done(&match->done)) { - struct scpi_shared_mem *mem = ch->rx_payload; - unsigned int len = min(match->rx_len, CMD_SIZE(cmd)); + unsigned int len; + + if (scpi_info->is_legacy) { + struct legacy_scpi_shared_mem *mem = ch->rx_payload; + + /* RX Length is not replied by the legacy Firmware */ + len = match->rx_len; + + match->status = le32_to_cpu(mem->status); + memcpy_fromio(match->rx_buf, mem->payload, len); + } else { + struct scpi_shared_mem *mem = ch->rx_payload; + + len = min(match->rx_len, CMD_SIZE(cmd)); + + match->status = le32_to_cpu(mem->status); + memcpy_fromio(match->rx_buf, mem->payload, len); + } - match->status = le32_to_cpu(mem->status); - memcpy_fromio(match->rx_buf, mem->payload, len); if (match->rx_len > len) memset(match->rx_buf + len, 0, match->rx_len - len); complete(&match->done); @@ -297,7 +445,10 @@ static void scpi_handle_remote_msg(struct mbox_client *c, void *msg) { struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); struct scpi_shared_mem *mem = ch->rx_payload; - u32 cmd = le32_to_cpu(mem->command); + u32 cmd = 0; + + if (!scpi_info->is_legacy) + cmd = le32_to_cpu(mem->command); scpi_process_cmd(ch, cmd); } @@ -309,8 +460,13 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) struct scpi_chan *ch = container_of(c, struct scpi_chan, cl); struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload; - if (t->tx_buf) - memcpy_toio(mem->payload, t->tx_buf, t->tx_len); + if (t->tx_buf) { + if (scpi_info->is_legacy) + memcpy_toio(ch->tx_payload, t->tx_buf, t->tx_len); + else + memcpy_toio(mem->payload, t->tx_buf, t->tx_len); + } + if (t->rx_buf) { if (!(++ch->token)) ++ch->token; @@ -319,7 +475,9 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg) list_add_tail(&t->node, &ch->rx_pending); spin_unlock_irqrestore(&ch->rx_lock, flags); } - mem->command = cpu_to_le32(t->cmd); + + if (!scpi_info->is_legacy) + mem->command = cpu_to_le32(t->cmd); } static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch) @@ -344,23 +502,38 @@ static void put_scpi_xfer(struct scpi_xfer *t, struct scpi_chan *ch) mutex_unlock(&ch->xfers_lock); } -static int scpi_send_message(u8 cmd, void *tx_buf, unsigned int tx_len, +static int scpi_send_message(u8 idx, void *tx_buf, unsigned int tx_len, void *rx_buf, unsigned int rx_len) { int ret; u8 chan; + u8 cmd; struct scpi_xfer *msg; struct scpi_chan *scpi_chan; - chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans; + if (scpi_info->commands[idx] < 0) + return -EOPNOTSUPP; + + cmd = scpi_info->commands[idx]; + + if (scpi_info->is_legacy) + chan = test_bit(cmd, scpi_info->cmd_priority) ? 
1 : 0; + else + chan = atomic_inc_return(&scpi_info->next_chan) % + scpi_info->num_chans; scpi_chan = scpi_info->channels + chan; msg = get_scpi_xfer(scpi_chan); if (!msg) return -ENOMEM; - msg->slot = BIT(SCPI_SLOT); - msg->cmd = PACK_SCPI_CMD(cmd, tx_len); + if (scpi_info->is_legacy) { + msg->cmd = PACK_LEGACY_SCPI_CMD(cmd, tx_len); + msg->slot = msg->cmd; + } else { + msg->slot = BIT(SCPI_SLOT); + msg->cmd = PACK_SCPI_CMD(cmd, tx_len); + } msg->tx_buf = tx_buf; msg->tx_len = tx_len; msg->rx_buf = rx_buf; @@ -397,7 +570,7 @@ scpi_clk_get_range(u16 clk_id, unsigned long *min, unsigned long *max) struct clk_get_info clk; __le16 le_clk_id = cpu_to_le16(clk_id); - ret = scpi_send_message(SCPI_CMD_GET_CLOCK_INFO, &le_clk_id, + ret = scpi_send_message(CMD_GET_CLOCK_INFO, &le_clk_id, sizeof(le_clk_id), &clk, sizeof(clk)); if (!ret) { *min = le32_to_cpu(clk.min_rate); @@ -412,8 +585,9 @@ static unsigned long scpi_clk_get_val(u16 clk_id) struct clk_get_value clk; __le16 le_clk_id = cpu_to_le16(clk_id); - ret = scpi_send_message(SCPI_CMD_GET_CLOCK_VALUE, &le_clk_id, + ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id, sizeof(le_clk_id), &clk, sizeof(clk)); + return ret ? ret : le32_to_cpu(clk.rate); } @@ -425,7 +599,19 @@ static int scpi_clk_set_val(u16 clk_id, unsigned long rate) .rate = cpu_to_le32(rate) }; - return scpi_send_message(SCPI_CMD_SET_CLOCK_VALUE, &clk, sizeof(clk), + return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk), + &stat, sizeof(stat)); +} + +static int legacy_scpi_clk_set_val(u16 clk_id, unsigned long rate) +{ + int stat; + struct legacy_clk_set_value clk = { + .id = cpu_to_le16(clk_id), + .rate = cpu_to_le32(rate) + }; + + return scpi_send_message(CMD_SET_CLOCK_VALUE, &clk, sizeof(clk), &stat, sizeof(stat)); } @@ -434,8 +620,9 @@ static int scpi_dvfs_get_idx(u8 domain) int ret; u8 dvfs_idx; - ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain), + ret = scpi_send_message(CMD_GET_DVFS, &domain, sizeof(domain), &dvfs_idx, sizeof(dvfs_idx)); + return ret ? 
ret : dvfs_idx; } @@ -444,7 +631,7 @@ static int scpi_dvfs_set_idx(u8 domain, u8 index) int stat; struct dvfs_set dvfs = {domain, index}; - return scpi_send_message(SCPI_CMD_SET_DVFS, &dvfs, sizeof(dvfs), + return scpi_send_message(CMD_SET_DVFS, &dvfs, sizeof(dvfs), &stat, sizeof(stat)); } @@ -468,9 +655,8 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (scpi_info->dvfs[domain]) /* data already populated */ return scpi_info->dvfs[domain]; - ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain), + ret = scpi_send_message(CMD_GET_DVFS_INFO, &domain, sizeof(domain), &buf, sizeof(buf)); - if (ret) return ERR_PTR(ret); @@ -503,7 +689,7 @@ static int scpi_sensor_get_capability(u16 *sensors) struct sensor_capabilities cap_buf; int ret; - ret = scpi_send_message(SCPI_CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf, + ret = scpi_send_message(CMD_SENSOR_CAPABILITIES, NULL, 0, &cap_buf, sizeof(cap_buf)); if (!ret) *sensors = le16_to_cpu(cap_buf.sensors); @@ -517,7 +703,7 @@ static int scpi_sensor_get_info(u16 sensor_id, struct scpi_sensor_info *info) struct _scpi_sensor_info _info; int ret; - ret = scpi_send_message(SCPI_CMD_SENSOR_INFO, &id, sizeof(id), + ret = scpi_send_message(CMD_SENSOR_INFO, &id, sizeof(id), &_info, sizeof(_info)); if (!ret) { memcpy(info, &_info, sizeof(*info)); @@ -533,13 +719,19 @@ static int scpi_sensor_get_value(u16 sensor, u64 *val) struct sensor_value buf; int ret; - ret = scpi_send_message(SCPI_CMD_SENSOR_VALUE, &id, sizeof(id), + ret = scpi_send_message(CMD_SENSOR_VALUE, &id, sizeof(id), &buf, sizeof(buf)); - if (!ret) + if (ret) + return ret; + + if (scpi_info->is_legacy) + /* only 32-bits supported, hi_val can be junk */ + *val = le32_to_cpu(buf.lo_val); + else *val = (u64)le32_to_cpu(buf.hi_val) << 32 | le32_to_cpu(buf.lo_val); - return ret; + return 0; } static int scpi_device_get_power_state(u16 dev_id) @@ -548,7 +740,7 @@ static int scpi_device_get_power_state(u16 dev_id) u8 pstate; __le16 id = cpu_to_le16(dev_id); - ret = scpi_send_message(SCPI_CMD_GET_DEVICE_PWR_STATE, &id, + ret = scpi_send_message(CMD_GET_DEVICE_PWR_STATE, &id, sizeof(id), &pstate, sizeof(pstate)); return ret ? 
ret : pstate; } @@ -561,7 +753,7 @@ static int scpi_device_set_power_state(u16 dev_id, u8 pstate) .pstate = pstate, }; - return scpi_send_message(SCPI_CMD_SET_DEVICE_PWR_STATE, &dev_set, + return scpi_send_message(CMD_SET_DEVICE_PWR_STATE, &dev_set, sizeof(dev_set), &stat, sizeof(stat)); } @@ -591,12 +783,16 @@ static int scpi_init_versions(struct scpi_drvinfo *info) int ret; struct scp_capabilities caps; - ret = scpi_send_message(SCPI_CMD_SCPI_CAPABILITIES, NULL, 0, + ret = scpi_send_message(CMD_SCPI_CAPABILITIES, NULL, 0, &caps, sizeof(caps)); if (!ret) { info->protocol_version = le32_to_cpu(caps.protocol_version); info->firmware_version = le32_to_cpu(caps.platform_version); } + /* Ignore error if not implemented */ + if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + return 0; + return ret; } @@ -681,6 +877,11 @@ static int scpi_alloc_xfer_list(struct device *dev, struct scpi_chan *ch) return 0; } +static const struct of_device_id legacy_scpi_of_match[] = { + {.compatible = "arm,scpi-pre-1.0"}, + {}, +}; + static int scpi_probe(struct platform_device *pdev) { int count, idx, ret; @@ -693,6 +894,9 @@ static int scpi_probe(struct platform_device *pdev) if (!scpi_info) return -ENOMEM; + if (of_match_device(legacy_scpi_of_match, &pdev->dev)) + scpi_info->is_legacy = true; + count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { dev_err(dev, "no mboxes property in '%s'\n", np->full_name); @@ -755,8 +959,21 @@ err: scpi_info->channels = scpi_chan; scpi_info->num_chans = count; + scpi_info->commands = scpi_std_commands; + platform_set_drvdata(pdev, scpi_info); + if (scpi_info->is_legacy) { + /* Replace with legacy variants */ + scpi_ops.clk_set_val = legacy_scpi_clk_set_val; + scpi_info->commands = scpi_legacy_commands; + + /* Fill priority bitmap */ + for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) + set_bit(legacy_hpriority_cmds[idx], + scpi_info->cmd_priority); + } + ret = scpi_init_versions(scpi_info); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); @@ -781,6 +998,7 @@ err: static const struct of_device_id scpi_of_match[] = { {.compatible = "arm,scpi"}, + {.compatible = "arm,scpi-pre-1.0"}, {}, }; diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 88bebe1968b7..54be60ead08f 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -560,7 +560,7 @@ static int __init dmi_present(const u8 *buf) dmi_ver >> 16, (dmi_ver >> 8) & 0xFF); } dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string)); - printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string); + pr_info("DMI: %s\n", dmi_ids_string); return 0; } } @@ -588,7 +588,7 @@ static int __init dmi_smbios3_present(const u8 *buf) dmi_ver >> 16, (dmi_ver >> 8) & 0xFF, dmi_ver & 0xFF); dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string)); - pr_debug("DMI: %s\n", dmi_ids_string); + pr_info("DMI: %s\n", dmi_ids_string); return 0; } } diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index c981be17d3c0..2e78b0b96d74 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -129,7 +129,25 @@ config EFI_TEST Say Y here to enable the runtime services support via /dev/efi_test. If unsure, say N. +config APPLE_PROPERTIES + bool "Apple Device Properties" + depends on EFI_STUB && X86 + select EFI_DEV_PATH_PARSER + select UCS2_STRING + help + Retrieve properties from EFI on Apple Macs and assign them to + devices, allowing for improved support of Apple hardware. 
+ Properties that would otherwise be missing include the + Thunderbolt Device ROM and GPU configuration data. + + If unsure, say Y if you have a Mac. Otherwise N. + endmenu config UEFI_CPER bool + +config EFI_DEV_PATH_PARSER + bool + depends on ACPI + default n diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index c8a439f6d715..ad67342313ed 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -21,6 +21,8 @@ obj-$(CONFIG_EFI_STUB) += libstub/ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o obj-$(CONFIG_EFI_BOOTLOADER_CONTROL) += efibc.o obj-$(CONFIG_EFI_TEST) += test/ +obj-$(CONFIG_EFI_DEV_PATH_PARSER) += dev-path-parser.o +obj-$(CONFIG_APPLE_PROPERTIES) += apple-properties.o arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c new file mode 100644 index 000000000000..c473f4c5ca34 --- /dev/null +++ b/drivers/firmware/efi/apple-properties.c @@ -0,0 +1,248 @@ +/* + * apple-properties.c - EFI device properties on Macs + * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + */ + +#define pr_fmt(fmt) "apple-properties: " fmt + +#include <linux/bootmem.h> +#include <linux/dmi.h> +#include <linux/efi.h> +#include <linux/property.h> +#include <linux/slab.h> +#include <linux/ucs2_string.h> +#include <asm/setup.h> + +static bool dump_properties __initdata; + +static int __init dump_properties_enable(char *arg) +{ + dump_properties = true; + return 0; +} + +__setup("dump_apple_properties", dump_properties_enable); + +struct dev_header { + u32 len; + u32 prop_count; + struct efi_dev_path path[0]; + /* + * followed by key/value pairs, each key and value preceded by u32 len, + * len includes itself, value may be empty (in which case its len is 4) + */ +}; + +struct properties_header { + u32 len; + u32 version; + u32 dev_count; + struct dev_header dev_header[0]; +}; + +static u8 one __initdata = 1; + +static void __init unmarshal_key_value_pairs(struct dev_header *dev_header, + struct device *dev, void *ptr, + struct property_entry entry[]) +{ + int i; + + for (i = 0; i < dev_header->prop_count; i++) { + int remaining = dev_header->len - (ptr - (void *)dev_header); + u32 key_len, val_len; + char *key; + + if (sizeof(key_len) > remaining) + break; + + key_len = *(typeof(key_len) *)ptr; + if (key_len + sizeof(val_len) > remaining || + key_len < sizeof(key_len) + sizeof(efi_char16_t) || + *(efi_char16_t *)(ptr + sizeof(key_len)) == 0) { + dev_err(dev, "invalid property name len at %#zx\n", + ptr - (void *)dev_header); + break; + } + + val_len = *(typeof(val_len) *)(ptr + key_len); + if (key_len + val_len > remaining || + val_len < sizeof(val_len)) { + dev_err(dev, "invalid property val len at %#zx\n", + ptr - (void *)dev_header + key_len); + break; + } + + /* 4 bytes to accommodate UTF-8 code points + null byte */ + key = kzalloc((key_len - 
sizeof(key_len)) * 4 + 1, GFP_KERNEL); + if (!key) { + dev_err(dev, "cannot allocate property name\n"); + break; + } + ucs2_as_utf8(key, ptr + sizeof(key_len), + key_len - sizeof(key_len)); + + entry[i].name = key; + entry[i].is_array = true; + entry[i].length = val_len - sizeof(val_len); + entry[i].pointer.raw_data = ptr + key_len + sizeof(val_len); + if (!entry[i].length) { + /* driver core doesn't accept empty properties */ + entry[i].length = 1; + entry[i].pointer.raw_data = &one; + } + + if (dump_properties) { + dev_info(dev, "property: %s\n", entry[i].name); + print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET, + 16, 1, entry[i].pointer.raw_data, + entry[i].length, true); + } + + ptr += key_len + val_len; + } + + if (i != dev_header->prop_count) { + dev_err(dev, "got %d device properties, expected %u\n", i, + dev_header->prop_count); + print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET, + 16, 1, dev_header, dev_header->len, true); + return; + } + + dev_info(dev, "assigning %d device properties\n", i); +} + +static int __init unmarshal_devices(struct properties_header *properties) +{ + size_t offset = offsetof(struct properties_header, dev_header[0]); + + while (offset + sizeof(struct dev_header) < properties->len) { + struct dev_header *dev_header = (void *)properties + offset; + struct property_entry *entry = NULL; + struct device *dev; + size_t len; + int ret, i; + void *ptr; + + if (offset + dev_header->len > properties->len || + dev_header->len <= sizeof(*dev_header)) { + pr_err("invalid len in dev_header at %#zx\n", offset); + return -EINVAL; + } + + ptr = dev_header->path; + len = dev_header->len - sizeof(*dev_header); + + dev = efi_get_device_by_path((struct efi_dev_path **)&ptr, &len); + if (IS_ERR(dev)) { + pr_err("device path parse error %ld at %#zx:\n", + PTR_ERR(dev), ptr - (void *)dev_header); + print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET, + 16, 1, dev_header, dev_header->len, true); + dev = NULL; + goto skip_device; + } + + entry = kcalloc(dev_header->prop_count + 1, sizeof(*entry), + GFP_KERNEL); + if (!entry) { + dev_err(dev, "cannot allocate properties\n"); + goto skip_device; + } + + unmarshal_key_value_pairs(dev_header, dev, ptr, entry); + if (!entry[0].name) + goto skip_device; + + ret = device_add_properties(dev, entry); /* makes deep copy */ + if (ret) + dev_err(dev, "error %d assigning properties\n", ret); + + for (i = 0; entry[i].name; i++) + kfree(entry[i].name); + +skip_device: + kfree(entry); + put_device(dev); + offset += dev_header->len; + } + + return 0; +} + +static int __init map_properties(void) +{ + struct properties_header *properties; + struct setup_data *data; + u32 data_len; + u64 pa_data; + int ret; + + if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc.") && + !dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.")) + return 0; + + pa_data = boot_params.hdr.setup_data; + while (pa_data) { + data = ioremap(pa_data, sizeof(*data)); + if (!data) { + pr_err("cannot map setup_data header\n"); + return -ENOMEM; + } + + if (data->type != SETUP_APPLE_PROPERTIES) { + pa_data = data->next; + iounmap(data); + continue; + } + + data_len = data->len; + iounmap(data); + + data = ioremap(pa_data, sizeof(*data) + data_len); + if (!data) { + pr_err("cannot map setup_data payload\n"); + return -ENOMEM; + } + + properties = (struct properties_header *)data->data; + if (properties->version != 1) { + pr_err("unsupported version:\n"); + print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET, + 16, 1, properties, data_len, true); + ret = -ENOTSUPP; + } else if 
(properties->len != data_len) { + pr_err("length mismatch, expected %u\n", data_len); + print_hex_dump(KERN_ERR, pr_fmt(), DUMP_PREFIX_OFFSET, + 16, 1, properties, data_len, true); + ret = -EINVAL; + } else + ret = unmarshal_devices(properties); + + /* + * Can only free the setup_data payload but not its header + * to avoid breaking the chain of ->next pointers. + */ + data->len = 0; + iounmap(data); + free_bootmem_late(pa_data + sizeof(*data), data_len); + + return ret; + } + return 0; +} + +fs_initcall(map_properties); diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index 8efe13075c92..1027d7b44358 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c @@ -244,11 +244,12 @@ void __init efi_init(void) "Unexpected EFI_MEMORY_DESCRIPTOR version %ld", efi.memmap.desc_version); - if (uefi_init() < 0) + if (uefi_init() < 0) { + efi_memmap_unmap(); return; + } reserve_regions(); - efi_memattr_init(); efi_esrt_init(); efi_memmap_unmap(); diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 7c75a8d9091a..349dc3e1e52e 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -39,7 +39,7 @@ static struct mm_struct efi_mm = { .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), }; -#ifdef CONFIG_ARM64_PTDUMP +#ifdef CONFIG_ARM64_PTDUMP_DEBUGFS #include <asm/ptdump.h> static struct ptdump_info efi_ptdump_info = { @@ -53,7 +53,7 @@ static struct ptdump_info efi_ptdump_info = { static int __init ptdump_init(void) { - return ptdump_register(&efi_ptdump_info, "efi_page_tables"); + return ptdump_debugfs_register(&efi_ptdump_info, "efi_page_tables"); } device_initcall(ptdump_init); diff --git a/drivers/firmware/efi/dev-path-parser.c b/drivers/firmware/efi/dev-path-parser.c new file mode 100644 index 000000000000..85d1834ee9b7 --- /dev/null +++ b/drivers/firmware/efi/dev-path-parser.c @@ -0,0 +1,203 @@ +/* + * dev-path-parser.c - EFI Device Path parser + * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License (version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/acpi.h> +#include <linux/efi.h> +#include <linux/pci.h> + +struct acpi_hid_uid { + struct acpi_device_id hid[2]; + char uid[11]; /* UINT_MAX + null byte */ +}; + +static int __init match_acpi_dev(struct device *dev, void *data) +{ + struct acpi_hid_uid hid_uid = *(struct acpi_hid_uid *)data; + struct acpi_device *adev = to_acpi_device(dev); + + if (acpi_match_device_ids(adev, hid_uid.hid)) + return 0; + + if (adev->pnp.unique_id) + return !strcmp(adev->pnp.unique_id, hid_uid.uid); + else + return !strcmp("0", hid_uid.uid); +} + +static long __init parse_acpi_path(struct efi_dev_path *node, + struct device *parent, struct device **child) +{ + struct acpi_hid_uid hid_uid = {}; + struct device *phys_dev; + + if (node->length != 12) + return -EINVAL; + + sprintf(hid_uid.hid[0].id, "%c%c%c%04X", + 'A' + ((node->acpi.hid >> 10) & 0x1f) - 1, + 'A' + ((node->acpi.hid >> 5) & 0x1f) - 1, + 'A' + ((node->acpi.hid >> 0) & 0x1f) - 1, + node->acpi.hid >> 16); + sprintf(hid_uid.uid, "%u", node->acpi.uid); + + *child = bus_find_device(&acpi_bus_type, NULL, &hid_uid, + match_acpi_dev); + if (!*child) + return -ENODEV; + + phys_dev = acpi_get_first_physical_node(to_acpi_device(*child)); + if (phys_dev) { + get_device(phys_dev); + put_device(*child); + *child = phys_dev; + } + + return 0; +} + +static int __init match_pci_dev(struct device *dev, void *data) +{ + unsigned int devfn = *(unsigned int *)data; + + return dev_is_pci(dev) && to_pci_dev(dev)->devfn == devfn; +} + +static long __init parse_pci_path(struct efi_dev_path *node, + struct device *parent, struct device **child) +{ + unsigned int devfn; + + if (node->length != 6) + return -EINVAL; + if (!parent) + return -EINVAL; + + devfn = PCI_DEVFN(node->pci.dev, node->pci.fn); + + *child = device_find_child(parent, &devfn, match_pci_dev); + if (!*child) + return -ENODEV; + + return 0; +} + +/* + * Insert parsers for further node types here. + * + * Each parser takes a pointer to the @node and to the @parent (will be NULL + * for the first device path node). If a device corresponding to @node was + * found below @parent, its reference count should be incremented and the + * device returned in @child. + * + * The return value should be 0 on success or a negative int on failure. + * The special return values 0x01 (EFI_DEV_END_INSTANCE) and 0xFF + * (EFI_DEV_END_ENTIRE) signal the end of the device path, only + * parse_end_path() is supposed to return this. + * + * Be sure to validate the node length and contents before commencing the + * search for a device. + */ + +static long __init parse_end_path(struct efi_dev_path *node, + struct device *parent, struct device **child) +{ + if (node->length != 4) + return -EINVAL; + if (node->sub_type != EFI_DEV_END_INSTANCE && + node->sub_type != EFI_DEV_END_ENTIRE) + return -EINVAL; + if (!parent) + return -ENODEV; + + *child = get_device(parent); + return node->sub_type; +} + +/** + * efi_get_device_by_path - find device by EFI Device Path + * @node: EFI Device Path + * @len: maximum length of EFI Device Path in bytes + * + * Parse a series of EFI Device Path nodes at @node and find the corresponding + * device. If the device was found, its reference count is incremented and a + * pointer to it is returned. The caller needs to drop the reference with + * put_device() after use. The @node pointer is updated to point to the + * location immediately after the "End of Hardware Device Path" node. 
+ * + * If another Device Path instance follows, @len is decremented by the number + * of bytes consumed. Otherwise @len is set to %0. + * + * If a Device Path node is malformed or its corresponding device is not found, + * @node is updated to point to this offending node and an ERR_PTR is returned. + * + * If @len is initially %0, the function returns %NULL. Thus, to iterate over + * all instances in a path, the following idiom may be used: + * + * while (!IS_ERR_OR_NULL(dev = efi_get_device_by_path(&node, &len))) { + * // do something with dev + * put_device(dev); + * } + * if (IS_ERR(dev)) + * // report error + * + * Devices can only be found if they're already instantiated. Most buses + * instantiate devices in the "subsys" initcall level, hence the earliest + * initcall level in which this function should be called is "fs". + * + * Returns the device on success or + * %ERR_PTR(-ENODEV) if no device was found, + * %ERR_PTR(-EINVAL) if a node is malformed or exceeds @len, + * %ERR_PTR(-ENOTSUPP) if support for a node type is not yet implemented. + */ +struct device * __init efi_get_device_by_path(struct efi_dev_path **node, + size_t *len) +{ + struct device *parent = NULL, *child; + long ret = 0; + + if (!*len) + return NULL; + + while (!ret) { + if (*len < 4 || *len < (*node)->length) + ret = -EINVAL; + else if ((*node)->type == EFI_DEV_ACPI && + (*node)->sub_type == EFI_DEV_BASIC_ACPI) + ret = parse_acpi_path(*node, parent, &child); + else if ((*node)->type == EFI_DEV_HW && + (*node)->sub_type == EFI_DEV_PCI) + ret = parse_pci_path(*node, parent, &child); + else if (((*node)->type == EFI_DEV_END_PATH || + (*node)->type == EFI_DEV_END_PATH2)) + ret = parse_end_path(*node, parent, &child); + else + ret = -ENOTSUPP; + + put_device(parent); + if (ret < 0) + return ERR_PTR(ret); + + parent = child; + *node = (void *)*node + (*node)->length; + *len -= (*node)->length; + } + + if (ret == EFI_DEV_END_ENTIRE) + *len = 0; + + return child; +} diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 1ac199cd75e7..e7d404059b73 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -23,7 +23,10 @@ #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/io.h> +#include <linux/kexec.h> #include <linux/platform_device.h> +#include <linux/random.h> +#include <linux/reboot.h> #include <linux/slab.h> #include <linux/acpi.h> #include <linux/ucs2_string.h> @@ -48,6 +51,7 @@ struct efi __read_mostly efi = { .esrt = EFI_INVALID_TABLE_ADDR, .properties_table = EFI_INVALID_TABLE_ADDR, .mem_attr_table = EFI_INVALID_TABLE_ADDR, + .rng_seed = EFI_INVALID_TABLE_ADDR, }; EXPORT_SYMBOL(efi); @@ -259,8 +263,10 @@ static __init int efivar_ssdt_load(void) } data = kmalloc(size, GFP_KERNEL); - if (!data) + if (!data) { + ret = -ENOMEM; goto free_entry; + } ret = efivar_entry_get(entry, NULL, &size, data); if (ret) { @@ -438,6 +444,7 @@ static __initdata efi_config_table_type_t common_tables[] = { {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt}, {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table}, {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table}, + {LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed}, {NULL_GUID, NULL, NULL}, }; @@ -499,6 +506,31 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, pr_cont("\n"); set_bit(EFI_CONFIG_TABLES, &efi.flags); + if (efi.rng_seed != EFI_INVALID_TABLE_ADDR) { + struct linux_efi_random_seed *seed; + u32 size = 0; + + seed = early_memremap(efi.rng_seed, sizeof(*seed)); + if 
(seed != NULL) { + size = seed->size; + early_memunmap(seed, sizeof(*seed)); + } else { + pr_err("Could not map UEFI random seed!\n"); + } + if (size > 0) { + seed = early_memremap(efi.rng_seed, + sizeof(*seed) + size); + if (seed != NULL) { + add_device_randomness(seed->bits, seed->size); + early_memunmap(seed, sizeof(*seed) + size); + } else { + pr_err("Could not map UEFI random seed!\n"); + } + } + } + + efi_memattr_init(); + /* Parse the EFI Properties table if it exists */ if (efi.properties_table != EFI_INVALID_TABLE_ADDR) { efi_properties_table_t *tbl; @@ -822,3 +854,47 @@ int efi_status_to_err(efi_status_t status) return err; } + +#ifdef CONFIG_KEXEC +static int update_efi_random_seed(struct notifier_block *nb, + unsigned long code, void *unused) +{ + struct linux_efi_random_seed *seed; + u32 size = 0; + + if (!kexec_in_progress) + return NOTIFY_DONE; + + seed = memremap(efi.rng_seed, sizeof(*seed), MEMREMAP_WB); + if (seed != NULL) { + size = min(seed->size, 32U); + memunmap(seed); + } else { + pr_err("Could not map UEFI random seed!\n"); + } + if (size > 0) { + seed = memremap(efi.rng_seed, sizeof(*seed) + size, + MEMREMAP_WB); + if (seed != NULL) { + seed->size = size; + get_random_bytes(seed->bits, seed->size); + memunmap(seed); + } else { + pr_err("Could not map UEFI random seed!\n"); + } + } + return NOTIFY_DONE; +} + +static struct notifier_block efi_random_seed_nb = { + .notifier_call = update_efi_random_seed, +}; + +static int register_update_efi_random_seed(void) +{ + if (efi.rng_seed == EFI_INVALID_TABLE_ADDR) + return 0; + return register_reboot_notifier(&efi_random_seed_nb); +} +late_initcall(register_update_efi_random_seed); +#endif diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c index 14914074f716..08b026864d4e 100644 --- a/drivers/firmware/efi/esrt.c +++ b/drivers/firmware/efi/esrt.c @@ -269,7 +269,7 @@ void __init efi_esrt_init(void) max -= efi.esrt; if (max < size) { - pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n", + pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n", size, max); return; } diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c index 520a40e5e0e4..6c7d60c239b5 100644 --- a/drivers/firmware/efi/fake_mem.c +++ b/drivers/firmware/efi/fake_mem.c @@ -71,8 +71,7 @@ void __init efi_fake_memmap(void) } /* allocate memory for new EFI memmap */ - new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map, - PAGE_SIZE); + new_memmap_phy = efi_memmap_alloc(new_nr_map); if (!new_memmap_phy) return; diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 5e23e2d305e7..f7425960f6a5 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -6,12 +6,12 @@ # cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small -cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \ +cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \ +cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ -fno-builtin -fpic -mno-single-pic-base cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt @@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD := y # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 
KCOV_INSTRUMENT := n -lib-y := efi-stub-helper.o gop.o +lib-y := efi-stub-helper.o gop.o secureboot.o # include the stub's generic dependencies from lib/ when building for ARM/arm64 arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c @@ -36,11 +36,11 @@ arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE $(call if_changed_rule,cc_o_c) -lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \ +lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o random.o \ $(patsubst %.c,lib-%.o,$(arm-deps)) lib-$(CONFIG_ARM) += arm32-stub.o -lib-$(CONFIG_ARM64) += arm64-stub.o random.o +lib-$(CONFIG_ARM64) += arm64-stub.o CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) # @@ -60,7 +60,7 @@ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) extra-$(CONFIG_EFI_ARMSTUB) := $(lib-y) lib-$(CONFIG_EFI_ARMSTUB) := $(patsubst %.o,%.stub.o,$(lib-y)) -STUBCOPY_FLAGS-y := -R .debug* -R *ksymtab* -R *kcrctab* +STUBCOPY_RM-y := -R *ksymtab* -R *kcrctab* STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \ --prefix-symbols=__efistub_ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS @@ -68,17 +68,25 @@ STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS $(obj)/%.stub.o: $(obj)/%.o FORCE $(call if_changed,stubcopy) +# +# Strip debug sections and some other sections that may legally contain +# absolute relocations, so that we can inspect the remaining sections for +# such relocations. If none are found, regenerate the output object, but +# this time, use objcopy and leave all sections in place. +# quiet_cmd_stubcopy = STUBCPY $@ - cmd_stubcopy = if $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; then \ - $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \ - && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \ - rm -f $@; /bin/false); else /bin/false; fi + cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \ + then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \ + then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \ + rm -f $@; /bin/false); \ + else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi \ + else /bin/false; fi # # ARM discards the .data section because it disallows r/w data in the # decompressor. So move our .data to .data.efistub, which is preserved # explicitly by the decompressor linker script. 
# -STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \ - -R ___ksymtab+sort -R ___kcrctab+sort +STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub +STUBCOPY_RM-$(CONFIG_ARM) += -R ___ksymtab+sort -R ___kcrctab+sort STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 993aa56755f6..d4056c6be1ec 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -20,52 +20,6 @@ bool __nokaslr; -static int efi_get_secureboot(efi_system_table_t *sys_table_arg) -{ - static efi_char16_t const sb_var_name[] = { - 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 }; - static efi_char16_t const sm_var_name[] = { - 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 }; - - efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID; - efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable; - u8 val; - unsigned long size = sizeof(val); - efi_status_t status; - - status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid, - NULL, &size, &val); - - if (status != EFI_SUCCESS) - goto out_efi_err; - - if (val == 0) - return 0; - - status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid, - NULL, &size, &val); - - if (status != EFI_SUCCESS) - goto out_efi_err; - - if (val == 1) - return 0; - - return 1; - -out_efi_err: - switch (status) { - case EFI_NOT_FOUND: - return 0; - case EFI_DEVICE_ERROR: - return -EIO; - case EFI_SECURITY_VIOLATION: - return -EACCES; - default: - return -EINVAL; - } -} - efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, void **__fh) { @@ -91,75 +45,6 @@ efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, return status; } -efi_status_t efi_file_close(void *handle) -{ - efi_file_handle_t *fh = handle; - - return fh->close(handle); -} - -efi_status_t -efi_file_read(void *handle, unsigned long *size, void *addr) -{ - efi_file_handle_t *fh = handle; - - return fh->read(handle, size, addr); -} - - -efi_status_t -efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, - efi_char16_t *filename_16, void **handle, u64 *file_sz) -{ - efi_file_handle_t *h, *fh = __fh; - efi_file_info_t *info; - efi_status_t status; - efi_guid_t info_guid = EFI_FILE_INFO_ID; - unsigned long info_sz; - - status = fh->open(fh, &h, filename_16, EFI_FILE_MODE_READ, (u64)0); - if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to open file: "); - efi_char16_printk(sys_table_arg, filename_16); - efi_printk(sys_table_arg, "\n"); - return status; - } - - *handle = h; - - info_sz = 0; - status = h->get_info(h, &info_guid, &info_sz, NULL); - if (status != EFI_BUFFER_TOO_SMALL) { - efi_printk(sys_table_arg, "Failed to get file info size\n"); - return status; - } - -grow: - status = sys_table_arg->boottime->allocate_pool(EFI_LOADER_DATA, - info_sz, (void **)&info); - if (status != EFI_SUCCESS) { - efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); - return status; - } - - status = h->get_info(h, &info_guid, &info_sz, - info); - if (status == EFI_BUFFER_TOO_SMALL) { - sys_table_arg->boottime->free_pool(info); - goto grow; - } - - *file_sz = info->file_size; - sys_table_arg->boottime->free_pool(info); - - if (status != EFI_SUCCESS) - efi_printk(sys_table_arg, "Failed to get initrd info\n"); - - return status; -} - - - void efi_char16_printk(efi_system_table_t *sys_table_arg, efi_char16_t *str) { @@ -226,7 +111,7 @@ unsigned long efi_entry(void *handle, 
efi_system_table_t *sys_table, efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID; unsigned long reserve_addr = 0; unsigned long reserve_size = 0; - int secure_boot = 0; + enum efi_secureboot_mode secure_boot; struct screen_info *si; /* Check if we were booted by the EFI firmware */ @@ -296,19 +181,14 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n"); secure_boot = efi_get_secureboot(sys_table); - if (secure_boot > 0) - pr_efi(sys_table, "UEFI Secure Boot is enabled.\n"); - - if (secure_boot < 0) { - pr_efi_err(sys_table, - "could not determine UEFI Secure Boot status.\n"); - } /* - * Unauthenticated device tree data is a security hazard, so - * ignore 'dtb=' unless UEFI Secure Boot is disabled. + * Unauthenticated device tree data is a security hazard, so ignore + * 'dtb=' unless UEFI Secure Boot is disabled. We assume that secure + * boot is enabled if we can't determine its state. */ - if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) { + if (secure_boot != efi_secureboot_mode_disabled && + strstr(cmdline_ptr, "dtb=")) { pr_efi(sys_table, "Ignoring DTB from command line.\n"); } else { status = handle_cmdline_files(sys_table, image, cmdline_ptr, @@ -340,6 +220,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, if (status != EFI_SUCCESS) pr_efi_err(sys_table, "Failed initrd from command line!\n"); + efi_random_get_seed(sys_table); + new_fdt_addr = fdt_addr; status = allocate_new_fdt_and_exit_boot(sys_table, handle, &new_fdt_addr, dram_base + MAX_FDT_OFFSET, diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index aded10662020..919822b7773d 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -32,15 +32,6 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE; -/* - * Allow the platform to override the allocation granularity: this allows - * systems that have the capability to run with a larger page size to deal - * with the allocations for initrd and fdt more efficiently. - */ -#ifndef EFI_ALLOC_ALIGN -#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE -#endif - #define EFI_MMAP_NR_SLACK_SLOTS 8 struct file_info { @@ -186,14 +177,16 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, goto fail; /* - * Enforce minimum alignment that EFI requires when requesting - * a specific address. We are doing page-based allocations, - * so we must be aligned to a page. + * Enforce minimum alignment that EFI or Linux requires when + * requesting a specific address. We are doing page-based (or + * larger) allocations, and both the address and size must meet + * alignment constraints. */ if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; - nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; + size = round_up(size, EFI_ALLOC_ALIGN); + nr_pages = size / EFI_PAGE_SIZE; again: for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; @@ -208,7 +201,7 @@ again: continue; start = desc->phys_addr; - end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); + end = start + desc->num_pages * EFI_PAGE_SIZE; if (end > max) end = max; @@ -278,14 +271,16 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, goto fail; /* - * Enforce minimum alignment that EFI requires when requesting - * a specific address. We are doing page-based allocations, - * so we must be aligned to a page. 
+ * Enforce minimum alignment that EFI or Linux requires when + * requesting a specific address. We are doing page-based (or + * larger) allocations, and both the address and size must meet + * alignment constraints. */ if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; - nr_pages = round_up(size, EFI_ALLOC_ALIGN) / EFI_PAGE_SIZE; + size = round_up(size, EFI_ALLOC_ALIGN); + nr_pages = size / EFI_PAGE_SIZE; for (i = 0; i < map_size / desc_size; i++) { efi_memory_desc_t *desc; unsigned long m = (unsigned long)map; @@ -300,7 +295,7 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, continue; start = desc->phys_addr; - end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); + end = start + desc->num_pages * EFI_PAGE_SIZE; /* * Don't allocate at 0x0. It will confuse code that @@ -343,6 +338,69 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, efi_call_early(free_pages, addr, nr_pages); } +static efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, + efi_char16_t *filename_16, void **handle, + u64 *file_sz) +{ + efi_file_handle_t *h, *fh = __fh; + efi_file_info_t *info; + efi_status_t status; + efi_guid_t info_guid = EFI_FILE_INFO_ID; + unsigned long info_sz; + + status = efi_call_proto(efi_file_handle, open, fh, &h, filename_16, + EFI_FILE_MODE_READ, (u64)0); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to open file: "); + efi_char16_printk(sys_table_arg, filename_16); + efi_printk(sys_table_arg, "\n"); + return status; + } + + *handle = h; + + info_sz = 0; + status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, + &info_sz, NULL); + if (status != EFI_BUFFER_TOO_SMALL) { + efi_printk(sys_table_arg, "Failed to get file info size\n"); + return status; + } + +grow: + status = efi_call_early(allocate_pool, EFI_LOADER_DATA, + info_sz, (void **)&info); + if (status != EFI_SUCCESS) { + efi_printk(sys_table_arg, "Failed to alloc mem for file info\n"); + return status; + } + + status = efi_call_proto(efi_file_handle, get_info, h, &info_guid, + &info_sz, info); + if (status == EFI_BUFFER_TOO_SMALL) { + efi_call_early(free_pool, info); + goto grow; + } + + *file_sz = info->file_size; + efi_call_early(free_pool, info); + + if (status != EFI_SUCCESS) + efi_printk(sys_table_arg, "Failed to get initrd info\n"); + + return status; +} + +static efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr) +{ + return efi_call_proto(efi_file_handle, read, handle, size, addr); +} + +static efi_status_t efi_file_close(void *handle) +{ + return efi_call_proto(efi_file_handle, close, handle); +} + /* * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi= * option, e.g. efi=nochunk. @@ -356,6 +414,14 @@ efi_status_t efi_parse_options(char *cmdline) char *str; /* + * Currently, the only efi= option we look for is 'nochunk', which + * is intended to work around known issues on certain x86 UEFI + * versions. So ignore for now on other architectures. + */ + if (!IS_ENABLED(CONFIG_X86)) + return EFI_SUCCESS; + + /* * If no EFI parameters were specified on the cmdline we've got * nothing to do. 
*/ @@ -528,7 +594,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, size = files[j].size; while (size) { unsigned long chunksize; - if (size > __chunk_size) + + if (IS_ENABLED(CONFIG_X86) && size > __chunk_size) chunksize = __chunk_size; else chunksize = size; diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index ee49cd23ee63..71c4d0e3c4ed 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -15,29 +15,22 @@ */ #undef __init +/* + * Allow the platform to override the allocation granularity: this allows + * systems that have the capability to run with a larger page size to deal + * with the allocations for initrd and fdt more efficiently. + */ +#ifndef EFI_ALLOC_ALIGN +#define EFI_ALLOC_ALIGN EFI_PAGE_SIZE +#endif + void efi_char16_printk(efi_system_table_t *, efi_char16_t *); efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image, void **__fh); -efi_status_t efi_file_size(efi_system_table_t *sys_table_arg, void *__fh, - efi_char16_t *filename_16, void **handle, - u64 *file_sz); - -efi_status_t efi_file_read(void *handle, unsigned long *size, void *addr); - -efi_status_t efi_file_close(void *handle); - unsigned long get_dram_base(efi_system_table_t *sys_table_arg); -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, - void *fdt, int new_fdt_size, char *cmdline_ptr, - u64 initrd_addr, u64 initrd_size, - efi_memory_desc_t *memory_map, - unsigned long map_size, unsigned long desc_size, - u32 desc_ver); - efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, void *handle, unsigned long *new_fdt_addr, @@ -62,4 +55,6 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, efi_status_t check_platform_features(efi_system_table_t *sys_table_arg); +efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg); + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index a6a93116a8f0..260c4b4b492e 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -16,13 +16,10 @@ #include "efistub.h" -efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, - unsigned long orig_fdt_size, - void *fdt, int new_fdt_size, char *cmdline_ptr, - u64 initrd_addr, u64 initrd_size, - efi_memory_desc_t *memory_map, - unsigned long map_size, unsigned long desc_size, - u32 desc_ver) +static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, + unsigned long orig_fdt_size, + void *fdt, int new_fdt_size, char *cmdline_ptr, + u64 initrd_addr, u64 initrd_size) { int node, num_rsv; int status; @@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt, if (status) goto fdt_set_fail; - fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map); + fdt_val64 = U64_MAX; /* placeholder */ status = fdt_setprop(fdt, node, "linux,uefi-mmap-start", &fdt_val64, sizeof(fdt_val64)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(map_size); + fdt_val32 = U32_MAX; /* placeholder */ status = fdt_setprop(fdt, node, "linux,uefi-mmap-size", &fdt_val32, sizeof(fdt_val32)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(desc_size); status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size", &fdt_val32, sizeof(fdt_val32)); if (status) goto fdt_set_fail; - fdt_val32 = cpu_to_fdt32(desc_ver); status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver", &fdt_val32, 
sizeof(fdt_val32)); if (status) @@ -148,6 +143,43 @@ fdt_set_fail: return EFI_LOAD_ERROR; } +static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) +{ + int node = fdt_path_offset(fdt, "/chosen"); + u64 fdt_val64; + u32 fdt_val32; + int err; + + if (node < 0) + return EFI_LOAD_ERROR; + + fdt_val64 = cpu_to_fdt64((unsigned long)*map->map); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start", + &fdt_val64, sizeof(fdt_val64)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->map_size); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->desc_size); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + fdt_val32 = cpu_to_fdt32(*map->desc_ver); + err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver", + &fdt_val32, sizeof(fdt_val32)); + if (err) + return EFI_LOAD_ERROR; + + return EFI_SUCCESS; +} + #ifndef EFI_FDT_ALIGN #define EFI_FDT_ALIGN EFI_PAGE_SIZE #endif @@ -155,6 +187,7 @@ fdt_set_fail: struct exit_boot_struct { efi_memory_desc_t *runtime_map; int *runtime_entry_count; + void *new_fdt_addr; }; static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, @@ -170,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg, efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, p->runtime_map, p->runtime_entry_count); - return EFI_SUCCESS; + return update_fdt_memmap(p->new_fdt_addr, map); } /* @@ -243,20 +276,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, goto fail; } - /* - * Now that we have done our final memory allocation (and free) - * we can get the memory map key needed for - * exit_boot_services(). - */ - status = efi_get_memory_map(sys_table, &map); - if (status != EFI_SUCCESS) - goto fail_free_new_fdt; - status = update_fdt(sys_table, (void *)fdt_addr, fdt_size, (void *)*new_fdt_addr, new_fdt_size, - cmdline_ptr, initrd_addr, initrd_size, - memory_map, map_size, desc_size, desc_ver); + cmdline_ptr, initrd_addr, initrd_size); /* Succeeding the first time is the expected case. */ if (status == EFI_SUCCESS) @@ -266,22 +289,19 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, /* * We need to allocate more space for the new * device tree, so free existing buffer that is - * too small. Also free memory map, as we will need - * to get new one that reflects the free/alloc we do - * on the device tree buffer. + * too small. 
*/ efi_free(sys_table, new_fdt_size, *new_fdt_addr); - sys_table->boottime->free_pool(memory_map); new_fdt_size += EFI_PAGE_SIZE; } else { pr_efi_err(sys_table, "Unable to construct new device tree.\n"); - goto fail_free_mmap; + goto fail_free_new_fdt; } } - sys_table->boottime->free_pool(memory_map); priv.runtime_map = runtime_map; priv.runtime_entry_count = &runtime_entry_count; + priv.new_fdt_addr = (void *)*new_fdt_addr; status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); @@ -319,9 +339,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table, pr_efi_err(sys_table, "Exit boot services failed.\n"); -fail_free_mmap: - sys_table->boottime->free_pool(memory_map); - fail_free_new_fdt: efi_free(sys_table, new_fdt_size, *new_fdt_addr); diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 0c9f58c5ba50..7e72954d5860 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -8,6 +8,7 @@ */ #include <linux/efi.h> +#include <linux/log2.h> #include <asm/efi.h> #include "efistub.h" @@ -41,21 +42,23 @@ efi_status_t efi_get_random_bytes(efi_system_table_t *sys_table_arg, */ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, unsigned long size, - unsigned long align) + unsigned long align_shift) { - u64 start, end; + unsigned long align = 1UL << align_shift; + u64 first_slot, last_slot, region_end; if (md->type != EFI_CONVENTIONAL_MEMORY) return 0; - start = round_up(md->phys_addr, align); - end = round_down(md->phys_addr + md->num_pages * EFI_PAGE_SIZE - size, - align); + region_end = min((u64)ULONG_MAX, md->phys_addr + md->num_pages*EFI_PAGE_SIZE - 1); - if (start > end) + first_slot = round_up(md->phys_addr, align); + last_slot = round_down(region_end - size + 1, align); + + if (first_slot > last_slot) return 0; - return (end - start + 1) / align; + return ((unsigned long)(last_slot - first_slot) >> align_shift) + 1; } /* @@ -98,7 +101,7 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, efi_memory_desc_t *md = (void *)memory_map + map_offset; unsigned long slots; - slots = get_entry_num_slots(md, size, align); + slots = get_entry_num_slots(md, size, ilog2(align)); MD_NUM_SLOTS(md) = slots; total_slots += slots; } @@ -141,3 +144,51 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg, return status; } + +#define RANDOM_SEED_SIZE 32 + +efi_status_t efi_random_get_seed(efi_system_table_t *sys_table_arg) +{ + efi_guid_t rng_proto = EFI_RNG_PROTOCOL_GUID; + efi_guid_t rng_algo_raw = EFI_RNG_ALGORITHM_RAW; + efi_guid_t rng_table_guid = LINUX_EFI_RANDOM_SEED_TABLE_GUID; + struct efi_rng_protocol *rng; + struct linux_efi_random_seed *seed; + efi_status_t status; + + status = efi_call_early(locate_protocol, &rng_proto, NULL, + (void **)&rng); + if (status != EFI_SUCCESS) + return status; + + status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + sizeof(*seed) + RANDOM_SEED_SIZE, + (void **)&seed); + if (status != EFI_SUCCESS) + return status; + + status = rng->get_rng(rng, &rng_algo_raw, RANDOM_SEED_SIZE, + seed->bits); + if (status == EFI_UNSUPPORTED) + /* + * Use whatever algorithm we have available if the raw algorithm + * is not implemented. 
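/*
 * The reworked get_entry_num_slots() above takes an alignment shift instead
 * of an alignment value and clamps the region end to ULONG_MAX so the
 * arithmetic stays safe on 32-bit. A stand-alone sketch of the same slot
 * counting with made-up numbers; illustrative only, not part of this patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static unsigned long num_slots(uint64_t base, uint64_t num_pages,
                               unsigned long size, int align_shift)
{
    uint64_t align = 1ULL << align_shift;
    uint64_t region_end, first_slot, last_slot;

    region_end = base + num_pages * 4096ULL - 1;    /* EFI_PAGE_SIZE */
    if (region_end > ULONG_MAX)                     /* matters on 32-bit */
        region_end = ULONG_MAX;

    first_slot = (base + align - 1) & ~(align - 1);        /* round_up   */
    last_slot = (region_end - size + 1) & ~(align - 1);    /* round_down */

    if (first_slot > last_slot)
        return 0;

    return (unsigned long)((last_slot - first_slot) >> align_shift) + 1;
}

int main(void)
{
    /* A 1 MiB region at 0x80000000, allocating 64 KiB: */
    printf("%lu slots at 4 KiB alignment\n",
           num_slots(0x80000000ULL, 256, 0x10000, 12));    /* 241 */
    printf("%lu slot at 1 MiB alignment\n",
           num_slots(0x80000000ULL, 256, 0x10000, 20));    /* 1 */
    return 0;
}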
+ */ + status = rng->get_rng(rng, NULL, RANDOM_SEED_SIZE, + seed->bits); + + if (status != EFI_SUCCESS) + goto err_freepool; + + seed->size = RANDOM_SEED_SIZE; + status = efi_call_early(install_configuration_table, &rng_table_guid, + seed); + if (status != EFI_SUCCESS) + goto err_freepool; + + return EFI_SUCCESS; + +err_freepool: + efi_call_early(free_pool, seed); + return status; +} diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c new file mode 100644 index 000000000000..6def402bf569 --- /dev/null +++ b/drivers/firmware/efi/libstub/secureboot.c @@ -0,0 +1,84 @@ +/* + * Secure boot handling. + * + * Copyright (C) 2013,2014 Linaro Limited + * Roy Franz <roy.franz@linaro.org + * Copyright (C) 2013 Red Hat, Inc. + * Mark Salter <msalter@redhat.com> + * + * This file is part of the Linux kernel, and is made available under the + * terms of the GNU General Public License version 2. + */ +#include <linux/efi.h> +#include <asm/efi.h> + +/* BIOS variables */ +static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; +static const efi_char16_t const efi_SecureBoot_name[] = { + 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 +}; +static const efi_char16_t const efi_SetupMode_name[] = { + 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 +}; + +/* SHIM variables */ +static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; +static efi_char16_t const shim_MokSBState_name[] = { + 'M', 'o', 'k', 'S', 'B', 'S', 't', 'a', 't', 'e', 0 +}; + +#define get_efi_var(name, vendor, ...) \ + efi_call_runtime(get_variable, \ + (efi_char16_t *)(name), (efi_guid_t *)(vendor), \ + __VA_ARGS__); + +/* + * Determine whether we're in secure boot mode. + */ +enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg) +{ + u32 attr; + u8 secboot, setupmode, moksbstate; + unsigned long size; + efi_status_t status; + + size = sizeof(secboot); + status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid, + NULL, &size, &secboot); + if (status != EFI_SUCCESS) + goto out_efi_err; + + size = sizeof(setupmode); + status = get_efi_var(efi_SetupMode_name, &efi_variable_guid, + NULL, &size, &setupmode); + if (status != EFI_SUCCESS) + goto out_efi_err; + + if (secboot == 0 || setupmode == 1) + return efi_secureboot_mode_disabled; + + /* + * See if a user has put the shim into insecure mode. If so, and if the + * variable doesn't have the runtime attribute set, we might as well + * honor that. + */ + size = sizeof(moksbstate); + status = get_efi_var(shim_MokSBState_name, &shim_guid, + &attr, &size, &moksbstate); + + /* If it fails, we don't care why. 
Default to secure */ + if (status != EFI_SUCCESS) + goto secure_boot_enabled; + if (!(attr & EFI_VARIABLE_RUNTIME_ACCESS) && moksbstate == 1) + return efi_secureboot_mode_disabled; + +secure_boot_enabled: + pr_efi(sys_table_arg, "UEFI Secure Boot is enabled.\n"); + return efi_secureboot_mode_enabled; + +out_efi_err: + pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n"); + if (status == EFI_NOT_FOUND) + return efi_secureboot_mode_disabled; + return efi_secureboot_mode_unknown; +} diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 236004b9a50d..8986757eafaf 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -43,6 +43,7 @@ int __init efi_memattr_init(void) tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size; memblock_reserve(efi.mem_attr_table, tbl_size); + set_bit(EFI_MEM_ATTR, &efi.flags); unmap: early_memunmap(tbl, sizeof(*tbl)); @@ -174,8 +175,11 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm, md.phys_addr + size - 1, efi_md_typeattr_format(buf, sizeof(buf), &md)); - if (valid) + if (valid) { ret = fn(mm, &md); + if (ret) + pr_err("Error updating mappings, skipping subsequent md's\n"); + } } memunmap(tbl); return ret; diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c index f03ddecd232b..78686443cb37 100644 --- a/drivers/firmware/efi/memmap.c +++ b/drivers/firmware/efi/memmap.c @@ -9,6 +9,44 @@ #include <linux/efi.h> #include <linux/io.h> #include <asm/early_ioremap.h> +#include <linux/memblock.h> +#include <linux/slab.h> + +static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size) +{ + return memblock_alloc(size, 0); +} + +static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size) +{ + unsigned int order = get_order(size); + struct page *p = alloc_pages(GFP_KERNEL, order); + + if (!p) + return 0; + + return PFN_PHYS(page_to_pfn(p)); +} + +/** + * efi_memmap_alloc - Allocate memory for the EFI memory map + * @num_entries: Number of entries in the allocated map. + * + * Depending on whether mm_init() has already been invoked or not, + * either memblock or "normal" page allocation is used. + * + * Returns the physical address of the allocated memory map on + * success, zero on failure. 
+ */ +phys_addr_t __init efi_memmap_alloc(unsigned int num_entries) +{ + unsigned long size = num_entries * efi.memmap.desc_size; + + if (slab_is_available()) + return __efi_memmap_alloc_late(size); + + return __efi_memmap_alloc_early(size); +} /** * __efi_memmap_init - Common code for mapping the EFI memory map diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index f61bb52be318..8cd578f62059 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -8,7 +8,6 @@ * */ -#include <linux/version.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/init.h> @@ -156,7 +155,7 @@ static long efi_runtime_get_variable(unsigned long arg) { struct efi_getvariable __user *getvariable_user; struct efi_getvariable getvariable; - unsigned long datasize, prev_datasize, *dz; + unsigned long datasize = 0, prev_datasize, *dz; efi_guid_t vendor_guid, *vd = NULL; efi_status_t status; efi_char16_t *name = NULL; @@ -266,14 +265,10 @@ static long efi_runtime_set_variable(unsigned long arg) return rv; } - data = kmalloc(setvariable.data_size, GFP_KERNEL); - if (!data) { + data = memdup_user(setvariable.data, setvariable.data_size); + if (IS_ERR(data)) { kfree(name); - return -ENOMEM; - } - if (copy_from_user(data, setvariable.data, setvariable.data_size)) { - rv = -EFAULT; - goto out; + return PTR_ERR(data); } status = efi.set_variable(name, &vendor_guid, @@ -429,7 +424,7 @@ static long efi_runtime_get_nextvariablename(unsigned long arg) efi_guid_t *vd = NULL; efi_guid_t vendor_guid; efi_char16_t *name = NULL; - int rv; + int rv = 0; getnextvariablename_user = (struct efi_getnextvariablename __user *)arg; diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 8263429e21b8..493a56a4cfc4 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c @@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index) u32 *state = __this_cpu_read(psci_power_state); return psci_ops.cpu_suspend(state[index - 1], - virt_to_phys(cpu_resume)); + __pa_symbol(cpu_resume)); } int psci_cpu_suspend_enter(unsigned long index) @@ -419,7 +419,7 @@ CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); static int psci_system_suspend(unsigned long unused) { return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND), - virt_to_phys(cpu_resume), 0, 0); + __pa_symbol(cpu_resume), 0, 0); } static int psci_system_suspend_enter(suspend_state_t state) @@ -630,7 +630,7 @@ int __init psci_dt_init(void) np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np); - if (!np) + if (!np || !of_device_is_available(np)) return -ENODEV; init_fn = (psci_initcall_t)matched_np->data; diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c new file mode 100644 index 000000000000..6523ce962865 --- /dev/null +++ b/drivers/firmware/psci_checker.c @@ -0,0 +1,491 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2016 ARM Limited + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/atomic.h> +#include <linux/completion.h> +#include <linux/cpu.h> +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <uapi/linux/sched/types.h> +#include <linux/module.h> +#include <linux/preempt.h> +#include <linux/psci.h> +#include <linux/slab.h> +#include <linux/tick.h> +#include <linux/topology.h> + +#include <asm/cpuidle.h> + +#include <uapi/linux/psci.h> + +#define NUM_SUSPEND_CYCLE (10) + +static unsigned int nb_available_cpus; +static int tos_resident_cpu = -1; + +static atomic_t nb_active_threads; +static struct completion suspend_threads_started = + COMPLETION_INITIALIZER(suspend_threads_started); +static struct completion suspend_threads_done = + COMPLETION_INITIALIZER(suspend_threads_done); + +/* + * We assume that PSCI operations are used if they are available. This is not + * necessarily true on arm64, since the decision is based on the + * "enable-method" property of each CPU in the DT, but given that there is no + * arch-specific way to check this, we assume that the DT is sensible. + */ +static int psci_ops_check(void) +{ + int migrate_type = -1; + int cpu; + + if (!(psci_ops.cpu_off && psci_ops.cpu_on && psci_ops.cpu_suspend)) { + pr_warn("Missing PSCI operations, aborting tests\n"); + return -EOPNOTSUPP; + } + + if (psci_ops.migrate_info_type) + migrate_type = psci_ops.migrate_info_type(); + + if (migrate_type == PSCI_0_2_TOS_UP_MIGRATE || + migrate_type == PSCI_0_2_TOS_UP_NO_MIGRATE) { + /* There is a UP Trusted OS, find on which core it resides. */ + for_each_online_cpu(cpu) + if (psci_tos_resident_on(cpu)) { + tos_resident_cpu = cpu; + break; + } + if (tos_resident_cpu == -1) + pr_warn("UP Trusted OS resides on no online CPU\n"); + } + + return 0; +} + +static int find_clusters(const struct cpumask *cpus, + const struct cpumask **clusters) +{ + unsigned int nb = 0; + cpumask_var_t tmp; + + if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) + return -ENOMEM; + cpumask_copy(tmp, cpus); + + while (!cpumask_empty(tmp)) { + const struct cpumask *cluster = + topology_core_cpumask(cpumask_any(tmp)); + + clusters[nb++] = cluster; + cpumask_andnot(tmp, tmp, cluster); + } + + free_cpumask_var(tmp); + return nb; +} + +/* + * offlined_cpus is a temporary array but passing it as an argument avoids + * multiple allocations. + */ +static unsigned int down_and_up_cpus(const struct cpumask *cpus, + struct cpumask *offlined_cpus) +{ + int cpu; + int err = 0; + + cpumask_clear(offlined_cpus); + + /* Try to power down all CPUs in the mask. */ + for_each_cpu(cpu, cpus) { + int ret = cpu_down(cpu); + + /* + * cpu_down() checks the number of online CPUs before the TOS + * resident CPU. + */ + if (cpumask_weight(offlined_cpus) + 1 == nb_available_cpus) { + if (ret != -EBUSY) { + pr_err("Unexpected return code %d while trying " + "to power down last online CPU %d\n", + ret, cpu); + ++err; + } + } else if (cpu == tos_resident_cpu) { + if (ret != -EPERM) { + pr_err("Unexpected return code %d while trying " + "to power down TOS resident CPU %d\n", + ret, cpu); + ++err; + } + } else if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power down CPU %d\n", ret, cpu); + ++err; + } + + if (ret == 0) + cpumask_set_cpu(cpu, offlined_cpus); + } + + /* Try to power up all the CPUs that have been offlined. 
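/*
 * find_clusters() above partitions a CPU mask into clusters by repeatedly
 * picking any CPU still in the working mask, recording the cluster mask
 * returned by topology_core_cpumask(), and removing those CPUs before the
 * next iteration. A user-space sketch of the same idea; illustrative only,
 * not part of this patch, with a lookup table standing in for the topology
 * query.
 */
#include <stdio.h>

/* Pretend CPUs 0-3 share one cluster and CPUs 4-7 share another. */
static unsigned int cluster_of(int cpu)
{
    return cpu < 4 ? 0x0fu : 0xf0u;
}

int main(void)
{
    unsigned int remaining = 0xffu;    /* all eight CPUs online */
    int nb = 0;

    while (remaining) {
        int cpu = __builtin_ctz(remaining);        /* any CPU left */
        unsigned int cluster = cluster_of(cpu);

        printf("cluster %d: CPUs 0x%02x\n", nb++, cluster);
        remaining &= ~cluster;
    }
    return 0;
}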
*/ + for_each_cpu(cpu, offlined_cpus) { + int ret = cpu_up(cpu); + + if (ret != 0) { + pr_err("Error occurred (%d) while trying " + "to power up CPU %d\n", ret, cpu); + ++err; + } else { + cpumask_clear_cpu(cpu, offlined_cpus); + } + } + + /* + * Something went bad at some point and some CPUs could not be turned + * back on. + */ + WARN_ON(!cpumask_empty(offlined_cpus) || + num_online_cpus() != nb_available_cpus); + + return err; +} + +static int hotplug_tests(void) +{ + int err; + cpumask_var_t offlined_cpus; + int i, nb_cluster; + const struct cpumask **clusters; + char *page_buf; + + err = -ENOMEM; + if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) + return err; + /* We may have up to nb_available_cpus clusters. */ + clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters), + GFP_KERNEL); + if (!clusters) + goto out_free_cpus; + page_buf = (char *)__get_free_page(GFP_KERNEL); + if (!page_buf) + goto out_free_clusters; + + err = 0; + nb_cluster = find_clusters(cpu_online_mask, clusters); + + /* + * Of course the last CPU cannot be powered down and cpu_down() should + * refuse doing that. + */ + pr_info("Trying to turn off and on again all CPUs\n"); + err += down_and_up_cpus(cpu_online_mask, offlined_cpus); + + /* + * Take down CPUs by cluster this time. When the last CPU is turned + * off, the cluster itself should shut down. + */ + for (i = 0; i < nb_cluster; ++i) { + int cluster_id = + topology_physical_package_id(cpumask_any(clusters[i])); + ssize_t len = cpumap_print_to_pagebuf(true, page_buf, + clusters[i]); + /* Remove trailing newline. */ + page_buf[len - 1] = '\0'; + pr_info("Trying to turn off and on again cluster %d " + "(CPUs %s)\n", cluster_id, page_buf); + err += down_and_up_cpus(clusters[i], offlined_cpus); + } + + free_page((unsigned long)page_buf); +out_free_clusters: + kfree(clusters); +out_free_cpus: + free_cpumask_var(offlined_cpus); + return err; +} + +static void dummy_callback(unsigned long ignored) {} + +static int suspend_cpu(int index, bool broadcast) +{ + int ret; + + arch_cpu_idle_enter(); + + if (broadcast) { + /* + * The local timer will be shut down, we need to enter tick + * broadcast. + */ + ret = tick_broadcast_enter(); + if (ret) { + /* + * In the absence of hardware broadcast mechanism, + * this CPU might be used to broadcast wakeups, which + * may be why entering tick broadcast has failed. + * There is little the kernel can do to work around + * that, so enter WFI instead (idle state 0). + */ + cpu_do_idle(); + ret = 0; + goto out_arch_exit; + } + } + + /* + * Replicate the common ARM cpuidle enter function + * (arm_enter_idle_state). + */ + ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index); + + if (broadcast) + tick_broadcast_exit(); + +out_arch_exit: + arch_cpu_idle_exit(); + + return ret; +} + +static int suspend_test_thread(void *arg) +{ + int cpu = (long)arg; + int i, nb_suspend = 0, nb_shallow_sleep = 0, nb_err = 0; + struct sched_param sched_priority = { .sched_priority = MAX_RT_PRIO-1 }; + struct cpuidle_device *dev; + struct cpuidle_driver *drv; + /* No need for an actual callback, we just want to wake up the CPU. */ + struct timer_list wakeup_timer; + + /* Wait for the main thread to give the start signal. */ + wait_for_completion(&suspend_threads_started); + + /* Set maximum priority to preempt all other threads on this CPU. 
*/ + if (sched_setscheduler_nocheck(current, SCHED_FIFO, &sched_priority)) + pr_warn("Failed to set suspend thread scheduler on CPU %d\n", + cpu); + + dev = this_cpu_read(cpuidle_devices); + drv = cpuidle_get_cpu_driver(dev); + + pr_info("CPU %d entering suspend cycles, states 1 through %d\n", + cpu, drv->state_count - 1); + + setup_timer_on_stack(&wakeup_timer, dummy_callback, 0); + for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) { + int index; + /* + * Test all possible states, except 0 (which is usually WFI and + * doesn't use PSCI). + */ + for (index = 1; index < drv->state_count; ++index) { + struct cpuidle_state *state = &drv->states[index]; + bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; + int ret; + + /* + * Set the timer to wake this CPU up in some time (which + * should be largely sufficient for entering suspend). + * If the local tick is disabled when entering suspend, + * suspend_cpu() takes care of switching to a broadcast + * tick, so the timer will still wake us up. + */ + mod_timer(&wakeup_timer, jiffies + + usecs_to_jiffies(state->target_residency)); + + /* IRQs must be disabled during suspend operations. */ + local_irq_disable(); + + ret = suspend_cpu(index, broadcast); + + /* + * We have woken up. Re-enable IRQs to handle any + * pending interrupt, do not wait until the end of the + * loop. + */ + local_irq_enable(); + + if (ret == index) { + ++nb_suspend; + } else if (ret >= 0) { + /* We did not enter the expected state. */ + ++nb_shallow_sleep; + } else { + pr_err("Failed to suspend CPU %d: error %d " + "(requested state %d, cycle %d)\n", + cpu, ret, index, i); + ++nb_err; + } + } + } + + /* + * Disable the timer to make sure that the timer will not trigger + * later. + */ + del_timer(&wakeup_timer); + + if (atomic_dec_return_relaxed(&nb_active_threads) == 0) + complete(&suspend_threads_done); + + /* Give up on RT scheduling and wait for termination. */ + sched_priority.sched_priority = 0; + if (sched_setscheduler_nocheck(current, SCHED_NORMAL, &sched_priority)) + pr_warn("Failed to set suspend thread scheduler on CPU %d\n", + cpu); + for (;;) { + /* Needs to be set first to avoid missing a wakeup. */ + set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + __set_current_state(TASK_RUNNING); + break; + } + schedule(); + } + + pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", + cpu, nb_suspend, nb_shallow_sleep, nb_err); + + return nb_err; +} + +static int suspend_tests(void) +{ + int i, cpu, err = 0; + struct task_struct **threads; + int nb_threads = 0; + + threads = kmalloc_array(nb_available_cpus, sizeof(*threads), + GFP_KERNEL); + if (!threads) + return -ENOMEM; + + /* + * Stop cpuidle to prevent the idle tasks from entering a deep sleep + * mode, as it might interfere with the suspend threads on other CPUs. + * This does not prevent the suspend threads from using cpuidle (only + * the idle tasks check this status). Take the idle lock so that + * the cpuidle driver and device look-up can be carried out safely. + */ + cpuidle_pause_and_lock(); + + for_each_online_cpu(cpu) { + struct task_struct *thread; + /* Check that cpuidle is available on that CPU. 
*/ + struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); + struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); + + if (!dev || !drv) { + pr_warn("cpuidle not available on CPU %d, ignoring\n", + cpu); + continue; + } + + thread = kthread_create_on_cpu(suspend_test_thread, + (void *)(long)cpu, cpu, + "psci_suspend_test"); + if (IS_ERR(thread)) + pr_err("Failed to create kthread on CPU %d\n", cpu); + else + threads[nb_threads++] = thread; + } + + if (nb_threads < 1) { + err = -ENODEV; + goto out; + } + + atomic_set(&nb_active_threads, nb_threads); + + /* + * Wake up the suspend threads. To avoid the main thread being preempted + * before all the threads have been unparked, the suspend threads will + * wait for the completion of suspend_threads_started. + */ + for (i = 0; i < nb_threads; ++i) + wake_up_process(threads[i]); + complete_all(&suspend_threads_started); + + wait_for_completion(&suspend_threads_done); + + + /* Stop and destroy all threads, get return status. */ + for (i = 0; i < nb_threads; ++i) + err += kthread_stop(threads[i]); + out: + cpuidle_resume_and_unlock(); + kfree(threads); + return err; +} + +static int __init psci_checker(void) +{ + int ret; + + /* + * Since we're in an initcall, we assume that all the CPUs + * that can be onlined have been onlined. + * + * The tests assume that hotplug is enabled but nobody else is using it, + * otherwise the results will be unpredictable. However, since there + * is no userspace yet in initcalls, that should be fine, as long as + * no torture test is running at the same time (see Kconfig). + */ + nb_available_cpus = num_online_cpus(); + + /* Check PSCI operations are set up and working. */ + ret = psci_ops_check(); + if (ret) + return ret; + + pr_info("PSCI checker started using %u CPUs\n", nb_available_cpus); + + pr_info("Starting hotplug tests\n"); + ret = hotplug_tests(); + if (ret == 0) + pr_info("Hotplug tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in hotplug tests\n", ret); + else { + pr_err("Out of memory\n"); + return ret; + } + + pr_info("Starting suspend tests (%d cycles per state)\n", + NUM_SUSPEND_CYCLE); + ret = suspend_tests(); + if (ret == 0) + pr_info("Suspend tests passed OK\n"); + else if (ret > 0) + pr_err("%d error(s) encountered in suspend tests\n", ret); + else { + switch (ret) { + case -ENOMEM: + pr_err("Out of memory\n"); + break; + case -ENODEV: + pr_warn("Could not start suspend tests on any CPU\n"); + break; + } + } + + pr_info("PSCI checker completed\n"); + return ret < 0 ? ret : 0; +} +late_initcall(psci_checker); diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index c6aeedbdcbb0..8ad226c60374 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c @@ -560,3 +560,21 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) return ret ? : le32_to_cpu(out); } + +int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) +{ + struct { + __le32 state; + __le32 id; + } req; + __le32 scm_ret = 0; + int ret; + + req.state = cpu_to_le32(state); + req.id = cpu_to_le32(id); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, + &req, sizeof(req), &scm_ret, sizeof(scm_ret)); + + return ret ?
: le32_to_cpu(scm_ret); +} diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c index 4a0f5ead4fb5..c9332590e8c6 100644 --- a/drivers/firmware/qcom_scm-64.c +++ b/drivers/firmware/qcom_scm-64.c @@ -91,6 +91,7 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, dma_addr_t args_phys = 0; void *args_virt = NULL; size_t alloc_len; + struct arm_smccc_quirk quirk = {.id = ARM_SMCCC_QUIRK_QCOM_A6}; if (unlikely(arglen > N_REGISTER_ARGS)) { alloc_len = N_EXT_QCOM_SCM_ARGS * sizeof(u64); @@ -131,10 +132,16 @@ static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id, qcom_smccc_convention, ARM_SMCCC_OWNER_SIP, fn_id); + quirk.state.a6 = 0; + do { - arm_smccc_smc(cmd, desc->arginfo, desc->args[0], - desc->args[1], desc->args[2], x5, 0, 0, - res); + arm_smccc_smc_quirk(cmd, desc->arginfo, desc->args[0], + desc->args[1], desc->args[2], x5, + quirk.state.a6, 0, res, &quirk); + + if (res->a0 == QCOM_SCM_INTERRUPTED) + cmd = res->a0; + } while (res->a0 == QCOM_SCM_INTERRUPTED); mutex_unlock(&qcom_scm_lock); @@ -358,3 +365,19 @@ int __qcom_scm_pas_mss_reset(struct device *dev, bool reset) return ret ? : res.a1; } + +int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id) +{ + struct qcom_scm_desc desc = {0}; + struct arm_smccc_res res; + int ret; + + desc.args[0] = state; + desc.args[1] = id; + desc.arginfo = QCOM_SCM_ARGS(2); + + ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE, + &desc, &res); + + return ret ? : res.a1; +} diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index d95c70227c05..d987bcc7489d 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -28,6 +28,10 @@ #include "qcom_scm.h" +#define SCM_HAS_CORE_CLK BIT(0) +#define SCM_HAS_IFACE_CLK BIT(1) +#define SCM_HAS_BUS_CLK BIT(2) + struct qcom_scm { struct device *dev; struct clk *core_clk; @@ -320,35 +324,49 @@ bool qcom_scm_is_available(void) } EXPORT_SYMBOL(qcom_scm_is_available); +int qcom_scm_set_remote_state(u32 state, u32 id) +{ + return __qcom_scm_set_remote_state(__scm->dev, state, id); +} +EXPORT_SYMBOL(qcom_scm_set_remote_state); + static int qcom_scm_probe(struct platform_device *pdev) { struct qcom_scm *scm; + unsigned long clks; int ret; scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL); if (!scm) return -ENOMEM; - scm->core_clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(scm->core_clk)) { - if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER) + clks = (unsigned long)of_device_get_match_data(&pdev->dev); + if (clks & SCM_HAS_CORE_CLK) { + scm->core_clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(scm->core_clk)) { + if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to acquire core clk\n"); return PTR_ERR(scm->core_clk); - - scm->core_clk = NULL; + } } - if (of_device_is_compatible(pdev->dev.of_node, "qcom,scm")) { + if (clks & SCM_HAS_IFACE_CLK) { scm->iface_clk = devm_clk_get(&pdev->dev, "iface"); if (IS_ERR(scm->iface_clk)) { if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to acquire iface clk\n"); + dev_err(&pdev->dev, + "failed to acquire iface clk\n"); return PTR_ERR(scm->iface_clk); } + } + if (clks & SCM_HAS_BUS_CLK) { scm->bus_clk = devm_clk_get(&pdev->dev, "bus"); if (IS_ERR(scm->bus_clk)) { if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "failed to acquire bus clk\n"); + dev_err(&pdev->dev, + "failed to acquire bus clk\n"); return PTR_ERR(scm->bus_clk); } } @@ -356,7 +374,9 @@ static int 
qcom_scm_probe(struct platform_device *pdev) scm->reset.ops = &qcom_scm_pas_reset_ops; scm->reset.nr_resets = 1; scm->reset.of_node = pdev->dev.of_node; - reset_controller_register(&scm->reset); + ret = devm_reset_controller_register(&pdev->dev, &scm->reset); + if (ret) + return ret; /* vote for max clk rate for highest performance */ ret = clk_set_rate(scm->core_clk, INT_MAX); @@ -372,10 +392,23 @@ static int qcom_scm_probe(struct platform_device *pdev) } static const struct of_device_id qcom_scm_dt_match[] = { - { .compatible = "qcom,scm-apq8064",}, - { .compatible = "qcom,scm-msm8660",}, - { .compatible = "qcom,scm-msm8960",}, - { .compatible = "qcom,scm",}, + { .compatible = "qcom,scm-apq8064", + /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */ + }, + { .compatible = "qcom,scm-msm8660", + .data = (void *) SCM_HAS_CORE_CLK, + }, + { .compatible = "qcom,scm-msm8960", + .data = (void *) SCM_HAS_CORE_CLK, + }, + { .compatible = "qcom,scm-msm8996", + .data = NULL, /* no clocks */ + }, + { .compatible = "qcom,scm", + .data = (void *)(SCM_HAS_CORE_CLK + | SCM_HAS_IFACE_CLK + | SCM_HAS_BUS_CLK), + }, {} }; diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h index 3584b00fe7e6..6a0f15469344 100644 --- a/drivers/firmware/qcom_scm.h +++ b/drivers/firmware/qcom_scm.h @@ -15,6 +15,8 @@ #define QCOM_SCM_SVC_BOOT 0x1 #define QCOM_SCM_BOOT_ADDR 0x1 #define QCOM_SCM_BOOT_ADDR_MC 0x11 +#define QCOM_SCM_SET_REMOTE_STATE 0xa +extern int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id); #define QCOM_SCM_FLAG_HLOS 0x01 #define QCOM_SCM_FLAG_COLDBOOT_MC 0x02 diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig new file mode 100644 index 000000000000..ff2730d5c468 --- /dev/null +++ b/drivers/firmware/tegra/Kconfig @@ -0,0 +1,25 @@ +menu "Tegra firmware driver" + +config TEGRA_IVC + bool "Tegra IVC protocol" + depends on ARCH_TEGRA + help + IVC (Inter-VM Communication) protocol is part of the IPC + (Inter Processor Communication) framework on Tegra. It maintains the + data and the different communication channels in SysRAM or RAM and + keeps the content in synchronization between the host CPU and remote + processors. + +config TEGRA_BPMP + bool "Tegra BPMP driver" + depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC + help + BPMP (Boot and Power Management Processor) is designed to off-load + the PM functions which include clock/DVFS/thermal/power from the CPU. + It needs HSP as the HW synchronization and notification module and + the IVC module as the message communication protocol. + + This driver manages the IPC interface between the host CPU and the + firmware running on the BPMP. + +endmenu diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile new file mode 100644 index 000000000000..e34a2f79e1ad --- /dev/null +++ b/drivers/firmware/tegra/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_TEGRA_BPMP) += bpmp.o +obj-$(CONFIG_TEGRA_IVC) += ivc.o diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c new file mode 100644 index 000000000000..84e4c9a58a0c --- /dev/null +++ b/drivers/firmware/tegra/bpmp.c @@ -0,0 +1,869 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <linux/clk/tegra.h> +#include <linux/genalloc.h> +#include <linux/mailbox_client.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/semaphore.h> +#include <linux/sched/clock.h> + +#include <soc/tegra/bpmp.h> +#include <soc/tegra/bpmp-abi.h> +#include <soc/tegra/ivc.h> + +#define MSG_ACK BIT(0) +#define MSG_RING BIT(1) + +static inline struct tegra_bpmp * +mbox_client_to_bpmp(struct mbox_client *client) +{ + return container_of(client, struct tegra_bpmp, mbox.client); +} + +struct tegra_bpmp *tegra_bpmp_get(struct device *dev) +{ + struct platform_device *pdev; + struct tegra_bpmp *bpmp; + struct device_node *np; + + np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0); + if (!np) + return ERR_PTR(-ENOENT); + + pdev = of_find_device_by_node(np); + if (!pdev) { + bpmp = ERR_PTR(-ENODEV); + goto put; + } + + bpmp = platform_get_drvdata(pdev); + if (!bpmp) { + bpmp = ERR_PTR(-EPROBE_DEFER); + put_device(&pdev->dev); + goto put; + } + +put: + of_node_put(np); + return bpmp; +} +EXPORT_SYMBOL_GPL(tegra_bpmp_get); + +void tegra_bpmp_put(struct tegra_bpmp *bpmp) +{ + if (bpmp) + put_device(bpmp->dev); +} +EXPORT_SYMBOL_GPL(tegra_bpmp_put); + +static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel) +{ + return channel - channel->bpmp->channels; +} + +static int +tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel) +{ + struct tegra_bpmp *bpmp = channel->bpmp; + unsigned int offset, count; + int index; + + offset = bpmp->soc->channels.thread.offset; + count = bpmp->soc->channels.thread.count; + + index = tegra_bpmp_channel_get_index(channel); + if (index < 0) + return index; + + if (index < offset || index >= offset + count) + return -EINVAL; + + return index - offset; +} + +static struct tegra_bpmp_channel * +tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index) +{ + unsigned int offset = bpmp->soc->channels.thread.offset; + unsigned int count = bpmp->soc->channels.thread.count; + + if (index >= count) + return NULL; + + return &bpmp->channels[offset + index]; +} + +static struct tegra_bpmp_channel * +tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp) +{ + unsigned int offset = bpmp->soc->channels.cpu_tx.offset; + + return &bpmp->channels[offset + smp_processor_id()]; +} + +static struct tegra_bpmp_channel * +tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp) +{ + unsigned int offset = bpmp->soc->channels.cpu_rx.offset; + + return &bpmp->channels[offset]; +} + +static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg) +{ + return (msg->tx.size <= MSG_DATA_MIN_SZ) && + (msg->rx.size <= MSG_DATA_MIN_SZ) && + (msg->tx.size == 0 || msg->tx.data) && + (msg->rx.size == 0 || msg->rx.data); +} + +static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel) +{ + void *frame; + + frame = tegra_ivc_read_get_next_frame(channel->ivc); + if (IS_ERR(frame)) { + channel->ib = NULL; + return false; + } + + channel->ib = frame; + + return true; +} + +static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel) +{ + unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout; + ktime_t end; + + end = ktime_add_us(ktime_get(), timeout); + + do { + if 
(tegra_bpmp_master_acked(channel)) + return 0; + } while (ktime_before(ktime_get(), end)); + + return -ETIMEDOUT; +} + +static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel) +{ + void *frame; + + frame = tegra_ivc_write_get_next_frame(channel->ivc); + if (IS_ERR(frame)) { + channel->ob = NULL; + return false; + } + + channel->ob = frame; + + return true; +} + +static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel) +{ + unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout; + ktime_t start, now; + + start = ns_to_ktime(local_clock()); + + do { + if (tegra_bpmp_master_free(channel)) + return 0; + + now = ns_to_ktime(local_clock()); + } while (ktime_us_delta(now, start) < timeout); + + return -ETIMEDOUT; +} + +static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, + void *data, size_t size) +{ + if (data && size > 0) + memcpy(data, channel->ib->data, size); + + return tegra_ivc_read_advance(channel->ivc); +} + +static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel, + void *data, size_t size) +{ + struct tegra_bpmp *bpmp = channel->bpmp; + unsigned long flags; + ssize_t err; + int index; + + index = tegra_bpmp_channel_get_thread_index(channel); + if (index < 0) + return index; + + spin_lock_irqsave(&bpmp->lock, flags); + err = __tegra_bpmp_channel_read(channel, data, size); + clear_bit(index, bpmp->threaded.allocated); + spin_unlock_irqrestore(&bpmp->lock, flags); + + up(&bpmp->threaded.lock); + + return err; +} + +static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, + unsigned int mrq, unsigned long flags, + const void *data, size_t size) +{ + channel->ob->code = mrq; + channel->ob->flags = flags; + + if (data && size > 0) + memcpy(channel->ob->data, data, size); + + return tegra_ivc_write_advance(channel->ivc); +} + +static struct tegra_bpmp_channel * +tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq, + const void *data, size_t size) +{ + unsigned long timeout = bpmp->soc->channels.thread.timeout; + unsigned int count = bpmp->soc->channels.thread.count; + struct tegra_bpmp_channel *channel; + unsigned long flags; + unsigned int index; + int err; + + err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout)); + if (err < 0) + return ERR_PTR(err); + + spin_lock_irqsave(&bpmp->lock, flags); + + index = find_first_zero_bit(bpmp->threaded.allocated, count); + if (index == count) { + channel = ERR_PTR(-EBUSY); + goto unlock; + } + + channel = tegra_bpmp_channel_get_thread(bpmp, index); + if (!channel) { + channel = ERR_PTR(-EINVAL); + goto unlock; + } + + if (!tegra_bpmp_master_free(channel)) { + channel = ERR_PTR(-EBUSY); + goto unlock; + } + + set_bit(index, bpmp->threaded.allocated); + + err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING, + data, size); + if (err < 0) { + clear_bit(index, bpmp->threaded.allocated); + goto unlock; + } + + set_bit(index, bpmp->threaded.busy); + +unlock: + spin_unlock_irqrestore(&bpmp->lock, flags); + return channel; +} + +static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel, + unsigned int mrq, unsigned long flags, + const void *data, size_t size) +{ + int err; + + err = tegra_bpmp_wait_master_free(channel); + if (err < 0) + return err; + + return __tegra_bpmp_channel_write(channel, mrq, flags, data, size); +} + +int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp, + struct tegra_bpmp_message *msg) +{ + struct tegra_bpmp_channel *channel; + int err; + + if 
(WARN_ON(!irqs_disabled())) + return -EPERM; + + if (!tegra_bpmp_message_valid(msg)) + return -EINVAL; + + channel = tegra_bpmp_channel_get_tx(bpmp); + + err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK, + msg->tx.data, msg->tx.size); + if (err < 0) + return err; + + err = mbox_send_message(bpmp->mbox.channel, NULL); + if (err < 0) + return err; + + mbox_client_txdone(bpmp->mbox.channel, 0); + + err = tegra_bpmp_wait_ack(channel); + if (err < 0) + return err; + + return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size); +} +EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic); + +int tegra_bpmp_transfer(struct tegra_bpmp *bpmp, + struct tegra_bpmp_message *msg) +{ + struct tegra_bpmp_channel *channel; + unsigned long timeout; + int err; + + if (WARN_ON(irqs_disabled())) + return -EPERM; + + if (!tegra_bpmp_message_valid(msg)) + return -EINVAL; + + channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data, + msg->tx.size); + if (IS_ERR(channel)) + return PTR_ERR(channel); + + err = mbox_send_message(bpmp->mbox.channel, NULL); + if (err < 0) + return err; + + mbox_client_txdone(bpmp->mbox.channel, 0); + + timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout); + + err = wait_for_completion_timeout(&channel->completion, timeout); + if (err == 0) + return -ETIMEDOUT; + + return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size); +} +EXPORT_SYMBOL_GPL(tegra_bpmp_transfer); + +static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp, + unsigned int mrq) +{ + struct tegra_bpmp_mrq *entry; + + list_for_each_entry(entry, &bpmp->mrqs, list) + if (entry->mrq == mrq) + return entry; + + return NULL; +} + +static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel, + int code, const void *data, size_t size) +{ + unsigned long flags = channel->ib->flags; + struct tegra_bpmp *bpmp = channel->bpmp; + struct tegra_bpmp_mb_data *frame; + int err; + + if (WARN_ON(size > MSG_DATA_MIN_SZ)) + return; + + err = tegra_ivc_read_advance(channel->ivc); + if (WARN_ON(err < 0)) + return; + + if ((flags & MSG_ACK) == 0) + return; + + frame = tegra_ivc_write_get_next_frame(channel->ivc); + if (WARN_ON(IS_ERR(frame))) + return; + + frame->code = code; + + if (data && size > 0) + memcpy(frame->data, data, size); + + err = tegra_ivc_write_advance(channel->ivc); + if (WARN_ON(err < 0)) + return; + + if (flags & MSG_RING) { + err = mbox_send_message(bpmp->mbox.channel, NULL); + if (WARN_ON(err < 0)) + return; + + mbox_client_txdone(bpmp->mbox.channel, 0); + } +} + +static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp, + unsigned int mrq, + struct tegra_bpmp_channel *channel) +{ + struct tegra_bpmp_mrq *entry; + u32 zero = 0; + + spin_lock(&bpmp->lock); + + entry = tegra_bpmp_find_mrq(bpmp, mrq); + if (!entry) { + spin_unlock(&bpmp->lock); + tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero)); + return; + } + + entry->handler(mrq, channel, entry->data); + + spin_unlock(&bpmp->lock); +} + +int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, + tegra_bpmp_mrq_handler_t handler, void *data) +{ + struct tegra_bpmp_mrq *entry; + unsigned long flags; + + if (!handler) + return -EINVAL; + + entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&bpmp->lock, flags); + + entry->mrq = mrq; + entry->handler = handler; + entry->data = data; + list_add(&entry->list, &bpmp->mrqs); + + spin_unlock_irqrestore(&bpmp->lock, flags); + + return 0; +} 
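/*
 * tegra_bpmp_request_mrq() above keeps handlers in a list keyed by MRQ
 * number, and tegra_bpmp_handle_mrq() walks that list for each incoming
 * request, returning -EINVAL to the sender when nothing matches. A
 * stand-alone sketch of the dispatch pattern; illustrative only, not part
 * of this patch, and request_mrq(), handle_mrq() and ping_handler() are
 * invented names.
 */
#include <stdio.h>
#include <stdlib.h>

struct mrq_entry {
    unsigned int mrq;
    void (*handler)(unsigned int mrq, void *data);
    void *data;
    struct mrq_entry *next;
};

static struct mrq_entry *mrqs;

static int request_mrq(unsigned int mrq,
                       void (*handler)(unsigned int, void *), void *data)
{
    struct mrq_entry *entry = malloc(sizeof(*entry));

    if (!entry)
        return -1;

    entry->mrq = mrq;
    entry->handler = handler;
    entry->data = data;
    entry->next = mrqs;
    mrqs = entry;
    return 0;
}

static void handle_mrq(unsigned int mrq)
{
    struct mrq_entry *entry;

    for (entry = mrqs; entry; entry = entry->next) {
        if (entry->mrq == mrq) {
            entry->handler(mrq, entry->data);
            return;
        }
    }
    printf("mrq %u: no handler, would return -EINVAL to the sender\n", mrq);
}

static void ping_handler(unsigned int mrq, void *data)
{
    (void)data;
    printf("mrq %u: ping handled\n", mrq);
}

int main(void)
{
    request_mrq(0, ping_handler, NULL);    /* 0 is an arbitrary MRQ here */
    handle_mrq(0);
    handle_mrq(42);
    return 0;
}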
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq); + +void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data) +{ + struct tegra_bpmp_mrq *entry; + unsigned long flags; + + spin_lock_irqsave(&bpmp->lock, flags); + + entry = tegra_bpmp_find_mrq(bpmp, mrq); + if (!entry) + goto unlock; + + list_del(&entry->list); + devm_kfree(bpmp->dev, entry); + +unlock: + spin_unlock_irqrestore(&bpmp->lock, flags); +} +EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq); + +static void tegra_bpmp_mrq_handle_ping(unsigned int mrq, + struct tegra_bpmp_channel *channel, + void *data) +{ + struct mrq_ping_request *request; + struct mrq_ping_response response; + + request = (struct mrq_ping_request *)channel->ib->data; + + memset(&response, 0, sizeof(response)); + response.reply = request->challenge << 1; + + tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response)); +} + +static int tegra_bpmp_ping(struct tegra_bpmp *bpmp) +{ + struct mrq_ping_response response; + struct mrq_ping_request request; + struct tegra_bpmp_message msg; + unsigned long flags; + ktime_t start, end; + int err; + + memset(&request, 0, sizeof(request)); + request.challenge = 1; + + memset(&response, 0, sizeof(response)); + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_PING; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + msg.rx.data = &response; + msg.rx.size = sizeof(response); + + local_irq_save(flags); + start = ktime_get(); + err = tegra_bpmp_transfer_atomic(bpmp, &msg); + end = ktime_get(); + local_irq_restore(flags); + + if (!err) + dev_dbg(bpmp->dev, + "ping ok: challenge: %u, response: %u, time: %lld\n", + request.challenge, response.reply, + ktime_to_us(ktime_sub(end, start))); + + return err; +} + +static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag, + size_t size) +{ + struct mrq_query_tag_request request; + struct tegra_bpmp_message msg; + unsigned long flags; + dma_addr_t phys; + void *virt; + int err; + + virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys, + GFP_KERNEL | GFP_DMA32); + if (!virt) + return -ENOMEM; + + memset(&request, 0, sizeof(request)); + request.addr = phys; + + memset(&msg, 0, sizeof(msg)); + msg.mrq = MRQ_QUERY_TAG; + msg.tx.data = &request; + msg.tx.size = sizeof(request); + + local_irq_save(flags); + err = tegra_bpmp_transfer_atomic(bpmp, &msg); + local_irq_restore(flags); + + if (err == 0) + strlcpy(tag, virt, size); + + dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys); + + return err; +} + +static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel) +{ + unsigned long flags = channel->ob->flags; + + if ((flags & MSG_RING) == 0) + return; + + complete(&channel->completion); +} + +static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data) +{ + struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client); + struct tegra_bpmp_channel *channel; + unsigned int i, count; + unsigned long *busy; + + channel = tegra_bpmp_channel_get_rx(bpmp); + count = bpmp->soc->channels.thread.count; + busy = bpmp->threaded.busy; + + if (tegra_bpmp_master_acked(channel)) + tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel); + + spin_lock(&bpmp->lock); + + for_each_set_bit(i, busy, count) { + struct tegra_bpmp_channel *channel; + + channel = tegra_bpmp_channel_get_thread(bpmp, i); + if (!channel) + continue; + + if (tegra_bpmp_master_acked(channel)) { + tegra_bpmp_channel_signal(channel); + clear_bit(i, busy); + } + } + + spin_unlock(&bpmp->lock); +} + +static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data) +{ + struct 
tegra_bpmp *bpmp = data; + int err; + + if (WARN_ON(bpmp->mbox.channel == NULL)) + return; + + err = mbox_send_message(bpmp->mbox.channel, NULL); + if (err < 0) + return; + + mbox_client_txdone(bpmp->mbox.channel, 0); +} + +static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel, + struct tegra_bpmp *bpmp, + unsigned int index) +{ + size_t message_size, queue_size; + unsigned int offset; + int err; + + channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc), + GFP_KERNEL); + if (!channel->ivc) + return -ENOMEM; + + message_size = tegra_ivc_align(MSG_MIN_SZ); + queue_size = tegra_ivc_total_queue_size(message_size); + offset = queue_size * index; + + err = tegra_ivc_init(channel->ivc, NULL, + bpmp->rx.virt + offset, bpmp->rx.phys + offset, + bpmp->tx.virt + offset, bpmp->tx.phys + offset, + 1, message_size, tegra_bpmp_ivc_notify, + bpmp); + if (err < 0) { + dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n", + index, err); + return err; + } + + init_completion(&channel->completion); + channel->bpmp = bpmp; + + return 0; +} + +static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel) +{ + /* reset the channel state */ + tegra_ivc_reset(channel->ivc); + + /* sync the channel state with BPMP */ + while (tegra_ivc_notified(channel->ivc)) + ; +} + +static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel) +{ + tegra_ivc_cleanup(channel->ivc); +} + +static int tegra_bpmp_probe(struct platform_device *pdev) +{ + struct tegra_bpmp_channel *channel; + struct tegra_bpmp *bpmp; + unsigned int i; + char tag[32]; + size_t size; + int err; + + bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL); + if (!bpmp) + return -ENOMEM; + + bpmp->soc = of_device_get_match_data(&pdev->dev); + bpmp->dev = &pdev->dev; + + bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0); + if (!bpmp->tx.pool) { + dev_err(&pdev->dev, "TX shmem pool not found\n"); + return -ENOMEM; + } + + bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys); + if (!bpmp->tx.virt) { + dev_err(&pdev->dev, "failed to allocate from TX pool\n"); + return -ENOMEM; + } + + bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1); + if (!bpmp->rx.pool) { + dev_err(&pdev->dev, "RX shmem pool not found\n"); + err = -ENOMEM; + goto free_tx; + } + + bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys); + if (!bpmp->rx.virt) { + dev_err(&pdev->dev, "failed to allocate from RX pool\n"); + err = -ENOMEM; + goto free_tx; + } + + INIT_LIST_HEAD(&bpmp->mrqs); + spin_lock_init(&bpmp->lock); + + bpmp->threaded.count = bpmp->soc->channels.thread.count; + sema_init(&bpmp->threaded.lock, bpmp->threaded.count); + + size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long); + + bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!bpmp->threaded.allocated) { + err = -ENOMEM; + goto free_rx; + } + + bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!bpmp->threaded.busy) { + err = -ENOMEM; + goto free_rx; + } + + bpmp->num_channels = bpmp->soc->channels.cpu_tx.count + + bpmp->soc->channels.thread.count + + bpmp->soc->channels.cpu_rx.count; + + bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels, + sizeof(*channel), GFP_KERNEL); + if (!bpmp->channels) { + err = -ENOMEM; + goto free_rx; + } + + /* message channel initialization */ + for (i = 0; i < bpmp->num_channels; i++) { + struct tegra_bpmp_channel *channel = &bpmp->channels[i]; + + err = tegra_bpmp_channel_init(channel, bpmp, i); + if (err < 0) +
goto cleanup_channels; + } + + /* mbox registration */ + bpmp->mbox.client.dev = &pdev->dev; + bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx; + bpmp->mbox.client.tx_block = false; + bpmp->mbox.client.knows_txdone = false; + + bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0); + if (IS_ERR(bpmp->mbox.channel)) { + err = PTR_ERR(bpmp->mbox.channel); + dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err); + goto cleanup_channels; + } + + /* reset message channels */ + for (i = 0; i < bpmp->num_channels; i++) { + struct tegra_bpmp_channel *channel = &bpmp->channels[i]; + + tegra_bpmp_channel_reset(channel); + } + + err = tegra_bpmp_request_mrq(bpmp, MRQ_PING, + tegra_bpmp_mrq_handle_ping, bpmp); + if (err < 0) + goto free_mbox; + + err = tegra_bpmp_ping(bpmp); + if (err < 0) { + dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err); + goto free_mrq; + } + + err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1); + if (err < 0) { + dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err); + goto free_mrq; + } + + dev_info(&pdev->dev, "firmware: %s\n", tag); + + err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev); + if (err < 0) + goto free_mrq; + + err = tegra_bpmp_init_clocks(bpmp); + if (err < 0) + goto free_mrq; + + err = tegra_bpmp_init_resets(bpmp); + if (err < 0) + goto free_mrq; + + platform_set_drvdata(pdev, bpmp); + + return 0; + +free_mrq: + tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp); +free_mbox: + mbox_free_channel(bpmp->mbox.channel); +cleanup_channels: + while (i--) + tegra_bpmp_channel_cleanup(&bpmp->channels[i]); +free_rx: + gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096); +free_tx: + gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096); + return err; +} + +static const struct tegra_bpmp_soc tegra186_soc = { + .channels = { + .cpu_tx = { + .offset = 0, + .count = 6, + .timeout = 60 * USEC_PER_SEC, + }, + .thread = { + .offset = 6, + .count = 7, + .timeout = 600 * USEC_PER_SEC, + }, + .cpu_rx = { + .offset = 13, + .count = 1, + .timeout = 0, + }, + }, + .num_resets = 193, +}; + +static const struct of_device_id tegra_bpmp_match[] = { + { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc }, + { } +}; + +static struct platform_driver tegra_bpmp_driver = { + .driver = { + .name = "tegra-bpmp", + .of_match_table = tegra_bpmp_match, + }, + .probe = tegra_bpmp_probe, +}; + +static int __init tegra_bpmp_init(void) +{ + return platform_driver_register(&tegra_bpmp_driver); +} +core_initcall(tegra_bpmp_init); diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c new file mode 100644 index 000000000000..29ecfd815320 --- /dev/null +++ b/drivers/firmware/tegra/ivc.c @@ -0,0 +1,695 @@ +/* + * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include <soc/tegra/ivc.h> + +#define TEGRA_IVC_ALIGN 64 + +/* + * IVC channel reset protocol. + * + * Each end uses its tx_channel.state to indicate its synchronization state. 
+ */ +enum tegra_ivc_state { + /* + * This value is zero for backwards compatibility with services that + * assume channels to be initially zeroed. Such channels are in an + * initially valid state, but cannot be asynchronously reset, and must + * maintain a valid state at all times. + * + * The transmitting end can enter the established state from the sync or + * ack state when it observes the receiving endpoint in the ack or + * established state, indicating that has cleared the counters in our + * rx_channel. + */ + TEGRA_IVC_STATE_ESTABLISHED = 0, + + /* + * If an endpoint is observed in the sync state, the remote endpoint is + * allowed to clear the counters it owns asynchronously with respect to + * the current endpoint. Therefore, the current endpoint is no longer + * allowed to communicate. + */ + TEGRA_IVC_STATE_SYNC, + + /* + * When the transmitting end observes the receiving end in the sync + * state, it can clear the w_count and r_count and transition to the ack + * state. If the remote endpoint observes us in the ack state, it can + * return to the established state once it has cleared its counters. + */ + TEGRA_IVC_STATE_ACK +}; + +/* + * This structure is divided into two-cache aligned parts, the first is only + * written through the tx.channel pointer, while the second is only written + * through the rx.channel pointer. This delineates ownership of the cache + * lines, which is critical to performance and necessary in non-cache coherent + * implementations. + */ +struct tegra_ivc_header { + union { + struct { + /* fields owned by the transmitting end */ + u32 count; + u32 state; + }; + + u8 pad[TEGRA_IVC_ALIGN]; + } tx; + + union { + /* fields owned by the receiving end */ + u32 count; + u8 pad[TEGRA_IVC_ALIGN]; + } rx; +}; + +static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys) +{ + if (!ivc->peer) + return; + + dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN, + DMA_FROM_DEVICE); +} + +static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys) +{ + if (!ivc->peer) + return; + + dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN, + DMA_TO_DEVICE); +} + +static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, + struct tegra_ivc_header *header) +{ + /* + * This function performs multiple checks on the same values with + * security implications, so create snapshots with ACCESS_ONCE() to + * ensure that these checks use the same values. + */ + u32 tx = ACCESS_ONCE(header->tx.count); + u32 rx = ACCESS_ONCE(header->rx.count); + + /* + * Perform an over-full check to prevent denial of service attacks + * where a server could be easily fooled into believing that there's + * an extremely large number of frames ready, since receivers are not + * expected to check for full or over-full conditions. + * + * Although the channel isn't empty, this is an invalid case caused by + * a potentially malicious peer, so returning empty is safer, because + * it gives the impression that the channel has gone silent. + */ + if (tx - rx > ivc->num_frames) + return true; + + return tx == rx; +} + +static inline bool tegra_ivc_full(struct tegra_ivc *ivc, + struct tegra_ivc_header *header) +{ + u32 tx = ACCESS_ONCE(header->tx.count); + u32 rx = ACCESS_ONCE(header->rx.count); + + /* + * Invalid cases where the counters indicate that the queue is over + * capacity also appear full. 
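/*
 * The empty/full/available helpers above operate on free-running u32
 * counters that are only ever incremented, so unsigned subtraction gives
 * the fill level even after the counters wrap, and a difference larger
 * than the frame count can only come from a corrupt or malicious peer.
 * A user-space sketch with made-up values; illustrative only, not part of
 * this patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NUM_FRAMES 8u

static bool queue_empty(uint32_t tx, uint32_t rx)
{
    /* An over-full queue (tx - rx > NUM_FRAMES) is reported as empty. */
    if (tx - rx > NUM_FRAMES)
        return true;
    return tx == rx;
}

static bool queue_full(uint32_t tx, uint32_t rx)
{
    return tx - rx >= NUM_FRAMES;
}

int main(void)
{
    /* Counters that have wrapped: rx just below UINT32_MAX, tx past zero. */
    uint32_t rx = 0xfffffffe, tx = 0x00000002;

    printf("fill level: %u\n", tx - rx);    /* 4 */
    printf("empty: %d, full: %d\n", queue_empty(tx, rx), queue_full(tx, rx));
    return 0;
}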
+ */ + return tx - rx >= ivc->num_frames; +} + +static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, + struct tegra_ivc_header *header) +{ + u32 tx = ACCESS_ONCE(header->tx.count); + u32 rx = ACCESS_ONCE(header->rx.count); + + /* + * This function isn't expected to be used in scenarios where an + * over-full situation can lead to denial of service attacks. See the + * comment in tegra_ivc_empty() for an explanation about special + * over-full considerations. + */ + return tx - rx; +} + +static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) +{ + ACCESS_ONCE(ivc->tx.channel->tx.count) = + ACCESS_ONCE(ivc->tx.channel->tx.count) + 1; + + if (ivc->tx.position == ivc->num_frames - 1) + ivc->tx.position = 0; + else + ivc->tx.position++; +} + +static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) +{ + ACCESS_ONCE(ivc->rx.channel->rx.count) = + ACCESS_ONCE(ivc->rx.channel->rx.count) + 1; + + if (ivc->rx.position == ivc->num_frames - 1) + ivc->rx.position = 0; + else + ivc->rx.position++; +} + +static inline int tegra_ivc_check_read(struct tegra_ivc *ivc) +{ + unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); + + /* + * tx.channel->state is set locally, so it is not synchronized with + * state from the remote peer. The remote peer cannot reset its + * transmit counters until we've acknowledged its synchronization + * request, so no additional synchronization is required because an + * asynchronous transition of rx.channel->state to + * TEGRA_IVC_STATE_ACK is not allowed. + */ + if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) + return -ECONNRESET; + + /* + * Avoid unnecessary invalidations when performing repeated accesses + * to an IVC channel by checking the old queue pointers first. + * + * Synchronization is only necessary when these pointers indicate + * empty or full. 
+ */ + if (!tegra_ivc_empty(ivc, ivc->rx.channel)) + return 0; + + tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); + + if (tegra_ivc_empty(ivc, ivc->rx.channel)) + return -ENOSPC; + + return 0; +} + +static inline int tegra_ivc_check_write(struct tegra_ivc *ivc) +{ + unsigned int offset = offsetof(struct tegra_ivc_header, rx.count); + + if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) + return -ECONNRESET; + + if (!tegra_ivc_full(ivc, ivc->tx.channel)) + return 0; + + tegra_ivc_invalidate(ivc, ivc->tx.phys + offset); + + if (tegra_ivc_full(ivc, ivc->tx.channel)) + return -ENOSPC; + + return 0; +} + +static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc, + struct tegra_ivc_header *header, + unsigned int frame) +{ + if (WARN_ON(frame >= ivc->num_frames)) + return ERR_PTR(-EINVAL); + + return (void *)(header + 1) + ivc->frame_size * frame; +} + +static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc, + dma_addr_t phys, + unsigned int frame) +{ + unsigned long offset; + + offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame; + + return phys + offset; +} + +static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc, + dma_addr_t phys, + unsigned int frame, + unsigned int offset, + size_t size) +{ + if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) + return; + + phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; + + dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE); +} + +static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc, + dma_addr_t phys, + unsigned int frame, + unsigned int offset, + size_t size) +{ + if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) + return; + + phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; + + dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE); +} + +/* directly peek at the next frame rx'ed */ +void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc) +{ + int err; + + if (WARN_ON(ivc == NULL)) + return ERR_PTR(-EINVAL); + + err = tegra_ivc_check_read(ivc); + if (err < 0) + return ERR_PTR(err); + + /* + * Order observation of ivc->rx.position potentially indicating new + * data before data read. + */ + smp_rmb(); + + tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0, + ivc->frame_size); + + return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position); +} +EXPORT_SYMBOL(tegra_ivc_read_get_next_frame); + +int tegra_ivc_read_advance(struct tegra_ivc *ivc) +{ + unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); + unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); + int err; + + /* + * No read barriers or synchronization here: the caller is expected to + * have already observed the channel non-empty. This check is just to + * catch programming errors. + */ + err = tegra_ivc_check_read(ivc); + if (err < 0) + return err; + + tegra_ivc_advance_rx(ivc); + + tegra_ivc_flush(ivc, ivc->rx.phys + rx); + + /* + * Ensure our write to ivc->rx.position occurs before our read from + * ivc->tx.position. + */ + smp_mb(); + + /* + * Notify only upon transition from full to non-full. The available + * count can only asynchronously increase, so the worst possible + * side-effect will be a spurious notification. 
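tegra_ivc_frame_virt() and tegra_ivc_frame_phys() encode the same memory layout: each queue is one tegra_ivc_header immediately followed by num_frames frames of frame_size bytes. A worked illustration with hypothetical numbers:

/*
 * Hypothetical layout with 16 frames of 128 bytes:
 *
 *   base + 0                          struct tegra_ivc_header
 *   base + sizeof(header) +  0 * 128  frame 0
 *   base + sizeof(header) +  1 * 128  frame 1
 *   ...
 *   base + sizeof(header) + 15 * 128  frame 15
 *
 * i.e. exactly (void *)(header + 1) + ivc->frame_size * frame as computed
 * by tegra_ivc_frame_virt() above, and the matching physical offset in
 * tegra_ivc_frame_phys().
 */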
+ */ + tegra_ivc_invalidate(ivc, ivc->rx.phys + tx); + + if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1) + ivc->notify(ivc, ivc->notify_data); + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_read_advance); + +/* directly poke at the next frame to be tx'ed */ +void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc) +{ + int err; + + err = tegra_ivc_check_write(ivc); + if (err < 0) + return ERR_PTR(err); + + return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position); +} +EXPORT_SYMBOL(tegra_ivc_write_get_next_frame); + +/* advance the tx buffer */ +int tegra_ivc_write_advance(struct tegra_ivc *ivc) +{ + unsigned int tx = offsetof(struct tegra_ivc_header, tx.count); + unsigned int rx = offsetof(struct tegra_ivc_header, rx.count); + int err; + + err = tegra_ivc_check_write(ivc); + if (err < 0) + return err; + + tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0, + ivc->frame_size); + + /* + * Order any possible stores to the frame before update of + * ivc->tx.position. + */ + smp_wmb(); + + tegra_ivc_advance_tx(ivc); + tegra_ivc_flush(ivc, ivc->tx.phys + tx); + + /* + * Ensure our write to ivc->tx.position occurs before our read from + * ivc->rx.position. + */ + smp_mb(); + + /* + * Notify only upon transition from empty to non-empty. The available + * count can only asynchronously decrease, so the worst possible + * side-effect will be a spurious notification. + */ + tegra_ivc_invalidate(ivc, ivc->tx.phys + rx); + + if (tegra_ivc_available(ivc, ivc->tx.channel) == 1) + ivc->notify(ivc, ivc->notify_data); + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_write_advance); + +void tegra_ivc_reset(struct tegra_ivc *ivc) +{ + unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); + + ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC; + tegra_ivc_flush(ivc, ivc->tx.phys + offset); + ivc->notify(ivc, ivc->notify_data); +} +EXPORT_SYMBOL(tegra_ivc_reset); + +/* + * ======================================================= + * IVC State Transition Table - see tegra_ivc_notified() + * ======================================================= + * + * local remote action + * ----- ------ ----------------------------------- + * SYNC EST <none> + * SYNC ACK reset counters; move to EST; notify + * SYNC SYNC reset counters; move to ACK; notify + * ACK EST move to EST; notify + * ACK ACK move to EST; notify + * ACK SYNC reset counters; move to ACK; notify + * EST EST <none> + * EST ACK <none> + * EST SYNC reset counters; move to ACK; notify + * + * =============================================================== + */ + +int tegra_ivc_notified(struct tegra_ivc *ivc) +{ + unsigned int offset = offsetof(struct tegra_ivc_header, tx.count); + enum tegra_ivc_state state; + + /* Copy the receiver's state out of shared memory. */ + tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); + state = ACCESS_ONCE(ivc->rx.channel->tx.state); + + if (state == TEGRA_IVC_STATE_SYNC) { + offset = offsetof(struct tegra_ivc_header, tx.count); + + /* + * Order observation of TEGRA_IVC_STATE_SYNC before stores + * clearing tx.channel. + */ + smp_rmb(); + + /* + * Reset tx.channel counters. The remote end is in the SYNC + * state and won't make progress until we change our state, + * so the counters are not in use at this time. + */ + ivc->tx.channel->tx.count = 0; + ivc->rx.channel->rx.count = 0; + + ivc->tx.position = 0; + ivc->rx.position = 0; + + /* + * Ensure that counters appear cleared before new state can be + * observed. + */ + smp_wmb(); + + /* + * Move to ACK state. 
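Taken together, the exported read/write calls form a zero-copy ring interface. A hedged sketch of how a client driver might use them once the channel is established (the example_* names and payload handling are hypothetical, error handling is trimmed, and len is assumed to be at most the frame size):

/* Hypothetical transmit path: reserve a frame, fill it, then publish it. */
static int example_send(struct tegra_ivc *ivc, const void *data, size_t len)
{
        void *frame = tegra_ivc_write_get_next_frame(ivc);

        if (IS_ERR(frame))
                return PTR_ERR(frame);  /* -ENOSPC when the queue is full */

        memcpy(frame, data, len);
        return tegra_ivc_write_advance(ivc);
}

/* Hypothetical receive path: drain every frame currently queued. */
static void example_drain(struct tegra_ivc *ivc, size_t frame_size)
{
        void *frame;

        while (!IS_ERR(frame = tegra_ivc_read_get_next_frame(ivc))) {
                example_process(frame, frame_size); /* hypothetical consumer */
                tegra_ivc_read_advance(ivc);
        }
}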
We have just cleared our counters, so it + * is now safe for the remote end to start using these values. + */ + ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK; + tegra_ivc_flush(ivc, ivc->tx.phys + offset); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc, ivc->notify_data); + + } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC && + state == TEGRA_IVC_STATE_ACK) { + offset = offsetof(struct tegra_ivc_header, tx.count); + + /* + * Order observation of ivc_state_sync before stores clearing + * tx_channel. + */ + smp_rmb(); + + /* + * Reset tx.channel counters. The remote end is in the ACK + * state and won't make progress until we change our state, + * so the counters are not in use at this time. + */ + ivc->tx.channel->tx.count = 0; + ivc->rx.channel->rx.count = 0; + + ivc->tx.position = 0; + ivc->rx.position = 0; + + /* + * Ensure that counters appear cleared before new state can be + * observed. + */ + smp_wmb(); + + /* + * Move to ESTABLISHED state. We know that the remote end has + * already cleared its counters, so it is safe to start + * writing/reading on this channel. + */ + ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; + tegra_ivc_flush(ivc, ivc->tx.phys + offset); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc, ivc->notify_data); + + } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) { + offset = offsetof(struct tegra_ivc_header, tx.count); + + /* + * At this point, we have observed the peer to be in either + * the ACK or ESTABLISHED state. Next, order observation of + * peer state before storing to tx.channel. + */ + smp_rmb(); + + /* + * Move to ESTABLISHED state. We know that we have previously + * cleared our counters, and we know that the remote end has + * cleared its counters, so it is safe to start writing/reading + * on this channel. + */ + ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; + tegra_ivc_flush(ivc, ivc->tx.phys + offset); + + /* + * Notify remote end to observe state transition. + */ + ivc->notify(ivc, ivc->notify_data); + + } else { + /* + * There is no need to handle any further action. Either the + * channel is already fully established, or we are waiting for + * the remote end to catch up with our current state. Refer + * to the diagram in "IVC State Transition Table" above. 
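In practice the state machine above is driven entirely from the notification path. A hedged sketch of how a client could sequence the handshake (the doorbell/interrupt plumbing and example_* helpers are hypothetical):

/* Hypothetical setup: start (or restart) the handshake after init. */
static void example_channel_start(struct tegra_ivc *ivc)
{
        tegra_ivc_reset(ivc);   /* moves us to SYNC and rings the peer */
}

/* Hypothetical handler invoked on each incoming doorbell/notification. */
static void example_channel_notified(struct tegra_ivc *ivc)
{
        if (tegra_ivc_notified(ivc) == -EAGAIN)
                return;         /* handshake still in progress */

        /* Channel is ESTABLISHED: normal read/write traffic may resume. */
        example_process_rx(ivc);        /* hypothetical RX work */
}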
+ */ + } + + if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) + return -EAGAIN; + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_notified); + +size_t tegra_ivc_align(size_t size) +{ + return ALIGN(size, TEGRA_IVC_ALIGN); +} +EXPORT_SYMBOL(tegra_ivc_align); + +unsigned tegra_ivc_total_queue_size(unsigned queue_size) +{ + if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) { + pr_err("%s: queue_size (%u) must be %u-byte aligned\n", + __func__, queue_size, TEGRA_IVC_ALIGN); + return 0; + } + + return queue_size + sizeof(struct tegra_ivc_header); +} +EXPORT_SYMBOL(tegra_ivc_total_queue_size); + +static int tegra_ivc_check_params(unsigned long rx, unsigned long tx, + unsigned int num_frames, size_t frame_size) +{ + BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count), + TEGRA_IVC_ALIGN)); + BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count), + TEGRA_IVC_ALIGN)); + BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header), + TEGRA_IVC_ALIGN)); + + if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) { + pr_err("num_frames * frame_size overflows\n"); + return -EINVAL; + } + + if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) { + pr_err("frame size not adequately aligned: %zu\n", frame_size); + return -EINVAL; + } + + /* + * The headers must at least be aligned enough for counters + * to be accessed atomically. + */ + if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) { + pr_err("IVC channel start not aligned: %#lx\n", rx); + return -EINVAL; + } + + if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) { + pr_err("IVC channel start not aligned: %#lx\n", tx); + return -EINVAL; + } + + if (rx < tx) { + if (rx + frame_size * num_frames > tx) { + pr_err("queue regions overlap: %#lx + %zx > %#lx\n", + rx, frame_size * num_frames, tx); + return -EINVAL; + } + } else { + if (tx + frame_size * num_frames > rx) { + pr_err("queue regions overlap: %#lx + %zx > %#lx\n", + tx, frame_size * num_frames, rx); + return -EINVAL; + } + } + + return 0; +} + +int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx, + dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys, + unsigned int num_frames, size_t frame_size, + void (*notify)(struct tegra_ivc *ivc, void *data), + void *data) +{ + size_t queue_size; + int err; + + if (WARN_ON(!ivc || !notify)) + return -EINVAL; + + /* + * All sizes that can be returned by communication functions should + * fit in an int. + */ + if (frame_size > INT_MAX) + return -E2BIG; + + err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx, + num_frames, frame_size); + if (err < 0) + return err; + + queue_size = tegra_ivc_total_queue_size(num_frames * frame_size); + + if (peer) { + ivc->rx.phys = dma_map_single(peer, rx, queue_size, + DMA_BIDIRECTIONAL); + if (ivc->rx.phys == DMA_ERROR_CODE) + return -ENOMEM; + + ivc->tx.phys = dma_map_single(peer, tx, queue_size, + DMA_BIDIRECTIONAL); + if (ivc->tx.phys == DMA_ERROR_CODE) { + dma_unmap_single(peer, ivc->rx.phys, queue_size, + DMA_BIDIRECTIONAL); + return -ENOMEM; + } + } else { + ivc->rx.phys = rx_phys; + ivc->tx.phys = tx_phys; + } + + ivc->rx.channel = rx; + ivc->tx.channel = tx; + ivc->peer = peer; + ivc->notify = notify; + ivc->notify_data = data; + ivc->frame_size = frame_size; + ivc->num_frames = num_frames; + + /* + * These values aren't necessarily correct until the channel has been + * reset. 
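A hedged example of how a caller would size the two shared-memory regions before calling tegra_ivc_init() (the 16 x 128 geometry is hypothetical):

/* Hypothetical geometry: 16 frames of 128 bytes in each direction. */
unsigned int num_frames = 16;
size_t frame_size = tegra_ivc_align(128);
size_t queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

/*
 * queue_size == sizeof(struct tegra_ivc_header) + num_frames * frame_size.
 * One region of this size is needed for RX and another for TX, and both
 * must start on a TEGRA_IVC_ALIGN boundary, as enforced by
 * tegra_ivc_check_params() above.
 */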
+ */ + ivc->tx.position = 0; + ivc->rx.position = 0; + + return 0; +} +EXPORT_SYMBOL(tegra_ivc_init); + +void tegra_ivc_cleanup(struct tegra_ivc *ivc) +{ + if (ivc->peer) { + size_t size = tegra_ivc_total_queue_size(ivc->num_frames * + ivc->frame_size); + + dma_unmap_single(ivc->peer, ivc->rx.phys, size, + DMA_BIDIRECTIONAL); + dma_unmap_single(ivc->peer, ivc->tx.phys, size, + DMA_BIDIRECTIONAL); + } +} +EXPORT_SYMBOL(tegra_ivc_cleanup); diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c new file mode 100644 index 000000000000..874ff32db366 --- /dev/null +++ b/drivers/firmware/ti_sci.c @@ -0,0 +1,1991 @@ +/* + * Texas Instruments System Control Interface Protocol Driver + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/bitmap.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/semaphore.h> +#include <linux/slab.h> +#include <linux/soc/ti/ti-msgmgr.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/reboot.h> + +#include "ti_sci.h" + +/* List of all TI SCI devices active in system */ +static LIST_HEAD(ti_sci_list); +/* Protection for the entire list */ +static DEFINE_MUTEX(ti_sci_list_mutex); + +/** + * struct ti_sci_xfer - Structure representing a message flow + * @tx_message: Transmit message + * @rx_len: Receive message length + * @xfer_buf: Preallocated buffer to store receive message + * Since we work with request-ACK protocol, we can + * reuse the same buffer for the rx path as we + * use for the tx path. + * @done: completion event + */ +struct ti_sci_xfer { + struct ti_msgmgr_message tx_message; + u8 rx_len; + u8 *xfer_buf; + struct completion done; +}; + +/** + * struct ti_sci_xfers_info - Structure to manage transfer information + * @sem_xfer_count: Counting Semaphore for managing max simultaneous + * Messages. + * @xfer_block: Preallocated Message array + * @xfer_alloc_table: Bitmap table for allocated messages. + * Index of this bitmap table is also used for message + * sequence identifier. + * @xfer_lock: Protection for message allocation + */ +struct ti_sci_xfers_info { + struct semaphore sem_xfer_count; + struct ti_sci_xfer *xfer_block; + unsigned long *xfer_alloc_table; + /* protect transfer allocation */ + spinlock_t xfer_lock; +}; + +/** + * struct ti_sci_desc - Description of SoC integration + * @host_id: Host identifier representing the compute entity + * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds) + * @max_msgs: Maximum number of messages that can be pending + * simultaneously in the system + * @max_msg_size: Maximum size of data per message that can be handled. 
+ */ +struct ti_sci_desc { + u8 host_id; + int max_rx_timeout_ms; + int max_msgs; + int max_msg_size; +}; + +/** + * struct ti_sci_info - Structure representing a TI SCI instance + * @dev: Device pointer + * @desc: SoC description for this instance + * @nb: Reboot Notifier block + * @d: Debugfs file entry + * @debug_region: Memory region where the debug message are available + * @debug_region_size: Debug region size + * @debug_buffer: Buffer allocated to copy debug messages. + * @handle: Instance of TI SCI handle to send to clients. + * @cl: Mailbox Client + * @chan_tx: Transmit mailbox channel + * @chan_rx: Receive mailbox channel + * @minfo: Message info + * @node: list head + * @users: Number of users of this instance + */ +struct ti_sci_info { + struct device *dev; + struct notifier_block nb; + const struct ti_sci_desc *desc; + struct dentry *d; + void __iomem *debug_region; + char *debug_buffer; + size_t debug_region_size; + struct ti_sci_handle handle; + struct mbox_client cl; + struct mbox_chan *chan_tx; + struct mbox_chan *chan_rx; + struct ti_sci_xfers_info minfo; + struct list_head node; + /* protected by ti_sci_list_mutex */ + int users; + +}; + +#define cl_to_ti_sci_info(c) container_of(c, struct ti_sci_info, cl) +#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle) +#define reboot_to_ti_sci_info(n) container_of(n, struct ti_sci_info, nb) + +#ifdef CONFIG_DEBUG_FS + +/** + * ti_sci_debug_show() - Helper to dump the debug log + * @s: sequence file pointer + * @unused: unused. + * + * Return: 0 + */ +static int ti_sci_debug_show(struct seq_file *s, void *unused) +{ + struct ti_sci_info *info = s->private; + + memcpy_fromio(info->debug_buffer, info->debug_region, + info->debug_region_size); + /* + * We don't trust firmware to leave NULL terminated last byte (hence + * we have allocated 1 extra 0 byte). Since we cannot guarantee any + * specific data format for debug messages, We just present the data + * in the buffer as is - we expect the messages to be self explanatory. + */ + seq_puts(s, info->debug_buffer); + return 0; +} + +/** + * ti_sci_debug_open() - debug file open + * @inode: inode pointer + * @file: file pointer + * + * Return: result of single_open + */ +static int ti_sci_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, ti_sci_debug_show, inode->i_private); +} + +/* log file operations */ +static const struct file_operations ti_sci_debug_fops = { + .open = ti_sci_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * ti_sci_debugfs_create() - Create log debug file + * @pdev: platform device pointer + * @info: Pointer to SCI entity information + * + * Return: 0 if all went fine, else corresponding error. 
+ */ +static int ti_sci_debugfs_create(struct platform_device *pdev, + struct ti_sci_info *info) +{ + struct device *dev = &pdev->dev; + struct resource *res; + char debug_name[50] = "ti_sci_debug@"; + + /* Debug region is optional */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "debug_messages"); + info->debug_region = devm_ioremap_resource(dev, res); + if (IS_ERR(info->debug_region)) + return 0; + info->debug_region_size = resource_size(res); + + info->debug_buffer = devm_kcalloc(dev, info->debug_region_size + 1, + sizeof(char), GFP_KERNEL); + if (!info->debug_buffer) + return -ENOMEM; + /* Setup NULL termination */ + info->debug_buffer[info->debug_region_size] = 0; + + info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), + sizeof(debug_name)), + 0444, NULL, info, &ti_sci_debug_fops); + if (IS_ERR(info->d)) + return PTR_ERR(info->d); + + dev_dbg(dev, "Debug region => %p, size = %zu bytes, resource: %pr\n", + info->debug_region, info->debug_region_size, res); + return 0; +} + +/** + * ti_sci_debugfs_destroy() - clean up log debug file + * @pdev: platform device pointer + * @info: Pointer to SCI entity information + */ +static void ti_sci_debugfs_destroy(struct platform_device *pdev, + struct ti_sci_info *info) +{ + if (IS_ERR(info->debug_region)) + return; + + debugfs_remove(info->d); +} +#else /* CONFIG_DEBUG_FS */ +static inline int ti_sci_debugfs_create(struct platform_device *dev, + struct ti_sci_info *info) +{ + return 0; +} + +static inline void ti_sci_debugfs_destroy(struct platform_device *dev, + struct ti_sci_info *info) +{ +} +#endif /* CONFIG_DEBUG_FS */ + +/** + * ti_sci_dump_header_dbg() - Helper to dump a message header. + * @dev: Device pointer corresponding to the SCI entity + * @hdr: pointer to header. + */ +static inline void ti_sci_dump_header_dbg(struct device *dev, + struct ti_sci_msg_hdr *hdr) +{ + dev_dbg(dev, "MSGHDR:type=0x%04x host=0x%02x seq=0x%02x flags=0x%08x\n", + hdr->type, hdr->host, hdr->seq, hdr->flags); +} + +/** + * ti_sci_rx_callback() - mailbox client callback for receive messages + * @cl: client pointer + * @m: mailbox message + * + * Processes one received message to appropriate transfer information and + * signals completion of the transfer. + * + * NOTE: This function will be invoked in IRQ context, hence should be + * as optimal as possible. + */ +static void ti_sci_rx_callback(struct mbox_client *cl, void *m) +{ + struct ti_sci_info *info = cl_to_ti_sci_info(cl); + struct device *dev = info->dev; + struct ti_sci_xfers_info *minfo = &info->minfo; + struct ti_msgmgr_message *mbox_msg = m; + struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)mbox_msg->buf; + struct ti_sci_xfer *xfer; + u8 xfer_id; + + xfer_id = hdr->seq; + + /* + * Are we even expecting this? + * NOTE: barriers were implicit in locks used for modifying the bitmap + */ + if (!test_bit(xfer_id, minfo->xfer_alloc_table)) { + dev_err(dev, "Message for %d is not expected!\n", xfer_id); + return; + } + + xfer = &minfo->xfer_block[xfer_id]; + + /* Is the message of valid length? */ + if (mbox_msg->len > info->desc->max_msg_size) { + dev_err(dev, "Unable to handle %d xfer(max %d)\n", + mbox_msg->len, info->desc->max_msg_size); + ti_sci_dump_header_dbg(dev, hdr); + return; + } + if (mbox_msg->len < xfer->rx_len) { + dev_err(dev, "Recv xfer %d < expected %d length\n", + mbox_msg->len, xfer->rx_len); + ti_sci_dump_header_dbg(dev, hdr); + return; + } + + ti_sci_dump_header_dbg(dev, hdr); + /* Take a copy to the rx buffer.. 
*/ + memcpy(xfer->xfer_buf, mbox_msg->buf, xfer->rx_len); + complete(&xfer->done); +} + +/** + * ti_sci_get_one_xfer() - Allocate one message + * @info: Pointer to SCI entity information + * @msg_type: Message type + * @msg_flags: Flag to set for the message + * @tx_message_size: transmit message size + * @rx_message_size: receive message size + * + * Helper function which is used by various command functions that are + * exposed to clients of this driver for allocating a message traffic event. + * + * This function can sleep depending on pending requests already in the system + * for the SCI entity. Further, this also holds a spinlock to maintain integrity + * of internal data structures. + * + * Return: 0 if all went fine, else corresponding error. + */ +static struct ti_sci_xfer *ti_sci_get_one_xfer(struct ti_sci_info *info, + u16 msg_type, u32 msg_flags, + size_t tx_message_size, + size_t rx_message_size) +{ + struct ti_sci_xfers_info *minfo = &info->minfo; + struct ti_sci_xfer *xfer; + struct ti_sci_msg_hdr *hdr; + unsigned long flags; + unsigned long bit_pos; + u8 xfer_id; + int ret; + int timeout; + + /* Ensure we have sane transfer sizes */ + if (rx_message_size > info->desc->max_msg_size || + tx_message_size > info->desc->max_msg_size || + rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr)) + return ERR_PTR(-ERANGE); + + /* + * Ensure we have only controlled number of pending messages. + * Ideally, we might just have to wait a single message, be + * conservative and wait 5 times that.. + */ + timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms) * 5; + ret = down_timeout(&minfo->sem_xfer_count, timeout); + if (ret < 0) + return ERR_PTR(ret); + + /* Keep the locked section as small as possible */ + spin_lock_irqsave(&minfo->xfer_lock, flags); + bit_pos = find_first_zero_bit(minfo->xfer_alloc_table, + info->desc->max_msgs); + set_bit(bit_pos, minfo->xfer_alloc_table); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + /* + * We already ensured in probe that we can have max messages that can + * fit in hdr.seq - NOTE: this improves access latencies + * to predictable O(1) access, BUT, it opens us to risk if + * remote misbehaves with corrupted message sequence responses. + * If that happens, we are going to be messed up anyways.. + */ + xfer_id = (u8)bit_pos; + + xfer = &minfo->xfer_block[xfer_id]; + + hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; + xfer->tx_message.len = tx_message_size; + xfer->rx_len = (u8)rx_message_size; + + reinit_completion(&xfer->done); + + hdr->seq = xfer_id; + hdr->type = msg_type; + hdr->host = info->desc->host_id; + hdr->flags = msg_flags; + + return xfer; +} + +/** + * ti_sci_put_one_xfer() - Release a message + * @minfo: transfer info pointer + * @xfer: message that was reserved by ti_sci_get_one_xfer + * + * This holds a spinlock to maintain integrity of internal data structures. + */ +static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo, + struct ti_sci_xfer *xfer) +{ + unsigned long flags; + struct ti_sci_msg_hdr *hdr; + u8 xfer_id; + + hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf; + xfer_id = hdr->seq; + + /* + * Keep the locked section as small as possible + * NOTE: we might escape with smp_mb and no lock here.. + * but just be conservative and symmetric. 
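A standalone sketch of the token scheme used here (the values are hypothetical except the message count, which matches the K2G descriptor later in this file): the counting semaphore bounds the number of in-flight messages, and the bitmap position doubles as the u8 sequence id, so the interrupt-context RX callback can find the matching transfer by direct indexing:

#include <stddef.h>
#include <stdint.h>

#define MAX_MSGS 20     /* same bound the K2G descriptor uses */

struct example_xfer {
        int in_use;
        /* per-message buffer and completion would live here */
};

static struct example_xfer xfer_block[MAX_MSGS];

/* Sequence id == array index, so lookup in the RX path is O(1). */
static struct example_xfer *example_lookup(uint8_t seq)
{
        if (seq >= MAX_MSGS || !xfer_block[seq].in_use)
                return NULL;    /* unexpected or stale response */

        return &xfer_block[seq];
}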
+ */ + spin_lock_irqsave(&minfo->xfer_lock, flags); + clear_bit(xfer_id, minfo->xfer_alloc_table); + spin_unlock_irqrestore(&minfo->xfer_lock, flags); + + /* Increment the count for the next user to get through */ + up(&minfo->sem_xfer_count); +} + +/** + * ti_sci_do_xfer() - Do one transfer + * @info: Pointer to SCI entity information + * @xfer: Transfer to initiate and wait for response + * + * Return: -ETIMEDOUT in case of no response, if transmit error, + * return corresponding error, else if all goes well, + * return 0. + */ +static inline int ti_sci_do_xfer(struct ti_sci_info *info, + struct ti_sci_xfer *xfer) +{ + int ret; + int timeout; + struct device *dev = info->dev; + + ret = mbox_send_message(info->chan_tx, &xfer->tx_message); + if (ret < 0) + return ret; + + ret = 0; + + /* And we wait for the response. */ + timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); + if (!wait_for_completion_timeout(&xfer->done, timeout)) { + dev_err(dev, "Mbox timedout in resp(caller: %pF)\n", + (void *)_RET_IP_); + ret = -ETIMEDOUT; + } + /* + * NOTE: we might prefer not to need the mailbox ticker to manage the + * transfer queueing since the protocol layer queues things by itself. + * Unfortunately, we have to kick the mailbox framework after we have + * received our message. + */ + mbox_client_txdone(info->chan_tx, ret); + + return ret; +} + +/** + * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity + * @info: Pointer to SCI entity information + * + * Updates the SCI information in the internal data structure. + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_get_revision(struct ti_sci_info *info) +{ + struct device *dev = info->dev; + struct ti_sci_handle *handle = &info->handle; + struct ti_sci_version_info *ver = &handle->version; + struct ti_sci_msg_resp_version *rev_info; + struct ti_sci_xfer *xfer; + int ret; + + /* No need to setup flags since it is expected to respond */ + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, + 0x0, sizeof(struct ti_sci_msg_hdr), + sizeof(*rev_info)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + + rev_info = (struct ti_sci_msg_resp_version *)xfer->xfer_buf; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + ver->abi_major = rev_info->abi_major; + ver->abi_minor = rev_info->abi_minor; + ver->firmware_revision = rev_info->firmware_revision; + strncpy(ver->firmware_description, rev_info->firmware_description, + sizeof(ver->firmware_description)); + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + return ret; +} + +/** + * ti_sci_is_response_ack() - Generic ACK/NACK message checkup + * @r: pointer to response buffer + * + * Return: true if the response was an ACK, else returns false. + */ +static inline bool ti_sci_is_response_ack(void *r) +{ + struct ti_sci_msg_hdr *hdr = r; + + return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false; +} + +/** + * ti_sci_set_device_state() - Set device state helper + * @handle: pointer to TI SCI handle + * @id: Device identifier + * @flags: flags to setup for the device + * @state: State to move the device to + * + * Return: 0 if all went well, else returns appropriate error value. 
+ */ +static int ti_sci_set_device_state(const struct ti_sci_handle *handle, + u32 id, u32 flags, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_device_state *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE, + flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_device_state *)xfer->xfer_buf; + req->id = id; + req->state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_get_device_state() - Get device state helper + * @handle: Handle to the device + * @id: Device Identifier + * @clcnt: Pointer to Context Loss Count + * @resets: pointer to resets + * @p_state: pointer to p_state + * @c_state: pointer to c_state + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_get_device_state(const struct ti_sci_handle *handle, + u32 id, u32 *clcnt, u32 *resets, + u8 *p_state, u8 *c_state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_get_device_state *req; + struct ti_sci_msg_resp_get_device_state *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + if (!clcnt && !resets && !p_state && !c_state) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + /* Response is expected, so need of any flags */ + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, + 0, sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_get_device_state *)xfer->xfer_buf; + req->id = id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_device_state *)xfer->xfer_buf; + if (!ti_sci_is_response_ack(resp)) { + ret = -ENODEV; + goto fail; + } + + if (clcnt) + *clcnt = resp->context_loss_count; + if (resets) + *resets = resp->resets; + if (p_state) + *p_state = resp->programmed_state; + if (c_state) + *c_state = resp->current_state; +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_get_device() - command to request for device managed by TISCI + * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * NOTE: The request is for exclusive access for the processor. + * + * Return: 0 if all went fine, else return appropriate error. 
+ */ +static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id) +{ + return ti_sci_set_device_state(handle, id, + MSG_FLAG_DEVICE_EXCLUSIVE, + MSG_DEVICE_SW_STATE_ON); +} + +/** + * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI + * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id) +{ + return ti_sci_set_device_state(handle, id, + MSG_FLAG_DEVICE_EXCLUSIVE, + MSG_DEVICE_SW_STATE_RETENTION); +} + +/** + * ti_sci_cmd_put_device() - command to release a device managed by TISCI + * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * @id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id) +{ + return ti_sci_set_device_state(handle, id, + 0, MSG_DEVICE_SW_STATE_AUTO_OFF); +} + +/** + * ti_sci_cmd_dev_is_valid() - Is the device valid + * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * @id: Device Identifier + * + * Return: 0 if all went fine and the device ID is valid, else return + * appropriate error. + */ +static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id) +{ + u8 unused; + + /* check the device state which will also tell us if the ID is valid */ + return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused); +} + +/** + * ti_sci_cmd_dev_get_clcnt() - Get context loss counter + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @count: Pointer to Context Loss counter to populate + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id, + u32 *count) +{ + return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL); +} + +/** + * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @r_state: true if requested to be idle + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id, + bool *r_state) +{ + int ret; + u8 state; + + if (!r_state) + return -EINVAL; + + ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL); + if (ret) + return ret; + + *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION); + + return 0; +} + +/** + * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @r_state: true if requested to be stopped + * @curr_state: true if currently stopped. + * + * Return: 0 if all went fine, else return appropriate error. 
+ */ +static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id, + bool *r_state, bool *curr_state) +{ + int ret; + u8 p_state, c_state; + + if (!r_state && !curr_state) + return -EINVAL; + + ret = + ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); + if (ret) + return ret; + + if (r_state) + *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF); + if (curr_state) + *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF); + + return 0; +} + +/** + * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @r_state: true if requested to be ON + * @curr_state: true if currently ON and active + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id, + bool *r_state, bool *curr_state) +{ + int ret; + u8 p_state, c_state; + + if (!r_state && !curr_state) + return -EINVAL; + + ret = + ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state); + if (ret) + return ret; + + if (r_state) + *r_state = (p_state == MSG_DEVICE_SW_STATE_ON); + if (curr_state) + *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON); + + return 0; +} + +/** + * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @curr_state: true if currently transitioning. + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id, + bool *curr_state) +{ + int ret; + u8 state; + + if (!curr_state) + return -EINVAL; + + ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state); + if (ret) + return ret; + + *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS); + + return 0; +} + +/** + * ti_sci_cmd_set_device_resets() - command to set resets for device managed + * by TISCI + * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * @id: Device Identifier + * @reset_state: Device specific reset bit field + * + * Return: 0 if all went fine, else return appropriate error. + */ +static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle, + u32 id, u32 reset_state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_device_resets *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_device_resets *)xfer->xfer_buf; + req->id = id; + req->resets = reset_state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_get_device_resets() - Get reset state for device managed + * by TISCI + * @handle: Pointer to TISCI handle + * @id: Device Identifier + * @reset_state: Pointer to reset state to populate + * + * Return: 0 if all went fine, else return appropriate error. 
+ */ +static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle, + u32 id, u32 *reset_state) +{ + return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL, + NULL); +} + +/** + * ti_sci_set_clock_state() - Set clock state helper + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @flags: Header flags as needed + * @state: State to request for the clock. + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_set_clock_state(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, + u32 flags, u8 state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_clock_state *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE, + flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_clock_state *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + req->request_state = state; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_get_clock_state() - Get clock state helper + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @programmed_state: State requested for clock to move to + * @current_state: State that the clock is currently in + * + * Return: 0 if all went well, else returns appropriate error value. 
+ */ +static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, + u8 *programmed_state, u8 *current_state) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_get_clock_state *req; + struct ti_sci_msg_resp_get_clock_state *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + if (!programmed_state && !current_state) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_get_clock_state *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) { + ret = -ENODEV; + goto fail; + } + + if (programmed_state) + *programmed_state = resp->programmed_state; + if (current_state) + *current_state = resp->current_state; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false' + * @can_change_freq: 'true' if frequency change is desired, else 'false' + * @enable_input_term: 'true' if input termination is desired, else 'false' + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id, + u8 clk_id, bool needs_ssc, bool can_change_freq, + bool enable_input_term) +{ + u32 flags = 0; + + flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0; + flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0; + flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0; + + return ti_sci_set_clock_state(handle, dev_id, clk_id, flags, + MSG_CLOCK_SW_STATE_REQ); +} + +/** + * ti_sci_cmd_idle_clock() - Idle a clock which is in our control + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * + * NOTE: This clock must have been requested by get_clock previously. + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id) +{ + return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, + MSG_CLOCK_SW_STATE_UNREQ); +} + +/** + * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * + * NOTE: This clock must have been requested by get_clock previously. 
+ * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id) +{ + return ti_sci_set_clock_state(handle, dev_id, clk_id, 0, + MSG_CLOCK_SW_STATE_AUTO); +} + +/** + * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @req_state: state indicating if the clock is auto managed + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, bool *req_state) +{ + u8 state = 0; + int ret; + + if (!req_state) + return -EINVAL; + + ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL); + if (ret) + return ret; + + *req_state = (state == MSG_CLOCK_SW_STATE_AUTO); + return 0; +} + +/** + * ti_sci_cmd_clk_is_on() - Is the clock ON + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @req_state: state indicating if the clock is managed by us and enabled + * @curr_state: state indicating if the clock is ready for operation + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id, + u8 clk_id, bool *req_state, bool *curr_state) +{ + u8 c_state = 0, r_state = 0; + int ret; + + if (!req_state && !curr_state) + return -EINVAL; + + ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, + &r_state, &c_state); + if (ret) + return ret; + + if (req_state) + *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ); + if (curr_state) + *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY); + return 0; +} + +/** + * ti_sci_cmd_clk_is_off() - Is the clock OFF + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @req_state: state indicating if the clock is managed by us and disabled + * @curr_state: state indicating if the clock is NOT ready for operation + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id, + u8 clk_id, bool *req_state, bool *curr_state) +{ + u8 c_state = 0, r_state = 0; + int ret; + + if (!req_state && !curr_state) + return -EINVAL; + + ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, + &r_state, &c_state); + if (ret) + return ret; + + if (req_state) + *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ); + if (curr_state) + *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY); + return 0; +} + +/** + * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. 
+ * @parent_id: Parent clock identifier to set + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, u8 parent_id) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_clock_parent *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_clock_parent *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + req->parent_id = parent_id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_clk_get_parent() - Get current parent clock source + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @parent_id: Current clock parent + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, u8 *parent_id) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_get_clock_parent *req; + struct ti_sci_msg_resp_get_clock_parent *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle || !parent_id) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_get_clock_parent *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) + ret = -ENODEV; + else + *parent_id = resp->parent_id; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @num_parents: Returns he number of parents to the current clock. + * + * Return: 0 if all went well, else returns appropriate error value. 
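A hedged sketch of how a client could inspect and change a device clock's parent through the handle operations installed later in ti_sci_setup_ops() (device id 81 and clock id 2 are hypothetical):

/* Hypothetical: inspect the mux feeding clock 2 of device 81. */
const struct ti_sci_clk_ops *cops = &handle->ops.clk_ops;
u8 num_parents, parent;
int ret;

ret = cops->get_num_parents(handle, 81, 2, &num_parents);
if (!ret)
        ret = cops->get_parent(handle, 81, 2, &parent);

/*
 * Reparenting then uses cops->set_parent(handle, 81, 2, new_parent),
 * where the numbering of new_parent is device specific.
 */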
+ */ +static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, + u8 *num_parents) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_get_clock_num_parents *req; + struct ti_sci_msg_resp_get_clock_num_parents *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle || !num_parents) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_get_clock_num_parents *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_clock_num_parents *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) + ret = -ENODEV; + else + *num_parents = resp->num_parents; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * @max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @match_freq: Frequency match in Hz response. + * + * Return: 0 if all went well, else returns appropriate error value. 
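A hedged example of the min/target/max window these frequency calls take (all numbers hypothetical): a client that can tolerate roughly +/-5% around 100 MHz asks for the best supported match inside that window before committing with set_freq:

/* Hypothetical request: best supported rate within 95..105 MHz. */
u64 min = 95000000, target = 100000000, max = 105000000, match;
int ret;

ret = handle->ops.clk_ops.get_best_match_freq(handle, 81, 2,
                                              min, target, max, &match);
if (!ret)
        ret = handle->ops.clk_ops.set_freq(handle, 81, 2, min, match, max);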
+ */ +static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, u64 min_freq, + u64 target_freq, u64 max_freq, + u64 *match_freq) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_query_clock_freq *req; + struct ti_sci_msg_resp_query_clock_freq *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle || !match_freq) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_query_clock_freq *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + req->min_freq_hz = min_freq; + req->target_freq_hz = target_freq; + req->max_freq_hz = max_freq; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) + ret = -ENODEV; + else + *match_freq = resp->freq_hz; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_clk_set_freq() - Set a frequency for clock + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * @max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, u64 min_freq, + u64 target_freq, u64 max_freq) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_set_clock_freq *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_set_clock_freq *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + req->min_freq_hz = min_freq; + req->target_freq_hz = target_freq; + req->max_freq_hz = max_freq; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + ret = ti_sci_is_response_ack(resp) ? 
0 : -ENODEV; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/** + * ti_sci_cmd_clk_get_freq() - Get current frequency + * @handle: pointer to TI SCI handle + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @freq: Currently frequency in Hz + * + * Return: 0 if all went well, else returns appropriate error value. + */ +static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle, + u32 dev_id, u8 clk_id, u64 *freq) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_get_clock_freq *req; + struct ti_sci_msg_resp_get_clock_freq *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle || !freq) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_get_clock_freq *)xfer->xfer_buf; + req->dev_id = dev_id; + req->clk_id = clk_id; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) + ret = -ENODEV; + else + *freq = resp->freq_hz; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle) +{ + struct ti_sci_info *info; + struct ti_sci_msg_req_reboot *req; + struct ti_sci_msg_hdr *resp; + struct ti_sci_xfer *xfer; + struct device *dev; + int ret = 0; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + dev = info->dev; + + xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SYS_RESET, + TI_SCI_FLAG_REQ_ACK_ON_PROCESSED, + sizeof(*req), sizeof(*resp)); + if (IS_ERR(xfer)) { + ret = PTR_ERR(xfer); + dev_err(dev, "Message alloc failed(%d)\n", ret); + return ret; + } + req = (struct ti_sci_msg_req_reboot *)xfer->xfer_buf; + + ret = ti_sci_do_xfer(info, xfer); + if (ret) { + dev_err(dev, "Mbox send fail %d\n", ret); + goto fail; + } + + resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf; + + if (!ti_sci_is_response_ack(resp)) + ret = -ENODEV; + else + ret = 0; + +fail: + ti_sci_put_one_xfer(&info->minfo, xfer); + + return ret; +} + +/* + * ti_sci_setup_ops() - Setup the operations structures + * @info: pointer to TISCI pointer + */ +static void ti_sci_setup_ops(struct ti_sci_info *info) +{ + struct ti_sci_ops *ops = &info->handle.ops; + struct ti_sci_core_ops *core_ops = &ops->core_ops; + struct ti_sci_dev_ops *dops = &ops->dev_ops; + struct ti_sci_clk_ops *cops = &ops->clk_ops; + + core_ops->reboot_device = ti_sci_cmd_core_reboot; + + dops->get_device = ti_sci_cmd_get_device; + dops->idle_device = ti_sci_cmd_idle_device; + dops->put_device = ti_sci_cmd_put_device; + + dops->is_valid = ti_sci_cmd_dev_is_valid; + dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt; + dops->is_idle = ti_sci_cmd_dev_is_idle; + dops->is_stop = ti_sci_cmd_dev_is_stop; + dops->is_on = ti_sci_cmd_dev_is_on; + dops->is_transitioning = ti_sci_cmd_dev_is_trans; + dops->set_device_resets = ti_sci_cmd_set_device_resets; + dops->get_device_resets = 
ti_sci_cmd_get_device_resets; + + cops->get_clock = ti_sci_cmd_get_clock; + cops->idle_clock = ti_sci_cmd_idle_clock; + cops->put_clock = ti_sci_cmd_put_clock; + cops->is_auto = ti_sci_cmd_clk_is_auto; + cops->is_on = ti_sci_cmd_clk_is_on; + cops->is_off = ti_sci_cmd_clk_is_off; + + cops->set_parent = ti_sci_cmd_clk_set_parent; + cops->get_parent = ti_sci_cmd_clk_get_parent; + cops->get_num_parents = ti_sci_cmd_clk_get_num_parents; + + cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq; + cops->set_freq = ti_sci_cmd_clk_set_freq; + cops->get_freq = ti_sci_cmd_clk_get_freq; +} + +/** + * ti_sci_get_handle() - Get the TI SCI handle for a device + * @dev: Pointer to device for which we want SCI handle + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of TI SCI protocol library. + * ti_sci_put_handle must be balanced with successful ti_sci_get_handle + * Return: pointer to handle if successful, else: + * -EPROBE_DEFER if the instance is not ready + * -ENODEV if the required node handler is missing + * -EINVAL if invalid conditions are encountered. + */ +const struct ti_sci_handle *ti_sci_get_handle(struct device *dev) +{ + struct device_node *ti_sci_np; + struct list_head *p; + struct ti_sci_handle *handle = NULL; + struct ti_sci_info *info; + + if (!dev) { + pr_err("I need a device pointer\n"); + return ERR_PTR(-EINVAL); + } + ti_sci_np = of_get_parent(dev->of_node); + if (!ti_sci_np) { + dev_err(dev, "No OF information\n"); + return ERR_PTR(-EINVAL); + } + + mutex_lock(&ti_sci_list_mutex); + list_for_each(p, &ti_sci_list) { + info = list_entry(p, struct ti_sci_info, node); + if (ti_sci_np == info->dev->of_node) { + handle = &info->handle; + info->users++; + break; + } + } + mutex_unlock(&ti_sci_list_mutex); + of_node_put(ti_sci_np); + + if (!handle) + return ERR_PTR(-EPROBE_DEFER); + + return handle; +} +EXPORT_SYMBOL_GPL(ti_sci_get_handle); + +/** + * ti_sci_put_handle() - Release the handle acquired by ti_sci_get_handle + * @handle: Handle acquired by ti_sci_get_handle + * + * NOTE: The function does not track individual clients of the framework + * and is expected to be maintained by caller of TI SCI protocol library. + * ti_sci_put_handle must be balanced with successful ti_sci_get_handle + * + * Return: 0 is successfully released + * if an error pointer was passed, it returns the error value back, + * if null was passed, it returns -EINVAL; + */ +int ti_sci_put_handle(const struct ti_sci_handle *handle) +{ + struct ti_sci_info *info; + + if (IS_ERR(handle)) + return PTR_ERR(handle); + if (!handle) + return -EINVAL; + + info = handle_to_ti_sci_info(handle); + mutex_lock(&ti_sci_list_mutex); + if (!WARN_ON(!info->users)) + info->users--; + mutex_unlock(&ti_sci_list_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(ti_sci_put_handle); + +static void devm_ti_sci_release(struct device *dev, void *res) +{ + const struct ti_sci_handle **ptr = res; + const struct ti_sci_handle *handle = *ptr; + int ret; + + ret = ti_sci_put_handle(handle); + if (ret) + dev_err(dev, "failed to put handle %d\n", ret); +} + +/** + * devm_ti_sci_get_handle() - Managed get handle + * @dev: device for which we want SCI handle for. + * + * NOTE: This releases the handle once the device resources are + * no longer needed. MUST NOT BE released with ti_sci_put_handle. + * The function does not track individual clients of the framework + * and is expected to be maintained by caller of TI SCI protocol library. 
+ * + * Return: 0 if all went fine, else corresponding error. + */ +const struct ti_sci_handle *devm_ti_sci_get_handle(struct device *dev) +{ + const struct ti_sci_handle **ptr; + const struct ti_sci_handle *handle; + + ptr = devres_alloc(devm_ti_sci_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + handle = ti_sci_get_handle(dev); + + if (!IS_ERR(handle)) { + *ptr = handle; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return handle; +} +EXPORT_SYMBOL_GPL(devm_ti_sci_get_handle); + +static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, + void *cmd) +{ + struct ti_sci_info *info = reboot_to_ti_sci_info(nb); + const struct ti_sci_handle *handle = &info->handle; + + ti_sci_cmd_core_reboot(handle); + + /* call fail OR pass, we should not be here in the first place */ + return NOTIFY_BAD; +} + +/* Description for K2G */ +static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = { + .host_id = 2, + /* Conservative duration */ + .max_rx_timeout_ms = 1000, + /* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */ + .max_msgs = 20, + .max_msg_size = 64, +}; + +static const struct of_device_id ti_sci_of_match[] = { + {.compatible = "ti,k2g-sci", .data = &ti_sci_pmmc_k2g_desc}, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, ti_sci_of_match); + +static int ti_sci_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + const struct of_device_id *of_id; + const struct ti_sci_desc *desc; + struct ti_sci_xfer *xfer; + struct ti_sci_info *info = NULL; + struct ti_sci_xfers_info *minfo; + struct mbox_client *cl; + int ret = -EINVAL; + int i; + int reboot = 0; + + of_id = of_match_device(ti_sci_of_match, dev); + if (!of_id) { + dev_err(dev, "OF data missing\n"); + return -EINVAL; + } + desc = of_id->data; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->dev = dev; + info->desc = desc; + reboot = of_property_read_bool(dev->of_node, + "ti,system-reboot-controller"); + INIT_LIST_HEAD(&info->node); + minfo = &info->minfo; + + /* + * Pre-allocate messages + * NEVER allocate more than what we can indicate in hdr.seq + * if we have data description bug, force a fix.. 
+ */ + if (WARN_ON(desc->max_msgs >= + 1 << 8 * sizeof(((struct ti_sci_msg_hdr *)0)->seq))) + return -EINVAL; + + minfo->xfer_block = devm_kcalloc(dev, + desc->max_msgs, + sizeof(*minfo->xfer_block), + GFP_KERNEL); + if (!minfo->xfer_block) + return -ENOMEM; + + minfo->xfer_alloc_table = devm_kzalloc(dev, + BITS_TO_LONGS(desc->max_msgs) + * sizeof(unsigned long), + GFP_KERNEL); + if (!minfo->xfer_alloc_table) + return -ENOMEM; + bitmap_zero(minfo->xfer_alloc_table, desc->max_msgs); + + /* Pre-initialize the buffer pointer to pre-allocated buffers */ + for (i = 0, xfer = minfo->xfer_block; i < desc->max_msgs; i++, xfer++) { + xfer->xfer_buf = devm_kcalloc(dev, 1, desc->max_msg_size, + GFP_KERNEL); + if (!xfer->xfer_buf) + return -ENOMEM; + + xfer->tx_message.buf = xfer->xfer_buf; + init_completion(&xfer->done); + } + + ret = ti_sci_debugfs_create(pdev, info); + if (ret) + dev_warn(dev, "Failed to create debug file\n"); + + platform_set_drvdata(pdev, info); + + cl = &info->cl; + cl->dev = dev; + cl->tx_block = false; + cl->rx_callback = ti_sci_rx_callback; + cl->knows_txdone = true; + + spin_lock_init(&minfo->xfer_lock); + sema_init(&minfo->sem_xfer_count, desc->max_msgs); + + info->chan_rx = mbox_request_channel_byname(cl, "rx"); + if (IS_ERR(info->chan_rx)) { + ret = PTR_ERR(info->chan_rx); + goto out; + } + + info->chan_tx = mbox_request_channel_byname(cl, "tx"); + if (IS_ERR(info->chan_tx)) { + ret = PTR_ERR(info->chan_tx); + goto out; + } + ret = ti_sci_cmd_get_revision(info); + if (ret) { + dev_err(dev, "Unable to communicate with TISCI(%d)\n", ret); + goto out; + } + + ti_sci_setup_ops(info); + + if (reboot) { + info->nb.notifier_call = tisci_reboot_handler; + info->nb.priority = 128; + + ret = register_restart_handler(&info->nb); + if (ret) { + dev_err(dev, "reboot registration fail(%d)\n", ret); + return ret; + } + } + + dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n", + info->handle.version.abi_major, info->handle.version.abi_minor, + info->handle.version.firmware_revision, + info->handle.version.firmware_description); + + mutex_lock(&ti_sci_list_mutex); + list_add_tail(&info->node, &ti_sci_list); + mutex_unlock(&ti_sci_list_mutex); + + return of_platform_populate(dev->of_node, NULL, NULL, dev); +out: + if (!IS_ERR(info->chan_tx)) + mbox_free_channel(info->chan_tx); + if (!IS_ERR(info->chan_rx)) + mbox_free_channel(info->chan_rx); + debugfs_remove(info->d); + return ret; +} + +static int ti_sci_remove(struct platform_device *pdev) +{ + struct ti_sci_info *info; + struct device *dev = &pdev->dev; + int ret = 0; + + of_platform_depopulate(dev); + + info = platform_get_drvdata(pdev); + + if (info->nb.notifier_call) + unregister_restart_handler(&info->nb); + + mutex_lock(&ti_sci_list_mutex); + if (info->users) + ret = -EBUSY; + else + list_del(&info->node); + mutex_unlock(&ti_sci_list_mutex); + + if (!ret) { + ti_sci_debugfs_destroy(pdev, info); + + /* Safe to free channels since no more users */ + mbox_free_channel(info->chan_tx); + mbox_free_channel(info->chan_rx); + } + + return ret; +} + +static struct platform_driver ti_sci_driver = { + .probe = ti_sci_probe, + .remove = ti_sci_remove, + .driver = { + .name = "ti-sci", + .of_match_table = of_match_ptr(ti_sci_of_match), + }, +}; +module_platform_driver(ti_sci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI System Control Interface(SCI) driver"); +MODULE_AUTHOR("Nishanth Menon"); +MODULE_ALIAS("platform:ti-sci"); diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h new file mode 100644 index 
000000000000..9b611e9e6f6d --- /dev/null +++ b/drivers/firmware/ti_sci.h @@ -0,0 +1,492 @@ +/* + * Texas Instruments System Control Interface (TISCI) Protocol + * + * Communication protocol with TI SCI hardware + * The system works in a message response protocol + * See: http://processors.wiki.ti.com/index.php/TISCI for details + * + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the + * distribution. + * + * Neither the name of Texas Instruments Incorporated nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#ifndef __TI_SCI_H +#define __TI_SCI_H + +/* Generic Messages */ +#define TI_SCI_MSG_ENABLE_WDT 0x0000 +#define TI_SCI_MSG_WAKE_RESET 0x0001 +#define TI_SCI_MSG_VERSION 0x0002 +#define TI_SCI_MSG_WAKE_REASON 0x0003 +#define TI_SCI_MSG_GOODBYE 0x0004 +#define TI_SCI_MSG_SYS_RESET 0x0005 + +/* Device requests */ +#define TI_SCI_MSG_SET_DEVICE_STATE 0x0200 +#define TI_SCI_MSG_GET_DEVICE_STATE 0x0201 +#define TI_SCI_MSG_SET_DEVICE_RESETS 0x0202 + +/* Clock requests */ +#define TI_SCI_MSG_SET_CLOCK_STATE 0x0100 +#define TI_SCI_MSG_GET_CLOCK_STATE 0x0101 +#define TI_SCI_MSG_SET_CLOCK_PARENT 0x0102 +#define TI_SCI_MSG_GET_CLOCK_PARENT 0x0103 +#define TI_SCI_MSG_GET_NUM_CLOCK_PARENTS 0x0104 +#define TI_SCI_MSG_SET_CLOCK_FREQ 0x010c +#define TI_SCI_MSG_QUERY_CLOCK_FREQ 0x010d +#define TI_SCI_MSG_GET_CLOCK_FREQ 0x010e + +/** + * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses + * @type: Type of messages: One of TI_SCI_MSG* values + * @host: Host of the message + * @seq: Message identifier indicating a transfer sequence + * @flags: Flag for the message + */ +struct ti_sci_msg_hdr { + u16 type; + u8 host; + u8 seq; +#define TI_SCI_MSG_FLAG(val) (1 << (val)) +#define TI_SCI_FLAG_REQ_GENERIC_NORESPONSE 0x0 +#define TI_SCI_FLAG_REQ_ACK_ON_RECEIVED TI_SCI_MSG_FLAG(0) +#define TI_SCI_FLAG_REQ_ACK_ON_PROCESSED TI_SCI_MSG_FLAG(1) +#define TI_SCI_FLAG_RESP_GENERIC_NACK 0x0 +#define TI_SCI_FLAG_RESP_GENERIC_ACK TI_SCI_MSG_FLAG(1) + /* Additional Flags */ + u32 flags; +} __packed; + +/** + * struct ti_sci_msg_resp_version - Response for a message + * @hdr: Generic header + * @firmware_description: String describing the firmware + * @firmware_revision: Firmware revision + * @abi_major: Major version of the ABI that firmware supports + * @abi_minor: Minor version of the ABI that firmware supports + * + * In general, ABI version changes follow the rule that minor version increments + * are backward compatible. Major revision changes in ABI may not be + * backward compatible. + * + * Response to a generic message with message type TI_SCI_MSG_VERSION + */ +struct ti_sci_msg_resp_version { + struct ti_sci_msg_hdr hdr; + char firmware_description[32]; + u16 firmware_revision; + u8 abi_major; + u8 abi_minor; +} __packed; + +/** + * struct ti_sci_msg_req_reboot - Reboot the SoC + * @hdr: Generic Header + * + * Request type is TI_SCI_MSG_SYS_RESET, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_reboot { + struct ti_sci_msg_hdr hdr; +} __packed; + +/** + * struct ti_sci_msg_req_set_device_state - Set the desired state of the device + * @hdr: Generic header + * @id: Indicates which device to modify + * @reserved: Reserved space in message, must be 0 for backward compatibility + * @state: The desired state of the device. + * + * Certain flags can also be set to alter the device state: + * + MSG_FLAG_DEVICE_WAKE_ENABLED - Configure the device to be a wake source. + * The meaning of this flag will vary slightly from device to device and from + * SoC to SoC but it generally allows the device to wake the SoC out of deep + * suspend states. + * + MSG_FLAG_DEVICE_RESET_ISO - Enable reset isolation for this device. + * + MSG_FLAG_DEVICE_EXCLUSIVE - Claim this device exclusively. When passed + * with STATE_RETENTION or STATE_ON, it will claim the device exclusively. + * If another host already has this device set to STATE_RETENTION or STATE_ON, + * the message will fail. Once successful, other hosts attempting to set + * STATE_RETENTION or STATE_ON will fail. 
+ * + * Request type is TI_SCI_MSG_SET_DEVICE_STATE, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_set_device_state { + /* Additional hdr->flags options */ +#define MSG_FLAG_DEVICE_WAKE_ENABLED TI_SCI_MSG_FLAG(8) +#define MSG_FLAG_DEVICE_RESET_ISO TI_SCI_MSG_FLAG(9) +#define MSG_FLAG_DEVICE_EXCLUSIVE TI_SCI_MSG_FLAG(10) + struct ti_sci_msg_hdr hdr; + u32 id; + u32 reserved; + +#define MSG_DEVICE_SW_STATE_AUTO_OFF 0 +#define MSG_DEVICE_SW_STATE_RETENTION 1 +#define MSG_DEVICE_SW_STATE_ON 2 + u8 state; +} __packed; + +/** + * struct ti_sci_msg_req_get_device_state - Request to get device. + * @hdr: Generic header + * @id: Device Identifier + * + * Request type is TI_SCI_MSG_GET_DEVICE_STATE, responded device state + * information + */ +struct ti_sci_msg_req_get_device_state { + struct ti_sci_msg_hdr hdr; + u32 id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_device_state - Response to get device request. + * @hdr: Generic header + * @context_loss_count: Indicates how many times the device has lost context. A + * driver can use this monotonic counter to determine if the device has + * lost context since the last time this message was exchanged. + * @resets: Programmed state of the reset lines. + * @programmed_state: The state as programmed by set_device. + * - Uses the MSG_DEVICE_SW_* macros + * @current_state: The actual state of the hardware. + * + * Response to request TI_SCI_MSG_GET_DEVICE_STATE. + */ +struct ti_sci_msg_resp_get_device_state { + struct ti_sci_msg_hdr hdr; + u32 context_loss_count; + u32 resets; + u8 programmed_state; +#define MSG_DEVICE_HW_STATE_OFF 0 +#define MSG_DEVICE_HW_STATE_ON 1 +#define MSG_DEVICE_HW_STATE_TRANS 2 + u8 current_state; +} __packed; + +/** + * struct ti_sci_msg_req_set_device_resets - Set the desired resets + * configuration of the device + * @hdr: Generic header + * @id: Indicates which device to modify + * @resets: A bit field of resets for the device. The meaning, behavior, + * and usage of the reset flags are device specific. 0 for a bit + * indicates releasing the reset represented by that bit while 1 + * indicates keeping it held. + * + * Request type is TI_SCI_MSG_SET_DEVICE_RESETS, responded with a generic + * ACK/NACK message. + */ +struct ti_sci_msg_req_set_device_resets { + struct ti_sci_msg_hdr hdr; + u32 id; + u32 resets; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_state - Request to setup a Clock state + * @hdr: Generic Header, Certain flags can be set specific to the clocks: + * MSG_FLAG_CLOCK_ALLOW_SSC: Allow this clock to be modified + * via spread spectrum clocking. + * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE: Allow this clock's + * frequency to be changed while it is running so long as it + * is within the min/max limits. + * MSG_FLAG_CLOCK_INPUT_TERM: Enable input termination, this + * is only applicable to clock inputs on the SoC pseudo-device. + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @request_state: Request the state for the clock to be set to. + * MSG_CLOCK_SW_STATE_UNREQ: The IP does not require this clock, + * it can be disabled, regardless of the state of the device + * MSG_CLOCK_SW_STATE_AUTO: Allow the System Controller to + * automatically manage the state of this clock. If the device + * is enabled, then the clock is enabled. 
If the device is set + * to off or retention, then the clock is internally set as not + * being required by the device.(default) + * MSG_CLOCK_SW_STATE_REQ: Configure the clock to be enabled, + * regardless of the state of the device. + * + * Normally, all required clocks are managed by TISCI entity, this is used + * only for specific control *IF* required. Auto managed state is + * MSG_CLOCK_SW_STATE_AUTO, in other states, TISCI entity assume remote + * will explicitly control. + * + * Request type is TI_SCI_MSG_SET_CLOCK_STATE, response is a generic + * ACK or NACK message. + */ +struct ti_sci_msg_req_set_clock_state { + /* Additional hdr->flags options */ +#define MSG_FLAG_CLOCK_ALLOW_SSC TI_SCI_MSG_FLAG(8) +#define MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE TI_SCI_MSG_FLAG(9) +#define MSG_FLAG_CLOCK_INPUT_TERM TI_SCI_MSG_FLAG(10) + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; +#define MSG_CLOCK_SW_STATE_UNREQ 0 +#define MSG_CLOCK_SW_STATE_AUTO 1 +#define MSG_CLOCK_SW_STATE_REQ 2 + u8 request_state; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_state - Request for clock state + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to get state of. + * + * Request type is TI_SCI_MSG_GET_CLOCK_STATE, response is state + * of the clock + */ +struct ti_sci_msg_req_get_clock_state { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_state - Response to get clock state + * @hdr: Generic Header + * @programmed_state: Any programmed state of the clock. This is one of + * MSG_CLOCK_SW_STATE* values. + * @current_state: Current state of the clock. This is one of: + * MSG_CLOCK_HW_STATE_NOT_READY: Clock is not ready + * MSG_CLOCK_HW_STATE_READY: Clock is ready + * + * Response to TI_SCI_MSG_GET_CLOCK_STATE. + */ +struct ti_sci_msg_resp_get_clock_state { + struct ti_sci_msg_hdr hdr; + u8 programmed_state; +#define MSG_CLOCK_HW_STATE_NOT_READY 0 +#define MSG_CLOCK_HW_STATE_READY 1 + u8 current_state; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_parent - Set the clock parent + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * @parent_id: The new clock parent is selectable by an index via this + * parameter. + * + * Request type is TI_SCI_MSG_SET_CLOCK_PARENT, response is generic + * ACK / NACK message. + */ +struct ti_sci_msg_req_set_clock_parent { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; + u8 parent_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_parent - Get the clock parent + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to get the parent for. + * + * Request type is TI_SCI_MSG_GET_CLOCK_PARENT, response is parent information + */ +struct ti_sci_msg_req_get_clock_parent { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_parent - Response with clock parent + * @hdr: Generic Header + * @parent_id: The current clock parent + * + * Response to TI_SCI_MSG_GET_CLOCK_PARENT. 
+ */ +struct ti_sci_msg_resp_get_clock_parent { + struct ti_sci_msg_hdr hdr; + u8 parent_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_num_parents - Request to get clock parents + * @hdr: Generic header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * + * This request provides information about how many clock parent options + * are available for a given clock to a device. This is typically used + * for input clocks. + * + * Request type is TI_SCI_MSG_GET_NUM_CLOCK_PARENTS, response is appropriate + * message, or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_get_clock_num_parents { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_num_parents - Response for get clk parents + * @hdr: Generic header + * @num_parents: Number of clock parents + * + * Response to TI_SCI_MSG_GET_NUM_CLOCK_PARENTS + */ +struct ti_sci_msg_resp_get_clock_num_parents { + struct ti_sci_msg_hdr hdr; + u8 num_parents; +} __packed; + +/** + * struct ti_sci_msg_req_query_clock_freq - Request to query a frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq_hz: The target clock frequency. A frequency will be found + * as close to this target frequency as possible. + * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. In case of specific requests, TISCI evaluates capability to achieve + * requested frequency within provided range and responds with + * result message. + * + * Request type is TI_SCI_MSG_QUERY_CLOCK_FREQ, response is appropriate message, + * or NACK in case of inability to satisfy request. + */ +struct ti_sci_msg_req_query_clock_freq { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u64 min_freq_hz; + u64 target_freq_hz; + u64 max_freq_hz; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_query_clock_freq - Response to a clock frequency query + * @hdr: Generic Header + * @freq_hz: Frequency that is the best match in Hz. + * + * Response to request type TI_SCI_MSG_QUERY_CLOCK_FREQ. NOTE: if the request + * cannot be satisfied, the message will be of type NACK. + */ +struct ti_sci_msg_resp_query_clock_freq { + struct ti_sci_msg_hdr hdr; + u64 freq_hz; +} __packed; + +/** + * struct ti_sci_msg_req_set_clock_freq - Request to setup a clock frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @min_freq_hz: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @target_freq_hz: The target clock frequency. The clock will be programmed + * at a rate as close to this target frequency as possible. + * @max_freq_hz: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. 
In case of specific requests, TISCI evaluates capability to achieve + * requested range and responds with success/failure message. + * + * This sets the desired frequency for a clock within an allowable + * range. This message will fail on an enabled clock unless + * MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE is set for the clock. Additionally, + * if other clocks have their frequency modified due to this message, + * they also must have the MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE or be disabled. + * + * Calling set frequency on a clock input to the SoC pseudo-device will + * inform the PMMC of that clock's frequency. Setting a frequency of + * zero will indicate the clock is disabled. + * + * Calling set frequency on clock outputs from the SoC pseudo-device will + * function similarly to setting the clock frequency on a device. + * + * Request type is TI_SCI_MSG_SET_CLOCK_FREQ, response is a generic ACK/NACK + * message. + */ +struct ti_sci_msg_req_set_clock_freq { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u64 min_freq_hz; + u64 target_freq_hz; + u64 max_freq_hz; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_req_get_clock_freq - Request to get the clock frequency + * @hdr: Generic Header + * @dev_id: Device identifier this request is for + * @clk_id: Clock identifier for the device for this request. + * + * NOTE: Normally clock frequency management is automatically done by TISCI + * entity. In some cases, clock frequencies are configured by host. + * + * Request type is TI_SCI_MSG_GET_CLOCK_FREQ, responded with clock frequency + * that the clock is currently at. + */ +struct ti_sci_msg_req_get_clock_freq { + struct ti_sci_msg_hdr hdr; + u32 dev_id; + u8 clk_id; +} __packed; + +/** + * struct ti_sci_msg_resp_get_clock_freq - Response of clock frequency request + * @hdr: Generic Header + * @freq_hz: Frequency that the clock is currently on, in Hz. + * + * Response to request type TI_SCI_MSG_GET_CLOCK_FREQ. + */ +struct ti_sci_msg_resp_get_clock_freq { + struct ti_sci_msg_hdr hdr; + u64 freq_hz; +} __packed; + +#endif /* __TI_SCI_H */ |
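The ops table wired up in ti_sci_setup_ops() is what client drivers are expected to consume once they hold a handle. Below is a minimal consumer sketch, not part of this series: the include path for the client API header and the EXAMPLE_* identifiers are assumptions, and the dev_id/clk_id values are SoC-specific placeholders.

/*
 * Hypothetical consumer sketch only.  Assumes the client-facing API is
 * exposed through a ti_sci_protocol.h header; EXAMPLE_DEV_ID and
 * EXAMPLE_CLK_ID are made-up identifiers, real values come from the SoC
 * documentation and device tree bindings.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti_sci_protocol.h>	/* assumed header location */

#define EXAMPLE_DEV_ID	0x10	/* placeholder device identifier */
#define EXAMPLE_CLK_ID	0x02	/* placeholder clock input index */

static int example_client_probe(struct platform_device *pdev)
{
	const struct ti_sci_handle *handle;
	u64 freq;
	int ret;

	/* Returns -EPROBE_DEFER until the ti-sci instance has registered. */
	handle = devm_ti_sci_get_handle(&pdev->dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* Read the current rate of one clock input of the device. */
	ret = handle->ops.clk_ops.get_freq(handle, EXAMPLE_DEV_ID,
					   EXAMPLE_CLK_ID, &freq);
	if (ret)
		return ret;
	dev_info(&pdev->dev, "clock is at %llu Hz\n",
		 (unsigned long long)freq);

	/* Re-program the clock, bounded by min/target/max in Hz. */
	return handle->ops.clk_ops.set_freq(handle, EXAMPLE_DEV_ID,
					    EXAMPLE_CLK_ID, freq / 2,
					    freq, freq);
}

Because devm_ti_sci_get_handle() registers a devres release action, the sketch never calls ti_sci_put_handle() itself; a driver that used ti_sci_get_handle() directly would have to balance it in its remove path.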
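For reference, the framing that ti_sci_cmd_clk_get_freq() performs through ti_sci_get_one_xfer()/ti_sci_do_xfer() reduces to filling the generic header plus the request payload defined above, and treating the response as successful only when TI_SCI_FLAG_RESP_GENERIC_ACK is set. The stand-alone sketch below illustrates just that framing; the helper names and buffer handling are illustrative, and the host value shown is the K2G host_id from the per-SoC descriptor.

/*
 * Illustrative framing of a TI_SCI_MSG_GET_CLOCK_FREQ exchange using only
 * the message definitions from ti_sci.h.  Helper names and buffer
 * management are hypothetical; the real driver does this work inside
 * ti_sci_get_one_xfer()/ti_sci_do_xfer().
 */
#include <linux/types.h>
#include "ti_sci.h"

static bool example_fill_get_freq(u8 *buf, size_t len,
				  u32 dev_id, u8 clk_id, u8 seq)
{
	struct ti_sci_msg_req_get_clock_freq *req = (void *)buf;

	if (len < sizeof(*req))
		return false;

	req->hdr.type = TI_SCI_MSG_GET_CLOCK_FREQ;
	req->hdr.host = 2;	/* host_id from the per-SoC descriptor (K2G) */
	req->hdr.seq = seq;	/* echoed back; used to match the response */
	req->hdr.flags = TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
	req->dev_id = dev_id;
	req->clk_id = clk_id;
	return true;
}

static bool example_resp_ok(const u8 *buf, u64 *freq_hz)
{
	const struct ti_sci_msg_resp_get_clock_freq *resp = (const void *)buf;

	/* A NACK carries TI_SCI_FLAG_RESP_GENERIC_NACK (0) in hdr.flags. */
	if (!(resp->hdr.flags & TI_SCI_FLAG_RESP_GENERIC_ACK))
		return false;

	*freq_hz = resp->freq_hz;
	return true;
}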