Diffstat (limited to 'drivers')
754 files changed, 45066 insertions, 11328 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 020780b6b4d2..f735c4955143 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -72,9 +72,9 @@ obj-$(CONFIG_PARPORT) += parport/ obj-y += base/ block/ misc/ mfd/ nfc/ obj-$(CONFIG_LIBNVDIMM) += nvdimm/ obj-$(CONFIG_DAX) += dax/ -obj-$(CONFIG_CXL_BUS) += cxl/ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ +obj-y += cxl/ obj-y += macintosh/ obj-y += scsi/ obj-y += nvme/ diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index db487ff9dd1b..c29e41bfcf35 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -32,7 +32,6 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); - static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device); static void acpi_ac_notify(struct acpi_device *device, u32 event); @@ -125,6 +124,7 @@ static int get_ac_property(struct power_supply *psy, default: return -EINVAL; } + return 0; } @@ -286,6 +286,7 @@ static int acpi_ac_resume(struct device *dev) return 0; if (old_state != ac->state) kobject_uevent(&ac->charger->dev.kobj, KOBJ_CHANGE); + return 0; } #else @@ -296,7 +297,6 @@ static int acpi_ac_remove(struct acpi_device *device) { struct acpi_ac *ac = NULL; - if (!device || !acpi_driver_data(device)) return -EINVAL; diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 990ff5b0aeb8..e07782b1fbb6 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1707,24 +1707,23 @@ static int acpi_video_resume(struct notifier_block *nb, int i; switch (val) { - case PM_HIBERNATION_PREPARE: - case PM_SUSPEND_PREPARE: - case PM_RESTORE_PREPARE: - return NOTIFY_DONE; - } - - video = container_of(nb, struct acpi_video_bus, pm_nb); - - dev_info(&video->device->dev, "Restoring backlight state\n"); + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + case PM_POST_RESTORE: + video = container_of(nb, struct acpi_video_bus, pm_nb); + + dev_info(&video->device->dev, "Restoring backlight state\n"); + + for (i = 0; i < video->attached_count; i++) { + video_device = video->attached_array[i].bind_info; + if (video_device && video_device->brightness) + acpi_video_device_lcd_set_level(video_device, + video_device->brightness->curr); + } - for (i = 0; i < video->attached_count; i++) { - video_device = video->attached_array[i].bind_info; - if (video_device && video_device->brightness) - acpi_video_device_lcd_set_level(video_device, - video_device->brightness->curr); + return NOTIFY_OK; } - - return NOTIFY_OK; + return NOTIFY_DONE; } static acpi_status diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index dc208f5f5a1f..306513fec1e1 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -52,7 +52,6 @@ static bool battery_driver_registered; static int battery_bix_broken_package; static int battery_notification_delay_ms; static int battery_ac_is_broken; -static int battery_quirk_notcharging; static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); @@ -216,10 +215,8 @@ static int acpi_battery_get_property(struct power_supply *psy, val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (acpi_battery_is_charged(battery)) val->intval = POWER_SUPPLY_STATUS_FULL; - else if (battery_quirk_notcharging) - val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else - val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case 
POWER_SUPPLY_PROP_PRESENT: val->intval = acpi_battery_present(battery); @@ -1105,12 +1102,6 @@ battery_ac_is_broken_quirk(const struct dmi_system_id *d) return 0; } -static int __init battery_quirk_not_charging(const struct dmi_system_id *d) -{ - battery_quirk_notcharging = 1; - return 0; -} - static const struct dmi_system_id bat_dmi_table[] __initconst = { { /* NEC LZ750/LS */ @@ -1140,19 +1131,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = { }, }, { - /* - * On Lenovo ThinkPads the BIOS specification defines - * a state when the bits for charging and discharging - * are both set to 0. That state is "Not Charging". - */ - .callback = battery_quirk_not_charging, - .ident = "Lenovo ThinkPad", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad"), - }, - }, - { /* Microsoft Surface Go 3 */ .callback = battery_notification_delay_quirk, .matches = { diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index b67d2ee77cd1..86fa61a21826 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -443,7 +443,7 @@ static void acpi_bus_osc_negotiate_usb_control(void) } osc_sb_native_usb4_control = - control & ((u32 *)context.ret.pointer)[OSC_CONTROL_DWORD]; + control & acpi_osc_ctx_get_pci_control(&context); acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control); acpi_bus_decode_usb_osc("USB4 _OSC: OS controls", diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 3b299b28a8af..903528f7e187 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -315,7 +315,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd) goto end; } - /* wait for completion and check for PCC errro bit */ + /* wait for completion and check for PCC error bit */ ret = check_pcc_chan(pcc_ss_id, true); if (pcc_ss_data->pcc_mrtt) diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c index c0da24c9f8c3..4919e7abe93f 100644 --- a/drivers/acpi/dptf/dptf_pch_fivr.c +++ b/drivers/acpi/dptf/dptf_pch_fivr.c @@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev) static const struct acpi_device_id pch_fivr_device_ids[] = { {"INTC1045", 0}, {"INTC1049", 0}, + {"INTC1064", 0}, {"INTC10A3", 0}, {"", 0}, }; diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index 407b89d8a2ce..86561eda939f 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -232,6 +232,8 @@ static const struct acpi_device_id int3407_device_ids[] = { {"INTC1050", 0}, {"INTC1060", 0}, {"INTC1061", 0}, + {"INTC1065", 0}, + {"INTC1066", 0}, {"INTC10A4", 0}, {"INTC10A5", 0}, {"", 0}, diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 42a556346548..b7113fa92fa6 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -27,6 +27,7 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INT3532"}, {"INTC1040"}, {"INTC1041"}, + {"INTC1042"}, {"INTC1043"}, {"INTC1044"}, {"INTC1045"}, @@ -37,6 +38,11 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INTC1050"}, {"INTC1060"}, {"INTC1061"}, + {"INTC1062"}, + {"INTC1063"}, + {"INTC1064"}, + {"INTC1065"}, + {"INTC1066"}, {"INTC10A0"}, {"INTC10A1"}, {"INTC10A2"}, diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h index 44728529a5b6..e7b4b4e4a55e 100644 --- a/drivers/acpi/fan.h +++ b/drivers/acpi/fan.h @@ -14,6 +14,7 @@ {"INT3404", }, /* Fan */ \ {"INTC1044", }, /* Fan for Tiger Lake generation */ \ {"INTC1048", }, /* 
Fan for Alder Lake generation */ \ + {"INTC1063", }, /* Fan for Meteor Lake generation */ \ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \ {"PNP0C0B", } /* Generic ACPI fan */ diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index ef104809f27b..8d769114a048 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -79,17 +79,17 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) static int find_child_checks(struct acpi_device *adev, bool check_children) { - bool sta_present = true; unsigned long long sta; acpi_status status; + if (check_children && list_empty(&adev->children)) + return -ENODEV; + status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta); if (status == AE_NOT_FOUND) - sta_present = false; - else if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) - return -ENODEV; + return FIND_CHILD_MIN_SCORE; - if (check_children && list_empty(&adev->children)) + if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) return -ENODEV; /* @@ -99,8 +99,10 @@ static int find_child_checks(struct acpi_device *adev, bool check_children) * matched going forward. [This means a second spec violation in a row, * so whatever we do here is best effort anyway.] */ - return sta_present && !adev->pnp.type.platform_id ? - FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE; + if (adev->pnp.type.platform_id) + return FIND_CHILD_MIN_SCORE; + + return FIND_CHILD_MAX_SCORE; } struct acpi_device *acpi_find_child_device(struct acpi_device *parent, diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index fe61f617a943..ae5f4acf2675 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1230,7 +1230,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, if (rc) return rc; - nfit_device_lock(dev); + device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); @@ -1247,7 +1247,7 @@ static ssize_t hw_error_scrub_store(struct device *dev, break; } } - nfit_device_unlock(dev); + device_unlock(dev); if (rc) return rc; return size; @@ -1267,10 +1267,10 @@ static ssize_t scrub_show(struct device *dev, ssize_t rc = -ENXIO; bool busy; - nfit_device_lock(dev); + device_lock(dev); nd_desc = dev_get_drvdata(dev); if (!nd_desc) { - nfit_device_unlock(dev); + device_unlock(dev); return rc; } acpi_desc = to_acpi_desc(nd_desc); @@ -1287,7 +1287,7 @@ static ssize_t scrub_show(struct device *dev, } mutex_unlock(&acpi_desc->init_mutex); - nfit_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1304,14 +1304,14 @@ static ssize_t scrub_store(struct device *dev, if (val != 1) return -EINVAL; - nfit_device_lock(dev); + device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG); } - nfit_device_unlock(dev); + device_unlock(dev); if (rc) return rc; return size; @@ -1697,9 +1697,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data) struct acpi_device *adev = data; struct device *dev = &adev->dev; - nfit_device_lock(dev->parent); + device_lock(dev->parent); __acpi_nvdimm_notify(dev, event); - nfit_device_unlock(dev->parent); + device_unlock(dev->parent); } static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method) @@ -3152,8 +3152,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) struct device *dev = acpi_desc->dev; /* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ - 
nfit_device_lock(dev); - nfit_device_unlock(dev); + device_lock(dev); + device_unlock(dev); /* Bounce the init_mutex to complete initial registration */ mutex_lock(&acpi_desc->init_mutex); @@ -3305,8 +3305,8 @@ void acpi_nfit_shutdown(void *data) * acpi_nfit_ars_rescan() submissions have had a chance to * either submit or see ->cancel set. */ - nfit_device_lock(bus_dev); - nfit_device_unlock(bus_dev); + device_lock(bus_dev); + device_unlock(bus_dev); flush_workqueue(nfit_wq); } @@ -3449,9 +3449,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify); static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { - nfit_device_lock(&adev->dev); + device_lock(&adev->dev); __acpi_nfit_notify(&adev->dev, adev->handle, event); - nfit_device_unlock(&adev->dev); + device_unlock(&adev->dev); } static const struct acpi_device_id acpi_nfit_ids[] = { diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index ee8d9973f60b..d48a388b796e 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c @@ -32,6 +32,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, */ mutex_lock(&acpi_desc_lock); list_for_each_entry(acpi_desc, &acpi_descs, list) { + unsigned int align = 1UL << MCI_MISC_ADDR_LSB(mce->misc); struct device *dev = acpi_desc->dev; int found_match = 0; @@ -63,8 +64,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, /* If this fails due to an -ENOMEM, there is little we can do */ nvdimm_bus_add_badrange(acpi_desc->nvdimm_bus, - ALIGN(mce->addr, L1_CACHE_BYTES), - L1_CACHE_BYTES); + ALIGN_DOWN(mce->addr, align), align); nvdimm_region_notify(nfit_spa->nd_region, NVDIMM_REVALIDATE_POISON); diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 50882bdbeb96..6023ad61831a 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -337,30 +337,6 @@ static inline struct acpi_nfit_desc *to_acpi_desc( return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); } -#ifdef CONFIG_PROVE_LOCKING -static inline void nfit_device_lock(struct device *dev) -{ - device_lock(dev); - mutex_lock(&dev->lockdep_mutex); -} - -static inline void nfit_device_unlock(struct device *dev) -{ - mutex_unlock(&dev->lockdep_mutex); - device_unlock(dev); -} -#else -static inline void nfit_device_lock(struct device *dev) -{ - device_lock(dev); -} - -static inline void nfit_device_unlock(struct device *dev) -{ - device_unlock(dev); -} -#endif - const guid_t *to_nfit_uuid(enum nfit_uuids id); int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz); void acpi_nfit_shutdown(void *data); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 7a70c4bfc23c..3269a888fb7a 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -36,7 +36,6 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include "acpica/accommon.h" -#include "acpica/acnamesp.h" #include "internal.h" /* Definitions for ACPI_DEBUG_PRINT() */ @@ -1496,91 +1495,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n, } EXPORT_SYMBOL(acpi_check_region); -static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level, - void *_res, void **return_value) -{ - struct acpi_mem_space_context **mem_ctx; - union acpi_operand_object *handler_obj; - union acpi_operand_object *region_obj2; - union acpi_operand_object *region_obj; - struct resource *res = _res; - acpi_status status; - - region_obj = acpi_ns_get_attached_object(handle); - if (!region_obj) - return AE_OK; - - handler_obj = region_obj->region.handler; - if (!handler_obj) - return AE_OK; - 
- if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return AE_OK; - - if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) - return AE_OK; - - region_obj2 = acpi_ns_get_secondary_object(region_obj); - if (!region_obj2) - return AE_OK; - - mem_ctx = (void *)&region_obj2->extra.region_context; - - if (!(mem_ctx[0]->address >= res->start && - mem_ctx[0]->address < res->end)) - return AE_OK; - - status = handler_obj->address_space.setup(region_obj, - ACPI_REGION_DEACTIVATE, - NULL, (void **)mem_ctx); - if (ACPI_SUCCESS(status)) - region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE); - - return status; -} - -/** - * acpi_release_memory - Release any mappings done to a memory region - * @handle: Handle to namespace node - * @res: Memory resource - * @level: A level that terminates the search - * - * Walks through @handle and unmaps all SystemMemory Operation Regions that - * overlap with @res and that have already been activated (mapped). - * - * This is a helper that allows drivers to place special requirements on memory - * region that may overlap with operation regions, primarily allowing them to - * safely map the region as non-cached memory. - * - * The unmapped Operation Regions will be automatically remapped next time they - * are called, so the drivers do not need to do anything else. - */ -acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, - u32 level) -{ - acpi_status status; - - if (!(res->flags & IORESOURCE_MEM)) - return AE_TYPE; - - status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level, - acpi_deactivate_mem_region, NULL, - res, NULL); - if (ACPI_FAILURE(status)) - return status; - - /* - * Wait for all of the mappings queued up for removal by - * acpi_deactivate_mem_region() to actually go away. 
- */ - synchronize_rcu(); - rcu_barrier(); - flush_scheduled_work(); - - return AE_OK; -} -EXPORT_SYMBOL_GPL(acpi_release_memory); - /* * Let drivers know whether the resource checks are effective */ diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index b3b507f20e87..d57cf8454b93 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -140,6 +140,17 @@ static struct pci_osc_bit_struct pci_osc_control_bit[] = { { OSC_PCI_EXPRESS_DPC_CONTROL, "DPC" }, }; +static struct pci_osc_bit_struct cxl_osc_support_bit[] = { + { OSC_CXL_1_1_PORT_REG_ACCESS_SUPPORT, "CXL11PortRegAccess" }, + { OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT, "CXL20PortDevRegAccess" }, + { OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT, "CXLProtocolErrorReporting" }, + { OSC_CXL_NATIVE_HP_SUPPORT, "CXLNativeHotPlug" }, +}; + +static struct pci_osc_bit_struct cxl_osc_control_bit[] = { + { OSC_CXL_ERROR_REPORTING_CONTROL, "CXLMemErrorReporting" }, +}; + static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word, struct pci_osc_bit_struct *table, int size) { @@ -168,33 +179,73 @@ static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word) ARRAY_SIZE(pci_osc_control_bit)); } +static void decode_cxl_osc_support(struct acpi_pci_root *root, char *msg, u32 word) +{ + decode_osc_bits(root, msg, word, cxl_osc_support_bit, + ARRAY_SIZE(cxl_osc_support_bit)); +} + +static void decode_cxl_osc_control(struct acpi_pci_root *root, char *msg, u32 word) +{ + decode_osc_bits(root, msg, word, cxl_osc_control_bit, + ARRAY_SIZE(cxl_osc_control_bit)); +} + +static inline bool is_pcie(struct acpi_pci_root *root) +{ + return root->bridge_type == ACPI_BRIDGE_TYPE_PCIE; +} + +static inline bool is_cxl(struct acpi_pci_root *root) +{ + return root->bridge_type == ACPI_BRIDGE_TYPE_CXL; +} + static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766"; +static u8 cxl_osc_uuid_str[] = "68F2D50B-C469-4d8A-BD3D-941A103FD3FC"; -static acpi_status acpi_pci_run_osc(acpi_handle handle, - const u32 *capbuf, u32 *retval) +static char *to_uuid(struct acpi_pci_root *root) +{ + if (is_cxl(root)) + return cxl_osc_uuid_str; + return pci_osc_uuid_str; +} + +static int cap_length(struct acpi_pci_root *root) +{ + if (is_cxl(root)) + return sizeof(u32) * OSC_CXL_CAPABILITY_DWORDS; + return sizeof(u32) * OSC_PCI_CAPABILITY_DWORDS; +} + +static acpi_status acpi_pci_run_osc(struct acpi_pci_root *root, + const u32 *capbuf, u32 *pci_control, + u32 *cxl_control) { struct acpi_osc_context context = { - .uuid_str = pci_osc_uuid_str, + .uuid_str = to_uuid(root), .rev = 1, - .cap.length = 12, + .cap.length = cap_length(root), .cap.pointer = (void *)capbuf, }; acpi_status status; - status = acpi_run_osc(handle, &context); + status = acpi_run_osc(root->device->handle, &context); if (ACPI_SUCCESS(status)) { - *retval = *((u32 *)(context.ret.pointer + 8)); + *pci_control = acpi_osc_ctx_get_pci_control(&context); + if (is_cxl(root)) + *cxl_control = acpi_osc_ctx_get_cxl_control(&context); kfree(context.ret.pointer); } return status; } -static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, - u32 support, - u32 *control) +static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, u32 support, + u32 *control, u32 cxl_support, + u32 *cxl_control) { acpi_status status; - u32 result, capbuf[3]; + u32 pci_result, cxl_result, capbuf[OSC_CXL_CAPABILITY_DWORDS]; support |= root->osc_support_set; @@ -202,10 +253,28 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, capbuf[OSC_SUPPORT_DWORD] = support; 
capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set; - status = acpi_pci_run_osc(root->device->handle, capbuf, &result); + if (is_cxl(root)) { + cxl_support |= root->osc_ext_support_set; + capbuf[OSC_EXT_SUPPORT_DWORD] = cxl_support; + capbuf[OSC_EXT_CONTROL_DWORD] = *cxl_control | root->osc_ext_control_set; + } + +retry: + status = acpi_pci_run_osc(root, capbuf, &pci_result, &cxl_result); if (ACPI_SUCCESS(status)) { root->osc_support_set = support; - *control = result; + *control = pci_result; + if (is_cxl(root)) { + root->osc_ext_support_set = cxl_support; + *cxl_control = cxl_result; + } + } else if (is_cxl(root)) { + /* + * CXL _OSC is optional on CXL 1.1 hosts. Fall back to PCIe _OSC + * upon any failure using CXL _OSC. + */ + root->bridge_type = ACPI_BRIDGE_TYPE_PCIE; + goto retry; } return status; } @@ -321,6 +390,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev); * @handle: ACPI handle of a PCI root bridge (or PCIe Root Complex). * @mask: Mask of _OSC bits to request control of, place to store control mask. * @support: _OSC supported capability. + * @cxl_mask: Mask of CXL _OSC control bits, place to store control mask. + * @cxl_support: CXL _OSC supported capability. * * Run _OSC query for @mask and if that is successful, compare the returned * mask of control bits with @req. If all of the @req bits are set in the @@ -331,12 +402,14 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev); * _OSC bits the BIOS has granted control of, but its contents are meaningless * on failure. **/ -static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 support) +static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, + u32 support, u32 *cxl_mask, + u32 cxl_support) { u32 req = OSC_PCI_EXPRESS_CAPABILITY_CONTROL; struct acpi_pci_root *root; acpi_status status; - u32 ctrl, capbuf[3]; + u32 ctrl, cxl_ctrl = 0, capbuf[OSC_CXL_CAPABILITY_DWORDS]; if (!mask) return AE_BAD_PARAMETER; @@ -348,20 +421,42 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 s ctrl = *mask; *mask |= root->osc_control_set; + if (is_cxl(root)) { + cxl_ctrl = *cxl_mask; + *cxl_mask |= root->osc_ext_control_set; + } + /* Need to check the available controls bits before requesting them. */ do { - status = acpi_pci_query_osc(root, support, mask); + u32 pci_missing = 0, cxl_missing = 0; + + status = acpi_pci_query_osc(root, support, mask, cxl_support, + cxl_mask); if (ACPI_FAILURE(status)) return status; - if (ctrl == *mask) - break; - decode_osc_control(root, "platform does not support", - ctrl & ~(*mask)); + if (is_cxl(root)) { + if (ctrl == *mask && cxl_ctrl == *cxl_mask) + break; + pci_missing = ctrl & ~(*mask); + cxl_missing = cxl_ctrl & ~(*cxl_mask); + } else { + if (ctrl == *mask) + break; + pci_missing = ctrl & ~(*mask); + } + if (pci_missing) + decode_osc_control(root, "platform does not support", + pci_missing); + if (cxl_missing) + decode_cxl_osc_control(root, "CXL platform does not support", + cxl_missing); ctrl = *mask; - } while (*mask); + cxl_ctrl = *cxl_mask; + } while (*mask || *cxl_mask); /* No need to request _OSC if the control was already granted. 
*/ - if ((root->osc_control_set & ctrl) == ctrl) + if ((root->osc_control_set & ctrl) == ctrl && + (root->osc_ext_control_set & cxl_ctrl) == cxl_ctrl) return AE_OK; if ((ctrl & req) != req) { @@ -373,11 +468,17 @@ static acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 s capbuf[OSC_QUERY_DWORD] = 0; capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set; capbuf[OSC_CONTROL_DWORD] = ctrl; - status = acpi_pci_run_osc(handle, capbuf, mask); + if (is_cxl(root)) { + capbuf[OSC_EXT_SUPPORT_DWORD] = root->osc_ext_support_set; + capbuf[OSC_EXT_CONTROL_DWORD] = cxl_ctrl; + } + + status = acpi_pci_run_osc(root, capbuf, mask, cxl_mask); if (ACPI_FAILURE(status)) return status; root->osc_control_set = *mask; + root->osc_ext_control_set = *cxl_mask; return AE_OK; } @@ -403,6 +504,53 @@ static u32 calculate_support(void) return support; } +/* + * Background on hotplug support, and making it depend on only + * CONFIG_HOTPLUG_PCI_PCIE vs. also considering CONFIG_MEMORY_HOTPLUG: + * + * CONFIG_ACPI_HOTPLUG_MEMORY does depend on CONFIG_MEMORY_HOTPLUG, but + * there is no existing _OSC for memory hotplug support. The reason is that + * ACPI memory hotplug requires the OS to acknowledge / coordinate with + * memory plug events via a scan handler. On the CXL side the equivalent + * would be if Linux supported the Mechanical Retention Lock [1], or + * otherwise had some coordination for the driver of a PCI device + * undergoing hotplug to be consulted on whether the hotplug should + * proceed or not. + * + * The concern is that if Linux says no to supporting CXL hotplug then + * the BIOS may say no to giving the OS hotplug control of any other PCIe + * device. So the question here is not whether hotplug is enabled, it's + * whether it is handled natively by the at all OS, and if + * CONFIG_HOTPLUG_PCI_PCIE is enabled then the answer is "yes". + * + * Otherwise, the plan for CXL coordinated remove, since the kernel does + * not support blocking hotplug, is to require the memory device to be + * disabled before hotplug is attempted. When CONFIG_MEMORY_HOTPLUG is + * disabled that step will fail and the remove attempt cancelled by the + * user. If that is not honored and the card is removed anyway then it + * does not matter if CONFIG_MEMORY_HOTPLUG is enabled or not, it will + * cause a crash and other badness. + * + * Therefore, just say yes to CXL hotplug and require removal to + * be coordinated by userspace unless and until the kernel grows better + * mechanisms for doing "managed" removal of devices in consultation with + * the driver. 
+ * + * [1]: https://lore.kernel.org/all/20201122014203.4706-1-ashok.raj@intel.com/ + */ +static u32 calculate_cxl_support(void) +{ + u32 support; + + support = OSC_CXL_2_0_PORT_DEV_REG_ACCESS_SUPPORT; + if (pci_aer_available()) + support |= OSC_CXL_PROTOCOL_ERR_REPORTING_SUPPORT; + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + support |= OSC_CXL_NATIVE_HP_SUPPORT; + + return support; +} + static u32 calculate_control(void) { u32 control; @@ -434,6 +582,16 @@ static u32 calculate_control(void) return control; } +static u32 calculate_cxl_control(void) +{ + u32 control = 0; + + if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) + control |= OSC_CXL_ERROR_REPORTING_CONTROL; + + return control; +} + static bool os_control_query_checks(struct acpi_pci_root *root, u32 support) { struct acpi_device *device = root->device; @@ -452,10 +610,10 @@ static bool os_control_query_checks(struct acpi_pci_root *root, u32 support) return true; } -static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, - bool is_pcie) +static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) { u32 support, control = 0, requested = 0; + u32 cxl_support = 0, cxl_control = 0, cxl_requested = 0; acpi_status status; struct acpi_device *device = root->device; acpi_handle handle = device->handle; @@ -479,10 +637,20 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, if (os_control_query_checks(root, support)) requested = control = calculate_control(); - status = acpi_pci_osc_control_set(handle, &control, support); + if (is_cxl(root)) { + cxl_support = calculate_cxl_support(); + decode_cxl_osc_support(root, "OS supports", cxl_support); + cxl_requested = cxl_control = calculate_cxl_control(); + } + + status = acpi_pci_osc_control_set(handle, &control, support, + &cxl_control, cxl_support); if (ACPI_SUCCESS(status)) { if (control) decode_osc_control(root, "OS now controls", control); + if (cxl_control) + decode_cxl_osc_control(root, "OS now controls", + cxl_control); if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { /* @@ -504,13 +672,18 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, *no_aspm = 1; /* _OSC is optional for PCI host bridges */ - if ((status == AE_NOT_FOUND) && !is_pcie) + if (status == AE_NOT_FOUND && !is_pcie(root)) return; if (control) { decode_osc_control(root, "OS requested", requested); decode_osc_control(root, "platform willing to grant", control); } + if (cxl_control) { + decode_cxl_osc_control(root, "OS requested", cxl_requested); + decode_cxl_osc_control(root, "platform willing to grant", + cxl_control); + } dev_info(&device->dev, "_OSC: platform retains control of PCIe features (%s)\n", acpi_format_exception(status)); @@ -527,7 +700,7 @@ static int acpi_pci_root_add(struct acpi_device *device, acpi_handle handle = device->handle; int no_aspm = 0; bool hotadd = system_state == SYSTEM_RUNNING; - bool is_pcie; + const char *acpi_hid; root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); if (!root) @@ -585,8 +758,15 @@ static int acpi_pci_root_add(struct acpi_device *device, root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle); - is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0; - negotiate_os_control(root, &no_aspm, is_pcie); + acpi_hid = acpi_device_hid(root->device); + if (strcmp(acpi_hid, "PNP0A08") == 0) + root->bridge_type = ACPI_BRIDGE_TYPE_PCIE; + else if (strcmp(acpi_hid, "ACPI0016") == 0) + root->bridge_type = ACPI_BRIDGE_TYPE_CXL; + else + dev_dbg(&device->dev, "Assuming non-PCIe host bridge\n"); + + 
negotiate_os_control(root, &no_aspm); /* * TBD: Need PCI interface for enumeration/configuration of roots. diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index e9c84d0ac55b..6a5572a1a80c 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -38,11 +38,11 @@ #define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0) static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER; -module_param(max_cstate, uint, 0000); -static unsigned int nocst __read_mostly; -module_param(nocst, uint, 0000); -static int bm_check_disable __read_mostly; -module_param(bm_check_disable, uint, 0000); +module_param(max_cstate, uint, 0400); +static bool nocst __read_mostly; +module_param(nocst, bool, 0400); +static bool bm_check_disable __read_mostly; +module_param(bm_check_disable, bool, 0400); static unsigned int latency_factor __read_mostly = 2; module_param(latency_factor, uint, 0644); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 3147702710af..04ea1569df78 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1035,20 +1035,22 @@ static void acpi_sleep_hibernate_setup(void) static inline void acpi_sleep_hibernate_setup(void) {} #endif /* !CONFIG_HIBERNATION */ -static void acpi_power_off_prepare(void) +static int acpi_power_off_prepare(struct sys_off_data *data) { /* Prepare to power off the system */ acpi_sleep_prepare(ACPI_STATE_S5); acpi_disable_all_gpes(); acpi_os_wait_events_complete(); + return NOTIFY_DONE; } -static void acpi_power_off(void) +static int acpi_power_off(struct sys_off_data *data) { /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ pr_debug("%s called\n", __func__); local_irq_disable(); acpi_enter_sleep_state(ACPI_STATE_S5); + return NOTIFY_DONE; } int __init acpi_sleep_init(void) @@ -1067,8 +1069,14 @@ int __init acpi_sleep_init(void) if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; - pm_power_off_prepare = acpi_power_off_prepare; - pm_power_off = acpi_power_off; + + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE, + SYS_OFF_PRIO_FIRMWARE, + acpi_power_off_prepare, NULL); + + register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + acpi_power_off, NULL); } else { acpi_no_s5 = true; } diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 7e775ba6fdd9..3a2adeaef5ce 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -20,6 +20,10 @@ #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/of_irq.h> +#include <linux/of_device.h> +#include <linux/acpi.h> +#include <linux/iommu.h> +#include <linux/dma-map-ops.h> #define to_amba_driver(d) container_of(d, struct amba_driver, drv) @@ -273,6 +277,36 @@ static void amba_shutdown(struct device *dev) drv->shutdown(to_amba_device(dev)); } +static int amba_dma_configure(struct device *dev) +{ + struct amba_driver *drv = to_amba_driver(dev->driver); + enum dev_dma_attr attr; + int ret = 0; + + if (dev->of_node) { + ret = of_dma_configure(dev, dev->of_node, true); + } else if (has_acpi_companion(dev)) { + attr = acpi_get_dma_attr(to_acpi_device_node(dev->fwnode)); + ret = acpi_dma_configure(dev, attr); + } + + if (!ret && !drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + + return ret; +} + +static void amba_dma_cleanup(struct device *dev) +{ + struct amba_driver *drv = to_amba_driver(dev->driver); + + if (!drv->driver_managed_dma) + 
iommu_device_unuse_default_domain(dev); +} + #ifdef CONFIG_PM /* * Hooks to provide runtime PM of the pclk (bus clock). It is safe to @@ -341,7 +375,8 @@ struct bus_type amba_bustype = { .probe = amba_probe, .remove = amba_remove, .shutdown = amba_shutdown, - .dma_configure = platform_dma_configure, + .dma_configure = amba_dma_configure, + .dma_cleanup = amba_dma_cleanup, .pm = &amba_pm, }; EXPORT_SYMBOL_GPL(amba_bustype); diff --git a/drivers/base/core.c b/drivers/base/core.c index 3d6430eb0c6a..2eede2ec3d64 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2864,9 +2864,6 @@ void device_initialize(struct device *dev) kobject_init(&dev->kobj, &device_ktype); INIT_LIST_HEAD(&dev->dma_pools); mutex_init(&dev->mutex); -#ifdef CONFIG_PROVE_LOCKING - mutex_init(&dev->lockdep_mutex); -#endif lockdep_set_novalidate_class(&dev->mutex); spin_lock_init(&dev->devres_lock); INIT_LIST_HEAD(&dev->devres_head); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 3fc3b5940bb3..94b7ac9bf459 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -671,6 +671,8 @@ sysfs_failed: if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); + if (dev->bus && dev->bus->dma_cleanup) + dev->bus->dma_cleanup(dev); pinctrl_bind_failed: device_links_no_driver(dev); device_unbind_cleanup(dev); @@ -1199,6 +1201,9 @@ static void __device_release_driver(struct device *dev, struct device *parent) device_remove(dev); + if (dev->bus && dev->bus->dma_cleanup) + dev->bus->dma_cleanup(dev); + device_links_driver_cleanup(dev); device_unbind_cleanup(dev); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 8cc272fd5c99..70bc30cf575c 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -30,6 +30,8 @@ #include <linux/property.h> #include <linux/kmemleak.h> #include <linux/types.h> +#include <linux/iommu.h> +#include <linux/dma-map-ops.h> #include "base.h" #include "power/power.h" @@ -1454,9 +1456,9 @@ static void platform_shutdown(struct device *_dev) drv->shutdown(dev); } - -int platform_dma_configure(struct device *dev) +static int platform_dma_configure(struct device *dev) { + struct platform_driver *drv = to_platform_driver(dev->driver); enum dev_dma_attr attr; int ret = 0; @@ -1467,9 +1469,23 @@ int platform_dma_configure(struct device *dev) ret = acpi_dma_configure(dev, attr); } + if (!ret && !drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + return ret; } +static void platform_dma_cleanup(struct device *dev) +{ + struct platform_driver *drv = to_platform_driver(dev->driver); + + if (!drv->driver_managed_dma) + iommu_device_unuse_default_domain(dev); +} + static const struct dev_pm_ops platform_dev_pm_ops = { SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, pm_generic_runtime_resume, NULL) USE_PLATFORM_PM_SLEEP_OPS @@ -1484,6 +1500,7 @@ struct bus_type platform_bus_type = { .remove = platform_remove, .shutdown = platform_shutdown, .dma_configure = platform_dma_configure, + .dma_cleanup = platform_dma_cleanup, .pm = &platform_dev_pm_ops, }; EXPORT_SYMBOL_GPL(platform_bus_type); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 2b21f717cce1..ef9bc62e9afd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -756,24 +756,23 @@ static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) */ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) { - struct rbd_client *client_node; - bool found = false; + struct rbd_client 
*rbdc = NULL, *iter; if (ceph_opts->flags & CEPH_OPT_NOSHARE) return NULL; spin_lock(&rbd_client_list_lock); - list_for_each_entry(client_node, &rbd_client_list, node) { - if (!ceph_compare_options(ceph_opts, client_node->client)) { - __rbd_get_client(client_node); + list_for_each_entry(iter, &rbd_client_list, node) { + if (!ceph_compare_options(ceph_opts, iter->client)) { + __rbd_get_client(iter); - found = true; + rbdc = iter; break; } } spin_unlock(&rbd_client_list_lock); - return found ? client_node : NULL; + return rbdc; } /* diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 8fd4a356a86e..76648c4fdaf4 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/acpi.h> #include <linux/iommu.h> +#include <linux/dma-map-ops.h> #include "fsl-mc-private.h" @@ -140,15 +141,33 @@ static int fsl_mc_dma_configure(struct device *dev) { struct device *dma_dev = dev; struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); + struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); u32 input_id = mc_dev->icid; + int ret; while (dev_is_fsl_mc(dma_dev)) dma_dev = dma_dev->parent; if (dev_of_node(dma_dev)) - return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id); + ret = of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id); + else + ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); + + if (!ret && !mc_drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + + return ret; +} + +static void fsl_mc_dma_cleanup(struct device *dev) +{ + struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); - return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); + if (!mc_drv->driver_managed_dma) + iommu_device_unuse_default_domain(dev); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, @@ -312,6 +331,7 @@ struct bus_type fsl_mc_bus_type = { .match = fsl_mc_bus_match, .uevent = fsl_mc_bus_uevent, .dma_configure = fsl_mc_dma_configure, + .dma_cleanup = fsl_mc_dma_cleanup, .dev_groups = fsl_mc_dev_groups, .bus_groups = fsl_mc_bus_groups, }; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index a087156a5818..b3f2d55dc551 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -385,6 +385,19 @@ config HW_RANDOM_PIC32 If unsure, say Y. +config HW_RANDOM_POLARFIRE_SOC + tristate "Microchip PolarFire SoC Random Number Generator support" + depends on HW_RANDOM && POLARFIRE_SOC_SYS_CTRL + help + This driver provides kernel-side support for the Random Number + Generator hardware found on PolarFire SoC (MPFS). + + To compile this driver as a module, choose M here. The + module will be called mfps_rng. + + If unsure, say N. 
+ + config HW_RANDOM_MESON tristate "Amlogic Meson Random Number Generator support" depends on HW_RANDOM @@ -527,7 +540,7 @@ config HW_RANDOM_ARM_SMCCC_TRNG config HW_RANDOM_CN10K tristate "Marvell CN10K Random Number Generator support" - depends on HW_RANDOM && PCI && ARM64 + depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) default HW_RANDOM help This driver provides support for the True Random Number diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 584d47ba32f7..3e948cf04476 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -46,3 +46,4 @@ obj-$(CONFIG_HW_RANDOM_CCTRNG) += cctrng.o obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o +obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c index 35001c63648b..a01e9307737c 100644 --- a/drivers/char/hw_random/cn10k-rng.c +++ b/drivers/char/hw_random/cn10k-rng.c @@ -31,26 +31,23 @@ struct cn10k_rng { #define PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE 0xc2000b0f -static int reset_rng_health_state(struct cn10k_rng *rng) +static unsigned long reset_rng_health_state(struct cn10k_rng *rng) { struct arm_smccc_res res; /* Send SMC service call to reset EBG health state */ arm_smccc_smc(PLAT_OCTEONTX_RESET_RNG_EBG_HEALTH_STATE, 0, 0, 0, 0, 0, 0, 0, &res); - if (res.a0 != 0UL) - return -EIO; - - return 0; + return res.a0; } static int check_rng_health(struct cn10k_rng *rng) { u64 status; - int err; + unsigned long err; /* Skip checking health */ if (!rng->reg_base) - return 0; + return -ENODEV; status = readq(rng->reg_base + RNM_PF_EBG_HEALTH); if (status & BIT_ULL(20)) { @@ -58,7 +55,9 @@ static int check_rng_health(struct cn10k_rng *rng) if (err) { dev_err(&rng->pdev->dev, "HWRNG: Health test failed (status=%llx)\n", status); - dev_err(&rng->pdev->dev, "HWRNG: error during reset\n"); + dev_err(&rng->pdev->dev, "HWRNG: error during reset (error=%lx)\n", + err); + return -EIO; } } return 0; @@ -90,6 +89,7 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data, { struct cn10k_rng *rng = (struct cn10k_rng *)hwrng->priv; unsigned int size; + u8 *pos = data; int err = 0; u64 value; @@ -102,17 +102,20 @@ static int cn10k_rng_read(struct hwrng *hwrng, void *data, while (size >= 8) { cn10k_read_trng(rng, &value); - *((u64 *)data) = (u64)value; + *((u64 *)pos) = value; size -= 8; - data += 8; + pos += 8; } - while (size > 0) { + if (size > 0) { cn10k_read_trng(rng, &value); - *((u8 *)data) = (u8)value; - size--; - data++; + while (size > 0) { + *pos = (u8)value; + value >>= 8; + size--; + pos++; + } } return max - size; diff --git a/drivers/char/hw_random/mpfs-rng.c b/drivers/char/hw_random/mpfs-rng.c new file mode 100644 index 000000000000..5813da617a48 --- /dev/null +++ b/drivers/char/hw_random/mpfs-rng.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip PolarFire SoC (MPFS) hardware random driver + * + * Copyright (c) 2020-2022 Microchip Corporation. All rights reserved. 
+ * + * Author: Conor Dooley <conor.dooley@microchip.com> + */ + +#include <linux/module.h> +#include <linux/hw_random.h> +#include <linux/platform_device.h> +#include <soc/microchip/mpfs.h> + +#define CMD_OPCODE 0x21 +#define CMD_DATA_SIZE 0U +#define CMD_DATA NULL +#define MBOX_OFFSET 0U +#define RESP_OFFSET 0U +#define RNG_RESP_BYTES 32U + +struct mpfs_rng { + struct mpfs_sys_controller *sys_controller; + struct hwrng rng; +}; + +static int mpfs_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct mpfs_rng *rng_priv = container_of(rng, struct mpfs_rng, rng); + u32 response_msg[RNG_RESP_BYTES / sizeof(u32)]; + unsigned int count = 0, copy_size_bytes; + int ret; + + struct mpfs_mss_response response = { + .resp_status = 0U, + .resp_msg = (u32 *)response_msg, + .resp_size = RNG_RESP_BYTES + }; + struct mpfs_mss_msg msg = { + .cmd_opcode = CMD_OPCODE, + .cmd_data_size = CMD_DATA_SIZE, + .response = &response, + .cmd_data = CMD_DATA, + .mbox_offset = MBOX_OFFSET, + .resp_offset = RESP_OFFSET + }; + + while (count < max) { + ret = mpfs_blocking_transaction(rng_priv->sys_controller, &msg); + if (ret) + return ret; + + copy_size_bytes = max - count > RNG_RESP_BYTES ? RNG_RESP_BYTES : max - count; + memcpy(buf + count, response_msg, copy_size_bytes); + + count += copy_size_bytes; + if (!wait) + break; + } + + return count; +} + +static int mpfs_rng_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mpfs_rng *rng_priv; + int ret; + + rng_priv = devm_kzalloc(dev, sizeof(*rng_priv), GFP_KERNEL); + if (!rng_priv) + return -ENOMEM; + + rng_priv->sys_controller = mpfs_sys_controller_get(&pdev->dev); + if (IS_ERR(rng_priv->sys_controller)) + return dev_err_probe(dev, PTR_ERR(rng_priv->sys_controller), + "Failed to register system controller hwrng sub device\n"); + + rng_priv->rng.read = mpfs_rng_read; + rng_priv->rng.name = pdev->name; + rng_priv->rng.quality = 1024; + + platform_set_drvdata(pdev, rng_priv); + + ret = devm_hwrng_register(&pdev->dev, &rng_priv->rng); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to register MPFS hwrng\n"); + + dev_info(&pdev->dev, "Registered MPFS hwrng\n"); + + return 0; +} + +static struct platform_driver mpfs_rng_driver = { + .driver = { + .name = "mpfs-rng", + }, + .probe = mpfs_rng_probe, +}; +module_platform_driver(mpfs_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>"); +MODULE_DESCRIPTION("PolarFire SoC (MPFS) hardware random driver"); diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c index e0d77fa048fb..f06e4f95114f 100644 --- a/drivers/char/hw_random/omap3-rom-rng.c +++ b/drivers/char/hw_random/omap3-rom-rng.c @@ -92,7 +92,7 @@ static int __maybe_unused omap_rom_rng_runtime_resume(struct device *dev) r = ddata->rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT); if (r != 0) { - clk_disable(ddata->clk); + clk_disable_unprepare(ddata->clk); dev_err(dev, "HW init failed: %d\n", r); return -EIO; diff --git a/drivers/char/hw_random/optee-rng.c b/drivers/char/hw_random/optee-rng.c index a948c0727b2b..96b5d546d136 100644 --- a/drivers/char/hw_random/optee-rng.c +++ b/drivers/char/hw_random/optee-rng.c @@ -115,7 +115,7 @@ static size_t get_optee_rng_data(struct optee_rng_private *pvt_data, static int optee_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct optee_rng_private *pvt_data = to_optee_rng_private(rng); - size_t read = 0, rng_size = 0; + size_t read = 0, rng_size; int timeout = 1; u8 *data = 
buf; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 5d596e778ff4..48f8f4221e21 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -210,6 +210,15 @@ config COMMON_CLK_CS2000_CP help If you say yes here you get support for the CS2000 clock multiplier. +config COMMON_CLK_EN7523 + bool "Clock driver for Airoha EN7523 SoC system clocks" + depends on OF + depends on ARCH_AIROHA || COMPILE_TEST + default ARCH_AIROHA + help + This driver provides the fixed clocks and gates present on Airoha + ARM silicon. + config COMMON_CLK_FSL_FLEXSPI tristate "Clock driver for FlexSPI on Layerscape SoCs" depends on ARCH_LAYERSCAPE || COMPILE_TEST @@ -368,6 +377,11 @@ config COMMON_CLK_VC5 This driver supports the IDT VersaClock 5 and VersaClock 6 programmable clock generators. +config COMMON_CLK_STM32MP135 + def_bool COMMON_CLK && MACH_STM32MP13 + help + Support for stm32mp135 SoC family clocks + config COMMON_CLK_STM32MP157 def_bool COMMON_CLK && MACH_STM32MP157 help diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 21cb96b022e6..d5db170d38d2 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o +obj-$(CONFIG_COMMON_CLK_EN7523) += clk-en7523.o obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o @@ -114,6 +115,7 @@ obj-y += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_ARCH_STI) += st/ +obj-$(CONFIG_ARCH_STM32) += stm32/ obj-$(CONFIG_SOC_STARFIVE) += starfive/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-y += sunxi-ng/ diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c index 02437bdedf4d..155f313986b4 100644 --- a/drivers/clk/actions/owl-pll.c +++ b/drivers/clk/actions/owl-pll.c @@ -25,7 +25,7 @@ static u32 owl_pll_calculate_mul(struct owl_pll_hw *pll_hw, unsigned long rate) else if (mul > pll_hw->max_mul) mul = pll_hw->max_mul; - return mul &= mul_mask(pll_hw); + return mul & mul_mask(pll_hw); } static unsigned long _get_table_rate(const struct clk_pll_table *table, diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index 9d09621549b9..73518009a0f2 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -345,7 +345,7 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi, int ret; clks = devm_kcalloc(rpi->dev, - sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID, + RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks), GFP_KERNEL); if (!clks) return -ENOMEM; diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index c91e9096b070..5467d941ddfd 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -627,8 +627,7 @@ of_clk_cdce_get(struct of_phandle_args *clkspec, void *data) return &cdce->clkout[idx].hw; } -static int cdce706_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int cdce706_probe(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; struct cdce706_dev_data *cdce; @@ -692,7 +691,7 @@ static struct i2c_driver cdce706_i2c_driver = { .name = "cdce706", .of_match_table = of_match_ptr(cdce706_dt_match), }, - .probe = cdce706_probe, + .probe_new = cdce706_probe, .remove = cdce706_remove, .id_table = cdce706_id, }; diff --git a/drivers/clk/clk-cdce925.c 
b/drivers/clk/clk-cdce925.c index 308b353815e1..ef9a2d44e40c 100644 --- a/drivers/clk/clk-cdce925.c +++ b/drivers/clk/clk-cdce925.c @@ -634,11 +634,20 @@ static struct regmap_bus regmap_cdce925_bus = { .read = cdce925_regmap_i2c_read, }; -static int cdce925_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id cdce925_id[] = { + { "cdce913", CDCE913 }, + { "cdce925", CDCE925 }, + { "cdce937", CDCE937 }, + { "cdce949", CDCE949 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, cdce925_id); + +static int cdce925_probe(struct i2c_client *client) { struct clk_cdce925_chip *data; struct device_node *node = client->dev.of_node; + const struct i2c_device_id *id = i2c_match_id(cdce925_id, client); const char *parent_name; const char *pll_clk_name[MAX_NUMBER_OF_PLLS] = {NULL,}; struct clk_init_data init; @@ -814,15 +823,6 @@ error: return err; } -static const struct i2c_device_id cdce925_id[] = { - { "cdce913", CDCE913 }, - { "cdce925", CDCE925 }, - { "cdce937", CDCE937 }, - { "cdce949", CDCE949 }, - { } -}; -MODULE_DEVICE_TABLE(i2c, cdce925_id); - static const struct of_device_id clk_cdce925_of_match[] = { { .compatible = "ti,cdce913" }, { .compatible = "ti,cdce925" }, @@ -837,7 +837,7 @@ static struct i2c_driver cdce925_driver = { .name = "cdce925", .of_match_table = of_match_ptr(clk_cdce925_of_match), }, - .probe = cdce925_probe, + .probe_new = cdce925_probe, .id_table = cdce925_id, }; module_i2c_driver(cdce925_driver); diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c index dc5040a84dcc..aa5c72bab83e 100644 --- a/drivers/clk/clk-cs2000-cp.c +++ b/drivers/clk/clk-cs2000-cp.c @@ -570,8 +570,7 @@ static int cs2000_remove(struct i2c_client *client) return 0; } -static int cs2000_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int cs2000_probe(struct i2c_client *client) { struct cs2000_priv *priv; struct device *dev = &client->dev; @@ -625,7 +624,7 @@ static struct i2c_driver cs2000_driver = { .pm = &cs2000_pm_ops, .of_match_table = cs2000_of_match, }, - .probe = cs2000_probe, + .probe_new = cs2000_probe, .remove = cs2000_remove, .id_table = cs2000_id, }; diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c new file mode 100644 index 000000000000..29f0126cbd05 --- /dev/null +++ b/drivers/clk/clk-en7523.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/delay.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/en7523-clk.h> + +#define REG_PCI_CONTROL 0x88 +#define REG_PCI_CONTROL_PERSTOUT BIT(29) +#define REG_PCI_CONTROL_PERSTOUT1 BIT(26) +#define REG_PCI_CONTROL_REFCLK_EN1 BIT(22) +#define REG_GSW_CLK_DIV_SEL 0x1b4 +#define REG_EMI_CLK_DIV_SEL 0x1b8 +#define REG_BUS_CLK_DIV_SEL 0x1bc +#define REG_SPI_CLK_DIV_SEL 0x1c4 +#define REG_SPI_CLK_FREQ_SEL 0x1c8 +#define REG_NPU_CLK_DIV_SEL 0x1fc +#define REG_CRYPTO_CLKSRC 0x200 +#define REG_RESET_CONTROL 0x834 +#define REG_RESET_CONTROL_PCIEHB BIT(29) +#define REG_RESET_CONTROL_PCIE1 BIT(27) +#define REG_RESET_CONTROL_PCIE2 BIT(26) + +struct en_clk_desc { + int id; + const char *name; + u32 base_reg; + u8 base_bits; + u8 base_shift; + union { + const unsigned int *base_values; + unsigned int base_value; + }; + size_t n_base_values; + + u16 div_reg; + u8 div_bits; + u8 div_shift; + u16 div_val0; + u8 div_step; +}; + +struct en_clk_gate { + void __iomem *base; + struct clk_hw hw; +}; + +static const u32 
gsw_base[] = { 400000000, 500000000 }; +static const u32 emi_base[] = { 333000000, 400000000 }; +static const u32 bus_base[] = { 500000000, 540000000 }; +static const u32 slic_base[] = { 100000000, 3125000 }; +static const u32 npu_base[] = { 333000000, 400000000, 500000000 }; + +static const struct en_clk_desc en7523_base_clks[] = { + { + .id = EN7523_CLK_GSW, + .name = "gsw", + + .base_reg = REG_GSW_CLK_DIV_SEL, + .base_bits = 1, + .base_shift = 8, + .base_values = gsw_base, + .n_base_values = ARRAY_SIZE(gsw_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + }, { + .id = EN7523_CLK_EMI, + .name = "emi", + + .base_reg = REG_EMI_CLK_DIV_SEL, + .base_bits = 1, + .base_shift = 8, + .base_values = emi_base, + .n_base_values = ARRAY_SIZE(emi_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + }, { + .id = EN7523_CLK_BUS, + .name = "bus", + + .base_reg = REG_BUS_CLK_DIV_SEL, + .base_bits = 1, + .base_shift = 8, + .base_values = bus_base, + .n_base_values = ARRAY_SIZE(bus_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + }, { + .id = EN7523_CLK_SLIC, + .name = "slic", + + .base_reg = REG_SPI_CLK_FREQ_SEL, + .base_bits = 1, + .base_shift = 0, + .base_values = slic_base, + .n_base_values = ARRAY_SIZE(slic_base), + + .div_reg = REG_SPI_CLK_DIV_SEL, + .div_bits = 5, + .div_shift = 24, + .div_val0 = 20, + .div_step = 2, + }, { + .id = EN7523_CLK_SPI, + .name = "spi", + + .base_reg = REG_SPI_CLK_DIV_SEL, + + .base_value = 400000000, + + .div_bits = 5, + .div_shift = 8, + .div_val0 = 40, + .div_step = 2, + }, { + .id = EN7523_CLK_NPU, + .name = "npu", + + .base_reg = REG_NPU_CLK_DIV_SEL, + .base_bits = 2, + .base_shift = 8, + .base_values = npu_base, + .n_base_values = ARRAY_SIZE(npu_base), + + .div_bits = 3, + .div_shift = 0, + .div_step = 1, + }, { + .id = EN7523_CLK_CRYPTO, + .name = "crypto", + + .base_reg = REG_CRYPTO_CLKSRC, + .base_bits = 1, + .base_shift = 8, + .base_values = emi_base, + .n_base_values = ARRAY_SIZE(emi_base), + } +}; + +static const struct of_device_id of_match_clk_en7523[] = { + { .compatible = "airoha,en7523-scu", }, + { /* sentinel */ } +}; + +static unsigned int en7523_get_base_rate(void __iomem *base, unsigned int i) +{ + const struct en_clk_desc *desc = &en7523_base_clks[i]; + u32 val; + + if (!desc->base_bits) + return desc->base_value; + + val = readl(base + desc->base_reg); + val >>= desc->base_shift; + val &= (1 << desc->base_bits) - 1; + + if (val >= desc->n_base_values) + return 0; + + return desc->base_values[val]; +} + +static u32 en7523_get_div(void __iomem *base, int i) +{ + const struct en_clk_desc *desc = &en7523_base_clks[i]; + u32 reg, val; + + if (!desc->div_bits) + return 1; + + reg = desc->div_reg ? 
desc->div_reg : desc->base_reg; + val = readl(base + reg); + val >>= desc->div_shift; + val &= (1 << desc->div_bits) - 1; + + if (!val && desc->div_val0) + return desc->div_val0; + + return (val + 1) * desc->div_step; +} + +static int en7523_pci_is_enabled(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + + return !!(readl(cg->base + REG_PCI_CONTROL) & REG_PCI_CONTROL_REFCLK_EN1); +} + +static int en7523_pci_prepare(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val, mask; + + /* Need to pull device low before reset */ + val = readl(np_base + REG_PCI_CONTROL); + val &= ~(REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT); + writel(val, np_base + REG_PCI_CONTROL); + usleep_range(1000, 2000); + + /* Enable PCIe port 1 */ + val |= REG_PCI_CONTROL_REFCLK_EN1; + writel(val, np_base + REG_PCI_CONTROL); + usleep_range(1000, 2000); + + /* Reset to default */ + val = readl(np_base + REG_RESET_CONTROL); + mask = REG_RESET_CONTROL_PCIE1 | REG_RESET_CONTROL_PCIE2 | + REG_RESET_CONTROL_PCIEHB; + writel(val & ~mask, np_base + REG_RESET_CONTROL); + usleep_range(1000, 2000); + writel(val | mask, np_base + REG_RESET_CONTROL); + msleep(100); + writel(val & ~mask, np_base + REG_RESET_CONTROL); + usleep_range(5000, 10000); + + /* Release device */ + mask = REG_PCI_CONTROL_PERSTOUT1 | REG_PCI_CONTROL_PERSTOUT; + val = readl(np_base + REG_PCI_CONTROL); + writel(val & ~mask, np_base + REG_PCI_CONTROL); + usleep_range(1000, 2000); + writel(val | mask, np_base + REG_PCI_CONTROL); + msleep(250); + + return 0; +} + +static void en7523_pci_unprepare(struct clk_hw *hw) +{ + struct en_clk_gate *cg = container_of(hw, struct en_clk_gate, hw); + void __iomem *np_base = cg->base; + u32 val; + + val = readl(np_base + REG_PCI_CONTROL); + val &= ~REG_PCI_CONTROL_REFCLK_EN1; + writel(val, np_base + REG_PCI_CONTROL); +} + +static struct clk_hw *en7523_register_pcie_clk(struct device *dev, + void __iomem *np_base) +{ + static const struct clk_ops pcie_gate_ops = { + .is_enabled = en7523_pci_is_enabled, + .prepare = en7523_pci_prepare, + .unprepare = en7523_pci_unprepare, + }; + struct clk_init_data init = { + .name = "pcie", + .ops = &pcie_gate_ops, + }; + struct en_clk_gate *cg; + + cg = devm_kzalloc(dev, sizeof(*cg), GFP_KERNEL); + if (!cg) + return NULL; + + cg->base = np_base; + cg->hw.init = &init; + en7523_pci_unprepare(&cg->hw); + + if (clk_hw_register(dev, &cg->hw)) + return NULL; + + return &cg->hw; +} + +static void en7523_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data, + void __iomem *base, void __iomem *np_base) +{ + struct clk_hw *hw; + u32 rate; + int i; + + for (i = 0; i < ARRAY_SIZE(en7523_base_clks); i++) { + const struct en_clk_desc *desc = &en7523_base_clks[i]; + + rate = en7523_get_base_rate(base, i); + rate /= en7523_get_div(base, i); + + hw = clk_hw_register_fixed_rate(dev, desc->name, NULL, 0, rate); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %ld\n", + desc->name, PTR_ERR(hw)); + continue; + } + + clk_data->hws[desc->id] = hw; + } + + hw = en7523_register_pcie_clk(dev, np_base); + clk_data->hws[EN7523_CLK_PCIE] = hw; + + clk_data->num = EN7523_NUM_CLOCKS; +} + +static int en7523_clk_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct clk_hw_onecell_data *clk_data; + void __iomem *base, *np_base; + int r; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return 
PTR_ERR(base); + + np_base = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(np_base)) + return PTR_ERR(np_base); + + clk_data = devm_kzalloc(&pdev->dev, + struct_size(clk_data, hws, EN7523_NUM_CLOCKS), + GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + en7523_register_clocks(&pdev->dev, clk_data, base, np_base); + + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + if (r) + dev_err(&pdev->dev, + "could not register clock provider: %s: %d\n", + pdev->name, r); + + return r; +} + +static struct platform_driver clk_en7523_drv = { + .probe = en7523_clk_probe, + .driver = { + .name = "clk-en7523", + .of_match_table = of_match_clk_en7523, + .suppress_bind_attrs = true, + }, +}; + +static int __init clk_en7523_init(void) +{ + return platform_driver_register(&clk_en7523_drv); +} + +arch_initcall(clk_en7523_init); diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index 45501637705c..ac68a6b40f0e 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -87,7 +87,7 @@ struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev, hw = &fixed->hw; if (dev || !np) ret = clk_hw_register(dev, hw); - else if (np) + else ret = of_clk_hw_register(np, hw); if (ret) { kfree(fixed); diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c index 5e80f3d090f3..5f85b0a32872 100644 --- a/drivers/clk/clk-max9485.c +++ b/drivers/clk/clk-max9485.c @@ -254,8 +254,7 @@ max9485_of_clk_get(struct of_phandle_args *clkspec, void *data) return &drvdata->hw[idx].hw; } -static int max9485_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int max9485_i2c_probe(struct i2c_client *client) { struct max9485_driver_data *drvdata; struct device *dev = &client->dev; @@ -377,7 +376,7 @@ static struct i2c_driver max9485_driver = { .pm = &max9485_pm_ops, .of_match_table = max9485_dt_ids, }, - .probe = max9485_i2c_probe, + .probe_new = max9485_i2c_probe, .id_table = max9485_i2c_ids, }; module_i2c_driver(max9485_driver); diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 214045f6e989..fa817c317c2a 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -157,11 +157,11 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np, struct clk_mux *mux; struct clk_hw *hw; struct clk_init_data init = {}; - u8 width = 0; int ret = -EINVAL; if (clk_mux_flags & CLK_MUX_HIWORD_MASK) { - width = fls(mask) - ffs(mask) + 1; + u8 width = fls(mask) - ffs(mask) + 1; + if (width + shift > 16) { pr_err("mux value exceeds LOWORD field\n"); return ERR_PTR(-EINVAL); diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c index 59d9cf0053eb..4f5df1fc74b4 100644 --- a/drivers/clk/clk-renesas-pcie.c +++ b/drivers/clk/clk-renesas-pcie.c @@ -213,7 +213,7 @@ rs9_of_clk_get(struct of_phandle_args *clkspec, void *data) return rs9->clk_dif[idx]; } -static int rs9_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int rs9_probe(struct i2c_client *client) { unsigned char name[5] = "DIF0"; struct rs9_driver_data *rs9; @@ -312,7 +312,7 @@ static struct i2c_driver rs9_driver = { .pm = &rs9_pm_ops, .of_match_table = clk_rs9_of_match, }, - .probe = rs9_probe, + .probe_new = rs9_probe, .id_table = rs9_id, }; module_i2c_driver(rs9_driver); diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c index 364b62b9928d..4481c4303534 100644 --- a/drivers/clk/clk-si514.c +++ b/drivers/clk/clk-si514.c @@ -327,8 +327,7 @@ static const struct regmap_config si514_regmap_config = { 
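
[Editor's note - illustrative sketch, not part of the diff above.] The en7523 rate decode in en7523_get_base_rate()/en7523_get_div() can be illustrated with a small standalone program. The register value below is hypothetical; the field layout (1-bit base select at bit 8, 3-bit divider at bit 0, step 1, no div_val0 special case) is the one given for the "gsw" entry of en7523_base_clks[] above.

/* Illustrative decode only; mirrors the gsw descriptor, hypothetical register value. */
#include <stdio.h>
#include <stdint.h>

static const uint32_t gsw_base[] = { 400000000, 500000000 };

int main(void)
{
	uint32_t reg = 0x103;			/* hypothetical REG_GSW_CLK_DIV_SEL contents */
	uint32_t base_sel = (reg >> 8) & 0x1;	/* base_shift = 8, base_bits = 1 */
	uint32_t div_val = reg & 0x7;		/* div_shift = 0, div_bits = 3 */
	uint32_t div = (div_val + 1) * 1;	/* div_step = 1 */
	uint32_t rate = gsw_base[base_sel] / div;

	printf("gsw: base %u Hz / %u = %u Hz\n", gsw_base[base_sel], div, rate);
	return 0;
}

With reg = 0x103 this prints 500000000 / 4 = 125000000 Hz, i.e. exactly what en7523_register_clocks() would hand to clk_hw_register_fixed_rate().
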
.volatile_reg = si514_regmap_is_volatile, }; -static int si514_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si514_probe(struct i2c_client *client) { struct clk_si514 *data; struct clk_init_data init; @@ -394,7 +393,7 @@ static struct i2c_driver si514_driver = { .name = "si514", .of_match_table = clk_si514_of_match, }, - .probe = si514_probe, + .probe_new = si514_probe, .remove = si514_remove, .id_table = si514_id, }; diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 41851f41b682..4bca73212662 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -1547,8 +1547,7 @@ static const struct attribute *si5341_attributes[] = { NULL }; -static int si5341_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int si5341_probe(struct i2c_client *client) { struct clk_si5341 *data; struct clk_init_data init; @@ -1837,7 +1836,7 @@ static struct i2c_driver si5341_driver = { .name = "si5341", .of_match_table = clk_si5341_of_match, }, - .probe = si5341_probe, + .probe_new = si5341_probe, .remove = si5341_remove, .id_table = si5341_id, }; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 93fa8c9e11be..b9f088c4ba2f 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -1367,9 +1367,18 @@ si53351_of_clk_get(struct of_phandle_args *clkspec, void *data) } #endif /* CONFIG_OF */ -static int si5351_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id si5351_i2c_ids[] = { + { "si5351a", SI5351_VARIANT_A }, + { "si5351a-msop", SI5351_VARIANT_A3 }, + { "si5351b", SI5351_VARIANT_B }, + { "si5351c", SI5351_VARIANT_C }, + { } +}; +MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids); + +static int si5351_i2c_probe(struct i2c_client *client) { + const struct i2c_device_id *id = i2c_match_id(si5351_i2c_ids, client); enum si5351_variant variant = (enum si5351_variant)id->driver_data; struct si5351_platform_data *pdata; struct si5351_driver_data *drvdata; @@ -1649,21 +1658,12 @@ static int si5351_i2c_remove(struct i2c_client *client) return 0; } -static const struct i2c_device_id si5351_i2c_ids[] = { - { "si5351a", SI5351_VARIANT_A }, - { "si5351a-msop", SI5351_VARIANT_A3 }, - { "si5351b", SI5351_VARIANT_B }, - { "si5351c", SI5351_VARIANT_C }, - { } -}; -MODULE_DEVICE_TABLE(i2c, si5351_i2c_ids); - static struct i2c_driver si5351_driver = { .driver = { .name = "si5351", .of_match_table = of_match_ptr(si5351_dt_ids), }, - .probe = si5351_i2c_probe, + .probe_new = si5351_i2c_probe, .remove = si5351_i2c_remove, .id_table = si5351_i2c_ids, }; diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c index d9ec9086184d..089786907641 100644 --- a/drivers/clk/clk-si544.c +++ b/drivers/clk/clk-si544.c @@ -451,11 +451,19 @@ static const struct regmap_config si544_regmap_config = { .volatile_reg = si544_regmap_is_volatile, }; -static int si544_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id si544_id[] = { + { "si544a", si544a }, + { "si544b", si544b }, + { "si544c", si544c }, + { } +}; +MODULE_DEVICE_TABLE(i2c, si544_id); + +static int si544_probe(struct i2c_client *client) { struct clk_si544 *data; struct clk_init_data init; + const struct i2c_device_id *id = i2c_match_id(si544_id, client); int err; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); @@ -499,14 +507,6 @@ static int si544_probe(struct i2c_client *client, return 0; } -static const struct i2c_device_id si544_id[] = { - { "si544a", 
si544a }, - { "si544b", si544b }, - { "si544c", si544c }, - { } -}; -MODULE_DEVICE_TABLE(i2c, si544_id); - static const struct of_device_id clk_si544_of_match[] = { { .compatible = "silabs,si544a" }, { .compatible = "silabs,si544b" }, @@ -520,7 +520,7 @@ static struct i2c_driver si544_driver = { .name = "si544", .of_match_table = clk_si544_of_match, }, - .probe = si544_probe, + .probe_new = si544_probe, .id_table = si544_id, }; module_i2c_driver(si544_driver); diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c index eea50121718a..1ff8f32f734d 100644 --- a/drivers/clk/clk-si570.c +++ b/drivers/clk/clk-si570.c @@ -398,11 +398,20 @@ static const struct regmap_config si570_regmap_config = { .volatile_reg = si570_regmap_is_volatile, }; -static int si570_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static const struct i2c_device_id si570_id[] = { + { "si570", si57x }, + { "si571", si57x }, + { "si598", si59x }, + { "si599", si59x }, + { } +}; +MODULE_DEVICE_TABLE(i2c, si570_id); + +static int si570_probe(struct i2c_client *client) { struct clk_si570 *data; struct clk_init_data init; + const struct i2c_device_id *id = i2c_match_id(si570_id, client); u32 initial_fout, factory_fout, stability; bool skip_recall; int err; @@ -495,15 +504,6 @@ static int si570_remove(struct i2c_client *client) return 0; } -static const struct i2c_device_id si570_id[] = { - { "si570", si57x }, - { "si571", si57x }, - { "si598", si59x }, - { "si599", si59x }, - { } -}; -MODULE_DEVICE_TABLE(i2c, si570_id); - static const struct of_device_id clk_si570_of_match[] = { { .compatible = "silabs,si570" }, { .compatible = "silabs,si571" }, @@ -518,7 +518,7 @@ static struct i2c_driver si570_driver = { .name = "si570", .of_match_table = clk_si570_of_match, }, - .probe = si570_probe, + .probe_new = si570_probe, .remove = si570_remove, .id_table = si570_id, }; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index ed119182aa1b..f00d4c1158d7 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -108,17 +108,10 @@ struct clk { /*** runtime pm ***/ static int clk_pm_runtime_get(struct clk_core *core) { - int ret; - if (!core->rpm_enabled) return 0; - ret = pm_runtime_get_sync(core->dev); - if (ret < 0) { - pm_runtime_put_noidle(core->dev); - return ret; - } - return 0; + return pm_runtime_resume_and_get(core->dev); } static void clk_pm_runtime_put(struct clk_core *core) diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 2dfd6149e528..cbf0d7955a00 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -178,7 +178,7 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, unsigned long flags) { struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw; - struct clk_hw *div_hw, *gate_hw; + struct clk_hw *div_hw, *gate_hw = NULL; struct clk_divider *div = NULL; struct clk_gate *gate = NULL; struct clk_mux *mux = NULL; @@ -223,14 +223,17 @@ struct clk_hw *__imx8m_clk_hw_composite(const char *name, div->lock = &imx_ccm_lock; div->flags = CLK_DIVIDER_ROUND_CLOSEST; - gate = kzalloc(sizeof(*gate), GFP_KERNEL); - if (!gate) - goto fail; + /* skip registering the gate ops if M4 is enabled */ + if (!mcore_booted) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + goto fail; - gate_hw = &gate->hw; - gate->reg = reg; - gate->bit_idx = PCG_CGC_SHIFT; - gate->lock = &imx_ccm_lock; + gate_hw = &gate->hw; + gate->reg = reg; + gate->bit_idx = PCG_CGC_SHIFT; + gate->lock = &imx_ccm_lock; + } hw = clk_hw_register_composite(NULL, name, 
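
[Editor's note - illustrative sketch, not part of the diff above.] The si5351/si544/si570 hunks above all apply the same .probe_new conversion: the id-table entry that the old probe callback received as a second argument is now looked up explicitly with i2c_match_id(), which is why the id tables were moved ahead of the probe functions. A minimal kernel-style sketch of the pattern; the foo_* names are hypothetical and this is not a buildable module of the document.

/* Sketch of the .probe_new / i2c_match_id() pattern used above (hypothetical names). */
#include <linux/i2c.h>
#include <linux/module.h>

static const struct i2c_device_id foo_ids[] = {
	{ "foo-a", 0 },
	{ "foo-b", 1 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, foo_ids);

static int foo_probe(struct i2c_client *client)
{
	/* Old style: probe(client, id); new style: resolve the id ourselves. */
	const struct i2c_device_id *id = i2c_match_id(foo_ids, client);
	unsigned long variant = id ? id->driver_data : 0;

	dev_info(&client->dev, "probed variant %lu\n", variant);
	return 0;
}

static struct i2c_driver foo_driver = {
	.driver = { .name = "foo" },
	.probe_new = foo_probe,
	.id_table = foo_ids,
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");
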
parent_names, num_parents, mux_hw, mux_ops, div_hw, diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 3f6fd7ef2a68..cbf8131c63f7 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -782,7 +782,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_DRAM_PHYM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_phym_alt_root_clk", "dram_phym_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); hws[IMX7D_DRAM_ALT_ROOT_CLK] = imx_clk_hw_gate2_flags("dram_alt_root_clk", "dram_alt_post_div", base + 0x4130, 0, CLK_IS_CRITICAL | CLK_OPS_PARENT_ENABLE); hws[IMX7D_OCOTP_CLK] = imx_clk_hw_gate4("ocotp_clk", "ipg_root_clk", base + 0x4230, 0); - hws[IMX7D_SNVS_CLK] = imx_clk_hw_gate4("snvs_clk", "ipg_root_clk", base + 0x4250, 0); hws[IMX7D_MU_ROOT_CLK] = imx_clk_hw_gate4("mu_root_clk", "ipg_root_clk", base + 0x4270, 0); hws[IMX7D_CAAM_CLK] = imx_clk_hw_gate4("caam_clk", "ipg_root_clk", base + 0x4240, 0); hws[IMX7D_USB_HSIC_ROOT_CLK] = imx_clk_hw_gate4("usb_hsic_root_clk", "usb_hsic_post_div", base + 0x4690, 0); diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index e8cbe181ec06..b6d275855b36 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -560,7 +560,6 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); hws[IMX8MM_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); hws[IMX8MM_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); - hws[IMX8MM_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0); hws[IMX8MM_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); hws[IMX8MM_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); @@ -639,6 +638,8 @@ static struct platform_driver imx8mm_clk_driver = { }, }; module_platform_driver(imx8mm_clk_driver); +module_param(mcore_booted, bool, S_IRUGO); +MODULE_PARM_DESC(mcore_booted, "See Cortex-M core is booted or not"); MODULE_AUTHOR("Bai Ping <ping.bai@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8MM clock driver"); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index 92fcbab4f5be..d37c45b676ab 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -227,6 +227,30 @@ static const char * const imx8mn_pwm4_sels[] = {"osc_24m", "sys_pll2_100m", "sys "sys_pll1_40m", "sys_pll3_out", "clk_ext2", "sys_pll1_80m", "video_pll1_out", }; +static const char * const imx8mn_gpt1_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + +static const char * const imx8mn_gpt2_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + +static const char * const imx8mn_gpt3_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + +static const char * const imx8mn_gpt4_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + +static const char 
* const imx8mn_gpt5_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + +static const char * const imx8mn_gpt6_sels[] = {"osc_24m", "sys_pll2_100m", "sys_pll1_400m", + "sys_pll1_40m", "video_pll1_out", "sys_pll1_80m", + "audio_pll1_out", "clk_ext1", }; + static const char * const imx8mn_wdog_sels[] = {"osc_24m", "sys_pll1_133m", "sys_pll1_160m", "vpu_pll_out", "sys_pll2_125m", "sys_pll3_out", "sys_pll1_80m", "sys_pll2_166m", }; @@ -476,6 +500,12 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_PWM2] = imx8m_clk_hw_composite("pwm2", imx8mn_pwm2_sels, base + 0xb400); hws[IMX8MN_CLK_PWM3] = imx8m_clk_hw_composite("pwm3", imx8mn_pwm3_sels, base + 0xb480); hws[IMX8MN_CLK_PWM4] = imx8m_clk_hw_composite("pwm4", imx8mn_pwm4_sels, base + 0xb500); + hws[IMX8MN_CLK_GPT1] = imx8m_clk_hw_composite("gpt1", imx8mn_gpt1_sels, base + 0xb580); + hws[IMX8MN_CLK_GPT2] = imx8m_clk_hw_composite("gpt2", imx8mn_gpt2_sels, base + 0xb600); + hws[IMX8MN_CLK_GPT3] = imx8m_clk_hw_composite("gpt3", imx8mn_gpt3_sels, base + 0xb680); + hws[IMX8MN_CLK_GPT4] = imx8m_clk_hw_composite("gpt4", imx8mn_gpt4_sels, base + 0xb700); + hws[IMX8MN_CLK_GPT5] = imx8m_clk_hw_composite("gpt5", imx8mn_gpt5_sels, base + 0xb780); + hws[IMX8MN_CLK_GPT6] = imx8m_clk_hw_composite("gpt6", imx8mn_gpt6_sels, base + 0xb800); hws[IMX8MN_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mn_wdog_sels, base + 0xb900); hws[IMX8MN_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mn_wrclk_sels, base + 0xb980); hws[IMX8MN_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mn_clko1_sels, base + 0xba00); @@ -501,6 +531,12 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_GPIO3_ROOT] = imx_clk_hw_gate4("gpio3_root_clk", "ipg_root", base + 0x40d0, 0); hws[IMX8MN_CLK_GPIO4_ROOT] = imx_clk_hw_gate4("gpio4_root_clk", "ipg_root", base + 0x40e0, 0); hws[IMX8MN_CLK_GPIO5_ROOT] = imx_clk_hw_gate4("gpio5_root_clk", "ipg_root", base + 0x40f0, 0); + hws[IMX8MN_CLK_GPT1_ROOT] = imx_clk_hw_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0); + hws[IMX8MN_CLK_GPT2_ROOT] = imx_clk_hw_gate4("gpt2_root_clk", "gpt2", base + 0x4110, 0); + hws[IMX8MN_CLK_GPT3_ROOT] = imx_clk_hw_gate4("gpt3_root_clk", "gpt3", base + 0x4120, 0); + hws[IMX8MN_CLK_GPT4_ROOT] = imx_clk_hw_gate4("gpt4_root_clk", "gpt4", base + 0x4130, 0); + hws[IMX8MN_CLK_GPT5_ROOT] = imx_clk_hw_gate4("gpt5_root_clk", "gpt5", base + 0x4140, 0); + hws[IMX8MN_CLK_GPT6_ROOT] = imx_clk_hw_gate4("gpt6_root_clk", "gpt6", base + 0x4150, 0); hws[IMX8MN_CLK_I2C1_ROOT] = imx_clk_hw_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0); hws[IMX8MN_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0); hws[IMX8MN_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0); @@ -522,7 +558,6 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); hws[IMX8MN_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); hws[IMX8MN_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); - hws[IMX8MN_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0); hws[IMX8MN_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); hws[IMX8MN_CLK_UART2_ROOT] = 
imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); @@ -549,6 +584,8 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0); hws[IMX8MN_CLK_SAI7_ROOT] = imx_clk_hw_gate2_shared2("sai7_root_clk", "sai7", base + 0x4650, 0, &share_count_sai7); + hws[IMX8MN_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8); + hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4); hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core", @@ -594,6 +631,8 @@ static struct platform_driver imx8mn_clk_driver = { }, }; module_platform_driver(imx8mn_clk_driver); +module_param(mcore_booted, bool, S_IRUGO); +MODULE_PARM_DESC(mcore_booted, "See Cortex-M core is booted or not"); MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8MN clock driver"); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 18f5b7c3ca9d..e89db568f5a8 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -358,7 +358,7 @@ static const char * const imx8mp_media_mipi_phy1_ref_sels[] = {"osc_24m", "sys_p "clk_ext2", "audio_pll2_out", "video_pll1_out", }; -static const char * const imx8mp_media_disp1_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", +static const char * const imx8mp_media_disp_pix_sels[] = {"osc_24m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "clk_ext4", }; @@ -399,6 +399,11 @@ static const char * const imx8mp_sai7_sels[] = {"osc_24m", "audio_pll1_out", "au static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; +static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out", + "dummy", "dummy", "gpu_pll_out", "vpu_pll_out", + "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3", + "dummy", "dummy", "osc_24m", "dummy", "osc_32k"}; + static struct clk_hw **hws; static struct clk_hw_onecell_data *clk_hw_data; @@ -504,6 +509,15 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_SYS_PLL2_500M] = imx_clk_hw_fixed_factor("sys_pll2_500m", "sys_pll2_out", 1, 2); hws[IMX8MP_SYS_PLL2_1000M] = imx_clk_hw_fixed_factor("sys_pll2_1000m", "sys_pll2_out", 1, 1); + hws[IMX8MP_CLK_CLKOUT1_SEL] = imx_clk_hw_mux2("clkout1_sel", anatop_base + 0x128, 4, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT1_DIV] = imx_clk_hw_divider("clkout1_div", "clkout1_sel", anatop_base + 0x128, 0, 4); + hws[IMX8MP_CLK_CLKOUT1] = imx_clk_hw_gate("clkout1", "clkout1_div", anatop_base + 0x128, 8); + hws[IMX8MP_CLK_CLKOUT2_SEL] = imx_clk_hw_mux2("clkout2_sel", anatop_base + 0x128, 20, 4, + imx8mp_clkout_sels, ARRAY_SIZE(imx8mp_clkout_sels)); + hws[IMX8MP_CLK_CLKOUT2_DIV] = imx_clk_hw_divider("clkout2_div", "clkout2_sel", anatop_base + 0x128, 16, 4); + hws[IMX8MP_CLK_CLKOUT2] = imx_clk_hw_gate("clkout2", "clkout2_div", anatop_base + 0x128, 24); + hws[IMX8MP_CLK_A53_DIV] = imx8m_clk_hw_composite_core("arm_a53_div", imx8mp_a53_sels, ccm_base + 0x8000); hws[IMX8MP_CLK_A53_SRC] = hws[IMX8MP_CLK_A53_DIV]; hws[IMX8MP_CLK_A53_CG] = hws[IMX8MP_CLK_A53_DIV]; @@ -538,6 +552,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_AHB] = imx8m_clk_hw_composite_bus_critical("ahb_root", imx8mp_ahb_sels, ccm_base + 0x9000); 
hws[IMX8MP_CLK_AUDIO_AHB] = imx8m_clk_hw_composite_bus("audio_ahb", imx8mp_audio_ahb_sels, ccm_base + 0x9100); hws[IMX8MP_CLK_MIPI_DSI_ESC_RX] = imx8m_clk_hw_composite_bus("mipi_dsi_esc_rx", imx8mp_mipi_dsi_esc_rx_sels, ccm_base + 0x9200); + hws[IMX8MP_CLK_MEDIA_DISP2_PIX] = imx8m_clk_hw_composite("media_disp2_pix", imx8mp_media_disp_pix_sels, ccm_base + 0x9300); hws[IMX8MP_CLK_IPG_ROOT] = imx_clk_hw_divider2("ipg_root", "ahb_root", ccm_base + 0x9080, 0, 1); @@ -600,7 +615,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80); hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00); hws[IMX8MP_CLK_MEDIA_MIPI_PHY1_REF] = imx8m_clk_hw_composite("media_mipi_phy1_ref", imx8mp_media_mipi_phy1_ref_sels, ccm_base + 0xbd80); - hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp1_pix_sels, ccm_base + 0xbe00); + hws[IMX8MP_CLK_MEDIA_DISP1_PIX] = imx8m_clk_hw_composite("media_disp1_pix", imx8mp_media_disp_pix_sels, ccm_base + 0xbe00); hws[IMX8MP_CLK_MEDIA_CAM2_PIX] = imx8m_clk_hw_composite("media_cam2_pix", imx8mp_media_cam2_pix_sels, ccm_base + 0xbe80); hws[IMX8MP_CLK_MEDIA_LDB] = imx8m_clk_hw_composite("media_ldb", imx8mp_media_ldb_sels, ccm_base + 0xbf00); hws[IMX8MP_CLK_MEMREPAIR] = imx8m_clk_hw_composite_critical("mem_repair", imx8mp_memrepair_sels, ccm_base + 0xbf80); @@ -654,12 +669,11 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_SIM_ENET_ROOT] = imx_clk_hw_gate4("sim_enet_root_clk", "enet_axi", ccm_base + 0x4400, 0); hws[IMX8MP_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", ccm_base + 0x4450, 0); hws[IMX8MP_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", ccm_base + 0x4460, 0); - hws[IMX8MP_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", ccm_base + 0x4470, 0); hws[IMX8MP_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", ccm_base + 0x4490, 0); hws[IMX8MP_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", ccm_base + 0x44a0, 0); hws[IMX8MP_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", ccm_base + 0x44b0, 0); hws[IMX8MP_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", ccm_base + 0x44c0, 0); - hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "osc_32k", ccm_base + 0x44d0, 0); + hws[IMX8MP_CLK_USB_ROOT] = imx_clk_hw_gate4("usb_root_clk", "hsio_axi", ccm_base + 0x44d0, 0); hws[IMX8MP_CLK_USB_PHY_ROOT] = imx_clk_hw_gate4("usb_phy_root_clk", "usb_phy_ref", ccm_base + 0x44f0, 0); hws[IMX8MP_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", ccm_base + 0x4510, 0); hws[IMX8MP_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", ccm_base + 0x4520, 0); @@ -721,6 +735,8 @@ static struct platform_driver imx8mp_clk_driver = { }, }; module_platform_driver(imx8mp_clk_driver); +module_param(mcore_booted, bool, S_IRUGO); +MODULE_PARM_DESC(mcore_booted, "See Cortex-M core is booted or not"); MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8MP clock driver"); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 83cc2b1c3294..882dcad4817d 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -25,7 +25,7 @@ static u32 share_count_sai6; static u32 share_count_dcss; static u32 share_count_nand; -static const char * const pll_ref_sels[] = { "osc_25m", 
"osc_27m", "dummy", "dummy", }; +static const char * const pll_ref_sels[] = { "osc_25m", "osc_27m", "hdmi_phy_27m", "dummy", }; static const char * const arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", }; static const char * const gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", }; static const char * const vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", }; @@ -557,7 +557,6 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) hws[IMX8MQ_CLK_SAI5_IPG] = imx_clk_hw_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5); hws[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_hw_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6); hws[IMX8MQ_CLK_SAI6_IPG] = imx_clk_hw_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6); - hws[IMX8MQ_CLK_SNVS_ROOT] = imx_clk_hw_gate4("snvs_root_clk", "ipg_root", base + 0x4470, 0); hws[IMX8MQ_CLK_UART1_ROOT] = imx_clk_hw_gate4("uart1_root_clk", "uart1", base + 0x4490, 0); hws[IMX8MQ_CLK_UART2_ROOT] = imx_clk_hw_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0); hws[IMX8MQ_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); @@ -632,6 +631,8 @@ static struct platform_driver imx8mq_clk_driver = { }, }; module_platform_driver(imx8mq_clk_driver); +module_param(mcore_booted, bool, S_IRUGO); +MODULE_PARM_DESC(mcore_booted, "See Cortex-M core is booted or not"); MODULE_AUTHOR("Abel Vesa <abel.vesa@nxp.com>"); MODULE_DESCRIPTION("NXP i.MX8MQ clock driver"); diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index 083da31dc3ea..5b022eeb838b 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -528,7 +528,7 @@ static int imx_clk_scu_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); + ret = pm_runtime_resume_and_get(dev); if (ret) { pm_genpd_remove_device(dev); pm_runtime_disable(dev); @@ -837,8 +837,10 @@ struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_na if (!clk_node) return ERR_PTR(-ENOMEM); - if (!imx_scu_clk_is_valid(rsrc_id)) + if (!imx_scu_clk_is_valid(rsrc_id)) { + kfree(clk_node); return ERR_PTR(-EINVAL); + } clk = kzalloc(sizeof(*clk), GFP_KERNEL); if (!clk) { diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c index 7cc669934253..5582f18dd632 100644 --- a/drivers/clk/imx/clk.c +++ b/drivers/clk/imx/clk.c @@ -17,6 +17,9 @@ DEFINE_SPINLOCK(imx_ccm_lock); EXPORT_SYMBOL_GPL(imx_ccm_lock); +bool mcore_booted; +EXPORT_SYMBOL_GPL(mcore_booted); + void imx_unregister_clocks(struct clk *clks[], unsigned int count) { unsigned int i; @@ -173,6 +176,8 @@ void imx_register_uart_clocks(unsigned int clk_count) int i; imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL); + if (!imx_uart_clocks) + return; if (!of_stdout) return; diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index a7cbbcd1a3f4..5061a06468df 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -7,6 +7,7 @@ #include <linux/clk-provider.h> extern spinlock_t imx_ccm_lock; +extern bool mcore_booted; void imx_check_clocks(struct clk *clks[], unsigned int count); void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count); diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c index af31633a8862..861c50d6cb24 100644 --- a/drivers/clk/ingenic/cgu.c +++ b/drivers/clk/ingenic/cgu.c @@ -660,7 +660,7 @@ static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx) 
ingenic_clk->idx = idx; clk_init.name = clk_info->name; - clk_init.flags = 0; + clk_init.flags = clk_info->flags; clk_init.parent_names = parent_names; caps = clk_info->type; diff --git a/drivers/clk/ingenic/cgu.h b/drivers/clk/ingenic/cgu.h index bfc2b9c38a41..147b7df0d657 100644 --- a/drivers/clk/ingenic/cgu.h +++ b/drivers/clk/ingenic/cgu.h @@ -136,6 +136,7 @@ struct ingenic_cgu_custom_info { * struct ingenic_cgu_clk_info - information about a clock * @name: name of the clock * @type: a bitmask formed from CGU_CLK_* values + * @flags: common clock flags to set on this clock * @parents: an array of the indices of potential parents of this clock * within the clock_info array of the CGU, or -1 in entries * which correspond to no valid parent @@ -161,6 +162,8 @@ struct ingenic_cgu_clk_info { CGU_CLK_CUSTOM = BIT(7), } type; + unsigned long flags; + int parents[4]; union { diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c index 15d61793f53b..590e9c85cb25 100644 --- a/drivers/clk/ingenic/jz4725b-cgu.c +++ b/drivers/clk/ingenic/jz4725b-cgu.c @@ -87,6 +87,11 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { [JZ4725B_CLK_CCLK] = { "cclk", CGU_CLK_DIV, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0, @@ -114,6 +119,11 @@ static const struct ingenic_cgu_clk_info jz4725b_cgu_clocks[] = { [JZ4725B_CLK_MCLK] = { "mclk", CGU_CLK_DIV, + /* + * Disabling MCLK or its parents will render DRAM + * inaccessible; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4725B_CLK_PLL, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0, diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c index 43ffb62c42bb..3e0a30574ebb 100644 --- a/drivers/clk/ingenic/jz4740-cgu.c +++ b/drivers/clk/ingenic/jz4740-cgu.c @@ -102,6 +102,11 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { [JZ4740_CLK_CCLK] = { "cclk", CGU_CLK_DIV, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0, @@ -129,6 +134,11 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = { [JZ4740_CLK_MCLK] = { "mclk", CGU_CLK_DIV, + /* + * Disabling MCLK or its parents will render DRAM + * inaccessible; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4740_CLK_PLL, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0, diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c index 8fdd383560fb..ecd395ac8a28 100644 --- a/drivers/clk/ingenic/jz4760-cgu.c +++ b/drivers/clk/ingenic/jz4760-cgu.c @@ -143,6 +143,11 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = { [JZ4760_CLK_CCLK] = { "cclk", CGU_CLK_DIV, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4760_CLK_PLL0, }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0, @@ -175,6 +180,11 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = { }, [JZ4760_CLK_MCLK] = { "mclk", CGU_CLK_DIV, + /* + * Disabling MCLK or its parents will render DRAM + * inaccessible; mark it critical. 
+ */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4760_CLK_PLL0, }, .div = { CGU_REG_CPCCR, 12, 1, 4, 22, -1, -1, 0, diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index 7ef91257630e..6ae1740367f9 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -149,6 +149,11 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = { [JZ4770_CLK_CCLK] = { "cclk", CGU_CLK_DIV, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4770_CLK_PLL0, }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1, 0, diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index e357c228e0f1..b1dadc0a5e75 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -341,12 +341,22 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { [JZ4780_CLK_CPU] = { "cpu", CGU_CLK_DIV, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CLOCKCONTROL, 0, 1, 4, 22, -1, -1 }, }, [JZ4780_CLK_L2CACHE] = { "l2cache", CGU_CLK_DIV, + /* + * The L2 cache clock is critical if caches are enabled and + * disabling it or any parent clocks will hang the system. + */ + .flags = CLK_IS_CRITICAL, .parents = { JZ4780_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CLOCKCONTROL, 4, 1, 4, -1, -1, -1 }, }, @@ -380,6 +390,11 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { [JZ4780_CLK_DDR] = { "ddr", CGU_CLK_MUX | CGU_CLK_DIV, + /* + * Disabling DDR clock or its parents will render DRAM + * inaccessible; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { -1, JZ4780_CLK_SCLKA, JZ4780_CLK_MPLL, -1 }, .mux = { CGU_REG_DDRCDR, 30, 2 }, .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 }, diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index 77acfbeb4830..201bf6e6b6e0 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -31,6 +31,7 @@ struct ingenic_soc_info { unsigned int num_channels; bool has_ost; bool has_tcu_clk; + bool allow_missing_tcu_clk; }; struct ingenic_tcu_clk_info { @@ -320,7 +321,8 @@ static const struct ingenic_soc_info jz4770_soc_info = { static const struct ingenic_soc_info x1000_soc_info = { .num_channels = 8, .has_ost = false, /* X1000 has OST, but it not belong TCU */ - .has_tcu_clk = false, + .has_tcu_clk = true, + .allow_missing_tcu_clk = true, }; static const struct of_device_id __maybe_unused ingenic_tcu_of_match[] __initconst = { @@ -355,14 +357,27 @@ static int __init ingenic_tcu_probe(struct device_node *np) tcu->clk = of_clk_get_by_name(np, "tcu"); if (IS_ERR(tcu->clk)) { ret = PTR_ERR(tcu->clk); - pr_crit("Cannot get TCU clock\n"); - goto err_free_tcu; - } - ret = clk_prepare_enable(tcu->clk); - if (ret) { - pr_crit("Unable to enable TCU clock\n"); - goto err_put_clk; + /* + * Old device trees for some SoCs did not include the + * TCU clock because this driver (incorrectly) didn't + * use it. In this case we complain loudly and attempt + * to continue without the clock, which might work if + * booting with workarounds like "clk_ignore_unused". 
+ */ + if (tcu->soc_info->allow_missing_tcu_clk && ret == -EINVAL) { + pr_warn("TCU clock missing from device tree, please update your device tree\n"); + tcu->clk = NULL; + } else { + pr_crit("Cannot get TCU clock from device tree\n"); + goto err_free_tcu; + } + } else { + ret = clk_prepare_enable(tcu->clk); + if (ret) { + pr_crit("Unable to enable TCU clock\n"); + goto err_put_clk; + } } } @@ -432,10 +447,10 @@ err_unregister_timer_clocks: clk_hw_unregister(tcu->clocks->hws[i]); kfree(tcu->clocks); err_clk_disable: - if (tcu->soc_info->has_tcu_clk) + if (tcu->clk) clk_disable_unprepare(tcu->clk); err_put_clk: - if (tcu->soc_info->has_tcu_clk) + if (tcu->clk) clk_put(tcu->clk); err_free_tcu: kfree(tcu); diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c index 3c4d5a77ccbd..b2ce3fb83f54 100644 --- a/drivers/clk/ingenic/x1000-cgu.c +++ b/drivers/clk/ingenic/x1000-cgu.c @@ -251,6 +251,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { [X1000_CLK_CPU] = { "cpu", CGU_CLK_DIV | CGU_CLK_GATE, + /* + * Disabling the CPU clock or any parent clocks will hang the + * system; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { X1000_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, .gate = { CGU_REG_CLKGR, 30 }, @@ -258,6 +263,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { [X1000_CLK_L2CACHE] = { "l2cache", CGU_CLK_DIV, + /* + * The L2 cache clock is critical if caches are enabled and + * disabling it or any parent clocks will hang the system. + */ + .flags = CLK_IS_CRITICAL, .parents = { X1000_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, }, @@ -290,6 +300,11 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { [X1000_CLK_DDR] = { "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + /* + * Disabling DDR clock or its parents will render DRAM + * inaccessible; mark it critical. + */ + .flags = CLK_IS_CRITICAL, .parents = { -1, X1000_CLK_SCLKA, X1000_CLK_MPLL, -1 }, .mux = { CGU_REG_DDRCDR, 30, 2 }, .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 }, diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c index e01ec2dc7a1a..0fd46e50a513 100644 --- a/drivers/clk/ingenic/x1830-cgu.c +++ b/drivers/clk/ingenic/x1830-cgu.c @@ -225,6 +225,7 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = { [X1830_CLK_CPU] = { "cpu", CGU_CLK_DIV | CGU_CLK_GATE, + .flags = CLK_IS_CRITICAL, .parents = { X1830_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 0, 1, 4, 22, -1, -1 }, .gate = { CGU_REG_CLKGR1, 15 }, @@ -232,6 +233,11 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = { [X1830_CLK_L2CACHE] = { "l2cache", CGU_CLK_DIV, + /* + * The L2 cache clock is critical if caches are enabled and + * disabling it or any parent clocks will hang the system. + */ + .flags = CLK_IS_CRITICAL, .parents = { X1830_CLK_CPUMUX, -1, -1, -1 }, .div = { CGU_REG_CPCCR, 4, 1, 4, 22, -1, -1 }, }, @@ -264,6 +270,11 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = { [X1830_CLK_DDR] = { "ddr", CGU_CLK_MUX | CGU_CLK_DIV | CGU_CLK_GATE, + /* + * Disabling DDR clock or its parents will render DRAM + * inaccessible; mark it critical. 
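
[Editor's note - illustrative sketch, not part of the diff above.] The Ingenic changes above have two halves: cgu.c now copies clk_info->flags into the clk_init_data it hands to the clk core (it previously always passed 0), and the per-SoC tables mark the CPU, L2-cache and DDR clocks CLK_IS_CRITICAL so the core enables them at registration and never turns them off during unused-clock cleanup. A kernel-style sketch of what passing the flag amounts to, using the generic gate helper with hypothetical names and a hypothetical enable bit:

#include <linux/clk-provider.h>
#include <linux/io.h>

/*
 * Hypothetical registration: CLK_IS_CRITICAL in the common clock flags makes
 * the core prepare/enable the clock at registration time and keep it enabled,
 * which is what the cpu/l2cache/ddr entries above rely on.
 */
static struct clk_hw *foo_register_ddr_gate(struct device *dev,
					     void __iomem *reg,
					     const char *parent)
{
	return clk_hw_register_gate(dev, "foo_ddr", parent,
				    CLK_IS_CRITICAL,	/* common clk flags */
				    reg, 27,		/* hypothetical enable bit */
				    0, NULL);
}
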
+ */ + .flags = CLK_IS_CRITICAL, .parents = { -1, X1830_CLK_SCLKA, X1830_CLK_MPLL, -1 }, .mux = { CGU_REG_DDRCDR, 30, 2 }, .div = { CGU_REG_DDRCDR, 0, 1, 4, 29, 28, 27 }, diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c index aae1a4076281..19198325b909 100644 --- a/drivers/clk/keystone/syscon-clk.c +++ b/drivers/clk/keystone/syscon-clk.c @@ -162,6 +162,13 @@ static const struct ti_syscon_gate_clk_data am64_clk_data[] = { { /* Sentinel */ }, }; +static const struct ti_syscon_gate_clk_data am62_clk_data[] = { + TI_SYSCON_CLK_GATE("epwm_tbclk0", 0x0, 0), + TI_SYSCON_CLK_GATE("epwm_tbclk1", 0x0, 1), + TI_SYSCON_CLK_GATE("epwm_tbclk2", 0x0, 2), + { /* Sentinel */ }, +}; + static const struct of_device_id ti_syscon_gate_clk_ids[] = { { .compatible = "ti,am654-ehrpwm-tbclk", @@ -171,6 +178,10 @@ static const struct of_device_id ti_syscon_gate_clk_ids[] = { .compatible = "ti,am64-epwm-tbclk", .data = &am64_clk_data, }, + { + .compatible = "ti,am62-epwm-tbclk", + .data = &am62_clk_data, + }, { } }; MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids); diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig index 01ef02c54725..d5936cfb3bee 100644 --- a/drivers/clk/mediatek/Kconfig +++ b/drivers/clk/mediatek/Kconfig @@ -512,6 +512,14 @@ config COMMON_CLK_MT8183_VENCSYS help This driver supports MediaTek MT8183 vencsys clocks. +config COMMON_CLK_MT8186 + bool "Clock driver for MediaTek MT8186" + depends on ARM64 || COMPILE_TEST + select COMMON_CLK_MEDIATEK + default ARCH_MEDIATEK + help + This driver supports MediaTek MT8186 clocks. + config COMMON_CLK_MT8192 bool "Clock driver for MediaTek MT8192" depends on ARM64 || COMPILE_TEST diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile index 7b0c2646ce4a..caf2ce93d666 100644 --- a/drivers/clk/mediatek/Makefile +++ b/drivers/clk/mediatek/Makefile @@ -71,6 +71,11 @@ obj-$(CONFIG_COMMON_CLK_MT8183_MFGCFG) += clk-mt8183-mfgcfg.o obj-$(CONFIG_COMMON_CLK_MT8183_MMSYS) += clk-mt8183-mm.o obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o +obj-$(CONFIG_COMMON_CLK_MT8186) += clk-mt8186-mcu.o clk-mt8186-topckgen.o clk-mt8186-infra_ao.o \ + clk-mt8186-apmixedsys.o clk-mt8186-imp_iic_wrap.o \ + clk-mt8186-mfg.o clk-mt8186-mm.o clk-mt8186-wpe.o \ + clk-mt8186-img.o clk-mt8186-vdec.o clk-mt8186-venc.o \ + clk-mt8186-cam.o clk-mt8186-mdp.o clk-mt8186-ipe.o obj-$(CONFIG_COMMON_CLK_MT8192) += clk-mt8192.o obj-$(CONFIG_COMMON_CLK_MT8192_AUDSYS) += clk-mt8192-aud.o obj-$(CONFIG_COMMON_CLK_MT8192_CAMSYS) += clk-mt8192-cam.o diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c index a29339cc26c4..fc3d4146f482 100644 --- a/drivers/clk/mediatek/clk-apmixed.c +++ b/drivers/clk/mediatek/clk-apmixed.c @@ -70,12 +70,12 @@ static const struct clk_ops mtk_ref2usb_tx_ops = { .unprepare = mtk_ref2usb_tx_unprepare, }; -struct clk * __init mtk_clk_register_ref2usb_tx(const char *name, +struct clk_hw * __init mtk_clk_register_ref2usb_tx(const char *name, const char *parent_name, void __iomem *reg) { struct mtk_ref2usb_tx *tx; struct clk_init_data init = {}; - struct clk *clk; + int ret; tx = kzalloc(sizeof(*tx), GFP_KERNEL); if (!tx) @@ -89,14 +89,14 @@ struct clk * __init mtk_clk_register_ref2usb_tx(const char *name, init.parent_names = &parent_name; init.num_parents = 1; - clk = clk_register(NULL, &tx->hw); + ret = clk_hw_register(NULL, &tx->hw); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", name, 
clk); + if (ret) { kfree(tx); + return ERR_PTR(ret); } - return clk; + return &tx->hw; } MODULE_LICENSE("GPL"); diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c index c11b3fae622e..2b5d48591738 100644 --- a/drivers/clk/mediatek/clk-cpumux.c +++ b/drivers/clk/mediatek/clk-cpumux.c @@ -57,12 +57,12 @@ static const struct clk_ops clk_cpumux_ops = { .set_parent = clk_cpumux_set_parent, }; -static struct clk * +static struct clk_hw * mtk_clk_register_cpumux(const struct mtk_composite *mux, struct regmap *regmap) { struct mtk_clk_cpumux *cpumux; - struct clk *clk; + int ret; struct clk_init_data init; cpumux = kzalloc(sizeof(*cpumux), GFP_KERNEL); @@ -81,34 +81,33 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux, cpumux->regmap = regmap; cpumux->hw.init = &init; - clk = clk_register(NULL, &cpumux->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(NULL, &cpumux->hw); + if (ret) { kfree(cpumux); + return ERR_PTR(ret); + } - return clk; + return &cpumux->hw; } -static void mtk_clk_unregister_cpumux(struct clk *clk) +static void mtk_clk_unregister_cpumux(struct clk_hw *hw) { struct mtk_clk_cpumux *cpumux; - struct clk_hw *hw; - - hw = __clk_get_hw(clk); if (!hw) return; cpumux = to_mtk_clk_cpumux(hw); - clk_unregister(clk); + clk_hw_unregister(hw); kfree(cpumux); } int mtk_clk_register_cpumuxes(struct device_node *node, const struct mtk_composite *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; - struct clk *clk; + struct clk_hw *hw; struct regmap *regmap; regmap = device_node_to_regmap(node); @@ -120,19 +119,20 @@ int mtk_clk_register_cpumuxes(struct device_node *node, for (i = 0; i < num; i++) { const struct mtk_composite *mux = &clks[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) { pr_warn("%pOF: Trying to register duplicate clock ID: %d\n", node, mux->id); continue; } - clk = mtk_clk_register_cpumux(mux, regmap); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", mux->name, clk); + hw = mtk_clk_register_cpumux(mux, regmap); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", mux->name, + hw); goto err; } - clk_data->clks[mux->id] = clk; + clk_data->hws[mux->id] = hw; } return 0; @@ -141,29 +141,29 @@ err: while (--i >= 0) { const struct mtk_composite *mux = &clks[i]; - if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mux->id])) continue; - mtk_clk_unregister_cpumux(clk_data->clks[mux->id]); - clk_data->clks[mux->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_cpumux(clk_data->hws[mux->id]); + clk_data->hws[mux->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; for (i = num; i > 0; i--) { const struct mtk_composite *mux = &clks[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mux->id])) continue; - mtk_clk_unregister_cpumux(clk_data->clks[mux->id]); - clk_data->clks[mux->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_cpumux(clk_data->hws[mux->id]); + clk_data->hws[mux->id] = ERR_PTR(-ENOENT); } } diff --git a/drivers/clk/mediatek/clk-cpumux.h b/drivers/clk/mediatek/clk-cpumux.h index b07e89f7c283..325adbef25d1 100644 --- a/drivers/clk/mediatek/clk-cpumux.h +++ b/drivers/clk/mediatek/clk-cpumux.h @@ -7,15 +7,15 @@ #ifndef __DRV_CLK_CPUMUX_H #define __DRV_CLK_CPUMUX_H -struct 
clk_onecell_data; +struct clk_hw_onecell_data; struct device_node; struct mtk_composite; int mtk_clk_register_cpumuxes(struct device_node *node, const struct mtk_composite *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_cpumuxes(const struct mtk_composite *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); #endif /* __DRV_CLK_CPUMUX_H */ diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c index da52023f8455..421806236228 100644 --- a/drivers/clk/mediatek/clk-gate.c +++ b/drivers/clk/mediatek/clk-gate.c @@ -152,7 +152,7 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = { }; EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv); -static struct clk *mtk_clk_register_gate(const char *name, +static struct clk_hw *mtk_clk_register_gate(const char *name, const char *parent_name, struct regmap *regmap, int set_ofs, int clr_ofs, int sta_ofs, u8 bit, @@ -160,7 +160,7 @@ static struct clk *mtk_clk_register_gate(const char *name, unsigned long flags, struct device *dev) { struct mtk_clk_gate *cg; - struct clk *clk; + int ret; struct clk_init_data init = {}; cg = kzalloc(sizeof(*cg), GFP_KERNEL); @@ -181,35 +181,34 @@ static struct clk *mtk_clk_register_gate(const char *name, cg->hw.init = &init; - clk = clk_register(dev, &cg->hw); - if (IS_ERR(clk)) + ret = clk_hw_register(dev, &cg->hw); + if (ret) { kfree(cg); + return ERR_PTR(ret); + } - return clk; + return &cg->hw; } -static void mtk_clk_unregister_gate(struct clk *clk) +static void mtk_clk_unregister_gate(struct clk_hw *hw) { struct mtk_clk_gate *cg; - struct clk_hw *hw; - - hw = __clk_get_hw(clk); if (!hw) return; cg = to_mtk_clk_gate(hw); - clk_unregister(clk); + clk_hw_unregister(hw); kfree(cg); } int mtk_clk_register_gates_with_dev(struct device_node *node, const struct mtk_gate *clks, int num, - struct clk_onecell_data *clk_data, + struct clk_hw_onecell_data *clk_data, struct device *dev) { int i; - struct clk *clk; + struct clk_hw *hw; struct regmap *regmap; if (!clk_data) @@ -224,13 +223,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node, for (i = 0; i < num; i++) { const struct mtk_gate *gate = &clks[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[gate->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[gate->id])) { pr_warn("%pOF: Trying to register duplicate clock ID: %d\n", node, gate->id); continue; } - clk = mtk_clk_register_gate(gate->name, gate->parent_name, + hw = mtk_clk_register_gate(gate->name, gate->parent_name, regmap, gate->regs->set_ofs, gate->regs->clr_ofs, @@ -238,12 +237,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node, gate->shift, gate->ops, gate->flags, dev); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", gate->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", gate->name, + hw); goto err; } - clk_data->clks[gate->id] = clk; + clk_data->hws[gate->id] = hw; } return 0; @@ -252,26 +252,26 @@ err: while (--i >= 0) { const struct mtk_gate *gate = &clks[i]; - if (IS_ERR_OR_NULL(clk_data->clks[gate->id])) + if (IS_ERR_OR_NULL(clk_data->hws[gate->id])) continue; - mtk_clk_unregister_gate(clk_data->clks[gate->id]); - clk_data->clks[gate->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_gate(clk_data->hws[gate->id]); + clk_data->hws[gate->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks, int num, - struct clk_onecell_data 
*clk_data) + struct clk_hw_onecell_data *clk_data) { return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL); } EXPORT_SYMBOL_GPL(mtk_clk_register_gates); void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -281,11 +281,11 @@ void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num, for (i = num; i > 0; i--) { const struct mtk_gate *gate = &clks[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[gate->id])) + if (IS_ERR_OR_NULL(clk_data->hws[gate->id])) continue; - mtk_clk_unregister_gate(clk_data->clks[gate->id]); - clk_data->clks[gate->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_gate(clk_data->hws[gate->id]); + clk_data->hws[gate->id] = ERR_PTR(-ENOENT); } } EXPORT_SYMBOL_GPL(mtk_clk_unregister_gates); diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h index 6b5738826a22..d9897ef53528 100644 --- a/drivers/clk/mediatek/clk-gate.h +++ b/drivers/clk/mediatek/clk-gate.h @@ -10,7 +10,7 @@ #include <linux/types.h> struct clk; -struct clk_onecell_data; +struct clk_hw_onecell_data; struct clk_ops; struct device; struct device_node; @@ -52,14 +52,14 @@ struct mtk_gate { int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); int mtk_clk_register_gates_with_dev(struct device_node *node, const struct mtk_gate *clks, int num, - struct clk_onecell_data *clk_data, + struct clk_hw_onecell_data *clk_data, struct device *dev); void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); #endif /* __DRV_CLK_GATE_H */ diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c index e66896a44fad..6ba398eb7df9 100644 --- a/drivers/clk/mediatek/clk-mt2701-aud.c +++ b/drivers/clk/mediatek/clk-mt2701-aud.c @@ -145,7 +145,7 @@ static const struct of_device_id of_match_clk_mt2701_aud[] = { static int clk_mt2701_aud_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -154,7 +154,7 @@ static int clk_mt2701_aud_probe(struct platform_device *pdev) mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c index ffa09cfbfd51..662a8ab3fbb1 100644 --- a/drivers/clk/mediatek/clk-mt2701-bdp.c +++ b/drivers/clk/mediatek/clk-mt2701-bdp.c @@ -101,7 +101,7 @@ static const struct of_device_id of_match_clk_mt2701_bdp[] = { static int clk_mt2701_bdp_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -110,7 +110,7 @@ static int clk_mt2701_bdp_probe(struct platform_device *pdev) mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c 
b/drivers/clk/mediatek/clk-mt2701-eth.c index 100ff6ca609e..47c2289f3d1d 100644 --- a/drivers/clk/mediatek/clk-mt2701-eth.c +++ b/drivers/clk/mediatek/clk-mt2701-eth.c @@ -43,7 +43,7 @@ static const struct of_device_id of_match_clk_mt2701_eth[] = { static int clk_mt2701_eth_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -52,7 +52,7 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev) mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c index 1328c112a38f..79929ed37f83 100644 --- a/drivers/clk/mediatek/clk-mt2701-g3d.c +++ b/drivers/clk/mediatek/clk-mt2701-g3d.c @@ -37,7 +37,7 @@ static const struct mtk_gate g3d_clks[] = { static int clk_mt2701_g3dsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -46,7 +46,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c index 61444881c539..1aa36cb93ad0 100644 --- a/drivers/clk/mediatek/clk-mt2701-hif.c +++ b/drivers/clk/mediatek/clk-mt2701-hif.c @@ -40,7 +40,7 @@ static const struct of_device_id of_match_clk_mt2701_hif[] = { static int clk_mt2701_hif_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -49,7 +49,7 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev) mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c index 631e80f0fc7d..c4f3cd26df60 100644 --- a/drivers/clk/mediatek/clk-mt2701-img.c +++ b/drivers/clk/mediatek/clk-mt2701-img.c @@ -43,7 +43,7 @@ static const struct of_device_id of_match_clk_mt2701_img[] = { static int clk_mt2701_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -52,7 +52,7 @@ static int clk_mt2701_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c index cb18e1849492..9ea7abad99d2 100644 --- a/drivers/clk/mediatek/clk-mt2701-mm.c +++ 
b/drivers/clk/mediatek/clk-mt2701-mm.c @@ -83,7 +83,7 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_MM_NR); @@ -91,7 +91,7 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c index c9def728ad1e..a2f18117f27a 100644 --- a/drivers/clk/mediatek/clk-mt2701-vdec.c +++ b/drivers/clk/mediatek/clk-mt2701-vdec.c @@ -54,7 +54,7 @@ static const struct of_device_id of_match_clk_mt2701_vdec[] = { static int clk_mt2701_vdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -63,7 +63,7 @@ static int clk_mt2701_vdec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c index 1eb3e4563c3f..04ba356db2d7 100644 --- a/drivers/clk/mediatek/clk-mt2701.c +++ b/drivers/clk/mediatek/clk-mt2701.c @@ -666,7 +666,7 @@ static const struct mtk_gate top_clks[] = { static int mtk_topckgen_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; struct device_node *node = pdev->dev.of_node; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -692,7 +692,7 @@ static int mtk_topckgen_init(struct platform_device *pdev) mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct mtk_gate_regs infra_cg_regs = { @@ -735,7 +735,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = { FACTOR(CLK_INFRA_CLK_13M, "clk13m", "clk26m", 1, 2), }; -static struct clk_onecell_data *infra_clk_data; +static struct clk_hw_onecell_data *infra_clk_data; static void __init mtk_infrasys_init_early(struct device_node *node) { @@ -745,7 +745,7 @@ static void __init mtk_infrasys_init_early(struct device_node *node) infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); for (i = 0; i < CLK_INFRA_NR; i++) - infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); } mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), @@ -754,7 +754,8 @@ static void __init mtk_infrasys_init_early(struct device_node *node) mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes), infra_clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + infra_clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -771,8 +772,8 @@ static int 
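
[Editor's note - illustrative sketch, not part of the diff above.] The MediaTek hunks above follow one mechanical conversion: drivers stop storing struct clk pointers in a clk_onecell_data array and instead keep struct clk_hw pointers in a clk_hw_onecell_data, registered with of_clk_add_hw_provider()/of_clk_hw_onecell_get(). A minimal kernel-style sketch of the new-style provider; the foo names are hypothetical, and in the real code mtk_alloc_clk_data() hides the equivalent allocation.

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#define FOO_NR_CLKS	8

static int foo_register_provider(struct device_node *np)
{
	struct clk_hw_onecell_data *clk_data;
	int i;

	clk_data = kzalloc(struct_size(clk_data, hws, FOO_NR_CLKS), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;
	clk_data->num = FOO_NR_CLKS;

	/* Slots are later filled with clk_hw pointers by the *_register helpers. */
	for (i = 0; i < FOO_NR_CLKS; i++)
		clk_data->hws[i] = ERR_PTR(-ENOENT);

	/* of_clk_hw_onecell_get() resolves #clock-cells arguments to hws[] entries. */
	return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
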
mtk_infrasys_init(struct platform_device *pdev) infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); } else { for (i = 0; i < CLK_INFRA_NR; i++) { - if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) - infra_clk_data->clks[i] = ERR_PTR(-ENOENT); + if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER)) + infra_clk_data->hws[i] = ERR_PTR(-ENOENT); } } @@ -781,7 +782,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), infra_clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + infra_clk_data); if (r) return r; @@ -886,7 +888,7 @@ static const struct mtk_composite peri_muxs[] = { static int mtk_pericfg_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; struct device_node *node = pdev->dev.of_node; @@ -904,7 +906,7 @@ static int mtk_pericfg_init(struct platform_device *pdev) mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base, &mt2701_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) return r; @@ -935,13 +937,13 @@ static int mtk_pericfg_init(struct platform_device *pdev) } static const struct mtk_pll_data apmixed_plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000001, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x200, 0x20c, 0x80000000, PLL_AO, 21, 0x204, 24, 0x0, 0x204, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x210, 0x21c, 0xf0000000, HAVE_RST_BAR, 21, 0x210, 4, 0x0, 0x214, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000001, + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x220, 0x22c, 0xf3000000, HAVE_RST_BAR, 7, 0x220, 4, 0x0, 0x224, 14), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0x00000001, 0, + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x230, 0x23c, 0, 0, 21, 0x230, 4, 0x0, 0x234, 0), PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x240, 4, 0x0, 0x244, 0), @@ -969,7 +971,7 @@ static const struct mtk_fixed_factor apmixed_fixed_divs[] = { static int mtk_apmixedsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR); @@ -981,7 +983,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev) mtk_clk_register_factors(apmixed_fixed_divs, ARRAY_SIZE(apmixed_fixed_divs), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt2701[] = { diff --git a/drivers/clk/mediatek/clk-mt2712-bdp.c b/drivers/clk/mediatek/clk-mt2712-bdp.c index a200714001d8..9acab4357133 100644 --- a/drivers/clk/mediatek/clk-mt2712-bdp.c +++ b/drivers/clk/mediatek/clk-mt2712-bdp.c @@ -60,7 +60,7 @@ static const struct mtk_gate bdp_clks[] = { static int clk_mt2712_bdp_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -69,7 +69,7 @@ static int clk_mt2712_bdp_probe(struct platform_device *pdev) mtk_clk_register_gates(node, bdp_clks, ARRAY_SIZE(bdp_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, 
clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712-img.c b/drivers/clk/mediatek/clk-mt2712-img.c index 89b2a7197b02..5cc143e65e42 100644 --- a/drivers/clk/mediatek/clk-mt2712-img.c +++ b/drivers/clk/mediatek/clk-mt2712-img.c @@ -38,7 +38,7 @@ static const struct mtk_gate img_clks[] = { static int clk_mt2712_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -47,7 +47,7 @@ static int clk_mt2712_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712-jpgdec.c b/drivers/clk/mediatek/clk-mt2712-jpgdec.c index 58813c38ab4d..31fc30370d98 100644 --- a/drivers/clk/mediatek/clk-mt2712-jpgdec.c +++ b/drivers/clk/mediatek/clk-mt2712-jpgdec.c @@ -34,7 +34,7 @@ static const struct mtk_gate jpgdec_clks[] = { static int clk_mt2712_jpgdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -43,7 +43,7 @@ static int clk_mt2712_jpgdec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, jpgdec_clks, ARRAY_SIZE(jpgdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712-mfg.c b/drivers/clk/mediatek/clk-mt2712-mfg.c index a6b827db17bc..a4d09675bf18 100644 --- a/drivers/clk/mediatek/clk-mt2712-mfg.c +++ b/drivers/clk/mediatek/clk-mt2712-mfg.c @@ -33,7 +33,7 @@ static const struct mtk_gate mfg_clks[] = { static int clk_mt2712_mfg_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -42,7 +42,7 @@ static int clk_mt2712_mfg_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c index 5519c3d68c1f..7d44b09b8a0a 100644 --- a/drivers/clk/mediatek/clk-mt2712-mm.c +++ b/drivers/clk/mediatek/clk-mt2712-mm.c @@ -130,7 +130,7 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); @@ -138,7 +138,7 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git 
a/drivers/clk/mediatek/clk-mt2712-vdec.c b/drivers/clk/mediatek/clk-mt2712-vdec.c index 4987ad9d3b11..af13f43dd831 100644 --- a/drivers/clk/mediatek/clk-mt2712-vdec.c +++ b/drivers/clk/mediatek/clk-mt2712-vdec.c @@ -52,7 +52,7 @@ static const struct mtk_gate vdec_clks[] = { static int clk_mt2712_vdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -61,7 +61,7 @@ static int clk_mt2712_vdec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712-venc.c b/drivers/clk/mediatek/clk-mt2712-venc.c index 07c29daa1ad6..abc08a029753 100644 --- a/drivers/clk/mediatek/clk-mt2712-venc.c +++ b/drivers/clk/mediatek/clk-mt2712-venc.c @@ -35,7 +35,7 @@ static const struct mtk_gate venc_clks[] = { static int clk_mt2712_venc_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -44,7 +44,7 @@ static int clk_mt2712_venc_probe(struct platform_device *pdev) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c index ff72b9ab945b..410b059727ea 100644 --- a/drivers/clk/mediatek/clk-mt2712.c +++ b/drivers/clk/mediatek/clk-mt2712.c @@ -1223,44 +1223,44 @@ static const struct mtk_pll_div_table mmpll_div_table[] = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000101, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0xf0000100, HAVE_RST_BAR, 31, 0x0230, 4, 0, 0, 0, 0x0234, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000101, + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0240, 0x024C, 0xfe000100, HAVE_RST_BAR, 31, 0x0240, 4, 0, 0, 0, 0x0244, 0), - PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000101, + PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x0320, 0x032C, 0xc0000100, 0, 31, 0x0320, 4, 0, 0, 0, 0x0324, 0), - PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000101, + PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x0280, 0x028C, 0x00000100, 0, 31, 0x0280, 4, 0, 0, 0, 0x0284, 0), - PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000101, + PLL(CLK_APMIXED_APLL1, "apll1", 0x0330, 0x0340, 0x00000100, 0, 31, 0x0330, 4, 0x0338, 0x0014, 0, 0x0334, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000101, + PLL(CLK_APMIXED_APLL2, "apll2", 0x0350, 0x0360, 0x00000100, 0, 31, 0x0350, 4, 0x0358, 0x0014, 1, 0x0354, 0), - PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000101, + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x0370, 0x037c, 0x00000100, 0, 31, 0x0370, 4, 0, 0, 0, 0x0374, 0), - PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000101, + PLL(CLK_APMIXED_LVDSPLL2, "lvdspll2", 0x0390, 0x039C, 0x00000100, 0, 31, 0x0390, 4, 0, 0, 0, 0x0394, 0), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000101, + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0270, 0x027C, 0x00000100, 0, 31, 
0x0270, 4, 0, 0, 0, 0x0274, 0), - PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000101, + PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x0410, 0x041C, 0x00000100, 0, 31, 0x0410, 4, 0, 0, 0, 0x0414, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000101, + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0290, 0x029C, 0xc0000100, 0, 31, 0x0290, 4, 0, 0, 0, 0x0294, 0), - PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000101, + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0250, 0x0260, 0x00000100, 0, 31, 0x0250, 4, 0, 0, 0, 0x0254, 0, mmpll_div_table), - PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000101, + PLL_B(CLK_APMIXED_ARMCA35PLL, "armca35pll", 0x0100, 0x0110, 0xf0000100, HAVE_RST_BAR, 31, 0x0100, 4, 0, 0, 0, 0x0104, 0, armca35pll_div_table), - PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000101, + PLL_B(CLK_APMIXED_ARMCA72PLL, "armca72pll", 0x0210, 0x0220, 0x00000100, 0, 31, 0x0210, 4, 0, 0, 0, 0x0214, 0, armca72pll_div_table), - PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000101, + PLL(CLK_APMIXED_ETHERPLL, "etherpll", 0x0300, 0x030C, 0xc0000100, 0, 31, 0x0300, 4, 0, 0, 0, 0x0304, 0), }; static int clk_mt2712_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -1268,7 +1268,7 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev) mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", @@ -1277,7 +1277,7 @@ static int clk_mt2712_apmixed_probe(struct platform_device *pdev) return r; } -static struct clk_onecell_data *top_clk_data; +static struct clk_hw_onecell_data *top_clk_data; static void clk_mt2712_top_init_early(struct device_node *node) { @@ -1287,13 +1287,13 @@ static void clk_mt2712_top_init_early(struct device_node *node) top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); for (i = 0; i < CLK_TOP_NR_CLK; i++) - top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); } mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -1318,8 +1318,8 @@ static int clk_mt2712_top_probe(struct platform_device *pdev) top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); } else { for (i = 0; i < CLK_TOP_NR_CLK; i++) { - if (top_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) - top_clk_data->clks[i] = ERR_PTR(-ENOENT); + if (top_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER)) + top_clk_data->hws[i] = ERR_PTR(-ENOENT); } } @@ -1335,7 +1335,7 @@ static int clk_mt2712_top_probe(struct platform_device *pdev) mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", @@ -1346,7 +1346,7 @@ static int clk_mt2712_top_probe(struct platform_device *pdev) static int clk_mt2712_infra_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; 
struct device_node *node = pdev->dev.of_node; @@ -1355,7 +1355,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev) mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", @@ -1368,7 +1368,7 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev) static int clk_mt2712_peri_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -1377,7 +1377,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev) mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", @@ -1390,7 +1390,7 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev) static int clk_mt2712_mcu_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; void __iomem *base; @@ -1406,7 +1406,7 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev) mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base, &mt2712_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r != 0) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-audio.c b/drivers/clk/mediatek/clk-mt6765-audio.c index 4c989165d795..9c6e9caad597 100644 --- a/drivers/clk/mediatek/clk-mt6765-audio.c +++ b/drivers/clk/mediatek/clk-mt6765-audio.c @@ -66,7 +66,7 @@ static const struct mtk_gate audio_clks[] = { static int clk_mt6765_audio_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -75,7 +75,7 @@ static int clk_mt6765_audio_probe(struct platform_device *pdev) mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-cam.c b/drivers/clk/mediatek/clk-mt6765-cam.c index c96394893bcf..2586d3ac4cd4 100644 --- a/drivers/clk/mediatek/clk-mt6765-cam.c +++ b/drivers/clk/mediatek/clk-mt6765-cam.c @@ -41,7 +41,7 @@ static const struct mtk_gate cam_clks[] = { static int clk_mt6765_cam_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -49,7 +49,7 @@ static int clk_mt6765_cam_probe(struct platform_device *pdev) mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-img.c b/drivers/clk/mediatek/clk-mt6765-img.c index 6fd8bf8030fc..8cc95b98921e 100644 --- 
a/drivers/clk/mediatek/clk-mt6765-img.c +++ b/drivers/clk/mediatek/clk-mt6765-img.c @@ -37,7 +37,7 @@ static const struct mtk_gate img_clks[] = { static int clk_mt6765_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -45,7 +45,7 @@ static int clk_mt6765_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-mipi0a.c b/drivers/clk/mediatek/clk-mt6765-mipi0a.c index 81744d0f95a0..c816e26a95f9 100644 --- a/drivers/clk/mediatek/clk-mt6765-mipi0a.c +++ b/drivers/clk/mediatek/clk-mt6765-mipi0a.c @@ -34,7 +34,7 @@ static const struct mtk_gate mipi0a_clks[] = { static int clk_mt6765_mipi0a_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -43,7 +43,7 @@ static int clk_mt6765_mipi0a_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mipi0a_clks, ARRAY_SIZE(mipi0a_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-mm.c b/drivers/clk/mediatek/clk-mt6765-mm.c index 6d8214c51684..ee6d3b859a6c 100644 --- a/drivers/clk/mediatek/clk-mt6765-mm.c +++ b/drivers/clk/mediatek/clk-mt6765-mm.c @@ -63,7 +63,7 @@ static const struct mtk_gate mm_clks[] = { static int clk_mt6765_mm_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -71,7 +71,7 @@ static int clk_mt6765_mm_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765-vcodec.c b/drivers/clk/mediatek/clk-mt6765-vcodec.c index baae665fab31..d8045979d48a 100644 --- a/drivers/clk/mediatek/clk-mt6765-vcodec.c +++ b/drivers/clk/mediatek/clk-mt6765-vcodec.c @@ -36,7 +36,7 @@ static const struct mtk_gate venc_clks[] = { static int clk_mt6765_vcodec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -45,7 +45,7 @@ static int clk_mt6765_vcodec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c index 24829ca3bd1f..e9b9e6729733 100644 --- a/drivers/clk/mediatek/clk-mt6765.c +++ b/drivers/clk/mediatek/clk-mt6765.c @@ -748,32 +748,32 @@ static const struct mtk_gate apmixed_clks[] = { _pcw_reg, _pcw_shift, NULL) \ static const struct mtk_pll_data plls[] = { 
- PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, BIT(0), + PLL(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x021C, 0x0228, 0, PLL_AO, 22, 8, 0x0220, 24, 0, 0, 0, 0x0220, 0), - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, BIT(0), + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x020C, 0x0218, 0, PLL_AO, 22, 8, 0x0210, 24, 0, 0, 0, 0x0210, 0), - PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, BIT(0), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x022C, 0x0238, 0, PLL_AO, 22, 8, 0x0230, 24, 0, 0, 0, 0x0230, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, BIT(0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x023C, 0x0248, 0, (HAVE_RST_BAR | PLL_AO), 22, 8, 0x0240, 24, 0, 0, 0, 0x0240, 0), - PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, BIT(0), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x024C, 0x0258, 0, 0, 22, 8, 0x0250, 24, 0, 0, 0, 0x0250, 0), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, BIT(0), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x025C, 0x0268, 0, 0, 22, 8, 0x0260, 24, 0, 0, 0, 0x0260, 0), - PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, BIT(0), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x026C, 0x0278, 0, HAVE_RST_BAR, 22, 8, 0x0270, 24, 0, 0, 0, 0x0270, 0), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, BIT(0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x027C, 0x0288, 0, 0, 22, 8, 0x0280, 24, 0, 0, 0, 0x0280, 0), - PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, BIT(0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x028C, 0x029C, 0, 0, 32, 8, 0x0290, 24, 0x0040, 0x000C, 0, 0x0294, 0), - PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, BIT(0), + PLL(CLK_APMIXED_MPLL, "mpll", 0x02A0, 0x02AC, 0, PLL_AO, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0), }; static int clk_mt6765_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; void __iomem *base; @@ -791,7 +791,7 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev) mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", @@ -811,7 +811,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev) int r; struct device_node *node = pdev->dev.of_node; void __iomem *base; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); @@ -831,7 +831,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev) mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", @@ -848,7 +848,7 @@ static int clk_mt6765_top_probe(struct platform_device *pdev) static int clk_mt6765_ifr_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; void __iomem *base; @@ -864,7 +864,7 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could 
not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c index 9e889e4c361a..97e44abb7e87 100644 --- a/drivers/clk/mediatek/clk-mt6779-aud.c +++ b/drivers/clk/mediatek/clk-mt6779-aud.c @@ -96,7 +96,7 @@ static const struct of_device_id of_match_clk_mt6779_aud[] = { static int clk_mt6779_aud_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK); @@ -104,7 +104,7 @@ static int clk_mt6779_aud_probe(struct platform_device *pdev) mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_aud_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c index 7f07a2a139ac..9c5117aae146 100644 --- a/drivers/clk/mediatek/clk-mt6779-cam.c +++ b/drivers/clk/mediatek/clk-mt6779-cam.c @@ -45,7 +45,7 @@ static const struct of_device_id of_match_clk_mt6779_cam[] = { static int clk_mt6779_cam_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK); @@ -53,7 +53,7 @@ static int clk_mt6779_cam_probe(struct platform_device *pdev) mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_cam_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c index f0961fa1a286..801271477d46 100644 --- a/drivers/clk/mediatek/clk-mt6779-img.c +++ b/drivers/clk/mediatek/clk-mt6779-img.c @@ -37,7 +37,7 @@ static const struct of_device_id of_match_clk_mt6779_img[] = { static int clk_mt6779_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); @@ -45,7 +45,7 @@ static int clk_mt6779_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_img_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c index 8c6f3e154bf3..f67814ca7dfb 100644 --- a/drivers/clk/mediatek/clk-mt6779-ipe.c +++ b/drivers/clk/mediatek/clk-mt6779-ipe.c @@ -39,7 +39,7 @@ static const struct of_device_id of_match_clk_mt6779_ipe[] = { static int clk_mt6779_ipe_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IPE_NR_CLK); @@ -47,7 +47,7 @@ static int clk_mt6779_ipe_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ipe_clks, ARRAY_SIZE(ipe_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver 
clk_mt6779_ipe_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c index 9f3372886e6b..fc7387b59758 100644 --- a/drivers/clk/mediatek/clk-mt6779-mfg.c +++ b/drivers/clk/mediatek/clk-mt6779-mfg.c @@ -29,7 +29,7 @@ static const struct mtk_gate mfg_clks[] = { static int clk_mt6779_mfg_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_MFGCFG_NR_CLK); @@ -37,7 +37,7 @@ static int clk_mt6779_mfg_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt6779_mfg[] = { diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c index 33946e647122..eda8cbee3d23 100644 --- a/drivers/clk/mediatek/clk-mt6779-mm.c +++ b/drivers/clk/mediatek/clk-mt6779-mm.c @@ -89,14 +89,14 @@ static int clk_mt6779_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_mm_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c index f4358844c2e0..7e195b082e86 100644 --- a/drivers/clk/mediatek/clk-mt6779-vdec.c +++ b/drivers/clk/mediatek/clk-mt6779-vdec.c @@ -46,7 +46,7 @@ static const struct of_device_id of_match_clk_mt6779_vdec[] = { static int clk_mt6779_vdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_VDEC_GCON_NR_CLK); @@ -54,7 +54,7 @@ static int clk_mt6779_vdec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_vdec_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c index ff67084af5aa..573efa87c9bd 100644 --- a/drivers/clk/mediatek/clk-mt6779-venc.c +++ b/drivers/clk/mediatek/clk-mt6779-venc.c @@ -37,7 +37,7 @@ static const struct of_device_id of_match_clk_mt6779_venc[] = { static int clk_mt6779_venc_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_VENC_GCON_NR_CLK); @@ -45,7 +45,7 @@ static int clk_mt6779_venc_probe(struct platform_device *pdev) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt6779_venc_drv = { diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c 
index 7b61664da18f..0d0a90ee5eb2 100644 --- a/drivers/clk/mediatek/clk-mt6779.c +++ b/drivers/clk/mediatek/clk-mt6779.c @@ -1182,39 +1182,39 @@ static const struct mtk_gate apmixed_clks[] = { _pcw_chg_reg, NULL) static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, BIT(0), + PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0, PLL_AO, 0, 22, 8, 0x0204, 24, 0, 0, 0, 0x0204, 0, 0), - PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0210, 0x021C, BIT(0), + PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0210, 0x021C, 0, PLL_AO, 0, 22, 8, 0x0214, 24, 0, 0, 0, 0x0214, 0, 0), - PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x02A0, 0x02AC, BIT(0), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x02A0, 0x02AC, 0, PLL_AO, 0, 22, 8, 0x02A4, 24, 0, 0, 0, 0x02A4, 0, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, BIT(0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0230, 0x023C, 0, (HAVE_RST_BAR), BIT(24), 22, 8, 0x0234, 24, 0, 0, 0, 0x0234, 0, 0), - PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0240, 0x024C, BIT(0), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0240, 0x024C, 0, (HAVE_RST_BAR), BIT(24), 22, 8, 0x0244, 24, 0, 0, 0, 0x0244, 0, 0), - PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0250, 0x025C, BIT(0), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0250, 0x025C, 0, 0, 0, 22, 8, 0x0254, 24, 0, 0, 0, 0x0254, 0, 0), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0260, 0x026C, BIT(0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0260, 0x026C, 0, 0, 0, 22, 8, 0x0264, 24, 0, 0, 0, 0x0264, 0, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, BIT(0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0, 0, 0, 22, 8, 0x0274, 24, 0, 0, 0, 0x0274, 0, 0), - PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x02b0, 0x02bC, BIT(0), + PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x02b0, 0x02bC, 0, (HAVE_RST_BAR), BIT(23), 22, 8, 0x02b4, 24, 0, 0, 0, 0x02b4, 0, 0), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0280, 0x028C, BIT(0), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0280, 0x028C, 0, (HAVE_RST_BAR), BIT(23), 22, 8, 0x0284, 24, 0, 0, 0, 0x0284, 0, 0), - PLL(CLK_APMIXED_APLL1, "apll1", 0x02C0, 0x02D0, BIT(0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x02C0, 0x02D0, 0, 0, 0, 32, 8, 0x02C0, 1, 0, 0x14, 0, 0x02C4, 0, 0x2C0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x02D4, 0x02E4, BIT(0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x02D4, 0x02E4, 0, 0, 0, 32, 8, 0x02D4, 1, 0, 0x14, 1, 0x02D8, 0, 0x02D4), }; static int clk_mt6779_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); @@ -1224,13 +1224,13 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev) mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int clk_mt6779_top_probe(struct platform_device *pdev) { void __iomem *base; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; base = devm_platform_ioremap_resource(pdev, 0); @@ -1253,12 +1253,12 @@ static int clk_mt6779_top_probe(struct platform_device *pdev) mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base, &mt6779_clk_lock, clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int clk_mt6779_infra_probe(struct 
platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); @@ -1266,7 +1266,7 @@ static int clk_mt6779_infra_probe(struct platform_device *pdev) mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt6779[] = { diff --git a/drivers/clk/mediatek/clk-mt6797-img.c b/drivers/clk/mediatek/clk-mt6797-img.c index 908bf9784f03..25d17db13bac 100644 --- a/drivers/clk/mediatek/clk-mt6797-img.c +++ b/drivers/clk/mediatek/clk-mt6797-img.c @@ -39,7 +39,7 @@ static const struct of_device_id of_match_clk_mt6797_img[] = { static int clk_mt6797_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -48,7 +48,7 @@ static int clk_mt6797_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c index 01fdce287247..0846011fc894 100644 --- a/drivers/clk/mediatek/clk-mt6797-mm.c +++ b/drivers/clk/mediatek/clk-mt6797-mm.c @@ -96,7 +96,7 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_MM_NR); @@ -104,7 +104,7 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev) mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6797-vdec.c b/drivers/clk/mediatek/clk-mt6797-vdec.c index bbbc8119c3af..de857894e033 100644 --- a/drivers/clk/mediatek/clk-mt6797-vdec.c +++ b/drivers/clk/mediatek/clk-mt6797-vdec.c @@ -56,7 +56,7 @@ static const struct of_device_id of_match_clk_mt6797_vdec[] = { static int clk_mt6797_vdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -65,7 +65,7 @@ static int clk_mt6797_vdec_probe(struct platform_device *pdev) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6797-venc.c b/drivers/clk/mediatek/clk-mt6797-venc.c index 2c75f0cbfb51..78b7ed55f979 100644 --- a/drivers/clk/mediatek/clk-mt6797-venc.c +++ b/drivers/clk/mediatek/clk-mt6797-venc.c @@ -41,7 +41,7 @@ static const struct of_device_id of_match_clk_mt6797_venc[] = { static int clk_mt6797_venc_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; 
+ struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -50,7 +50,7 @@ static int clk_mt6797_venc_probe(struct platform_device *pdev) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c index 02259e81625a..b89f325a4b9b 100644 --- a/drivers/clk/mediatek/clk-mt6797.c +++ b/drivers/clk/mediatek/clk-mt6797.c @@ -383,7 +383,7 @@ static const struct mtk_composite top_muxes[] = { static int mtk_topckgen_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; struct device_node *node = pdev->dev.of_node; @@ -399,7 +399,7 @@ static int mtk_topckgen_init(struct platform_device *pdev) mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt6797_clk_lock, clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct mtk_gate_regs infra0_cg_regs = { @@ -556,7 +556,7 @@ static const struct mtk_fixed_factor infra_fixed_divs[] = { FACTOR(CLK_INFRA_13M, "clk13m", "clk26m", 1, 2), }; -static struct clk_onecell_data *infra_clk_data; +static struct clk_hw_onecell_data *infra_clk_data; static void mtk_infrasys_init_early(struct device_node *node) { @@ -566,13 +566,14 @@ static void mtk_infrasys_init_early(struct device_node *node) infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); for (i = 0; i < CLK_INFRA_NR; i++) - infra_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + infra_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); } mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), infra_clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + infra_clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -590,8 +591,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) infra_clk_data = mtk_alloc_clk_data(CLK_INFRA_NR); } else { for (i = 0; i < CLK_INFRA_NR; i++) { - if (infra_clk_data->clks[i] == ERR_PTR(-EPROBE_DEFER)) - infra_clk_data->clks[i] = ERR_PTR(-ENOENT); + if (infra_clk_data->hws[i] == ERR_PTR(-EPROBE_DEFER)) + infra_clk_data->hws[i] = ERR_PTR(-ENOENT); } } @@ -600,7 +601,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs), infra_clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, infra_clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + infra_clk_data); } #define MT6797_PLL_FMAX (3000UL * MHZ) @@ -635,31 +637,31 @@ static int mtk_infrasys_init(struct platform_device *pdev) NULL) static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0xF0000101, PLL_AO, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0xF0000100, PLL_AO, 21, 0x220, 4, 0x0, 0x224, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0230, 0x023C, 0xFE000011, 0, 7, + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0230, 0x023C, 0xFE000010, 0, 7, 0x230, 4, 0x0, 0x234, 14), - PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000101, 0, 21, + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 
0x00000100, 0, 21, 0x244, 24, 0x0, 0x244, 0), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000121, 0, 21, + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000120, 0, 21, 0x250, 4, 0x0, 0x254, 0), - PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0260, 0x026C, 0x00000121, 0, 21, + PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0260, 0x026C, 0x00000120, 0, 21, 0x260, 4, 0x0, 0x264, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0xC0000121, 0, 21, + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0270, 0x027C, 0xC0000120, 0, 21, 0x270, 4, 0x0, 0x274, 0), - PLL(CLK_APMIXED_CODECPLL, "codecpll", 0x0290, 0x029C, 0x00000121, 0, 21, + PLL(CLK_APMIXED_CODECPLL, "codecpll", 0x0290, 0x029C, 0x00000120, 0, 21, 0x290, 4, 0x0, 0x294, 0), - PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x02E4, 0x02F0, 0x00000121, 0, 21, + PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x02E4, 0x02F0, 0x00000120, 0, 21, 0x2E4, 4, 0x0, 0x2E8, 0), - PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000131, 0, 31, + PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000130, 0, 31, 0x2A0, 4, 0x2A8, 0x2A4, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x02B4, 0x02C4, 0x00000131, 0, 31, + PLL(CLK_APMIXED_APLL2, "apll2", 0x02B4, 0x02C4, 0x00000130, 0, 31, 0x2B4, 4, 0x2BC, 0x2B8, 0), }; static int mtk_apmixedsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR); @@ -668,7 +670,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev) mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt6797[] = { diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c index 2bd4295bc36b..9f2e5aa7b5d9 100644 --- a/drivers/clk/mediatek/clk-mt7622-aud.c +++ b/drivers/clk/mediatek/clk-mt7622-aud.c @@ -132,7 +132,7 @@ static const struct mtk_gate audio_clks[] = { static int clk_mt7622_audiosys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -141,7 +141,7 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev) mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c index c9947dc7ba5a..b12d48705496 100644 --- a/drivers/clk/mediatek/clk-mt7622-eth.c +++ b/drivers/clk/mediatek/clk-mt7622-eth.c @@ -67,7 +67,7 @@ static const struct mtk_gate sgmii_clks[] = { static int clk_mt7622_ethsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -76,7 +76,7 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", @@ -89,7 +89,7 @@ static int 
clk_mt7622_ethsys_init(struct platform_device *pdev) static int clk_mt7622_sgmiisys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -98,7 +98,7 @@ static int clk_mt7622_sgmiisys_init(struct platform_device *pdev) mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c index 628be0c9f888..58728e35e80a 100644 --- a/drivers/clk/mediatek/clk-mt7622-hif.c +++ b/drivers/clk/mediatek/clk-mt7622-hif.c @@ -78,7 +78,7 @@ static const struct mtk_gate pcie_clks[] = { static int clk_mt7622_ssusbsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -87,7 +87,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", @@ -100,7 +100,7 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev) static int clk_mt7622_pciesys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -109,7 +109,7 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev) mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c index 0e1fb30a1e98..e4a5e5230861 100644 --- a/drivers/clk/mediatek/clk-mt7622.c +++ b/drivers/clk/mediatek/clk-mt7622.c @@ -329,23 +329,23 @@ static const struct mtk_gate_regs peri1_cg_regs = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0, PLL_AO, 21, 0x0204, 24, 0, 0x0204, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0, HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0), - PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001, + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0, HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14), - PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001, + PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0, 0, 21, 0x0300, 1, 0, 0x0304, 0), - PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001, + PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0, 0, 21, 0x0314, 1, 0, 0x0318, 0), - PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0x00000001, + PLL(CLK_APMIXED_AUD1PLL, "aud1pll", 0x0324, 0x0330, 0, 0, 31, 0x0324, 1, 0, 0x0328, 0), - PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0x00000001, + PLL(CLK_APMIXED_AUD2PLL, "aud2pll", 0x0334, 0x0340, 0, 0, 31, 0x0334, 1, 
0, 0x0338, 0), - PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0x00000001, + PLL(CLK_APMIXED_TRGPLL, "trgpll", 0x0344, 0x0354, 0, 0, 21, 0x0344, 1, 0, 0x0348, 0), - PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001, + PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0, 0, 21, 0x0358, 1, 0, 0x035C, 0), }; @@ -612,7 +612,7 @@ static struct mtk_composite peri_muxes[] = { static int mtk_topckgen_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; struct device_node *node = pdev->dev.of_node; @@ -637,17 +637,17 @@ static int mtk_topckgen_init(struct platform_device *pdev) mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data); - clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]); + clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int mtk_infrasys_init(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); @@ -658,8 +658,8 @@ static int mtk_infrasys_init(struct platform_device *pdev) mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, - clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + clk_data); if (r) return r; @@ -670,7 +670,7 @@ static int mtk_infrasys_init(struct platform_device *pdev) static int mtk_apmixedsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); @@ -683,15 +683,15 @@ static int mtk_apmixedsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]); + clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk); + clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int mtk_pericfg_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; struct device_node *node = pdev->dev.of_node; @@ -708,11 +708,11 @@ static int mtk_pericfg_init(struct platform_device *pdev) mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base, &mt7622_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) return r; - clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]); + clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk); mtk_register_reset_controller(node, 2, 0x0); diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c index 88279d0ea1a7..c49fd732c9b2 100644 --- 
a/drivers/clk/mediatek/clk-mt7629-eth.c +++ b/drivers/clk/mediatek/clk-mt7629-eth.c @@ -78,7 +78,7 @@ static const struct mtk_gate sgmii_clks[2][4] = { static int clk_mt7629_ethsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -86,7 +86,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", @@ -99,7 +99,7 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev) static int clk_mt7629_sgmiisys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; static int id; int r; @@ -109,7 +109,7 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev) mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c index 5c5b37207afb..acaa97fda331 100644 --- a/drivers/clk/mediatek/clk-mt7629-hif.c +++ b/drivers/clk/mediatek/clk-mt7629-hif.c @@ -73,7 +73,7 @@ static const struct mtk_gate pcie_clks[] = { static int clk_mt7629_ssusbsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -82,7 +82,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", @@ -95,7 +95,7 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev) static int clk_mt7629_pciesys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -104,7 +104,7 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev) mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) dev_err(&pdev->dev, "could not register clock provider: %s: %d\n", diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c index c0e023bf31eb..e4a08c811adc 100644 --- a/drivers/clk/mediatek/clk-mt7629.c +++ b/drivers/clk/mediatek/clk-mt7629.c @@ -336,17 +336,17 @@ static const struct mtk_gate_regs peri1_cg_regs = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0, 0, 21, 0x0204, 24, 0, 0x0204, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0, HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0), - PLL(CLK_APMIXED_UNIV2PLL, 
"univ2pll", 0x0220, 0x022C, 0x00000001, + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0, HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14), - PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001, + PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0, 0, 21, 0x0300, 1, 0, 0x0304, 0), - PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001, + PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0, 0, 21, 0x0314, 1, 0, 0x0318, 0), - PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001, + PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0, 0, 21, 0x0358, 1, 0, 0x035C, 0), }; @@ -572,7 +572,7 @@ static struct mtk_composite peri_muxes[] = { static int mtk_topckgen_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; struct device_node *node = pdev->dev.of_node; @@ -591,17 +591,17 @@ static int mtk_topckgen_init(struct platform_device *pdev) mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt7629_clk_lock, clk_data); - clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]); + clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int mtk_infrasys_init(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); @@ -611,13 +611,13 @@ static int mtk_infrasys_init(struct platform_device *pdev) mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, - clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + clk_data); } static int mtk_pericfg_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; struct device_node *node = pdev->dev.of_node; @@ -634,18 +634,18 @@ static int mtk_pericfg_init(struct platform_device *pdev) mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base, &mt7629_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) return r; - clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]); + clk_prepare_enable(clk_data->hws[CLK_PERI_UART0_PD]->clk); return 0; } static int mtk_apmixedsys_init(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); @@ -658,10 +658,10 @@ static int mtk_apmixedsys_init(struct platform_device *pdev) mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]); + clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk); + clk_prepare_enable(clk_data->hws[CLK_APMIXED_MAIN_CORE_EN]->clk); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, 
of_clk_hw_onecell_get, clk_data); } diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c index 21d4c82e782a..62080ee4dbe3 100644 --- a/drivers/clk/mediatek/clk-mt7986-apmixed.c +++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c @@ -42,21 +42,21 @@ "clkxtal") static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, 0, 32, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x0, 0, 32, 0x0200, 4, 0, 0x0204, 0), - PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x00000001, 0, 32, + PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x0, 0, 32, 0x0210, 4, 0, 0x0214, 0), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x00000001, 0, 32, + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x0, 0, 32, 0x0220, 4, 0, 0x0224, 0), - PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x00000001, 0, 32, + PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x0, 0, 32, 0x0230, 4, 0, 0x0234, 0), - PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x00000001, 0, + PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x0, 0, 32, 0x0240, 4, 0, 0x0244, 0), - PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x00000001, 0, 32, + PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x0, 0, 32, 0x0250, 4, 0, 0x0254, 0), - PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x00000001, 0, 32, 0x0260, + PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x0, 0, 32, 0x0260, 4, 0, 0x0264, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x00000001, 0, 32, + PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x0, 0, 32, 0x0278, 4, 0, 0x027c, 0), }; @@ -67,7 +67,7 @@ static const struct of_device_id of_match_clk_mt7986_apmixed[] = { static int clk_mt7986_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -77,9 +77,9 @@ static int clk_mt7986_apmixed_probe(struct platform_device *pdev) mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); - clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]); + clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c index 495d023ccad7..7868c0728e96 100644 --- a/drivers/clk/mediatek/clk-mt7986-eth.c +++ b/drivers/clk/mediatek/clk-mt7986-eth.c @@ -79,7 +79,7 @@ static const struct mtk_gate eth_clks[] __initconst = { static void __init mtk_sgmiisys_0_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks)); @@ -87,7 +87,7 @@ static void __init mtk_sgmiisys_0_init(struct device_node *node) mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -97,7 +97,7 @@ CLK_OF_DECLARE(mtk_sgmiisys_0, "mediatek,mt7986-sgmiisys_0", static void __init mtk_sgmiisys_1_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = 
mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks)); @@ -105,7 +105,7 @@ static void __init mtk_sgmiisys_1_init(struct device_node *node) mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", @@ -116,17 +116,17 @@ CLK_OF_DECLARE(mtk_sgmiisys_1, "mediatek,mt7986-sgmiisys_1", static void __init mtk_ethsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks)); mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); } -CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys_ck", mtk_ethsys_init); +CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys", mtk_ethsys_init); diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c index f209c559fbc3..d90727a53283 100644 --- a/drivers/clk/mediatek/clk-mt7986-infracfg.c +++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c @@ -171,7 +171,7 @@ static const struct mtk_gate infra_clks[] = { static int clk_mt7986_infracfg_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; void __iomem *base; @@ -195,7 +195,7 @@ static int clk_mt7986_infracfg_probe(struct platform_device *pdev) mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c index 8f6f79b6e31e..de5121cf2877 100644 --- a/drivers/clk/mediatek/clk-mt7986-topckgen.c +++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c @@ -283,7 +283,7 @@ static const struct mtk_mux top_muxes[] = { static int clk_mt7986_topckgen_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; void __iomem *base; @@ -306,14 +306,14 @@ static int clk_mt7986_topckgen_probe(struct platform_device *pdev) mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node, &mt7986_clk_lock, clk_data); - clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAXI_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAPB_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_MD32_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_F26M_SEL]); - clk_prepare_enable(clk_data->clks[CLK_TOP_SGM_REG_SEL]); + clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAXI_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAPB_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_DRAMC_MD32_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_F26M_SEL]->clk); + clk_prepare_enable(clk_data->hws[CLK_TOP_SGM_REG_SEL]->clk); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, 
of_clk_hw_onecell_get, clk_data); if (r) { pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c index 09ad272d51f1..9ef524b44862 100644 --- a/drivers/clk/mediatek/clk-mt8135.c +++ b/drivers/clk/mediatek/clk-mt8135.c @@ -516,7 +516,7 @@ static const struct mtk_composite peri_clks[] __initconst = { static void __init mtk_topckgen_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; @@ -533,9 +533,9 @@ static void __init mtk_topckgen_init(struct device_node *node) mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8135_clk_lock, clk_data); - clk_prepare_enable(clk_data->clks[CLK_TOP_CCI_SEL]); + clk_prepare_enable(clk_data->hws[CLK_TOP_CCI_SEL]->clk); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -544,7 +544,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8135-topckgen", mtk_topckgen_init); static void __init mtk_infrasys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); @@ -552,9 +552,9 @@ static void __init mtk_infrasys_init(struct device_node *node) mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); - clk_prepare_enable(clk_data->clks[CLK_INFRA_M4U]); + clk_prepare_enable(clk_data->hws[CLK_INFRA_M4U]->clk); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -565,7 +565,7 @@ CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init); static void __init mtk_pericfg_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -582,7 +582,7 @@ static void __init mtk_pericfg_init(struct device_node *node) mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base, &mt8135_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -612,21 +612,21 @@ CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init); } static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000001, 0, 21, 0x204, 24, 0x0, 0x204, 0), - PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000001, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000001, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000001, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000001, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000001, 0, 21, 0x278, 6, 0x0, 0x27c, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000001, 0, 31, 0x294, 6, 0x0, 0x298, 0), - PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0), - PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000001, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0), - 
PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x308, 0), + PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000000, 0, 21, 0x204, 24, 0x0, 0x204, 0), + PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000000, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000000, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000000, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000000, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000000, 0, 21, 0x278, 6, 0x0, 0x27c, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000000, 0, 31, 0x294, 6, 0x0, 0x298, 0), + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0), + PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000000, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0), + PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000000, 0, 21, 0x2b0, 6, 0x0, 0x308, 0), }; static void __init mtk_apmixedsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); if (!clk_data) diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c index 3f7bf6485792..ce1ae8d243c3 100644 --- a/drivers/clk/mediatek/clk-mt8167-aud.c +++ b/drivers/clk/mediatek/clk-mt8167-aud.c @@ -50,14 +50,14 @@ static const struct mtk_gate aud_clks[] __initconst = { static void __init mtk_audsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK); mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c index 3b4ec9eae432..e359e563d2b7 100644 --- a/drivers/clk/mediatek/clk-mt8167-img.c +++ b/drivers/clk/mediatek/clk-mt8167-img.c @@ -43,14 +43,14 @@ static const struct mtk_gate img_clks[] __initconst = { static void __init mtk_imgsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c index 90b871730f2d..4fd82fe87d6e 100644 --- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c @@ -41,14 +41,14 @@ static const struct mtk_gate mfg_clks[] __initconst = { static void __init mtk_mfgcfg_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK); mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if 
(r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c index 963b129aade1..73910060577f 100644 --- a/drivers/clk/mediatek/clk-mt8167-mm.c +++ b/drivers/clk/mediatek/clk-mt8167-mm.c @@ -101,7 +101,7 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; const struct clk_mt8167_mm_driver_data *data; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int ret; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); @@ -115,7 +115,7 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev) if (ret) return ret; - ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (ret) return ret; diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c index 910b28355ec0..ee4fffb6859d 100644 --- a/drivers/clk/mediatek/clk-mt8167-vdec.c +++ b/drivers/clk/mediatek/clk-mt8167-vdec.c @@ -56,14 +56,14 @@ static const struct mtk_gate vdec_clks[] __initconst = { static void __init mtk_vdecsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK); mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c index 812b33a57530..f900ac4bf7b8 100644 --- a/drivers/clk/mediatek/clk-mt8167.c +++ b/drivers/clk/mediatek/clk-mt8167.c @@ -923,7 +923,7 @@ static const struct mtk_gate top_clks[] __initconst = { static void __init mtk_topckgen_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -945,7 +945,7 @@ static void __init mtk_topckgen_init(struct device_node *node) mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8167_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -954,7 +954,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8167-topckgen", mtk_topckgen_init); static void __init mtk_infracfg_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -969,7 +969,7 @@ static void __init mtk_infracfg_init(struct device_node *node) mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base, &mt8167_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -1017,27 +1017,27 @@ static const struct mtk_pll_div_table mmpll_div_table[] = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0, 21, 0x0104, 24, 0, 0x0104, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 
0x0130, 0, HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001, + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000, HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0), - PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0, + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0, 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table), - PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0, + PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0, 31, 0x0180, 1, 0x0194, 0x0184, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0, + PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0, 31, 0x01A0, 1, 0x01B4, 0x01A4, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0x00000001, 0, + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x01C0, 0x01D0, 0, 0, 21, 0x01C4, 24, 0, 0x01C4, 0), - PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0x00000001, 0, + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x01E0, 0x01F0, 0, 0, 21, 0x01E4, 24, 0, 0x01E4, 0), }; static void __init mtk_apmixedsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; @@ -1053,7 +1053,7 @@ static void __init mtk_apmixedsys_init(struct device_node *node) mtk_clk_register_dividers(apmixed_adj_divs, ARRAY_SIZE(apmixed_adj_divs), base, &mt8167_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c index 36fa20be77b6..8abf42c2030c 100644 --- a/drivers/clk/mediatek/clk-mt8173-mm.c +++ b/drivers/clk/mediatek/clk-mt8173-mm.c @@ -115,7 +115,7 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; const struct clk_mt8173_mm_driver_data *data; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int ret; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); @@ -129,7 +129,7 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev) if (ret) return ret; - ret = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (ret) return ret; diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c index 46b7655feeaa..0929db330852 100644 --- a/drivers/clk/mediatek/clk-mt8173.c +++ b/drivers/clk/mediatek/clk-mt8173.c @@ -819,25 +819,25 @@ static const struct mtk_gate venclt_clks[] __initconst = { GATE_VENCLT(CLK_VENCLT_CKE1, "venclt_cke1", "venclt_sel", 4), }; -static struct clk_onecell_data *mt8173_top_clk_data __initdata; -static struct clk_onecell_data *mt8173_pll_clk_data __initdata; +static struct clk_hw_onecell_data *mt8173_top_clk_data __initdata; +static struct clk_hw_onecell_data *mt8173_pll_clk_data __initdata; static void __init mtk_clk_enable_critical(void) { if (!mt8173_top_clk_data || !mt8173_pll_clk_data) return; - clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA15PLL]); - clk_prepare_enable(mt8173_pll_clk_data->clks[CLK_APMIXED_ARMCA7PLL]); - clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_MEM_SEL]); - clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]); - clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_CCI400_SEL]); - 
clk_prepare_enable(mt8173_top_clk_data->clks[CLK_TOP_RTC_SEL]); + clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA15PLL]->clk); + clk_prepare_enable(mt8173_pll_clk_data->hws[CLK_APMIXED_ARMCA7PLL]->clk); + clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_MEM_SEL]->clk); + clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_DDRPHYCFG_SEL]->clk); + clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_CCI400_SEL]->clk); + clk_prepare_enable(mt8173_top_clk_data->hws[CLK_TOP_RTC_SEL]->clk); } static void __init mtk_topckgen_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; @@ -854,7 +854,7 @@ static void __init mtk_topckgen_init(struct device_node *node) mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8173_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -865,7 +865,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init); static void __init mtk_infrasys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); @@ -877,7 +877,7 @@ static void __init mtk_infrasys_init(struct device_node *node) mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -888,7 +888,7 @@ CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init); static void __init mtk_pericfg_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -905,7 +905,7 @@ static void __init mtk_pericfg_init(struct device_node *node) mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base, &mt8173_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -973,27 +973,27 @@ static const struct mtk_pll_div_table mmpll_div_table[] = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0), - PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14), - PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0), - PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0), - PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0x00000001, 0, 21, 0x280, 4, 0x0, 0x284, 0), - PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0x00000001, 0, 21, 0x290, 4, 0x0, 0x294, 0), - 
PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0x00000001, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0x00000001, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0), - PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0x00000001, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0), - PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0x00000001, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0), + PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0, 0, 21, 0x204, 24, 0x0, 0x204, 0), + PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0, 0, 21, 0x214, 24, 0x0, 0x214, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000100, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000000, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14), + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0, 0, 21, 0x244, 24, 0x0, 0x244, 0, mmpll_div_table), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0, 0, 21, 0x250, 4, 0x0, 0x254, 0), + PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0, 0, 21, 0x260, 4, 0x0, 0x264, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0, 0, 21, 0x270, 4, 0x0, 0x274, 0), + PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0, 0, 21, 0x280, 4, 0x0, 0x284, 0), + PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0, 0, 21, 0x290, 4, 0x0, 0x294, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0), + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0), + PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0), }; static void __init mtk_apmixedsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; - struct clk *clk; + struct clk_hw *hw; int r, i; base = of_iomap(node, 0); @@ -1013,24 +1013,21 @@ static void __init mtk_apmixedsys_init(struct device_node *node) for (i = 0; i < ARRAY_SIZE(apmixed_usb); i++) { const struct mtk_clk_usb *cku = &apmixed_usb[i]; - clk = mtk_clk_register_ref2usb_tx(cku->name, cku->parent, - base + cku->reg_ofs); - - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %ld\n", cku->name, - PTR_ERR(clk)); + hw = mtk_clk_register_ref2usb_tx(cku->name, cku->parent, base + cku->reg_ofs); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %ld\n", cku->name, PTR_ERR(hw)); continue; } - clk_data->clks[cku->id] = clk; + clk_data->hws[cku->id] = hw; } - clk = clk_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0, - base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO, - NULL); - clk_data->clks[CLK_APMIXED_HDMI_REF] = clk; + hw = clk_hw_register_divider(NULL, "hdmi_ref", "tvdpll_594m", 0, + base + 0x40, 16, 3, CLK_DIVIDER_POWER_OF_TWO, + NULL); + clk_data->hws[CLK_APMIXED_HDMI_REF] = hw; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -1042,7 +1039,7 @@ CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys", static void __init mtk_imgsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); @@ -1050,7 +1047,7 @@ static void __init mtk_imgsys_init(struct device_node *node) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - r = of_clk_add_provider(node, 
of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", @@ -1060,7 +1057,7 @@ CLK_OF_DECLARE(mtk_imgsys, "mediatek,mt8173-imgsys", mtk_imgsys_init); static void __init mtk_vdecsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK); @@ -1068,7 +1065,7 @@ static void __init mtk_vdecsys_init(struct device_node *node) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -1077,7 +1074,7 @@ CLK_OF_DECLARE(mtk_vdecsys, "mediatek,mt8173-vdecsys", mtk_vdecsys_init); static void __init mtk_vencsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK); @@ -1085,7 +1082,7 @@ static void __init mtk_vencsys_init(struct device_node *node) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -1094,7 +1091,7 @@ CLK_OF_DECLARE(mtk_vencsys, "mediatek,mt8173-vencsys", mtk_vencsys_init); static void __init mtk_vencltsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK); @@ -1102,7 +1099,7 @@ static void __init mtk_vencltsys_init(struct device_node *node) mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c index c87450180b7b..b2d7746eddbe 100644 --- a/drivers/clk/mediatek/clk-mt8183-audio.c +++ b/drivers/clk/mediatek/clk-mt8183-audio.c @@ -69,7 +69,7 @@ static const struct mtk_gate audio_clks[] = { static int clk_mt8183_audio_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; struct device_node *node = pdev->dev.of_node; @@ -78,7 +78,7 @@ static int clk_mt8183_audio_probe(struct platform_device *pdev) mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) return r; diff --git a/drivers/clk/mediatek/clk-mt8183-cam.c b/drivers/clk/mediatek/clk-mt8183-cam.c index 8643802c4471..fcc598a45165 100644 --- a/drivers/clk/mediatek/clk-mt8183-cam.c +++ b/drivers/clk/mediatek/clk-mt8183-cam.c @@ -36,7 +36,7 @@ static const struct mtk_gate cam_clks[] = { static int clk_mt8183_cam_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_CAM_NR_CLK); @@ -44,7 +44,7 @@ static int clk_mt8183_cam_probe(struct platform_device 
*pdev) mtk_clk_register_gates(node, cam_clks, ARRAY_SIZE(cam_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_cam[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-img.c b/drivers/clk/mediatek/clk-mt8183-img.c index 470d676a4a10..eb2def2cf0ae 100644 --- a/drivers/clk/mediatek/clk-mt8183-img.c +++ b/drivers/clk/mediatek/clk-mt8183-img.c @@ -36,7 +36,7 @@ static const struct mtk_gate img_clks[] = { static int clk_mt8183_img_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK); @@ -44,7 +44,7 @@ static int clk_mt8183_img_probe(struct platform_device *pdev) mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_img[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-ipu0.c b/drivers/clk/mediatek/clk-mt8183-ipu0.c index c5cb76fc9e5e..b30fc9f47518 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu0.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu0.c @@ -29,7 +29,7 @@ static const struct mtk_gate ipu_core0_clks[] = { static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IPU_CORE0_NR_CLK); @@ -37,7 +37,7 @@ static int clk_mt8183_ipu_core0_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ipu_core0_clks, ARRAY_SIZE(ipu_core0_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_ipu_core0[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-ipu1.c b/drivers/clk/mediatek/clk-mt8183-ipu1.c index 8fd5fe002890..b378957e11d0 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu1.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu1.c @@ -29,7 +29,7 @@ static const struct mtk_gate ipu_core1_clks[] = { static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IPU_CORE1_NR_CLK); @@ -37,7 +37,7 @@ static int clk_mt8183_ipu_core1_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ipu_core1_clks, ARRAY_SIZE(ipu_core1_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_ipu_core1[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c index 3f37d0ef1df1..941b43ac8bec 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu_adl.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu_adl.c @@ -27,7 +27,7 @@ static const struct mtk_gate ipu_adl_clks[] = { static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IPU_ADL_NR_CLK); @@ -35,7 
+35,7 @@ static int clk_mt8183_ipu_adl_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ipu_adl_clks, ARRAY_SIZE(ipu_adl_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_ipu_adl[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c index 7e0eef79c461..ae82c2e17110 100644 --- a/drivers/clk/mediatek/clk-mt8183-ipu_conn.c +++ b/drivers/clk/mediatek/clk-mt8183-ipu_conn.c @@ -96,7 +96,7 @@ static const struct mtk_gate ipu_conn_clks[] = { static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_IPU_CONN_NR_CLK); @@ -104,7 +104,7 @@ static int clk_mt8183_ipu_conn_probe(struct platform_device *pdev) mtk_clk_register_gates(node, ipu_conn_clks, ARRAY_SIZE(ipu_conn_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_ipu_conn[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c index 37b4162c5882..d774edaf760b 100644 --- a/drivers/clk/mediatek/clk-mt8183-mfgcfg.c +++ b/drivers/clk/mediatek/clk-mt8183-mfgcfg.c @@ -28,7 +28,7 @@ static const struct mtk_gate mfg_clks[] = { static int clk_mt8183_mfg_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; pm_runtime_enable(&pdev->dev); @@ -38,7 +38,7 @@ static int clk_mt8183_mfg_probe(struct platform_device *pdev) mtk_clk_register_gates_with_dev(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data, &pdev->dev); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_mfg[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c index 9d60e09619c1..11ecc6fb0065 100644 --- a/drivers/clk/mediatek/clk-mt8183-mm.c +++ b/drivers/clk/mediatek/clk-mt8183-mm.c @@ -86,14 +86,14 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt8183_mm_drv = { diff --git a/drivers/clk/mediatek/clk-mt8183-vdec.c b/drivers/clk/mediatek/clk-mt8183-vdec.c index 6250fd1e0edc..0548cde159d0 100644 --- a/drivers/clk/mediatek/clk-mt8183-vdec.c +++ b/drivers/clk/mediatek/clk-mt8183-vdec.c @@ -40,7 +40,7 @@ static const struct mtk_gate vdec_clks[] = { static int clk_mt8183_vdec_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK); @@ -48,7 +48,7 @@ static int clk_mt8183_vdec_probe(struct platform_device 
*pdev) mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_vdec[] = { diff --git a/drivers/clk/mediatek/clk-mt8183-venc.c b/drivers/clk/mediatek/clk-mt8183-venc.c index 6678ef03fab2..f86ec607d87a 100644 --- a/drivers/clk/mediatek/clk-mt8183-venc.c +++ b/drivers/clk/mediatek/clk-mt8183-venc.c @@ -32,7 +32,7 @@ static const struct mtk_gate venc_clks[] = { static int clk_mt8183_venc_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK); @@ -40,7 +40,7 @@ static int clk_mt8183_venc_probe(struct platform_device *pdev) mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183_venc[] = { diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c index 68496554dd3d..b5c17988c337 100644 --- a/drivers/clk/mediatek/clk-mt8183.c +++ b/drivers/clk/mediatek/clk-mt8183.c @@ -1122,40 +1122,40 @@ static const struct mtk_pll_div_table mfgpll_div_table[] = { }; static const struct mtk_pll_data plls[] = { - PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0x00000001, + PLL_B(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0200, 0x020C, 0, HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0204, 24, 0x0, 0x0, 0, 0x0204, 0, 0, armpll_div_table), - PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0x00000001, + PLL_B(CLK_APMIXED_ARMPLL_L, "armpll_l", 0x0210, 0x021C, 0, HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0214, 24, 0x0, 0x0, 0, 0x0214, 0, 0, armpll_div_table), - PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0x00000001, + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0290, 0x029C, 0, HAVE_RST_BAR | PLL_AO, BIT(24), 22, 8, 0x0294, 24, 0x0, 0x0, 0, 0x0294, 0, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0x00000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0220, 0x022C, 0, HAVE_RST_BAR, BIT(24), 22, 8, 0x0224, 24, 0x0, 0x0, 0, 0x0224, 0, 0), - PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0x00000001, + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0230, 0x023C, 0, HAVE_RST_BAR, BIT(24), 22, 8, 0x0234, 24, 0x0, 0x0, 0, 0x0234, 0, 0), - PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0x00000001, + PLL_B(CLK_APMIXED_MFGPLL, "mfgpll", 0x0240, 0x024C, 0, 0, 0, 22, 8, 0x0244, 24, 0x0, 0x0, 0, 0x0244, 0, 0, mfgpll_div_table), - PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0x00000001, + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0250, 0x025C, 0, 0, 0, 22, 8, 0x0254, 24, 0x0, 0x0, 0, 0x0254, 0, 0), - PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0x00000001, + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0260, 0x026C, 0, 0, 0, 22, 8, 0x0264, 24, 0x0, 0x0, 0, 0x0264, 0, 0), - PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0x00000001, + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0270, 0x027C, 0, HAVE_RST_BAR, BIT(23), 22, 8, 0x0274, 24, 0x0, 0x0, 0, 0x0274, 0, 0), - PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0x00000001, + PLL(CLK_APMIXED_APLL1, "apll1", 0x02A0, 0x02B0, 0, 0, 0, 32, 8, 0x02A0, 1, 0x02A8, 0x0014, 0, 0x02A4, 0, 0x02A0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x02b4, 0x02c4, 0x00000001, + PLL(CLK_APMIXED_APLL2, 
"apll2", 0x02b4, 0x02c4, 0, 0, 0, 32, 8, 0x02B4, 1, 0x02BC, 0x0014, 1, 0x02B8, 0, 0x02B4), }; static int clk_mt8183_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); @@ -1165,10 +1165,10 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev) mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } -static struct clk_onecell_data *top_clk_data; +static struct clk_hw_onecell_data *top_clk_data; static void clk_mt8183_top_init_early(struct device_node *node) { @@ -1177,12 +1177,12 @@ static void clk_mt8183_top_init_early(struct device_node *node) top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); for (i = 0; i < CLK_TOP_NR_CLK; i++) - top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data); - of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data); } CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen", @@ -1217,12 +1217,13 @@ static int clk_mt8183_top_probe(struct platform_device *pdev) mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + top_clk_data); } static int clk_mt8183_infra_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -1231,7 +1232,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev) mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) { dev_err(&pdev->dev, "%s(): could not register clock provider: %d\n", @@ -1246,7 +1247,7 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev) static int clk_mt8183_peri_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK); @@ -1254,12 +1255,12 @@ static int clk_mt8183_peri_probe(struct platform_device *pdev) mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static int clk_mt8183_mcu_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; void __iomem *base; @@ -1272,7 +1273,7 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev) mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base, &mt8183_clk_lock, clk_data); - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static const struct of_device_id of_match_clk_mt8183[] = { diff --git a/drivers/clk/mediatek/clk-mt8186-apmixedsys.c 
b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c new file mode 100644 index 000000000000..e692a2a67ce1 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-apmixedsys.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-mtk.h" +#include "clk-pll.h" + +#define MT8186_PLL_FMAX (3800UL * MHZ) +#define MT8186_PLL_FMIN (1500UL * MHZ) +#define MT8186_INTEGER_BITS (8) + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \ + _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \ + _tuner_reg, _tuner_en_reg, _tuner_en_bit, \ + _pcw_reg) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = _rst_bar_mask, \ + .fmax = MT8186_PLL_FMAX, \ + .fmin = MT8186_PLL_FMIN, \ + .pcwbits = _pcwbits, \ + .pcwibits = MT8186_INTEGER_BITS, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .tuner_en_reg = _tuner_en_reg, \ + .tuner_en_bit = _tuner_en_bit, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = 0, \ + .pcw_chg_reg = 0, \ + .en_reg = 0, \ + .pll_en_bit = 0, \ + } + +static const struct mtk_pll_data plls[] = { + /* + * armpll_ll/armpll_bl/ccipll are main clock source of AP MCU, + * should not be closed in Linux world. + */ + PLL(CLK_APMIXED_ARMPLL_LL, "armpll_ll", 0x0204, 0x0210, 0, + PLL_AO, 0, 22, 0x0208, 24, 0, 0, 0, 0x0208), + PLL(CLK_APMIXED_ARMPLL_BL, "armpll_bl", 0x0214, 0x0220, 0, + PLL_AO, 0, 22, 0x0218, 24, 0, 0, 0, 0x0218), + PLL(CLK_APMIXED_CCIPLL, "ccipll", 0x0224, 0x0230, 0, + PLL_AO, 0, 22, 0x0228, 24, 0, 0, 0, 0x0228), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0244, 0x0250, 0xff000000, + HAVE_RST_BAR, BIT(23), 22, 0x0248, 24, 0, 0, 0, 0x0248), + PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0324, 0x0330, 0xff000000, + HAVE_RST_BAR, BIT(23), 22, 0x0328, 24, 0, 0, 0, 0x0328), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x038C, 0x0398, 0, + 0, 0, 22, 0x0390, 24, 0, 0, 0, 0x0390), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0254, 0x0260, 0, + 0, 0, 22, 0x0258, 24, 0, 0, 0, 0x0258), + PLL(CLK_APMIXED_NNAPLL, "nnapll", 0x035C, 0x0368, 0, + 0, 0, 22, 0x0360, 24, 0, 0, 0, 0x0360), + PLL(CLK_APMIXED_NNA2PLL, "nna2pll", 0x036C, 0x0378, 0, + 0, 0, 22, 0x0370, 24, 0, 0, 0, 0x0370), + PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x0304, 0x0310, 0, + 0, 0, 22, 0x0308, 24, 0, 0, 0, 0x0308), + PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0314, 0x0320, 0, + 0, 0, 22, 0x0318, 24, 0, 0, 0, 0x0318), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x0264, 0x0270, 0, + 0, 0, 22, 0x0268, 24, 0, 0, 0, 0x0268), + PLL(CLK_APMIXED_APLL1, "apll1", 0x0334, 0x0344, 0, + 0, 0, 32, 0x0338, 24, 0x0040, 0x000C, 0, 0x033C), + PLL(CLK_APMIXED_APLL2, "apll2", 0x0348, 0x0358, 0, + 0, 0, 32, 0x034C, 24, 0x0044, 0x000C, 5, 0x0350), +}; + +static const struct of_device_id of_match_clk_mt8186_apmixed[] = { + { .compatible = "mediatek,mt8186-apmixedsys", }, + {} +}; + +static int clk_mt8186_apmixed_probe(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + int r; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + if (!clk_data) + return -ENOMEM; + + r = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + if (r) + goto free_apmixed_data; + + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + if (r) + goto unregister_plls; + + 
platform_set_drvdata(pdev, clk_data); + + return r; + +unregister_plls: + mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data); +free_apmixed_data: + mtk_free_clk_data(clk_data); + return r; +} + +static int clk_mt8186_apmixed_remove(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); + + of_clk_del_provider(node); + mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data); + mtk_free_clk_data(clk_data); + + return 0; +} + +static struct platform_driver clk_mt8186_apmixed_drv = { + .probe = clk_mt8186_apmixed_probe, + .remove = clk_mt8186_apmixed_remove, + .driver = { + .name = "clk-mt8186-apmixed", + .of_match_table = of_match_clk_mt8186_apmixed, + }, +}; +builtin_platform_driver(clk_mt8186_apmixed_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-cam.c b/drivers/clk/mediatek/clk-mt8186-cam.c new file mode 100644 index 000000000000..9ec345a2ce66 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-cam.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs cam_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_CAM(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate cam_clks[] = { + GATE_CAM(CLK_CAM_LARB13, "cam_larb13", "top_cam", 0), + GATE_CAM(CLK_CAM_DFP_VAD, "cam_dfp_vad", "top_cam", 1), + GATE_CAM(CLK_CAM_LARB14, "cam_larb14", "top_cam", 2), + GATE_CAM(CLK_CAM, "cam", "top_cam", 6), + GATE_CAM(CLK_CAMTG, "camtg", "top_cam", 7), + GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "top_cam", 8), + GATE_CAM(CLK_CAMSV1, "camsv1", "top_cam", 10), + GATE_CAM(CLK_CAMSV2, "camsv2", "top_cam", 11), + GATE_CAM(CLK_CAMSV3, "camsv3", "top_cam", 12), + GATE_CAM(CLK_CAM_CCU0, "cam_ccu0", "top_cam", 13), + GATE_CAM(CLK_CAM_CCU1, "cam_ccu1", "top_cam", 14), + GATE_CAM(CLK_CAM_MRAW0, "cam_mraw0", "top_cam", 15), + GATE_CAM(CLK_CAM_FAKE_ENG, "cam_fake_eng", "top_cam", 17), + GATE_CAM(CLK_CAM_CCU_GALS, "cam_ccu_gals", "top_cam", 18), + GATE_CAM(CLK_CAM2MM_GALS, "cam2mm_gals", "top_cam", 19), +}; + +static const struct mtk_gate cam_rawa_clks[] = { + GATE_CAM(CLK_CAM_RAWA_LARBX_RAWA, "cam_rawa_larbx_rawa", "top_cam", 0), + GATE_CAM(CLK_CAM_RAWA, "cam_rawa", "top_cam", 1), + GATE_CAM(CLK_CAM_RAWA_CAMTG_RAWA, "cam_rawa_camtg_rawa", "top_cam", 2), +}; + +static const struct mtk_gate cam_rawb_clks[] = { + GATE_CAM(CLK_CAM_RAWB_LARBX_RAWB, "cam_rawb_larbx_rawb", "top_cam", 0), + GATE_CAM(CLK_CAM_RAWB, "cam_rawb", "top_cam", 1), + GATE_CAM(CLK_CAM_RAWB_CAMTG_RAWB, "cam_rawb_camtg_rawb", "top_cam", 2), +}; + +static const struct mtk_clk_desc cam_desc = { + .clks = cam_clks, + .num_clks = ARRAY_SIZE(cam_clks), +}; + +static const struct mtk_clk_desc cam_rawa_desc = { + .clks = cam_rawa_clks, + .num_clks = ARRAY_SIZE(cam_rawa_clks), +}; + +static const struct mtk_clk_desc cam_rawb_desc = { + .clks = cam_rawb_clks, + .num_clks = ARRAY_SIZE(cam_rawb_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_cam[] = { + { + .compatible = "mediatek,mt8186-camsys", + .data = &cam_desc, + }, { + .compatible = "mediatek,mt8186-camsys_rawa", + .data = &cam_rawa_desc, + }, { + .compatible = 
"mediatek,mt8186-camsys_rawb", + .data = &cam_rawb_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_cam_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-cam", + .of_match_table = of_match_clk_mt8186_cam, + }, +}; +builtin_platform_driver(clk_mt8186_cam_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-img.c b/drivers/clk/mediatek/clk-mt8186-img.c new file mode 100644 index 000000000000..08a625475aee --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-img.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs img_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IMG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate img1_clks[] = { + GATE_IMG(CLK_IMG1_LARB9_IMG1, "img1_larb9_img1", "top_img1", 0), + GATE_IMG(CLK_IMG1_LARB10_IMG1, "img1_larb10_img1", "top_img1", 1), + GATE_IMG(CLK_IMG1_DIP, "img1_dip", "top_img1", 2), + GATE_IMG(CLK_IMG1_GALS_IMG1, "img1_gals_img1", "top_img1", 12), +}; + +static const struct mtk_gate img2_clks[] = { + GATE_IMG(CLK_IMG2_LARB9_IMG2, "img2_larb9_img2", "top_img1", 0), + GATE_IMG(CLK_IMG2_LARB10_IMG2, "img2_larb10_img2", "top_img1", 1), + GATE_IMG(CLK_IMG2_MFB, "img2_mfb", "top_img1", 6), + GATE_IMG(CLK_IMG2_WPE, "img2_wpe", "top_img1", 7), + GATE_IMG(CLK_IMG2_MSS, "img2_mss", "top_img1", 8), + GATE_IMG(CLK_IMG2_GALS_IMG2, "img2_gals_img2", "top_img1", 12), +}; + +static const struct mtk_clk_desc img1_desc = { + .clks = img1_clks, + .num_clks = ARRAY_SIZE(img1_clks), +}; + +static const struct mtk_clk_desc img2_desc = { + .clks = img2_clks, + .num_clks = ARRAY_SIZE(img2_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_img[] = { + { + .compatible = "mediatek,mt8186-imgsys1", + .data = &img1_desc, + }, { + .compatible = "mediatek,mt8186-imgsys2", + .data = &img2_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_img_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-img", + .of_match_table = of_match_clk_mt8186_img, + }, +}; +builtin_platform_driver(clk_mt8186_img_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c new file mode 100644 index 000000000000..47f2e480a05e --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-imp_iic_wrap.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs imp_iic_wrap_cg_regs = { + .set_ofs = 0xe08, + .clr_ofs = 0xe04, + .sta_ofs = 0xe00, +}; + +#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate imp_iic_wrap_clks[] = { + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C0, + "imp_iic_wrap_ap_clock_i2c0", "infra_ao_i2c_ap", 0), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C1, + "imp_iic_wrap_ap_clock_i2c1", "infra_ao_i2c_ap", 1), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C2, + "imp_iic_wrap_ap_clock_i2c2", "infra_ao_i2c_ap", 2), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C3, + "imp_iic_wrap_ap_clock_i2c3", "infra_ao_i2c_ap", 3), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C4, + "imp_iic_wrap_ap_clock_i2c4", "infra_ao_i2c_ap", 4), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C5, + "imp_iic_wrap_ap_clock_i2c5", "infra_ao_i2c_ap", 5), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C6, + "imp_iic_wrap_ap_clock_i2c6", "infra_ao_i2c_ap", 6), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C7, + "imp_iic_wrap_ap_clock_i2c7", "infra_ao_i2c_ap", 7), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C8, + "imp_iic_wrap_ap_clock_i2c8", "infra_ao_i2c_ap", 8), + GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_AP_CLOCK_I2C9, + "imp_iic_wrap_ap_clock_i2c9", "infra_ao_i2c_ap", 9), +}; + +static const struct mtk_clk_desc imp_iic_wrap_desc = { + .clks = imp_iic_wrap_clks, + .num_clks = ARRAY_SIZE(imp_iic_wrap_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_imp_iic_wrap[] = { + { + .compatible = "mediatek,mt8186-imp_iic_wrap", + .data = &imp_iic_wrap_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_imp_iic_wrap_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-imp_iic_wrap", + .of_match_table = of_match_clk_mt8186_imp_iic_wrap, + }, +}; +builtin_platform_driver(clk_mt8186_imp_iic_wrap_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-infra_ao.c b/drivers/clk/mediatek/clk-mt8186-infra_ao.c new file mode 100644 index 000000000000..2a7adc25abaa --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-infra_ao.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
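For reference, the conversion repeated throughout the earlier hunks, from clk_onecell_data with of_clk_add_provider() to clk_hw_onecell_data with of_clk_add_hw_provider(), reduces to the sketch below. It is a distilled illustration rather than code taken from the patch: bar_clks, CLK_BAR_NR_CLK and CLK_BAR_CRITICAL are placeholder names standing in for a driver's gate table and clock indices.

// Sketch of the recurring clk_hw provider conversion (placeholder names).
static int clk_bar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct clk_hw_onecell_data *clk_data;
	int r;

	/* Allocates hw slots for CLK_BAR_NR_CLK entries. */
	clk_data = mtk_alloc_clk_data(CLK_BAR_NR_CLK);
	if (!clk_data)
		return -ENOMEM;

	mtk_clk_register_gates(node, bar_clks, ARRAY_SIZE(bar_clks), clk_data);

	/* Indexing changes from clk_data->clks[i] to clk_data->hws[i]->clk. */
	clk_prepare_enable(clk_data->hws[CLK_BAR_CRITICAL]->clk);

	/* of_clk_hw_onecell_get resolves #clock-cells consumers to clk_hw. */
	r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
	if (r)
		dev_err(&pdev->dev,
			"could not register clock provider: %d\n", r);

	return r;
}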
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs infra_ao0_cg_regs = { + .set_ofs = 0x80, + .clr_ofs = 0x84, + .sta_ofs = 0x90, +}; + +static const struct mtk_gate_regs infra_ao1_cg_regs = { + .set_ofs = 0x88, + .clr_ofs = 0x8c, + .sta_ofs = 0x94, +}; + +static const struct mtk_gate_regs infra_ao2_cg_regs = { + .set_ofs = 0xa4, + .clr_ofs = 0xa8, + .sta_ofs = 0xac, +}; + +static const struct mtk_gate_regs infra_ao3_cg_regs = { + .set_ofs = 0xc0, + .clr_ofs = 0xc4, + .sta_ofs = 0xc8, +}; + +#define GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao0_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, _flag) + +#define GATE_INFRA_AO0(_id, _name, _parent, _shift) \ + GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, 0) + +#define GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao1_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, _flag) + +#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \ + GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0) + +#define GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao2_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, _flag) + +#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \ + GATE_INFRA_AO2_FLAGS(_id, _name, _parent, _shift, 0) + +#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \ + GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \ + &mtk_clk_gate_ops_setclr, _flag) + +#define GATE_INFRA_AO3(_id, _name, _parent, _shift) \ + GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, 0) + +static const struct mtk_gate infra_ao_clks[] = { + /* INFRA_AO0 */ + GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_TMR, "infra_ao_pmic_tmr", "top_pwrap_ulposc", 0), + GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_AP, "infra_ao_pmic_ap", "top_pwrap_ulposc", 1), + GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_MD, "infra_ao_pmic_md", "top_pwrap_ulposc", 2), + GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_CONN, "infra_ao_pmic_conn", "top_pwrap_ulposc", 3), + /* infra_ao_scp_core is main clock in always-on co-processor.
*/ + GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SCP_CORE, + "infra_ao_scp_core", "top_scp", 4, CLK_IS_CRITICAL), + /* infra_ao_sej is main clock for secure engine with JTAG support */ + GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SEJ, + "infra_ao_sej", "top_axi", 5, CLK_IS_CRITICAL), + GATE_INFRA_AO0(CLK_INFRA_AO_APXGPT, "infra_ao_apxgpt", "top_axi", 6), + GATE_INFRA_AO0(CLK_INFRA_AO_ICUSB, "infra_ao_icusb", "top_axi", 8), + GATE_INFRA_AO0(CLK_INFRA_AO_GCE, "infra_ao_gce", "top_axi", 9), + GATE_INFRA_AO0(CLK_INFRA_AO_THERM, "infra_ao_therm", "top_axi", 10), + GATE_INFRA_AO0(CLK_INFRA_AO_I2C_AP, "infra_ao_i2c_ap", "top_i2c", 11), + GATE_INFRA_AO0(CLK_INFRA_AO_I2C_CCU, "infra_ao_i2c_ccu", "top_i2c", 12), + GATE_INFRA_AO0(CLK_INFRA_AO_I2C_SSPM, "infra_ao_i2c_sspm", "top_i2c", 13), + GATE_INFRA_AO0(CLK_INFRA_AO_I2C_RSV, "infra_ao_i2c_rsv", "top_i2c", 14), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM_HCLK, "infra_ao_pwm_hclk", "top_axi", 15), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM1, "infra_ao_pwm1", "top_pwm", 16), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM2, "infra_ao_pwm2", "top_pwm", 17), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM3, "infra_ao_pwm3", "top_pwm", 18), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM4, "infra_ao_pwm4", "top_pwm", 19), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM5, "infra_ao_pwm5", "top_pwm", 20), + GATE_INFRA_AO0(CLK_INFRA_AO_PWM, "infra_ao_pwm", "top_pwm", 21), + GATE_INFRA_AO0(CLK_INFRA_AO_UART0, "infra_ao_uart0", "top_uart", 22), + GATE_INFRA_AO0(CLK_INFRA_AO_UART1, "infra_ao_uart1", "top_uart", 23), + GATE_INFRA_AO0(CLK_INFRA_AO_UART2, "infra_ao_uart2", "top_uart", 24), + GATE_INFRA_AO0(CLK_INFRA_AO_GCE_26M, "infra_ao_gce_26m", "clk26m", 27), + GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_dma", "top_axi", 28), + GATE_INFRA_AO0(CLK_INFRA_AO_BTIF, "infra_ao_btif", "top_axi", 31), + /* INFRA_AO1 */ + GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1), + GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc5hclk", 2), + GATE_INFRA_AO1(CLK_INFRA_AO_MSDCFDE, "infra_ao_msdcfde", "top_aes_msdcfde", 3), + GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4), + /* infra_ao_dvfsrc is for internal DVFS usage, should not be handled by Linux */ + GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DVFSRC, + "infra_ao_dvfsrc", "top_dvfsrc", 7, CLK_IS_CRITICAL), + GATE_INFRA_AO1(CLK_INFRA_AO_GCPU, "infra_ao_gcpu", "top_axi", 8), + GATE_INFRA_AO1(CLK_INFRA_AO_TRNG, "infra_ao_trng", "top_axi", 9), + GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC, "infra_ao_auxadc", "clk26m", 10), + GATE_INFRA_AO1(CLK_INFRA_AO_CPUM, "infra_ao_cpum", "top_axi", 11), + GATE_INFRA_AO1(CLK_INFRA_AO_CCIF1_AP, "infra_ao_ccif1_ap", "top_axi", 12), + GATE_INFRA_AO1(CLK_INFRA_AO_CCIF1_MD, "infra_ao_ccif1_md", "top_axi", 13), + GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC_MD, "infra_ao_auxadc_md", "clk26m", 14), + GATE_INFRA_AO1(CLK_INFRA_AO_AP_DMA, "infra_ao_ap_dma", "top_axi", 18), + GATE_INFRA_AO1(CLK_INFRA_AO_XIU, "infra_ao_xiu", "top_axi", 19), + /* infra_ao_device_apc is for device access permission control module */ + GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DEVICE_APC, + "infra_ao_dapc", "top_axi", 20, CLK_IS_CRITICAL), + GATE_INFRA_AO1(CLK_INFRA_AO_CCIF_AP, "infra_ao_ccif_ap", "top_axi", 23), + GATE_INFRA_AO1(CLK_INFRA_AO_DEBUGTOP, "infra_ao_debugtop", "top_axi", 24), + GATE_INFRA_AO1(CLK_INFRA_AO_AUDIO, "infra_ao_audio", "top_axi", 25), + GATE_INFRA_AO1(CLK_INFRA_AO_CCIF_MD, "infra_ao_ccif_md", "top_axi", 26), + GATE_INFRA_AO1(CLK_INFRA_AO_DXCC_SEC_CORE, "infra_ao_secore", "top_dxcc", 27), + GATE_INFRA_AO1(CLK_INFRA_AO_DXCC_AO, "infra_ao_dxcc_ao", "top_dxcc", 28), + 
GATE_INFRA_AO1(CLK_INFRA_AO_IMP_IIC, "infra_ao_imp_iic", "top_axi", 29), + GATE_INFRA_AO1(CLK_INFRA_AO_DRAMC_F26M, "infra_ao_dramc26", "clk26m", 31), + /* INFRA_AO2 */ + GATE_INFRA_AO2(CLK_INFRA_AO_RG_PWM_FBCLK6, "infra_ao_pwm_fbclk6", "clk26m", 0), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_HCLK, "infra_ao_ssusb_hclk", "top_axi", 1), + GATE_INFRA_AO2(CLK_INFRA_AO_DISP_PWM, "infra_ao_disp_pwm", "top_disp_pwm", 2), + GATE_INFRA_AO2(CLK_INFRA_AO_CLDMA_BCLK, "infra_ao_cldmabclk", "top_axi", 3), + GATE_INFRA_AO2(CLK_INFRA_AO_AUDIO_26M_BCLK, "infra_ao_audio26m", "clk26m", 4), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_HCLK, "infra_ao_ssusb_p1_hclk", "top_axi", 5), + GATE_INFRA_AO2(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "top_spi", 6), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C4, "infra_ao_i2c4", "top_i2c", 7), + GATE_INFRA_AO2(CLK_INFRA_AO_MODEM_TEMP_SHARE, "infra_ao_mdtemp", "clk26m", 8), + GATE_INFRA_AO2(CLK_INFRA_AO_SPI2, "infra_ao_spi2", "top_spi", 9), + GATE_INFRA_AO2(CLK_INFRA_AO_SPI3, "infra_ao_spi3", "top_spi", 10), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_REF, "infra_ao_ssusb_ref", "clk26m", 11), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_XHCI, "infra_ao_ssusb_xhci", "top_ssusb_xhci", 12), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_REF, "infra_ao_ssusb_p1_ref", "clk26m", 13), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_XHCI, + "infra_ao_ssusb_p1_xhci", "top_ssusb_xhci_1p", 14), + /* infra_ao_sspm is main clock in co-processor, should not be closed in Linux. */ + GATE_INFRA_AO2_FLAGS(CLK_INFRA_AO_SSPM, "infra_ao_sspm", "top_sspm", 15, CLK_IS_CRITICAL), + GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_TOP_P1_SYS, + "infra_ao_ssusb_p1_sys", "top_ssusb_1p", 16), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C5, "infra_ao_i2c5", "top_i2c", 18), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C5_ARBITER, "infra_ao_i2c5a", "top_i2c", 19), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C5_IMM, "infra_ao_i2c5_imm", "top_i2c", 20), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C1_ARBITER, "infra_ao_i2c1a", "top_i2c", 21), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C1_IMM, "infra_ao_i2c1_imm", "top_i2c", 22), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C2_ARBITER, "infra_ao_i2c2a", "top_i2c", 23), + GATE_INFRA_AO2(CLK_INFRA_AO_I2C2_IMM, "infra_ao_i2c2_imm", "top_i2c", 24), + GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25), + GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26), + GATE_INFRA_AO2(CLK_INFRA_AO_CQ_DMA, "infra_ao_cq_dma", "top_axi", 27), + GATE_INFRA_AO2(CLK_INFRA_AO_BIST2FPC, "infra_ao_bist2fpc", "f_bist2fpc_ck", 28), + /* INFRA_AO3 */ + GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SELF, "infra_ao_msdc0sf", "top_msdc50_0", 0), + GATE_INFRA_AO3(CLK_INFRA_AO_SPINOR, "infra_ao_spinor", "top_spinor", 1), + /* + * infra_ao_sspm_26m/infra_ao_sspm_32k are main clocks in co-processor, + * should not be closed in Linux. 
+ */ + GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SSPM_26M_SELF, "infra_ao_sspm_26m", "clk26m", 3, + CLK_IS_CRITICAL), + GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SSPM_32K_SELF, "infra_ao_sspm_32k", "clk32k", 4, + CLK_IS_CRITICAL), + GATE_INFRA_AO3(CLK_INFRA_AO_I2C6, "infra_ao_i2c6", "top_i2c", 6), + GATE_INFRA_AO3(CLK_INFRA_AO_AP_MSDC0, "infra_ao_ap_msdc0", "top_axi", 7), + GATE_INFRA_AO3(CLK_INFRA_AO_MD_MSDC0, "infra_ao_md_msdc0", "top_axi", 8), + GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SRC, "infra_ao_msdc0_clk", "top_msdc50_0", 9), + GATE_INFRA_AO3(CLK_INFRA_AO_MSDC1_SRC, "infra_ao_msdc1_clk", "top_msdc30_1", 10), + /* infra_ao_sej_f13m is main clock for secure engine with JTAG support */ + GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_SEJ_F13M, + "infra_ao_sej_f13m", "clk26m", 15, CLK_IS_CRITICAL), + /* infra_ao_aes_top0_bclk is for secure encryption */ + GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_AES_TOP0_BCLK, + "infra_ao_aes_top0_bclk", "top_axi", 16, CLK_IS_CRITICAL), + GATE_INFRA_AO3(CLK_INFRA_AO_MCU_PM_BCLK, "infra_ao_mcu_pm_bclk", "top_axi", 17), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF2_AP, "infra_ao_ccif2_ap", "top_axi", 18), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF2_MD, "infra_ao_ccif2_md", "top_axi", 19), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF3_AP, "infra_ao_ccif3_ap", "top_axi", 20), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF3_MD, "infra_ao_ccif3_md", "top_axi", 21), + GATE_INFRA_AO3(CLK_INFRA_AO_FADSP_26M, "infra_ao_fadsp_26m", "clk26m", 22), + GATE_INFRA_AO3(CLK_INFRA_AO_FADSP_32K, "infra_ao_fadsp_32k", "clk32k", 23), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF4_AP, "infra_ao_ccif4_ap", "top_axi", 24), + GATE_INFRA_AO3(CLK_INFRA_AO_CCIF4_MD, "infra_ao_ccif4_md", "top_axi", 25), + GATE_INFRA_AO3(CLK_INFRA_AO_FADSP, "infra_ao_fadsp", "top_audiodsp", 27), + GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_133M, "infra_ao_flashif_133m", "top_axi", 28), + GATE_INFRA_AO3(CLK_INFRA_AO_FLASHIF_66M, "infra_ao_flashif_66m", "top_axi", 29), +}; + +static const struct mtk_clk_desc infra_ao_desc = { + .clks = infra_ao_clks, + .num_clks = ARRAY_SIZE(infra_ao_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_infra_ao[] = { + { + .compatible = "mediatek,mt8186-infracfg_ao", + .data = &infra_ao_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_infra_ao_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-infra-ao", + .of_match_table = of_match_clk_mt8186_infra_ao, + }, +}; +builtin_platform_driver(clk_mt8186_infra_ao_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-ipe.c b/drivers/clk/mediatek/clk-mt8186-ipe.c new file mode 100644 index 000000000000..8fca148effa6 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-ipe.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs ipe_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_IPE(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate ipe_clks[] = { + GATE_IPE(CLK_IPE_LARB19, "ipe_larb19", "top_ipe", 0), + GATE_IPE(CLK_IPE_LARB20, "ipe_larb20", "top_ipe", 1), + GATE_IPE(CLK_IPE_SMI_SUBCOM, "ipe_smi_subcom", "top_ipe", 2), + GATE_IPE(CLK_IPE_FD, "ipe_fd", "top_ipe", 3), + GATE_IPE(CLK_IPE_FE, "ipe_fe", "top_ipe", 4), + GATE_IPE(CLK_IPE_RSC, "ipe_rsc", "top_ipe", 5), + GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 6), + GATE_IPE(CLK_IPE_GALS_IPE, "ipe_gals_ipe", "top_img1", 8), +}; + +static const struct mtk_clk_desc ipe_desc = { + .clks = ipe_clks, + .num_clks = ARRAY_SIZE(ipe_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_ipe[] = { + { + .compatible = "mediatek,mt8186-ipesys", + .data = &ipe_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_ipe_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-ipe", + .of_match_table = of_match_clk_mt8186_ipe, + }, +}; +builtin_platform_driver(clk_mt8186_ipe_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c new file mode 100644 index 000000000000..dfc305c1fc5d --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-mcu.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-mtk.h" + +static const char * const mcu_armpll_ll_parents[] = { + "clk26m", + "armpll_ll", + "mainpll", + "univpll_d2" +}; + +static const char * const mcu_armpll_bl_parents[] = { + "clk26m", + "armpll_bl", + "mainpll", + "univpll_d2" +}; + +static const char * const mcu_armpll_bus_parents[] = { + "clk26m", + "ccipll", + "mainpll", + "univpll_d2" +}; + +/* + * We only configure the CPU muxes when adjusting the CPU frequency in the MediaTek CPUFreq driver. + * Other fields like divider always keep the same value.
(set once in bootloader) + */ +static struct mtk_composite mcu_muxes[] = { + /* CPU_PLLDIV_CFG0 */ + MUX(CLK_MCU_ARMPLL_LL_SEL, "mcu_armpll_ll_sel", mcu_armpll_ll_parents, 0x2A0, 9, 2), + /* CPU_PLLDIV_CFG1 */ + MUX(CLK_MCU_ARMPLL_BL_SEL, "mcu_armpll_bl_sel", mcu_armpll_bl_parents, 0x2A4, 9, 2), + /* BUS_PLLDIV_CFG */ + MUX(CLK_MCU_ARMPLL_BUS_SEL, "mcu_armpll_bus_sel", mcu_armpll_bus_parents, 0x2E0, 9, 2), +}; + +static const struct of_device_id of_match_clk_mt8186_mcu[] = { + { .compatible = "mediatek,mt8186-mcusys", }, + {} +}; + +static int clk_mt8186_mcu_probe(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + int r; + void __iomem *base; + + clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK); + if (!clk_data) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) { + r = PTR_ERR(base); + goto free_mcu_data; + } + + r = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base, + NULL, clk_data); + if (r) + goto free_mcu_data; + + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + if (r) + goto unregister_composite_muxes; + + platform_set_drvdata(pdev, clk_data); + + return r; + +unregister_composite_muxes: + mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data); +free_mcu_data: + mtk_free_clk_data(clk_data); + return r; +} + +static int clk_mt8186_mcu_remove(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + + of_clk_del_provider(node); + mtk_clk_unregister_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), clk_data); + mtk_free_clk_data(clk_data); + + return 0; +} + +static struct platform_driver clk_mt8186_mcu_drv = { + .probe = clk_mt8186_mcu_probe, + .remove = clk_mt8186_mcu_remove, + .driver = { + .name = "clk-mt8186-mcu", + .of_match_table = of_match_clk_mt8186_mcu, + }, +}; +builtin_platform_driver(clk_mt8186_mcu_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-mdp.c b/drivers/clk/mediatek/clk-mt8186-mdp.c new file mode 100644 index 000000000000..05174088ef20 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-mdp.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs mdp0_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x108, + .sta_ofs = 0x100, +}; + +static const struct mtk_gate_regs mdp2_cg_regs = { + .set_ofs = 0x124, + .clr_ofs = 0x128, + .sta_ofs = 0x120, +}; + +#define GATE_MDP0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mdp0_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +#define GATE_MDP2(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mdp2_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mdp_clks[] = { + /* MDP0 */ + GATE_MDP0(CLK_MDP_RDMA0, "mdp_rdma0", "top_mdp", 0), + GATE_MDP0(CLK_MDP_TDSHP0, "mdp_tdshp0", "top_mdp", 1), + GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "top_mdp", 2), + GATE_MDP0(CLK_MDP_IMG_DL_ASYNC1, "mdp_img_dl_async1", "top_mdp", 3), + GATE_MDP0(CLK_MDP_DISP_RDMA, "mdp_disp_rdma", "top_mdp", 4), + GATE_MDP0(CLK_MDP_HMS, "mdp_hms", "top_mdp", 5), + GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "top_mdp", 6), + GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "top_mdp", 7), + GATE_MDP0(CLK_MDP_WROT0, "mdp_wrot0", "top_mdp", 8), + GATE_MDP0(CLK_MDP_RSZ0, "mdp_rsz0", "top_mdp", 9), + GATE_MDP0(CLK_MDP_HDR0, "mdp_hdr0", "top_mdp", 10), + GATE_MDP0(CLK_MDP_MUTEX0, "mdp_mutex0", "top_mdp", 11), + GATE_MDP0(CLK_MDP_WROT1, "mdp_wrot1", "top_mdp", 12), + GATE_MDP0(CLK_MDP_RSZ1, "mdp_rsz1", "top_mdp", 13), + GATE_MDP0(CLK_MDP_FAKE_ENG0, "mdp_fake_eng0", "top_mdp", 14), + GATE_MDP0(CLK_MDP_AAL0, "mdp_aal0", "top_mdp", 15), + GATE_MDP0(CLK_MDP_DISP_WDMA, "mdp_disp_wdma", "top_mdp", 16), + GATE_MDP0(CLK_MDP_COLOR, "mdp_color", "top_mdp", 17), + GATE_MDP0(CLK_MDP_IMG_DL_ASYNC2, "mdp_img_dl_async2", "top_mdp", 18), + /* MDP2 */ + GATE_MDP2(CLK_MDP_IMG_DL_RELAY0_ASYNC0, "mdp_img_dl_rel0_as0", "top_mdp", 0), + GATE_MDP2(CLK_MDP_IMG_DL_RELAY1_ASYNC1, "mdp_img_dl_rel1_as1", "top_mdp", 8), + GATE_MDP2(CLK_MDP_IMG_DL_RELAY2_ASYNC2, "mdp_img_dl_rel2_as2", "top_mdp", 24), +}; + +static const struct mtk_clk_desc mdp_desc = { + .clks = mdp_clks, + .num_clks = ARRAY_SIZE(mdp_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_mdp[] = { + { + .compatible = "mediatek,mt8186-mdpsys", + .data = &mdp_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_mdp_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-mdp", + .of_match_table = of_match_clk_mt8186_mdp, + }, +}; +builtin_platform_driver(clk_mt8186_mdp_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-mfg.c b/drivers/clk/mediatek/clk-mt8186-mfg.c new file mode 100644 index 000000000000..f1f92216f894 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-mfg.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs mfg_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_MFG(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mfg_clks[] = { + GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg", 0), +}; + +static const struct mtk_clk_desc mfg_desc = { + .clks = mfg_clks, + .num_clks = ARRAY_SIZE(mfg_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_mfg[] = { + { + .compatible = "mediatek,mt8186-mfgsys", + .data = &mfg_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_mfg_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-mfg", + .of_match_table = of_match_clk_mt8186_mfg, + }, +}; +builtin_platform_driver(clk_mt8186_mfg_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c new file mode 100644 index 000000000000..1d33be407947 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-mm.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs mm0_cg_regs = { + .set_ofs = 0x104, + .clr_ofs = 0x108, + .sta_ofs = 0x100, +}; + +static const struct mtk_gate_regs mm1_cg_regs = { + .set_ofs = 0x1a4, + .clr_ofs = 0x1a8, + .sta_ofs = 0x1a0, +}; + +#define GATE_MM0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm0_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +#define GATE_MM1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &mm1_cg_regs, _shift, &mtk_clk_gate_ops_setclr) + +static const struct mtk_gate mm_clks[] = { + /* MM0 */ + GATE_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "top_disp", 0), + GATE_MM0(CLK_MM_APB_MM_BUS, "mm_apb_mm_bus", "top_disp", 1), + GATE_MM0(CLK_MM_DISP_OVL0, "mm_disp_ovl0", "top_disp", 2), + GATE_MM0(CLK_MM_DISP_RDMA0, "mm_disp_rdma0", "top_disp", 3), + GATE_MM0(CLK_MM_DISP_OVL0_2L, "mm_disp_ovl0_2l", "top_disp", 4), + GATE_MM0(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "top_disp", 5), + GATE_MM0(CLK_MM_DISP_RSZ0, "mm_disp_rsz0", "top_disp", 7), + GATE_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "top_disp", 8), + GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "top_disp", 9), + GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "top_disp", 10), + GATE_MM0(CLK_MM_SMI_INFRA, "mm_smi_infra", "top_disp", 11), + GATE_MM0(CLK_MM_DISP_DSC_WRAP0, "mm_disp_dsc_wrap0", "top_disp", 12), + GATE_MM0(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "top_disp", 13), + GATE_MM0(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "top_disp", 14), + GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "top_disp", 16), + GATE_MM0(CLK_MM_SMI_COMMON, "mm_smi_common", "top_disp", 17), + GATE_MM0(CLK_MM_DSI0, "mm_dsi0", "top_disp", 19), + GATE_MM0(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "top_disp", 20), + GATE_MM0(CLK_MM_DISP_FAKE_ENG1, "mm_disp_fake_eng1", "top_disp", 21), + GATE_MM0(CLK_MM_SMI_GALS, "mm_smi_gals", "top_disp", 22), + GATE_MM0(CLK_MM_SMI_IOMMU, "mm_smi_iommu", "top_disp", 24), + 
GATE_MM0(CLK_MM_DISP_RDMA1, "mm_disp_rdma1", "top_disp", 25), + GATE_MM0(CLK_MM_DISP_DPI, "mm_disp_dpi", "top_disp", 26), + /* MM1 */ + GATE_MM1(CLK_MM_DSI0_DSI_CK_DOMAIN, "mm_dsi0_dsi_domain", "top_disp", 0), + GATE_MM1(CLK_MM_DISP_26M, "mm_disp_26m_ck", "top_disp", 10), +}; + +static int clk_mt8186_mm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->parent->of_node; + struct clk_hw_onecell_data *clk_data; + int r; + + clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); + if (!clk_data) + return -ENOMEM; + + r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data); + if (r) + goto free_mm_data; + + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + if (r) + goto unregister_gates; + + platform_set_drvdata(pdev, clk_data); + + return r; + +unregister_gates: + mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data); +free_mm_data: + mtk_free_clk_data(clk_data); + return r; +} + +static int clk_mt8186_mm_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->parent->of_node; + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); + + of_clk_del_provider(node); + mtk_clk_unregister_gates(mm_clks, ARRAY_SIZE(mm_clks), clk_data); + mtk_free_clk_data(clk_data); + + return 0; +} + +static struct platform_driver clk_mt8186_mm_drv = { + .probe = clk_mt8186_mm_probe, + .remove = clk_mt8186_mm_remove, + .driver = { + .name = "clk-mt8186-mm", + }, +}; +builtin_platform_driver(clk_mt8186_mm_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c new file mode 100644 index 000000000000..d7f2c4663c85 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c @@ -0,0 +1,780 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-mtk.h" +#include "clk-mux.h" + +static DEFINE_SPINLOCK(mt8186_clk_lock); + +static const struct mtk_fixed_clk top_fixed_clks[] = { + FIXED_CLK(CLK_TOP_ULPOSC1, "ulposc1", NULL, 250000000), + FIXED_CLK(CLK_TOP_466M_FMEM, "hd_466m_fmem_ck", NULL, 466000000), + FIXED_CLK(CLK_TOP_MPLL, "mpll", NULL, 208000000), +}; + +static const struct mtk_fixed_factor top_divs[] = { + FACTOR(CLK_TOP_MAINPLL_D2, "mainpll_d2", "mainpll", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D2_D2, "mainpll_d2_d2", "mainpll_d2", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D2_D4, "mainpll_d2_d4", "mainpll_d2", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D2_D16, "mainpll_d2_d16", "mainpll_d2", 1, 16), + FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3), + FACTOR(CLK_TOP_MAINPLL_D3_D2, "mainpll_d3_d2", "mainpll_d3", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D3_D4, "mainpll_d3_d4", "mainpll_d3", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5), + FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4), + FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7), + FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2), + FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4), + FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D2, "univpll_d2_d2", "univpll_d2", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D2_D4, "univpll_d2_d4", "univpll_d2", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL_D3_D2, "univpll_d3_d2", "univpll_d3", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D3_D4, "univpll_d3_d4", "univpll_d3", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D3_D8, "univpll_d3_d8", "univpll_d3", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D3_D32, "univpll_d3_d32", "univpll_d3", 1, 32), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7), + FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univ2pll", 1, 13), + FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16), + FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32), + FACTOR(CLK_TOP_APLL1_D2, "apll1_d2", "apll1", 1, 2), + FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4), + FACTOR(CLK_TOP_APLL1_D8, "apll1_d8", "apll1", 1, 8), + FACTOR(CLK_TOP_APLL2_D2, "apll2_d2", "apll2", 1, 2), + FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4), + FACTOR(CLK_TOP_APLL2_D8, "apll2_d8", "apll2", 1, 8), + FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll", 1, 4), + FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll", 1, 8), + FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll", 1, 16), + FACTOR(CLK_TOP_TVDPLL_D32, "tvdpll_d32", "tvdpll", 1, 32), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2), + FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1", 1, 2), + FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1", 1, 4), + 
FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1", 1, 8), + FACTOR(CLK_TOP_ULPOSC1_D10, "ulposc1_d10", "ulposc1", 1, 10), + FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1", 1, 16), + FACTOR(CLK_TOP_ULPOSC1_D32, "ulposc1_d32", "ulposc1", 1, 32), + FACTOR(CLK_TOP_ADSPPLL_D2, "adsppll_d2", "adsppll", 1, 2), + FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4), + FACTOR(CLK_TOP_ADSPPLL_D8, "adsppll_d8", "adsppll", 1, 8), + FACTOR(CLK_TOP_NNAPLL_D2, "nnapll_d2", "nnapll", 1, 2), + FACTOR(CLK_TOP_NNAPLL_D4, "nnapll_d4", "nnapll", 1, 4), + FACTOR(CLK_TOP_NNAPLL_D8, "nnapll_d8", "nnapll", 1, 8), + FACTOR(CLK_TOP_NNA2PLL_D2, "nna2pll_d2", "nna2pll", 1, 2), + FACTOR(CLK_TOP_NNA2PLL_D4, "nna2pll_d4", "nna2pll", 1, 4), + FACTOR(CLK_TOP_NNA2PLL_D8, "nna2pll_d8", "nna2pll", 1, 8), + FACTOR(CLK_TOP_F_BIST2FPC, "f_bist2fpc_ck", "univpll_d3_d2", 1, 1), +}; + +static const char * const axi_parents[] = { + "clk26m", + "mainpll_d7", + "mainpll_d2_d4", + "univpll_d7" +}; + +static const char * const scp_parents[] = { + "clk26m", + "mainpll_d2_d4", + "mainpll_d5", + "mainpll_d2_d2", + "mainpll_d3", + "univpll_d3" +}; + +static const char * const mfg_parents[] = { + "clk26m", + "mfgpll", + "mainpll_d3", + "mainpll_d5" +}; + +static const char * const camtg_parents[] = { + "clk26m", + "univpll_192m_d8", + "univpll_d3_d8", + "univpll_192m_d4", + "univpll_d3_d32", + "univpll_192m_d16", + "univpll_192m_d32" +}; + +static const char * const uart_parents[] = { + "clk26m", + "univpll_d3_d8" +}; + +static const char * const spi_parents[] = { + "clk26m", + "mainpll_d5_d4", + "mainpll_d3_d4", + "mainpll_d5_d2", + "mainpll_d2_d4", + "mainpll_d7", + "mainpll_d3_d2", + "mainpll_d5" +}; + +static const char * const msdc5hclk_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d7", + "mainpll_d3_d2" +}; + +static const char * const msdc50_0_parents[] = { + "clk26m", + "msdcpll", + "univpll_d3", + "msdcpll_d2", + "mainpll_d7", + "mainpll_d3_d2", + "univpll_d2_d2" +}; + +static const char * const msdc30_1_parents[] = { + "clk26m", + "msdcpll_d2", + "univpll_d3_d2", + "mainpll_d3_d2", + "mainpll_d7" +}; + +static const char * const audio_parents[] = { + "clk26m", + "mainpll_d5_d4", + "mainpll_d7_d4", + "mainpll_d2_d16" +}; + +static const char * const aud_intbus_parents[] = { + "clk26m", + "mainpll_d2_d4", + "mainpll_d7_d2" +}; + +static const char * const aud_1_parents[] = { + "clk26m", + "apll1" +}; + +static const char * const aud_2_parents[] = { + "clk26m", + "apll2" +}; + +static const char * const aud_engen1_parents[] = { + "clk26m", + "apll1_d2", + "apll1_d4", + "apll1_d8" +}; + +static const char * const aud_engen2_parents[] = { + "clk26m", + "apll2_d2", + "apll2_d4", + "apll2_d8" +}; + +static const char * const disp_pwm_parents[] = { + "clk26m", + "univpll_d5_d2", + "univpll_d3_d4", + "ulposc1_d2", + "ulposc1_d8" +}; + +static const char * const sspm_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d3_d2", + "mainpll_d5", + "mainpll_d3" +}; + +static const char * const dxcc_parents[] = { + "clk26m", + "mainpll_d2_d2", + "mainpll_d2_d4" +}; + +static const char * const usb_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d5_d2" +}; + +static const char * const srck_parents[] = { + "clk32k", + "clk26m", + "ulposc1_d10" +}; + +static const char * const spm_parents[] = { + "clk32k", + "ulposc1_d10", + "clk26m", + "mainpll_d7_d2" +}; + +static const char * const i2c_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d3_d4", + "univpll_d5_d2" +}; + +static const char * const pwm_parents[] = 
{ + "clk26m", + "univpll_d3_d8", + "univpll_d3_d4", + "univpll_d2_d4" +}; + +static const char * const seninf_parents[] = { + "clk26m", + "univpll_d2_d4", + "univpll_d2_d2", + "univpll_d3_d2" +}; + +static const char * const aes_msdcfde_parents[] = { + "clk26m", + "univpll_d3", + "mainpll_d3", + "univpll_d2_d2", + "mainpll_d2_d2", + "mainpll_d2_d4" +}; + +static const char * const pwrap_ulposc_parents[] = { + "clk26m", + "univpll_d5_d4", + "ulposc1_d4", + "ulposc1_d8", + "ulposc1_d10", + "ulposc1_d16", + "ulposc1_d32" +}; + +static const char * const camtm_parents[] = { + "clk26m", + "univpll_d2_d4", + "univpll_d3_d2" +}; + +static const char * const venc_parents[] = { + "clk26m", + "mmpll", + "mainpll_d2_d2", + "mainpll_d2", + "univpll_d3", + "univpll_d2_d2", + "mainpll_d3", + "mmpll" +}; + +static const char * const isp_parents[] = { + "clk26m", + "mainpll_d2", + "mainpll_d2_d2", + "univpll_d3", + "mainpll_d3", + "mmpll", + "univpll_d5", + "univpll_d2_d2", + "mmpll_d2" +}; + +static const char * const dpmaif_parents[] = { + "clk26m", + "univpll_d2_d2", + "mainpll_d3", + "mainpll_d2_d2", + "univpll_d3_d2" +}; + +static const char * const vdec_parents[] = { + "clk26m", + "mainpll_d3", + "mainpll_d2_d2", + "univpll_d5", + "mainpll_d2", + "univpll_d3", + "univpll_d2_d2" +}; + +static const char * const disp_parents[] = { + "clk26m", + "univpll_d3_d2", + "mainpll_d5", + "univpll_d5", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mainpll_d2", + "mmpll" +}; + +static const char * const mdp_parents[] = { + "clk26m", + "mainpll_d5", + "univpll_d5", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mainpll_d2", + "mmpll" +}; + +static const char * const audio_h_parents[] = { + "clk26m", + "univpll_d7", + "apll1", + "apll2" +}; + +static const char * const ufs_parents[] = { + "clk26m", + "mainpll_d7", + "univpll_d2_d4", + "mainpll_d2_d4" +}; + +static const char * const aes_fde_parents[] = { + "clk26m", + "univpll_d3", + "mainpll_d2_d2", + "univpll_d5" +}; + +static const char * const audiodsp_parents[] = { + "clk26m", + "ulposc1_d10", + "adsppll", + "adsppll_d2", + "adsppll_d4", + "adsppll_d8" +}; + +static const char * const dvfsrc_parents[] = { + "clk26m", + "ulposc1_d10", +}; + +static const char * const dsi_occ_parents[] = { + "clk26m", + "univpll_d3_d2", + "mpll", + "mainpll_d5" +}; + +static const char * const spmi_mst_parents[] = { + "clk26m", + "univpll_d5_d4", + "ulposc1_d4", + "ulposc1_d8", + "ulposc1_d10", + "ulposc1_d16", + "ulposc1_d32" +}; + +static const char * const spinor_parents[] = { + "clk26m", + "clk13m", + "mainpll_d7_d4", + "univpll_d3_d8", + "univpll_d5_d4", + "mainpll_d7_d2" +}; + +static const char * const nna_parents[] = { + "clk26m", + "univpll_d3_d8", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll", + "mainpll_d2", + "univpll_d2", + "nnapll_d2", + "nnapll_d4", + "nnapll_d8", + "nnapll", + "nna2pll" +}; + +static const char * const nna2_parents[] = { + "clk26m", + "univpll_d3_d8", + "mainpll_d2_d4", + "univpll_d3_d2", + "mainpll_d2_d2", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + "mmpll", + "mainpll_d2", + "univpll_d2", + "nna2pll_d2", + "nna2pll_d4", + "nna2pll_d8", + "nnapll", + "nna2pll" +}; + +static const char * const ssusb_parents[] = { + "clk26m", + "univpll_d5_d4", + "univpll_d5_d2" +}; + +static const char * const wpe_parents[] = { + "clk26m", + "univpll_d3_d2", + "mainpll_d5", + "univpll_d5", + "univpll_d2_d2", + "mainpll_d3", + "univpll_d3", + 
"mainpll_d2", + "mmpll" +}; + +static const char * const dpi_parents[] = { + "clk26m", + "tvdpll", + "tvdpll_d2", + "tvdpll_d4", + "tvdpll_d8", + "tvdpll_d16", + "tvdpll_d32" +}; + +static const char * const u3_occ_250m_parents[] = { + "clk26m", + "univpll_d5" +}; + +static const char * const u3_occ_500m_parents[] = { + "clk26m", + "nna2pll_d2" +}; + +static const char * const adsp_bus_parents[] = { + "clk26m", + "ulposc1_d2", + "mainpll_d5", + "mainpll_d2_d2", + "mainpll_d3", + "mainpll_d2", + "univpll_d3" +}; + +static const char * const apll_mck_parents[] = { + "top_aud_1", + "top_aud_2" +}; + +static const struct mtk_mux top_mtk_muxes[] = { + /* + * CLK_CFG_0 + * top_axi is bus clock, should not be closed by Linux. + * top_scp is main clock in always-on co-processor. + */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "top_axi", axi_parents, + 0x0040, 0x0044, 0x0048, 0, 2, 7, 0x0004, 0, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SCP, "top_scp", scp_parents, + 0x0040, 0x0044, 0x0048, 8, 3, 15, 0x0004, 1, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG, "top_mfg", + mfg_parents, 0x0040, 0x0044, 0x0048, 16, 2, 23, 0x0004, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "top_camtg", + camtg_parents, 0x0040, 0x0044, 0x0048, 24, 3, 31, 0x0004, 3), + /* CLK_CFG_1 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG1, "top_camtg1", + camtg_parents, 0x0050, 0x0054, 0x0058, 0, 3, 7, 0x0004, 4), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "top_camtg2", + camtg_parents, 0x0050, 0x0054, 0x0058, 8, 3, 15, 0x0004, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "top_camtg3", + camtg_parents, 0x0050, 0x0054, 0x0058, 16, 3, 23, 0x0004, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4, "top_camtg4", + camtg_parents, 0x0050, 0x0054, 0x0058, 24, 3, 31, 0x0004, 7), + /* CLK_CFG_2 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5, "top_camtg5", + camtg_parents, 0x0060, 0x0064, 0x0068, 0, 3, 7, 0x0004, 8), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG6, "top_camtg6", + camtg_parents, 0x0060, 0x0064, 0x0068, 8, 3, 15, 0x0004, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "top_uart", + uart_parents, 0x0060, 0x0064, 0x0068, 16, 1, 23, 0x0004, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "top_spi", + spi_parents, 0x0060, 0x0064, 0x0068, 24, 3, 31, 0x0004, 11), + /* CLK_CFG_3 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "top_msdc5hclk", + msdc5hclk_parents, 0x0070, 0x0074, 0x0078, 0, 2, 7, 0x0004, 12), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "top_msdc50_0", + msdc50_0_parents, 0x0070, 0x0074, 0x0078, 8, 3, 15, 0x0004, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "top_msdc30_1", + msdc30_1_parents, 0x0070, 0x0074, 0x0078, 16, 3, 23, 0x0004, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO, "top_audio", + audio_parents, 0x0070, 0x0074, 0x0078, 24, 2, 31, 0x0004, 15), + /* CLK_CFG_4 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "top_aud_intbus", + aud_intbus_parents, 0x0080, 0x0084, 0x0088, 0, 2, 7, 0x0004, 16), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_1, "top_aud_1", + aud_1_parents, 0x0080, 0x0084, 0x0088, 8, 1, 15, 0x0004, 17), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_2, "top_aud_2", + aud_2_parents, 0x0080, 0x0084, 0x0088, 16, 1, 23, 0x0004, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN1, "top_aud_engen1", + aud_engen1_parents, 0x0080, 0x0084, 0x0088, 24, 2, 31, 0x0004, 19), + /* + * CLK_CFG_5 + * top_sspm is main clock in always-on co-processor, should not be closed + * in Linux. 
+ */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_ENGEN2, "top_aud_engen2", + aud_engen2_parents, 0x0090, 0x0094, 0x0098, 0, 2, 7, 0x0004, 20), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM, "top_disp_pwm", + disp_pwm_parents, 0x0090, 0x0094, 0x0098, 8, 3, 15, 0x0004, 21), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SSPM, "top_sspm", sspm_parents, + 0x0090, 0x0094, 0x0098, 16, 3, 23, 0x0004, 22, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC, "top_dxcc", + dxcc_parents, 0x0090, 0x0094, 0x0098, 24, 2, 31, 0x0004, 23), + /* + * CLK_CFG_6 + * top_spm and top_srck are main clocks in always-on co-processor. + */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "top_usb", + usb_parents, 0x00a0, 0x00a4, 0x00a8, 0, 2, 7, 0x0004, 24), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SRCK, "top_srck", srck_parents, + 0x00a0, 0x00a4, 0x00a8, 8, 2, 15, 0x0004, 25, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "top_spm", spm_parents, + 0x00a0, 0x00a4, 0x00a8, 16, 2, 23, 0x0004, 26, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "top_i2c", + i2c_parents, 0x00a0, 0x00a4, 0x00a8, 24, 2, 31, 0x0004, 27), + /* CLK_CFG_7 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "top_pwm", + pwm_parents, 0x00b0, 0x00b4, 0x00b8, 0, 2, 7, 0x0004, 28), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "top_seninf", + seninf_parents, 0x00b0, 0x00b4, 0x00b8, 8, 2, 15, 0x0004, 29), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "top_seninf1", + seninf_parents, 0x00b0, 0x00b4, 0x00b8, 16, 2, 23, 0x0004, 30), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2, "top_seninf2", + seninf_parents, 0x00b0, 0x00b4, 0x00b8, 24, 2, 31, 0x0008, 0), + /* CLK_CFG_8 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3, "top_seninf3", + seninf_parents, 0x00c0, 0x00c4, 0x00c8, 0, 2, 7, 0x0008, 1), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE, "top_aes_msdcfde", + aes_msdcfde_parents, 0x00c0, 0x00c4, 0x00c8, 8, 3, 15, 0x0008, 2), + MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC, "top_pwrap_ulposc", + pwrap_ulposc_parents, 0x00c0, 0x00c4, 0x00c8, 16, 3, 23, 0x0008, 3), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "top_camtm", + camtm_parents, 0x00c0, 0x00c4, 0x00c8, 24, 2, 31, 0x0008, 4), + /* CLK_CFG_9 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "top_venc", + venc_parents, 0x00d0, 0x00d4, 0x00d8, 0, 3, 7, 0x0008, 5), + MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "top_cam", + isp_parents, 0x00d0, 0x00d4, 0x00d8, 8, 4, 15, 0x0008, 6), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG1, "top_img1", + isp_parents, 0x00d0, 0x00d4, 0x00d8, 16, 4, 23, 0x0008, 7), + MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "top_ipe", + isp_parents, 0x00d0, 0x00d4, 0x00d8, 24, 4, 31, 0x0008, 8), + /* CLK_CFG_10 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF, "top_dpmaif", + dpmaif_parents, 0x00e0, 0x00e4, 0x00e8, 0, 3, 7, 0x0008, 9), + MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "top_vdec", + vdec_parents, 0x00e0, 0x00e4, 0x00e8, 8, 3, 15, 0x0008, 10), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP, "top_disp", + disp_parents, 0x00e0, 0x00e4, 0x00e8, 16, 4, 23, 0x0008, 11), + MUX_GATE_CLR_SET_UPD(CLK_TOP_MDP, "top_mdp", + mdp_parents, 0x00e0, 0x00e4, 0x00e8, 24, 4, 31, 0x0008, 12), + /* CLK_CFG_11 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H, "top_audio_h", + audio_h_parents, 0x00ec, 0x00f0, 0x00f4, 0, 2, 7, 0x0008, 13), + MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS, "top_ufs", + ufs_parents, 0x00ec, 0x00f0, 0x00f4, 8, 2, 15, 0x0008, 14), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_FDE, "top_aes_fde", + aes_fde_parents, 0x00ec, 0x00f0, 0x00f4, 16, 2, 23, 0x0008, 15), + MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIODSP, "top_audiodsp", + audiodsp_parents, 0x00ec, 0x00f0, 0x00f4, 24, 3, 31, 0x0008, 16), + /* + * CLK_CFG_12 + * dvfsrc is 
for internal DVFS usage, should not be closed in Linux. + */ + MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DVFSRC, "top_dvfsrc", dvfsrc_parents, + 0x0100, 0x0104, 0x0108, 0, 1, 7, 0x0008, 17, + CLK_IS_CRITICAL), + MUX_GATE_CLR_SET_UPD(CLK_TOP_DSI_OCC, "top_dsi_occ", + dsi_occ_parents, 0x0100, 0x0104, 0x0108, 8, 2, 15, 0x0008, 18), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_MST, "top_spmi_mst", + spmi_mst_parents, 0x0100, 0x0104, 0x0108, 16, 3, 23, 0x0008, 19), + /* CLK_CFG_13 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINOR, "top_spinor", + spinor_parents, 0x0110, 0x0114, 0x0118, 0, 3, 6, 0x0008, 20), + MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA, "top_nna", + nna_parents, 0x0110, 0x0114, 0x0118, 7, 4, 14, 0x0008, 21), + MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA1, "top_nna1", + nna_parents, 0x0110, 0x0114, 0x0118, 15, 4, 22, 0x0008, 22), + MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA2, "top_nna2", + nna2_parents, 0x0110, 0x0114, 0x0118, 23, 4, 30, 0x0008, 23), + /* CLK_CFG_14 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI, "top_ssusb_xhci", + ssusb_parents, 0x0120, 0x0124, 0x0128, 0, 2, 5, 0x0008, 24), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_TOP_1P, "top_ssusb_1p", + ssusb_parents, 0x0120, 0x0124, 0x0128, 6, 2, 11, 0x0008, 25), + MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_1P, "top_ssusb_xhci_1p", + ssusb_parents, 0x0120, 0x0124, 0x0128, 12, 2, 17, 0x0008, 26), + MUX_GATE_CLR_SET_UPD(CLK_TOP_WPE, "top_wpe", + wpe_parents, 0x0120, 0x0124, 0x0128, 18, 4, 25, 0x0008, 27), + /* CLK_CFG_15 */ + MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi", + dpi_parents, 0x0180, 0x0184, 0x0188, 0, 3, 6, 0x0008, 28), + MUX_GATE_CLR_SET_UPD(CLK_TOP_U3_OCC_250M, "top_u3_occ_250m", + u3_occ_250m_parents, 0x0180, 0x0184, 0x0188, 7, 1, 11, 0x0008, 29), + MUX_GATE_CLR_SET_UPD(CLK_TOP_U3_OCC_500M, "top_u3_occ_500m", + u3_occ_500m_parents, 0x0180, 0x0184, 0x0188, 12, 1, 16, 0x0008, 30), + MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_BUS, "top_adsp_bus", + adsp_bus_parents, 0x0180, 0x0184, 0x0188, 17, 3, 23, 0x0008, 31), +}; + +static struct mtk_composite top_muxes[] = { + /* CLK_AUDDIV_0 */ + MUX(CLK_TOP_APLL_I2S0_MCK_SEL, "apll_i2s0_mck_sel", apll_mck_parents, 0x0320, 16, 1), + MUX(CLK_TOP_APLL_I2S1_MCK_SEL, "apll_i2s1_mck_sel", apll_mck_parents, 0x0320, 17, 1), + MUX(CLK_TOP_APLL_I2S2_MCK_SEL, "apll_i2s2_mck_sel", apll_mck_parents, 0x0320, 18, 1), + MUX(CLK_TOP_APLL_I2S4_MCK_SEL, "apll_i2s4_mck_sel", apll_mck_parents, 0x0320, 19, 1), + MUX(CLK_TOP_APLL_TDMOUT_MCK_SEL, "apll_tdmout_mck_sel", apll_mck_parents, + 0x0320, 20, 1), +}; + +static const struct mtk_composite top_adj_divs[] = { + DIV_GATE(CLK_TOP_APLL12_CK_DIV0, "apll12_div0", "apll_i2s0_mck_sel", + 0x0320, 0, 0x0328, 8, 0), + DIV_GATE(CLK_TOP_APLL12_CK_DIV1, "apll12_div1", "apll_i2s1_mck_sel", + 0x0320, 1, 0x0328, 8, 8), + DIV_GATE(CLK_TOP_APLL12_CK_DIV2, "apll12_div2", "apll_i2s2_mck_sel", + 0x0320, 2, 0x0328, 8, 16), + DIV_GATE(CLK_TOP_APLL12_CK_DIV4, "apll12_div4", "apll_i2s4_mck_sel", + 0x0320, 3, 0x0328, 8, 24), + DIV_GATE(CLK_TOP_APLL12_CK_DIV_TDMOUT_M, "apll12_div_tdmout_m", "apll_tdmout_mck_sel", + 0x0320, 4, 0x0334, 8, 0), +}; + +static const struct of_device_id of_match_clk_mt8186_topck[] = { + { .compatible = "mediatek,mt8186-topckgen", }, + {} +}; + +static int clk_mt8186_topck_probe(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data; + struct device_node *node = pdev->dev.of_node; + int r; + void __iomem *base; + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + if (!clk_data) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) { + r = PTR_ERR(base); + goto 
free_top_data; + } + + r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), + clk_data); + if (r) + goto free_top_data; + + r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + if (r) + goto unregister_fixed_clks; + + r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node, + &mt8186_clk_lock, clk_data); + if (r) + goto unregister_factors; + + r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, + &mt8186_clk_lock, clk_data); + if (r) + goto unregister_muxes; + + r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, + &mt8186_clk_lock, clk_data); + if (r) + goto unregister_composite_muxes; + + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); + if (r) + goto unregister_composite_divs; + + platform_set_drvdata(pdev, clk_data); + + return r; + +unregister_composite_divs: + mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data); +unregister_composite_muxes: + mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data); +unregister_muxes: + mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data); +unregister_factors: + mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); +unregister_fixed_clks: + mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); +free_top_data: + mtk_free_clk_data(clk_data); + return r; +} + +static int clk_mt8186_topck_remove(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); + struct device_node *node = pdev->dev.of_node; + + of_clk_del_provider(node); + mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), clk_data); + mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), clk_data); + mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), clk_data); + mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), clk_data); + mtk_free_clk_data(clk_data); + + return 0; +} + +static struct platform_driver clk_mt8186_topck_drv = { + .probe = clk_mt8186_topck_probe, + .remove = clk_mt8186_topck_remove, + .driver = { + .name = "clk-mt8186-topck", + .of_match_table = of_match_clk_mt8186_topck, + }, +}; +builtin_platform_driver(clk_mt8186_topck_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-vdec.c b/drivers/clk/mediatek/clk-mt8186-vdec.c new file mode 100644 index 000000000000..5ad7e1ae0bac --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-vdec.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8186-clk.h> + +static const struct mtk_gate_regs vdec0_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x4, + .sta_ofs = 0x0, +}; + +static const struct mtk_gate_regs vdec1_cg_regs = { + .set_ofs = 0x190, + .clr_ofs = 0x190, + .sta_ofs = 0x190, +}; + +static const struct mtk_gate_regs vdec2_cg_regs = { + .set_ofs = 0x200, + .clr_ofs = 0x204, + .sta_ofs = 0x200, +}; + +static const struct mtk_gate_regs vdec3_cg_regs = { + .set_ofs = 0x8, + .clr_ofs = 0xc, + .sta_ofs = 0x8, +}; + +#define GATE_VDEC0(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) + +#define GATE_VDEC1(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv) + +#define GATE_VDEC2(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) + +#define GATE_VDEC3(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &vdec3_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate vdec_clks[] = { + /* VDEC0 */ + GATE_VDEC0(CLK_VDEC_CKEN, "vdec_cken", "top_vdec", 0), + GATE_VDEC0(CLK_VDEC_ACTIVE, "vdec_active", "top_vdec", 4), + GATE_VDEC0(CLK_VDEC_CKEN_ENG, "vdec_cken_eng", "top_vdec", 8), + /* VDEC1 */ + GATE_VDEC1(CLK_VDEC_MINI_MDP_CKEN_CFG_RG, "vdec_mini_mdp_cken_cfg_rg", "top_vdec", 0), + /* VDEC2 */ + GATE_VDEC2(CLK_VDEC_LAT_CKEN, "vdec_lat_cken", "top_vdec", 0), + GATE_VDEC2(CLK_VDEC_LAT_ACTIVE, "vdec_lat_active", "top_vdec", 4), + GATE_VDEC2(CLK_VDEC_LAT_CKEN_ENG, "vdec_lat_cken_eng", "top_vdec", 8), + /* VDEC3 */ + GATE_VDEC3(CLK_VDEC_LARB1_CKEN, "vdec_larb1_cken", "top_vdec", 0), +}; + +static const struct mtk_clk_desc vdec_desc = { + .clks = vdec_clks, + .num_clks = ARRAY_SIZE(vdec_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_vdec[] = { + { + .compatible = "mediatek,mt8186-vdecsys", + .data = &vdec_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_vdec_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-vdec", + .of_match_table = of_match_clk_mt8186_vdec, + }, +}; +builtin_platform_driver(clk_mt8186_vdec_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-venc.c b/drivers/clk/mediatek/clk-mt8186-venc.c new file mode 100644 index 000000000000..f5519f794c45 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-venc.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. 
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs venc_cg_regs = { + .set_ofs = 0x4, + .clr_ofs = 0x8, + .sta_ofs = 0x0, +}; + +#define GATE_VENC(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) + +static const struct mtk_gate venc_clks[] = { + GATE_VENC(CLK_VENC_CKE0_LARB, "venc_cke0_larb", "top_venc", 0), + GATE_VENC(CLK_VENC_CKE1_VENC, "venc_cke1_venc", "top_venc", 4), + GATE_VENC(CLK_VENC_CKE2_JPGENC, "venc_cke2_jpgenc", "top_venc", 8), + GATE_VENC(CLK_VENC_CKE5_GALS, "venc_cke5_gals", "top_venc", 28), +}; + +static const struct mtk_clk_desc venc_desc = { + .clks = venc_clks, + .num_clks = ARRAY_SIZE(venc_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_venc[] = { + { + .compatible = "mediatek,mt8186-vencsys", + .data = &venc_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_venc_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-venc", + .of_match_table = of_match_clk_mt8186_venc, + }, +}; +builtin_platform_driver(clk_mt8186_venc_drv); diff --git a/drivers/clk/mediatek/clk-mt8186-wpe.c b/drivers/clk/mediatek/clk-mt8186-wpe.c new file mode 100644 index 000000000000..8db3e9178a1e --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8186-wpe.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// Copyright (c) 2022 MediaTek Inc. +// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/mt8186-clk.h> + +#include "clk-gate.h" +#include "clk-mtk.h" + +static const struct mtk_gate_regs wpe_cg_regs = { + .set_ofs = 0x0, + .clr_ofs = 0x0, + .sta_ofs = 0x0, +}; + +#define GATE_WPE(_id, _name, _parent, _shift) \ + GATE_MTK(_id, _name, _parent, &wpe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv) + +static const struct mtk_gate wpe_clks[] = { + GATE_WPE(CLK_WPE_CK_EN, "wpe", "top_wpe", 17), + GATE_WPE(CLK_WPE_SMI_LARB8_CK_EN, "wpe_smi_larb8", "top_wpe", 19), + GATE_WPE(CLK_WPE_SYS_EVENT_TX_CK_EN, "wpe_sys_event_tx", "top_wpe", 20), + GATE_WPE(CLK_WPE_SMI_LARB8_PCLK_EN, "wpe_smi_larb8_p_en", "top_wpe", 25), +}; + +static const struct mtk_clk_desc wpe_desc = { + .clks = wpe_clks, + .num_clks = ARRAY_SIZE(wpe_clks), +}; + +static const struct of_device_id of_match_clk_mt8186_wpe[] = { + { + .compatible = "mediatek,mt8186-wpesys", + .data = &wpe_desc, + }, { + /* sentinel */ + } +}; + +static struct platform_driver clk_mt8186_wpe_drv = { + .probe = mtk_clk_simple_probe, + .remove = mtk_clk_simple_remove, + .driver = { + .name = "clk-mt8186-wpe", + .of_match_table = of_match_clk_mt8186_wpe, + }, +}; +builtin_platform_driver(clk_mt8186_wpe_drv); diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c index f28d56628045..8c989bffd8c7 100644 --- a/drivers/clk/mediatek/clk-mt8192-aud.c +++ b/drivers/clk/mediatek/clk-mt8192-aud.c @@ -79,7 +79,7 @@ static const struct mtk_gate aud_clks[] = { static int clk_mt8192_aud_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -91,7 +91,7 @@ static int clk_mt8192_aud_probe(struct platform_device *pdev) if (r) return r; - r = 
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) return r; diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c index 4a0b4c4bc06a..1be3ff4d407d 100644 --- a/drivers/clk/mediatek/clk-mt8192-mm.c +++ b/drivers/clk/mediatek/clk-mt8192-mm.c @@ -84,7 +84,7 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK); @@ -95,7 +95,7 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev) if (r) return r; - return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); } static struct platform_driver clk_mt8192_mm_drv = { diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c index ab27cd66b866..dda211b7a745 100644 --- a/drivers/clk/mediatek/clk-mt8192.c +++ b/drivers/clk/mediatek/clk-mt8192.c @@ -1178,7 +1178,7 @@ static const struct mtk_pll_data plls[] = { 0, 0, 32, 0x0330, 24, 0, 0, 0, 0x0334, 0), }; -static struct clk_onecell_data *top_clk_data; +static struct clk_hw_onecell_data *top_clk_data; static void clk_mt8192_top_init_early(struct device_node *node) { @@ -1189,11 +1189,11 @@ static void clk_mt8192_top_init_early(struct device_node *node) return; for (i = 0; i < CLK_TOP_NR_CLK; i++) - top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER); + top_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER); mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data); - of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data); } CLK_OF_DECLARE_DRIVER(mt8192_topckgen, "mediatek,mt8192-topckgen", @@ -1222,12 +1222,13 @@ static int clk_mt8192_top_probe(struct platform_device *pdev) if (r) return r; - return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, + top_clk_data); } static int clk_mt8192_infra_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -1239,7 +1240,7 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev) if (r) goto free_clk_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto free_clk_data; @@ -1252,7 +1253,7 @@ free_clk_data: static int clk_mt8192_peri_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -1264,7 +1265,7 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev) if (r) goto free_clk_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto free_clk_data; @@ -1277,7 +1278,7 @@ free_clk_data: static int clk_mt8192_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -1290,7 +1291,7 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev) if (r) goto free_clk_data; - r = 
of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto free_clk_data; diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c index eecc7035a56a..0dfed6ec4d15 100644 --- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c +++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c @@ -112,7 +112,7 @@ static const struct of_device_id of_match_clk_mt8195_apmixed[] = { static int clk_mt8195_apmixed_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -128,7 +128,7 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev) if (r) goto unregister_plls; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto unregister_gates; @@ -148,7 +148,7 @@ free_apmixed_data: static int clk_mt8195_apmixed_remove(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; - struct clk_onecell_data *clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); of_clk_del_provider(node); mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c index 8cd88dfc3283..0b52f6a009c4 100644 --- a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c +++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c @@ -58,7 +58,7 @@ static const struct mtk_pll_data apusys_plls[] = { static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -70,7 +70,7 @@ static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev) if (r) goto free_apusys_pll_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto unregister_plls; @@ -87,7 +87,7 @@ free_apusys_pll_data: static int clk_mt8195_apusys_pll_remove(struct platform_device *pdev) { - struct clk_onecell_data *clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); struct device_node *node = pdev->dev.of_node; of_clk_del_provider(node); diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c index b602fcd7f1d1..ec70e1f65eaf 100644 --- a/drivers/clk/mediatek/clk-mt8195-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c @@ -1224,7 +1224,7 @@ static const struct of_device_id of_match_clk_mt8195_topck[] = { static int clk_mt8195_topck_probe(struct platform_device *pdev) { - struct clk_onecell_data *top_clk_data; + struct clk_hw_onecell_data *top_clk_data; struct device_node *node = pdev->dev.of_node; int r; void __iomem *base; @@ -1267,7 +1267,7 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev) if (r) goto unregister_composite_divs; - r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data); if (r) goto unregister_gates; @@ -1294,7 +1294,7 @@ free_top_data: static int clk_mt8195_topck_remove(struct platform_device *pdev) { - struct clk_onecell_data *top_clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *top_clk_data = 
platform_get_drvdata(pdev); struct device_node *node = pdev->dev.of_node; of_clk_del_provider(node); diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c index 3bc7ed19d550..261a7f76dd3c 100644 --- a/drivers/clk/mediatek/clk-mt8195-vdo0.c +++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c @@ -92,7 +92,7 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VDO0_NR_CLK); @@ -103,7 +103,7 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev) if (r) goto free_vdo0_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto unregister_gates; @@ -122,7 +122,7 @@ static int clk_mt8195_vdo0_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); of_clk_del_provider(node); mtk_clk_unregister_gates(vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c index 90c738a85ff1..3378487d2c90 100644 --- a/drivers/clk/mediatek/clk-mt8195-vdo1.c +++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c @@ -109,7 +109,7 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_VDO1_NR_CLK); @@ -120,7 +120,7 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev) if (r) goto free_vdo1_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto unregister_gates; @@ -139,7 +139,7 @@ static int clk_mt8195_vdo1_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = dev->parent->of_node; - struct clk_onecell_data *clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); of_clk_del_provider(node); mtk_clk_unregister_gates(vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data); diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c index 6ab3a06dc9d5..90f48068a8de 100644 --- a/drivers/clk/mediatek/clk-mt8516-aud.c +++ b/drivers/clk/mediatek/clk-mt8516-aud.c @@ -49,14 +49,14 @@ static const struct mtk_gate aud_clks[] __initconst = { static void __init mtk_audsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK); mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c index a37143f920ce..b96db88893e2 100644 --- a/drivers/clk/mediatek/clk-mt8516.c +++ b/drivers/clk/mediatek/clk-mt8516.c @@ -677,7 +677,7 @@ static const struct mtk_gate top_clks[] __initconst = { 
static void __init mtk_topckgen_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -699,7 +699,7 @@ static void __init mtk_topckgen_init(struct device_node *node) mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8516_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -708,7 +708,7 @@ CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8516-topckgen", mtk_topckgen_init); static void __init mtk_infracfg_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; int r; void __iomem *base; @@ -723,7 +723,7 @@ static void __init mtk_infracfg_init(struct device_node *node) mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base, &mt8516_clk_lock, clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); @@ -771,23 +771,23 @@ static const struct mtk_pll_div_table mmpll_div_table[] = { }; static const struct mtk_pll_data plls[] = { - PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0x00000001, 0, + PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0100, 0x0110, 0, 0, 21, 0x0104, 24, 0, 0x0104, 0), - PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0x00000001, + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0120, 0x0130, 0, HAVE_RST_BAR, 21, 0x0124, 24, 0, 0x0124, 0), - PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000001, + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x0140, 0x0150, 0x30000000, HAVE_RST_BAR, 7, 0x0144, 24, 0, 0x0144, 0), - PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0x00000001, 0, + PLL_B(CLK_APMIXED_MMPLL, "mmpll", 0x0160, 0x0170, 0, 0, 21, 0x0164, 24, 0, 0x0164, 0, mmpll_div_table), - PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0x00000001, 0, + PLL(CLK_APMIXED_APLL1, "apll1", 0x0180, 0x0190, 0, 0, 31, 0x0180, 1, 0x0194, 0x0184, 0), - PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0x00000001, 0, + PLL(CLK_APMIXED_APLL2, "apll2", 0x01A0, 0x01B0, 0, 0, 31, 0x01A0, 1, 0x01B4, 0x01A4, 0), }; static void __init mtk_apmixedsys_init(struct device_node *node) { - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; void __iomem *base; int r; @@ -801,7 +801,7 @@ static void __init mtk_apmixedsys_init(struct device_node *node) mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) pr_err("%s(): could not register clock provider: %d\n", __func__, r); diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index b4063261cf56..b9188000ab3c 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c @@ -18,46 +18,35 @@ #include "clk-mtk.h" #include "clk-gate.h" -struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num) +struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num) { int i; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; - clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + clk_data = kzalloc(struct_size(clk_data, hws, clk_num), GFP_KERNEL); if (!clk_data) return NULL; - clk_data->clks = kcalloc(clk_num, 
sizeof(*clk_data->clks), GFP_KERNEL); - if (!clk_data->clks) - goto err_out; - - clk_data->clk_num = clk_num; + clk_data->num = clk_num; for (i = 0; i < clk_num; i++) - clk_data->clks[i] = ERR_PTR(-ENOENT); + clk_data->hws[i] = ERR_PTR(-ENOENT); return clk_data; -err_out: - kfree(clk_data); - - return NULL; } EXPORT_SYMBOL_GPL(mtk_alloc_clk_data); -void mtk_free_clk_data(struct clk_onecell_data *clk_data) +void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data) { - if (!clk_data) - return; - - kfree(clk_data->clks); kfree(clk_data); } +EXPORT_SYMBOL_GPL(mtk_free_clk_data); int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; - struct clk *clk; + struct clk_hw *hw; if (!clk_data) return -ENOMEM; @@ -65,20 +54,21 @@ int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num, for (i = 0; i < num; i++) { const struct mtk_fixed_clk *rc = &clks[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[rc->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[rc->id])) { pr_warn("Trying to register duplicate clock ID: %d\n", rc->id); continue; } - clk = clk_register_fixed_rate(NULL, rc->name, rc->parent, 0, + hw = clk_hw_register_fixed_rate(NULL, rc->name, rc->parent, 0, rc->rate); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", rc->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", rc->name, + hw); goto err; } - clk_data->clks[rc->id] = clk; + clk_data->hws[rc->id] = hw; } return 0; @@ -87,19 +77,19 @@ err: while (--i >= 0) { const struct mtk_fixed_clk *rc = &clks[i]; - if (IS_ERR_OR_NULL(clk_data->clks[rc->id])) + if (IS_ERR_OR_NULL(clk_data->hws[rc->id])) continue; - clk_unregister_fixed_rate(clk_data->clks[rc->id]); - clk_data->clks[rc->id] = ERR_PTR(-ENOENT); + clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk); + clk_data->hws[rc->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } EXPORT_SYMBOL_GPL(mtk_clk_register_fixed_clks); void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -109,20 +99,20 @@ void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num, for (i = num; i > 0; i--) { const struct mtk_fixed_clk *rc = &clks[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[rc->id])) + if (IS_ERR_OR_NULL(clk_data->hws[rc->id])) continue; - clk_unregister_fixed_rate(clk_data->clks[rc->id]); - clk_data->clks[rc->id] = ERR_PTR(-ENOENT); + clk_unregister_fixed_rate(clk_data->hws[rc->id]->clk); + clk_data->hws[rc->id] = ERR_PTR(-ENOENT); } } EXPORT_SYMBOL_GPL(mtk_clk_unregister_fixed_clks); int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; - struct clk *clk; + struct clk_hw *hw; if (!clk_data) return -ENOMEM; @@ -130,20 +120,21 @@ int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num, for (i = 0; i < num; i++) { const struct mtk_fixed_factor *ff = &clks[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[ff->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[ff->id])) { pr_warn("Trying to register duplicate clock ID: %d\n", ff->id); continue; } - clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name, + hw = clk_hw_register_fixed_factor(NULL, ff->name, ff->parent_name, CLK_SET_RATE_PARENT, ff->mult, ff->div); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", ff->name, 
clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", ff->name, + hw); goto err; } - clk_data->clks[ff->id] = clk; + clk_data->hws[ff->id] = hw; } return 0; @@ -152,19 +143,19 @@ err: while (--i >= 0) { const struct mtk_fixed_factor *ff = &clks[i]; - if (IS_ERR_OR_NULL(clk_data->clks[ff->id])) + if (IS_ERR_OR_NULL(clk_data->hws[ff->id])) continue; - clk_unregister_fixed_factor(clk_data->clks[ff->id]); - clk_data->clks[ff->id] = ERR_PTR(-ENOENT); + clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk); + clk_data->hws[ff->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } EXPORT_SYMBOL_GPL(mtk_clk_register_factors); void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -174,19 +165,19 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num, for (i = num; i > 0; i--) { const struct mtk_fixed_factor *ff = &clks[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[ff->id])) + if (IS_ERR_OR_NULL(clk_data->hws[ff->id])) continue; - clk_unregister_fixed_factor(clk_data->clks[ff->id]); - clk_data->clks[ff->id] = ERR_PTR(-ENOENT); + clk_unregister_fixed_factor(clk_data->hws[ff->id]->clk); + clk_data->hws[ff->id] = ERR_PTR(-ENOENT); } } EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors); -struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, +static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock) { - struct clk *clk; + struct clk_hw *hw; struct clk_mux *mux = NULL; struct clk_gate *gate = NULL; struct clk_divider *div = NULL; @@ -250,18 +241,18 @@ struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, div_ops = &clk_divider_ops; } - clk = clk_register_composite(NULL, mc->name, parent_names, num_parents, + hw = clk_hw_register_composite(NULL, mc->name, parent_names, num_parents, mux_hw, mux_ops, div_hw, div_ops, gate_hw, gate_ops, mc->flags); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); + if (IS_ERR(hw)) { + ret = PTR_ERR(hw); goto err_out; } - return clk; + return hw; err_out: kfree(div); kfree(gate); @@ -270,15 +261,13 @@ err_out: return ERR_PTR(ret); } -static void mtk_clk_unregister_composite(struct clk *clk) +static void mtk_clk_unregister_composite(struct clk_hw *hw) { - struct clk_hw *hw; struct clk_composite *composite; struct clk_mux *mux = NULL; struct clk_gate *gate = NULL; struct clk_divider *div = NULL; - hw = __clk_get_hw(clk); if (!hw) return; @@ -290,7 +279,7 @@ static void mtk_clk_unregister_composite(struct clk *clk) if (composite->rate_hw) div = to_clk_divider(composite->rate_hw); - clk_unregister_composite(clk); + clk_hw_unregister_composite(hw); kfree(div); kfree(gate); kfree(mux); @@ -298,9 +287,9 @@ static void mtk_clk_unregister_composite(struct clk *clk) int mtk_clk_register_composites(const struct mtk_composite *mcs, int num, void __iomem *base, spinlock_t *lock, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { - struct clk *clk; + struct clk_hw *hw; int i; if (!clk_data) @@ -309,20 +298,21 @@ int mtk_clk_register_composites(const struct mtk_composite *mcs, int num, for (i = 0; i < num; i++) { const struct mtk_composite *mc = &mcs[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[mc->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[mc->id])) { pr_warn("Trying to register duplicate clock ID: %d\n", mc->id); continue; } - clk = mtk_clk_register_composite(mc, base, lock); + hw = 
mtk_clk_register_composite(mc, base, lock); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", mc->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", mc->name, + hw); goto err; } - clk_data->clks[mc->id] = clk; + clk_data->hws[mc->id] = hw; } return 0; @@ -331,19 +321,19 @@ err: while (--i >= 0) { const struct mtk_composite *mc = &mcs[i]; - if (IS_ERR_OR_NULL(clk_data->clks[mcs->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mcs->id])) continue; - mtk_clk_unregister_composite(clk_data->clks[mc->id]); - clk_data->clks[mc->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_composite(clk_data->hws[mc->id]); + clk_data->hws[mc->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } EXPORT_SYMBOL_GPL(mtk_clk_register_composites); void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -353,20 +343,20 @@ void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num, for (i = num; i > 0; i--) { const struct mtk_composite *mc = &mcs[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[mc->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mc->id])) continue; - mtk_clk_unregister_composite(clk_data->clks[mc->id]); - clk_data->clks[mc->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_composite(clk_data->hws[mc->id]); + clk_data->hws[mc->id] = ERR_PTR(-ENOENT); } } EXPORT_SYMBOL_GPL(mtk_clk_unregister_composites); int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num, void __iomem *base, spinlock_t *lock, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { - struct clk *clk; + struct clk_hw *hw; int i; if (!clk_data) @@ -375,22 +365,23 @@ int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num, for (i = 0; i < num; i++) { const struct mtk_clk_divider *mcd = &mcds[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[mcd->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[mcd->id])) { pr_warn("Trying to register duplicate clock ID: %d\n", mcd->id); continue; } - clk = clk_register_divider(NULL, mcd->name, mcd->parent_name, + hw = clk_hw_register_divider(NULL, mcd->name, mcd->parent_name, mcd->flags, base + mcd->div_reg, mcd->div_shift, mcd->div_width, mcd->clk_divider_flags, lock); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", mcd->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", mcd->name, + hw); goto err; } - clk_data->clks[mcd->id] = clk; + clk_data->hws[mcd->id] = hw; } return 0; @@ -399,18 +390,18 @@ err: while (--i >= 0) { const struct mtk_clk_divider *mcd = &mcds[i]; - if (IS_ERR_OR_NULL(clk_data->clks[mcd->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mcd->id])) continue; - mtk_clk_unregister_composite(clk_data->clks[mcd->id]); - clk_data->clks[mcd->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_composite(clk_data->hws[mcd->id]); + clk_data->hws[mcd->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -420,18 +411,18 @@ void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num, for (i = num; i > 0; i--) { const struct mtk_clk_divider *mcd = &mcds[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[mcd->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mcd->id])) continue; - clk_unregister_divider(clk_data->clks[mcd->id]); - clk_data->clks[mcd->id] = ERR_PTR(-ENOENT); + 
clk_unregister_divider(clk_data->hws[mcd->id]->clk); + clk_data->hws[mcd->id] = ERR_PTR(-ENOENT); } } int mtk_clk_simple_probe(struct platform_device *pdev) { const struct mtk_clk_desc *mcd; - struct clk_onecell_data *clk_data; + struct clk_hw_onecell_data *clk_data; struct device_node *node = pdev->dev.of_node; int r; @@ -447,7 +438,7 @@ int mtk_clk_simple_probe(struct platform_device *pdev) if (r) goto free_data; - r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); if (r) goto unregister_clks; @@ -465,7 +456,7 @@ free_data: int mtk_clk_simple_remove(struct platform_device *pdev) { const struct mtk_clk_desc *mcd = of_device_get_match_data(&pdev->dev); - struct clk_onecell_data *clk_data = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev); struct device_node *node = pdev->dev.of_node; of_clk_del_provider(node); diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index bf6565aa7319..adb1304d35d4 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -35,9 +35,9 @@ struct mtk_fixed_clk { } int mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_fixed_clks(const struct mtk_fixed_clk *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); struct mtk_fixed_factor { int id; @@ -56,9 +56,9 @@ struct mtk_fixed_factor { } int mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); struct mtk_composite { int id; @@ -147,14 +147,11 @@ struct mtk_composite { .flags = 0, \ } -struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, - void __iomem *base, spinlock_t *lock); - int mtk_clk_register_composites(const struct mtk_composite *mcs, int num, void __iomem *base, spinlock_t *lock, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); struct mtk_clk_divider { int id; @@ -180,14 +177,14 @@ struct mtk_clk_divider { int mtk_clk_register_dividers(const struct mtk_clk_divider *mcds, int num, void __iomem *base, spinlock_t *lock, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_dividers(const struct mtk_clk_divider *mcds, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); -struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num); -void mtk_free_clk_data(struct clk_onecell_data *clk_data); +struct clk_hw_onecell_data *mtk_alloc_clk_data(unsigned int clk_num); +void mtk_free_clk_data(struct clk_hw_onecell_data *clk_data); -struct clk *mtk_clk_register_ref2usb_tx(const char *name, +struct clk_hw *mtk_clk_register_ref2usb_tx(const char *name, const char *parent_name, void __iomem *reg); void mtk_register_reset_controller(struct device_node *np, diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c index 21ad5a4afd65..cd5f9fd8cb98 100644 --- a/drivers/clk/mediatek/clk-mux.c +++ b/drivers/clk/mediatek/clk-mux.c @@ 
-143,13 +143,13 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = { }; EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops); -static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, +static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux, struct regmap *regmap, spinlock_t *lock) { struct mtk_clk_mux *clk_mux; struct clk_init_data init = {}; - struct clk *clk; + int ret; clk_mux = kzalloc(sizeof(*clk_mux), GFP_KERNEL); if (!clk_mux) @@ -166,37 +166,34 @@ static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux, clk_mux->lock = lock; clk_mux->hw.init = &init; - clk = clk_register(NULL, &clk_mux->hw); - if (IS_ERR(clk)) { + ret = clk_hw_register(NULL, &clk_mux->hw); + if (ret) { kfree(clk_mux); - return clk; + return ERR_PTR(ret); } - return clk; + return &clk_mux->hw; } -static void mtk_clk_unregister_mux(struct clk *clk) +static void mtk_clk_unregister_mux(struct clk_hw *hw) { struct mtk_clk_mux *mux; - struct clk_hw *hw; - - hw = __clk_get_hw(clk); if (!hw) return; mux = to_mtk_clk_mux(hw); - clk_unregister(clk); + clk_hw_unregister(hw); kfree(mux); } int mtk_clk_register_muxes(const struct mtk_mux *muxes, int num, struct device_node *node, spinlock_t *lock, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { struct regmap *regmap; - struct clk *clk; + struct clk_hw *hw; int i; regmap = device_node_to_regmap(node); @@ -208,20 +205,21 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes, for (i = 0; i < num; i++) { const struct mtk_mux *mux = &muxes[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[mux->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[mux->id])) { pr_warn("%pOF: Trying to register duplicate clock ID: %d\n", node, mux->id); continue; } - clk = mtk_clk_register_mux(mux, regmap, lock); + hw = mtk_clk_register_mux(mux, regmap, lock); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", mux->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", mux->name, + hw); goto err; } - clk_data->clks[mux->id] = clk; + clk_data->hws[mux->id] = hw; } return 0; @@ -230,19 +228,19 @@ err: while (--i >= 0) { const struct mtk_mux *mux = &muxes[i]; - if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mux->id])) continue; - mtk_clk_unregister_mux(clk_data->clks[mux->id]); - clk_data->clks[mux->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_mux(clk_data->hws[mux->id]); + clk_data->hws[mux->id] = ERR_PTR(-ENOENT); } - return PTR_ERR(clk); + return PTR_ERR(hw); } EXPORT_SYMBOL_GPL(mtk_clk_register_muxes); void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { int i; @@ -252,11 +250,11 @@ void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num, for (i = num; i > 0; i--) { const struct mtk_mux *mux = &muxes[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[mux->id])) + if (IS_ERR_OR_NULL(clk_data->hws[mux->id])) continue; - mtk_clk_unregister_mux(clk_data->clks[mux->id]); - clk_data->clks[mux->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_mux(clk_data->hws[mux->id]); + clk_data->hws[mux->id] = ERR_PTR(-ENOENT); } } EXPORT_SYMBOL_GPL(mtk_clk_unregister_muxes); diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h index 903a3c937959..6539c58f5d7d 100644 --- a/drivers/clk/mediatek/clk-mux.h +++ b/drivers/clk/mediatek/clk-mux.h @@ -11,7 +11,7 @@ #include <linux/types.h> struct clk; -struct clk_onecell_data; +struct clk_hw_onecell_data; struct clk_ops; struct device_node; @@ 
-84,9 +84,9 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops; int mtk_clk_register_muxes(const struct mtk_mux *muxes, int num, struct device_node *node, spinlock_t *lock, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_muxes(const struct mtk_mux *muxes, int num, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); #endif /* __DRV_CLK_MTK_MUX_H */ diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c index ccaa2085ab4d..54e6cfd29dfc 100644 --- a/drivers/clk/mediatek/clk-pll.c +++ b/drivers/clk/mediatek/clk-pll.c @@ -243,7 +243,6 @@ static int mtk_pll_prepare(struct clk_hw *hw) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 r; - u32 div_en_mask; r = readl(pll->pwr_addr) | CON0_PWR_ON; writel(r, pll->pwr_addr); @@ -256,9 +255,8 @@ static int mtk_pll_prepare(struct clk_hw *hw) r = readl(pll->en_addr) | BIT(pll->data->pll_en_bit); writel(r, pll->en_addr); - div_en_mask = pll->data->en_mask & ~CON0_BASE_EN; - if (div_en_mask) { - r = readl(pll->base_addr + REG_CON0) | div_en_mask; + if (pll->data->en_mask) { + r = readl(pll->base_addr + REG_CON0) | pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); } @@ -279,7 +277,6 @@ static void mtk_pll_unprepare(struct clk_hw *hw) { struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); u32 r; - u32 div_en_mask; if (pll->data->flags & HAVE_RST_BAR) { r = readl(pll->base_addr + REG_CON0); @@ -289,9 +286,8 @@ static void mtk_pll_unprepare(struct clk_hw *hw) __mtk_pll_tuner_disable(pll); - div_en_mask = pll->data->en_mask & ~CON0_BASE_EN; - if (div_en_mask) { - r = readl(pll->base_addr + REG_CON0) & ~div_en_mask; + if (pll->data->en_mask) { + r = readl(pll->base_addr + REG_CON0) & ~pll->data->en_mask; writel(r, pll->base_addr + REG_CON0); } @@ -314,12 +310,12 @@ static const struct clk_ops mtk_pll_ops = { .set_rate = mtk_pll_set_rate, }; -static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, +static struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data, void __iomem *base) { struct mtk_clk_pll *pll; struct clk_init_data init = {}; - struct clk *clk; + int ret; const char *parent_name = "clk26m"; pll = kzalloc(sizeof(*pll), GFP_KERNEL); @@ -354,36 +350,36 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, init.parent_names = &parent_name; init.num_parents = 1; - clk = clk_register(NULL, &pll->hw); + ret = clk_hw_register(NULL, &pll->hw); - if (IS_ERR(clk)) + if (ret) { kfree(pll); + return ERR_PTR(ret); + } - return clk; + return &pll->hw; } -static void mtk_clk_unregister_pll(struct clk *clk) +static void mtk_clk_unregister_pll(struct clk_hw *hw) { - struct clk_hw *hw; struct mtk_clk_pll *pll; - hw = __clk_get_hw(clk); if (!hw) return; pll = to_mtk_clk_pll(hw); - clk_unregister(clk); + clk_hw_unregister(hw); kfree(pll); } int mtk_clk_register_plls(struct device_node *node, const struct mtk_pll_data *plls, int num_plls, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { void __iomem *base; int i; - struct clk *clk; + struct clk_hw *hw; base = of_iomap(node, 0); if (!base) { @@ -394,20 +390,21 @@ int mtk_clk_register_plls(struct device_node *node, for (i = 0; i < num_plls; i++) { const struct mtk_pll_data *pll = &plls[i]; - if (!IS_ERR_OR_NULL(clk_data->clks[pll->id])) { + if (!IS_ERR_OR_NULL(clk_data->hws[pll->id])) { pr_warn("%pOF: Trying to register duplicate clock ID: %d\n", node, pll->id); continue; } - clk = mtk_clk_register_pll(pll, base); + hw = 
mtk_clk_register_pll(pll, base); - if (IS_ERR(clk)) { - pr_err("Failed to register clk %s: %pe\n", pll->name, clk); + if (IS_ERR(hw)) { + pr_err("Failed to register clk %s: %pe\n", pll->name, + hw); goto err; } - clk_data->clks[pll->id] = clk; + clk_data->hws[pll->id] = hw; } return 0; @@ -416,27 +413,26 @@ err: while (--i >= 0) { const struct mtk_pll_data *pll = &plls[i]; - mtk_clk_unregister_pll(clk_data->clks[pll->id]); - clk_data->clks[pll->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_pll(clk_data->hws[pll->id]); + clk_data->hws[pll->id] = ERR_PTR(-ENOENT); } iounmap(base); - return PTR_ERR(clk); + return PTR_ERR(hw); } EXPORT_SYMBOL_GPL(mtk_clk_register_plls); -static __iomem void *mtk_clk_pll_get_base(struct clk *clk, +static __iomem void *mtk_clk_pll_get_base(struct clk_hw *hw, const struct mtk_pll_data *data) { - struct clk_hw *hw = __clk_get_hw(clk); struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); return pll->base_addr - data->reg; } void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls, - struct clk_onecell_data *clk_data) + struct clk_hw_onecell_data *clk_data) { __iomem void *base = NULL; int i; @@ -447,7 +443,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls, for (i = num_plls; i > 0; i--) { const struct mtk_pll_data *pll = &plls[i - 1]; - if (IS_ERR_OR_NULL(clk_data->clks[pll->id])) + if (IS_ERR_OR_NULL(clk_data->hws[pll->id])) continue; /* @@ -456,10 +452,10 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls, * pointer to the I/O region base address. We have to fetch * it from one of the registered clks. */ - base = mtk_clk_pll_get_base(clk_data->clks[pll->id], pll); + base = mtk_clk_pll_get_base(clk_data->hws[pll->id], pll); - mtk_clk_unregister_pll(clk_data->clks[pll->id]); - clk_data->clks[pll->id] = ERR_PTR(-ENOENT); + mtk_clk_unregister_pll(clk_data->hws[pll->id]); + clk_data->hws[pll->id] = ERR_PTR(-ENOENT); } iounmap(base); diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h index bf06e44caef9..fe3199715688 100644 --- a/drivers/clk/mediatek/clk-pll.h +++ b/drivers/clk/mediatek/clk-pll.h @@ -10,7 +10,7 @@ #include <linux/types.h> struct clk_ops; -struct clk_onecell_data; +struct clk_hw_onecell_data; struct device_node; struct mtk_pll_div_table { @@ -50,8 +50,8 @@ struct mtk_pll_data { int mtk_clk_register_plls(struct device_node *node, const struct mtk_pll_data *plls, int num_plls, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls, - struct clk_onecell_data *clk_data); + struct clk_hw_onecell_data *clk_data); #endif /* __DRV_CLK_MTK_PLL_H */ diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index d01436be6d7a..bc4dcf356d82 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -419,6 +419,15 @@ config SC_GCC_8180X Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, UFS, SDCC, etc. +config SC_GCC_8280XP + tristate "SC8280XP Global Clock Controller" + select QCOM_GDSC + depends on COMMON_CLK_QCOM + help + Support for the global clock controller on SC8280XP devices. + Say Y if you want to use peripheral devices such as UART, SPI, + I2C, USB, UFS, SDCC, etc. + config SC_GPUCC_7180 tristate "SC7180 Graphics Clock Controller" select SC_GCC_7180 @@ -452,6 +461,16 @@ config SC_LPASS_CORECC_7180 Say Y if you want to use LPASS clocks and power domains of the LPASS core clock controller. 
+config SC_LPASS_CORECC_7280 + tristate "SC7280 LPASS Core & Audio Clock Controller" + select SC_GCC_7280 + select QCOM_GDSC + help + Support for the LPASS(Low Power Audio Subsystem) core and audio clock + controller on SC7280 devices. + Say Y if you want to use LPASS clocks and power domains of the LPASS + core clock controller. + config SC_MSS_7180 tristate "SC7180 Modem Clock Controller" select SC_GCC_7180 diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 671cf5821af1..36789f5233ef 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -67,10 +67,12 @@ obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o +obj-$(CONFIG_SC_GCC_8280XP) += gcc-sc8280xp.o obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o +obj-$(CONFIG_SC_LPASS_CORECC_7280) += lpasscorecc-sc7280.o lpassaudiocc-sc7280.o obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o obj-$(CONFIG_SC_VIDEOCC_7280) += videocc-sc7280.o diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h index 00cea508d49e..012e745794fd 100644 --- a/drivers/clk/qcom/clk-rcg.h +++ b/drivers/clk/qcom/clk-rcg.h @@ -140,6 +140,7 @@ extern const struct clk_ops clk_dyn_rcg_ops; * @freq_tbl: frequency table * @clkr: regmap clock handle * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG + * @parked_cfg: cached value of the CFG register for parked RCGs */ struct clk_rcg2 { u32 cmd_rcgr; @@ -150,6 +151,7 @@ struct clk_rcg2 { const struct freq_tbl *freq_tbl; struct clk_regmap clkr; u8 cfg_off; + u32 parked_cfg; }; #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index e9c357309fd9..8e5dce09d162 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -73,16 +73,11 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw) return (cmd & CMD_ROOT_OFF) == 0; } -static u8 clk_rcg2_get_parent(struct clk_hw *hw) +static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); int num_parents = clk_hw_get_num_parents(hw); - u32 cfg; - int i, ret; - - ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); - if (ret) - goto err; + int i; cfg &= CFG_SRC_SEL_MASK; cfg >>= CFG_SRC_SEL_SHIFT; @@ -91,12 +86,27 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw) if (cfg == rcg->parent_map[i].cfg) return i; -err: pr_debug("%s: Clock %s has invalid parent, using default.\n", __func__, clk_hw_get_name(hw)); return 0; } +static u8 clk_rcg2_get_parent(struct clk_hw *hw) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + u32 cfg; + int ret; + + ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); + if (ret) { + pr_debug("%s: Unable to read CFG register for %s\n", + __func__, clk_hw_get_name(hw)); + return 0; + } + + return __clk_rcg2_get_parent(hw, cfg); +} + static int update_config(struct clk_rcg2 *rcg) { int count, ret; @@ -163,12 +173,10 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div) } static unsigned long -clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); - u32 cfg, 
hid_div, m = 0, n = 0, mode = 0, mask; - - regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); + u32 hid_div, m = 0, n = 0, mode = 0, mask; if (rcg->mnd_width) { mask = BIT(rcg->mnd_width) - 1; @@ -189,6 +197,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) return calc_rate(parent_rate, m, n, mode, hid_div); } +static unsigned long +clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + u32 cfg; + + regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); + + return __clk_rcg2_recalc_rate(hw, parent_rate, cfg); +} + static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, struct clk_rate_request *req, enum freq_policy policy) @@ -262,7 +281,8 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR); } -static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) +static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f, + u32 *_cfg) { u32 cfg, mask, d_val, not2d_val, n_minus_m; struct clk_hw *hw = &rcg->clkr.hw; @@ -304,15 +324,27 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; if (rcg->mnd_width && f->n && (f->m != f->n)) cfg |= CFG_MODE_DUAL_EDGE; - return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), - mask, cfg); + + *_cfg &= ~mask; + *_cfg |= cfg; + + return 0; } static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) { + u32 cfg; int ret; - ret = __clk_rcg2_configure(rcg, f); + ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg); + if (ret) + return ret; + + ret = __clk_rcg2_configure(rcg, f, &cfg); + if (ret) + return ret; + + ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg); if (ret) return ret; @@ -979,11 +1011,12 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, return -EINVAL; /* - * In case clock is disabled, update the CFG, M, N and D registers - * and don't hit the update bit of CMD register. + * In case clock is disabled, update the M, N and D registers, cache + * the CFG value in parked_cfg and don't hit the update bit of CMD + * register. */ - if (!__clk_is_enabled(hw->clk)) - return __clk_rcg2_configure(rcg, f); + if (!clk_hw_is_enabled(hw)) + return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg); return clk_rcg2_shared_force_enable_clear(hw, f); } @@ -1007,6 +1040,11 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw) if (ret) return ret; + /* Write back the stored configuration corresponding to current rate */ + ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg); + if (ret) + return ret; + ret = update_config(rcg); if (ret) return ret; @@ -1017,13 +1055,12 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw) static void clk_rcg2_shared_disable(struct clk_hw *hw) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); - u32 cfg; /* * Store current configuration as switching to safe source would clear * the SRC and DIV of CFG register */ - regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg); /* * Park the RCG at a safe configuration - sourced off of safe source. 
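/*
 * Editor's sketch (not part of the patch): a minimal user-space model of the
 * parked-RCG idea the surrounding hunks implement with rcg->parked_cfg.
 * While the clock is parked (disabled), configuration reads and writes go to
 * a cached CFG word; the hardware register is only written again on enable.
 * The struct fake_rcg, function names and main() below are illustrative
 * assumptions, not kernel API; only the shift/mask values mirror the driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)

struct fake_rcg {
	uint32_t cfg_reg;	/* stands in for the hardware CFG register */
	uint32_t parked_cfg;	/* cached CFG while the RCG is parked */
	bool enabled;
};

/* On disable: remember the current CFG before parking on the safe source. */
static void fake_rcg_disable(struct fake_rcg *rcg)
{
	rcg->parked_cfg = rcg->cfg_reg;
	rcg->cfg_reg = 0;	/* parked: safe source, SRC/DIV cleared */
	rcg->enabled = false;
}

/* On enable: write the cached CFG back before the clock runs again. */
static void fake_rcg_enable(struct fake_rcg *rcg)
{
	rcg->cfg_reg = rcg->parked_cfg;
	rcg->enabled = true;
}

/* set_parent while parked only updates the cache, never the register. */
static void fake_rcg_set_parent(struct fake_rcg *rcg, uint32_t src)
{
	uint32_t *cfg = rcg->enabled ? &rcg->cfg_reg : &rcg->parked_cfg;

	*cfg &= ~CFG_SRC_SEL_MASK;
	*cfg |= (src << CFG_SRC_SEL_SHIFT) & CFG_SRC_SEL_MASK;
}

int main(void)
{
	struct fake_rcg rcg = { .cfg_reg = 0x105, .enabled = true };

	fake_rcg_disable(&rcg);		/* CFG cached, register parked */
	fake_rcg_set_parent(&rcg, 2);	/* only the cached copy changes */
	fake_rcg_enable(&rcg);		/* cache written back to CFG */
	printf("cfg=0x%x\n", rcg.cfg_reg);
	return 0;
}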
@@ -1041,17 +1078,52 @@ static void clk_rcg2_shared_disable(struct clk_hw *hw) update_config(rcg); clk_rcg2_clear_force_enable(hw); +} - /* Write back the stored configuration corresponding to current rate */ - regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg); +static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + /* If the shared rcg is parked use the cached cfg instead */ + if (!clk_hw_is_enabled(hw)) + return __clk_rcg2_get_parent(hw, rcg->parked_cfg); + + return clk_rcg2_get_parent(hw); +} + +static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + /* If the shared rcg is parked only update the cached cfg */ + if (!clk_hw_is_enabled(hw)) { + rcg->parked_cfg &= ~CFG_SRC_SEL_MASK; + rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; + + return 0; + } + + return clk_rcg2_set_parent(hw, index); +} + +static unsigned long +clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + struct clk_rcg2 *rcg = to_clk_rcg2(hw); + + /* If the shared rcg is parked use the cached cfg instead */ + if (!clk_hw_is_enabled(hw)) + return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg); + + return clk_rcg2_recalc_rate(hw, parent_rate); } const struct clk_ops clk_rcg2_shared_ops = { .enable = clk_rcg2_shared_enable, .disable = clk_rcg2_shared_disable, - .get_parent = clk_rcg2_get_parent, - .set_parent = clk_rcg2_set_parent, - .recalc_rate = clk_rcg2_recalc_rate, + .get_parent = clk_rcg2_shared_get_parent, + .set_parent = clk_rcg2_shared_set_parent, + .recalc_rate = clk_rcg2_shared_recalc_rate, .determine_rate = clk_rcg2_determine_rate, .set_rate = clk_rcg2_shared_set_rate, .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index afc6dc930011..10b4e6d8d10f 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -563,17 +563,19 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = { .num_clks = ARRAY_SIZE(msm8974_clks), }; -DEFINE_CLK_SMD_RPM(msm8976, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, - QCOM_SMD_RPM_BUS_CLK, 2); DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); static struct clk_smd_rpm *msm8976_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk, [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk, [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk, [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk, [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk, [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk, + [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk, + [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk, [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk, [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk, [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1, @@ -586,8 +588,6 @@ static struct clk_smd_rpm *msm8976_clks[] = { [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin, [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin, [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin, - [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8976_mmssnoc_ahb_clk, - [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8976_mmssnoc_ahb_a_clk, [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2, [RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2, [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk, diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c index a8b15814933e..6b112984694c 100644 --- a/drivers/clk/qcom/gcc-msm8976.c +++ b/drivers/clk/qcom/gcc-msm8976.c @@ -1486,7 
+1486,7 @@ static const struct clk_init_data sdcc1_apps_clk_src_8976v1_1_init = { .name = "sdcc1_apps_clk_src", .parent_data = gcc_parent_data_v1_1, .num_parents = ARRAY_SIZE(gcc_parent_data_v1_1), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }; static struct clk_rcg2 sdcc1_apps_clk_src = { @@ -1499,7 +1499,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { .name = "sdcc1_apps_clk_src", .parent_data = gcc_parent_data_1, .num_parents = ARRAY_SIZE(gcc_parent_data_1), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -1547,7 +1547,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = { .name = "sdcc2_apps_clk_src", .parent_data = gcc_parent_data_4_8, .num_parents = ARRAY_SIZE(gcc_parent_data_4_8), - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -4056,6 +4056,7 @@ static const struct qcom_reset_map gcc_msm8976_resets[] = { [RST_CAMSS_CSI_VFE1_BCR] = { 0x58070 }, [RST_CAMSS_VFE1_BCR] = { 0x5807c }, [RST_CAMSS_CPP_BCR] = { 0x58080 }, + [RST_MSS_BCR] = { 0x71000 }, }; static struct gdsc *gcc_msm8976_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c index 407e2c5caea4..33473c52eb90 100644 --- a/drivers/clk/qcom/gcc-msm8998.c +++ b/drivers/clk/qcom/gcc-msm8998.c @@ -2833,6 +2833,58 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = { }, }; +static struct clk_branch gcc_im_sleep_clk = { + .halt_reg = 0x4300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x4300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "gcc_im_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch aggre2_snoc_north_axi_clk = { + .halt_reg = 0x83010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x83010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "aggre2_snoc_north_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch ssc_xo_clk = { + .halt_reg = 0x63018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x63018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "ssc_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch ssc_cnoc_ahbs_clk = { + .halt_reg = 0x6300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "ssc_cnoc_ahbs_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc pcie_0_gdsc = { .gdscr = 0x6b004, .gds_hw_ctrl = 0x0, @@ -3036,6 +3088,10 @@ static struct clk_regmap *gcc_msm8998_clocks[] = { [GCC_MSS_MNOC_BIMC_AXI_CLK] = &gcc_mss_mnoc_bimc_axi_clk.clkr, [GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr, [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr, + [GCC_IM_SLEEP] = &gcc_im_sleep_clk.clkr, + [AGGRE2_SNOC_NORTH_AXI] = &aggre2_snoc_north_axi_clk.clkr, + [SSC_XO] = &ssc_xo_clk.clkr, + [SSC_CNOC_AHBS_CLK] = &ssc_cnoc_ahbs_clk.clkr, }; static struct gdsc *gcc_msm8998_gdscs[] = { diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c new file mode 100644 index 000000000000..4b894442fdf5 --- /dev/null +++ b/drivers/clk/qcom/gcc-sc8280xp.c @@ -0,0 +1,7488 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Linaro Ltd. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,gcc-sc8280xp.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +/* Need to match the order of clocks in DT binding */ +enum { + DT_BI_TCXO, + DT_SLEEP_CLK, + DT_UFS_PHY_RX_SYMBOL_0_CLK, + DT_UFS_PHY_RX_SYMBOL_1_CLK, + DT_UFS_PHY_TX_SYMBOL_0_CLK, + DT_UFS_CARD_RX_SYMBOL_0_CLK, + DT_UFS_CARD_RX_SYMBOL_1_CLK, + DT_UFS_CARD_TX_SYMBOL_0_CLK, + DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, + DT_GCC_USB4_PHY_PIPEGMUX_CLK_SRC, + DT_GCC_USB4_PHY_DP_GMUX_CLK_SRC, + DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, + DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, + DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, + DT_QUSB4PHY_GCC_USB4_RX0_CLK, + DT_QUSB4PHY_GCC_USB4_RX1_CLK, + DT_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, + DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, + DT_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC, + DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, + DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, + DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, + DT_QUSB4PHY_1_GCC_USB4_RX0_CLK, + DT_QUSB4PHY_1_GCC_USB4_RX1_CLK, + DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, + DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, + DT_PCIE_2A_PIPE_CLK, + DT_PCIE_2B_PIPE_CLK, + DT_PCIE_3A_PIPE_CLK, + DT_PCIE_3B_PIPE_CLK, + DT_PCIE_4_PIPE_CLK, + DT_RXC0_REF_CLK, + DT_RXC1_REF_CLK, +}; + +enum { + P_BI_TCXO, + P_GCC_GPLL0_OUT_EVEN, + P_GCC_GPLL0_OUT_MAIN, + P_GCC_GPLL2_OUT_MAIN, + P_GCC_GPLL4_OUT_MAIN, + P_GCC_GPLL7_OUT_MAIN, + P_GCC_GPLL8_OUT_MAIN, + P_GCC_GPLL9_OUT_MAIN, + P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, + P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, + P_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC, + P_GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC, + P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, + P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, + P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, + P_GCC_USB4_PHY_DP_GMUX_CLK_SRC, + P_GCC_USB4_PHY_PCIE_PIPE_CLK_SRC, + P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC, + P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC, + P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, + P_PCIE_2A_PIPE_CLK, + P_PCIE_2B_PIPE_CLK, + P_PCIE_3A_PIPE_CLK, + P_PCIE_3B_PIPE_CLK, + P_PCIE_4_PIPE_CLK, + P_QUSB4PHY_1_GCC_USB4_RX0_CLK, + P_QUSB4PHY_1_GCC_USB4_RX1_CLK, + P_QUSB4PHY_GCC_USB4_RX0_CLK, + P_QUSB4PHY_GCC_USB4_RX1_CLK, + P_RXC0_REF_CLK, + P_RXC1_REF_CLK, + P_SLEEP_CLK, + P_UFS_CARD_RX_SYMBOL_0_CLK, + P_UFS_CARD_RX_SYMBOL_1_CLK, + P_UFS_CARD_TX_SYMBOL_0_CLK, + P_UFS_PHY_RX_SYMBOL_0_CLK, + P_UFS_PHY_RX_SYMBOL_1_CLK, + P_UFS_PHY_TX_SYMBOL_0_CLK, + P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, + P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, + P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, + P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, + P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, + P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, + P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, + P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, +}; + +static const struct clk_parent_data gcc_parent_data_tcxo = { .index = DT_BI_TCXO }; + +static struct clk_alpha_pll gcc_gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static const struct clk_div_table 
post_div_table_gcc_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_gcc_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll0_out_even", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_5lpe_ops, + }, +}; + +static struct clk_alpha_pll gcc_gpll2 = { + .offset = 0x2000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll2", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll4 = { + .offset = 0x76000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll4", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll7 = { + .offset = 0x1a000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll7", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll8 = { + .offset = 0x1b000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll8", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static struct clk_alpha_pll gcc_gpll9 = { + .offset = 0x1c000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52028, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpll9", + .parent_data = &gcc_parent_data_tcxo, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_5lpe_ops, + }, + }, +}; + +static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src; +static struct clk_rcg2 gcc_usb4_phy_pcie_pipe_clk_src; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .index = DT_BI_TCXO }, + { .index = DT_SLEEP_CLK }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .index = DT_SLEEP_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, +}; + +static const 
struct clk_parent_data gcc_parent_data_3[] = { + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_4[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll7.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL8_OUT_MAIN, 2 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_5[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll8.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_6[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_6[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll7.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_7[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL2_OUT_MAIN, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_7[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll2.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_8[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_RXC0_REF_CLK, 3 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_8[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll7.clkr.hw }, + { .index = DT_RXC0_REF_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_9[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL7_OUT_MAIN, 2 }, + { P_RXC1_REF_CLK, 3 }, + { P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_9[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll7.clkr.hw }, + { .index = DT_RXC1_REF_CLK }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_10[] = { + { P_PCIE_2A_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_10[] = { + { .index = DT_PCIE_2A_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_11[] = { + { P_PCIE_2B_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_11[] = { + { .index = DT_PCIE_2B_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_12[] = { + { P_PCIE_3A_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_12[] = { + { .index = DT_PCIE_3A_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_13[] = { + { P_PCIE_3B_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_13[] = { + { .index = DT_PCIE_3B_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_14[] = { + { P_PCIE_4_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_14[] = { + { .index = DT_PCIE_4_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_15[] = { + { P_BI_TCXO, 0 }, + { P_GCC_GPLL0_OUT_MAIN, 1 }, + { P_GCC_GPLL9_OUT_MAIN, 2 }, + { P_GCC_GPLL4_OUT_MAIN, 5 }, + { 
P_GCC_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_15[] = { + { .index = DT_BI_TCXO }, + { .hw = &gcc_gpll0.clkr.hw }, + { .hw = &gcc_gpll9.clkr.hw }, + { .hw = &gcc_gpll4.clkr.hw }, + { .hw = &gcc_gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_16[] = { + { P_UFS_CARD_RX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_16[] = { + { .index = DT_UFS_CARD_RX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_17[] = { + { P_UFS_CARD_RX_SYMBOL_1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_17[] = { + { .index = DT_UFS_CARD_RX_SYMBOL_1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_18[] = { + { P_UFS_CARD_TX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_18[] = { + { .index = DT_UFS_CARD_TX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_19[] = { + { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_19[] = { + { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_20[] = { + { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_20[] = { + { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_21[] = { + { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_21[] = { + { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_22[] = { + { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_22[] = { + { .index = DT_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_23[] = { + { P_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_23[] = { + { .index = DT_USB3_UNI_PHY_SEC_GCC_USB30_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = { + .reg = 0xf060, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_22, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk_src", + .parent_data = gcc_parent_data_22, + .num_parents = ARRAY_SIZE(gcc_parent_data_22), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = { + .reg = 0x10060, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_23, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_sec_phy_pipe_clk_src", + .parent_data = gcc_parent_data_23, + .num_parents = ARRAY_SIZE(gcc_parent_data_23), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_24[] = { + { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_24[] = { + { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_25[] = { + { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + 
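+
+/*
+ * Note on the tables in this file: each gcc_parent_map_N[] entry pairs a
+ * P_* parent symbol with the raw mux select value programmed into the
+ * hardware, while the gcc_parent_data_N[] entry at the same index tells
+ * the clock framework where that parent comes from -- .index entries are
+ * board/firmware clocks referenced through the DT_* defines, .hw entries
+ * point at other clocks owned by this driver.
+ */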
+static const struct clk_parent_data gcc_parent_data_25[] = { + { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_26[] = { + { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 }, + { P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 }, + { P_GCC_USB4_PHY_PIPEGMUX_CLK_SRC, 3 }, +}; + +static const struct clk_parent_data gcc_parent_data_26[] = { + { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw }, + { .index = DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK }, + { .index = DT_GCC_USB4_PHY_PIPEGMUX_CLK_SRC }, +}; + +static const struct parent_map gcc_parent_map_27[] = { + { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 }, + { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 }, + { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 }, +}; + +static const struct clk_parent_data gcc_parent_data_27[] = { + { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw }, + { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK }, + { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC }, +}; + +static const struct parent_map gcc_parent_map_28[] = { + { P_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC, 0 }, + { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_28[] = { + { .index = DT_GCC_USB4_1_PHY_DP_GMUX_CLK_SRC }, + { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK }, +}; + +static const struct parent_map gcc_parent_map_29[] = { + { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_29[] = { + { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_30[] = { + { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 }, + { P_GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC, 1 }, +}; + +static const struct clk_parent_data gcc_parent_data_30[] = { + { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC }, + { .hw = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr.hw }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipegmux_clk_src = { + .reg = 0xb80dc, + .shift = 0, + .width = 1, + .parent_map = gcc_parent_map_30, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_pcie_pipegmux_clk_src", + .parent_data = gcc_parent_data_30, + .num_parents = ARRAY_SIZE(gcc_parent_data_30), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_31[] = { + { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 }, + { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_31[] = { + { .hw = &gcc_usb4_1_phy_pcie_pipegmux_clk_src.clkr.hw }, + { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK }, +}; + +static const struct parent_map gcc_parent_map_32[] = { + { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_32[] = { + { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_33[] = { + { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_33[] = { + { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_34[] = { + { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 }, + { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_34[] = { + { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC }, + { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK }, +}; + +static const struct parent_map 
gcc_parent_map_35[] = { + { P_GCC_USB4_PHY_DP_GMUX_CLK_SRC, 0 }, + { P_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_35[] = { + { .index = DT_GCC_USB4_PHY_DP_GMUX_CLK_SRC }, + { .index = DT_USB4_PHY_GCC_USB4RTR_MAX_PIPE_CLK }, +}; + +static const struct parent_map gcc_parent_map_36[] = { + { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_36[] = { + { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_37[] = { + { P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, 0 }, + { P_GCC_USB4_PHY_PCIE_PIPE_CLK_SRC, 1 }, +}; + +static const struct clk_parent_data gcc_parent_data_37[] = { + { .index = DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC }, + { .hw = &gcc_usb4_phy_pcie_pipe_clk_src.clkr.hw }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_pcie_pipegmux_clk_src = { + .reg = 0x2a0dc, + .shift = 0, + .width = 1, + .parent_map = gcc_parent_map_37, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_pcie_pipegmux_clk_src", + .parent_data = gcc_parent_data_37, + .num_parents = ARRAY_SIZE(gcc_parent_data_37), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_38[] = { + { P_GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 }, + { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_38[] = { + { .hw = &gcc_usb4_phy_pcie_pipegmux_clk_src.clkr.hw }, + { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK }, +}; + +static const struct parent_map gcc_parent_map_39[] = { + { P_QUSB4PHY_GCC_USB4_RX0_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_39[] = { + { .index = DT_QUSB4PHY_GCC_USB4_RX0_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_40[] = { + { P_QUSB4PHY_GCC_USB4_RX1_CLK, 0 }, + { P_BI_TCXO, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_40[] = { + { .index = DT_QUSB4PHY_GCC_USB4_RX1_CLK }, + { .index = DT_BI_TCXO }, +}; + +static const struct parent_map gcc_parent_map_41[] = { + { P_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC, 0 }, + { P_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 }, +}; + +static const struct clk_parent_data gcc_parent_data_41[] = { + { .index = DT_GCC_USB4_PHY_SYS_PIPEGMUX_CLK_SRC }, + { .index = DT_USB4_PHY_GCC_USB4_PCIE_PIPE_CLK }, +}; + +static struct clk_regmap_mux gcc_pcie_2a_pipe_clk_src = { + .reg = 0x9d05c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_10, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_pipe_clk_src", + .parent_data = gcc_parent_data_10, + .num_parents = ARRAY_SIZE(gcc_parent_data_10), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_pcie_2b_pipe_clk_src = { + .reg = 0x9e05c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_11, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_pipe_clk_src", + .parent_data = gcc_parent_data_11, + .num_parents = ARRAY_SIZE(gcc_parent_data_11), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_pcie_3a_pipe_clk_src = { + .reg = 0xa005c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_12, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_pipe_clk_src", + .parent_data = gcc_parent_data_12, + .num_parents = ARRAY_SIZE(gcc_parent_data_12), + .ops = 
&clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_pcie_3b_pipe_clk_src = { + .reg = 0xa205c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_13, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_pipe_clk_src", + .parent_data = gcc_parent_data_13, + .num_parents = ARRAY_SIZE(gcc_parent_data_13), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_pcie_4_pipe_clk_src = { + .reg = 0x6b05c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_14, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_pipe_clk_src", + .parent_data = gcc_parent_data_14, + .num_parents = ARRAY_SIZE(gcc_parent_data_14), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_card_rx_symbol_0_clk_src = { + .reg = 0x75058, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_16, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_rx_symbol_0_clk_src", + .parent_data = gcc_parent_data_16, + .num_parents = ARRAY_SIZE(gcc_parent_data_16), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_card_rx_symbol_1_clk_src = { + .reg = 0x750c8, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_17, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_rx_symbol_1_clk_src", + .parent_data = gcc_parent_data_17, + .num_parents = ARRAY_SIZE(gcc_parent_data_17), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_card_tx_symbol_0_clk_src = { + .reg = 0x75048, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_18, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_tx_symbol_0_clk_src", + .parent_data = gcc_parent_data_18, + .num_parents = ARRAY_SIZE(gcc_parent_data_18), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = { + .reg = 0x77058, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_19, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_0_clk_src", + .parent_data = gcc_parent_data_19, + .num_parents = ARRAY_SIZE(gcc_parent_data_19), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = { + .reg = 0x770c8, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_20, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_1_clk_src", + .parent_data = gcc_parent_data_20, + .num_parents = ARRAY_SIZE(gcc_parent_data_20), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = { + .reg = 0x77048, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_21, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_tx_symbol_0_clk_src", + .parent_data = gcc_parent_data_21, + .num_parents = ARRAY_SIZE(gcc_parent_data_21), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = { + .reg = 0xf064, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_26, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb34_prim_phy_pipe_clk_src", + .parent_data = gcc_parent_data_26, + .num_parents = ARRAY_SIZE(gcc_parent_data_26), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux 
gcc_usb34_sec_phy_pipe_clk_src = { + .reg = 0x10064, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_27, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb34_sec_phy_pipe_clk_src", + .parent_data = gcc_parent_data_27, + .num_parents = ARRAY_SIZE(gcc_parent_data_27), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = { + .reg = 0xab060, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_24, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_pipe_0_clk_src", + .parent_data = gcc_parent_data_24, + .num_parents = ARRAY_SIZE(gcc_parent_data_24), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = { + .reg = 0xab068, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_25, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_pipe_1_clk_src", + .parent_data = gcc_parent_data_25, + .num_parents = ARRAY_SIZE(gcc_parent_data_25), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_dp_clk_src = { + .reg = 0xb8050, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_28, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_dp_clk_src", + .parent_data = gcc_parent_data_28, + .num_parents = ARRAY_SIZE(gcc_parent_data_28), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = { + .reg = 0xb80b0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_29, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src", + .parent_data = gcc_parent_data_29, + .num_parents = ARRAY_SIZE(gcc_parent_data_29), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = { + .reg = 0xb80e0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_31, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src", + .parent_data = gcc_parent_data_31, + .num_parents = ARRAY_SIZE(gcc_parent_data_31), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = { + .reg = 0xb8090, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_32, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_rx0_clk_src", + .parent_data = gcc_parent_data_32, + .num_parents = ARRAY_SIZE(gcc_parent_data_32), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = { + .reg = 0xb809c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_33, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_rx1_clk_src", + .parent_data = gcc_parent_data_33, + .num_parents = ARRAY_SIZE(gcc_parent_data_33), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = { + .reg = 0xb80c0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_34, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_sys_clk_src", + .parent_data = gcc_parent_data_34, + .num_parents = ARRAY_SIZE(gcc_parent_data_34), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_dp_clk_src = { + .reg = 0x2a050, + .shift = 0, + .width = 2, + 
.parent_map = gcc_parent_map_35, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_dp_clk_src", + .parent_data = gcc_parent_data_35, + .num_parents = ARRAY_SIZE(gcc_parent_data_35), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_p2rr2p_pipe_clk_src = { + .reg = 0x2a0b0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_36, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_p2rr2p_pipe_clk_src", + .parent_data = gcc_parent_data_36, + .num_parents = ARRAY_SIZE(gcc_parent_data_36), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_pcie_pipe_mux_clk_src = { + .reg = 0x2a0e0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_38, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_pcie_pipe_mux_clk_src", + .parent_data = gcc_parent_data_38, + .num_parents = ARRAY_SIZE(gcc_parent_data_38), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_rx0_clk_src = { + .reg = 0x2a090, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_39, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_rx0_clk_src", + .parent_data = gcc_parent_data_39, + .num_parents = ARRAY_SIZE(gcc_parent_data_39), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_rx1_clk_src = { + .reg = 0x2a09c, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_40, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_rx1_clk_src", + .parent_data = gcc_parent_data_40, + .num_parents = ARRAY_SIZE(gcc_parent_data_40), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static struct clk_regmap_mux gcc_usb4_phy_sys_clk_src = { + .reg = 0x2a0c0, + .shift = 0, + .width = 2, + .parent_map = gcc_parent_map_41, + .clkr = { + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_sys_clk_src", + .parent_data = gcc_parent_data_41, + .num_parents = ARRAY_SIZE(gcc_parent_data_41), + .ops = &clk_regmap_mux_closest_ops, + }, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac0_ptp_clk_src[] = { + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0), + F(230400000, P_GCC_GPLL4_OUT_MAIN, 3.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac0_ptp_clk_src = { + .cmd_rcgr = 0xaa020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_emac0_ptp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_ptp_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_emac0_rgmii_clk_src[] = { + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_emac0_rgmii_clk_src = { + .cmd_rcgr = 0xaa040, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_8, + .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_rgmii_clk_src", + .parent_data = gcc_parent_data_8, + .num_parents = ARRAY_SIZE(gcc_parent_data_8), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_emac1_ptp_clk_src = { + .cmd_rcgr = 0xba020, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + 
.freq_tbl = ftbl_gcc_emac0_ptp_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_ptp_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = ARRAY_SIZE(gcc_parent_data_4), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_emac1_rgmii_clk_src = { + .cmd_rcgr = 0xba040, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_9, + .freq_tbl = ftbl_gcc_emac0_rgmii_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_rgmii_clk_src", + .parent_data = gcc_parent_data_9, + .num_parents = ARRAY_SIZE(gcc_parent_data_9), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x64004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x65004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp2_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x66004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp4_clk_src = { + .cmd_rcgr = 0xc2004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp4_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_gp5_clk_src = { + .cmd_rcgr = 0xc3004, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_gp5_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = ARRAY_SIZE(gcc_parent_data_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0xa4054, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = { + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = { + .cmd_rcgr = 0xa403c, + .mnd_width = 0, + .hid_width = 5, + 
.parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_1_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x8d054, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = { + .cmd_rcgr = 0x8d03c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_2a_aux_clk_src = { + .cmd_rcgr = 0x9d064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_2a_phy_rchng_clk_src = { + .cmd_rcgr = 0x9d044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_2b_aux_clk_src = { + .cmd_rcgr = 0x9e064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_2b_phy_rchng_clk_src = { + .cmd_rcgr = 0x9e044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = { + .cmd_rcgr = 0xa0064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = { + .cmd_rcgr = 0xa0044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = 
"gcc_pcie_3a_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = { + .cmd_rcgr = 0xa2064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = { + .cmd_rcgr = 0xa2044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_4_aux_clk_src = { + .cmd_rcgr = 0x6b064, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = { + .cmd_rcgr = 0x6b044, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_phy_rchng_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_rscc_xo_clk_src = { + .cmd_rcgr = 0xae00c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_rscc_xo_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(60000000, P_GCC_GPLL0_OUT_EVEN, 5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x33010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + 
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x17148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x17278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x173a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x174d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x17608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x17738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s6_clk_src[] = { + F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25), + F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25), + F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_data = gcc_parent_data_0, 
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { + .cmd_rcgr = 0x17868, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { + .cmd_rcgr = 0x17998, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x18148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x18278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x183a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x184d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x18608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + 
.parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x18738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { + .name = "gcc_qupv3_wrap1_s6_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { + .cmd_rcgr = 0x18868, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { + .name = "gcc_qupv3_wrap1_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { + .cmd_rcgr = 0x18998, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { + .name = "gcc_qupv3_wrap2_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { + .cmd_rcgr = 0x1e148, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { + .name = "gcc_qupv3_wrap2_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { + .cmd_rcgr = 0x1e278, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { + .name = "gcc_qupv3_wrap2_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { + .cmd_rcgr = 0x1e3a8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { + .name = "gcc_qupv3_wrap2_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { + .cmd_rcgr = 0x1e4d8, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { + .name = 
"gcc_qupv3_wrap2_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { + .cmd_rcgr = 0x1e608, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { + .name = "gcc_qupv3_wrap2_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { + .cmd_rcgr = 0x1e738, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = { + .name = "gcc_qupv3_wrap2_s6_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = { + .cmd_rcgr = 0x1e868, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = { + .name = "gcc_qupv3_wrap2_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_shared_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = { + .cmd_rcgr = 0x1e998, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s6_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .cmd_rcgr = 0x1400c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_15, + .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc2_apps_clk_src", + .parent_data = gcc_parent_data_15, + .num_parents = ARRAY_SIZE(gcc_parent_data_15), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { + .cmd_rcgr = 0x1600c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc4_apps_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = { + F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0), + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct 
clk_rcg2 gcc_ufs_card_axi_clk_src = { + .cmd_rcgr = 0x75024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_axi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = { + F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = { + .cmd_rcgr = 0x7506c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_ice_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = { + .cmd_rcgr = 0x750a0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = { + .cmd_rcgr = 0x75084, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_unipro_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x77024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_axi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x7706c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x770a0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x77084, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl 
ftbl_gcc_usb30_mp_master_clk_src[] = { + F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0), + F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0), + F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_mp_master_clk_src = { + .cmd_rcgr = 0xab020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_mp_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = { + .cmd_rcgr = 0xab038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_mp_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0xf020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0xf038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_master_clk_src = { + .cmd_rcgr = 0x10020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = { + .cmd_rcgr = 0x10038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = ARRAY_SIZE(gcc_parent_data_0), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = { + .cmd_rcgr = 0xab06c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0xf068, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + 
}, +}; + +static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = { + .cmd_rcgr = 0x10068, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_sec_phy_aux_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb4_1_master_clk_src[] = { + F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0), + F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0), + F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb4_1_master_clk_src = { + .cmd_rcgr = 0xb8018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_usb4_1_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_master_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0), + F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = { + .cmd_rcgr = 0xb80c4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_pcie_pipe_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = { + .cmd_rcgr = 0xb8070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_sb_if_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb4_1_tmu_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(250000000, P_GCC_GPLL2_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = { + .cmd_rcgr = 0xb8054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_usb4_1_tmu_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_tmu_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb4_master_clk_src = { + .cmd_rcgr = 0x2a018, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_usb4_1_master_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_master_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = ARRAY_SIZE(gcc_parent_data_5), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb4_phy_pcie_pipe_clk_src = { + .cmd_rcgr = 0x2a0c4, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_6, + .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_pcie_pipe_clk_src", + .parent_data = gcc_parent_data_6, + .num_parents = ARRAY_SIZE(gcc_parent_data_6), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb4_sb_if_clk_src = { + .cmd_rcgr = 
0x2a070, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_pcie_1_aux_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_sb_if_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = ARRAY_SIZE(gcc_parent_data_1), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_rcg2 gcc_usb4_tmu_clk_src = { + .cmd_rcgr = 0x2a054, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_7, + .freq_tbl = ftbl_gcc_usb4_1_tmu_clk_src, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_tmu_clk_src", + .parent_data = gcc_parent_data_7, + .num_parents = ARRAY_SIZE(gcc_parent_data_7), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_2a_pipe_div_clk_src = { + .reg = 0x9d060, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2a_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_2b_pipe_div_clk_src = { + .reg = 0x9e060, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2b_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_3a_pipe_div_clk_src = { + .reg = 0xa0060, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3a_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = { + .reg = 0xa2060, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3b_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = { + .reg = 0x6b060, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_pipe_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_4_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_wrap0_s4_div_clk_src = { + .reg = 0x17ac8, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s4_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_wrap1_s4_div_clk_src = { + .reg = 0x18ac8, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s4_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_qupv3_wrap2_s4_div_clk_src = { + .reg = 0x1eac8, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct 
clk_init_data) { + .name = "gcc_qupv3_wrap2_s4_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = { + .reg = 0xab050, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0xf050, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = { + .reg = 0x10050, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie0_tunnel_axi_clk = { + .halt_reg = 0xa41a8, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xa41a8, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_pcie0_tunnel_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie1_tunnel_axi_clk = { + .halt_reg = 0x8d07c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x8d07c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(21), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_pcie1_tunnel_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie_4_axi_clk = { + .halt_reg = 0x6b1b8, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x6b1b8, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_pcie_4_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_noc_pcie_south_sf_axi_clk = { + .halt_reg = 0xbf13c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xbf13c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(13), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_noc_pcie_south_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_card_axi_clk = { + .halt_reg = 0x750cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x750cc, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x750cc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_ufs_card_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = { + .halt_reg = 0x750cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x750cc, + .hwcg_bit = 1, + 
.clkr = { + .enable_reg = 0x750cc, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_ufs_card_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { + .halt_reg = 0x770cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x770cc, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770cc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x770cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x770cc, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770cc, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_mp_axi_clk = { + .halt_reg = 0xab084, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xab084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xab084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb3_mp_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_mp_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0xf080, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xf080, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xf080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_sec_axi_clk = { + .halt_reg = 0x10080, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x10080, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x10080, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb3_sec_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb4_1_axi_clk = { + .halt_reg = 0xb80e4, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb80e4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb80e4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb4_1_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb4_axi_clk = { + .halt_reg = 0x2a0e4, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2a0e4, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2a0e4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb4_axi_clk", + .parent_hws = (const struct 
clk_hw*[]){ + &gcc_usb4_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb_noc_axi_clk = { + .halt_reg = 0x5d024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x5d024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5d024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb_noc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb_noc_north_axi_clk = { + .halt_reg = 0x5d020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x5d020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5d020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb_noc_north_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb_noc_south_axi_clk = { + .halt_reg = 0x5d01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x5d01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5d01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_aggre_usb_noc_south_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy0_clk = { + .halt_reg = 0x6a004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6a004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6a004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ahb2phy0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ahb2phy2_clk = { + .halt_reg = 0x6a008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6a008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x6a008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ahb2phy2_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x38004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0x26010, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x26010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_sf_axi_clk = { + .halt_reg = 0x26014, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x26014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_throttle_nrt_axi_clk = { + .halt_reg = 0x2601c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x2601c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2601c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_throttle_nrt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_throttle_rt_axi_clk = { + .halt_reg = 0x26018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x26018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_throttle_rt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_camera_throttle_xo_clk = { + .halt_reg = 0x26024, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x26024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_camera_throttle_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = { + .halt_reg = 0xab088, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xab088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xab088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb3_mp_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_mp_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0xf084, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xf084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xf084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { + .halt_reg = 0x10084, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x10084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x10084, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cfg_noc_usb3_sec_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie0_tunnel_clk = { + .halt_reg = 0xa4074, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie0_tunnel_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie1_tunnel_clk = { + .halt_reg = 0x8d074, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie1_tunnel_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cnoc_pcie4_qx_clk = { + .halt_reg = 0x6b084, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b084, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_cnoc_pcie4_qx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x7115c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x7115c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7115c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = { + .halt_reg = 0xa602c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xa602c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ddrss_pcie_sf_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp1_hf_axi_clk = { + .halt_reg = 0xbb010, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xbb010, + .hwcg_bit = 1, + .clkr = { + 
.enable_reg = 0xbb010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp1_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp1_sf_axi_clk = { + .halt_reg = 0xbb018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xbb018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xbb018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp1_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp1_throttle_nrt_axi_clk = { + .halt_reg = 0xbb024, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xbb024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xbb024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp1_throttle_nrt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp1_throttle_rt_axi_clk = { + .halt_reg = 0xbb020, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xbb020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xbb020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp1_throttle_rt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0x27010, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x27010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x27010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_sf_axi_clk = { + .halt_reg = 0x27018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x27018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x27018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_throttle_nrt_axi_clk = { + .halt_reg = 0x27024, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x27024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x27024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_throttle_nrt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_throttle_rt_axi_clk = { + .halt_reg = 0x27020, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x27020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x27020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_disp_throttle_rt_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_axi_clk = { + .halt_reg = 0xaa010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xaa010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xaa010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_ptp_clk = { + .halt_reg = 0xaa01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xaa01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_ptp_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_emac0_ptp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_rgmii_clk = { + .halt_reg = 0xaa038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xaa038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_rgmii_clk", + 
.parent_hws = (const struct clk_hw*[]){ + &gcc_emac0_rgmii_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac0_slv_ahb_clk = { + .halt_reg = 0xaa018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xaa018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xaa018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac0_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac1_axi_clk = { + .halt_reg = 0xba010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xba010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xba010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac1_ptp_clk = { + .halt_reg = 0xba01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xba01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_ptp_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_emac1_ptp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac1_rgmii_clk = { + .halt_reg = 0xba038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xba038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_rgmii_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_emac1_rgmii_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_emac1_slv_ahb_clk = { + .halt_reg = 0xba018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xba018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xba018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_emac1_slv_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x64000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x64000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x65000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x65000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x66000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x66000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp4_clk = { + .halt_reg = 0xc2000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc2000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops 
= &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp5_clk = { + .halt_reg = 0xc3000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xc3000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gp5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gp5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &gcc_gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_iref_en = { + .halt_reg = 0x8c014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_iref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x71010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x71010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x71020, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x71020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_tcu_throttle_ahb_clk = { + .halt_reg = 0x71008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x71008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_tcu_throttle_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_tcu_throttle_clk = { + .halt_reg = 0x71018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x71018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x71018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_gpu_tcu_throttle_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_phy_rchng_clk = { + .halt_reg = 0xa4038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie0_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_0_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_phy_rchng_clk = { + .halt_reg = 0x8d038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie1_phy_rchng_clk", + .parent_hws 
= (const struct clk_hw*[]){ + &gcc_pcie_1_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2a_phy_rchng_clk = { + .halt_reg = 0x9d040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2a_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2a_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2b_phy_rchng_clk = { + .halt_reg = 0x9e040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie2b_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2b_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3a_phy_rchng_clk = { + .halt_reg = 0xa0040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(29), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3a_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie3b_phy_rchng_clk = { + .halt_reg = 0xa2040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie3b_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie4_phy_rchng_clk = { + .halt_reg = 0x6b040, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie4_phy_rchng_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_4_phy_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0xa4028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0xa4024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa4024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0xa401c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xa401c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0xa4030, + 
.halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0xa4014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa4014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(6), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0xa4010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x8d028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(29), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x8d024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x8d01c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x8d01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x8d030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(30), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x8d014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x8d010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a2b_clkref_clk = { + .halt_reg = 0x8c034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a2b_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_aux_clk = { + .halt_reg = 
0x9d028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(13), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2a_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_cfg_ahb_clk = { + .halt_reg = 0x9d024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_mstr_axi_clk = { + .halt_reg = 0x9d01c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9d01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_pipe_clk = { + .halt_reg = 0x9d030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2a_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_pipediv2_clk = { + .halt_reg = 0x9d038, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2a_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_slv_axi_clk = { + .halt_reg = 0x9d014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9d014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2a_slv_q2a_axi_clk = { + .halt_reg = 0x9d010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2a_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_aux_clk = { + .halt_reg = 0x9e028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2b_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_cfg_ahb_clk = { + .halt_reg = 0x9e024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9e024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_mstr_axi_clk = { + .halt_reg = 0x9e01c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x9e01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(18), + .hw.init 
= &(const struct clk_init_data) { + .name = "gcc_pcie_2b_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_pipe_clk = { + .halt_reg = 0x9e030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(21), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2b_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_pipediv2_clk = { + .halt_reg = 0x9e038, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_2b_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_slv_axi_clk = { + .halt_reg = 0x9e014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x9e014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(17), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2b_slv_q2a_axi_clk = { + .halt_reg = 0x9e010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_2b_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a3b_clkref_clk = { + .halt_reg = 0x8c038, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c038, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a3b_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_aux_clk = { + .halt_reg = 0xa0028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3a_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = { + .halt_reg = 0xa0024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa0024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_mstr_axi_clk = { + .halt_reg = 0xa001c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xa001c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_pipe_clk = { + .halt_reg = 0xa0030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3a_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_pipediv2_clk = { + .halt_reg = 0xa0038, + 
.halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3a_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_slv_axi_clk = { + .halt_reg = 0xa0014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa0014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = { + .halt_reg = 0xa0010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3a_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_aux_clk = { + .halt_reg = 0xa2028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3b_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = { + .halt_reg = 0xa2024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa2024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_mstr_axi_clk = { + .halt_reg = 0xa201c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0xa201c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_pipe_clk = { + .halt_reg = 0xa2030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3b_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_pipediv2_clk = { + .halt_reg = 0xa2038, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_3b_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_slv_axi_clk = { + .halt_reg = 0xa2014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xa2014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(31), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_3b_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = { + .halt_reg = 0xa2010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(30), + .hw.init = &(const struct clk_init_data) { + .name 
= "gcc_pcie_3b_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_aux_clk = { + .halt_reg = 0x6b028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_4_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_cfg_ahb_clk = { + .halt_reg = 0x6b024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_clkref_clk = { + .halt_reg = 0x8c030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c030, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_mstr_axi_clk = { + .halt_reg = 0x6b01c, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x6b01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_pipe_clk = { + .halt_reg = 0x6b030, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_4_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_pipediv2_clk = { + .halt_reg = 0x6b038, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_pipediv2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_4_pipe_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_slv_axi_clk = { + .halt_reg = 0x6b014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = { + .halt_reg = 0x6b010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_4_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_rscc_ahb_clk = { + .halt_reg = 0xae008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xae008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(17), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_rscc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_rscc_xo_clk = { + .halt_reg = 0xae004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_rscc_xo_clk", 
+ .parent_hws = (const struct clk_hw*[]){ + &gcc_pcie_rscc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_throttle_cfg_clk = { + .halt_reg = 0xa6028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pcie_throttle_cfg_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x33004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x33004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x33008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0x26008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x26008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x26008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0x2600c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2600c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2600c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_camera_rt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp1_ahb_clk = { + .halt_reg = 0xbb008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xbb008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xbb008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp1_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp1_rot_ahb_clk = { + .halt_reg = 0xbb00c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xbb00c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xbb00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp1_rot_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0x27008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x27008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x27008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_rot_ahb_clk = { + .halt_reg = 0x2700c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2700c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2700c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_disp_rot_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct 
clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0x28008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x28008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x28008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0x2800c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2800c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2800c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x17014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x1700c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_qspi0_clk = { + .halt_reg = 0x17ac4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_qspi0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x17144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(10), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x17274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(11), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x173a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(12), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x174d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(13), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x17604, + 
.halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(14), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s4_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x17734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(15), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s6_clk = { + .halt_reg = 0x17864, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(16), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s6_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s7_clk = { + .halt_reg = 0x17994, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(17), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap0_s7_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap0_s7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x18014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(18), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk = { + .halt_reg = 0x1800c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_qspi0_clk = { + .halt_reg = 0x18ac4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_qspi0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x18144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(22), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x18274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(23), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + 
.ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x183a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(24), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x184d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(25), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x18604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(26), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s4_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x18734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s6_clk = { + .halt_reg = 0x18864, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(27), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s6_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s7_clk = { + .halt_reg = 0x18994, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(28), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap1_s7_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap1_s7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = { + .halt_reg = 0x1e014, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(3), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_core_clk = { + .halt_reg = 0x1e00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_qspi0_clk = { + .halt_reg = 0x1eac4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_qspi0_clk", + .parent_hws = (const 
struct clk_hw*[]){ + &gcc_qupv3_wrap2_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s0_clk = { + .halt_reg = 0x1e144, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(4), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s1_clk = { + .halt_reg = 0x1e274, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(5), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s2_clk = { + .halt_reg = 0x1e3a4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(6), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s2_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s3_clk = { + .halt_reg = 0x1e4d4, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s3_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s4_clk = { + .halt_reg = 0x1e604, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(8), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s4_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s4_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s5_clk = { + .halt_reg = 0x1e734, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(9), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s5_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s6_clk = { + .halt_reg = 0x1e864, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(29), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s6_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s7_clk = { + .halt_reg = 0x1e994, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(30), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap2_s7_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_qupv3_wrap2_s7_clk_src.clkr.hw, + }, + 
.num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(6), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(7), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x18004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(20), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x18008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(21), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1e004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(2), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_2_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1e008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_qupv3_wrap_2_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x14008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc2_apps_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_sdcc2_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_ahb_clk = { + .halt_reg = 0x16008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc4_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_apps_clk = { + .halt_reg = 0x16004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sdcc4_apps_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_sdcc4_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_sys_noc_usb_axi_clk = { + .halt_reg = 0x5d000, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x5d000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x5d000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_sys_noc_usb_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_1_card_clkref_clk = { + .halt_reg = 0x8c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_1_card_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ahb_clk = { + .halt_reg = 0x75018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_axi_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_axi_hw_ctl_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_clkref_clk = { + .halt_reg = 0x8c054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ice_core_clk = { + .halt_reg = 0x75064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_ice_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = { + .halt_reg = 0x75064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75064, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_ice_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_phy_aux_clk = { + .halt_reg = 0x7509c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7509c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7509c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + 
&gcc_ufs_card_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_phy_aux_hw_ctl_clk = { + .halt_reg = 0x7509c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7509c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7509c, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_phy_aux_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = { + .halt_reg = 0x75020, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x75020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_rx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_rx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = { + .halt_reg = 0x750b8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x750b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_rx_symbol_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_rx_symbol_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = { + .halt_reg = 0x7501c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x7501c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_tx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_tx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_unipro_core_clk = { + .halt_reg = 0x7505c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7505c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7505c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_unipro_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_unipro_core_hw_ctl_clk = { + .halt_reg = 0x7505c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7505c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7505c, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_card_unipro_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_card_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x77018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(0), + .hw.init = &(const 
struct clk_init_data) { + .name = "gcc_ufs_phy_axi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_axi_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x77064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ice_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = { + .halt_reg = 0x77064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77064, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_ice_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg = 0x7709c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7709c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7709c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = { + .halt_reg = 0x7709c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7709c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7709c, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_reg = 0x77020, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x77020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_reg = 0x770b8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x770b8, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, 
+}; + +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_reg = 0x7701c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x7705c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7705c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7705c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = { + .halt_reg = 0x7705c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x7705c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7705c, + .enable_mask = BIT(1), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_ufs_phy_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_ref_clkref_clk = { + .halt_reg = 0x8c058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_ufs_ref_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_hs0_clkref_clk = { + .halt_reg = 0x8c044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c044, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb2_hs0_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_hs1_clkref_clk = { + .halt_reg = 0x8c048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb2_hs1_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_hs2_clkref_clk = { + .halt_reg = 0x8c04c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c04c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb2_hs2_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb2_hs3_clkref_clk = { + .halt_reg = 0x8c050, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c050, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb2_hs3_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mp_master_clk = { + .halt_reg = 0xab010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_mp_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_mp_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mp_mock_utmi_clk = { + .halt_reg = 0xab01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab01c, + .enable_mask = BIT(0), + .hw.init = &(const struct 
clk_init_data) { + .name = "gcc_usb30_mp_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_mp_sleep_clk = { + .halt_reg = 0xab018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_mp_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0xf010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0xf01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf01c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0xf018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_master_clk = { + .halt_reg = 0x10010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_mock_utmi_clk = { + .halt_reg = 0x1001c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1001c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_mock_utmi_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_sleep_clk = { + .halt_reg = 0x10018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb30_sec_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp0_clkref_clk = { + .halt_reg = 0x8c03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c03c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp0_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp1_clkref_clk = { + .halt_reg = 0x8c040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp1_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp_phy_aux_clk = { + .halt_reg 
= 0xab054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_mp_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = { + .halt_reg = 0xab058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xab058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_mp_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = { + .halt_reg = 0xab05c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xab05c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_pipe_0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = { + .halt_reg = 0xab064, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xab064, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_mp_phy_pipe_1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0xf054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0xf058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0xf05c, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0xf05c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xf05c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_prim_phy_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_aux_clk = { + .halt_reg = 0x10054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10054, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_sec_phy_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_sec_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_com_aux_clk 
= { + .halt_reg = 0x10058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10058, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_sec_phy_com_aux_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb3_sec_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { + .halt_reg = 0x1005c, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x1005c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1005c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb3_sec_phy_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_cfg_ahb_clk = { + .halt_reg = 0xb808c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb808c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb808c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_dp_clk = { + .halt_reg = 0xb8048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb8048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_dp_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_dp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_master_clk = { + .halt_reg = 0xb8010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb8010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = { + .halt_reg = 0xb80b4, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xb80b4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = { + .halt_reg = 0xb8038, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(19), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_pcie_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_phy_rx0_clk = { + .halt_reg = 0xb8094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb8094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_rx0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_rx0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_phy_rx1_clk = { + .halt_reg = 0xb80a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb80a0, + 
.enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_rx1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_rx1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = { + .halt_reg = 0xb8088, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0xb8088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb8088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_phy_usb_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_sb_if_clk = { + .halt_reg = 0xb8034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb8034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_sb_if_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_sb_if_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_sys_clk = { + .halt_reg = 0xb8040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb8040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_sys_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_phy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_1_tmu_clk = { + .halt_reg = 0xb806c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb806c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb806c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_1_tmu_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_1_tmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_cfg_ahb_clk = { + .halt_reg = 0x2a08c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2a08c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2a08c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_clkref_clk = { + .halt_reg = 0x8c010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_dp_clk = { + .halt_reg = 0x2a048, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a048, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_dp_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_dp_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_eud_clkref_clk = { + .halt_reg = 0x8c02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c02c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_eud_clkref_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_master_clk = { + .halt_reg = 0x2a010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a010, + 
.enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_master_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_phy_p2rr2p_pipe_clk = { + .halt_reg = 0x2a0b4, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x2a0b4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_p2rr2p_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_p2rr2p_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_phy_pcie_pipe_clk = { + .halt_reg = 0x2a038, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52020, + .enable_mask = BIT(18), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_pcie_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_phy_rx0_clk = { + .halt_reg = 0x2a094, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a094, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_rx0_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_rx0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_phy_rx1_clk = { + .halt_reg = 0x2a0a0, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a0a0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_rx1_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_rx1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_phy_usb_pipe_clk = { + .halt_reg = 0x2a088, + .halt_check = BRANCH_HALT_DELAY, + .hwcg_reg = 0x2a088, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2a088, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_phy_usb_pipe_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_sb_if_clk = { + .halt_reg = 0x2a034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a034, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_sb_if_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_sb_if_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_sys_clk = { + .halt_reg = 0x2a040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x2a040, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_sys_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_phy_sys_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb4_tmu_clk = { + .halt_reg = 0x2a06c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x2a06c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x2a06c, + .enable_mask = BIT(0), 
+ .hw.init = &(const struct clk_init_data) { + .name = "gcc_usb4_tmu_clk", + .parent_hws = (const struct clk_hw*[]){ + &gcc_usb4_tmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0x28010, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x28010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x28010, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0x28018, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x28018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x28018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_cvp_throttle_clk = { + .halt_reg = 0x28024, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x28024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x28024, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_cvp_throttle_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_vcodec_throttle_clk = { + .halt_reg = 0x28020, + .halt_check = BRANCH_HALT_SKIP, + .hwcg_reg = 0x28020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x28020, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "gcc_video_vcodec_throttle_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc pcie_0_tunnel_gdsc = { + .gdscr = 0xa4004, + .pd = { + .name = "pcie_0_tunnel_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_1_tunnel_gdsc = { + .gdscr = 0x8d004, + .pd = { + .name = "pcie_1_tunnel_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_2a_gdsc = { + .gdscr = 0x9d004, + .pd = { + .name = "pcie_2a_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_2b_gdsc = { + .gdscr = 0x9e004, + .pd = { + .name = "pcie_2b_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_3a_gdsc = { + .gdscr = 0xa0004, + .pd = { + .name = "pcie_3a_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_3b_gdsc = { + .gdscr = 0xa2004, + .pd = { + .name = "pcie_3b_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_4_gdsc = { + .gdscr = 0x6b004, + .pd = { + .name = "pcie_4_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ufs_card_gdsc = { + .gdscr = 0x75004, + .pd = { + .name = "ufs_card_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ufs_phy_gdsc = { + .gdscr = 0x77004, + .pd = { + .name = "ufs_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc usb30_mp_gdsc = { + .gdscr = 0xab004, + .pd = { + .name = "usb30_mp_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc usb30_prim_gdsc = { + .gdscr = 0xf004, + .pd = { + .name = "usb30_prim_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc usb30_sec_gdsc = { + .gdscr = 0x10004, + .pd = { + .name = "usb30_sec_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct clk_regmap *gcc_sc8280xp_clocks[] = { + [GCC_AGGRE_NOC_PCIE0_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie0_tunnel_axi_clk.clkr, + [GCC_AGGRE_NOC_PCIE1_TUNNEL_AXI_CLK] = &gcc_aggre_noc_pcie1_tunnel_axi_clk.clkr, + [GCC_AGGRE_NOC_PCIE_4_AXI_CLK] = &gcc_aggre_noc_pcie_4_axi_clk.clkr, + [GCC_AGGRE_NOC_PCIE_SOUTH_SF_AXI_CLK] = 
&gcc_aggre_noc_pcie_south_sf_axi_clk.clkr, + [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr, + [GCC_AGGRE_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_card_axi_hw_ctl_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr, + [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr, + [GCC_AGGRE_USB4_AXI_CLK] = &gcc_aggre_usb4_axi_clk.clkr, + [GCC_AGGRE_USB_NOC_AXI_CLK] = &gcc_aggre_usb_noc_axi_clk.clkr, + [GCC_AGGRE_USB_NOC_NORTH_AXI_CLK] = &gcc_aggre_usb_noc_north_axi_clk.clkr, + [GCC_AGGRE_USB_NOC_SOUTH_AXI_CLK] = &gcc_aggre_usb_noc_south_axi_clk.clkr, + [GCC_AHB2PHY0_CLK] = &gcc_ahb2phy0_clk.clkr, + [GCC_AHB2PHY2_CLK] = &gcc_ahb2phy2_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr, + [GCC_CAMERA_THROTTLE_NRT_AXI_CLK] = &gcc_camera_throttle_nrt_axi_clk.clkr, + [GCC_CAMERA_THROTTLE_RT_AXI_CLK] = &gcc_camera_throttle_rt_axi_clk.clkr, + [GCC_CAMERA_THROTTLE_XO_CLK] = &gcc_camera_throttle_xo_clk.clkr, + [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, + [GCC_CNOC_PCIE0_TUNNEL_CLK] = &gcc_cnoc_pcie0_tunnel_clk.clkr, + [GCC_CNOC_PCIE1_TUNNEL_CLK] = &gcc_cnoc_pcie1_tunnel_clk.clkr, + [GCC_CNOC_PCIE4_QX_CLK] = &gcc_cnoc_pcie4_qx_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr, + [GCC_DISP1_HF_AXI_CLK] = &gcc_disp1_hf_axi_clk.clkr, + [GCC_DISP1_SF_AXI_CLK] = &gcc_disp1_sf_axi_clk.clkr, + [GCC_DISP1_THROTTLE_NRT_AXI_CLK] = &gcc_disp1_throttle_nrt_axi_clk.clkr, + [GCC_DISP1_THROTTLE_RT_AXI_CLK] = &gcc_disp1_throttle_rt_axi_clk.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr, + [GCC_DISP_THROTTLE_NRT_AXI_CLK] = &gcc_disp_throttle_nrt_axi_clk.clkr, + [GCC_DISP_THROTTLE_RT_AXI_CLK] = &gcc_disp_throttle_rt_axi_clk.clkr, + [GCC_EMAC0_AXI_CLK] = &gcc_emac0_axi_clk.clkr, + [GCC_EMAC0_PTP_CLK] = &gcc_emac0_ptp_clk.clkr, + [GCC_EMAC0_PTP_CLK_SRC] = &gcc_emac0_ptp_clk_src.clkr, + [GCC_EMAC0_RGMII_CLK] = &gcc_emac0_rgmii_clk.clkr, + [GCC_EMAC0_RGMII_CLK_SRC] = &gcc_emac0_rgmii_clk_src.clkr, + [GCC_EMAC0_SLV_AHB_CLK] = &gcc_emac0_slv_ahb_clk.clkr, + [GCC_EMAC1_AXI_CLK] = &gcc_emac1_axi_clk.clkr, + [GCC_EMAC1_PTP_CLK] = &gcc_emac1_ptp_clk.clkr, + [GCC_EMAC1_PTP_CLK_SRC] = &gcc_emac1_ptp_clk_src.clkr, + [GCC_EMAC1_RGMII_CLK] = &gcc_emac1_rgmii_clk.clkr, + [GCC_EMAC1_RGMII_CLK_SRC] = &gcc_emac1_rgmii_clk_src.clkr, + [GCC_EMAC1_SLV_AHB_CLK] = &gcc_emac1_slv_ahb_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GP4_CLK] = &gcc_gp4_clk.clkr, + [GCC_GP4_CLK_SRC] = &gcc_gp4_clk_src.clkr, + [GCC_GP5_CLK] = &gcc_gp5_clk.clkr, + [GCC_GP5_CLK_SRC] = &gcc_gp5_clk_src.clkr, + [GCC_GPLL0] = &gcc_gpll0.clkr, + [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr, + [GCC_GPLL2] = 
&gcc_gpll2.clkr, + [GCC_GPLL4] = &gcc_gpll4.clkr, + [GCC_GPLL7] = &gcc_gpll7.clkr, + [GCC_GPLL8] = &gcc_gpll8.clkr, + [GCC_GPLL9] = &gcc_gpll9.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_GPU_TCU_THROTTLE_AHB_CLK] = &gcc_gpu_tcu_throttle_ahb_clk.clkr, + [GCC_GPU_TCU_THROTTLE_CLK] = &gcc_gpu_tcu_throttle_clk.clkr, + [GCC_PCIE0_PHY_RCHNG_CLK] = &gcc_pcie0_phy_rchng_clk.clkr, + [GCC_PCIE1_PHY_RCHNG_CLK] = &gcc_pcie1_phy_rchng_clk.clkr, + [GCC_PCIE2A_PHY_RCHNG_CLK] = &gcc_pcie2a_phy_rchng_clk.clkr, + [GCC_PCIE2B_PHY_RCHNG_CLK] = &gcc_pcie2b_phy_rchng_clk.clkr, + [GCC_PCIE3A_PHY_RCHNG_CLK] = &gcc_pcie3a_phy_rchng_clk.clkr, + [GCC_PCIE3B_PHY_RCHNG_CLK] = &gcc_pcie3b_phy_rchng_clk.clkr, + [GCC_PCIE4_PHY_RCHNG_CLK] = &gcc_pcie4_phy_rchng_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PCIE_2A2B_CLKREF_CLK] = &gcc_pcie_2a2b_clkref_clk.clkr, + [GCC_PCIE_2A_AUX_CLK] = &gcc_pcie_2a_aux_clk.clkr, + [GCC_PCIE_2A_AUX_CLK_SRC] = &gcc_pcie_2a_aux_clk_src.clkr, + [GCC_PCIE_2A_CFG_AHB_CLK] = &gcc_pcie_2a_cfg_ahb_clk.clkr, + [GCC_PCIE_2A_MSTR_AXI_CLK] = &gcc_pcie_2a_mstr_axi_clk.clkr, + [GCC_PCIE_2A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2a_phy_rchng_clk_src.clkr, + [GCC_PCIE_2A_PIPE_CLK] = &gcc_pcie_2a_pipe_clk.clkr, + [GCC_PCIE_2A_PIPE_CLK_SRC] = &gcc_pcie_2a_pipe_clk_src.clkr, + [GCC_PCIE_2A_PIPE_DIV_CLK_SRC] = &gcc_pcie_2a_pipe_div_clk_src.clkr, + [GCC_PCIE_2A_PIPEDIV2_CLK] = &gcc_pcie_2a_pipediv2_clk.clkr, + [GCC_PCIE_2A_SLV_AXI_CLK] = &gcc_pcie_2a_slv_axi_clk.clkr, + [GCC_PCIE_2A_SLV_Q2A_AXI_CLK] = &gcc_pcie_2a_slv_q2a_axi_clk.clkr, + [GCC_PCIE_2B_AUX_CLK] = &gcc_pcie_2b_aux_clk.clkr, + [GCC_PCIE_2B_AUX_CLK_SRC] = &gcc_pcie_2b_aux_clk_src.clkr, + [GCC_PCIE_2B_CFG_AHB_CLK] = &gcc_pcie_2b_cfg_ahb_clk.clkr, + [GCC_PCIE_2B_MSTR_AXI_CLK] = &gcc_pcie_2b_mstr_axi_clk.clkr, + [GCC_PCIE_2B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2b_phy_rchng_clk_src.clkr, + [GCC_PCIE_2B_PIPE_CLK] = &gcc_pcie_2b_pipe_clk.clkr, + [GCC_PCIE_2B_PIPE_CLK_SRC] = &gcc_pcie_2b_pipe_clk_src.clkr, + [GCC_PCIE_2B_PIPE_DIV_CLK_SRC] = &gcc_pcie_2b_pipe_div_clk_src.clkr, + [GCC_PCIE_2B_PIPEDIV2_CLK] = &gcc_pcie_2b_pipediv2_clk.clkr, + [GCC_PCIE_2B_SLV_AXI_CLK] = &gcc_pcie_2b_slv_axi_clk.clkr, + [GCC_PCIE_2B_SLV_Q2A_AXI_CLK] = &gcc_pcie_2b_slv_q2a_axi_clk.clkr, + [GCC_PCIE_3A3B_CLKREF_CLK] = &gcc_pcie_3a3b_clkref_clk.clkr, + [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr, + 
[GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr, + [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr, + [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr, + [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr, + [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr, + [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr, + [GCC_PCIE_3A_PIPE_DIV_CLK_SRC] = &gcc_pcie_3a_pipe_div_clk_src.clkr, + [GCC_PCIE_3A_PIPEDIV2_CLK] = &gcc_pcie_3a_pipediv2_clk.clkr, + [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr, + [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr, + [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr, + [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr, + [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr, + [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr, + [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr, + [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr, + [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr, + [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr, + [GCC_PCIE_3B_PIPEDIV2_CLK] = &gcc_pcie_3b_pipediv2_clk.clkr, + [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr, + [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr, + [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr, + [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr, + [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr, + [GCC_PCIE_4_CLKREF_CLK] = &gcc_pcie_4_clkref_clk.clkr, + [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr, + [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr, + [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr, + [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr, + [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr, + [GCC_PCIE_4_PIPEDIV2_CLK] = &gcc_pcie_4_pipediv2_clk.clkr, + [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr, + [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr, + [GCC_PCIE_RSCC_AHB_CLK] = &gcc_pcie_rscc_ahb_clk.clkr, + [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr, + [GCC_PCIE_RSCC_XO_CLK_SRC] = &gcc_pcie_rscc_xo_clk_src.clkr, + [GCC_PCIE_THROTTLE_CFG_CLK] = &gcc_pcie_throttle_cfg_clk.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_DISP1_AHB_CLK] = &gcc_qmip_disp1_ahb_clk.clkr, + [GCC_QMIP_DISP1_ROT_AHB_CLK] = &gcc_qmip_disp1_rot_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_DISP_ROT_AHB_CLK] = &gcc_qmip_disp_rot_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_QSPI0_CLK] = &gcc_qupv3_wrap0_qspi0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, + [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = 
&gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap0_s4_div_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr, + [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr, + [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr, + [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_QSPI0_CLK] = &gcc_qupv3_wrap1_qspi0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap1_s4_div_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr, + [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr, + [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr, + [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr, + [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr, + [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr, + [GCC_QUPV3_WRAP2_QSPI0_CLK] = &gcc_qupv3_wrap2_qspi0_clk.clkr, + [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr, + [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr, + [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr, + [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr, + [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr, + [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr, + [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr, + [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr, + [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr, + [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr, + [GCC_QUPV3_WRAP2_S4_DIV_CLK_SRC] = &gcc_qupv3_wrap2_s4_div_clk_src.clkr, + [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr, + [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr, + [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr, + [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr, + [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr, + [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = 
&gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr, + [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, + [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, + [GCC_SYS_NOC_USB_AXI_CLK] = &gcc_sys_noc_usb_axi_clk.clkr, + [GCC_UFS_1_CARD_CLKREF_CLK] = &gcc_ufs_1_card_clkref_clk.clkr, + [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr, + [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr, + [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr, + [GCC_UFS_CARD_AXI_HW_CTL_CLK] = &gcc_ufs_card_axi_hw_ctl_clk.clkr, + [GCC_UFS_CARD_CLKREF_CLK] = &gcc_ufs_card_clkref_clk.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr, + [GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_card_ice_core_hw_ctl_clk.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, + [GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_card_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_rx_symbol_0_clk_src.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_card_rx_symbol_1_clk_src.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_card_tx_symbol_0_clk_src.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_card_unipro_core_clk_src.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_card_unipro_core_hw_ctl_clk.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr, + [GCC_UFS_REF_CLKREF_CLK] = &gcc_ufs_ref_clkref_clk.clkr, + [GCC_USB2_HS0_CLKREF_CLK] = &gcc_usb2_hs0_clkref_clk.clkr, + [GCC_USB2_HS1_CLKREF_CLK] = &gcc_usb2_hs1_clkref_clk.clkr, + [GCC_USB2_HS2_CLKREF_CLK] 
= &gcc_usb2_hs2_clkref_clk.clkr, + [GCC_USB2_HS3_CLKREF_CLK] = &gcc_usb2_hs3_clkref_clk.clkr, + [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr, + [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr, + [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr, + [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr, + [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr, + [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr, + [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr, + [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr, + [GCC_USB3_MP0_CLKREF_CLK] = &gcc_usb3_mp0_clkref_clk.clkr, + [GCC_USB3_MP1_CLKREF_CLK] = &gcc_usb3_mp1_clkref_clk.clkr, + [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr, + [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr, + [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr, + [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr, + [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr, + [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr, + [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr, + [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr, + [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr, + [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr, + [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr, + [GCC_USB4_1_DP_CLK] = &gcc_usb4_1_dp_clk.clkr, + [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr, + [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr, + [GCC_USB4_1_PHY_DP_CLK_SRC] = &gcc_usb4_1_phy_dp_clk_src.clkr, + [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr, + [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr, + [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr, + [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr, + [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr, + 
[GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipegmux_clk_src.clkr, + [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr, + [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr, + [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr, + [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr, + [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr, + [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr, + [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr, + [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr, + [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr, + [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr, + [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr, + [GCC_USB4_CFG_AHB_CLK] = &gcc_usb4_cfg_ahb_clk.clkr, + [GCC_USB4_CLKREF_CLK] = &gcc_usb4_clkref_clk.clkr, + [GCC_USB4_DP_CLK] = &gcc_usb4_dp_clk.clkr, + [GCC_USB4_EUD_CLKREF_CLK] = &gcc_usb4_eud_clkref_clk.clkr, + [GCC_USB4_MASTER_CLK] = &gcc_usb4_master_clk.clkr, + [GCC_USB4_MASTER_CLK_SRC] = &gcc_usb4_master_clk_src.clkr, + [GCC_USB4_PHY_DP_CLK_SRC] = &gcc_usb4_phy_dp_clk_src.clkr, + [GCC_USB4_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_phy_p2rr2p_pipe_clk.clkr, + [GCC_USB4_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_phy_p2rr2p_pipe_clk_src.clkr, + [GCC_USB4_PHY_PCIE_PIPE_CLK] = &gcc_usb4_phy_pcie_pipe_clk.clkr, + [GCC_USB4_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_phy_pcie_pipe_clk_src.clkr, + [GCC_USB4_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_phy_pcie_pipe_mux_clk_src.clkr, + [GCC_USB4_PHY_PCIE_PIPEGMUX_CLK_SRC] = &gcc_usb4_phy_pcie_pipegmux_clk_src.clkr, + [GCC_USB4_PHY_RX0_CLK] = &gcc_usb4_phy_rx0_clk.clkr, + [GCC_USB4_PHY_RX0_CLK_SRC] = &gcc_usb4_phy_rx0_clk_src.clkr, + [GCC_USB4_PHY_RX1_CLK] = &gcc_usb4_phy_rx1_clk.clkr, + [GCC_USB4_PHY_RX1_CLK_SRC] = &gcc_usb4_phy_rx1_clk_src.clkr, + [GCC_USB4_PHY_SYS_CLK_SRC] = &gcc_usb4_phy_sys_clk_src.clkr, + [GCC_USB4_PHY_USB_PIPE_CLK] = &gcc_usb4_phy_usb_pipe_clk.clkr, + [GCC_USB4_SB_IF_CLK] = &gcc_usb4_sb_if_clk.clkr, + [GCC_USB4_SB_IF_CLK_SRC] = &gcc_usb4_sb_if_clk_src.clkr, + [GCC_USB4_SYS_CLK] = &gcc_usb4_sys_clk.clkr, + [GCC_USB4_TMU_CLK] = &gcc_usb4_tmu_clk.clkr, + [GCC_USB4_TMU_CLK_SRC] = &gcc_usb4_tmu_clk_src.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, + [GCC_VIDEO_CVP_THROTTLE_CLK] = &gcc_video_cvp_throttle_clk.clkr, + [GCC_VIDEO_VCODEC_THROTTLE_CLK] = &gcc_video_vcodec_throttle_clk.clkr, +}; + +static const struct qcom_reset_map gcc_sc8280xp_resets[] = { + [GCC_EMAC0_BCR] = { 0xaa000 }, + [GCC_EMAC1_BCR] = { 0xba000 }, + [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 }, + [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 }, + [GCC_PCIE_0_PHY_BCR] = { 0x6c01c }, + [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 }, + [GCC_PCIE_0_TUNNEL_BCR] = { 0xa4000 }, + [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 }, + [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 }, + [GCC_PCIE_1_PHY_BCR] = { 0x8e01c }, + [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 }, + [GCC_PCIE_1_TUNNEL_BCR] = { 0x8d000 }, + [GCC_PCIE_2A_BCR] = { 0x9d000 }, + [GCC_PCIE_2A_LINK_DOWN_BCR] = { 0x9d13c }, + [GCC_PCIE_2A_NOCSR_COM_PHY_BCR] = { 0x9d148 }, + [GCC_PCIE_2A_PHY_BCR] = { 0x9d144 }, + [GCC_PCIE_2A_PHY_NOCSR_COM_PHY_BCR] = { 0x9d14c }, + [GCC_PCIE_2B_BCR] = { 0x9e000 }, + [GCC_PCIE_2B_LINK_DOWN_BCR] = { 0x9e084 }, + [GCC_PCIE_2B_NOCSR_COM_PHY_BCR] = { 0x9e090 }, + [GCC_PCIE_2B_PHY_BCR] = { 0x9e08c }, + [GCC_PCIE_2B_PHY_NOCSR_COM_PHY_BCR] = { 0x9e094 }, + [GCC_PCIE_3A_BCR] = { 0xa0000 }, + [GCC_PCIE_3A_LINK_DOWN_BCR] = { 
0xa00f0 }, + [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0xa00fc }, + [GCC_PCIE_3A_PHY_BCR] = { 0xa00e0 }, + [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0xa00e4 }, + [GCC_PCIE_3B_BCR] = { 0xa2000 }, + [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0xa20e0 }, + [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0xa20ec }, + [GCC_PCIE_3B_PHY_BCR] = { 0xa20e8 }, + [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0xa20f0 }, + [GCC_PCIE_4_BCR] = { 0x6b000 }, + [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x6b300 }, + [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x6b30c }, + [GCC_PCIE_4_PHY_BCR] = { 0x6b308 }, + [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x6b310 }, + [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c }, + [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 }, + [GCC_PCIE_RSCC_BCR] = { 0xae000 }, + [GCC_QUSB2PHY_HS0_MP_BCR] = { 0x12008 }, + [GCC_QUSB2PHY_HS1_MP_BCR] = { 0x1200c }, + [GCC_QUSB2PHY_HS2_MP_BCR] = { 0x12010 }, + [GCC_QUSB2PHY_HS3_MP_BCR] = { 0x12014 }, + [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 }, + [GCC_SDCC2_BCR] = { 0x14000 }, + [GCC_SDCC4_BCR] = { 0x16000 }, + [GCC_UFS_CARD_BCR] = { 0x75000 }, + [GCC_UFS_PHY_BCR] = { 0x77000 }, + [GCC_USB2_PHY_PRIM_BCR] = { 0x50028 }, + [GCC_USB2_PHY_SEC_BCR] = { 0x5002c }, + [GCC_USB30_MP_BCR] = { 0xab000 }, + [GCC_USB30_PRIM_BCR] = { 0xf000 }, + [GCC_USB30_SEC_BCR] = { 0x10000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 }, + [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 }, + [GCC_USB3_PHY_SEC_BCR] = { 0x5000c }, + [GCC_USB3_UNIPHY_MP0_BCR] = { 0x50018 }, + [GCC_USB3_UNIPHY_MP1_BCR] = { 0x5001c }, + [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 }, + [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x50020 }, + [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x50024 }, + [GCC_USB4_1_BCR] = { 0xb8000 }, + [GCC_USB4_1_DP_PHY_PRIM_BCR] = { 0xb9020 }, + [GCC_USB4_1_DPPHY_AUX_BCR] = { 0xb9024 }, + [GCC_USB4_1_PHY_PRIM_BCR] = { 0xb9018 }, + [GCC_USB4_BCR] = { 0x2a000 }, + [GCC_USB4_DP_PHY_PRIM_BCR] = { 0x4a008 }, + [GCC_USB4_DPPHY_AUX_BCR] = { 0x4a00c }, + [GCC_USB4_PHY_PRIM_BCR] = { 0x4a000 }, + [GCC_USB4PHY_1_PHY_PRIM_BCR] = { 0xb901c }, + [GCC_USB4PHY_PHY_PRIM_BCR] = { 0x4a004 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, + [GCC_VIDEO_BCR] = { 0x28000 }, + [GCC_VIDEO_AXI0_CLK_ARES] = { 0x28010, 2 }, + [GCC_VIDEO_AXI1_CLK_ARES] = { 0x28018, 2 }, +}; + +static struct gdsc *gcc_sc8280xp_gdscs[] = { + [PCIE_0_TUNNEL_GDSC] = &pcie_0_tunnel_gdsc, + [PCIE_1_TUNNEL_GDSC] = &pcie_1_tunnel_gdsc, + [PCIE_2A_GDSC] = &pcie_2a_gdsc, + [PCIE_2B_GDSC] = &pcie_2b_gdsc, + [PCIE_3A_GDSC] = &pcie_3a_gdsc, + [PCIE_3B_GDSC] = &pcie_3b_gdsc, + [PCIE_4_GDSC] = &pcie_4_gdsc, + [UFS_CARD_GDSC] = &ufs_card_gdsc, + [UFS_PHY_GDSC] = &ufs_phy_gdsc, + [USB30_MP_GDSC] = &usb30_mp_gdsc, + [USB30_PRIM_GDSC] = &usb30_prim_gdsc, + [USB30_SEC_GDSC] = &usb30_sec_gdsc, +}; + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src), + 
DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src), +}; + +static const struct regmap_config gcc_sc8280xp_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xc3014, + .fast_io = true, +}; + +static const struct qcom_cc_desc gcc_sc8280xp_desc = { + .config = &gcc_sc8280xp_regmap_config, + .clks = gcc_sc8280xp_clocks, + .num_clks = ARRAY_SIZE(gcc_sc8280xp_clocks), + .resets = gcc_sc8280xp_resets, + .num_resets = ARRAY_SIZE(gcc_sc8280xp_resets), + .gdscs = gcc_sc8280xp_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_sc8280xp_gdscs), +}; + +static int gcc_sc8280xp_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_sc8280xp_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Keep the clocks always-ON + * GCC_CAMERA_AHB_CLK, GCC_CAMERA_XO_CLK, GCC_DISP_AHB_CLK, + * GCC_DISP_XO_CLK, GCC_GPU_CFG_AHB_CLK, GCC_VIDEO_AHB_CLK, + * GCC_VIDEO_XO_CLK, GCC_DISP1_AHB_CLK, GCC_DISP1_XO_CLK + */ + regmap_update_bits(regmap, 0x26004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x26020, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x27004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x27028, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x28028, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0xbb004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0xbb028, BIT(0), BIT(0)); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + + return qcom_cc_really_probe(pdev, &gcc_sc8280xp_desc, regmap); +} + +static const struct of_device_id gcc_sc8280xp_match_table[] = { + { .compatible = "qcom,gcc-sc8280xp" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sc8280xp_match_table); + +static struct platform_driver gcc_sc8280xp_driver = { + .probe = gcc_sc8280xp_probe, + .driver = { + .name = "gcc-sc8280xp", + .of_match_table = gcc_sc8280xp_match_table, + }, +}; + +static int __init gcc_sc8280xp_init(void) +{ + return platform_driver_register(&gcc_sc8280xp_driver); +} +subsys_initcall(gcc_sc8280xp_init); + +static void __exit gcc_sc8280xp_exit(void) +{ + platform_driver_unregister(&gcc_sc8280xp_driver); +} +module_exit(gcc_sc8280xp_exit); + +MODULE_DESCRIPTION("Qualcomm SC8280XP GCC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c new file mode 100644 index 000000000000..6ab6e5a34c72 --- /dev/null +++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,lpassaudiocc-sc7280.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" +#include "common.h" +#include "gdsc.h" + +enum { + P_BI_TCXO, + P_LPASS_AON_CC_PLL_OUT_EVEN, + P_LPASS_AON_CC_PLL_OUT_MAIN, + P_LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC, + P_LPASS_AON_CC_PLL_OUT_ODD, + P_LPASS_AUDIO_CC_PLL_OUT_AUX, + P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, + P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK, +}; + +static const struct pll_vco zonda_vco[] = { + { 595200000UL, 3600000000UL, 0 }, +}; + +/* 1128.96MHz configuration */ +static const struct alpha_pll_config lpass_audio_cc_pll_config = { + .l = 0x3a, + .alpha = 0xcccc, + .config_ctl_val = 0x08200920, + .config_ctl_hi_val = 0x05002001, + .config_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x03000101, +}; + +static struct clk_alpha_pll lpass_audio_cc_pll = { + .offset = 0x0, + .vco_table = zonda_vco, + .num_vco = ARRAY_SIZE(zonda_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr = { + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_pll", + .parent_data = &(const struct clk_parent_data){ + .index = 0, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_zonda_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_lpass_audio_cc_pll_out_aux2[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv lpass_audio_cc_pll_out_aux2 = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_lpass_audio_cc_pll_out_aux2, + .num_post_div = ARRAY_SIZE(post_div_table_lpass_audio_cc_pll_out_aux2), + .width = 2, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA], + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_pll_out_aux2", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_pll.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_zonda_ops, + }, +}; + +static const struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 614.4 MHz configuration */ +static const struct alpha_pll_config lpass_aon_cc_pll_config = { + .l = 0x20, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x329A299C, + .user_ctl_val = 0x00005100, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll lpass_aon_cc_pll = { + .offset = 0x0, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_pll", + .parent_data = &(const struct clk_parent_data){ + .index = 0, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_lpass_aon_cc_pll_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(const struct clk_init_data){ + .name = 
"lpass_aon_cc_pll_out_even", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_pll.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static const struct clk_div_table post_div_table_lpass_aon_cc_pll_out_odd[] = { + { 0x5, 5 }, + { } +}; + +static struct clk_alpha_pll_postdiv lpass_aon_cc_pll_out_odd = { + .offset = 0x0, + .post_div_shift = 12, + .post_div_table = post_div_table_lpass_aon_cc_pll_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_lpass_aon_cc_pll_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_pll_out_odd", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_pll.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static const struct parent_map lpass_audio_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_AUDIO_CC_PLL_OUT_AUX, 3 }, + { P_LPASS_AON_CC_PLL_OUT_ODD, 5 }, + { P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 6 }, +}; + +static struct clk_regmap_div lpass_audio_cc_pll_out_aux2_div_clk_src; +static struct clk_regmap_div lpass_audio_cc_pll_out_main_div_clk_src; + +static const struct clk_parent_data lpass_audio_cc_parent_data_0[] = { + { .index = 0 }, + { .hw = &lpass_audio_cc_pll.clkr.hw }, + { .hw = &lpass_aon_cc_pll_out_odd.clkr.hw }, + { .hw = &lpass_audio_cc_pll_out_aux2_div_clk_src.clkr.hw }, +}; + +static const struct parent_map lpass_aon_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_AON_CC_PLL_OUT_EVEN, 4 }, +}; + +static const struct clk_parent_data lpass_aon_cc_parent_data_0[] = { + { .index = 0 }, + { .hw = &lpass_aon_cc_pll_out_even.clkr.hw }, +}; + +static const struct parent_map lpass_aon_cc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_AON_CC_PLL_OUT_ODD, 1 }, + { P_LPASS_AUDIO_CC_PLL_MAIN_DIV_CLK, 6 }, +}; + +static const struct clk_parent_data lpass_aon_cc_parent_data_1[] = { + { .index = 0 }, + { .hw = &lpass_aon_cc_pll_out_odd.clkr.hw }, + { .hw = &lpass_audio_cc_pll_out_main_div_clk_src.clkr.hw }, +}; + +static const struct freq_tbl ftbl_lpass_aon_cc_main_rcg_clk_src[] = { + F(38400000, P_LPASS_AON_CC_PLL_OUT_EVEN, 8, 0, 0), + F(76800000, P_LPASS_AON_CC_PLL_OUT_EVEN, 4, 0, 0), + F(153600000, P_LPASS_AON_CC_PLL_OUT_EVEN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 lpass_aon_cc_main_rcg_clk_src = { + .cmd_rcgr = 0x1000, + .mnd_width = 0, + .hid_width = 5, + .parent_map = lpass_aon_cc_parent_map_0, + .freq_tbl = ftbl_lpass_aon_cc_main_rcg_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_main_rcg_clk_src", + .parent_data = lpass_aon_cc_parent_data_0, + .num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_0), + .flags = CLK_OPS_PARENT_ENABLE, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24576000, P_LPASS_AON_CC_PLL_OUT_ODD, 5, 0, 0), + { } +}; + +static struct clk_rcg2 lpass_aon_cc_tx_mclk_rcg_clk_src = { + .cmd_rcgr = 0x13004, + .mnd_width = 0, + .hid_width = 5, + .parent_map = lpass_aon_cc_parent_map_1, + .freq_tbl = ftbl_lpass_aon_cc_tx_mclk_rcg_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_tx_mclk_rcg_clk_src", + .parent_data = lpass_aon_cc_parent_data_1, + .num_parents = ARRAY_SIZE(lpass_aon_cc_parent_data_1), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_regmap_div lpass_audio_cc_pll_out_aux2_div_clk_src = { + .reg = 0x48, + .shift = 0, + .width = 4, + .clkr.hw.init 
= &(const struct clk_init_data) { + .name = "lpass_audio_cc_pll_out_aux2_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_pll_out_aux2.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div lpass_audio_cc_pll_out_main_div_clk_src = { + .reg = 0x3c, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "lpass_audio_cc_pll_out_main_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_pll.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div lpass_aon_cc_cdiv_tx_mclk_div_clk_src = { + .reg = 0x13010, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "lpass_aon_cc_cdiv_tx_mclk_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div lpass_aon_cc_pll_out_main_cdiv_div_clk_src = { + .reg = 0x80, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "lpass_aon_cc_pll_out_main_cdiv_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_pll.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static const struct freq_tbl ftbl_lpass_audio_cc_ext_mclk0_clk_src[] = { + F(256000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 32), + F(352800, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 32), + F(512000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 16), + F(705600, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 16), + F(768000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 16), + F(1024000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 8), + F(1411200, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 8), + F(1536000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 8), + F(2048000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 4), + F(2822400, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 4), + F(3072000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 4), + F(4096000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 1, 2), + F(5644800, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 1, 2), + F(6144000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 1, 2), + F(8192000, P_LPASS_AON_CC_PLL_OUT_ODD, 15, 0, 0), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(11289600, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 10, 0, 0), + F(12288000, P_LPASS_AON_CC_PLL_OUT_ODD, 10, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(22579200, P_LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC, 5, 0, 0), + F(24576000, P_LPASS_AON_CC_PLL_OUT_ODD, 5, 0, 0), + { } +}; + +static struct clk_rcg2 lpass_audio_cc_ext_mclk0_clk_src = { + .cmd_rcgr = 0x20004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_audio_cc_parent_map_0, + .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_ext_mclk0_clk_src", + .parent_data = lpass_audio_cc_parent_data_0, + .num_parents = ARRAY_SIZE(lpass_audio_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 lpass_audio_cc_ext_mclk1_clk_src = { + .cmd_rcgr = 0x21004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_audio_cc_parent_map_0, + .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_ext_mclk1_clk_src", + .parent_data = lpass_audio_cc_parent_data_0, + .num_parents = 
ARRAY_SIZE(lpass_audio_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 lpass_audio_cc_rx_mclk_clk_src = { + .cmd_rcgr = 0x24004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_audio_cc_parent_map_0, + .freq_tbl = ftbl_lpass_audio_cc_ext_mclk0_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_rx_mclk_clk_src", + .parent_data = lpass_audio_cc_parent_data_0, + .num_parents = ARRAY_SIZE(lpass_audio_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_regmap_div lpass_audio_cc_cdiv_rx_mclk_div_clk_src = { + .reg = 0x240d0, + .shift = 0, + .width = 4, + .clkr.hw.init = &(const struct clk_init_data) { + .name = "lpass_audio_cc_cdiv_rx_mclk_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_rx_mclk_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_branch lpass_aon_cc_audio_hm_h_clk; + +static struct clk_branch lpass_audio_cc_codec_mem0_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_codec_mem0_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_audio_hm_h_clk.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_codec_mem1_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e008, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_codec_mem1_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_audio_hm_h_clk.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_codec_mem2_clk = { + .halt_reg = 0x1e00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e00c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_codec_mem2_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_audio_hm_h_clk.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_codec_mem_clk = { + .halt_reg = 0x1e000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_codec_mem_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_audio_hm_h_clk.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_ext_mclk0_clk = { + .halt_reg = 0x20018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x20018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_ext_mclk0_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_ext_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_ext_mclk1_clk = { + .halt_reg = 0x21018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x21018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_ext_mclk1_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_ext_mclk1_clk_src.clkr.hw, 
+ }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_rx_mclk_2x_clk = { + .halt_reg = 0x240cc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x240cc, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_rx_mclk_2x_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_rx_mclk_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_cc_rx_mclk_clk = { + .halt_reg = 0x240d4, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x240d4, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_audio_cc_rx_mclk_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_audio_cc_cdiv_rx_mclk_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_aon_cc_audio_hm_h_clk = { + .halt_reg = 0x9014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_audio_hm_h_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_main_rcg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch lpass_aon_cc_va_mem0_clk = { + .halt_reg = 0x9028, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x9028, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_va_mem0_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_main_rcg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_aon_cc_tx_mclk_2x_clk = { + .halt_reg = 0x1300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1300c, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_tx_mclk_2x_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_aon_cc_tx_mclk_clk = { + .halt_reg = 0x13014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x13014, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_aon_cc_tx_mclk_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_aon_cc_cdiv_tx_mclk_div_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc lpass_aon_cc_lpass_audio_hm_gdsc = { + .gdscr = 0x9090, + .pd = { + .name = "lpass_aon_cc_lpass_audio_hm_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *lpass_aon_cc_sc7280_clocks[] = { + [LPASS_AON_CC_AUDIO_HM_H_CLK] = &lpass_aon_cc_audio_hm_h_clk.clkr, + [LPASS_AON_CC_VA_MEM0_CLK] = &lpass_aon_cc_va_mem0_clk.clkr, + [LPASS_AON_CC_CDIV_TX_MCLK_DIV_CLK_SRC] = &lpass_aon_cc_cdiv_tx_mclk_div_clk_src.clkr, + [LPASS_AON_CC_MAIN_RCG_CLK_SRC] = &lpass_aon_cc_main_rcg_clk_src.clkr, + [LPASS_AON_CC_PLL] = &lpass_aon_cc_pll.clkr, + [LPASS_AON_CC_PLL_OUT_EVEN] = &lpass_aon_cc_pll_out_even.clkr, + [LPASS_AON_CC_PLL_OUT_MAIN_CDIV_DIV_CLK_SRC] = + &lpass_aon_cc_pll_out_main_cdiv_div_clk_src.clkr, + [LPASS_AON_CC_PLL_OUT_ODD] = 
&lpass_aon_cc_pll_out_odd.clkr, + [LPASS_AON_CC_TX_MCLK_2X_CLK] = &lpass_aon_cc_tx_mclk_2x_clk.clkr, + [LPASS_AON_CC_TX_MCLK_CLK] = &lpass_aon_cc_tx_mclk_clk.clkr, + [LPASS_AON_CC_TX_MCLK_RCG_CLK_SRC] = &lpass_aon_cc_tx_mclk_rcg_clk_src.clkr, +}; + +static struct gdsc *lpass_aon_cc_sc7280_gdscs[] = { + [LPASS_AON_CC_LPASS_AUDIO_HM_GDSC] = &lpass_aon_cc_lpass_audio_hm_gdsc, +}; + +static struct clk_regmap *lpass_audio_cc_sc7280_clocks[] = { + [LPASS_AUDIO_CC_CDIV_RX_MCLK_DIV_CLK_SRC] = &lpass_audio_cc_cdiv_rx_mclk_div_clk_src.clkr, + [LPASS_AUDIO_CC_CODEC_MEM0_CLK] = &lpass_audio_cc_codec_mem0_clk.clkr, + [LPASS_AUDIO_CC_CODEC_MEM1_CLK] = &lpass_audio_cc_codec_mem1_clk.clkr, + [LPASS_AUDIO_CC_CODEC_MEM2_CLK] = &lpass_audio_cc_codec_mem2_clk.clkr, + [LPASS_AUDIO_CC_CODEC_MEM_CLK] = &lpass_audio_cc_codec_mem_clk.clkr, + [LPASS_AUDIO_CC_EXT_MCLK0_CLK] = &lpass_audio_cc_ext_mclk0_clk.clkr, + [LPASS_AUDIO_CC_EXT_MCLK0_CLK_SRC] = &lpass_audio_cc_ext_mclk0_clk_src.clkr, + [LPASS_AUDIO_CC_EXT_MCLK1_CLK] = &lpass_audio_cc_ext_mclk1_clk.clkr, + [LPASS_AUDIO_CC_EXT_MCLK1_CLK_SRC] = &lpass_audio_cc_ext_mclk1_clk_src.clkr, + [LPASS_AUDIO_CC_PLL] = &lpass_audio_cc_pll.clkr, + [LPASS_AUDIO_CC_PLL_OUT_AUX2] = &lpass_audio_cc_pll_out_aux2.clkr, + [LPASS_AUDIO_CC_PLL_OUT_AUX2_DIV_CLK_SRC] = &lpass_audio_cc_pll_out_aux2_div_clk_src.clkr, + [LPASS_AUDIO_CC_PLL_OUT_MAIN_DIV_CLK_SRC] = &lpass_audio_cc_pll_out_main_div_clk_src.clkr, + [LPASS_AUDIO_CC_RX_MCLK_2X_CLK] = &lpass_audio_cc_rx_mclk_2x_clk.clkr, + [LPASS_AUDIO_CC_RX_MCLK_CLK] = &lpass_audio_cc_rx_mclk_clk.clkr, + [LPASS_AUDIO_CC_RX_MCLK_CLK_SRC] = &lpass_audio_cc_rx_mclk_clk_src.clkr, +}; + +static struct regmap_config lpass_audio_cc_sc7280_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, +}; + +static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = { + .config = &lpass_audio_cc_sc7280_regmap_config, + .clks = lpass_audio_cc_sc7280_clocks, + .num_clks = ARRAY_SIZE(lpass_audio_cc_sc7280_clocks), +}; + +static const struct of_device_id lpass_audio_cc_sc7280_match_table[] = { + { .compatible = "qcom,sc7280-lpassaudiocc" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpass_audio_cc_sc7280_match_table); + +static void lpassaudio_pm_runtime_disable(void *data) +{ + pm_runtime_disable(data); +} + +static void lpassaudio_pm_clk_destroy(void *data) +{ + pm_clk_destroy(data); +} + +static int lpassaudio_create_pm_clks(struct platform_device *pdev) +{ + int ret; + + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_enable(&pdev->dev); + + ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_runtime_disable, &pdev->dev); + if (ret) + return ret; + + ret = pm_clk_create(&pdev->dev); + if (ret) + return ret; + + ret = devm_add_action_or_reset(&pdev->dev, lpassaudio_pm_clk_destroy, &pdev->dev); + if (ret) + return ret; + + ret = pm_clk_add(&pdev->dev, "iface"); + if (ret < 0) + dev_err(&pdev->dev, "failed to acquire iface clock\n"); + + return ret; +} + +static int lpass_audio_cc_sc7280_probe(struct platform_device *pdev) +{ + const struct qcom_cc_desc *desc; + struct regmap *regmap; + int ret; + + ret = lpassaudio_create_pm_clks(pdev); + if (ret) + return ret; + + lpass_audio_cc_sc7280_regmap_config.name = "lpassaudio_cc"; + lpass_audio_cc_sc7280_regmap_config.max_register = 0x2f000; + desc = &lpass_audio_cc_sc7280_desc; + + regmap = qcom_cc_map(pdev, desc); + if (IS_ERR(regmap)) { + pm_runtime_disable(&pdev->dev); + return PTR_ERR(regmap); + } + + 
clk_zonda_pll_configure(&lpass_audio_cc_pll, regmap, &lpass_audio_cc_pll_config); + + /* PLL settings */ + regmap_write(regmap, 0x4, 0x3b); + regmap_write(regmap, 0x8, 0xff05); + + ret = qcom_cc_really_probe(pdev, &lpass_audio_cc_sc7280_desc, regmap); + if (ret) { + dev_err(&pdev->dev, "Failed to register LPASS AUDIO CC clocks\n"); + pm_runtime_disable(&pdev->dev); + return ret; + } + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + + return ret; +} + +static const struct dev_pm_ops lpass_audio_cc_pm_ops = { + SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) +}; + +static struct platform_driver lpass_audio_cc_sc7280_driver = { + .probe = lpass_audio_cc_sc7280_probe, + .driver = { + .name = "lpass_audio_cc-sc7280", + .of_match_table = lpass_audio_cc_sc7280_match_table, + .pm = &lpass_audio_cc_pm_ops, + }, +}; + +static const struct qcom_cc_desc lpass_aon_cc_sc7280_desc = { + .config = &lpass_audio_cc_sc7280_regmap_config, + .clks = lpass_aon_cc_sc7280_clocks, + .num_clks = ARRAY_SIZE(lpass_aon_cc_sc7280_clocks), + .gdscs = lpass_aon_cc_sc7280_gdscs, + .num_gdscs = ARRAY_SIZE(lpass_aon_cc_sc7280_gdscs), +}; + +static const struct of_device_id lpass_aon_cc_sc7280_match_table[] = { + { .compatible = "qcom,sc7280-lpassaoncc" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpass_aon_cc_sc7280_match_table); + +static int lpass_aon_cc_sc7280_probe(struct platform_device *pdev) +{ + const struct qcom_cc_desc *desc; + struct regmap *regmap; + int ret; + + ret = lpassaudio_create_pm_clks(pdev); + if (ret) + return ret; + + lpass_audio_cc_sc7280_regmap_config.name = "lpasscc_aon"; + lpass_audio_cc_sc7280_regmap_config.max_register = 0xa0008; + desc = &lpass_aon_cc_sc7280_desc; + + regmap = qcom_cc_map(pdev, desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_pll_configure(&lpass_aon_cc_pll, regmap, &lpass_aon_cc_pll_config); + + ret = qcom_cc_really_probe(pdev, &lpass_aon_cc_sc7280_desc, regmap); + if (ret) + dev_err(&pdev->dev, "Failed to register LPASS AON CC clocks\n"); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + + return ret; +} + +static struct platform_driver lpass_aon_cc_sc7280_driver = { + .probe = lpass_aon_cc_sc7280_probe, + .driver = { + .name = "lpass_aon_cc-sc7280", + .of_match_table = lpass_aon_cc_sc7280_match_table, + .pm = &lpass_audio_cc_pm_ops, + }, +}; + +static int __init lpass_audio_cc_sc7280_init(void) +{ + int ret; + + ret = platform_driver_register(&lpass_aon_cc_sc7280_driver); + if (ret) + return ret; + + return platform_driver_register(&lpass_audio_cc_sc7280_driver); +} +subsys_initcall(lpass_audio_cc_sc7280_init); + +static void __exit lpass_audio_cc_sc7280_exit(void) +{ + platform_driver_unregister(&lpass_audio_cc_sc7280_driver); + platform_driver_unregister(&lpass_aon_cc_sc7280_driver); +} +module_exit(lpass_audio_cc_sc7280_exit); + +MODULE_DESCRIPTION("QTI LPASS_AUDIO_CC SC7280 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/lpasscorecc-sc7280.c b/drivers/clk/qcom/lpasscorecc-sc7280.c new file mode 100644 index 000000000000..1f1f1bd1b68e --- /dev/null +++ b/drivers/clk/qcom/lpasscorecc-sc7280.c @@ -0,0 +1,431 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,lpasscorecc-sc7280.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "gdsc.h" + +enum { + P_BI_TCXO, + P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN, + P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, + P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, +}; + +static const struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +/* 614.4MHz configuration */ +static const struct alpha_pll_config lpass_core_cc_dig_pll_config = { + .l = 0x20, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0xB2923BBC, + .user_ctl_val = 0x00005100, + .user_ctl_hi_val = 0x00050805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll lpass_core_cc_dig_pll = { + .offset = 0x1000, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "lpass_core_cc_dig_pll", + .parent_data = &(const struct clk_parent_data){ + .index = 0, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_lpass_core_cc_dig_pll_out_odd[] = { + { 0x5, 5 }, + { } +}; + +static struct clk_alpha_pll_postdiv lpass_core_cc_dig_pll_out_odd = { + .offset = 0x1000, + .post_div_shift = 12, + .post_div_table = post_div_table_lpass_core_cc_dig_pll_out_odd, + .num_post_div = ARRAY_SIZE(post_div_table_lpass_core_cc_dig_pll_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "lpass_core_cc_dig_pll_out_odd", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_dig_pll.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_regmap_div lpass_core_cc_dig_pll_out_main_div_clk_src = { + .reg = 0x1054, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "lpass_core_cc_dig_pll_out_main_div_clk_src", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_dig_pll.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + + +static const struct parent_map lpass_core_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 5 }, +}; + +static const struct clk_parent_data lpass_core_cc_parent_data_0[] = { + { .index = 0 }, + { .hw = &lpass_core_cc_dig_pll_out_odd.clkr.hw }, +}; + +static const struct parent_map lpass_core_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN, 1 }, + { P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 2 }, +}; + +static const struct clk_parent_data lpass_core_cc_parent_data_ao_2[] = { + { .index = 1 }, + { .hw = &lpass_core_cc_dig_pll.clkr.hw }, + { .hw = &lpass_core_cc_dig_pll_out_main_div_clk_src.clkr.hw }, +}; + +static const struct freq_tbl ftbl_lpass_core_cc_core_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(51200000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 6, 0, 0), + F(102400000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC, 3, 0, 0), + F(204800000, P_LPASS_CORE_CC_DIG_PLL_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 
lpass_core_cc_core_clk_src = { + .cmd_rcgr = 0x1d000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_2, + .freq_tbl = ftbl_lpass_core_cc_core_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_core_clk_src", + .parent_data = lpass_core_cc_parent_data_ao_2, + .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_ao_2), + .ops = &clk_rcg2_shared_ops, + }, +}; + +static const struct freq_tbl ftbl_lpass_core_cc_ext_if0_clk_src[] = { + F(256000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 32), + F(512000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 16), + F(768000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 16), + F(1024000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 8), + F(1536000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 8), + F(2048000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 4), + F(3072000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 4), + F(4096000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 1, 2), + F(6144000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 1, 2), + F(8192000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 15, 0, 0), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(12288000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 10, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24576000, P_LPASS_CORE_CC_DIG_PLL_OUT_ODD, 5, 0, 0), + { } +}; + +static struct clk_rcg2 lpass_core_cc_ext_if0_clk_src = { + .cmd_rcgr = 0x10000, + .mnd_width = 16, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_0, + .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_ext_if0_clk_src", + .parent_data = lpass_core_cc_parent_data_0, + .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 lpass_core_cc_ext_if1_clk_src = { + .cmd_rcgr = 0x11000, + .mnd_width = 16, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_0, + .freq_tbl = ftbl_lpass_core_cc_ext_if0_clk_src, + .clkr.hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_ext_if1_clk_src", + .parent_data = lpass_core_cc_parent_data_0, + .num_parents = ARRAY_SIZE(lpass_core_cc_parent_data_0), + .ops = &clk_rcg2_ops, + }, +}; + + +static struct clk_branch lpass_core_cc_core_clk = { + .halt_reg = 0x1f000, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1f000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x1f000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_aon_ops, + }, + }, +}; + +static struct clk_branch lpass_core_cc_ext_if0_ibit_clk = { + .halt_reg = 0x10018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_ext_if0_ibit_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_ext_if0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_core_cc_ext_if1_ibit_clk = { + .halt_reg = 0x11018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x11018, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_ext_if1_ibit_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_ext_if1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch 
lpass_core_cc_lpm_core_clk = { + .halt_reg = 0x1e000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_lpm_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_core_cc_lpm_mem0_core_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1e004, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_lpm_mem0_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_core_cc_sysnoc_mport_core_clk = { + .halt_reg = 0x23000, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x23000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x23000, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data){ + .name = "lpass_core_cc_sysnoc_mport_core_clk", + .parent_hws = (const struct clk_hw*[]){ + &lpass_core_cc_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc lpass_core_cc_lpass_core_hm_gdsc = { + .gdscr = 0x0, + .pd = { + .name = "lpass_core_cc_lpass_core_hm_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE, +}; + +static struct clk_regmap *lpass_core_cc_sc7280_clocks[] = { + [LPASS_CORE_CC_CORE_CLK] = &lpass_core_cc_core_clk.clkr, + [LPASS_CORE_CC_CORE_CLK_SRC] = &lpass_core_cc_core_clk_src.clkr, + [LPASS_CORE_CC_DIG_PLL] = &lpass_core_cc_dig_pll.clkr, + [LPASS_CORE_CC_DIG_PLL_OUT_MAIN_DIV_CLK_SRC] = + &lpass_core_cc_dig_pll_out_main_div_clk_src.clkr, + [LPASS_CORE_CC_DIG_PLL_OUT_ODD] = &lpass_core_cc_dig_pll_out_odd.clkr, + [LPASS_CORE_CC_EXT_IF0_CLK_SRC] = &lpass_core_cc_ext_if0_clk_src.clkr, + [LPASS_CORE_CC_EXT_IF0_IBIT_CLK] = &lpass_core_cc_ext_if0_ibit_clk.clkr, + [LPASS_CORE_CC_EXT_IF1_CLK_SRC] = &lpass_core_cc_ext_if1_clk_src.clkr, + [LPASS_CORE_CC_EXT_IF1_IBIT_CLK] = &lpass_core_cc_ext_if1_ibit_clk.clkr, + [LPASS_CORE_CC_LPM_CORE_CLK] = &lpass_core_cc_lpm_core_clk.clkr, + [LPASS_CORE_CC_LPM_MEM0_CORE_CLK] = &lpass_core_cc_lpm_mem0_core_clk.clkr, + [LPASS_CORE_CC_SYSNOC_MPORT_CORE_CLK] = &lpass_core_cc_sysnoc_mport_core_clk.clkr, +}; + +static struct regmap_config lpass_core_cc_sc7280_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, +}; + +static const struct qcom_cc_desc lpass_core_cc_sc7280_desc = { + .config = &lpass_core_cc_sc7280_regmap_config, + .clks = lpass_core_cc_sc7280_clocks, + .num_clks = ARRAY_SIZE(lpass_core_cc_sc7280_clocks), +}; + +static const struct of_device_id lpass_core_cc_sc7280_match_table[] = { + { .compatible = "qcom,sc7280-lpasscorecc" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7280_match_table); + +static struct gdsc *lpass_core_hm_sc7280_gdscs[] = { + [LPASS_CORE_CC_LPASS_CORE_HM_GDSC] = &lpass_core_cc_lpass_core_hm_gdsc, +}; + +static const struct qcom_cc_desc lpass_core_hm_sc7280_desc = { + .config = &lpass_core_cc_sc7280_regmap_config, + .gdscs = lpass_core_hm_sc7280_gdscs, + .num_gdscs = ARRAY_SIZE(lpass_core_hm_sc7280_gdscs), +}; + +static int lpass_core_cc_sc7280_probe(struct platform_device *pdev) +{ + const struct qcom_cc_desc *desc; + struct regmap *regmap; + + 
lpass_core_cc_sc7280_regmap_config.name = "lpass_core_cc"; + lpass_core_cc_sc7280_regmap_config.max_register = 0x4f004; + desc = &lpass_core_cc_sc7280_desc; + + regmap = qcom_cc_map(pdev, desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_pll_configure(&lpass_core_cc_dig_pll, regmap, &lpass_core_cc_dig_pll_config); + + return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7280_desc, regmap); +} + +static struct platform_driver lpass_core_cc_sc7280_driver = { + .probe = lpass_core_cc_sc7280_probe, + .driver = { + .name = "lpass_core_cc-sc7280", + .of_match_table = lpass_core_cc_sc7280_match_table, + }, +}; + +static int lpass_hm_core_probe(struct platform_device *pdev) +{ + const struct qcom_cc_desc *desc; + + lpass_core_cc_sc7280_regmap_config.name = "lpass_hm_core"; + lpass_core_cc_sc7280_regmap_config.max_register = 0x24; + desc = &lpass_core_hm_sc7280_desc; + + return qcom_cc_probe_by_index(pdev, 0, desc); +} + +static const struct of_device_id lpass_hm_sc7280_match_table[] = { + { .compatible = "qcom,sc7280-lpasshm" }, + { } +}; +MODULE_DEVICE_TABLE(of, lpass_hm_sc7280_match_table); + +static struct platform_driver lpass_hm_sc7280_driver = { + .probe = lpass_hm_core_probe, + .driver = { + .name = "lpass_hm-sc7280", + .of_match_table = lpass_hm_sc7280_match_table, + }, +}; + +static int __init lpass_core_cc_sc7280_init(void) +{ + int ret; + + ret = platform_driver_register(&lpass_hm_sc7280_driver); + if (ret) + return ret; + + return platform_driver_register(&lpass_core_cc_sc7280_driver); +} +subsys_initcall(lpass_core_cc_sc7280_init); + +static void __exit lpass_core_cc_sc7280_exit(void) +{ + platform_driver_unregister(&lpass_core_cc_sc7280_driver); + platform_driver_unregister(&lpass_hm_sc7280_driver); +} +module_exit(lpass_core_cc_sc7280_exit); + +MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7280 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index c281f3af5716..cacaf9b87d26 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -32,9 +32,12 @@ config CLK_RENESAS select CLK_R8A77995 if ARCH_R8A77995 select CLK_R8A779A0 if ARCH_R8A779A0 select CLK_R8A779F0 if ARCH_R8A779F0 + select CLK_R8A779G0 if ARCH_R8A779G0 select CLK_R9A06G032 if ARCH_R9A06G032 + select CLK_R9A07G043 if ARCH_R9A07G043 select CLK_R9A07G044 if ARCH_R9A07G044 select CLK_R9A07G054 if ARCH_R9A07G054 + select CLK_R9A09G011 if ARCH_R9A09G011 select CLK_SH73A0 if ARCH_SH73A0 if CLK_RENESAS @@ -157,9 +160,17 @@ config CLK_R8A779F0 bool "R-Car S4-8 clock support" if COMPILE_TEST select CLK_RCAR_GEN4_CPG +config CLK_R8A779G0 + bool "R-Car V4H clock support" if COMPILE_TEST + select CLK_RCAR_GEN4_CPG + config CLK_R9A06G032 bool "RZ/N1D clock support" if COMPILE_TEST +config CLK_R9A07G043 + bool "RZ/G2UL clock support" if COMPILE_TEST + select CLK_RZG2L + config CLK_R9A07G044 bool "RZ/G2L clock support" if COMPILE_TEST select CLK_RZG2L @@ -168,6 +179,10 @@ config CLK_R9A07G054 bool "RZ/V2L clock support" if COMPILE_TEST select CLK_RZG2L +config CLK_R9A09G011 + bool "RZ/V2M clock support" if COMPILE_TEST + select CLK_RZG2L + config CLK_SH73A0 bool "SH-Mobile AG5 clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP @@ -200,7 +215,7 @@ config CLK_RCAR_USB2_CLOCK_SEL This is a driver for R-Car USB2 clock selector config CLK_RZG2L - bool "Renesas RZ/{G2L,V2L} family clock support" if COMPILE_TEST + bool "Renesas RZ/{G2L,G2UL,V2L} family clock support" if COMPILE_TEST select RESET_CONTROLLER # Generic diff --git 
a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index d5e571699a30..de907623fe3f 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -29,9 +29,12 @@ obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o obj-$(CONFIG_CLK_R8A779F0) += r8a779f0-cpg-mssr.o +obj-$(CONFIG_CLK_R8A779G0) += r8a779g0-cpg-mssr.o obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o +obj-$(CONFIG_CLK_R9A07G043) += r9a07g043-cpg.o obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o obj-$(CONFIG_CLK_R9A07G054) += r9a07g044-cpg.o +obj-$(CONFIG_CLK_R9A09G011) += r9a09g011-cpg.o obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o # Family diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index 95dd56b64d64..ad03c09ebc1f 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -68,12 +68,8 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A774A1_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -109,6 +105,9 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, R8A774A1_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, R8A774A1_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774A1_CLK_RPC), + DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c index 56061b9b8437..ab087b02ef90 100644 --- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c @@ -66,12 +66,8 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A774B1_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -106,6 +102,9 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, R8A774B1_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, R8A774B1_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774B1_CLK_RPC), + DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A774B1_CLK_CPEX, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c index b5eb5dc45d62..c9c8fde0f0a6 100644 --- 
a/drivers/clk/renesas/r8a774c0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c @@ -77,11 +77,6 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1), - DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A774C0_CLK_RPC), - DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), @@ -108,6 +103,9 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = { DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1), DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1), + DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774C0_CLK_RPC), + DEF_GEN3_SDH("sd0h", R8A774C0_CLK_SD0H, CLK_SDSRC, 0x0074), DEF_GEN3_SDH("sd1h", R8A774C0_CLK_SD1H, CLK_SDSRC, 0x0078), DEF_GEN3_SDH("sd3h", R8A774C0_CLK_SD3H, CLK_SDSRC, 0x026c), diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c index 2950f0db90ae..a790061db877 100644 --- a/drivers/clk/renesas/r8a774e1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c @@ -68,12 +68,8 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A774E1_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -109,6 +105,9 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, R8A774E1_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, R8A774E1_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A774E1_CLK_RPC), + DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A774E1_CLK_CP, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index 991a44315d71..301475c74f50 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -71,12 +71,8 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A7795_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -113,6 +109,9 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, R8A7795_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, R8A7795_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A7795_CLK_RPC), + DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cr", 
R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index 7950313611ef..c4969318508e 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -73,12 +73,8 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A7796_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -115,6 +111,9 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, R8A7796_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, R8A7796_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A7796_CLK_RPC), + DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index d687c29efa3c..78f6e530848e 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -69,12 +69,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A77965_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -110,6 +106,9 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, R8A77965_CLK_SD2H, 0x268), DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, R8A77965_CLK_SD3H, 0x26c), + DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77965_CLK_RPC), + DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c index f3cd64de4dc6..06f925aff407 100644 --- a/drivers/clk/renesas/r8a77980-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c @@ -66,13 +66,10 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), - DEF_RATE(".oco", CLK_OCO, 32768), - DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC, - CLK_RPCSRC), - DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, - R8A77980_CLK_RPC), + DEF_RATE(".oco", CLK_OCO, 32768), /* Core Clock Outputs */ DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), 
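As an aside to the DEF_FIXED() entries being rearranged in these hunks: judging from definitions such as DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1) and DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), the last two arguments appear to be a divider and a multiplier applied to the parent rate. The following stand-alone sketch only illustrates that rate derivation; the struct and helper names and the 1.6 GHz PLL1 figure are invented for the example and are not part of this patch or of the CPG/MSSR driver.

/*
 * Hedged sketch: model DEF_FIXED(name, id, parent, div, mult) as
 * rate = parent_rate * mult / div (assumed ordering of div/mult).
 * The PLL1 rate below is an invented example value.
 */
#include <stdio.h>

struct fixed_clk {
	const char *name;
	unsigned int div;
	unsigned int mult;
};

static unsigned long fixed_rate(unsigned long parent_rate,
				const struct fixed_clk *clk)
{
	return parent_rate * clk->mult / clk->div;
}

int main(void)
{
	/* mirrors DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1) */
	const struct fixed_clk pll1_div2 = { ".pll1_div2", 2, 1 };
	/* mirrors DEF_FIXED("ztr", R8A77980_CLK_ZTR, CLK_PLL1_DIV2, 6, 1) */
	const struct fixed_clk ztr = { "ztr", 6, 1 };
	unsigned long pll1 = 1600000000UL;	/* hypothetical PLL1 rate */
	unsigned long div2 = fixed_rate(pll1, &pll1_div2);

	printf("%s = %lu Hz\n", pll1_div2.name, div2);	/* 800 MHz */
	printf("%s = %lu Hz\n", ztr.name, fixed_rate(div2, &ztr));
	return 0;
}

The sketch compiles as ordinary user-space C and is only meant to make the parent/div/mult relationship in the tables above easier to follow.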
@@ -99,6 +96,9 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = { DEF_GEN3_SDH("sd0h", R8A77980_CLK_SD0H, CLK_SDSRC, 0x0074), DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, R8A77980_CLK_SD0H, 0x0074), + DEF_BASE("rpc", R8A77980_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A77980_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77980_CLK_RPC), + DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1), DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A77980_CLK_CPEX, CLK_EXTAL, 2, 1), diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index d34d97baab35..b666d099365e 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -44,6 +44,7 @@ enum clk_ids { CLK_S2, CLK_S3, CLK_SDSRC, + CLK_RPCSRC, CLK_RINT, CLK_OCO, @@ -74,6 +75,8 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1), + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), @@ -107,6 +110,9 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, R8A77990_CLK_SD1H, 0x0078), DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, R8A77990_CLK_SD3H, 0x026c), + DEF_BASE("rpc", R8A77990_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A77990_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77990_CLK_RPC), + DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1), DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1), DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1), @@ -215,6 +221,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("can-fd", 914, R8A77990_CLK_S3D2), DEF_MOD("can-if1", 915, R8A77990_CLK_S3D4), DEF_MOD("can-if0", 916, R8A77990_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A77990_CLK_RPCD2), DEF_MOD("i2c6", 918, R8A77990_CLK_S3D2), DEF_MOD("i2c5", 919, R8A77990_CLK_S3D2), DEF_MOD("i2c-dvfs", 926, R8A77990_CLK_CP), diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index 525eef197fd9..24ba9093a72f 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -42,6 +42,7 @@ enum clk_ids { CLK_S2, CLK_S3, CLK_SDSRC, + CLK_RPCSRC, CLK_RINT, CLK_OCO, @@ -70,6 +71,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1), + DEF_FIXED_RPCSRC_D3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1), + DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32), DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000), @@ -103,8 +106,11 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2), DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4), - DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268), - DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268), + DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268), + + DEF_BASE("rpc", R8A77995_CLK_RPC, CLK_TYPE_GEN3_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A77995_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, R8A77995_CLK_RPC), DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244), DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014), @@ -174,6 +180,7 @@ static const struct 
mssr_mod_clk r8a77995_mod_clks[] __initconst = { DEF_MOD("can-fd", 914, R8A77995_CLK_S3D2), DEF_MOD("can-if1", 915, R8A77995_CLK_S3D4), DEF_MOD("can-if0", 916, R8A77995_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A77995_CLK_RPCD2), DEF_MOD("i2c3", 928, R8A77995_CLK_S3D2), DEF_MOD("i2c2", 929, R8A77995_CLK_S3D2), DEF_MOD("i2c1", 930, R8A77995_CLK_S3D2), diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c index fadd8a1718c6..d74d46833012 100644 --- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c @@ -85,11 +85,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1), + DEF_RATE(".oco", CLK_OCO, 32768), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), - DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), - DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, - R8A779A0_CLK_RPC), + + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), /* Core Clock Outputs */ DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0), @@ -120,6 +119,10 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = { DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870), DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870), + DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, + R8A779A0_CLK_RPC), + DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878), DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880), @@ -241,7 +244,7 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = { /* * MD EXTAL PLL1 PLL20 PLL30 PLL4 PLL5 OSC * 14 13 (MHz) 21 31 - * -------------------------------------------------------- + * ---------------------------------------------------------------- * 0 0 16.66 x 1 x128 x216 x128 x144 x192 /16 * 0 1 20 x 1 x106 x180 x106 x120 x160 /19 * 1 0 Prohibited setting @@ -250,11 +253,11 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = { #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 16, }, - { 1, 106, 1, 0, 0, 0, 0, 160, 1, 0, 0, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 32, }, + /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ + { 1, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 16, }, + { 1, 106, 1, 0, 0, 0, 0, 120, 1, 160, 1, 0, 0, 19, }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + { 2, 128, 1, 0, 0, 0, 0, 144, 1, 192, 1, 0, 0, 32, }, }; diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c index 76b441965037..c17ebe6b5992 100644 --- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c @@ -70,12 +70,11 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1), DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1), DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".sdsrc", CLK_SDSRC, 
CLK_TYPE_GEN4_SDSRC, CLK_PLL5), DEF_RATE(".oco", CLK_OCO, 32768), - DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), - DEF_BASE(".rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), - DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), /* Core Clock Outputs */ DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1), @@ -108,6 +107,10 @@ static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = { DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1), DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870), + + DEF_BASE("rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC), + DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8), @@ -129,6 +132,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = { DEF_MOD("sys-dmac1", 710, R8A779F0_CLK_S0D3_PER), DEF_MOD("wdt", 907, R8A779F0_CLK_R), DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M), + DEF_MOD("ufs", 1514, R8A779F0_CLK_S0D4_HSC), }; static const unsigned int r8a779f0_crit_mod_clks[] __initconst = { @@ -139,23 +143,23 @@ static const unsigned int r8a779f0_crit_mod_clks[] __initconst = { * CPG Clock Data */ /* - * MD EXTAL PLL1 PLL2 PLL3 PLL5 PLL6 OSC + * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC * 14 13 (MHz) - * ---------------------------------------------------------------- - * 0 0 16 / 1 x200 x150 x200 x200 x134 /15 - * 0 1 20 / 1 x160 x120 x160 x160 x106 /19 + * ------------------------------------------------------------------------ + * 0 0 16 / 1 x200 x150 x200 n/a x200 x134 /15 + * 0 1 20 / 1 x160 x120 x160 n/a x160 x106 /19 * 1 0 Prohibited setting - * 1 1 40 / 2 x160 x120 x160 x160 x106 /38 + * 1 1 40 / 2 x160 x120 x160 n/a x160 x106 /38 */ #define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ (((md) & BIT(13)) >> 13)) static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { - /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ - { 1, 200, 1, 150, 1, 200, 1, 200, 1, 134, 1, 15, }, - { 1, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 19, }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, - { 2, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 38, }, + /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ + { 1, 200, 1, 150, 1, 200, 1, 0, 0, 200, 1, 134, 1, 15, }, + { 1, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 19, }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + { 2, 160, 1, 120, 1, 160, 1, 0, 0, 160, 1, 106, 1, 38, }, }; static int __init r8a779f0_cpg_mssr_init(struct device *dev) diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c new file mode 100644 index 000000000000..3fc4233b1ead --- /dev/null +++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c @@ -0,0 +1,218 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a779g0 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2022 Renesas Electronics Corp. 
+ * + * Based on r8a779f0-cpg-mssr.c + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/soc/renesas/rcar-rst.h> + +#include <dt-bindings/clock/r8a779g0-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen4-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A779G0_CLK_R, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL1, + CLK_PLL2, + CLK_PLL3, + CLK_PLL4, + CLK_PLL5, + CLK_PLL6, + CLK_PLL1_DIV2, + CLK_PLL2_DIV2, + CLK_PLL3_DIV2, + CLK_PLL4_DIV2, + CLK_PLL5_DIV2, + CLK_PLL5_DIV4, + CLK_PLL6_DIV2, + CLK_S0, + CLK_S0_VIO, + CLK_S0_VC, + CLK_S0_HSC, + CLK_SV_VIP, + CLK_SV_IR, + CLK_SDSRC, + CLK_RPCSRC, + CLK_VIO, + CLK_VC, + CLK_OCO, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN), + DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN4_PLL4, CLK_MAIN), + DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN), + DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1), + DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1), + DEF_FIXED(".pll4_div2", CLK_PLL4_DIV2, CLK_PLL4, 2, 1), + DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1), + DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1), + DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1), + DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1), + DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5), + DEF_RATE(".oco", CLK_OCO, 32768), + + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5), + DEF_FIXED(".vio", CLK_VIO, CLK_PLL5_DIV2, 3, 1), + DEF_FIXED(".vc", CLK_VC, CLK_PLL5_DIV2, 3, 1), + + /* Core Clock Outputs */ + DEF_FIXED("s0d2", R8A779G0_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A779G0_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A779G0_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("cl16m", R8A779G0_CLK_CL16M, CLK_S0, 48, 1), + DEF_FIXED("s0d1_vio", R8A779G0_CLK_S0D1_VIO, CLK_S0_VIO, 1, 1), + DEF_FIXED("s0d2_vio", R8A779G0_CLK_S0D2_VIO, CLK_S0_VIO, 2, 1), + DEF_FIXED("s0d4_vio", R8A779G0_CLK_S0D4_VIO, CLK_S0_VIO, 4, 1), + DEF_FIXED("s0d8_vio", R8A779G0_CLK_S0D8_VIO, CLK_S0_VIO, 8, 1), + DEF_FIXED("s0d1_vc", R8A779G0_CLK_S0D1_VC, CLK_S0_VC, 1, 1), + DEF_FIXED("s0d2_vc", R8A779G0_CLK_S0D2_VC, CLK_S0_VC, 2, 1), + DEF_FIXED("s0d4_vc", R8A779G0_CLK_S0D4_VC, CLK_S0_VC, 4, 1), + DEF_FIXED("s0d2_mm", R8A779G0_CLK_S0D2_MM, CLK_S0, 2, 1), + DEF_FIXED("s0d4_mm", R8A779G0_CLK_S0D4_MM, CLK_S0, 4, 1), + DEF_FIXED("cl16m_mm", R8A779G0_CLK_CL16M_MM, CLK_S0, 48, 1), + DEF_FIXED("s0d2_u3dg", R8A779G0_CLK_S0D2_U3DG, CLK_S0, 2, 1), + DEF_FIXED("s0d4_u3dg", 
R8A779G0_CLK_S0D4_U3DG, CLK_S0, 4, 1), + DEF_FIXED("s0d2_rt", R8A779G0_CLK_S0D2_RT, CLK_S0, 2, 1), + DEF_FIXED("s0d3_rt", R8A779G0_CLK_S0D3_RT, CLK_S0, 3, 1), + DEF_FIXED("s0d4_rt", R8A779G0_CLK_S0D4_RT, CLK_S0, 4, 1), + DEF_FIXED("s0d6_rt", R8A779G0_CLK_S0D6_RT, CLK_S0, 6, 1), + DEF_FIXED("s0d24_rt", R8A779G0_CLK_S0D24_RT, CLK_S0, 24, 1), + DEF_FIXED("cl16m_rt", R8A779G0_CLK_CL16M_RT, CLK_S0, 48, 1), + DEF_FIXED("s0d2_per", R8A779G0_CLK_S0D2_PER, CLK_S0, 2, 1), + DEF_FIXED("s0d3_per", R8A779G0_CLK_S0D3_PER, CLK_S0, 3, 1), + DEF_FIXED("s0d4_per", R8A779G0_CLK_S0D4_PER, CLK_S0, 4, 1), + DEF_FIXED("s0d6_per", R8A779G0_CLK_S0D6_PER, CLK_S0, 6, 1), + DEF_FIXED("s0d12_per", R8A779G0_CLK_S0D12_PER, CLK_S0, 12, 1), + DEF_FIXED("s0d24_per", R8A779G0_CLK_S0D24_PER, CLK_S0, 24, 1), + DEF_FIXED("cl16m_per", R8A779G0_CLK_CL16M_PER, CLK_S0, 48, 1), + DEF_FIXED("s0d1_hsc", R8A779G0_CLK_S0D1_HSC, CLK_S0_HSC, 1, 1), + DEF_FIXED("s0d2_hsc", R8A779G0_CLK_S0D2_HSC, CLK_S0_HSC, 2, 1), + DEF_FIXED("s0d4_hsc", R8A779G0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1), + DEF_FIXED("cl16m_hsc", R8A779G0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1), + DEF_FIXED("s0d2_cc", R8A779G0_CLK_S0D2_CC, CLK_S0, 2, 1), + DEF_FIXED("svd1_ir", R8A779G0_CLK_SVD1_IR, CLK_SV_IR, 1, 1), + DEF_FIXED("svd2_ir", R8A779G0_CLK_SVD2_IR, CLK_SV_IR, 2, 1), + DEF_FIXED("svd1_vip", R8A779G0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1), + DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1), + DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1), + DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1), + DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1), + DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1), + DEF_FIXED("vcbusd2", R8A779G0_CLK_VCBUSD2, CLK_VC, 2, 1), + + DEF_GEN4_SD("sd0", R8A779G0_CLK_SD0, CLK_SDSRC, 0x870), + DEF_DIV6P1("mso", R8A779G0_CLK_MSO, CLK_PLL5_DIV4, 0x87c), + + DEF_BASE("rpc", R8A779G0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC), + DEF_BASE("rpcd2", R8A779G0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779G0_CLK_RPC), + + DEF_GEN4_OSC("osc", R8A779G0_CLK_OSC, CLK_EXTAL, 8), + DEF_GEN4_MDSEL("r", R8A779G0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1), +}; + +static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = { + DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER), + DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER), + DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER), + DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER), +}; + +/* + * CPG Clock Data + */ +/* + * MD EXTAL PLL1 PLL2 PLL3 PLL4 PLL5 PLL6 OSC + * 14 13 (MHz) + * ------------------------------------------------------------------------ + * 0 0 16.66 / 1 x192 x204 x192 x144 x192 x168 /15 + * 0 1 20 / 1 x160 x170 x160 x120 x160 x140 /19 + * 1 0 Prohibited setting + * 1 1 33.33 / 2 x192 x204 x192 x144 x192 x168 /38 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \ + (((md) & BIT(13)) >> 13)) + +static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = { + /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL4 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */ + { 1, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 15, }, + { 1, 160, 1, 170, 1, 160, 1, 120, 1, 160, 1, 140, 1, 19, }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, + { 2, 192, 1, 204, 1, 192, 1, 144, 1, 192, 1, 168, 1, 38, }, +}; + +static int __init r8a779g0_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen4_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + 
return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + if (!cpg_pll_config->extal_div) { + dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode); + return -EINVAL; + } + + return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); +} + +const struct cpg_mssr_info r8a779g0_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a779g0_core_clks, + .num_core_clks = ARRAY_SIZE(r8a779g0_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a779g0_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a779g0_mod_clks), + .num_hw_mod_clks = 30 * 32, + + /* Callbacks */ + .init = r8a779g0_cpg_mssr_init, + .cpg_clk_register = rcar_gen4_cpg_clk_register, + + .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4, +}; diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c index c99942f0e4d4..35ffc462af1a 100644 --- a/drivers/clk/renesas/r9a06g032-clocks.c +++ b/drivers/clk/renesas/r9a06g032-clocks.c @@ -16,13 +16,17 @@ #include <linux/math64.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_clock.h> #include <linux/pm_domain.h> #include <linux/slab.h> +#include <linux/soc/renesas/r9a06g032-sysctrl.h> #include <linux/spinlock.h> #include <dt-bindings/clock/r9a06g032-sysctrl.h> +#define R9A06G032_SYSCTRL_DMAMUX 0xA0 + struct r9a06g032_gate { u16 gate, reset, ready, midle, scon, mirack, mistat; @@ -256,7 +260,7 @@ static const struct r9a06g032_clkdesc r9a06g032_clocks[] = { D_MODULE(HCLK_QSPI0, "hclk_qspi0", CLK_REF_SYNC_D4, 0x2a0, 0x2a1, 0x2a2, 0x2a3, 0x300, 0x301, 0x302), D_MODULE(HCLK_QSPI1, "hclk_qspi1", CLK_REF_SYNC_D4, 0x480, 0x481, 0x482, 0x483, 0x4c0, 0x4c1, 0x4c2), D_MODULE(HCLK_ROM, "hclk_rom", CLK_REF_SYNC_D4, 0xaa0, 0xaa1, 0xaa2, 0, 0xb80, 0, 0), - D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0, 0, 0, 0, 0, 0), + D_MODULE(HCLK_RTC, "hclk_rtc", CLK_REF_SYNC_D8, 0xa00, 0xa03, 0, 0xa02, 0, 0, 0), D_MODULE(HCLK_SDIO0, "hclk_sdio0", CLK_REF_SYNC_D4, 0x60, 0x61, 0x62, 0x63, 0x80, 0x81, 0x82), D_MODULE(HCLK_SDIO1, "hclk_sdio1", CLK_REF_SYNC_D4, 0x640, 0x641, 0x642, 0x643, 0x660, 0x661, 0x662), D_MODULE(HCLK_SEMAP, "hclk_semap", CLK_REF_SYNC_D4, 0x7a3, 0x7a4, 0x7a5, 0, 0xb21, 0, 0), @@ -315,6 +319,30 @@ struct r9a06g032_priv { void __iomem *reg; }; +static struct r9a06g032_priv *sysctrl_priv; + +/* Exported helper to access the DMAMUX register */ +int r9a06g032_sysctrl_set_dmamux(u32 mask, u32 val) +{ + unsigned long flags; + u32 dmamux; + + if (!sysctrl_priv) + return -EPROBE_DEFER; + + spin_lock_irqsave(&sysctrl_priv->lock, flags); + + dmamux = readl(sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); + dmamux &= ~mask; + dmamux |= val & mask; + writel(dmamux, sysctrl_priv->reg + R9A06G032_SYSCTRL_DMAMUX); + + spin_unlock_irqrestore(&sysctrl_priv->lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(r9a06g032_sysctrl_set_dmamux); + /* register/bit pairs are encoded as an uint16_t */ static void clk_rdesc_set(struct r9a06g032_priv *clocks, @@ -963,7 +991,17 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev) if (error) return error; - return r9a06g032_add_clk_domain(dev); + error = r9a06g032_add_clk_domain(dev); + if (error) + return error; + + sysctrl_priv = clocks; + + error = of_platform_populate(np, NULL, NULL, dev); + if (error) + dev_err(dev, "Failed to populate children (%d)\n", error); + + return 0; } static const struct of_device_id 
r9a06g032_match[] = { diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c new file mode 100644 index 000000000000..33c2bd8df2e5 --- /dev/null +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RZ/G2UL CPG driver + * + * Copyright (C) 2022 Renesas Electronics Corp. + */ + +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include <dt-bindings/clock/r9a07g043-cpg.h> + +#include "rzg2l-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_OSC_DIV1000, + CLK_PLL1, + CLK_PLL2, + CLK_PLL2_DIV2, + CLK_PLL2_DIV2_8, + CLK_PLL2_DIV2_10, + CLK_PLL3, + CLK_PLL3_400, + CLK_PLL3_533, + CLK_PLL3_DIV2, + CLK_PLL3_DIV2_4, + CLK_PLL3_DIV2_4_2, + CLK_SEL_PLL3_3, + CLK_DIV_PLL3_C, + CLK_PLL5, + CLK_PLL5_500, + CLK_PLL5_250, + CLK_PLL6, + CLK_PLL6_250, + CLK_P1_DIV2, + CLK_PLL2_800, + CLK_PLL2_SDHI_533, + CLK_PLL2_SDHI_400, + CLK_PLL2_SDHI_266, + CLK_SD0_DIV4, + CLK_SD1_DIV4, + + /* Module Clocks */ + MOD_CLK_BASE, +}; + +/* Divider tables */ +static const struct clk_div_table dtable_1_8[] = { + {0, 1}, + {1, 2}, + {2, 4}, + {3, 8}, + {0, 0}, +}; + +static const struct clk_div_table dtable_1_32[] = { + {0, 1}, + {1, 2}, + {2, 4}, + {3, 8}, + {4, 32}, + {0, 0}, +}; + +/* Mux clock tables */ +static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; +static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; +static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; + +static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_FIXED(".osc", R9A07G043_OSCCLK, CLK_EXTAL, 1, 1), + DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), + DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)), + DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), + DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2), + DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2), + DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3), + DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2), + DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2), + DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8), + DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10), + DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), + DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), + DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), + DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), + DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4), + DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3), + DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3), + DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32), + DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1), + DEF_FIXED(".pll5_500", CLK_PLL5_500, CLK_PLL5, 1, 6), + DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_500, 1, 2), + DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6), + DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2), + + /* Core output clk */ + DEF_DIV("I", R9A07G043_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8), + DEF_DIV("P0", R9A07G043_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A, dtable_1_32), + DEF_FIXED("P0_DIV2", R9A07G043_CLK_P0_DIV2, 
R9A07G043_CLK_P0, 1, 2), + DEF_FIXED("TSU", R9A07G043_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1), + DEF_DIV("P1", R9A07G043_CLK_P1, CLK_PLL3_DIV2_4, DIVPL3B, dtable_1_32), + DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G043_CLK_P1, 1, 2), + DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32), + DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), + DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), + DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2), + DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), + DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), + DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi), + DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi), + DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4), + DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4), +}; + +static struct rzg2l_mod_clk r9a07g043_mod_clks[] = { + DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1, + 0x514, 0), + DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2, + 0x518, 0), + DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1, + 0x518, 1), + DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1, + 0x52c, 0), + DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2, + 0x52c, 1), + DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0, + 0x534, 0), + DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0, + 0x534, 1), + DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0, + 0x534, 2), + DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0, + 0x548, 0), + DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK, + 0x548, 1), + DEF_MOD("wdt2_pclk", R9A07G043_WDT2_PCLK, R9A07G043_CLK_P0, + 0x548, 4), + DEF_MOD("wdt2_clk", R9A07G043_WDT2_CLK, R9A07G043_OSCCLK, + 0x548, 5), + DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1, + 0x550, 0), + DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0, + 0x550, 1), + DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4, + 0x554, 0), + DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4, + 0x554, 1), + DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0, + 0x554, 2), + DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1, + 0x554, 3), + DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4, + 0x554, 4), + DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4, + 0x554, 5), + DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1, + 0x554, 6), + DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1, + 0x554, 7), + DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0, + 0x570, 0), + DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0, + 0x570, 1), + DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0, + 0x570, 2), + DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0, + 0x570, 3), + DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0, + 0x570, 4), + DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0, + 0x570, 5), + DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0, + 0x570, 6), + DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0, + 0x570, 7), + DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1, + 0x578, 0), + DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1, + 0x578, 1), + DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1, + 0x578, 2), + DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1, + 0x578, 3), + DEF_COUPLED("eth0_axi", 
R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0, + 0x57c, 0), + DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT, + 0x57c, 0), + DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0, + 0x57c, 1), + DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT, + 0x57c, 1), + DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0, + 0x580, 0), + DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0, + 0x580, 1), + DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0, + 0x580, 2), + DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0, + 0x580, 3), + DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0, + 0x584, 0), + DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0, + 0x584, 1), + DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0, + 0x584, 2), + DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0, + 0x584, 3), + DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0, + 0x584, 4), + DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0, + 0x588, 0), + DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0, + 0x588, 1), + DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0, + 0x590, 0), + DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0, + 0x590, 1), + DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0, + 0x590, 2), + DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0, + 0x594, 0), + DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK, + 0x598, 0), + DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU, + 0x5a8, 0), + DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0, + 0x5a8, 1), + DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU, + 0x5ac, 0), +}; + +static struct rzg2l_reset r9a07g043_resets[] = { + DEF_RST(R9A07G043_GIC600_GICRESET_N, 0x814, 0), + DEF_RST(R9A07G043_GIC600_DBG_GICRESET_N, 0x814, 1), + DEF_RST(R9A07G043_IA55_RESETN, 0x818, 0), + DEF_RST(R9A07G043_DMAC_ARESETN, 0x82c, 0), + DEF_RST(R9A07G043_DMAC_RST_ASYNC, 0x82c, 1), + DEF_RST(R9A07G043_OSTM0_PRESETZ, 0x834, 0), + DEF_RST(R9A07G043_OSTM1_PRESETZ, 0x834, 1), + DEF_RST(R9A07G043_OSTM2_PRESETZ, 0x834, 2), + DEF_RST(R9A07G043_WDT0_PRESETN, 0x848, 0), + DEF_RST(R9A07G043_WDT2_PRESETN, 0x848, 2), + DEF_RST(R9A07G043_SPI_RST, 0x850, 0), + DEF_RST(R9A07G043_SDHI0_IXRST, 0x854, 0), + DEF_RST(R9A07G043_SDHI1_IXRST, 0x854, 1), + DEF_RST(R9A07G043_SSI0_RST_M2_REG, 0x870, 0), + DEF_RST(R9A07G043_SSI1_RST_M2_REG, 0x870, 1), + DEF_RST(R9A07G043_SSI2_RST_M2_REG, 0x870, 2), + DEF_RST(R9A07G043_SSI3_RST_M2_REG, 0x870, 3), + DEF_RST(R9A07G043_USB_U2H0_HRESETN, 0x878, 0), + DEF_RST(R9A07G043_USB_U2H1_HRESETN, 0x878, 1), + DEF_RST(R9A07G043_USB_U2P_EXL_SYSRST, 0x878, 2), + DEF_RST(R9A07G043_USB_PRESETN, 0x878, 3), + DEF_RST(R9A07G043_ETH0_RST_HW_N, 0x87c, 0), + DEF_RST(R9A07G043_ETH1_RST_HW_N, 0x87c, 1), + DEF_RST(R9A07G043_I2C0_MRST, 0x880, 0), + DEF_RST(R9A07G043_I2C1_MRST, 0x880, 1), + DEF_RST(R9A07G043_I2C2_MRST, 0x880, 2), + DEF_RST(R9A07G043_I2C3_MRST, 0x880, 3), + DEF_RST(R9A07G043_SCIF0_RST_SYSTEM_N, 0x884, 0), + DEF_RST(R9A07G043_SCIF1_RST_SYSTEM_N, 0x884, 1), + DEF_RST(R9A07G043_SCIF2_RST_SYSTEM_N, 0x884, 2), + DEF_RST(R9A07G043_SCIF3_RST_SYSTEM_N, 0x884, 3), + DEF_RST(R9A07G043_SCIF4_RST_SYSTEM_N, 0x884, 4), + DEF_RST(R9A07G043_SCI0_RST, 0x888, 0), + DEF_RST(R9A07G043_SCI1_RST, 0x888, 1), + DEF_RST(R9A07G043_RSPI0_RST, 0x890, 0), + DEF_RST(R9A07G043_RSPI1_RST, 0x890, 1), + DEF_RST(R9A07G043_RSPI2_RST, 0x890, 2), + DEF_RST(R9A07G043_CANFD_RSTP_N, 0x894, 0), + DEF_RST(R9A07G043_CANFD_RSTC_N, 0x894, 1), + DEF_RST(R9A07G043_GPIO_RSTN, 
0x898, 0), + DEF_RST(R9A07G043_GPIO_PORT_RESETN, 0x898, 1), + DEF_RST(R9A07G043_GPIO_SPARE_RESETN, 0x898, 2), + DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0), + DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1), + DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0), +}; + +static const unsigned int r9a07g043_crit_mod_clks[] __initconst = { + MOD_CLK_BASE + R9A07G043_GIC600_GICCLK, + MOD_CLK_BASE + R9A07G043_IA55_CLK, + MOD_CLK_BASE + R9A07G043_DMAC_ACLK, +}; + +const struct rzg2l_cpg_info r9a07g043_cpg_info = { + /* Core Clocks */ + .core_clks = r9a07g043_core_clks, + .num_core_clks = ARRAY_SIZE(r9a07g043_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Critical Module Clocks */ + .crit_mod_clks = r9a07g043_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r9a07g043_crit_mod_clks), + + /* Module Clocks */ + .mod_clks = r9a07g043_mod_clks, + .num_mod_clks = ARRAY_SIZE(r9a07g043_mod_clks), + .num_hw_mod_clks = R9A07G043_TSU_PCLK + 1, + + /* Resets */ + .resets = r9a07g043_resets, + .num_resets = R9A07G043_TSU_PRESETN + 1, /* Last reset ID + 1 */ + + .has_clk_mon_regs = true, +}; diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index bdfabb992a20..b288897852c7 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -32,6 +32,7 @@ enum clk_ids { CLK_PLL3, CLK_PLL3_400, CLK_PLL3_533, + CLK_M2_DIV2, CLK_PLL3_DIV2, CLK_PLL3_DIV2_2, CLK_PLL3_DIV2_4, @@ -40,6 +41,8 @@ enum clk_ids { CLK_DIV_PLL3_C, CLK_PLL4, CLK_PLL5, + CLK_PLL5_FOUTPOSTDIV, + CLK_PLL5_FOUT1PH0, CLK_PLL5_FOUT3, CLK_PLL5_250, CLK_PLL6, @@ -52,6 +55,11 @@ enum clk_ids { CLK_SD0_DIV4, CLK_SD1_DIV4, CLK_SEL_GPU2, + CLK_SEL_PLL5_4, + CLK_DSI_DIV, + CLK_PLL2_533, + CLK_PLL2_533_DIV2, + CLK_DIV_DSI_LPCLK, /* Module Clocks */ MOD_CLK_BASE, @@ -75,14 +83,23 @@ static const struct clk_div_table dtable_1_32[] = { {0, 0}, }; +static const struct clk_div_table dtable_16_128[] = { + {0, 16}, + {1, 32}, + {2, 64}, + {3, 128}, + {0, 0}, +}; + /* Mux clock tables */ static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; +static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" }; static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" }; static const struct { - struct cpg_core_clk common[44]; + struct cpg_core_clk common[56]; #ifdef CONFIG_CLK_R9A07G054 struct cpg_core_clk drp[0]; #endif @@ -96,6 +113,7 @@ static const struct { DEF_FIXED(".osc_div1000", CLK_OSC_DIV1000, CLK_EXTAL, 1, 1000), DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)), DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 200, 3), + DEF_FIXED(".pll2_533", CLK_PLL2_533, CLK_PLL2, 1, 3), DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 200, 3), DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4), DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3), @@ -114,46 +132,48 @@ static const struct { DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8), DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10), + DEF_FIXED(".pll2_533_div2", CLK_PLL2_533_DIV2, CLK_PLL2_533, 1, 2), + DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2), DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), - DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, - 
sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY), - DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, - DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_MUX_RO(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3, sel_pll3_3), + DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3, DIVPL3C, dtable_1_32), DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2), DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2), - DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2, - sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY), + DEF_MUX_RO(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2, sel_gpu2), + DEF_PLL5_FOUTPOSTDIV(".pll5_foutpostdiv", CLK_PLL5_FOUTPOSTDIV, CLK_EXTAL), + DEF_FIXED(".pll5_fout1ph0", CLK_PLL5_FOUT1PH0, CLK_PLL5_FOUTPOSTDIV, 1, 2), + DEF_PLL5_4_MUX(".sel_pll5_4", CLK_SEL_PLL5_4, SEL_PLL5_4, sel_pll5_4), + DEF_DIV(".div_dsi_lpclk", CLK_DIV_DSI_LPCLK, CLK_PLL2_533_DIV2, + DIVDSILPCLK, dtable_16_128), /* Core output clk */ - DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8, - CLK_DIVIDER_HIWORD_MASK), - DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A, - dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8), + DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A, dtable_1_32), DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2), DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1), - DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, - DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, DIVPL3B, dtable_1_32), DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2), - DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, - DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32), DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), - DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, - sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK), + DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2), DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), - DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, - sel_shdi, ARRAY_SIZE(sel_shdi)), - DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, - sel_shdi, ARRAY_SIZE(sel_shdi)), + DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi), + DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi), DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4), DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4), - DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8, - CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8), + DEF_FIXED("M1", R9A07G044_CLK_M1, CLK_PLL5_FOUTPOSTDIV, 1, 1), + DEF_FIXED("M2", R9A07G044_CLK_M2, CLK_PLL3_533, 1, 2), + DEF_FIXED("M2_DIV2", CLK_M2_DIV2, R9A07G044_CLK_M2, 1, 2), + DEF_DSI_DIV("DSI_DIV", CLK_DSI_DIV, CLK_SEL_PLL5_4, CLK_SET_RATE_PARENT), + DEF_FIXED("M3", R9A07G044_CLK_M3, CLK_DSI_DIV, 1, 1), + DEF_FIXED("M4", R9A07G044_CLK_M4, CLK_DIV_DSI_LPCLK, 1, 1), }, #ifdef CONFIG_CLK_R9A07G054 .drp = { @@ -162,7 +182,7 @@ static const struct { }; static const struct { - struct rzg2l_mod_clk common[62]; + struct rzg2l_mod_clk common[71]; #ifdef CONFIG_CLK_R9A07G054 struct rzg2l_mod_clk drp[0]; #endif @@ -180,7 +200,7 @@ static const struct { 0x52c, 1), DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0, 0x534, 0), - 
DEF_MOD("ostm1_clk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0, + DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0, 0x534, 1), DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0, 0x534, 2), @@ -222,6 +242,24 @@ static const struct { 0x558, 1), DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1, 0x558, 2), + DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1, + 0x568, 0), + DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2, + 0x568, 1), + DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1, + 0x568, 2), + DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2, + 0x568, 3), + DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3, + 0x568, 4), + DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4, + 0x568, 5), + DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0, + 0x56c, 0), + DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT, + 0x56c, 0), + DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3, + 0x56c, 1), DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0, 0x570, 0), DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0, @@ -317,6 +355,10 @@ static struct rzg2l_reset r9a07g044_resets[] = { DEF_RST(R9A07G044_GPU_RESETN, 0x858, 0), DEF_RST(R9A07G044_GPU_AXI_RESETN, 0x858, 1), DEF_RST(R9A07G044_GPU_ACE_RESETN, 0x858, 2), + DEF_RST(R9A07G044_MIPI_DSI_CMN_RSTB, 0x868, 0), + DEF_RST(R9A07G044_MIPI_DSI_ARESET_N, 0x868, 1), + DEF_RST(R9A07G044_MIPI_DSI_PRESET_N, 0x868, 2), + DEF_RST(R9A07G044_LCDC_RESET_N, 0x86c, 0), DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0), DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1), DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2), @@ -376,6 +418,8 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = { /* Resets */ .resets = r9a07g044_resets, .num_resets = R9A07G044_TSU_PRESETN + 1, /* Last reset ID + 1 */ + + .has_clk_mon_regs = true, }; #ifdef CONFIG_CLK_R9A07G054 @@ -398,5 +442,7 @@ const struct rzg2l_cpg_info r9a07g054_cpg_info = { /* Resets */ .resets = r9a07g044_resets, .num_resets = R9A07G054_STPAI_ARESETN + 1, /* Last reset ID + 1 */ + + .has_clk_mon_regs = true, }; #endif diff --git a/drivers/clk/renesas/r9a09g011-cpg.c b/drivers/clk/renesas/r9a09g011-cpg.c new file mode 100644 index 000000000000..40693bb85b80 --- /dev/null +++ b/drivers/clk/renesas/r9a09g011-cpg.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RZ/V2M Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2022 Renesas Electronics Corp. 
+ * + * Based on r9a07g044-cpg.c + */ + +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include <dt-bindings/clock/r9a09g011-cpg.h> + +#include "rzg2l-cpg.h" + +#define RZV2M_SAMPLL4_CLK1 0x104 +#define RZV2M_SAMPLL4_CLK2 0x108 + +#define PLL4_CONF (RZV2M_SAMPLL4_CLK1 << 22 | RZV2M_SAMPLL4_CLK2 << 12) + +#define DIV_A DDIV_PACK(0x200, 0, 3) +#define DIV_B DDIV_PACK(0x204, 0, 2) +#define DIV_E DDIV_PACK(0x204, 8, 1) +#define DIV_W DDIV_PACK(0x328, 0, 3) + +#define SEL_B SEL_PLL_PACK(0x214, 0, 1) +#define SEL_E SEL_PLL_PACK(0x214, 2, 1) +#define SEL_W0 SEL_PLL_PACK(0x32C, 0, 1) + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = 0, + + /* External Input Clocks */ + CLK_EXTAL, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_MAIN_24, + CLK_MAIN_2, + CLK_PLL1, + CLK_PLL2, + CLK_PLL2_800, + CLK_PLL2_400, + CLK_PLL2_200, + CLK_PLL2_100, + CLK_PLL4, + CLK_DIV_A, + CLK_DIV_B, + CLK_DIV_E, + CLK_DIV_W, + CLK_SEL_B, + CLK_SEL_B_D2, + CLK_SEL_E, + CLK_SEL_W0, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +/* Divider tables */ +static const struct clk_div_table dtable_diva[] = { + {0, 1}, + {1, 2}, + {2, 3}, + {3, 4}, + {4, 6}, + {5, 12}, + {6, 24}, + {0, 0}, +}; + +static const struct clk_div_table dtable_divb[] = { + {0, 1}, + {1, 2}, + {2, 4}, + {3, 8}, + {0, 0}, +}; + +static const struct clk_div_table dtable_divw[] = { + {0, 6}, + {1, 7}, + {2, 8}, + {3, 9}, + {4, 10}, + {5, 11}, + {6, 12}, + {0, 0}, +}; + +/* Mux clock tables */ +static const char * const sel_b[] = { ".main", ".divb" }; +static const char * const sel_e[] = { ".main", ".dive" }; +static const char * const sel_w[] = { ".main", ".divw" }; + +static const struct cpg_core_clk r9a09g011_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + + /* Internal Core Clocks */ + DEF_FIXED(".main", CLK_MAIN, CLK_EXTAL, 1, 1), + DEF_FIXED(".main_24", CLK_MAIN_24, CLK_MAIN, 1, 2), + DEF_FIXED(".main_2", CLK_MAIN_2, CLK_MAIN, 1, 24), + DEF_FIXED(".pll1", CLK_PLL1, CLK_MAIN_2, 498, 1), + DEF_FIXED(".pll2", CLK_PLL2, CLK_MAIN_2, 800, 1), + DEF_FIXED(".pll2_800", CLK_PLL2_800, CLK_PLL2, 1, 2), + DEF_FIXED(".pll2_400", CLK_PLL2_400, CLK_PLL2_800, 1, 2), + DEF_FIXED(".pll2_200", CLK_PLL2_200, CLK_PLL2_800, 1, 4), + DEF_FIXED(".pll2_100", CLK_PLL2_100, CLK_PLL2_800, 1, 8), + DEF_SAMPLL(".pll4", CLK_PLL4, CLK_MAIN_2, PLL4_CONF), + + DEF_DIV_RO(".diva", CLK_DIV_A, CLK_PLL1, DIV_A, dtable_diva), + DEF_DIV_RO(".divb", CLK_DIV_B, CLK_PLL2_400, DIV_B, dtable_divb), + DEF_DIV_RO(".dive", CLK_DIV_E, CLK_PLL2_100, DIV_E, NULL), + DEF_DIV_RO(".divw", CLK_DIV_W, CLK_PLL4, DIV_W, dtable_divw), + + DEF_MUX_RO(".selb", CLK_SEL_B, SEL_B, sel_b), + DEF_MUX_RO(".sele", CLK_SEL_E, SEL_E, sel_e), + DEF_MUX(".selw0", CLK_SEL_W0, SEL_W0, sel_w), + + DEF_FIXED(".selb_d2", CLK_SEL_B_D2, CLK_SEL_B, 1, 2), +}; + +static const struct rzg2l_mod_clk r9a09g011_mod_clks[] __initconst = { + DEF_MOD("gic", R9A09G011_GIC_CLK, CLK_SEL_B_D2, 0x400, 5), + DEF_COUPLED("eth_axi", R9A09G011_ETH0_CLK_AXI, CLK_PLL2_200, 0x40c, 8), + DEF_COUPLED("eth_chi", R9A09G011_ETH0_CLK_CHI, CLK_PLL2_100, 0x40c, 8), + DEF_MOD("eth_clk_gptp", R9A09G011_ETH0_GPTP_EXT, CLK_PLL2_100, 0x40c, 9), + DEF_MOD("syc_cnt_clk", R9A09G011_SYC_CNT_CLK, CLK_MAIN_24, 0x41c, 12), + DEF_MOD("urt_pclk", R9A09G011_URT_PCLK, CLK_SEL_E, 0x438, 4), + DEF_MOD("urt0_clk", R9A09G011_URT0_CLK, CLK_SEL_W0, 0x438, 5), + DEF_MOD("ca53", R9A09G011_CA53_CLK, CLK_DIV_A, 0x448, 0), +}; + +static const struct 
rzg2l_reset r9a09g011_resets[] = { + DEF_RST_MON(R9A09G011_ETH0_RST_HW_N, 0x608, 11, 11), + DEF_RST_MON(R9A09G011_SYC_RST_N, 0x610, 9, 13), +}; + +static const unsigned int r9a09g011_crit_mod_clks[] __initconst = { + MOD_CLK_BASE + R9A09G011_CA53_CLK, + MOD_CLK_BASE + R9A09G011_GIC_CLK, + MOD_CLK_BASE + R9A09G011_SYC_CNT_CLK, + MOD_CLK_BASE + R9A09G011_URT_PCLK, +}; + +const struct rzg2l_cpg_info r9a09g011_cpg_info = { + /* Core Clocks */ + .core_clks = r9a09g011_core_clks, + .num_core_clks = ARRAY_SIZE(r9a09g011_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Critical Module Clocks */ + .crit_mod_clks = r9a09g011_crit_mod_clks, + .num_crit_mod_clks = ARRAY_SIZE(r9a09g011_crit_mod_clks), + + /* Module Clocks */ + .mod_clks = r9a09g011_mod_clks, + .num_mod_clks = ARRAY_SIZE(r9a09g011_mod_clks), + .num_hw_mod_clks = R9A09G011_CA53_CLK + 1, + + /* Resets */ + .resets = r9a09g011_resets, + .num_resets = ARRAY_SIZE(r9a09g011_resets), + + .has_clk_mon_regs = false, +}; diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h index 2bc0afadf604..9028bf4295ce 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.h +++ b/drivers/clk/renesas/rcar-gen3-cpg.h @@ -25,7 +25,7 @@ enum rcar_gen3_clk_types { CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */ CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */ CLK_TYPE_GEN3_RPCSRC, - CLK_TYPE_GEN3_E3_RPCSRC, + CLK_TYPE_GEN3_E3_RPCSRC,/* Select parent/divider using RPCCKCR.DIV */ CLK_TYPE_GEN3_RPC, CLK_TYPE_GEN3_RPCD2, @@ -62,6 +62,9 @@ enum rcar_gen3_clk_types { #define DEF_FIXED_RPCSRC_E3(_name, _id, _parent0, _parent1) \ DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \ (_parent0) << 16 | (_parent1), .div = 8) +#define DEF_FIXED_RPCSRC_D3(_name, _id, _parent0, _parent1) \ + DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \ + (_parent0) << 16 | (_parent1), .div = 5) struct rcar_gen3_cpg_pll_config { u8 extal_div; diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c index 54ebf4b3c128..c7ed43d6aa67 100644 --- a/drivers/clk/renesas/rcar-gen4-cpg.c +++ b/drivers/clk/renesas/rcar-gen4-cpg.c @@ -215,6 +215,11 @@ struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev, div = cpg_pll_config->pll3_div; break; + case CLK_TYPE_GEN4_PLL4: + mult = cpg_pll_config->pll4_mult; + div = cpg_pll_config->pll4_div; + break; + case CLK_TYPE_GEN4_PLL5: mult = cpg_pll_config->pll5_mult; div = cpg_pll_config->pll5_div; diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h index afc8c024d538..0b15dcfdca7b 100644 --- a/drivers/clk/renesas/rcar-gen4-cpg.h +++ b/drivers/clk/renesas/rcar-gen4-cpg.h @@ -16,6 +16,7 @@ enum rcar_gen4_clk_types { CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */ CLK_TYPE_GEN4_PLL3, CLK_TYPE_GEN4_PLL5, + CLK_TYPE_GEN4_PLL4, CLK_TYPE_GEN4_PLL6, CLK_TYPE_GEN4_SDSRC, CLK_TYPE_GEN4_SDH, @@ -56,6 +57,8 @@ struct rcar_gen4_cpg_pll_config { u8 pll2_div; u8 pll3_mult; u8 pll3_div; + u8 pll4_mult; + u8 pll4_div; u8 pll5_mult; u8 pll5_div; u8 pll6_mult; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 5d2c3edbaa14..1a0cdf001b2f 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -854,6 +854,12 @@ static const struct of_device_id cpg_mssr_match[] = { .data = &r8a779f0_cpg_mssr_info, }, #endif +#ifdef CONFIG_CLK_R8A779G0 + { + .compatible = "renesas,r8a779g0-cpg-mssr", + .data = &r8a779g0_cpg_mssr_info, + 
}, +#endif { /* sentinel */ } }; diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 16810dd4e6ac..1c3c057d17f5 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -179,6 +179,7 @@ extern const struct cpg_mssr_info r8a77990_cpg_mssr_info; extern const struct cpg_mssr_info r8a77995_cpg_mssr_info; extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info; extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info; +extern const struct cpg_mssr_info r8a779g0_cpg_mssr_info; void __init cpg_mssr_early_init(struct device_node *np, const struct cpg_mssr_info *info); diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 486d0656c58a..e2999ab2b53c 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -27,6 +27,7 @@ #include <linux/pm_domain.h> #include <linux/reset-controller.h> #include <linux/slab.h> +#include <linux/units.h> #include <dt-bindings/clock/renesas-cpg-mssr.h> @@ -56,6 +57,8 @@ #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) +#define MAX_VCLK_FREQ (148500000) + struct sd_hw_data { struct clk_hw hw; u32 conf; @@ -64,6 +67,21 @@ struct sd_hw_data { #define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw) +struct rzg2l_pll5_param { + u32 pl5_fracin; + u8 pl5_refdiv; + u8 pl5_intin; + u8 pl5_postdiv1; + u8 pl5_postdiv2; + u8 pl5_spread; +}; + +struct rzg2l_pll5_mux_dsi_div_param { + u8 clksrc; + u8 dsi_div_a; + u8 dsi_div_b; +}; + /** * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data * @@ -76,8 +94,8 @@ struct sd_hw_data { * @num_mod_clks: Number of Module Clocks in clks[] * @num_resets: Number of Module Resets in info->resets[] * @last_dt_core_clk: ID of the last Core Clock exported to DT - * @notifiers: Notifier chain to save/restore clock state for system resume * @info: Pointer to platform data + * @pll5_mux_dsi_div_params: pll5 mux and dsi div parameters */ struct rzg2l_cpg_priv { struct reset_controller_dev rcdev; @@ -91,8 +109,9 @@ struct rzg2l_cpg_priv { unsigned int num_resets; unsigned int last_dt_core_clk; - struct raw_notifier_head notifiers; const struct rzg2l_cpg_info *info; + + struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params; }; static void rzg2l_cpg_del_clk_provider(void *data) @@ -266,6 +285,406 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core, return clk_hw->clk; } +static unsigned long +rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params, + unsigned long rate) +{ + unsigned long foutpostdiv_rate; + + params->pl5_intin = rate / MEGA; + params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA); + params->pl5_refdiv = 2; + params->pl5_postdiv1 = 1; + params->pl5_postdiv2 = 1; + params->pl5_spread = 0x16; + + foutpostdiv_rate = + EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv * + ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) / + (params->pl5_postdiv1 * params->pl5_postdiv2); + + return foutpostdiv_rate; +} + +struct dsi_div_hw_data { + struct clk_hw hw; + u32 conf; + unsigned long rate; + struct rzg2l_cpg_priv *priv; +}; + +#define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw) + +static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw); + unsigned long rate = dsi_div->rate; + + if (!rate) + rate = parent_rate; + + return rate; +} + +static unsigned long 
rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw, + unsigned long rate) +{ + struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw); + struct rzg2l_cpg_priv *priv = dsi_div->priv; + struct rzg2l_pll5_param params; + unsigned long parent_rate; + + parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate); + + if (priv->mux_dsi_div_params.clksrc) + parent_rate /= 2; + + return parent_rate; +} + +static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + if (req->rate > MAX_VCLK_FREQ) + req->rate = MAX_VCLK_FREQ; + + req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate); + + return 0; +} + +static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw); + struct rzg2l_cpg_priv *priv = dsi_div->priv; + + /* + * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK + * + * Based on the dot clock, the DSI divider clock sets the divider value, + * calculates the pll parameters for generating FOUTPOSTDIV and the clk + * source for the MUX and propagates that info to the parents. + */ + + if (!rate || rate > MAX_VCLK_FREQ) + return -EINVAL; + + dsi_div->rate = rate; + writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN | + (priv->mux_dsi_div_params.dsi_div_a << 0) | + (priv->mux_dsi_div_params.dsi_div_b << 8), + priv->base + CPG_PL5_SDIV); + + return 0; +} + +static const struct clk_ops rzg2l_cpg_dsi_div_ops = { + .recalc_rate = rzg2l_cpg_dsi_div_recalc_rate, + .determine_rate = rzg2l_cpg_dsi_div_determine_rate, + .set_rate = rzg2l_cpg_dsi_div_set_rate, +}; + +static struct clk * __init +rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core, + struct clk **clks, + struct rzg2l_cpg_priv *priv) +{ + struct dsi_div_hw_data *clk_hw_data; + const struct clk *parent; + const char *parent_name; + struct clk_init_data init; + struct clk_hw *clk_hw; + int ret; + + parent = clks[core->parent & 0xffff]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); + if (!clk_hw_data) + return ERR_PTR(-ENOMEM); + + clk_hw_data->priv = priv; + + parent_name = __clk_get_name(parent); + init.name = core->name; + init.ops = &rzg2l_cpg_dsi_div_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = &parent_name; + init.num_parents = 1; + + clk_hw = &clk_hw_data->hw; + clk_hw->init = &init; + + ret = devm_clk_hw_register(priv->dev, clk_hw); + if (ret) + return ERR_PTR(ret); + + return clk_hw->clk; +} + +struct pll5_mux_hw_data { + struct clk_hw hw; + u32 conf; + unsigned long rate; + struct rzg2l_cpg_priv *priv; +}; + +#define to_pll5_mux_hw_data(_hw) container_of(_hw, struct pll5_mux_hw_data, hw) + +static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_hw *parent; + struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw); + struct rzg2l_cpg_priv *priv = hwdata->priv; + + parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc); + req->best_parent_hw = parent; + req->best_parent_rate = req->rate; + + return 0; +} + +static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw); + struct rzg2l_cpg_priv *priv = hwdata->priv; + + /* + * FOUTPOSTDIV--->| + * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK + * |--FOUT1PH0-->| + * + * Based on the dot clock, the DSI divider clock calculates the parent + * rate and clk source for the MUX. 
It propagates that info to + * pll5_4_clk_mux which sets the clock source for DSI divider clock. + */ + + writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index, + priv->base + CPG_OTHERFUNC1_REG); + + return 0; +} + +static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw) +{ + struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw); + struct rzg2l_cpg_priv *priv = hwdata->priv; + + return readl(priv->base + GET_REG_OFFSET(hwdata->conf)); +} + +static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = { + .determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate, + .set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent, + .get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent, +}; + +static struct clk * __init +rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core, + struct rzg2l_cpg_priv *priv) +{ + struct pll5_mux_hw_data *clk_hw_data; + struct clk_init_data init; + struct clk_hw *clk_hw; + int ret; + + clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); + if (!clk_hw_data) + return ERR_PTR(-ENOMEM); + + clk_hw_data->priv = priv; + clk_hw_data->conf = core->conf; + + init.name = core->name; + init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops; + init.flags = CLK_SET_RATE_PARENT; + init.num_parents = core->num_parents; + init.parent_names = core->parent_names; + + clk_hw = &clk_hw_data->hw; + clk_hw->init = &init; + + ret = devm_clk_hw_register(priv->dev, clk_hw); + if (ret) + return ERR_PTR(ret); + + return clk_hw->clk; +} + +struct sipll5 { + struct clk_hw hw; + u32 conf; + unsigned long foutpostdiv_rate; + struct rzg2l_cpg_priv *priv; +}; + +#define to_sipll5(_hw) container_of(_hw, struct sipll5, hw) + +static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw, + unsigned long rate) +{ + struct sipll5 *sipll5 = to_sipll5(hw); + struct rzg2l_cpg_priv *priv = sipll5->priv; + unsigned long vclk; + + vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) * + (priv->mux_dsi_div_params.dsi_div_b + 1)); + + if (priv->mux_dsi_div_params.clksrc) + vclk /= 2; + + return vclk; +} + +static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sipll5 *sipll5 = to_sipll5(hw); + unsigned long pll5_rate = sipll5->foutpostdiv_rate; + + if (!pll5_rate) + pll5_rate = parent_rate; + + return pll5_rate; +} + +static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *parent_rate) +{ + return rate; +} + +static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct sipll5 *sipll5 = to_sipll5(hw); + struct rzg2l_cpg_priv *priv = sipll5->priv; + struct rzg2l_pll5_param params; + unsigned long vclk_rate; + int ret; + u32 val; + + /* + * OSC --> PLL5 --> FOUTPOSTDIV-->| + * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK + * |--FOUT1PH0-->| + * + * Based on the dot clock, the DSI divider clock calculates the parent + * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates + * that info to sipll5 which sets parameters for generating FOUTPOSTDIV. 
+ * + * OSC --> PLL5 --> FOUTPOSTDIV + */ + + if (!rate) + return -EINVAL; + + vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate); + sipll5->foutpostdiv_rate = + rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate); + + /* Put PLL5 into standby mode */ + writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY); + ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val, + !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000); + if (ret) { + dev_err(priv->dev, "failed to release pll5 lock"); + return ret; + } + + /* Output clock setting 1 */ + writel(CPG_SIPLL5_CLK1_POSTDIV1_WEN | CPG_SIPLL5_CLK1_POSTDIV2_WEN | + CPG_SIPLL5_CLK1_REFDIV_WEN | (params.pl5_postdiv1 << 0) | + (params.pl5_postdiv2 << 4) | (params.pl5_refdiv << 8), + priv->base + CPG_SIPLL5_CLK1); + + /* Output clock setting, SSCG modulation value setting 3 */ + writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3); + + /* Output clock setting 4 */ + writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16), + priv->base + CPG_SIPLL5_CLK4); + + /* Output clock setting 5 */ + writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5); + + /* PLL normal mode setting */ + writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN | + CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB, + priv->base + CPG_SIPLL5_STBY); + + /* PLL normal mode transition, output clock stability check */ + ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val, + (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000); + if (ret) { + dev_err(priv->dev, "failed to lock pll5"); + return ret; + } + + return 0; +} + +static const struct clk_ops rzg2l_cpg_sipll5_ops = { + .recalc_rate = rzg2l_cpg_sipll5_recalc_rate, + .round_rate = rzg2l_cpg_sipll5_round_rate, + .set_rate = rzg2l_cpg_sipll5_set_rate, +}; + +static struct clk * __init +rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core, + struct clk **clks, + struct rzg2l_cpg_priv *priv) +{ + const struct clk *parent; + struct clk_init_data init; + const char *parent_name; + struct sipll5 *sipll5; + struct clk_hw *clk_hw; + int ret; + + parent = clks[core->parent & 0xffff]; + if (IS_ERR(parent)) + return ERR_CAST(parent); + + sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL); + if (!sipll5) + return ERR_PTR(-ENOMEM); + + init.name = core->name; + parent_name = __clk_get_name(parent); + init.ops = &rzg2l_cpg_sipll5_ops; + init.flags = 0; + init.parent_names = &parent_name; + init.num_parents = 1; + + sipll5->hw.init = &init; + sipll5->conf = core->conf; + sipll5->priv = priv; + + writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN | + CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY); + + clk_hw = &sipll5->hw; + clk_hw->init = &init; + + ret = devm_clk_hw_register(priv->dev, clk_hw); + if (ret) + return ERR_PTR(ret); + + priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */ + priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */ + priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */ + + return clk_hw->clk; +} + struct pll_clk { struct clk_hw hw; unsigned int conf; @@ -291,7 +710,7 @@ static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw, val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf)); val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf)); mult = MDIV(val1) + KDIV(val1) / 65536; - div = PDIV(val1) * (1 << SDIV(val2)); + div = PDIV(val1) << SDIV(val2); return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div); } @@ -420,6 +839,9 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core, clk = 
rzg2l_cpg_pll_clk_register(core, priv->clks, priv->base, priv); break; + case CLK_TYPE_SIPLL5: + clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv); + break; case CLK_TYPE_DIV: clk = rzg2l_cpg_div_clk_register(core, priv->clks, priv->base, priv); @@ -430,6 +852,12 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core, case CLK_TYPE_SD_MUX: clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv); break; + case CLK_TYPE_PLL5_4_MUX: + clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv); + break; + case CLK_TYPE_DSI_DIV: + clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv); + break; default: goto fail; } @@ -498,6 +926,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) if (!enable) return 0; + if (!priv->info->has_clk_mon_regs) + return 0; + for (i = 1000; i > 0; --i) { if (((readl(priv->base + CLK_MON_R(reg))) & bitmask)) break; @@ -568,7 +999,10 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) if (clock->sibling) return clock->enabled; - value = readl(priv->base + CLK_MON_R(clock->off)); + if (priv->info->has_clk_mon_regs) + value = readl(priv->base + CLK_MON_R(clock->off)); + else + value = readl(priv->base + clock->off); return value & bitmask; } @@ -743,8 +1177,16 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, const struct rzg2l_cpg_info *info = priv->info; unsigned int reg = info->resets[id].off; u32 bitmask = BIT(info->resets[id].bit); + s8 monbit = info->resets[id].monbit; + + if (info->has_clk_mon_regs) { + return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask); + } else if (monbit >= 0) { + u32 monbitmask = BIT(monbit); - return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask); + return !!(readl(priv->base + CPG_RST_MON) & monbitmask); + } + return -ENOTSUPP; } static const struct reset_control_ops rzg2l_cpg_reset_ops = { @@ -947,6 +1389,12 @@ static int __init rzg2l_cpg_probe(struct platform_device *pdev) } static const struct of_device_id rzg2l_cpg_match[] = { +#ifdef CONFIG_CLK_R9A07G043 + { + .compatible = "renesas,r9a07g043-cpg", + .data = &r9a07g043_cpg_info, + }, +#endif #ifdef CONFIG_CLK_R9A07G044 { .compatible = "renesas,r9a07g044-cpg", @@ -959,6 +1407,12 @@ static const struct of_device_id rzg2l_cpg_match[] = { .data = &r9a07g054_cpg_info, }, #endif +#ifdef CONFIG_CLK_R9A09G011 + { + .compatible = "renesas,r9a09g011-cpg", + .data = &r9a09g011_cpg_info, + }, +#endif { /* sentinel */ } }; diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index ce657beaf160..cecbdf5e4f93 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -9,6 +9,12 @@ #ifndef __RENESAS_RZG2L_CPG_H__ #define __RENESAS_RZG2L_CPG_H__ +#define CPG_SIPLL5_STBY (0x140) +#define CPG_SIPLL5_CLK1 (0x144) +#define CPG_SIPLL5_CLK3 (0x14C) +#define CPG_SIPLL5_CLK4 (0x150) +#define CPG_SIPLL5_CLK5 (0x154) +#define CPG_SIPLL5_MON (0x15C) #define CPG_PL1_DDIV (0x200) #define CPG_PL2_DDIV (0x204) #define CPG_PL3A_DDIV (0x208) @@ -18,6 +24,24 @@ #define CPG_PL3_SSEL (0x408) #define CPG_PL6_SSEL (0x414) #define CPG_PL6_ETH_SSEL (0x418) +#define CPG_PL5_SDIV (0x420) +#define CPG_RST_MON (0x680) +#define CPG_OTHERFUNC1_REG (0xBE8) + +#define CPG_SIPLL5_STBY_RESETB BIT(0) +#define CPG_SIPLL5_STBY_RESETB_WEN BIT(16) +#define CPG_SIPLL5_STBY_SSCG_EN_WEN BIT(18) +#define CPG_SIPLL5_STBY_DOWNSPREAD_WEN BIT(20) +#define CPG_SIPLL5_CLK1_POSTDIV1_WEN BIT(16) +#define CPG_SIPLL5_CLK1_POSTDIV2_WEN BIT(20) +#define CPG_SIPLL5_CLK1_REFDIV_WEN BIT(24) +#define CPG_SIPLL5_CLK4_RESV_LSB (0xFF) 
+#define CPG_SIPLL5_MON_PLL5_LOCK BIT(4) + +#define CPG_OTHERFUNC1_REG_RES0_ON_WEN BIT(16) + +#define CPG_PL5_SDIV_DIV_DSI_A_WEN BIT(16) +#define CPG_PL5_SDIV_DIV_DSI_B_WEN BIT(24) #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) @@ -34,6 +58,7 @@ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) #define DIVPL1A DDIV_PACK(CPG_PL1_DDIV, 0, 2) #define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3) +#define DIVDSILPCLK DDIV_PACK(CPG_PL2_DDIV, 12, 2) #define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3) #define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3) #define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3) @@ -43,12 +68,15 @@ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) #define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1) +#define SEL_PLL5_4 SEL_PLL_PACK(CPG_OTHERFUNC1_REG, 0, 1) #define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1) #define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1) #define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2) #define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2) +#define EXTAL_FREQ_IN_MEGA_HZ (24) + /** * Definitions of CPG Core Clocks * @@ -86,6 +114,16 @@ enum clk_types { /* Clock with SD clock source selector */ CLK_TYPE_SD_MUX, + + /* Clock for SIPLL5 */ + CLK_TYPE_SIPLL5, + + /* Clock for PLL5_4 clock source selector */ + CLK_TYPE_PLL5_4_MUX, + + /* Clock for DSI divider */ + CLK_TYPE_DSI_DIV, + }; #define DEF_TYPE(_name, _id, _type...) \ @@ -98,17 +136,36 @@ enum clk_types { DEF_TYPE(_name, _id, CLK_TYPE_IN) #define DEF_FIXED(_name, _id, _parent, _mult, _div) \ DEF_BASE(_name, _id, CLK_TYPE_FF, _parent, .div = _div, .mult = _mult) -#define DEF_DIV(_name, _id, _parent, _conf, _dtable, _flag) \ +#define DEF_DIV(_name, _id, _parent, _conf, _dtable) \ + DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \ + .parent = _parent, .dtable = _dtable, \ + .flag = CLK_DIVIDER_HIWORD_MASK) +#define DEF_DIV_RO(_name, _id, _parent, _conf, _dtable) \ DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \ - .parent = _parent, .dtable = _dtable, .flag = _flag) -#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _flag, \ - _mux_flags) \ + .parent = _parent, .dtable = _dtable, \ + .flag = CLK_DIVIDER_READ_ONLY) +#define DEF_MUX(_name, _id, _conf, _parent_names) \ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \ - .parent_names = _parent_names, .num_parents = _num_parents, \ - .flag = _flag, .mux_flags = _mux_flags) -#define DEF_SD_MUX(_name, _id, _conf, _parent_names, _num_parents) \ + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names), \ + .mux_flags = CLK_MUX_HIWORD_MASK) +#define DEF_MUX_RO(_name, _id, _conf, _parent_names) \ + DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \ + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names), \ + .mux_flags = CLK_MUX_READ_ONLY) +#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \ DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \ - .parent_names = _parent_names, .num_parents = _num_parents) + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names)) +#define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \ + DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent) +#define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \ + DEF_TYPE(_name, _id, CLK_TYPE_PLL5_4_MUX, .conf = _conf, \ + .parent_names = _parent_names, \ + .num_parents = ARRAY_SIZE(_parent_names)) +#define DEF_DSI_DIV(_name, _id, _parent, _flag) \ + DEF_TYPE(_name, _id, CLK_TYPE_DSI_DIV, .parent = _parent, .flag = _flag) /** * struct rzg2l_mod_clk - Module Clocks definitions 
@@ -150,17 +207,22 @@ struct rzg2l_mod_clk { * * @off: register offset * @bit: reset bit + * @monbit: monitor bit in CPG_RST_MON register, -1 if none */ struct rzg2l_reset { u16 off; u8 bit; + s8 monbit; }; -#define DEF_RST(_id, _off, _bit) \ +#define DEF_RST_MON(_id, _off, _bit, _monbit) \ [_id] = { \ .off = (_off), \ - .bit = (_bit) \ + .bit = (_bit), \ + .monbit = (_monbit) \ } +#define DEF_RST(_id, _off, _bit) \ + DEF_RST_MON(_id, _off, _bit, -1) /** * struct rzg2l_cpg_info - SoC-specific CPG Description @@ -180,6 +242,7 @@ struct rzg2l_reset { * @crit_mod_clks: Array with Module Clock IDs of critical clocks that * should not be disabled without a knowledgeable driver * @num_crit_mod_clks: Number of entries in crit_mod_clks[] + * @has_clk_mon_regs: Flag indicating whether the SoC has CLK_MON registers */ struct rzg2l_cpg_info { /* Core Clocks */ @@ -200,9 +263,13 @@ struct rzg2l_cpg_info { /* Critical Module Clocks that should not be disabled */ const unsigned int *crit_mod_clks; unsigned int num_crit_mod_clks; + + bool has_clk_mon_regs; }; +extern const struct rzg2l_cpg_info r9a07g043_cpg_info; extern const struct rzg2l_cpg_info r9a07g044_cpg_info; extern const struct rzg2l_cpg_info r9a07g054_cpg_info; +extern const struct rzg2l_cpg_info r9a09g011_cpg_info; #endif diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 606ae6cd918b..f85902e2590c 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -1591,6 +1591,7 @@ static const char *const rk3568_cru_critical_clocks[] __initconst = { "hclk_php", "pclk_php", "hclk_usb", + "hclk_vo", }; static const char *const rk3568_pmucru_critical_clocks[] __initconst = { diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 17e5d1cb9da2..239d9eead77f 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o +obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c new file mode 100644 index 000000000000..d9e1f8e4a7b4 --- /dev/null +++ b/drivers/clk/samsung/clk-exynosautov9.c @@ -0,0 +1,1733 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Samsung Electronics Co., Ltd. + * Author: Chanho Park <chanho61.park@samsung.com> + * + * Common Clock Framework support for ExynosAuto V9 SoC. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> + +#include <dt-bindings/clock/samsung,exynosautov9.h> + +#include "clk.h" +#include "clk-exynos-arm64.h" + +/* ---- CMU_TOP ------------------------------------------------------------ */ + +/* Register Offset definitions for CMU_TOP (0x1b240000) */ +#define PLL_LOCKTIME_PLL_SHARED0 0x0000 +#define PLL_LOCKTIME_PLL_SHARED1 0x0004 +#define PLL_LOCKTIME_PLL_SHARED2 0x0008 +#define PLL_LOCKTIME_PLL_SHARED3 0x000c +#define PLL_LOCKTIME_PLL_SHARED4 0x0010 +#define PLL_CON0_PLL_SHARED0 0x0100 +#define PLL_CON3_PLL_SHARED0 0x010c +#define PLL_CON0_PLL_SHARED1 0x0140 +#define PLL_CON3_PLL_SHARED1 0x014c +#define PLL_CON0_PLL_SHARED2 0x0180 +#define PLL_CON3_PLL_SHARED2 0x018c +#define PLL_CON0_PLL_SHARED3 0x01c0 +#define PLL_CON3_PLL_SHARED3 0x01cc +#define PLL_CON0_PLL_SHARED4 0x0200 +#define PLL_CON3_PLL_SHARED4 0x020c + +/* MUX */ +#define CLK_CON_MUX_MUX_CLKCMU_ACC_BUS 0x1000 +#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004 +#define CLK_CON_MUX_MUX_CLKCMU_AUD_BUS 0x1008 +#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x100c +#define CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS 0x1010 +#define CLK_CON_MUX_MUX_CLKCMU_BUSMC_BUS 0x1018 +#define CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST 0x101c +#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1020 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER 0x1024 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH 0x102c +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER 0x1030 +#define CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH 0x1034 +#define CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS 0x1040 +#define CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC 0x1044 +#define CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS 0x1048 +#define CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS 0x104c +#define CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS 0x1050 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS 0x1054 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE 0x1058 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS 0x105c +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD 0x1060 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD 0x1064 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS 0x1068 +#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET 0x106c +#define CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD 0x1070 +#define CLK_CON_MUX_MUX_CLKCMU_G2D_G2D 0x1074 +#define CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL 0x1078 +#define CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH 0x107c +#define CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH 0x1080 +#define CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH 0x1084 +#define CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS 0x108c +#define CLK_CON_MUX_MUX_CLKCMU_MFC_MFC 0x1090 +#define CLK_CON_MUX_MUX_CLKCMU_MFC_WFD 0x1094 +#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c +#define CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP 0x1098 +#define CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH 0x109c +#define CLK_CON_MUX_MUX_CLKCMU_NPU_BUS 0x10a0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS 0x10a4 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP 0x10a8 +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS 0x10ac +#define CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP 0x10b0 +#define CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS 0x10b4 +#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10c0 + +/* DIV */ +#define CLK_CON_DIV_CLKCMU_ACC_BUS 0x1800 +#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1804 +#define CLK_CON_DIV_CLKCMU_AUD_BUS 0x1808 +#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x180c +#define CLK_CON_DIV_CLKCMU_BUSC_BUS 0x1810 +#define CLK_CON_DIV_CLKCMU_BUSMC_BUS 0x1818 +#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c +#define CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER 0x1820 +#define 
CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH 0x1828 +#define CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER 0x182c +#define CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH 0x1830 +#define CLK_CON_DIV_CLKCMU_DPTX_BUS 0x183c +#define CLK_CON_DIV_CLKCMU_DPTX_DPGTC 0x1840 +#define CLK_CON_DIV_CLKCMU_DPUM_BUS 0x1844 +#define CLK_CON_DIV_CLKCMU_DPUS0_BUS 0x1848 +#define CLK_CON_DIV_CLKCMU_DPUS1_BUS 0x184c +#define CLK_CON_DIV_CLKCMU_FSYS0_BUS 0x1850 +#define CLK_CON_DIV_CLKCMU_FSYS0_PCIE 0x1854 +#define CLK_CON_DIV_CLKCMU_FSYS1_BUS 0x1858 +#define CLK_CON_DIV_CLKCMU_FSYS1_USBDRD 0x185c +#define CLK_CON_DIV_CLKCMU_FSYS2_BUS 0x1860 +#define CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET 0x1864 +#define CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD 0x1868 +#define CLK_CON_DIV_CLKCMU_G2D_G2D 0x186c +#define CLK_CON_DIV_CLKCMU_G2D_MSCL 0x1870 +#define CLK_CON_DIV_CLKCMU_G3D00_SWITCH 0x1874 +#define CLK_CON_DIV_CLKCMU_G3D01_SWITCH 0x1878 +#define CLK_CON_DIV_CLKCMU_G3D1_SWITCH 0x187c +#define CLK_CON_DIV_CLKCMU_ISPB_BUS 0x1884 +#define CLK_CON_DIV_CLKCMU_MFC_MFC 0x1888 +#define CLK_CON_DIV_CLKCMU_MFC_WFD 0x188c +#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1890 +#define CLK_CON_DIV_CLKCMU_NPU_BUS 0x1894 +#define CLK_CON_DIV_CLKCMU_PERIC0_BUS 0x1898 +#define CLK_CON_DIV_CLKCMU_PERIC0_IP 0x189c +#define CLK_CON_DIV_CLKCMU_PERIC1_BUS 0x18a0 +#define CLK_CON_DIV_CLKCMU_PERIC1_IP 0x18a4 +#define CLK_CON_DIV_CLKCMU_PERIS_BUS 0x18a8 +#define CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST 0x18b4 + +#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18b8 +#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18bc +#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c0 +#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18c4 +#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18c8 +#define CLK_CON_DIV_PLL_SHARED2_DIV2 0x18cc +#define CLK_CON_DIV_PLL_SHARED2_DIV3 0x18d0 +#define CLK_CON_DIV_PLL_SHARED2_DIV4 0x18d4 +#define CLK_CON_DIV_PLL_SHARED4_DIV2 0x18d4 +#define CLK_CON_DIV_PLL_SHARED4_DIV4 0x18d8 + +/* GATE */ +#define CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST 0x2000 +#define CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST 0x2004 +#define CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST 0x2008 +#define CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST 0x2010 +#define CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST 0x2018 +#define CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST 0x2020 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD 0x2024 +#define CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH 0x2028 +#define CLK_CON_GAT_GATE_CLKCMU_ACC_BUS 0x202c +#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2030 +#define CLK_CON_GAT_GATE_CLKCMU_AUD_BUS 0x2034 +#define CLK_CON_GAT_GATE_CLKCMU_AUD_CPU 0x2038 +#define CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS 0x203c +#define CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS 0x2044 +#define CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST 0x2048 +#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x204c +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER 0x2050 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH 0x2058 +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER 0x205c +#define CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH 0x2060 +#define CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS 0x206c +#define CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC 0x2070 +#define CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS 0x2060 +#define CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS 0x2064 +#define CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS 0x207c +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS 0x2080 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE 0x2084 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS 0x2088 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD 0x208c +#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS 0x2090 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET 0x2094 +#define CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD 0x2098 +#define CLK_CON_GAT_GATE_CLKCMU_G2D_G2D 
0x209c +#define CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL 0x20a0 +#define CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH 0x20a4 +#define CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH 0x20a8 +#define CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH 0x20ac +#define CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS 0x20b4 +#define CLK_CON_GAT_GATE_CLKCMU_MFC_MFC 0x20b8 +#define CLK_CON_GAT_GATE_CLKCMU_MFC_WFD 0x20bc +#define CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP 0x20c0 +#define CLK_CON_GAT_GATE_CLKCMU_NPU_BUS 0x20c4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS 0x20c8 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP 0x20cc +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS 0x20d0 +#define CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP 0x20d4 +#define CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS 0x20d8 + +static const unsigned long top_clk_regs[] __initconst = { + PLL_LOCKTIME_PLL_SHARED0, + PLL_LOCKTIME_PLL_SHARED1, + PLL_LOCKTIME_PLL_SHARED2, + PLL_LOCKTIME_PLL_SHARED3, + PLL_LOCKTIME_PLL_SHARED4, + PLL_CON0_PLL_SHARED0, + PLL_CON3_PLL_SHARED0, + PLL_CON0_PLL_SHARED1, + PLL_CON3_PLL_SHARED1, + PLL_CON0_PLL_SHARED2, + PLL_CON3_PLL_SHARED2, + PLL_CON0_PLL_SHARED3, + PLL_CON3_PLL_SHARED3, + PLL_CON0_PLL_SHARED4, + PLL_CON3_PLL_SHARED4, + CLK_CON_MUX_MUX_CLKCMU_ACC_BUS, + CLK_CON_MUX_MUX_CLKCMU_APM_BUS, + CLK_CON_MUX_MUX_CLKCMU_AUD_BUS, + CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, + CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, + CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, + CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS, + CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, + CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS, + CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS, + CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD, + CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS, + CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET, + CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD, + CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, + CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, + CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS, + CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, + CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, + CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, + CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, + CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, + CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, + CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, + CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, + CLK_CON_MUX_MUX_CMU_CMUREF, + CLK_CON_DIV_CLKCMU_ACC_BUS, + CLK_CON_DIV_CLKCMU_APM_BUS, + CLK_CON_DIV_CLKCMU_AUD_BUS, + CLK_CON_DIV_CLKCMU_AUD_CPU, + CLK_CON_DIV_CLKCMU_BUSC_BUS, + CLK_CON_DIV_CLKCMU_BUSMC_BUS, + CLK_CON_DIV_CLKCMU_CORE_BUS, + CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, + CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, + CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, + CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, + CLK_CON_DIV_CLKCMU_DPTX_BUS, + CLK_CON_DIV_CLKCMU_DPTX_DPGTC, + CLK_CON_DIV_CLKCMU_DPUM_BUS, + CLK_CON_DIV_CLKCMU_DPUS0_BUS, + CLK_CON_DIV_CLKCMU_DPUS1_BUS, + CLK_CON_DIV_CLKCMU_FSYS0_BUS, + CLK_CON_DIV_CLKCMU_FSYS0_PCIE, + CLK_CON_DIV_CLKCMU_FSYS1_BUS, + CLK_CON_DIV_CLKCMU_FSYS1_USBDRD, + CLK_CON_DIV_CLKCMU_FSYS2_BUS, + CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET, + CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD, + CLK_CON_DIV_CLKCMU_G2D_G2D, + CLK_CON_DIV_CLKCMU_G2D_MSCL, + CLK_CON_DIV_CLKCMU_G3D00_SWITCH, + 
CLK_CON_DIV_CLKCMU_G3D01_SWITCH, + CLK_CON_DIV_CLKCMU_G3D1_SWITCH, + CLK_CON_DIV_CLKCMU_ISPB_BUS, + CLK_CON_DIV_CLKCMU_MFC_MFC, + CLK_CON_DIV_CLKCMU_MFC_WFD, + CLK_CON_DIV_CLKCMU_MIF_BUSP, + CLK_CON_DIV_CLKCMU_NPU_BUS, + CLK_CON_DIV_CLKCMU_PERIC0_BUS, + CLK_CON_DIV_CLKCMU_PERIC0_IP, + CLK_CON_DIV_CLKCMU_PERIC1_BUS, + CLK_CON_DIV_CLKCMU_PERIC1_IP, + CLK_CON_DIV_CLKCMU_PERIS_BUS, + CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, + CLK_CON_DIV_PLL_SHARED0_DIV2, + CLK_CON_DIV_PLL_SHARED0_DIV3, + CLK_CON_DIV_PLL_SHARED1_DIV2, + CLK_CON_DIV_PLL_SHARED1_DIV3, + CLK_CON_DIV_PLL_SHARED1_DIV4, + CLK_CON_DIV_PLL_SHARED2_DIV2, + CLK_CON_DIV_PLL_SHARED2_DIV3, + CLK_CON_DIV_PLL_SHARED2_DIV4, + CLK_CON_DIV_PLL_SHARED4_DIV2, + CLK_CON_DIV_PLL_SHARED4_DIV4, + CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST, + CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST, + CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST, + CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST, + CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST, + CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD, + CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_ACC_BUS, + CLK_CON_GAT_GATE_CLKCMU_APM_BUS, + CLK_CON_GAT_GATE_CLKCMU_AUD_BUS, + CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, + CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, + CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS, + CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST, + CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER, + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER, + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS, + CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC, + CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS, + CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS, + CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD, + CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS, + CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET, + CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD, + CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, + CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, + CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH, + CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS, + CLK_CON_GAT_GATE_CLKCMU_MFC_MFC, + CLK_CON_GAT_GATE_CLKCMU_MFC_WFD, + CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, + CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS, + CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS, + CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP, + CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS, +}; + +static const struct samsung_pll_clock top_pll_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL), + PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL), + PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL), + PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL), + PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk", + PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL), +}; + +/* List of parent clocks for Muxes in CMU_TOP */ +PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" }; +PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" }; +PNAME(mout_shared2_pll_p) = { "oscclk", "fout_shared2_pll" }; +PNAME(mout_shared3_pll_p) = { "oscclk", "fout_shared3_pll" }; +PNAME(mout_shared4_pll_p) 
= { "oscclk", "fout_shared4_pll" }; + +PNAME(mout_clkcmu_cmu_boost_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_cmu_cmuref_p) = { "oscclk", "dout_cmu_boost" }; +PNAME(mout_clkcmu_acc_bus_p) = { "dout_shared1_div3", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; +PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_aud_cpu_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "fout_shared3_pll" }; +PNAME(mout_clkcmu_aud_bus_p) = { "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared1_div4" }; +PNAME(mout_clkcmu_busc_bus_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "fout_shared3_pll" }; +PNAME(mout_clkcmu_cpucl0_switch_p) = { + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; +PNAME(mout_clkcmu_cpucl0_cluster_p) = { + "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2", + "dout_shared2_div3", "fout_shared3_pll" }; +PNAME(mout_clkcmu_dptx_bus_p) = { "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; +PNAME(mout_clkcmu_dptx_dpgtc_p) = { "oscclk", "dout_shared2_div3", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_dpum_bus_p) = { "dout_shared1_div3", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4", + "dout_shared4_div4", "fout_shared3_pll" }; +PNAME(mout_clkcmu_fsys0_bus_p) = { + "dout_shared4_div2", "dout_shared2_div3", + "dout_shared1_div4", "dout_shared2_div4" }; +PNAME(mout_clkcmu_fsys0_pcie_p) = { "oscclk", "dout_shared2_div4" }; +PNAME(mout_clkcmu_fsys1_bus_p) = { "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_fsys1_usbdrd_p) = { + "oscclk", "dout_shared2_div3", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_fsys1_mmc_card_p) = { + "oscclk", "dout_shared2_div2", + "dout_shared4_div2", "dout_shared2_div3" }; +PNAME(mout_clkcmu_fsys2_ethernet_p) = { + "oscclk", "dout_shared2_div2", + "dout_shared0_div3", "dout_shared2_div3", + "dout_shared1_div4", "fout_shared3_pll" }; +PNAME(mout_clkcmu_g2d_g2d_p) = { "dout_shared2_div2", "dout_shared0_div3", + "dout_shared4_div2", "dout_shared1_div3", + "dout_shared2_div3", "dout_shared1_div4", + "dout_shared2_div4", "dout_shared4_div4" }; +PNAME(mout_clkcmu_g3d0_switch_p) = { "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "dout_shared4_div2" }; +PNAME(mout_clkcmu_g3d1_switch_p) = { "dout_shared2_div2", "dout_shared4_div2", + "dout_shared2_div3", "dout_shared1_div4" }; +PNAME(mout_clkcmu_mif_switch_p) = { "fout_shared0_pll", "fout_shared1_pll", + "fout_shared2_pll", "fout_shared4_pll", + "dout_shared0_div2", "dout_shared1_div2", + "dout_shared2_div2", "fout_shared3_pll" }; +PNAME(mout_clkcmu_npu_bus_p) = { "dout_shared1_div2", "dout_shared2_div2", + "dout_shared0_div3", "dout_shared4_div2", + "dout_shared1_div3", "dout_shared2_div3", + "dout_shared1_div4", "fout_shared3_pll" }; +PNAME(mout_clkcmu_peric0_bus_p) = { "dout_shared2_div3", "dout_shared2_div4" }; + 
+static const struct samsung_mux_clock top_mux_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + MUX(MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p, + PLL_CON0_PLL_SHARED0, 4, 1), + MUX(MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p, + PLL_CON0_PLL_SHARED1, 4, 1), + MUX(MOUT_SHARED2_PLL, "mout_shared2_pll", mout_shared2_pll_p, + PLL_CON0_PLL_SHARED2, 4, 1), + MUX(MOUT_SHARED3_PLL, "mout_shared3_pll", mout_shared3_pll_p, + PLL_CON0_PLL_SHARED3, 4, 1), + MUX(MOUT_SHARED4_PLL, "mout_shared4_pll", mout_shared4_pll_p, + PLL_CON0_PLL_SHARED4, 4, 1), + + /* BOOST */ + MUX(MOUT_CLKCMU_CMU_BOOST, "mout_clkcmu_cmu_boost", + mout_clkcmu_cmu_boost_p, CLK_CON_MUX_MUX_CLKCMU_CMU_BOOST, 0, 2), + MUX(MOUT_CLKCMU_CMU_CMUREF, "mout_clkcmu_cmu_cmuref", + mout_clkcmu_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1), + + /* ACC */ + MUX(MOUT_CLKCMU_ACC_BUS, "mout_clkcmu_acc_bus", mout_clkcmu_acc_bus_p, + CLK_CON_MUX_MUX_CLKCMU_ACC_BUS, 0, 2), + + /* APM */ + MUX(MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus", mout_clkcmu_apm_bus_p, + CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 2), + + /* AUD */ + MUX(MOUT_CLKCMU_AUD_CPU, "mout_clkcmu_aud_cpu", mout_clkcmu_aud_cpu_p, + CLK_CON_MUX_MUX_CLKCMU_AUD_CPU, 0, 3), + MUX(MOUT_CLKCMU_AUD_BUS, "mout_clkcmu_aud_bus", mout_clkcmu_aud_bus_p, + CLK_CON_MUX_MUX_CLKCMU_AUD_BUS, 0, 2), + + /* BUSC */ + MUX(MOUT_CLKCMU_BUSC_BUS, "mout_clkcmu_busc_bus", + mout_clkcmu_busc_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUSC_BUS, 0, 2), + + /* BUSMC */ + MUX(MOUT_CLKCMU_BUSMC_BUS, "mout_clkcmu_busmc_bus", + mout_clkcmu_busc_bus_p, CLK_CON_MUX_MUX_CLKCMU_BUSMC_BUS, 0, 2), + + /* CORE */ + MUX(MOUT_CLKCMU_CORE_BUS, "mout_clkcmu_core_bus", + mout_clkcmu_core_bus_p, CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 3), + + /* CPUCL0 */ + MUX(MOUT_CLKCMU_CPUCL0_SWITCH, "mout_clkcmu_cpucl0_switch", + mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL0_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_CPUCL0_CLUSTER, "mout_clkcmu_cpucl0_cluster", + mout_clkcmu_cpucl0_cluster_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL0_CLUSTER, 0, 3), + + /* CPUCL1 */ + MUX(MOUT_CLKCMU_CPUCL1_SWITCH, "mout_clkcmu_cpucl1_switch", + mout_clkcmu_cpucl0_switch_p, CLK_CON_MUX_MUX_CLKCMU_CPUCL1_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_CPUCL1_CLUSTER, "mout_clkcmu_cpucl1_cluster", + mout_clkcmu_cpucl0_cluster_p, + CLK_CON_MUX_MUX_CLKCMU_CPUCL1_CLUSTER, 0, 3), + + /* DPTX */ + MUX(MOUT_CLKCMU_DPTX_BUS, "mout_clkcmu_dptx_bus", + mout_clkcmu_dptx_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_BUS, 0, 2), + MUX(MOUT_CLKCMU_DPTX_DPGTC, "mout_clkcmu_dptx_dpgtc", + mout_clkcmu_dptx_dpgtc_p, CLK_CON_MUX_MUX_CLKCMU_DPTX_DPGTC, 0, 2), + + /* DPUM */ + MUX(MOUT_CLKCMU_DPUM_BUS, "mout_clkcmu_dpum_bus", + mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUM_BUS, 0, 3), + + /* DPUS */ + MUX(MOUT_CLKCMU_DPUS0_BUS, "mout_clkcmu_dpus0_bus", + mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUS0_BUS, 0, 3), + MUX(MOUT_CLKCMU_DPUS1_BUS, "mout_clkcmu_dpus1_bus", + mout_clkcmu_dpum_bus_p, CLK_CON_MUX_MUX_CLKCMU_DPUS1_BUS, 0, 3), + + /* FSYS0 */ + MUX(MOUT_CLKCMU_FSYS0_BUS, "mout_clkcmu_fsys0_bus", + mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_BUS, 0, 2), + MUX(MOUT_CLKCMU_FSYS0_PCIE, "mout_clkcmu_fsys0_pcie", + mout_clkcmu_fsys0_pcie_p, CLK_CON_MUX_MUX_CLKCMU_FSYS0_PCIE, 0, 1), + + /* FSYS1 */ + MUX(MOUT_CLKCMU_FSYS1_BUS, "mout_clkcmu_fsys1_bus", + mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_BUS, 0, 2), + MUX(MOUT_CLKCMU_FSYS1_USBDRD, "mout_clkcmu_fsys1_usbdrd", + mout_clkcmu_fsys1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS1_USBDRD, + 0, 2), + MUX(MOUT_CLKCMU_FSYS1_MMC_CARD, 
"mout_clkcmu_fsys1_mmc_card", + mout_clkcmu_fsys1_mmc_card_p, + CLK_CON_MUX_MUX_CLKCMU_FSYS1_MMC_CARD, 0, 2), + + /* FSYS2 */ + MUX(MOUT_CLKCMU_FSYS2_BUS, "mout_clkcmu_fsys2_bus", + mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_FSYS2_BUS, 0, 2), + MUX(MOUT_CLKCMU_FSYS2_UFS_EMBD, "mout_clkcmu_fsys2_ufs_embd", + mout_clkcmu_fsys1_usbdrd_p, CLK_CON_MUX_MUX_CLKCMU_FSYS2_UFS_EMBD, + 0, 2), + MUX(MOUT_CLKCMU_FSYS2_ETHERNET, "mout_clkcmu_fsys2_ethernet", + mout_clkcmu_fsys2_ethernet_p, + CLK_CON_MUX_MUX_CLKCMU_FSYS2_ETHERNET, 0, 3), + + /* G2D */ + MUX(MOUT_CLKCMU_G2D_G2D, "mout_clkcmu_g2d_g2d", mout_clkcmu_g2d_g2d_p, + CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 3), + MUX(MOUT_CLKCMU_G2D_MSCL, "mout_clkcmu_g2d_mscl", + mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2), + + /* G3D0 */ + MUX(MOUT_CLKCMU_G3D00_SWITCH, "mout_clkcmu_g3d00_switch", + mout_clkcmu_g3d0_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D00_SWITCH, + 0, 2), + MUX(MOUT_CLKCMU_G3D01_SWITCH, "mout_clkcmu_g3d01_switch", + mout_clkcmu_g3d0_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D01_SWITCH, + 0, 2), + + /* G3D1 */ + MUX(MOUT_CLKCMU_G3D1_SWITCH, "mout_clkcmu_g3d1_switch", + mout_clkcmu_g3d1_switch_p, CLK_CON_MUX_MUX_CLKCMU_G3D1_SWITCH, + 0, 2), + + /* ISPB */ + MUX(MOUT_CLKCMU_ISPB_BUS, "mout_clkcmu_ispb_bus", + mout_clkcmu_acc_bus_p, CLK_CON_MUX_MUX_CLKCMU_ISPB_BUS, 0, 2), + + /* MFC */ + MUX(MOUT_CLKCMU_MFC_MFC, "mout_clkcmu_mfc_mfc", + mout_clkcmu_g3d1_switch_p, CLK_CON_MUX_MUX_CLKCMU_MFC_MFC, 0, 2), + MUX(MOUT_CLKCMU_MFC_WFD, "mout_clkcmu_mfc_wfd", + mout_clkcmu_fsys0_bus_p, CLK_CON_MUX_MUX_CLKCMU_MFC_WFD, 0, 2), + + /* MIF */ + MUX(MOUT_CLKCMU_MIF_SWITCH, "mout_clkcmu_mif_switch", + mout_clkcmu_mif_switch_p, CLK_CON_MUX_MUX_CLKCMU_MIF_SWITCH, 0, 3), + MUX(MOUT_CLKCMU_MIF_BUSP, "mout_clkcmu_mif_busp", + mout_clkcmu_fsys1_bus_p, CLK_CON_MUX_MUX_CLKCMU_MIF_BUSP, 0, 2), + + /* NPU */ + MUX(MOUT_CLKCMU_NPU_BUS, "mout_clkcmu_npu_bus", mout_clkcmu_npu_bus_p, + CLK_CON_MUX_MUX_CLKCMU_NPU_BUS, 0, 3), + + /* PERIC0 */ + MUX(MOUT_CLKCMU_PERIC0_BUS, "mout_clkcmu_peric0_bus", + mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_BUS, 0, 1), + MUX(MOUT_CLKCMU_PERIC0_IP, "mout_clkcmu_peric0_ip", + mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC0_IP, 0, 1), + + /* PERIC1 */ + MUX(MOUT_CLKCMU_PERIC1_BUS, "mout_clkcmu_peric1_bus", + mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_BUS, 0, 1), + MUX(MOUT_CLKCMU_PERIC1_IP, "mout_clkcmu_peric1_ip", + mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIC1_IP, 0, 1), + + /* PERIS */ + MUX(MOUT_CLKCMU_PERIS_BUS, "mout_clkcmu_peris_bus", + mout_clkcmu_peric0_bus_p, CLK_CON_MUX_MUX_CLKCMU_PERIS_BUS, 0, 1), +}; + +static const struct samsung_div_clock top_div_clks[] __initconst = { + /* CMU_TOP_PURECLKCOMP */ + DIV(DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll", + CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2), + DIV(DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll", + CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1), + + DIV(DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll", + CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2), + DIV(DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll", + CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1), + DIV(DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2", + CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1), + + DIV(DOUT_SHARED2_DIV3, "dout_shared2_div3", "mout_shared2_pll", + CLK_CON_DIV_PLL_SHARED2_DIV3, 0, 2), + DIV(DOUT_SHARED2_DIV2, "dout_shared2_div2", "mout_shared2_pll", + CLK_CON_DIV_PLL_SHARED2_DIV2, 0, 1), + DIV(DOUT_SHARED2_DIV4, "dout_shared2_div4", 
"dout_shared2_div2", + CLK_CON_DIV_PLL_SHARED2_DIV4, 0, 1), + + DIV(DOUT_SHARED4_DIV2, "dout_shared4_div2", "mout_shared4_pll", + CLK_CON_DIV_PLL_SHARED4_DIV2, 0, 1), + DIV(DOUT_SHARED4_DIV4, "dout_shared4_div4", "dout_shared4_div2", + CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1), + + /* BOOST */ + DIV(DOUT_CLKCMU_CMU_BOOST, "dout_clkcmu_cmu_boost", + "gout_clkcmu_cmu_boost", CLK_CON_DIV_DIV_CLKCMU_CMU_BOOST, 0, 2), + + /* ACC */ + DIV(DOUT_CLKCMU_ACC_BUS, "dout_clkcmu_acc_bus", "gout_clkcmu_acc_bus", + CLK_CON_DIV_CLKCMU_ACC_BUS, 0, 4), + + /* APM */ + DIV(DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus", "gout_clkcmu_apm_bus", + CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3), + + /* AUD */ + DIV(DOUT_CLKCMU_AUD_CPU, "dout_clkcmu_aud_cpu", "gout_clkcmu_aud_cpu", + CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3), + DIV(DOUT_CLKCMU_AUD_BUS, "dout_clkcmu_aud_bus", "gout_clkcmu_aud_bus", + CLK_CON_DIV_CLKCMU_AUD_BUS, 0, 4), + + /* BUSC */ + DIV(DOUT_CLKCMU_BUSC_BUS, "dout_clkcmu_busc_bus", + "gout_clkcmu_busc_bus", CLK_CON_DIV_CLKCMU_BUSC_BUS, 0, 4), + + /* BUSMC */ + DIV(DOUT_CLKCMU_BUSMC_BUS, "dout_clkcmu_busmc_bus", + "gout_clkcmu_busmc_bus", CLK_CON_DIV_CLKCMU_BUSMC_BUS, 0, 4), + + /* CORE */ + DIV(DOUT_CLKCMU_CORE_BUS, "dout_clkcmu_core_bus", + "gout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4), + + /* CPUCL0 */ + DIV(DOUT_CLKCMU_CPUCL0_SWITCH, "dout_clkcmu_cpucl0_switch", + "gout_clkcmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, + 0, 3), + DIV(DOUT_CLKCMU_CPUCL0_CLUSTER, "dout_clkcmu_cpucl0_cluster", + "gout_clkcmu_cpucl0_cluster", CLK_CON_DIV_CLKCMU_CPUCL0_CLUSTER, + 0, 3), + + /* CPUCL1 */ + DIV(DOUT_CLKCMU_CPUCL1_SWITCH, "dout_clkcmu_cpucl1_switch", + "gout_clkcmu_cpucl1_switch", CLK_CON_DIV_CLKCMU_CPUCL1_SWITCH, + 0, 3), + DIV(DOUT_CLKCMU_CPUCL1_CLUSTER, "dout_clkcmu_cpucl1_cluster", + "gout_clkcmu_cpucl1_cluster", CLK_CON_DIV_CLKCMU_CPUCL1_CLUSTER, + 0, 3), + + /* DPTX */ + DIV(DOUT_CLKCMU_DPTX_BUS, "dout_clkcmu_dptx_bus", + "gout_clkcmu_dptx_bus", CLK_CON_DIV_CLKCMU_DPTX_BUS, 0, 4), + DIV(DOUT_CLKCMU_DPTX_DPGTC, "dout_clkcmu_dptx_dpgtc", + "gout_clkcmu_dptx_dpgtc", CLK_CON_DIV_CLKCMU_DPTX_DPGTC, 0, 3), + + /* DPUM */ + DIV(DOUT_CLKCMU_DPUM_BUS, "dout_clkcmu_dpum_bus", + "gout_clkcmu_dpum_bus", CLK_CON_DIV_CLKCMU_DPUM_BUS, 0, 4), + + /* DPUS */ + DIV(DOUT_CLKCMU_DPUS0_BUS, "dout_clkcmu_dpus0_bus", + "gout_clkcmu_dpus0_bus", CLK_CON_DIV_CLKCMU_DPUS0_BUS, 0, 4), + DIV(DOUT_CLKCMU_DPUS1_BUS, "dout_clkcmu_dpus1_bus", + "gout_clkcmu_dpus1_bus", CLK_CON_DIV_CLKCMU_DPUS1_BUS, 0, 4), + + /* FSYS0 */ + DIV(DOUT_CLKCMU_FSYS0_BUS, "dout_clkcmu_fsys0_bus", + "gout_clkcmu_fsys0_bus", CLK_CON_DIV_CLKCMU_FSYS0_BUS, 0, 4), + + /* FSYS1 */ + DIV(DOUT_CLKCMU_FSYS1_BUS, "dout_clkcmu_fsys1_bus", + "gout_clkcmu_fsys1_bus", CLK_CON_DIV_CLKCMU_FSYS1_BUS, 0, 4), + DIV(DOUT_CLKCMU_FSYS1_USBDRD, "dout_clkcmu_fsys1_usbdrd", + "gout_clkcmu_fsys1_usbdrd", CLK_CON_DIV_CLKCMU_FSYS1_USBDRD, 0, 4), + + /* FSYS2 */ + DIV(DOUT_CLKCMU_FSYS2_BUS, "dout_clkcmu_fsys2_bus", + "gout_clkcmu_fsys2_bus", CLK_CON_DIV_CLKCMU_FSYS2_BUS, 0, 4), + DIV(DOUT_CLKCMU_FSYS2_UFS_EMBD, "dout_clkcmu_fsys2_ufs_embd", + "gout_clkcmu_fsys2_ufs_embd", CLK_CON_DIV_CLKCMU_FSYS2_UFS_EMBD, + 0, 3), + DIV(DOUT_CLKCMU_FSYS2_ETHERNET, "dout_clkcmu_fsys2_ethernet", + "gout_clkcmu_fsys2_ethernet", CLK_CON_DIV_CLKCMU_FSYS2_ETHERNET, + 0, 3), + + /* G2D */ + DIV(DOUT_CLKCMU_G2D_G2D, "dout_clkcmu_g2d_g2d", "gout_clkcmu_g2d_g2d", + CLK_CON_DIV_CLKCMU_G2D_G2D, 0, 4), + DIV(DOUT_CLKCMU_G2D_MSCL, "dout_clkcmu_g2d_mscl", + "gout_clkcmu_g2d_mscl", CLK_CON_DIV_CLKCMU_G2D_MSCL, 0, 4), + 
+ /* G3D0 */ + DIV(DOUT_CLKCMU_G3D00_SWITCH, "dout_clkcmu_g3d00_switch", + "gout_clkcmu_g3d00_switch", CLK_CON_DIV_CLKCMU_G3D00_SWITCH, 0, 3), + DIV(DOUT_CLKCMU_G3D01_SWITCH, "dout_clkcmu_g3d01_switch", + "gout_clkcmu_g3d01_switch", CLK_CON_DIV_CLKCMU_G3D01_SWITCH, 0, 3), + + /* G3D1 */ + DIV(DOUT_CLKCMU_G3D1_SWITCH, "dout_clkcmu_g3d1_switch", + "gout_clkcmu_g3d1_switch", CLK_CON_DIV_CLKCMU_G3D1_SWITCH, 0, 3), + + /* ISPB */ + DIV(DOUT_CLKCMU_ISPB_BUS, "dout_clkcmu_ispb_bus", + "gout_clkcmu_ispb_bus", CLK_CON_DIV_CLKCMU_ISPB_BUS, 0, 4), + + /* MFC */ + DIV(DOUT_CLKCMU_MFC_MFC, "dout_clkcmu_mfc_mfc", "gout_clkcmu_mfc_mfc", + CLK_CON_DIV_CLKCMU_MFC_MFC, 0, 4), + DIV(DOUT_CLKCMU_MFC_WFD, "dout_clkcmu_mfc_wfd", "gout_clkcmu_mfc_wfd", + CLK_CON_DIV_CLKCMU_MFC_WFD, 0, 4), + + /* MIF */ + DIV(DOUT_CLKCMU_MIF_BUSP, "dout_clkcmu_mif_busp", + "gout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 4), + + /* NPU */ + DIV(DOUT_CLKCMU_NPU_BUS, "dout_clkcmu_npu_bus", "gout_clkcmu_npu_bus", + CLK_CON_DIV_CLKCMU_NPU_BUS, 0, 4), + + /* PERIC0 */ + DIV(DOUT_CLKCMU_PERIC0_BUS, "dout_clkcmu_peric0_bus", + "gout_clkcmu_peric0_bus", CLK_CON_DIV_CLKCMU_PERIC0_BUS, 0, 4), + DIV(DOUT_CLKCMU_PERIC0_IP, "dout_clkcmu_peric0_ip", + "gout_clkcmu_peric0_ip", CLK_CON_DIV_CLKCMU_PERIC0_IP, 0, 4), + + /* PERIC1 */ + DIV(DOUT_CLKCMU_PERIC1_BUS, "dout_clkcmu_peric1_bus", + "gout_clkcmu_peric1_bus", CLK_CON_DIV_CLKCMU_PERIC1_BUS, 0, 4), + DIV(DOUT_CLKCMU_PERIC1_IP, "dout_clkcmu_peric1_ip", + "gout_clkcmu_peric1_ip", CLK_CON_DIV_CLKCMU_PERIC1_IP, 0, 4), + + /* PERIS */ + DIV(DOUT_CLKCMU_PERIS_BUS, "dout_clkcmu_peris_bus", + "gout_clkcmu_peris_bus", CLK_CON_DIV_CLKCMU_PERIS_BUS, 0, 4), +}; + +static const struct samsung_fixed_factor_clock top_fixed_factor_clks[] __initconst = { + FFACTOR(DOUT_CLKCMU_FSYS0_PCIE, "dout_clkcmu_fsys0_pcie", + "gout_clkcmu_fsys0_pcie", 1, 4, 0), +}; + +static const struct samsung_gate_clock top_gate_clks[] __initconst = { + /* BOOST */ + GATE(GOUT_CLKCMU_CMU_BOOST, "gout_clkcmu_cmu_boost", + "mout_clkcmu_cmu_boost", CLK_CON_GAT_GATE_CLKCMU_CMU_BOOST, + 21, 0, 0), + + GATE(GOUT_CLKCMU_CPUCL0_BOOST, "gout_clkcmu_cpucl0_boost", + "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CPUCL0_BOOST, 21, 0, 0), + GATE(GOUT_CLKCMU_CPUCL1_BOOST, "gout_clkcmu_cpucl1_boost", + "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CPUCL1_BOOST, 21, 0, 0), + GATE(GOUT_CLKCMU_CORE_BOOST, "gout_clkcmu_core_boost", + "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_CORE_BOOST, 21, 0, 0), + GATE(GOUT_CLKCMU_BUSC_BOOST, "gout_clkcmu_busc_boost", + "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_BUSC_BOOST, 21, 0, 0), + + GATE(GOUT_CLKCMU_BUSMC_BOOST, "gout_clkcmu_busmc_boost", + "dout_cmu_boost", CLK_CON_GAT_CLKCMU_CMU_BUSMC_BOOST, 21, 0, 0), + GATE(GOUT_CLKCMU_MIF_BOOST, "gout_clkcmu_mif_boost", "dout_cmu_boost", + CLK_CON_GAT_CLKCMU_CMU_MIF_BOOST, 21, 0, 0), + + /* ACC */ + GATE(GOUT_CLKCMU_ACC_BUS, "gout_clkcmu_acc_bus", "mout_clkcmu_acc_bus", + CLK_CON_GAT_GATE_CLKCMU_ACC_BUS, 21, 0, 0), + + /* APM */ + GATE(GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus", "mout_clkcmu_apm_bus", + CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, CLK_IGNORE_UNUSED, 0), + + /* AUD */ + GATE(GOUT_CLKCMU_AUD_CPU, "gout_clkcmu_aud_cpu", "mout_clkcmu_aud_cpu", + CLK_CON_GAT_GATE_CLKCMU_AUD_CPU, 21, 0, 0), + GATE(GOUT_CLKCMU_AUD_BUS, "gout_clkcmu_aud_bus", "mout_clkcmu_aud_bus", + CLK_CON_GAT_GATE_CLKCMU_AUD_BUS, 21, 0, 0), + + /* BUSC */ + GATE(GOUT_CLKCMU_BUSC_BUS, "gout_clkcmu_busc_bus", + "mout_clkcmu_busc_bus", CLK_CON_GAT_GATE_CLKCMU_BUSC_BUS, 21, + CLK_IS_CRITICAL, 0), + + /* BUSMC */ 
+ GATE(GOUT_CLKCMU_BUSMC_BUS, "gout_clkcmu_busmc_bus", + "mout_clkcmu_busmc_bus", CLK_CON_GAT_GATE_CLKCMU_BUSMC_BUS, 21, + CLK_IS_CRITICAL, 0), + + /* CORE */ + GATE(GOUT_CLKCMU_CORE_BUS, "gout_clkcmu_core_bus", + "mout_clkcmu_core_bus", CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, + 21, 0, 0), + + /* CPUCL0 */ + GATE(GOUT_CLKCMU_CPUCL0_SWITCH, "gout_clkcmu_cpucl0_switch", + "mout_clkcmu_cpucl0_switch", + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_SWITCH, 21, CLK_IGNORE_UNUSED, 0), + GATE(GOUT_CLKCMU_CPUCL0_CLUSTER, "gout_clkcmu_cpucl0_cluster", + "mout_clkcmu_cpucl0_cluster", + CLK_CON_GAT_GATE_CLKCMU_CPUCL0_CLUSTER, 21, CLK_IGNORE_UNUSED, 0), + + /* CPUCL1 */ + GATE(GOUT_CLKCMU_CPUCL1_SWITCH, "gout_clkcmu_cpucl1_switch", + "mout_clkcmu_cpucl1_switch", + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_SWITCH, 21, CLK_IGNORE_UNUSED, 0), + GATE(GOUT_CLKCMU_CPUCL1_CLUSTER, "gout_clkcmu_cpucl1_cluster", + "mout_clkcmu_cpucl1_cluster", + CLK_CON_GAT_GATE_CLKCMU_CPUCL1_CLUSTER, 21, CLK_IGNORE_UNUSED, 0), + + /* DPTX */ + GATE(GOUT_CLKCMU_DPTX_BUS, "gout_clkcmu_dptx_bus", + "mout_clkcmu_dptx_bus", CLK_CON_GAT_GATE_CLKCMU_DPTX_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_DPTX_DPGTC, "gout_clkcmu_dptx_dpgtc", + "mout_clkcmu_dptx_dpgtc", CLK_CON_GAT_GATE_CLKCMU_DPTX_DPGTC, + 21, 0, 0), + + /* DPUM */ + GATE(GOUT_CLKCMU_DPUM_BUS, "gout_clkcmu_dpum_bus", + "mout_clkcmu_dpum_bus", CLK_CON_GAT_GATE_CLKCMU_DPUM_BUS, + 21, 0, 0), + + /* DPUS */ + GATE(GOUT_CLKCMU_DPUS0_BUS, "gout_clkcmu_dpus0_bus", + "mout_clkcmu_dpus0_bus", CLK_CON_GAT_GATE_CLKCMU_DPUS0_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_DPUS1_BUS, "gout_clkcmu_dpus1_bus", + "mout_clkcmu_dpus1_bus", CLK_CON_GAT_GATE_CLKCMU_DPUS1_BUS, + 21, 0, 0), + + /* FSYS0 */ + GATE(GOUT_CLKCMU_FSYS0_BUS, "gout_clkcmu_fsys0_bus", + "mout_clkcmu_fsys0_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS0_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_FSYS0_PCIE, "gout_clkcmu_fsys0_pcie", + "mout_clkcmu_fsys0_pcie", CLK_CON_GAT_GATE_CLKCMU_FSYS0_PCIE, + 21, 0, 0), + + /* FSYS1 */ + GATE(GOUT_CLKCMU_FSYS1_BUS, "gout_clkcmu_fsys1_bus", + "mout_clkcmu_fsys1_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS1_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_FSYS1_USBDRD, "gout_clkcmu_fsys1_usbdrd", + "mout_clkcmu_fsys1_usbdrd", CLK_CON_GAT_GATE_CLKCMU_FSYS1_USBDRD, + 21, 0, 0), + GATE(GOUT_CLKCMU_FSYS1_MMC_CARD, "gout_clkcmu_fsys1_mmc_card", + "mout_clkcmu_fsys1_mmc_card", + CLK_CON_GAT_GATE_CLKCMU_FSYS1_MMC_CARD, 21, 0, 0), + + /* FSYS2 */ + GATE(GOUT_CLKCMU_FSYS2_BUS, "gout_clkcmu_fsys2_bus", + "mout_clkcmu_fsys2_bus", CLK_CON_GAT_GATE_CLKCMU_FSYS2_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_FSYS2_UFS_EMBD, "gout_clkcmu_fsys2_ufs_embd", + "mout_clkcmu_fsys2_ufs_embd", + CLK_CON_GAT_GATE_CLKCMU_FSYS2_UFS_EMBD, 21, 0, 0), + GATE(GOUT_CLKCMU_FSYS2_ETHERNET, "gout_clkcmu_fsys2_ethernet", + "mout_clkcmu_fsys2_ethernet", + CLK_CON_GAT_GATE_CLKCMU_FSYS2_ETHERNET, 21, 0, 0), + + /* G2D */ + GATE(GOUT_CLKCMU_G2D_G2D, "gout_clkcmu_g2d_g2d", + "mout_clkcmu_g2d_g2d", CLK_CON_GAT_GATE_CLKCMU_G2D_G2D, 21, 0, 0), + GATE(GOUT_CLKCMU_G2D_MSCL, "gout_clkcmu_g2d_mscl", + "mout_clkcmu_g2d_mscl", CLK_CON_GAT_GATE_CLKCMU_G2D_MSCL, + 21, 0, 0), + + /* G3D0 */ + GATE(GOUT_CLKCMU_G3D00_SWITCH, "gout_clkcmu_g3d00_switch", + "mout_clkcmu_g3d00_switch", CLK_CON_GAT_GATE_CLKCMU_G3D00_SWITCH, + 21, 0, 0), + GATE(GOUT_CLKCMU_G3D01_SWITCH, "gout_clkcmu_g3d01_switch", + "mout_clkcmu_g3d01_switch", CLK_CON_GAT_GATE_CLKCMU_G3D01_SWITCH, + 21, 0, 0), + + /* G3D1 */ + GATE(GOUT_CLKCMU_G3D1_SWITCH, "gout_clkcmu_g3d1_switch", + "mout_clkcmu_g3d1_switch", CLK_CON_GAT_GATE_CLKCMU_G3D1_SWITCH, + 21, 0, 0), + + /* ISPB 
*/ + GATE(GOUT_CLKCMU_ISPB_BUS, "gout_clkcmu_ispb_bus", + "mout_clkcmu_ispb_bus", CLK_CON_GAT_GATE_CLKCMU_ISPB_BUS, + 21, 0, 0), + + /* MFC */ + GATE(GOUT_CLKCMU_MFC_MFC, "gout_clkcmu_mfc_mfc", "mout_clkcmu_mfc_mfc", + CLK_CON_GAT_GATE_CLKCMU_MFC_MFC, 21, 0, 0), + GATE(GOUT_CLKCMU_MFC_WFD, "gout_clkcmu_mfc_wfd", "mout_clkcmu_mfc_wfd", + CLK_CON_GAT_GATE_CLKCMU_MFC_WFD, 21, 0, 0), + + /* MIF */ + GATE(GOUT_CLKCMU_MIF_SWITCH, "gout_clkcmu_mif_switch", + "mout_clkcmu_mif_switch", CLK_CON_GAT_GATE_CLKCMU_MIF_SWITCH, + 21, CLK_IGNORE_UNUSED, 0), + GATE(GOUT_CLKCMU_MIF_BUSP, "gout_clkcmu_mif_busp", + "mout_clkcmu_mif_busp", CLK_CON_GAT_GATE_CLKCMU_MIF_BUSP, + 21, CLK_IGNORE_UNUSED, 0), + + /* NPU */ + GATE(GOUT_CLKCMU_NPU_BUS, "gout_clkcmu_npu_bus", "mout_clkcmu_npu_bus", + CLK_CON_GAT_GATE_CLKCMU_NPU_BUS, 21, 0, 0), + + /* PERIC0 */ + GATE(GOUT_CLKCMU_PERIC0_BUS, "gout_clkcmu_peric0_bus", + "mout_clkcmu_peric0_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC0_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_PERIC0_IP, "gout_clkcmu_peric0_ip", + "mout_clkcmu_peric0_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC0_IP, + 21, 0, 0), + + /* PERIC1 */ + GATE(GOUT_CLKCMU_PERIC1_BUS, "gout_clkcmu_peric1_bus", + "mout_clkcmu_peric1_bus", CLK_CON_GAT_GATE_CLKCMU_PERIC1_BUS, + 21, 0, 0), + GATE(GOUT_CLKCMU_PERIC1_IP, "gout_clkcmu_peric1_ip", + "mout_clkcmu_peric1_ip", CLK_CON_GAT_GATE_CLKCMU_PERIC1_IP, + 21, 0, 0), + + /* PERIS */ + GATE(GOUT_CLKCMU_PERIS_BUS, "gout_clkcmu_peris_bus", + "mout_clkcmu_peris_bus", CLK_CON_GAT_GATE_CLKCMU_PERIS_BUS, + 21, CLK_IGNORE_UNUSED, 0), +}; + +static const struct samsung_cmu_info top_cmu_info __initconst = { + .pll_clks = top_pll_clks, + .nr_pll_clks = ARRAY_SIZE(top_pll_clks), + .mux_clks = top_mux_clks, + .nr_mux_clks = ARRAY_SIZE(top_mux_clks), + .div_clks = top_div_clks, + .nr_div_clks = ARRAY_SIZE(top_div_clks), + .fixed_factor_clks = top_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(top_fixed_factor_clks), + .gate_clks = top_gate_clks, + .nr_gate_clks = ARRAY_SIZE(top_gate_clks), + .nr_clk_ids = TOP_NR_CLK, + .clk_regs = top_clk_regs, + .nr_clk_regs = ARRAY_SIZE(top_clk_regs), +}; + +static void __init exynosautov9_cmu_top_init(struct device_node *np) +{ + exynos_arm64_register_cmu(NULL, np, &top_cmu_info); +} + +/* Register CMU_TOP early, as it's a dependency for other early domains */ +CLK_OF_DECLARE(exynosautov9_cmu_top, "samsung,exynosautov9-cmu-top", + exynosautov9_cmu_top_init); + +/* ---- CMU_BUSMC ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_BUSMC (0x1b200000) */ +#define PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER 0x0600 +#define CLK_CON_DIV_DIV_CLK_BUSMC_BUSP 0x1800 +#define CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK 0x2078 +#define CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK 0x2080 + +static const unsigned long busmc_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER, + CLK_CON_DIV_DIV_CLK_BUSMC_BUSP, + CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK, +}; + +/* List of parent clocks for Muxes in CMU_BUSMC */ +PNAME(mout_busmc_bus_user_p) = { "oscclk", "dout_clkcmu_busmc_bus" }; + +static const struct samsung_mux_clock busmc_mux_clks[] __initconst = { + MUX(CLK_MOUT_BUSMC_BUS_USER, "mout_busmc_bus_user", + mout_busmc_bus_user_p, PLL_CON0_MUX_CLKCMU_BUSMC_BUS_USER, 4, 1), +}; + +static const struct samsung_div_clock busmc_div_clks[] __initconst = { + DIV(CLK_DOUT_BUSMC_BUSP, "dout_busmc_busp", "mout_busmc_bus_user", + 
CLK_CON_DIV_DIV_CLK_BUSMC_BUSP, 0, 3), +}; + +static const struct samsung_gate_clock busmc_gate_clks[] __initconst = { + GATE(CLK_GOUT_BUSMC_PDMA0_PCLK, "gout_busmc_pdma0_pclk", + "dout_busmc_busp", + CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_PDMA0_IPCLKPORT_PCLK, 21, + 0, 0), + GATE(CLK_GOUT_BUSMC_SPDMA_PCLK, "gout_busmc_spdma_pclk", + "dout_busmc_busp", + CLK_CON_GAT_GOUT_BLK_BUSMC_UID_QE_SPDMA_IPCLKPORT_PCLK, 21, + 0, 0), +}; + +static const struct samsung_cmu_info busmc_cmu_info __initconst = { + .mux_clks = busmc_mux_clks, + .nr_mux_clks = ARRAY_SIZE(busmc_mux_clks), + .div_clks = busmc_div_clks, + .nr_div_clks = ARRAY_SIZE(busmc_div_clks), + .gate_clks = busmc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(busmc_gate_clks), + .nr_clk_ids = BUSMC_NR_CLK, + .clk_regs = busmc_clk_regs, + .nr_clk_regs = ARRAY_SIZE(busmc_clk_regs), + .clk_name = "dout_clkcmu_busmc_bus", +}; + +/* ---- CMU_CORE ----------------------------------------------------------- */ + +/* Register Offset definitions for CMU_CORE (0x1b030000) */ +#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0600 +#define CLK_CON_MUX_MUX_CORE_CMUREF 0x1000 +#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800 +#define CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK 0x2000 +#define CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK 0x2004 +#define CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK 0x2008 + +static const unsigned long core_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, + CLK_CON_MUX_MUX_CORE_CMUREF, + CLK_CON_DIV_DIV_CLK_CORE_BUSP, + CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK, + CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK, + CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK, +}; + +/* List of parent clocks for Muxes in CMU_CORE */ +PNAME(mout_core_bus_user_p) = { "oscclk", "dout_clkcmu_core_bus" }; + +static const struct samsung_mux_clock core_mux_clks[] __initconst = { + MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p, + PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1), +}; + +static const struct samsung_div_clock core_div_clks[] __initconst = { + DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user", + CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 3), +}; + +static const struct samsung_gate_clock core_gate_clks[] __initconst = { + GATE(CLK_GOUT_CORE_CCI_CLK, "gout_core_cci_clk", "mout_core_bus_user", + CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_CLK, 21, + CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_CORE_CCI_PCLK, "gout_core_cci_pclk", "dout_core_busp", + CLK_CON_GAT_CLK_BLK_CORE_UID_CCI_IPCLKPORT_PCLK, 21, + CLK_IS_CRITICAL, 0), + GATE(CLK_GOUT_CORE_CMU_CORE_PCLK, "gout_core_cmu_core_pclk", + "dout_core_busp", + CLK_CON_GAT_CLK_BLK_CORE_UID_CORE_CMU_CORE_IPCLKPORT_PCLK, 21, + CLK_IS_CRITICAL, 0), +}; + +static const struct samsung_cmu_info core_cmu_info __initconst = { + .mux_clks = core_mux_clks, + .nr_mux_clks = ARRAY_SIZE(core_mux_clks), + .div_clks = core_div_clks, + .nr_div_clks = ARRAY_SIZE(core_div_clks), + .gate_clks = core_gate_clks, + .nr_gate_clks = ARRAY_SIZE(core_gate_clks), + .nr_clk_ids = CORE_NR_CLK, + .clk_regs = core_clk_regs, + .nr_clk_regs = ARRAY_SIZE(core_clk_regs), + .clk_name = "dout_clkcmu_core_bus", +}; + +/* ---- CMU_FSYS2 ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_FSYS2 (0x17c00000) */ +#define PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER 0x0620 +#define PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER 0x0610 +#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK 
0x2098 +#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO 0x209c +#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK 0x20a4 +#define CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO 0x20a8 + +static const unsigned long fsys2_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER, + PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER, + PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER, + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO, + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK, + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO, +}; + +/* List of parent clocks for Muxes in CMU_FSYS2 */ +PNAME(mout_fsys2_bus_user_p) = { "oscclk", "dout_clkcmu_fsys2_bus" }; +PNAME(mout_fsys2_ufs_embd_user_p) = { "oscclk", "dout_clkcmu_fsys2_ufs_embd" }; +PNAME(mout_fsys2_ethernet_user_p) = { "oscclk", "dout_clkcmu_fsys2_ethernet" }; + +static const struct samsung_mux_clock fsys2_mux_clks[] __initconst = { + MUX(CLK_MOUT_FSYS2_BUS_USER, "mout_fsys2_bus_user", + mout_fsys2_bus_user_p, PLL_CON0_MUX_CLKCMU_FSYS2_BUS_USER, 4, 1), + MUX(CLK_MOUT_FSYS2_UFS_EMBD_USER, "mout_fsys2_ufs_embd_user", + mout_fsys2_ufs_embd_user_p, + PLL_CON0_MUX_CLKCMU_FSYS2_UFS_EMBD_USER, 4, 1), + MUX(CLK_MOUT_FSYS2_ETHERNET_USER, "mout_fsys2_ethernet_user", + mout_fsys2_ethernet_user_p, + PLL_CON0_MUX_CLKCMU_FSYS2_ETHERNET_USER, 4, 1), +}; + +static const struct samsung_gate_clock fsys2_gate_clks[] __initconst = { + GATE(CLK_GOUT_FSYS2_UFS_EMBD0_ACLK, "gout_fsys2_ufs_embd0_aclk", + "mout_fsys2_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_ACLK, 21, + 0, 0), + GATE(CLK_GOUT_FSYS2_UFS_EMBD0_UNIPRO, "gout_fsys2_ufs_embd0_unipro", + "mout_fsys2_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD0_IPCLKPORT_I_CLK_UNIPRO, + 21, 0, 0), + GATE(CLK_GOUT_FSYS2_UFS_EMBD1_ACLK, "gout_fsys2_ufs_embd1_aclk", + "mout_fsys2_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_ACLK, 21, + 0, 0), + GATE(CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO, "gout_fsys2_ufs_embd1_unipro", + "mout_fsys2_ufs_embd_user", + CLK_CON_GAT_GOUT_BLK_FSYS2_UID_UFS_EMBD1_IPCLKPORT_I_CLK_UNIPRO, + 21, 0, 0), +}; + +static const struct samsung_cmu_info fsys2_cmu_info __initconst = { + .mux_clks = fsys2_mux_clks, + .nr_mux_clks = ARRAY_SIZE(fsys2_mux_clks), + .gate_clks = fsys2_gate_clks, + .nr_gate_clks = ARRAY_SIZE(fsys2_gate_clks), + .nr_clk_ids = FSYS2_NR_CLK, + .clk_regs = fsys2_clk_regs, + .nr_clk_regs = ARRAY_SIZE(fsys2_clk_regs), + .clk_name = "dout_clkcmu_fsys2_bus", +}; + +/* ---- CMU_PERIC0 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIC0 (0x10200000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER 0x0610 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI 0x1000 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI 0x1004 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI 0x1008 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI 0x100c +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI 0x1010 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI 0x1014 +#define CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C 0x1018 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1800 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1804 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x1808 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x180c +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1810 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 
0x1814 +#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1818 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0 0x2014 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1 0x2018 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2 0x2024 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3 0x2028 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x202c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2030 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2034 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x2038 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x203c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2040 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x201c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x2020 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0 0x2044 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1 0x2048 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3 0x205c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074 +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x204c +#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x2050 + +static const unsigned long peric0_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, + PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, + CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, + CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, + CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI, + CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_1, + 
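	/*
	 * Every register this CMU programs is also listed in its clk_regs
	 * table so the common Samsung CMU code can snapshot and restore the
	 * whole block across suspend/resume.  Conceptually (a rough sketch
	 * with illustrative names, not the exact core implementation):
	 *
	 *	for (i = 0; i < cmu->nr_clk_regs; i++)
	 *		saved[i] = readl(reg_base + cmu->clk_regs[i]);
	 *	...
	 *	for (i = 0; i < cmu->nr_clk_regs; i++)
	 *		writel(saved[i], reg_base + cmu->clk_regs[i]);
	 */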
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10, + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11, +}; + +/* List of parent clocks for Muxes in CMU_PERIC0 */ +PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_clkcmu_peric0_bus" }; +PNAME(mout_peric0_ip_user_p) = { "oscclk", "dout_clkcmu_peric0_ip" }; +PNAME(mout_peric0_usi_p) = { "oscclk", "mout_peric0_ip_user" }; + +static const struct samsung_mux_clock peric0_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user", + mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER, 4, 1), + MUX(CLK_MOUT_PERIC0_IP_USER, "mout_peric0_ip_user", + mout_peric0_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_IP_USER, 4, 1), + /* USI00 ~ USI05 */ + MUX(CLK_MOUT_PERIC0_USI00_USI, "mout_peric0_usi00_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI00_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI01_USI, "mout_peric0_usi01_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI01_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI02_USI, "mout_peric0_usi02_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI02_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI03_USI, "mout_peric0_usi03_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI03_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI04_USI, "mout_peric0_usi04_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI04_USI, 0, 1), + MUX(CLK_MOUT_PERIC0_USI05_USI, "mout_peric0_usi05_usi", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI05_USI, 0, 1), + /* USI_I2C */ + MUX(CLK_MOUT_PERIC0_USI_I2C, "mout_peric0_usi_i2c", + mout_peric0_usi_p, CLK_CON_MUX_MUX_CLK_PERIC0_USI_I2C, 0, 1), +}; + +static const struct samsung_div_clock peric0_div_clks[] __initconst = { + /* USI00 ~ USI05 */ + DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi", + "mout_peric0_usi00_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi", + "mout_peric0_usi01_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi", + "mout_peric0_usi02_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi", + "mout_peric0_usi03_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi", + "mout_peric0_usi04_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI, + 0, 4), + DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi", + "mout_peric0_usi05_usi", CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI, + 0, 4), + /* USI_I2C */ + DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c", + "mout_peric0_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C, 0, 4), +}; + +static const struct samsung_gate_clock peric0_gate_clks[] __initconst = { + /* IPCLK */ + GATE(CLK_GOUT_PERIC0_IPCLK_0, "gout_peric0_ipclk_0", + "dout_peric0_usi00_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_1, "gout_peric0_ipclk_1", + "dout_peric0_usi_i2c", + 
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_2, "gout_peric0_ipclk_2", + "dout_peric0_usi01_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_3, "gout_peric0_ipclk_3", + "dout_peric0_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_4, "gout_peric0_ipclk_4", + "dout_peric0_usi02_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_5, "gout_peric0_ipclk_5", + "dout_peric0_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_6, "gout_peric0_ipclk_6", + "dout_peric0_usi03_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_7, "gout_peric0_ipclk_7", + "dout_peric0_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_8, "gout_peric0_ipclk_8", + "dout_peric0_usi04_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_9, "gout_peric0_ipclk_9", + "dout_peric0_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_10, "gout_peric0_ipclk_10", + "dout_peric0_usi05_usi", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_IPCLK_11, "gout_peric0_ipclk_11", + "dout_peric0_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11, + 21, 0, 0), + + /* PCLK */ + GATE(CLK_GOUT_PERIC0_PCLK_0, "gout_peric0_pclk_0", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_0, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_2, "gout_peric0_pclk_2", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_2, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_3, "gout_peric0_pclk_3", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_3, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_4, "gout_peric0_pclk_4", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_5, "gout_peric0_pclk_5", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_6, "gout_peric0_pclk_6", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_7, "gout_peric0_pclk_7", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_8, "gout_peric0_pclk_8", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_9, "gout_peric0_pclk_9", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_10, "gout_peric0_pclk_10", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10, + 21, 0, 0), + GATE(CLK_GOUT_PERIC0_PCLK_11, "gout_peric0_pclk_11", + "mout_peric0_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11, + 21, 0, 0), +}; + +static const struct samsung_cmu_info peric0_cmu_info __initconst = { + .mux_clks = peric0_mux_clks, + .nr_mux_clks = 
ARRAY_SIZE(peric0_mux_clks), + .div_clks = peric0_div_clks, + .nr_div_clks = ARRAY_SIZE(peric0_div_clks), + .gate_clks = peric0_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks), + .nr_clk_ids = PERIC0_NR_CLK, + .clk_regs = peric0_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs), + .clk_name = "dout_clkcmu_peric0_bus", +}; + +/* ---- CMU_PERIC1 --------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIC1 (0x10800000) */ +#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600 +#define PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER 0x0610 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI 0x1000 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI 0x1004 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI 0x1008 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI 0x100c +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI 0x1010 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI 0x1014 +#define CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C 0x1018 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1800 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1804 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x1808 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x180c +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1810 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1814 +#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x1818 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0 0x2014 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1 0x2018 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2 0x2024 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3 0x2028 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x202c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5 0x2030 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6 0x2034 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7 0x2038 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8 0x203c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9 0x2040 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x201c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2020 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0 0x2044 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1 0x2048 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3 0x205c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2060 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7 0x206c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5 0x2064 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6 0x2068 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8 0x2070 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9 0x2074 +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x204c +#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2050 + +static const unsigned long peric1_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, + PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, + CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, + 
CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, + CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, + CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, + CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_1, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10, + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11, +}; + +/* List of parent clocks for Muxes in CMU_PERIC1 */ +PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_clkcmu_peric1_bus" }; +PNAME(mout_peric1_ip_user_p) = { "oscclk", "dout_clkcmu_peric1_ip" }; +PNAME(mout_peric1_usi_p) = { "oscclk", "mout_peric1_ip_user" }; + +static const struct samsung_mux_clock peric1_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user", + mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER, 4, 1), + MUX(CLK_MOUT_PERIC1_IP_USER, "mout_peric1_ip_user", + mout_peric1_ip_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_IP_USER, 4, 1), + /* USI06 ~ USI11 */ + MUX(CLK_MOUT_PERIC1_USI06_USI, "mout_peric1_usi06_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI06_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI07_USI, "mout_peric1_usi07_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI07_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI08_USI, "mout_peric1_usi08_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI08_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI09_USI, "mout_peric1_usi09_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI09_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI10_USI, "mout_peric1_usi10_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI10_USI, 0, 1), + MUX(CLK_MOUT_PERIC1_USI11_USI, "mout_peric1_usi11_usi", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI11_USI, 0, 1), + /* USI_I2C */ + MUX(CLK_MOUT_PERIC1_USI_I2C, "mout_peric1_usi_i2c", + mout_peric1_usi_p, CLK_CON_MUX_MUX_CLK_PERIC1_USI_I2C, 0, 1), +}; + +static const struct samsung_div_clock peric1_div_clks[] __initconst = { + /* USI06 ~ USI11 */ + DIV(CLK_DOUT_PERIC1_USI06_USI, 
"dout_peric1_usi06_usi", + "mout_peric1_usi06_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi", + "mout_peric1_usi07_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi", + "mout_peric1_usi08_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi", + "mout_peric1_usi09_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi", + "mout_peric1_usi10_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, + 0, 4), + DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi", + "mout_peric1_usi11_usi", CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, + 0, 4), + /* USI_I2C */ + DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c", + "mout_peric1_usi_i2c", CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C, 0, 4), +}; + +static const struct samsung_gate_clock peric1_gate_clks[] __initconst = { + /* IPCLK */ + GATE(CLK_GOUT_PERIC1_IPCLK_0, "gout_peric1_ipclk_0", + "dout_peric1_usi06_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_0, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_1, "gout_peric1_ipclk_1", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_2, "gout_peric1_ipclk_2", + "dout_peric1_usi07_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_3, "gout_peric1_ipclk_3", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_4, "gout_peric1_ipclk_4", + "dout_peric1_usi08_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_5, "gout_peric1_ipclk_5", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_6, "gout_peric1_ipclk_6", + "dout_peric1_usi09_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_7, "gout_peric1_ipclk_7", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_7, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_8, "gout_peric1_ipclk_8", + "dout_peric1_usi10_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_9, "gout_peric1_ipclk_9", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_9, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_10, "gout_peric1_ipclk_10", + "dout_peric1_usi11_usi", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_IPCLK_11, "gout_peric1_ipclk_11", + "dout_peric1_usi_i2c", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11, + 21, 0, 0), + + /* PCLK */ + GATE(CLK_GOUT_PERIC1_PCLK_0, "gout_peric1_pclk_0", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_0, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_2, "gout_peric1_pclk_2", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_2, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_3, "gout_peric1_pclk_3", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_3, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_4, "gout_peric1_pclk_4", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4, + 21, 0, 0), + 
GATE(CLK_GOUT_PERIC1_PCLK_5, "gout_peric1_pclk_5", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_5, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_6, "gout_peric1_pclk_6", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_6, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_7, "gout_peric1_pclk_7", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_7, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_8, "gout_peric1_pclk_8", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_8, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_9, "gout_peric1_pclk_9", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_9, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_10, "gout_peric1_pclk_10", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10, + 21, 0, 0), + GATE(CLK_GOUT_PERIC1_PCLK_11, "gout_peric1_pclk_11", + "mout_peric1_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11, + 21, 0, 0), +}; + +static const struct samsung_cmu_info peric1_cmu_info __initconst = { + .mux_clks = peric1_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks), + .div_clks = peric1_div_clks, + .nr_div_clks = ARRAY_SIZE(peric1_div_clks), + .gate_clks = peric1_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks), + .nr_clk_ids = PERIC1_NR_CLK, + .clk_regs = peric1_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs), + .clk_name = "dout_clkcmu_peric1_bus", +}; + +/* ---- CMU_PERIS ---------------------------------------------------------- */ + +/* Register Offset definitions for CMU_PERIS (0x10020000) */ +#define PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER 0x0600 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK 0x2058 +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK 0x205c +#define CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK 0x2060 + +static const unsigned long peris_clk_regs[] __initconst = { + PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK, + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK, +}; + +/* List of parent clocks for Muxes in CMU_PERIS */ +PNAME(mout_peris_bus_user_p) = { "oscclk", "dout_clkcmu_peris_bus" }; + +static const struct samsung_mux_clock peris_mux_clks[] __initconst = { + MUX(CLK_MOUT_PERIS_BUS_USER, "mout_peris_bus_user", + mout_peris_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIS_BUS_USER, 4, 1), +}; + +static const struct samsung_gate_clock peris_gate_clks[] __initconst = { + GATE(CLK_GOUT_SYSREG_PERIS_PCLK, "gout_sysreg_peris_pclk", + "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_SYSREG_PERIS_IPCLKPORT_PCLK, + 21, CLK_IGNORE_UNUSED, 0), + GATE(CLK_GOUT_WDT_CLUSTER0, "gout_wdt_cluster0", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER0_IPCLKPORT_PCLK, + 21, 0, 0), + GATE(CLK_GOUT_WDT_CLUSTER1, "gout_wdt_cluster1", "mout_peris_bus_user", + CLK_CON_GAT_GOUT_BLK_PERIS_UID_WDT_CLUSTER1_IPCLKPORT_PCLK, + 21, 0, 0), +}; + +static const struct samsung_cmu_info peris_cmu_info __initconst = { + .mux_clks = peris_mux_clks, + .nr_mux_clks = ARRAY_SIZE(peris_mux_clks), + .gate_clks = peris_gate_clks, + .nr_gate_clks = ARRAY_SIZE(peris_gate_clks), + .nr_clk_ids = PERIS_NR_CLK, + .clk_regs = peris_clk_regs, + .nr_clk_regs = ARRAY_SIZE(peris_clk_regs), + .clk_name = "dout_clkcmu_peris_bus", +}; + +static int __init 
exynosautov9_cmu_probe(struct platform_device *pdev) +{ + const struct samsung_cmu_info *info; + struct device *dev = &pdev->dev; + + info = of_device_get_match_data(dev); + exynos_arm64_register_cmu(dev, dev->of_node, info); + + return 0; +} + +static const struct of_device_id exynosautov9_cmu_of_match[] = { + { + .compatible = "samsung,exynosautov9-cmu-busmc", + .data = &busmc_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-core", + .data = &core_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-fsys2", + .data = &fsys2_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-peric0", + .data = &peric0_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-peric1", + .data = &peric1_cmu_info, + }, { + .compatible = "samsung,exynosautov9-cmu-peris", + .data = &peris_cmu_info, + }, { + }, +}; + +static struct platform_driver exynosautov9_cmu_driver __refdata = { + .driver = { + .name = "exynosautov9-cmu", + .of_match_table = exynosautov9_cmu_of_match, + .suppress_bind_attrs = true, + }, + .probe = exynosautov9_cmu_probe, +}; + +static int __init exynosautov9_cmu_init(void) +{ + return platform_driver_register(&exynosautov9_cmu_driver); +} +core_initcall(exynosautov9_cmu_init); diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile new file mode 100644 index 000000000000..95bd2230bba0 --- /dev/null +++ b/drivers/clk/stm32/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c new file mode 100644 index 000000000000..45a279e73779 --- /dev/null +++ b/drivers/clk/stm32/clk-stm32-core.c @@ -0,0 +1,695 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "clk-stm32-core.h" +#include "reset-stm32.h" + +static DEFINE_SPINLOCK(rlock); + +static int stm32_rcc_clock_init(struct device *dev, + const struct of_device_id *match, + void __iomem *base) +{ + const struct stm32_rcc_match_data *data = match->data; + struct clk_hw_onecell_data *clk_data = data->hw_clks; + struct device_node *np = dev_of_node(dev); + struct clk_hw **hws; + int n, max_binding; + + max_binding = data->maxbinding; + + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding), GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + clk_data->num = max_binding; + + hws = clk_data->hws; + + for (n = 0; n < max_binding; n++) + hws[n] = ERR_PTR(-ENOENT); + + for (n = 0; n < data->num_clocks; n++) { + const struct clock_config *cfg_clock = &data->tab_clocks[n]; + struct clk_hw *hw = ERR_PTR(-ENOENT); + + if (data->check_security && + data->check_security(base, cfg_clock)) + continue; + + if (cfg_clock->func) + hw = (*cfg_clock->func)(dev, data, base, &rlock, + cfg_clock); + + if (IS_ERR(hw)) { + dev_err(dev, "Can't register clk %d: %ld\n", n, + PTR_ERR(hw)); + return PTR_ERR(hw); + } + + if (cfg_clock->id != NO_ID) + hws[cfg_clock->id] = hw; + } + + return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); +} + +int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data, + void __iomem *base) +{ + const struct of_device_id *match; + int err; + + match = of_match_node(match_data, dev_of_node(dev)); + if (!match) { + dev_err(dev, "match data not found\n"); + return -ENODEV; + } + + /* RCC Reset Configuration */ + err = stm32_rcc_reset_init(dev, match, base); + if (err) { + pr_err("stm32 reset failed to initialize\n"); + return err; + } + + /* RCC Clock Configuration */ + err = stm32_rcc_clock_init(dev, match, base); + if (err) { + pr_err("stm32 clock failed to initialize\n"); + return err; + } + + return 0; +} + +static u8 stm32_mux_get_parent(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 mux_id) +{ + const struct stm32_mux_cfg *mux = &data->muxes[mux_id]; + u32 mask = BIT(mux->width) - 1; + u32 val; + + val = readl(base + mux->offset) >> mux->shift; + val &= mask; + + return val; +} + +static int stm32_mux_set_parent(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 mux_id, u8 index) +{ + const struct stm32_mux_cfg *mux = &data->muxes[mux_id]; + + u32 mask = BIT(mux->width) - 1; + u32 reg = readl(base + mux->offset); + u32 val = index << mux->shift; + + reg &= ~(mask << mux->shift); + reg |= val; + + writel(reg, base + mux->offset); + + return 0; +} + +static void stm32_gate_endisable(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 gate_id, int enable) +{ + const struct stm32_gate_cfg *gate = &data->gates[gate_id]; + void __iomem *addr = base + gate->offset; + + if (enable) { + if (data->gate_cpt[gate_id]++ > 0) + return; + + if (gate->set_clr != 0) + writel(BIT(gate->bit_idx), addr); + else + writel(readl(addr) | BIT(gate->bit_idx), addr); + } else { + if (--data->gate_cpt[gate_id] > 0) + return; + + if (gate->set_clr != 0) + writel(BIT(gate->bit_idx), addr + gate->set_clr); + else + writel(readl(addr) & ~BIT(gate->bit_idx), addr); + } +} + +static void stm32_gate_disable_unused(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 gate_id) +{ + 
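	/*
	 * A single gate can back more than one clock, so stm32_gate_endisable()
	 * above keeps a per-gate reference count in data->gate_cpt[]: the
	 * hardware bit is only set on the 0 -> 1 transition and only cleared
	 * again once the count drops back to 0.  This disable_unused variant
	 * therefore bails out while any user still holds the gate, and
	 * otherwise clears the bit the same way, through the write-1-to-clear
	 * register when set_clr is non-zero.
	 */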
const struct stm32_gate_cfg *gate = &data->gates[gate_id]; + void __iomem *addr = base + gate->offset; + + if (data->gate_cpt[gate_id] > 0) + return; + + if (gate->set_clr != 0) + writel(BIT(gate->bit_idx), addr + gate->set_clr); + else + writel(readl(addr) & ~BIT(gate->bit_idx), addr); +} + +static int stm32_gate_is_enabled(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 gate_id) +{ + const struct stm32_gate_cfg *gate = &data->gates[gate_id]; + + return (readl(base + gate->offset) & BIT(gate->bit_idx)) != 0; +} + +static unsigned int _get_table_div(const struct clk_div_table *table, + unsigned int val) +{ + const struct clk_div_table *clkt; + + for (clkt = table; clkt->div; clkt++) + if (clkt->val == val) + return clkt->div; + return 0; +} + +static unsigned int _get_div(const struct clk_div_table *table, + unsigned int val, unsigned long flags, u8 width) +{ + if (flags & CLK_DIVIDER_ONE_BASED) + return val; + if (flags & CLK_DIVIDER_POWER_OF_TWO) + return 1 << val; + if (table) + return _get_table_div(table, val); + return val + 1; +} + +static unsigned long stm32_divider_get_rate(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 div_id, + unsigned long parent_rate) +{ + const struct stm32_div_cfg *divider = &data->dividers[div_id]; + unsigned int val; + unsigned int div; + + val = readl(base + divider->offset) >> divider->shift; + val &= clk_div_mask(divider->width); + div = _get_div(divider->table, val, divider->flags, divider->width); + + if (!div) { + WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO), + "%d: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", + div_id); + return parent_rate; + } + + return DIV_ROUND_UP_ULL((u64)parent_rate, div); +} + +static int stm32_divider_set_rate(void __iomem *base, + struct clk_stm32_clock_data *data, + u16 div_id, unsigned long rate, + unsigned long parent_rate) +{ + const struct stm32_div_cfg *divider = &data->dividers[div_id]; + int value; + u32 val; + + value = divider_get_val(rate, parent_rate, divider->table, + divider->width, divider->flags); + if (value < 0) + return value; + + if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { + val = clk_div_mask(divider->width) << (divider->shift + 16); + } else { + val = readl(base + divider->offset); + val &= ~(clk_div_mask(divider->width) << divider->shift); + } + + val |= (u32)value << divider->shift; + + writel(val, base + divider->offset); + + return 0; +} + +static u8 clk_stm32_mux_get_parent(struct clk_hw *hw) +{ + struct clk_stm32_mux *mux = to_clk_stm32_mux(hw); + + return stm32_mux_get_parent(mux->base, mux->clock_data, mux->mux_id); +} + +static int clk_stm32_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_stm32_mux *mux = to_clk_stm32_mux(hw); + unsigned long flags = 0; + + spin_lock_irqsave(mux->lock, flags); + + stm32_mux_set_parent(mux->base, mux->clock_data, mux->mux_id, index); + + spin_unlock_irqrestore(mux->lock, flags); + + return 0; +} + +const struct clk_ops clk_stm32_mux_ops = { + .get_parent = clk_stm32_mux_get_parent, + .set_parent = clk_stm32_mux_set_parent, +}; + +static void clk_stm32_gate_endisable(struct clk_hw *hw, int enable) +{ + struct clk_stm32_gate *gate = to_clk_stm32_gate(hw); + unsigned long flags = 0; + + spin_lock_irqsave(gate->lock, flags); + + stm32_gate_endisable(gate->base, gate->clock_data, gate->gate_id, enable); + + spin_unlock_irqrestore(gate->lock, flags); +} + +static int clk_stm32_gate_enable(struct clk_hw *hw) +{ + clk_stm32_gate_endisable(hw, 1); + + return 0; +} + +static void clk_stm32_gate_disable(struct 
clk_hw *hw) +{ + clk_stm32_gate_endisable(hw, 0); +} + +static int clk_stm32_gate_is_enabled(struct clk_hw *hw) +{ + struct clk_stm32_gate *gate = to_clk_stm32_gate(hw); + + return stm32_gate_is_enabled(gate->base, gate->clock_data, gate->gate_id); +} + +static void clk_stm32_gate_disable_unused(struct clk_hw *hw) +{ + struct clk_stm32_gate *gate = to_clk_stm32_gate(hw); + unsigned long flags = 0; + + spin_lock_irqsave(gate->lock, flags); + + stm32_gate_disable_unused(gate->base, gate->clock_data, gate->gate_id); + + spin_unlock_irqrestore(gate->lock, flags); +} + +const struct clk_ops clk_stm32_gate_ops = { + .enable = clk_stm32_gate_enable, + .disable = clk_stm32_gate_disable, + .is_enabled = clk_stm32_gate_is_enabled, + .disable_unused = clk_stm32_gate_disable_unused, +}; + +static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_stm32_div *div = to_clk_stm32_divider(hw); + unsigned long flags = 0; + int ret; + + if (div->div_id == NO_STM32_DIV) + return rate; + + spin_lock_irqsave(div->lock, flags); + + ret = stm32_divider_set_rate(div->base, div->clock_data, div->div_id, rate, parent_rate); + + spin_unlock_irqrestore(div->lock, flags); + + return ret; +} + +static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_stm32_div *div = to_clk_stm32_divider(hw); + const struct stm32_div_cfg *divider; + + if (div->div_id == NO_STM32_DIV) + return rate; + + divider = &div->clock_data->dividers[div->div_id]; + + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + u32 val; + + val = readl(div->base + divider->offset) >> divider->shift; + val &= clk_div_mask(divider->width); + + return divider_ro_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags, + val); + } + + return divider_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, divider->table, + divider->width, divider->flags); +} + +static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_stm32_div *div = to_clk_stm32_divider(hw); + + if (div->div_id == NO_STM32_DIV) + return parent_rate; + + return stm32_divider_get_rate(div->base, div->clock_data, div->div_id, parent_rate); +} + +const struct clk_ops clk_stm32_divider_ops = { + .recalc_rate = clk_stm32_divider_recalc_rate, + .round_rate = clk_stm32_divider_round_rate, + .set_rate = clk_stm32_divider_set_rate, +}; + +static int clk_stm32_composite_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + unsigned long flags = 0; + int ret; + + if (composite->div_id == NO_STM32_DIV) + return rate; + + spin_lock_irqsave(composite->lock, flags); + + ret = stm32_divider_set_rate(composite->base, composite->clock_data, + composite->div_id, rate, parent_rate); + + spin_unlock_irqrestore(composite->lock, flags); + + return ret; +} + +static unsigned long clk_stm32_composite_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + if (composite->div_id == NO_STM32_DIV) + return parent_rate; + + return stm32_divider_get_rate(composite->base, composite->clock_data, + composite->div_id, parent_rate); +} + +static long clk_stm32_composite_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_stm32_composite *composite = 
to_clk_stm32_composite(hw); + + const struct stm32_div_cfg *divider; + + if (composite->div_id == NO_STM32_DIV) + return rate; + + divider = &composite->clock_data->dividers[composite->div_id]; + + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + u32 val; + + val = readl(composite->base + divider->offset) >> divider->shift; + val &= clk_div_mask(divider->width); + + return divider_ro_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags, + val); + } + + return divider_round_rate_parent(hw, clk_hw_get_parent(hw), + rate, prate, divider->table, + divider->width, divider->flags); +} + +static u8 clk_stm32_composite_get_parent(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + return stm32_mux_get_parent(composite->base, composite->clock_data, composite->mux_id); +} + +static int clk_stm32_composite_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + unsigned long flags = 0; + + spin_lock_irqsave(composite->lock, flags); + + stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, index); + + spin_unlock_irqrestore(composite->lock, flags); + + if (composite->clock_data->is_multi_mux) { + struct clk_hw *other_mux_hw = composite->clock_data->is_multi_mux(hw); + + if (other_mux_hw) { + struct clk_hw *hwp = clk_hw_get_parent_by_index(hw, index); + + clk_hw_reparent(other_mux_hw, hwp); + } + } + + return 0; +} + +static int clk_stm32_composite_is_enabled(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + if (composite->gate_id == NO_STM32_GATE) + return (__clk_get_enable_count(hw->clk) > 0); + + return stm32_gate_is_enabled(composite->base, composite->clock_data, composite->gate_id); +} + +#define MUX_SAFE_POSITION 0 + +static int clk_stm32_has_safe_mux(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + const struct stm32_mux_cfg *mux = &composite->clock_data->muxes[composite->mux_id]; + + return !!(mux->flags & MUX_SAFE); +} + +static void clk_stm32_set_safe_position_mux(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + if (!clk_stm32_composite_is_enabled(hw)) { + unsigned long flags = 0; + + if (composite->clock_data->is_multi_mux) { + struct clk_hw *other_mux_hw = NULL; + + other_mux_hw = composite->clock_data->is_multi_mux(hw); + + if (!other_mux_hw || clk_stm32_composite_is_enabled(other_mux_hw)) + return; + } + + spin_lock_irqsave(composite->lock, flags); + + stm32_mux_set_parent(composite->base, composite->clock_data, + composite->mux_id, MUX_SAFE_POSITION); + + spin_unlock_irqrestore(composite->lock, flags); + } +} + +static void clk_stm32_safe_restore_position_mux(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + int sel = clk_hw_get_parent_index(hw); + unsigned long flags = 0; + + spin_lock_irqsave(composite->lock, flags); + + stm32_mux_set_parent(composite->base, composite->clock_data, composite->mux_id, sel); + + spin_unlock_irqrestore(composite->lock, flags); +} + +static void clk_stm32_composite_gate_endisable(struct clk_hw *hw, int enable) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + unsigned long flags = 0; + + spin_lock_irqsave(composite->lock, flags); + + stm32_gate_endisable(composite->base, composite->clock_data, composite->gate_id, enable); + + 
spin_unlock_irqrestore(composite->lock, flags); +} + +static int clk_stm32_composite_gate_enable(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + if (composite->gate_id == NO_STM32_GATE) + return 0; + + clk_stm32_composite_gate_endisable(hw, 1); + + if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw)) + clk_stm32_safe_restore_position_mux(hw); + + return 0; +} + +static void clk_stm32_composite_gate_disable(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + + if (composite->gate_id == NO_STM32_GATE) + return; + + clk_stm32_composite_gate_endisable(hw, 0); + + if (composite->mux_id != NO_STM32_MUX && clk_stm32_has_safe_mux(hw)) + clk_stm32_set_safe_position_mux(hw); +} + +static void clk_stm32_composite_disable_unused(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + unsigned long flags = 0; + + if (composite->gate_id == NO_STM32_GATE) + return; + + spin_lock_irqsave(composite->lock, flags); + + stm32_gate_disable_unused(composite->base, composite->clock_data, composite->gate_id); + + spin_unlock_irqrestore(composite->lock, flags); +} + +const struct clk_ops clk_stm32_composite_ops = { + .set_rate = clk_stm32_composite_set_rate, + .recalc_rate = clk_stm32_composite_recalc_rate, + .round_rate = clk_stm32_composite_round_rate, + .get_parent = clk_stm32_composite_get_parent, + .set_parent = clk_stm32_composite_set_parent, + .enable = clk_stm32_composite_gate_enable, + .disable = clk_stm32_composite_gate_disable, + .is_enabled = clk_stm32_composite_is_enabled, + .disable_unused = clk_stm32_composite_disable_unused, +}; + +struct clk_hw *clk_stm32_mux_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg) +{ + struct clk_stm32_mux *mux = cfg->clock_cfg; + struct clk_hw *hw = &mux->hw; + int err; + + mux->base = base; + mux->lock = lock; + mux->clock_data = data->clock_data; + + err = clk_hw_register(dev, hw); + if (err) + return ERR_PTR(err); + + return hw; +} + +struct clk_hw *clk_stm32_gate_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg) +{ + struct clk_stm32_gate *gate = cfg->clock_cfg; + struct clk_hw *hw = &gate->hw; + int err; + + gate->base = base; + gate->lock = lock; + gate->clock_data = data->clock_data; + + err = clk_hw_register(dev, hw); + if (err) + return ERR_PTR(err); + + return hw; +} + +struct clk_hw *clk_stm32_div_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg) +{ + struct clk_stm32_div *div = cfg->clock_cfg; + struct clk_hw *hw = &div->hw; + int err; + + div->base = base; + div->lock = lock; + div->clock_data = data->clock_data; + + err = clk_hw_register(dev, hw); + if (err) + return ERR_PTR(err); + + return hw; +} + +struct clk_hw *clk_stm32_composite_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg) +{ + struct clk_stm32_composite *composite = cfg->clock_cfg; + struct clk_hw *hw = &composite->hw; + int err; + + composite->base = base; + composite->lock = lock; + composite->clock_data = data->clock_data; + + err = clk_hw_register(dev, hw); + if (err) + return ERR_PTR(err); + + return hw; +} diff --git a/drivers/clk/stm32/clk-stm32-core.h 
b/drivers/clk/stm32/clk-stm32-core.h new file mode 100644 index 000000000000..76cffda02308 --- /dev/null +++ b/drivers/clk/stm32/clk-stm32-core.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics. + */ + +#include <linux/clk-provider.h> + +struct stm32_rcc_match_data; + +struct stm32_mux_cfg { + u16 offset; + u8 shift; + u8 width; + u8 flags; + u32 *table; + u8 ready; +}; + +struct stm32_gate_cfg { + u16 offset; + u8 bit_idx; + u8 set_clr; +}; + +struct stm32_div_cfg { + u16 offset; + u8 shift; + u8 width; + u8 flags; + u8 ready; + const struct clk_div_table *table; +}; + +struct stm32_composite_cfg { + int mux; + int gate; + int div; +}; + +#define NO_ID 0xFFFFFFFF + +#define NO_STM32_MUX 0xFFFF +#define NO_STM32_DIV 0xFFFF +#define NO_STM32_GATE 0xFFFF + +struct clock_config { + unsigned long id; + int sec_id; + void *clock_cfg; + + struct clk_hw *(*func)(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg); +}; + +struct clk_stm32_clock_data { + u16 *gate_cpt; + const struct stm32_gate_cfg *gates; + const struct stm32_mux_cfg *muxes; + const struct stm32_div_cfg *dividers; + struct clk_hw *(*is_multi_mux)(struct clk_hw *hw); +}; + +struct stm32_rcc_match_data { + struct clk_hw_onecell_data *hw_clks; + unsigned int num_clocks; + const struct clock_config *tab_clocks; + unsigned int maxbinding; + struct clk_stm32_clock_data *clock_data; + u32 clear_offset; + int (*check_security)(void __iomem *base, + const struct clock_config *cfg); + int (*multi_mux)(void __iomem *base, const struct clock_config *cfg); +}; + +int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match, + void __iomem *base); + +int stm32_rcc_init(struct device *dev, const struct of_device_id *match_data, + void __iomem *base); + +/* MUX define */ +#define MUX_NO_RDY 0xFF +#define MUX_SAFE BIT(7) + +/* DIV define */ +#define DIV_NO_RDY 0xFF + +/* Definition of clock structure */ +struct clk_stm32_mux { + u16 mux_id; + struct clk_hw hw; + void __iomem *base; + struct clk_stm32_clock_data *clock_data; + spinlock_t *lock; /* spin lock */ +}; + +#define to_clk_stm32_mux(_hw) container_of(_hw, struct clk_stm32_mux, hw) + +struct clk_stm32_gate { + u16 gate_id; + struct clk_hw hw; + void __iomem *base; + struct clk_stm32_clock_data *clock_data; + spinlock_t *lock; /* spin lock */ +}; + +#define to_clk_stm32_gate(_hw) container_of(_hw, struct clk_stm32_gate, hw) + +struct clk_stm32_div { + u16 div_id; + struct clk_hw hw; + void __iomem *base; + struct clk_stm32_clock_data *clock_data; + spinlock_t *lock; /* spin lock */ +}; + +#define to_clk_stm32_divider(_hw) container_of(_hw, struct clk_stm32_div, hw) + +struct clk_stm32_composite { + u16 gate_id; + u16 mux_id; + u16 div_id; + struct clk_hw hw; + void __iomem *base; + struct clk_stm32_clock_data *clock_data; + spinlock_t *lock; /* spin lock */ +}; + +#define to_clk_stm32_composite(_hw) container_of(_hw, struct clk_stm32_composite, hw) + +/* Clock operators */ +extern const struct clk_ops clk_stm32_mux_ops; +extern const struct clk_ops clk_stm32_gate_ops; +extern const struct clk_ops clk_stm32_divider_ops; +extern const struct clk_ops clk_stm32_composite_ops; + +/* Clock registering */ +struct clk_hw *clk_stm32_mux_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + 
spinlock_t *lock, + const struct clock_config *cfg); + +struct clk_hw *clk_stm32_gate_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg); + +struct clk_hw *clk_stm32_div_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg); + +struct clk_hw *clk_stm32_composite_register(struct device *dev, + const struct stm32_rcc_match_data *data, + void __iomem *base, + spinlock_t *lock, + const struct clock_config *cfg); + +#define STM32_CLOCK_CFG(_binding, _clk, _sec_id, _struct, _register)\ +{\ + .id = (_binding),\ + .sec_id = (_sec_id),\ + .clock_cfg = (_struct) {_clk},\ + .func = (_register),\ +} + +#define STM32_MUX_CFG(_binding, _clk, _sec_id)\ + STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_mux *,\ + &clk_stm32_mux_register) + +#define STM32_GATE_CFG(_binding, _clk, _sec_id)\ + STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_gate *,\ + &clk_stm32_gate_register) + +#define STM32_DIV_CFG(_binding, _clk, _sec_id)\ + STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_div *,\ + &clk_stm32_div_register) + +#define STM32_COMPOSITE_CFG(_binding, _clk, _sec_id)\ + STM32_CLOCK_CFG(_binding, &(_clk), _sec_id, struct clk_stm32_composite *,\ + &clk_stm32_composite_register) diff --git a/drivers/clk/stm32/clk-stm32mp13.c b/drivers/clk/stm32/clk-stm32mp13.c new file mode 100644 index 000000000000..1192eee8abe4 --- /dev/null +++ b/drivers/clk/stm32/clk-stm32mp13.c @@ -0,0 +1,1620 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics. 
+ */ + +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/stm32mp13-clks.h> +#include "clk-stm32-core.h" +#include "stm32mp13_rcc.h" + +#define RCC_CLR_OFFSET 0x4 + +/* STM32 Gates definition */ +enum enum_gate_cfg { + GATE_MCO1, + GATE_MCO2, + GATE_DBGCK, + GATE_TRACECK, + GATE_DDRC1, + GATE_DDRC1LP, + GATE_DDRPHYC, + GATE_DDRPHYCLP, + GATE_DDRCAPB, + GATE_DDRCAPBLP, + GATE_AXIDCG, + GATE_DDRPHYCAPB, + GATE_DDRPHYCAPBLP, + GATE_TIM2, + GATE_TIM3, + GATE_TIM4, + GATE_TIM5, + GATE_TIM6, + GATE_TIM7, + GATE_LPTIM1, + GATE_SPI2, + GATE_SPI3, + GATE_USART3, + GATE_UART4, + GATE_UART5, + GATE_UART7, + GATE_UART8, + GATE_I2C1, + GATE_I2C2, + GATE_SPDIF, + GATE_TIM1, + GATE_TIM8, + GATE_SPI1, + GATE_USART6, + GATE_SAI1, + GATE_SAI2, + GATE_DFSDM, + GATE_ADFSDM, + GATE_FDCAN, + GATE_LPTIM2, + GATE_LPTIM3, + GATE_LPTIM4, + GATE_LPTIM5, + GATE_VREF, + GATE_DTS, + GATE_PMBCTRL, + GATE_HDP, + GATE_SYSCFG, + GATE_DCMIPP, + GATE_DDRPERFM, + GATE_IWDG2APB, + GATE_USBPHY, + GATE_STGENRO, + GATE_LTDC, + GATE_RTCAPB, + GATE_TZC, + GATE_ETZPC, + GATE_IWDG1APB, + GATE_BSEC, + GATE_STGENC, + GATE_USART1, + GATE_USART2, + GATE_SPI4, + GATE_SPI5, + GATE_I2C3, + GATE_I2C4, + GATE_I2C5, + GATE_TIM12, + GATE_TIM13, + GATE_TIM14, + GATE_TIM15, + GATE_TIM16, + GATE_TIM17, + GATE_DMA1, + GATE_DMA2, + GATE_DMAMUX1, + GATE_DMA3, + GATE_DMAMUX2, + GATE_ADC1, + GATE_ADC2, + GATE_USBO, + GATE_TSC, + GATE_GPIOA, + GATE_GPIOB, + GATE_GPIOC, + GATE_GPIOD, + GATE_GPIOE, + GATE_GPIOF, + GATE_GPIOG, + GATE_GPIOH, + GATE_GPIOI, + GATE_PKA, + GATE_SAES, + GATE_CRYP1, + GATE_HASH1, + GATE_RNG1, + GATE_BKPSRAM, + GATE_AXIMC, + GATE_MCE, + GATE_ETH1CK, + GATE_ETH1TX, + GATE_ETH1RX, + GATE_ETH1MAC, + GATE_FMC, + GATE_QSPI, + GATE_SDMMC1, + GATE_SDMMC2, + GATE_CRC1, + GATE_USBH, + GATE_ETH2CK, + GATE_ETH2TX, + GATE_ETH2RX, + GATE_ETH2MAC, + GATE_ETH1STP, + GATE_ETH2STP, + GATE_MDMA, + GATE_NB +}; + +#define _CFG_GATE(_id, _offset, _bit_idx, _offset_clr)\ + [(_id)] = {\ + .offset = (_offset),\ + .bit_idx = (_bit_idx),\ + .set_clr = (_offset_clr),\ + } + +#define CFG_GATE(_id, _offset, _bit_idx)\ + _CFG_GATE(_id, _offset, _bit_idx, 0) + +#define CFG_GATE_SETCLR(_id, _offset, _bit_idx)\ + _CFG_GATE(_id, _offset, _bit_idx, RCC_CLR_OFFSET) + +static struct stm32_gate_cfg stm32mp13_gates[] = { + CFG_GATE(GATE_MCO1, RCC_MCO1CFGR, 12), + CFG_GATE(GATE_MCO2, RCC_MCO2CFGR, 12), + CFG_GATE(GATE_DBGCK, RCC_DBGCFGR, 8), + CFG_GATE(GATE_TRACECK, RCC_DBGCFGR, 9), + CFG_GATE(GATE_DDRC1, RCC_DDRITFCR, 0), + CFG_GATE(GATE_DDRC1LP, RCC_DDRITFCR, 1), + CFG_GATE(GATE_DDRPHYC, RCC_DDRITFCR, 4), + CFG_GATE(GATE_DDRPHYCLP, RCC_DDRITFCR, 5), + CFG_GATE(GATE_DDRCAPB, RCC_DDRITFCR, 6), + CFG_GATE(GATE_DDRCAPBLP, RCC_DDRITFCR, 7), + CFG_GATE(GATE_AXIDCG, RCC_DDRITFCR, 8), + CFG_GATE(GATE_DDRPHYCAPB, RCC_DDRITFCR, 9), + CFG_GATE(GATE_DDRPHYCAPBLP, RCC_DDRITFCR, 10), + CFG_GATE_SETCLR(GATE_TIM2, RCC_MP_APB1ENSETR, 0), + CFG_GATE_SETCLR(GATE_TIM3, RCC_MP_APB1ENSETR, 1), + CFG_GATE_SETCLR(GATE_TIM4, RCC_MP_APB1ENSETR, 2), + CFG_GATE_SETCLR(GATE_TIM5, RCC_MP_APB1ENSETR, 3), + CFG_GATE_SETCLR(GATE_TIM6, RCC_MP_APB1ENSETR, 4), + CFG_GATE_SETCLR(GATE_TIM7, RCC_MP_APB1ENSETR, 5), + CFG_GATE_SETCLR(GATE_LPTIM1, RCC_MP_APB1ENSETR, 9), + CFG_GATE_SETCLR(GATE_SPI2, RCC_MP_APB1ENSETR, 11), + CFG_GATE_SETCLR(GATE_SPI3, RCC_MP_APB1ENSETR, 12), + CFG_GATE_SETCLR(GATE_USART3, RCC_MP_APB1ENSETR, 15), + CFG_GATE_SETCLR(GATE_UART4, RCC_MP_APB1ENSETR, 16), + CFG_GATE_SETCLR(GATE_UART5, 
RCC_MP_APB1ENSETR, 17), + CFG_GATE_SETCLR(GATE_UART7, RCC_MP_APB1ENSETR, 18), + CFG_GATE_SETCLR(GATE_UART8, RCC_MP_APB1ENSETR, 19), + CFG_GATE_SETCLR(GATE_I2C1, RCC_MP_APB1ENSETR, 21), + CFG_GATE_SETCLR(GATE_I2C2, RCC_MP_APB1ENSETR, 22), + CFG_GATE_SETCLR(GATE_SPDIF, RCC_MP_APB1ENSETR, 26), + CFG_GATE_SETCLR(GATE_TIM1, RCC_MP_APB2ENSETR, 0), + CFG_GATE_SETCLR(GATE_TIM8, RCC_MP_APB2ENSETR, 1), + CFG_GATE_SETCLR(GATE_SPI1, RCC_MP_APB2ENSETR, 8), + CFG_GATE_SETCLR(GATE_USART6, RCC_MP_APB2ENSETR, 13), + CFG_GATE_SETCLR(GATE_SAI1, RCC_MP_APB2ENSETR, 16), + CFG_GATE_SETCLR(GATE_SAI2, RCC_MP_APB2ENSETR, 17), + CFG_GATE_SETCLR(GATE_DFSDM, RCC_MP_APB2ENSETR, 20), + CFG_GATE_SETCLR(GATE_ADFSDM, RCC_MP_APB2ENSETR, 21), + CFG_GATE_SETCLR(GATE_FDCAN, RCC_MP_APB2ENSETR, 24), + CFG_GATE_SETCLR(GATE_LPTIM2, RCC_MP_APB3ENSETR, 0), + CFG_GATE_SETCLR(GATE_LPTIM3, RCC_MP_APB3ENSETR, 1), + CFG_GATE_SETCLR(GATE_LPTIM4, RCC_MP_APB3ENSETR, 2), + CFG_GATE_SETCLR(GATE_LPTIM5, RCC_MP_APB3ENSETR, 3), + CFG_GATE_SETCLR(GATE_VREF, RCC_MP_APB3ENSETR, 13), + CFG_GATE_SETCLR(GATE_DTS, RCC_MP_APB3ENSETR, 16), + CFG_GATE_SETCLR(GATE_PMBCTRL, RCC_MP_APB3ENSETR, 17), + CFG_GATE_SETCLR(GATE_HDP, RCC_MP_APB3ENSETR, 20), + CFG_GATE_SETCLR(GATE_SYSCFG, RCC_MP_NS_APB3ENSETR, 0), + CFG_GATE_SETCLR(GATE_DCMIPP, RCC_MP_APB4ENSETR, 1), + CFG_GATE_SETCLR(GATE_DDRPERFM, RCC_MP_APB4ENSETR, 8), + CFG_GATE_SETCLR(GATE_IWDG2APB, RCC_MP_APB4ENSETR, 15), + CFG_GATE_SETCLR(GATE_USBPHY, RCC_MP_APB4ENSETR, 16), + CFG_GATE_SETCLR(GATE_STGENRO, RCC_MP_APB4ENSETR, 20), + CFG_GATE_SETCLR(GATE_LTDC, RCC_MP_NS_APB4ENSETR, 0), + CFG_GATE_SETCLR(GATE_RTCAPB, RCC_MP_APB5ENSETR, 8), + CFG_GATE_SETCLR(GATE_TZC, RCC_MP_APB5ENSETR, 11), + CFG_GATE_SETCLR(GATE_ETZPC, RCC_MP_APB5ENSETR, 13), + CFG_GATE_SETCLR(GATE_IWDG1APB, RCC_MP_APB5ENSETR, 15), + CFG_GATE_SETCLR(GATE_BSEC, RCC_MP_APB5ENSETR, 16), + CFG_GATE_SETCLR(GATE_STGENC, RCC_MP_APB5ENSETR, 20), + CFG_GATE_SETCLR(GATE_USART1, RCC_MP_APB6ENSETR, 0), + CFG_GATE_SETCLR(GATE_USART2, RCC_MP_APB6ENSETR, 1), + CFG_GATE_SETCLR(GATE_SPI4, RCC_MP_APB6ENSETR, 2), + CFG_GATE_SETCLR(GATE_SPI5, RCC_MP_APB6ENSETR, 3), + CFG_GATE_SETCLR(GATE_I2C3, RCC_MP_APB6ENSETR, 4), + CFG_GATE_SETCLR(GATE_I2C4, RCC_MP_APB6ENSETR, 5), + CFG_GATE_SETCLR(GATE_I2C5, RCC_MP_APB6ENSETR, 6), + CFG_GATE_SETCLR(GATE_TIM12, RCC_MP_APB6ENSETR, 7), + CFG_GATE_SETCLR(GATE_TIM13, RCC_MP_APB6ENSETR, 8), + CFG_GATE_SETCLR(GATE_TIM14, RCC_MP_APB6ENSETR, 9), + CFG_GATE_SETCLR(GATE_TIM15, RCC_MP_APB6ENSETR, 10), + CFG_GATE_SETCLR(GATE_TIM16, RCC_MP_APB6ENSETR, 11), + CFG_GATE_SETCLR(GATE_TIM17, RCC_MP_APB6ENSETR, 12), + CFG_GATE_SETCLR(GATE_DMA1, RCC_MP_AHB2ENSETR, 0), + CFG_GATE_SETCLR(GATE_DMA2, RCC_MP_AHB2ENSETR, 1), + CFG_GATE_SETCLR(GATE_DMAMUX1, RCC_MP_AHB2ENSETR, 2), + CFG_GATE_SETCLR(GATE_DMA3, RCC_MP_AHB2ENSETR, 3), + CFG_GATE_SETCLR(GATE_DMAMUX2, RCC_MP_AHB2ENSETR, 4), + CFG_GATE_SETCLR(GATE_ADC1, RCC_MP_AHB2ENSETR, 5), + CFG_GATE_SETCLR(GATE_ADC2, RCC_MP_AHB2ENSETR, 6), + CFG_GATE_SETCLR(GATE_USBO, RCC_MP_AHB2ENSETR, 8), + CFG_GATE_SETCLR(GATE_TSC, RCC_MP_AHB4ENSETR, 15), + CFG_GATE_SETCLR(GATE_GPIOA, RCC_MP_NS_AHB4ENSETR, 0), + CFG_GATE_SETCLR(GATE_GPIOB, RCC_MP_NS_AHB4ENSETR, 1), + CFG_GATE_SETCLR(GATE_GPIOC, RCC_MP_NS_AHB4ENSETR, 2), + CFG_GATE_SETCLR(GATE_GPIOD, RCC_MP_NS_AHB4ENSETR, 3), + CFG_GATE_SETCLR(GATE_GPIOE, RCC_MP_NS_AHB4ENSETR, 4), + CFG_GATE_SETCLR(GATE_GPIOF, RCC_MP_NS_AHB4ENSETR, 5), + CFG_GATE_SETCLR(GATE_GPIOG, RCC_MP_NS_AHB4ENSETR, 6), + CFG_GATE_SETCLR(GATE_GPIOH, RCC_MP_NS_AHB4ENSETR, 7), + 
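	/*
	 * CFG_GATE_SETCLR() entries pass RCC_CLR_OFFSET (0x4) as set_clr, so
	 * the ...ENSETR register named here enables the clock and the
	 * companion clear register at offset +0x4 disables it, each with a
	 * single BIT() write instead of a read-modify-write; the plain
	 * CFG_GATE() entries (MCO, DDR, debug) keep the read-modify-write
	 * path.  As an illustration of what the core helper ends up doing for
	 * GATE_TIM2 (bit 0 of RCC_MP_APB1ENSETR):
	 *
	 *	writel(BIT(0), base + RCC_MP_APB1ENSETR);	enable
	 *	writel(BIT(0), base + RCC_MP_APB1ENSETR + 0x4);	disable
	 */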
CFG_GATE_SETCLR(GATE_GPIOI, RCC_MP_NS_AHB4ENSETR, 8), + CFG_GATE_SETCLR(GATE_PKA, RCC_MP_AHB5ENSETR, 2), + CFG_GATE_SETCLR(GATE_SAES, RCC_MP_AHB5ENSETR, 3), + CFG_GATE_SETCLR(GATE_CRYP1, RCC_MP_AHB5ENSETR, 4), + CFG_GATE_SETCLR(GATE_HASH1, RCC_MP_AHB5ENSETR, 5), + CFG_GATE_SETCLR(GATE_RNG1, RCC_MP_AHB5ENSETR, 6), + CFG_GATE_SETCLR(GATE_BKPSRAM, RCC_MP_AHB5ENSETR, 8), + CFG_GATE_SETCLR(GATE_AXIMC, RCC_MP_AHB5ENSETR, 16), + CFG_GATE_SETCLR(GATE_MCE, RCC_MP_AHB6ENSETR, 1), + CFG_GATE_SETCLR(GATE_ETH1CK, RCC_MP_AHB6ENSETR, 7), + CFG_GATE_SETCLR(GATE_ETH1TX, RCC_MP_AHB6ENSETR, 8), + CFG_GATE_SETCLR(GATE_ETH1RX, RCC_MP_AHB6ENSETR, 9), + CFG_GATE_SETCLR(GATE_ETH1MAC, RCC_MP_AHB6ENSETR, 10), + CFG_GATE_SETCLR(GATE_FMC, RCC_MP_AHB6ENSETR, 12), + CFG_GATE_SETCLR(GATE_QSPI, RCC_MP_AHB6ENSETR, 14), + CFG_GATE_SETCLR(GATE_SDMMC1, RCC_MP_AHB6ENSETR, 16), + CFG_GATE_SETCLR(GATE_SDMMC2, RCC_MP_AHB6ENSETR, 17), + CFG_GATE_SETCLR(GATE_CRC1, RCC_MP_AHB6ENSETR, 20), + CFG_GATE_SETCLR(GATE_USBH, RCC_MP_AHB6ENSETR, 24), + CFG_GATE_SETCLR(GATE_ETH2CK, RCC_MP_AHB6ENSETR, 27), + CFG_GATE_SETCLR(GATE_ETH2TX, RCC_MP_AHB6ENSETR, 28), + CFG_GATE_SETCLR(GATE_ETH2RX, RCC_MP_AHB6ENSETR, 29), + CFG_GATE_SETCLR(GATE_ETH2MAC, RCC_MP_AHB6ENSETR, 30), + CFG_GATE_SETCLR(GATE_ETH1STP, RCC_MP_AHB6LPENSETR, 11), + CFG_GATE_SETCLR(GATE_ETH2STP, RCC_MP_AHB6LPENSETR, 31), + CFG_GATE_SETCLR(GATE_MDMA, RCC_MP_NS_AHB6ENSETR, 0), +}; + +/* STM32 Divivers definition */ +enum enum_div_cfg { + DIV_RTC, + DIV_HSI, + DIV_MCO1, + DIV_MCO2, + DIV_TRACE, + DIV_ETH1PTP, + DIV_ETH2PTP, + DIV_NB +}; + +static const struct clk_div_table ck_trace_div_table[] = { + { 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 }, + { 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 }, + { 0 }, +}; + +#define CFG_DIV(_id, _offset, _shift, _width, _flags, _table, _ready)\ + [(_id)] = {\ + .offset = (_offset),\ + .shift = (_shift),\ + .width = (_width),\ + .flags = (_flags),\ + .table = (_table),\ + .ready = (_ready),\ + } + +static const struct stm32_div_cfg stm32mp13_dividers[DIV_NB] = { + CFG_DIV(DIV_RTC, RCC_RTCDIVR, 0, 6, 0, NULL, DIV_NO_RDY), + CFG_DIV(DIV_MCO1, RCC_MCO1CFGR, 4, 4, 0, NULL, DIV_NO_RDY), + CFG_DIV(DIV_MCO2, RCC_MCO2CFGR, 4, 4, 0, NULL, DIV_NO_RDY), + CFG_DIV(DIV_TRACE, RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table, DIV_NO_RDY), + CFG_DIV(DIV_ETH1PTP, RCC_ETH12CKSELR, 4, 4, 0, NULL, DIV_NO_RDY), + CFG_DIV(DIV_ETH2PTP, RCC_ETH12CKSELR, 12, 4, 0, NULL, DIV_NO_RDY), +}; + +/* STM32 Muxes definition */ +enum enum_mux_cfg { + MUX_ADC1, + MUX_ADC2, + MUX_DCMIPP, + MUX_ETH1, + MUX_ETH2, + MUX_FDCAN, + MUX_FMC, + MUX_I2C12, + MUX_I2C3, + MUX_I2C4, + MUX_I2C5, + MUX_LPTIM1, + MUX_LPTIM2, + MUX_LPTIM3, + MUX_LPTIM45, + MUX_MCO1, + MUX_MCO2, + MUX_QSPI, + MUX_RNG1, + MUX_SAES, + MUX_SAI1, + MUX_SAI2, + MUX_SDMMC1, + MUX_SDMMC2, + MUX_SPDIF, + MUX_SPI1, + MUX_SPI23, + MUX_SPI4, + MUX_SPI5, + MUX_STGEN, + MUX_UART1, + MUX_UART2, + MUX_UART4, + MUX_UART6, + MUX_UART35, + MUX_UART78, + MUX_USBO, + MUX_USBPHY, + MUX_NB +}; + +#define _CFG_MUX(_id, _offset, _shift, _witdh, _ready, _flags)\ + [_id] = {\ + .offset = (_offset),\ + .shift = (_shift),\ + .width = (_witdh),\ + .ready = (_ready),\ + .flags = (_flags),\ + } + +#define CFG_MUX(_id, _offset, _shift, _witdh)\ + _CFG_MUX(_id, _offset, _shift, _witdh, MUX_NO_RDY, 0) + +#define CFG_MUX_SAFE(_id, _offset, _shift, _witdh)\ + _CFG_MUX(_id, _offset, _shift, _witdh, MUX_NO_RDY, MUX_SAFE) + +static const struct stm32_mux_cfg stm32mp13_muxes[] = { + CFG_MUX(MUX_I2C12, RCC_I2C12CKSELR, 0, 3), + CFG_MUX(MUX_LPTIM45, RCC_LPTIM45CKSELR, 0, 3), + 
CFG_MUX(MUX_SPI23, RCC_SPI2S23CKSELR, 0, 3), + CFG_MUX(MUX_UART35, RCC_UART35CKSELR, 0, 3), + CFG_MUX(MUX_UART78, RCC_UART78CKSELR, 0, 3), + CFG_MUX(MUX_ADC1, RCC_ADC12CKSELR, 0, 2), + CFG_MUX(MUX_ADC2, RCC_ADC12CKSELR, 2, 2), + CFG_MUX(MUX_DCMIPP, RCC_DCMIPPCKSELR, 0, 2), + CFG_MUX(MUX_ETH1, RCC_ETH12CKSELR, 0, 2), + CFG_MUX(MUX_ETH2, RCC_ETH12CKSELR, 8, 2), + CFG_MUX(MUX_FDCAN, RCC_FDCANCKSELR, 0, 2), + CFG_MUX(MUX_I2C3, RCC_I2C345CKSELR, 0, 3), + CFG_MUX(MUX_I2C4, RCC_I2C345CKSELR, 3, 3), + CFG_MUX(MUX_I2C5, RCC_I2C345CKSELR, 6, 3), + CFG_MUX(MUX_LPTIM1, RCC_LPTIM1CKSELR, 0, 3), + CFG_MUX(MUX_LPTIM2, RCC_LPTIM23CKSELR, 0, 3), + CFG_MUX(MUX_LPTIM3, RCC_LPTIM23CKSELR, 3, 3), + CFG_MUX(MUX_MCO1, RCC_MCO1CFGR, 0, 3), + CFG_MUX(MUX_MCO2, RCC_MCO2CFGR, 0, 3), + CFG_MUX(MUX_RNG1, RCC_RNG1CKSELR, 0, 2), + CFG_MUX(MUX_SAES, RCC_SAESCKSELR, 0, 2), + CFG_MUX(MUX_SAI1, RCC_SAI1CKSELR, 0, 3), + CFG_MUX(MUX_SAI2, RCC_SAI2CKSELR, 0, 3), + CFG_MUX(MUX_SPDIF, RCC_SPDIFCKSELR, 0, 2), + CFG_MUX(MUX_SPI1, RCC_SPI2S1CKSELR, 0, 3), + CFG_MUX(MUX_SPI4, RCC_SPI45CKSELR, 0, 3), + CFG_MUX(MUX_SPI5, RCC_SPI45CKSELR, 3, 3), + CFG_MUX(MUX_STGEN, RCC_STGENCKSELR, 0, 2), + CFG_MUX(MUX_UART1, RCC_UART12CKSELR, 0, 3), + CFG_MUX(MUX_UART2, RCC_UART12CKSELR, 3, 3), + CFG_MUX(MUX_UART4, RCC_UART4CKSELR, 0, 3), + CFG_MUX(MUX_UART6, RCC_UART6CKSELR, 0, 3), + CFG_MUX(MUX_USBO, RCC_USBCKSELR, 4, 1), + CFG_MUX(MUX_USBPHY, RCC_USBCKSELR, 0, 2), + CFG_MUX_SAFE(MUX_FMC, RCC_FMCCKSELR, 0, 2), + CFG_MUX_SAFE(MUX_QSPI, RCC_QSPICKSELR, 0, 2), + CFG_MUX_SAFE(MUX_SDMMC1, RCC_SDMMC12CKSELR, 0, 3), + CFG_MUX_SAFE(MUX_SDMMC2, RCC_SDMMC12CKSELR, 3, 3), +}; + +struct clk_stm32_securiy { + u32 offset; + u8 bit_idx; + unsigned long scmi_id; +}; + +enum security_clk { + SECF_NONE, + SECF_LPTIM2, + SECF_LPTIM3, + SECF_VREF, + SECF_DCMIPP, + SECF_USBPHY, + SECF_TZC, + SECF_ETZPC, + SECF_IWDG1, + SECF_BSEC, + SECF_STGENC, + SECF_STGENRO, + SECF_USART1, + SECF_USART2, + SECF_SPI4, + SECF_SPI5, + SECF_I2C3, + SECF_I2C4, + SECF_I2C5, + SECF_TIM12, + SECF_TIM13, + SECF_TIM14, + SECF_TIM15, + SECF_TIM16, + SECF_TIM17, + SECF_DMA3, + SECF_DMAMUX2, + SECF_ADC1, + SECF_ADC2, + SECF_USBO, + SECF_TSC, + SECF_PKA, + SECF_SAES, + SECF_CRYP1, + SECF_HASH1, + SECF_RNG1, + SECF_BKPSRAM, + SECF_MCE, + SECF_FMC, + SECF_QSPI, + SECF_SDMMC1, + SECF_SDMMC2, + SECF_ETH1CK, + SECF_ETH1TX, + SECF_ETH1RX, + SECF_ETH1MAC, + SECF_ETH1STP, + SECF_ETH2CK, + SECF_ETH2TX, + SECF_ETH2RX, + SECF_ETH2MAC, + SECF_ETH2STP, + SECF_MCO1, + SECF_MCO2 +}; + +#define SECF(_sec_id, _offset, _bit_idx)[_sec_id] = {\ + .offset = _offset,\ + .bit_idx = _bit_idx,\ + .scmi_id = -1,\ +} + +static const struct clk_stm32_securiy stm32mp13_security[] = { + SECF(SECF_LPTIM2, RCC_APB3SECSR, RCC_APB3SECSR_LPTIM2SECF), + SECF(SECF_LPTIM3, RCC_APB3SECSR, RCC_APB3SECSR_LPTIM3SECF), + SECF(SECF_VREF, RCC_APB3SECSR, RCC_APB3SECSR_VREFSECF), + SECF(SECF_DCMIPP, RCC_APB4SECSR, RCC_APB4SECSR_DCMIPPSECF), + SECF(SECF_USBPHY, RCC_APB4SECSR, RCC_APB4SECSR_USBPHYSECF), + SECF(SECF_TZC, RCC_APB5SECSR, RCC_APB5SECSR_TZCSECF), + SECF(SECF_ETZPC, RCC_APB5SECSR, RCC_APB5SECSR_ETZPCSECF), + SECF(SECF_IWDG1, RCC_APB5SECSR, RCC_APB5SECSR_IWDG1SECF), + SECF(SECF_BSEC, RCC_APB5SECSR, RCC_APB5SECSR_BSECSECF), + SECF(SECF_STGENC, RCC_APB5SECSR, RCC_APB5SECSR_STGENCSECF), + SECF(SECF_STGENRO, RCC_APB5SECSR, RCC_APB5SECSR_STGENROSECF), + SECF(SECF_USART1, RCC_APB6SECSR, RCC_APB6SECSR_USART1SECF), + SECF(SECF_USART2, RCC_APB6SECSR, RCC_APB6SECSR_USART2SECF), + SECF(SECF_SPI4, RCC_APB6SECSR, RCC_APB6SECSR_SPI4SECF), + 
SECF(SECF_SPI5, RCC_APB6SECSR, RCC_APB6SECSR_SPI5SECF), + SECF(SECF_I2C3, RCC_APB6SECSR, RCC_APB6SECSR_I2C3SECF), + SECF(SECF_I2C4, RCC_APB6SECSR, RCC_APB6SECSR_I2C4SECF), + SECF(SECF_I2C5, RCC_APB6SECSR, RCC_APB6SECSR_I2C5SECF), + SECF(SECF_TIM12, RCC_APB6SECSR, RCC_APB6SECSR_TIM12SECF), + SECF(SECF_TIM13, RCC_APB6SECSR, RCC_APB6SECSR_TIM13SECF), + SECF(SECF_TIM14, RCC_APB6SECSR, RCC_APB6SECSR_TIM14SECF), + SECF(SECF_TIM15, RCC_APB6SECSR, RCC_APB6SECSR_TIM15SECF), + SECF(SECF_TIM16, RCC_APB6SECSR, RCC_APB6SECSR_TIM16SECF), + SECF(SECF_TIM17, RCC_APB6SECSR, RCC_APB6SECSR_TIM17SECF), + SECF(SECF_DMA3, RCC_AHB2SECSR, RCC_AHB2SECSR_DMA3SECF), + SECF(SECF_DMAMUX2, RCC_AHB2SECSR, RCC_AHB2SECSR_DMAMUX2SECF), + SECF(SECF_ADC1, RCC_AHB2SECSR, RCC_AHB2SECSR_ADC1SECF), + SECF(SECF_ADC2, RCC_AHB2SECSR, RCC_AHB2SECSR_ADC2SECF), + SECF(SECF_USBO, RCC_AHB2SECSR, RCC_AHB2SECSR_USBOSECF), + SECF(SECF_TSC, RCC_AHB4SECSR, RCC_AHB4SECSR_TSCSECF), + SECF(SECF_PKA, RCC_AHB5SECSR, RCC_AHB5SECSR_PKASECF), + SECF(SECF_SAES, RCC_AHB5SECSR, RCC_AHB5SECSR_SAESSECF), + SECF(SECF_CRYP1, RCC_AHB5SECSR, RCC_AHB5SECSR_CRYP1SECF), + SECF(SECF_HASH1, RCC_AHB5SECSR, RCC_AHB5SECSR_HASH1SECF), + SECF(SECF_RNG1, RCC_AHB5SECSR, RCC_AHB5SECSR_RNG1SECF), + SECF(SECF_BKPSRAM, RCC_AHB5SECSR, RCC_AHB5SECSR_BKPSRAMSECF), + SECF(SECF_MCE, RCC_AHB6SECSR, RCC_AHB6SECSR_MCESECF), + SECF(SECF_FMC, RCC_AHB6SECSR, RCC_AHB6SECSR_FMCSECF), + SECF(SECF_QSPI, RCC_AHB6SECSR, RCC_AHB6SECSR_QSPISECF), + SECF(SECF_SDMMC1, RCC_AHB6SECSR, RCC_AHB6SECSR_SDMMC1SECF), + SECF(SECF_SDMMC2, RCC_AHB6SECSR, RCC_AHB6SECSR_SDMMC2SECF), + SECF(SECF_ETH1CK, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1CKSECF), + SECF(SECF_ETH1TX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1TXSECF), + SECF(SECF_ETH1RX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1RXSECF), + SECF(SECF_ETH1MAC, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1MACSECF), + SECF(SECF_ETH1STP, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH1STPSECF), + SECF(SECF_ETH2CK, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2CKSECF), + SECF(SECF_ETH2TX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2TXSECF), + SECF(SECF_ETH2RX, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2RXSECF), + SECF(SECF_ETH2MAC, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2MACSECF), + SECF(SECF_ETH2STP, RCC_AHB6SECSR, RCC_AHB6SECSR_ETH2STPSECF), + SECF(SECF_MCO1, RCC_SECCFGR, RCC_SECCFGR_MCO1SEC), + SECF(SECF_MCO2, RCC_SECCFGR, RCC_SECCFGR_MCO2SEC), +}; + +static const char * const adc12_src[] = { + "pll4_r", "ck_per", "pll3_q" +}; + +static const char * const dcmipp_src[] = { + "ck_axi", "pll2_q", "pll4_p", "ck_per", +}; + +static const char * const eth12_src[] = { + "pll4_p", "pll3_q" +}; + +static const char * const fdcan_src[] = { + "ck_hse", "pll3_q", "pll4_q", "pll4_r" +}; + +static const char * const fmc_src[] = { + "ck_axi", "pll3_r", "pll4_p", "ck_per" +}; + +static const char * const i2c12_src[] = { + "pclk1", "pll4_r", "ck_hsi", "ck_csi" +}; + +static const char * const i2c345_src[] = { + "pclk6", "pll4_r", "ck_hsi", "ck_csi" +}; + +static const char * const lptim1_src[] = { + "pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per" +}; + +static const char * const lptim23_src[] = { + "pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi" +}; + +static const char * const lptim45_src[] = { + "pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per" +}; + +static const char * const mco1_src[] = { + "ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse" +}; + +static const char * const mco2_src[] = { + "ck_mpu", "ck_axi", "ck_mlahb", "pll4_p", "ck_hse", "ck_hsi" +}; + +static const char * const qspi_src[] = { + "ck_axi", "pll3_r", "pll4_p", "ck_per" +}; + 
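Each of these parent tables lists parent clocks in selector-value order for the matching entry in stm32mp13_muxes[]; qspi_src, for instance, pairs with CFG_MUX_SAFE(MUX_QSPI, RCC_QSPICKSELR, 0, 2), so field values 0..3 pick ck_axi, pll3_r, pll4_p and ck_per. A minimal sketch of that mapping follows; the helper name is illustrative only (not part of the patch) and it assumes the includes already pulled in by this file.

/* Illustration only: map the QSPI kernel-clock selector field to a parent name. */
static const char *qspi_sel_to_parent(void __iomem *rcc_base)
{
	/* 2-bit selector at bit 0 of RCC_QSPICKSELR (0x654), cf. CFG_MUX_SAFE above */
	u32 sel = readl(rcc_base + RCC_QSPICKSELR) & GENMASK(1, 0);

	return qspi_src[sel];	/* 0: ck_axi, 1: pll3_r, 2: pll4_p, 3: ck_per */
}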
+static const char * const rng1_src[] = { + "ck_csi", "pll4_r", "ck_lse", "ck_lsi" +}; + +static const char * const saes_src[] = { + "ck_axi", "ck_per", "pll4_r", "ck_lsi" +}; + +static const char * const sai1_src[] = { + "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r" +}; + +static const char * const sai2_src[] = { + "pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r" +}; + +static const char * const sdmmc12_src[] = { + "ck_axi", "pll3_r", "pll4_p", "ck_hsi" +}; + +static const char * const spdif_src[] = { + "pll4_p", "pll3_q", "ck_hsi" +}; + +static const char * const spi123_src[] = { + "pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r" +}; + +static const char * const spi4_src[] = { + "pclk6", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "i2s_ckin" +}; + +static const char * const spi5_src[] = { + "pclk6", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" +}; + +static const char * const stgen_src[] = { + "ck_hsi", "ck_hse" +}; + +static const char * const usart12_src[] = { + "pclk6", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse" +}; + +static const char * const usart34578_src[] = { + "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" +}; + +static const char * const usart6_src[] = { + "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse" +}; + +static const char * const usbo_src[] = { + "pll4_r", "ck_usbo_48m" +}; + +static const char * const usbphy_src[] = { + "ck_hse", "pll4_r", "clk-hse-div2" +}; + +/* Timer clocks */ +static struct clk_stm32_gate tim2_k = { + .gate_id = GATE_TIM2, + .hw.init = CLK_HW_INIT("tim2_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim3_k = { + .gate_id = GATE_TIM3, + .hw.init = CLK_HW_INIT("tim3_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim4_k = { + .gate_id = GATE_TIM4, + .hw.init = CLK_HW_INIT("tim4_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim5_k = { + .gate_id = GATE_TIM5, + .hw.init = CLK_HW_INIT("tim5_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim6_k = { + .gate_id = GATE_TIM6, + .hw.init = CLK_HW_INIT("tim6_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim7_k = { + .gate_id = GATE_TIM7, + .hw.init = CLK_HW_INIT("tim7_k", "timg1_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim1_k = { + .gate_id = GATE_TIM1, + .hw.init = CLK_HW_INIT("tim1_k", "timg2_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim8_k = { + .gate_id = GATE_TIM8, + .hw.init = CLK_HW_INIT("tim8_k", "timg2_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim12_k = { + .gate_id = GATE_TIM12, + .hw.init = CLK_HW_INIT("tim12_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim13_k = { + .gate_id = GATE_TIM13, + .hw.init = CLK_HW_INIT("tim13_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim14_k = { + .gate_id = GATE_TIM14, + .hw.init = CLK_HW_INIT("tim14_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim15_k = { + .gate_id = GATE_TIM15, + .hw.init = CLK_HW_INIT("tim15_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_gate tim16_k = { + .gate_id = GATE_TIM16, + .hw.init = CLK_HW_INIT("tim16_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + 
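These clk_stm32_gate objects carry only a gate_id into stm32mp13_gates[]; the register programming they imply follows the set/clear model encoded by CFG_GATE_SETCLR() and RCC_CLR_OFFSET above. The sketch below is one plausible reading of that layout, using the offset/bit_idx/set_clr fields from the _CFG_GATE() initializers; the helper is illustrative and is not the driver's actual clk-stm32-core implementation.

/* Illustration only: how a gate described by stm32mp13_gates[] is toggled. */
static void stm32mp13_gate_set(void __iomem *rcc_base,
			       const struct stm32_gate_cfg *gate, bool enable)
{
	if (gate->set_clr) {
		/* ...ENSETR/...ENCLRR pair: write-one-to-set, write-one-to-clear */
		u32 offset = enable ? gate->offset : gate->offset + gate->set_clr;

		writel(BIT(gate->bit_idx), rcc_base + offset);
	} else {
		/* single enable register (e.g. RCC_DDRITFCR): read-modify-write */
		u32 val = readl(rcc_base + gate->offset);

		if (enable)
			val |= BIT(gate->bit_idx);
		else
			val &= ~BIT(gate->bit_idx);
		writel(val, rcc_base + gate->offset);
	}
}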
+static struct clk_stm32_gate tim17_k = { + .gate_id = GATE_TIM17, + .hw.init = CLK_HW_INIT("tim17_k", "timg3_ck", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +/* Peripheral clocks */ +static struct clk_stm32_gate sai1 = { + .gate_id = GATE_SAI1, + .hw.init = CLK_HW_INIT("sai1", "pclk2", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate sai2 = { + .gate_id = GATE_SAI2, + .hw.init = CLK_HW_INIT("sai2", "pclk2", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate syscfg = { + .gate_id = GATE_SYSCFG, + .hw.init = CLK_HW_INIT("syscfg", "pclk3", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate vref = { + .gate_id = GATE_VREF, + .hw.init = CLK_HW_INIT("vref", "pclk3", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate dts = { + .gate_id = GATE_DTS, + .hw.init = CLK_HW_INIT("dts", "pclk3", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate pmbctrl = { + .gate_id = GATE_PMBCTRL, + .hw.init = CLK_HW_INIT("pmbctrl", "pclk3", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate hdp = { + .gate_id = GATE_HDP, + .hw.init = CLK_HW_INIT("hdp", "pclk3", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate iwdg2 = { + .gate_id = GATE_IWDG2APB, + .hw.init = CLK_HW_INIT("iwdg2", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate stgenro = { + .gate_id = GATE_STGENRO, + .hw.init = CLK_HW_INIT("stgenro", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpioa = { + .gate_id = GATE_GPIOA, + .hw.init = CLK_HW_INIT("gpioa", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpiob = { + .gate_id = GATE_GPIOB, + .hw.init = CLK_HW_INIT("gpiob", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpioc = { + .gate_id = GATE_GPIOC, + .hw.init = CLK_HW_INIT("gpioc", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpiod = { + .gate_id = GATE_GPIOD, + .hw.init = CLK_HW_INIT("gpiod", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpioe = { + .gate_id = GATE_GPIOE, + .hw.init = CLK_HW_INIT("gpioe", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpiof = { + .gate_id = GATE_GPIOF, + .hw.init = CLK_HW_INIT("gpiof", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpiog = { + .gate_id = GATE_GPIOG, + .hw.init = CLK_HW_INIT("gpiog", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpioh = { + .gate_id = GATE_GPIOH, + .hw.init = CLK_HW_INIT("gpioh", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate gpioi = { + .gate_id = GATE_GPIOI, + .hw.init = CLK_HW_INIT("gpioi", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate tsc = { + .gate_id = GATE_TSC, + .hw.init = CLK_HW_INIT("tsc", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ddrperfm = { + .gate_id = GATE_DDRPERFM, + .hw.init = CLK_HW_INIT("ddrperfm", "pclk4", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate tzpc = { + .gate_id = GATE_TZC, + .hw.init = CLK_HW_INIT("tzpc", "pclk5", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate iwdg1 = { + .gate_id = GATE_IWDG1APB, + .hw.init = CLK_HW_INIT("iwdg1", "pclk5", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate bsec = { + .gate_id = GATE_BSEC, + .hw.init = CLK_HW_INIT("bsec", "pclk5", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate dma1 = { + .gate_id = GATE_DMA1, + .hw.init = CLK_HW_INIT("dma1", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate 
dma2 = { + .gate_id = GATE_DMA2, + .hw.init = CLK_HW_INIT("dma2", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate dmamux1 = { + .gate_id = GATE_DMAMUX1, + .hw.init = CLK_HW_INIT("dmamux1", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate dma3 = { + .gate_id = GATE_DMA3, + .hw.init = CLK_HW_INIT("dma3", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate dmamux2 = { + .gate_id = GATE_DMAMUX2, + .hw.init = CLK_HW_INIT("dmamux2", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate adc1 = { + .gate_id = GATE_ADC1, + .hw.init = CLK_HW_INIT("adc1", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate adc2 = { + .gate_id = GATE_ADC2, + .hw.init = CLK_HW_INIT("adc2", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate pka = { + .gate_id = GATE_PKA, + .hw.init = CLK_HW_INIT("pka", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate cryp1 = { + .gate_id = GATE_CRYP1, + .hw.init = CLK_HW_INIT("cryp1", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate hash1 = { + .gate_id = GATE_HASH1, + .hw.init = CLK_HW_INIT("hash1", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate bkpsram = { + .gate_id = GATE_BKPSRAM, + .hw.init = CLK_HW_INIT("bkpsram", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate mdma = { + .gate_id = GATE_MDMA, + .hw.init = CLK_HW_INIT("mdma", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth1tx = { + .gate_id = GATE_ETH1TX, + .hw.init = CLK_HW_INIT("eth1tx", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth1rx = { + .gate_id = GATE_ETH1RX, + .hw.init = CLK_HW_INIT("eth1rx", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth1mac = { + .gate_id = GATE_ETH1MAC, + .hw.init = CLK_HW_INIT("eth1mac", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth2tx = { + .gate_id = GATE_ETH2TX, + .hw.init = CLK_HW_INIT("eth2tx", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth2rx = { + .gate_id = GATE_ETH2RX, + .hw.init = CLK_HW_INIT("eth2rx", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth2mac = { + .gate_id = GATE_ETH2MAC, + .hw.init = CLK_HW_INIT("eth2mac", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate crc1 = { + .gate_id = GATE_CRC1, + .hw.init = CLK_HW_INIT("crc1", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate usbh = { + .gate_id = GATE_USBH, + .hw.init = CLK_HW_INIT("usbh", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth1stp = { + .gate_id = GATE_ETH1STP, + .hw.init = CLK_HW_INIT("eth1stp", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate eth2stp = { + .gate_id = GATE_ETH2STP, + .hw.init = CLK_HW_INIT("eth2stp", "ck_axi", &clk_stm32_gate_ops, 0), +}; + +/* Kernel clocks */ +static struct clk_stm32_composite sdmmc1_k = { + .gate_id = GATE_SDMMC1, + .mux_id = MUX_SDMMC1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("sdmmc1_k", sdmmc12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite sdmmc2_k = { + .gate_id = GATE_SDMMC2, + .mux_id = MUX_SDMMC2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("sdmmc2_k", sdmmc12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite fmc_k = { + 
.gate_id = GATE_FMC, + .mux_id = MUX_FMC, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("fmc_k", fmc_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite qspi_k = { + .gate_id = GATE_QSPI, + .mux_id = MUX_QSPI, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("qspi_k", qspi_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spi2_k = { + .gate_id = GATE_SPI2, + .mux_id = MUX_SPI23, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spi2_k", spi123_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spi3_k = { + .gate_id = GATE_SPI3, + .mux_id = MUX_SPI23, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spi3_k", spi123_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite i2c1_k = { + .gate_id = GATE_I2C1, + .mux_id = MUX_I2C12, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("i2c1_k", i2c12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite i2c2_k = { + .gate_id = GATE_I2C2, + .mux_id = MUX_I2C12, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("i2c2_k", i2c12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite lptim4_k = { + .gate_id = GATE_LPTIM4, + .mux_id = MUX_LPTIM45, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("lptim4_k", lptim45_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite lptim5_k = { + .gate_id = GATE_LPTIM5, + .mux_id = MUX_LPTIM45, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("lptim5_k", lptim45_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite usart3_k = { + .gate_id = GATE_USART3, + .mux_id = MUX_UART35, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("usart3_k", usart34578_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite uart5_k = { + .gate_id = GATE_UART5, + .mux_id = MUX_UART35, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("uart5_k", usart34578_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite uart7_k = { + .gate_id = GATE_UART7, + .mux_id = MUX_UART78, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("uart7_k", usart34578_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite uart8_k = { + .gate_id = GATE_UART8, + .mux_id = MUX_UART78, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("uart8_k", usart34578_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite sai1_k = { + .gate_id = GATE_SAI1, + .mux_id = MUX_SAI1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("sai1_k", sai1_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite adfsdm_k = { + .gate_id = GATE_ADFSDM, + .mux_id = MUX_SAI1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("adfsdm_k", sai1_src, 
&clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite sai2_k = { + .gate_id = GATE_SAI2, + .mux_id = MUX_SAI2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("sai2_k", sai2_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite adc1_k = { + .gate_id = GATE_ADC1, + .mux_id = MUX_ADC1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("adc1_k", adc12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite adc2_k = { + .gate_id = GATE_ADC2, + .mux_id = MUX_ADC2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("adc2_k", adc12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite rng1_k = { + .gate_id = GATE_RNG1, + .mux_id = MUX_RNG1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("rng1_k", rng1_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite usbphy_k = { + .gate_id = GATE_USBPHY, + .mux_id = MUX_USBPHY, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("usbphy_k", usbphy_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite stgen_k = { + .gate_id = GATE_STGENC, + .mux_id = MUX_STGEN, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("stgen_k", stgen_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spdif_k = { + .gate_id = GATE_SPDIF, + .mux_id = MUX_SPDIF, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spdif_k", spdif_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spi1_k = { + .gate_id = GATE_SPI1, + .mux_id = MUX_SPI1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spi1_k", spi123_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spi4_k = { + .gate_id = GATE_SPI4, + .mux_id = MUX_SPI4, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spi4_k", spi4_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite spi5_k = { + .gate_id = GATE_SPI5, + .mux_id = MUX_SPI5, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("spi5_k", spi5_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite i2c3_k = { + .gate_id = GATE_I2C3, + .mux_id = MUX_I2C3, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("i2c3_k", i2c345_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite i2c4_k = { + .gate_id = GATE_I2C4, + .mux_id = MUX_I2C4, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("i2c4_k", i2c345_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite i2c5_k = { + .gate_id = GATE_I2C5, + .mux_id = MUX_I2C5, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("i2c5_k", i2c345_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite lptim1_k = { + .gate_id = GATE_LPTIM1, + .mux_id = MUX_LPTIM1, + .div_id = 
NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("lptim1_k", lptim1_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite lptim2_k = { + .gate_id = GATE_LPTIM2, + .mux_id = MUX_LPTIM2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("lptim2_k", lptim23_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite lptim3_k = { + .gate_id = GATE_LPTIM3, + .mux_id = MUX_LPTIM3, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("lptim3_k", lptim23_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite usart1_k = { + .gate_id = GATE_USART1, + .mux_id = MUX_UART1, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("usart1_k", usart12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite usart2_k = { + .gate_id = GATE_USART2, + .mux_id = MUX_UART2, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("usart2_k", usart12_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite uart4_k = { + .gate_id = GATE_UART4, + .mux_id = MUX_UART4, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("uart4_k", usart34578_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite uart6_k = { + .gate_id = GATE_USART6, + .mux_id = MUX_UART6, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("uart6_k", usart6_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite fdcan_k = { + .gate_id = GATE_FDCAN, + .mux_id = MUX_FDCAN, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("fdcan_k", fdcan_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite dcmipp_k = { + .gate_id = GATE_DCMIPP, + .mux_id = MUX_DCMIPP, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("dcmipp_k", dcmipp_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite usbo_k = { + .gate_id = GATE_USBO, + .mux_id = MUX_USBO, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("usbo_k", usbo_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite saes_k = { + .gate_id = GATE_SAES, + .mux_id = MUX_SAES, + .div_id = NO_STM32_DIV, + .hw.init = CLK_HW_INIT_PARENTS("saes_k", saes_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_gate dfsdm_k = { + .gate_id = GATE_DFSDM, + .hw.init = CLK_HW_INIT("dfsdm_k", "ck_mlahb", &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_gate ltdc_px = { + .gate_id = GATE_LTDC, + .hw.init = CLK_HW_INIT("ltdc_px", "pll4_q", &clk_stm32_gate_ops, CLK_SET_RATE_PARENT), +}; + +static struct clk_stm32_mux ck_ker_eth1 = { + .mux_id = MUX_ETH1, + .hw.init = CLK_HW_INIT_PARENTS("ck_ker_eth1", eth12_src, &clk_stm32_mux_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_gate eth1ck_k = { + .gate_id = GATE_ETH1CK, + .hw.init = CLK_HW_INIT_HW("eth1ck_k", &ck_ker_eth1.hw, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_div eth1ptp_k = { + .div_id = DIV_ETH1PTP, + .hw.init = 
CLK_HW_INIT_HW("eth1ptp_k", &ck_ker_eth1.hw, &clk_stm32_divider_ops, + CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_mux ck_ker_eth2 = { + .mux_id = MUX_ETH2, + .hw.init = CLK_HW_INIT_PARENTS("ck_ker_eth2", eth12_src, &clk_stm32_mux_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_gate eth2ck_k = { + .gate_id = GATE_ETH2CK, + .hw.init = CLK_HW_INIT_HW("eth2ck_k", &ck_ker_eth2.hw, &clk_stm32_gate_ops, 0), +}; + +static struct clk_stm32_div eth2ptp_k = { + .div_id = DIV_ETH2PTP, + .hw.init = CLK_HW_INIT_HW("eth2ptp_k", &ck_ker_eth2.hw, &clk_stm32_divider_ops, + CLK_SET_RATE_NO_REPARENT), +}; + +static struct clk_stm32_composite ck_mco1 = { + .gate_id = GATE_MCO1, + .mux_id = MUX_MCO1, + .div_id = DIV_MCO1, + .hw.init = CLK_HW_INIT_PARENTS("ck_mco1", mco1_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT | + CLK_IGNORE_UNUSED), +}; + +static struct clk_stm32_composite ck_mco2 = { + .gate_id = GATE_MCO2, + .mux_id = MUX_MCO2, + .div_id = DIV_MCO2, + .hw.init = CLK_HW_INIT_PARENTS("ck_mco2", mco2_src, &clk_stm32_composite_ops, + CLK_OPS_PARENT_ENABLE | CLK_SET_RATE_NO_REPARENT | + CLK_IGNORE_UNUSED), +}; + +/* Debug clocks */ +static struct clk_stm32_gate ck_sys_dbg = { + .gate_id = GATE_DBGCK, + .hw.init = CLK_HW_INIT("ck_sys_dbg", "ck_axi", &clk_stm32_gate_ops, CLK_IS_CRITICAL), +}; + +static struct clk_stm32_composite ck_trace = { + .gate_id = GATE_TRACECK, + .mux_id = NO_STM32_MUX, + .div_id = DIV_TRACE, + .hw.init = CLK_HW_INIT("ck_trace", "ck_axi", &clk_stm32_composite_ops, CLK_IGNORE_UNUSED), +}; + +static const struct clock_config stm32mp13_clock_cfg[] = { + /* Timer clocks */ + STM32_GATE_CFG(TIM2_K, tim2_k, SECF_NONE), + STM32_GATE_CFG(TIM3_K, tim3_k, SECF_NONE), + STM32_GATE_CFG(TIM4_K, tim4_k, SECF_NONE), + STM32_GATE_CFG(TIM5_K, tim5_k, SECF_NONE), + STM32_GATE_CFG(TIM6_K, tim6_k, SECF_NONE), + STM32_GATE_CFG(TIM7_K, tim7_k, SECF_NONE), + STM32_GATE_CFG(TIM1_K, tim1_k, SECF_NONE), + STM32_GATE_CFG(TIM8_K, tim8_k, SECF_NONE), + STM32_GATE_CFG(TIM12_K, tim12_k, SECF_TIM12), + STM32_GATE_CFG(TIM13_K, tim13_k, SECF_TIM13), + STM32_GATE_CFG(TIM14_K, tim14_k, SECF_TIM14), + STM32_GATE_CFG(TIM15_K, tim15_k, SECF_TIM15), + STM32_GATE_CFG(TIM16_K, tim16_k, SECF_TIM16), + STM32_GATE_CFG(TIM17_K, tim17_k, SECF_TIM17), + + /* Peripheral clocks */ + STM32_GATE_CFG(SAI1, sai1, SECF_NONE), + STM32_GATE_CFG(SAI2, sai2, SECF_NONE), + STM32_GATE_CFG(SYSCFG, syscfg, SECF_NONE), + STM32_GATE_CFG(VREF, vref, SECF_VREF), + STM32_GATE_CFG(DTS, dts, SECF_NONE), + STM32_GATE_CFG(PMBCTRL, pmbctrl, SECF_NONE), + STM32_GATE_CFG(HDP, hdp, SECF_NONE), + STM32_GATE_CFG(IWDG2, iwdg2, SECF_NONE), + STM32_GATE_CFG(STGENRO, stgenro, SECF_STGENRO), + STM32_GATE_CFG(TZPC, tzpc, SECF_TZC), + STM32_GATE_CFG(IWDG1, iwdg1, SECF_IWDG1), + STM32_GATE_CFG(BSEC, bsec, SECF_BSEC), + STM32_GATE_CFG(DMA1, dma1, SECF_NONE), + STM32_GATE_CFG(DMA2, dma2, SECF_NONE), + STM32_GATE_CFG(DMAMUX1, dmamux1, SECF_NONE), + STM32_GATE_CFG(DMA3, dma3, SECF_DMA3), + STM32_GATE_CFG(DMAMUX2, dmamux2, SECF_DMAMUX2), + STM32_GATE_CFG(ADC1, adc1, SECF_ADC1), + STM32_GATE_CFG(ADC2, adc2, SECF_ADC2), + STM32_GATE_CFG(GPIOA, gpioa, SECF_NONE), + STM32_GATE_CFG(GPIOB, gpiob, SECF_NONE), + STM32_GATE_CFG(GPIOC, gpioc, SECF_NONE), + STM32_GATE_CFG(GPIOD, gpiod, SECF_NONE), + STM32_GATE_CFG(GPIOE, gpioe, SECF_NONE), + STM32_GATE_CFG(GPIOF, gpiof, SECF_NONE), + STM32_GATE_CFG(GPIOG, gpiog, SECF_NONE), + STM32_GATE_CFG(GPIOH, gpioh, SECF_NONE), + STM32_GATE_CFG(GPIOI, gpioi, 
SECF_NONE), + STM32_GATE_CFG(TSC, tsc, SECF_TZC), + STM32_GATE_CFG(PKA, pka, SECF_PKA), + STM32_GATE_CFG(CRYP1, cryp1, SECF_CRYP1), + STM32_GATE_CFG(HASH1, hash1, SECF_HASH1), + STM32_GATE_CFG(BKPSRAM, bkpsram, SECF_BKPSRAM), + STM32_GATE_CFG(MDMA, mdma, SECF_NONE), + STM32_GATE_CFG(ETH1TX, eth1tx, SECF_ETH1TX), + STM32_GATE_CFG(ETH1RX, eth1rx, SECF_ETH1RX), + STM32_GATE_CFG(ETH1MAC, eth1mac, SECF_ETH1MAC), + STM32_GATE_CFG(ETH2TX, eth2tx, SECF_ETH2TX), + STM32_GATE_CFG(ETH2RX, eth2rx, SECF_ETH2RX), + STM32_GATE_CFG(ETH2MAC, eth2mac, SECF_ETH2MAC), + STM32_GATE_CFG(CRC1, crc1, SECF_NONE), + STM32_GATE_CFG(USBH, usbh, SECF_NONE), + STM32_GATE_CFG(DDRPERFM, ddrperfm, SECF_NONE), + STM32_GATE_CFG(ETH1STP, eth1stp, SECF_ETH1STP), + STM32_GATE_CFG(ETH2STP, eth2stp, SECF_ETH2STP), + + /* Kernel clocks */ + STM32_COMPOSITE_CFG(SDMMC1_K, sdmmc1_k, SECF_SDMMC1), + STM32_COMPOSITE_CFG(SDMMC2_K, sdmmc2_k, SECF_SDMMC2), + STM32_COMPOSITE_CFG(FMC_K, fmc_k, SECF_FMC), + STM32_COMPOSITE_CFG(QSPI_K, qspi_k, SECF_QSPI), + STM32_COMPOSITE_CFG(SPI2_K, spi2_k, SECF_NONE), + STM32_COMPOSITE_CFG(SPI3_K, spi3_k, SECF_NONE), + STM32_COMPOSITE_CFG(I2C1_K, i2c1_k, SECF_NONE), + STM32_COMPOSITE_CFG(I2C2_K, i2c2_k, SECF_NONE), + STM32_COMPOSITE_CFG(LPTIM4_K, lptim4_k, SECF_NONE), + STM32_COMPOSITE_CFG(LPTIM5_K, lptim5_k, SECF_NONE), + STM32_COMPOSITE_CFG(USART3_K, usart3_k, SECF_NONE), + STM32_COMPOSITE_CFG(UART5_K, uart5_k, SECF_NONE), + STM32_COMPOSITE_CFG(UART7_K, uart7_k, SECF_NONE), + STM32_COMPOSITE_CFG(UART8_K, uart8_k, SECF_NONE), + STM32_COMPOSITE_CFG(SAI1_K, sai1_k, SECF_NONE), + STM32_COMPOSITE_CFG(SAI2_K, sai2_k, SECF_NONE), + STM32_COMPOSITE_CFG(ADFSDM_K, adfsdm_k, SECF_NONE), + STM32_COMPOSITE_CFG(ADC1_K, adc1_k, SECF_ADC1), + STM32_COMPOSITE_CFG(ADC2_K, adc2_k, SECF_ADC2), + STM32_COMPOSITE_CFG(RNG1_K, rng1_k, SECF_RNG1), + STM32_COMPOSITE_CFG(USBPHY_K, usbphy_k, SECF_USBPHY), + STM32_COMPOSITE_CFG(STGEN_K, stgen_k, SECF_STGENC), + STM32_COMPOSITE_CFG(SPDIF_K, spdif_k, SECF_NONE), + STM32_COMPOSITE_CFG(SPI1_K, spi1_k, SECF_NONE), + STM32_COMPOSITE_CFG(SPI4_K, spi4_k, SECF_SPI4), + STM32_COMPOSITE_CFG(SPI5_K, spi5_k, SECF_SPI5), + STM32_COMPOSITE_CFG(I2C3_K, i2c3_k, SECF_I2C3), + STM32_COMPOSITE_CFG(I2C4_K, i2c4_k, SECF_I2C4), + STM32_COMPOSITE_CFG(I2C5_K, i2c5_k, SECF_I2C5), + STM32_COMPOSITE_CFG(LPTIM1_K, lptim1_k, SECF_NONE), + STM32_COMPOSITE_CFG(LPTIM2_K, lptim2_k, SECF_LPTIM2), + STM32_COMPOSITE_CFG(LPTIM3_K, lptim3_k, SECF_LPTIM3), + STM32_COMPOSITE_CFG(USART1_K, usart1_k, SECF_USART1), + STM32_COMPOSITE_CFG(USART2_K, usart2_k, SECF_USART2), + STM32_COMPOSITE_CFG(UART4_K, uart4_k, SECF_NONE), + STM32_COMPOSITE_CFG(USART6_K, uart6_k, SECF_NONE), + STM32_COMPOSITE_CFG(FDCAN_K, fdcan_k, SECF_NONE), + STM32_COMPOSITE_CFG(DCMIPP_K, dcmipp_k, SECF_DCMIPP), + STM32_COMPOSITE_CFG(USBO_K, usbo_k, SECF_USBO), + STM32_COMPOSITE_CFG(SAES_K, saes_k, SECF_SAES), + STM32_GATE_CFG(DFSDM_K, dfsdm_k, SECF_NONE), + STM32_GATE_CFG(LTDC_PX, ltdc_px, SECF_NONE), + + STM32_MUX_CFG(NO_ID, ck_ker_eth1, SECF_ETH1CK), + STM32_GATE_CFG(ETH1CK_K, eth1ck_k, SECF_ETH1CK), + STM32_DIV_CFG(ETH1PTP_K, eth1ptp_k, SECF_ETH1CK), + + STM32_MUX_CFG(NO_ID, ck_ker_eth2, SECF_ETH2CK), + STM32_GATE_CFG(ETH2CK_K, eth2ck_k, SECF_ETH2CK), + STM32_DIV_CFG(ETH2PTP_K, eth2ptp_k, SECF_ETH2CK), + + STM32_GATE_CFG(CK_DBG, ck_sys_dbg, SECF_NONE), + STM32_COMPOSITE_CFG(CK_TRACE, ck_trace, SECF_NONE), + + STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, SECF_MCO1), + STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, SECF_MCO2), +}; + +static int 
stm32mp13_clock_is_provided_by_secure(void __iomem *base, + const struct clock_config *cfg) +{ + int sec_id = cfg->sec_id; + + if (sec_id != SECF_NONE) { + const struct clk_stm32_securiy *secf; + + secf = &stm32mp13_security[sec_id]; + + return !!(readl(base + secf->offset) & BIT(secf->bit_idx)); + } + + return 0; +} + +struct multi_mux { + struct clk_hw *hw1; + struct clk_hw *hw2; +}; + +static struct multi_mux *stm32_mp13_multi_mux[MUX_NB] = { + [MUX_SPI23] = &(struct multi_mux){ &spi2_k.hw, &spi3_k.hw }, + [MUX_I2C12] = &(struct multi_mux){ &i2c1_k.hw, &i2c2_k.hw }, + [MUX_LPTIM45] = &(struct multi_mux){ &lptim4_k.hw, &lptim5_k.hw }, + [MUX_UART35] = &(struct multi_mux){ &usart3_k.hw, &uart5_k.hw }, + [MUX_UART78] = &(struct multi_mux){ &uart7_k.hw, &uart8_k.hw }, + [MUX_SAI1] = &(struct multi_mux){ &sai1_k.hw, &adfsdm_k.hw }, +}; + +static struct clk_hw *stm32mp13_is_multi_mux(struct clk_hw *hw) +{ + struct clk_stm32_composite *composite = to_clk_stm32_composite(hw); + struct multi_mux *mmux = stm32_mp13_multi_mux[composite->mux_id]; + + if (mmux) { + if (!(mmux->hw1 == hw)) + return mmux->hw1; + else + return mmux->hw2; + } + + return NULL; +} + +static u16 stm32mp13_cpt_gate[GATE_NB]; + +static struct clk_stm32_clock_data stm32mp13_clock_data = { + .gate_cpt = stm32mp13_cpt_gate, + .gates = stm32mp13_gates, + .muxes = stm32mp13_muxes, + .dividers = stm32mp13_dividers, + .is_multi_mux = stm32mp13_is_multi_mux, +}; + +static const struct stm32_rcc_match_data stm32mp13_data = { + .tab_clocks = stm32mp13_clock_cfg, + .num_clocks = ARRAY_SIZE(stm32mp13_clock_cfg), + .clock_data = &stm32mp13_clock_data, + .check_security = &stm32mp13_clock_is_provided_by_secure, + .maxbinding = STM32MP1_LAST_CLK, + .clear_offset = RCC_CLR_OFFSET, +}; + +static const struct of_device_id stm32mp13_match_data[] = { + { + .compatible = "st,stm32mp13-rcc", + .data = &stm32mp13_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, stm32mp13_match_data); + +static int stm32mp1_rcc_init(struct device *dev) +{ + void __iomem *rcc_base; + int ret = -ENOMEM; + + rcc_base = of_iomap(dev_of_node(dev), 0); + if (!rcc_base) { + dev_err(dev, "%pOFn: unable to map resource", dev_of_node(dev)); + goto out; + } + + ret = stm32_rcc_init(dev, stm32mp13_match_data, rcc_base); +out: + if (ret) { + if (rcc_base) + iounmap(rcc_base); + + of_node_put(dev_of_node(dev)); + } + + return ret; +} + +static int get_clock_deps(struct device *dev) +{ + static const char * const clock_deps_name[] = { + "hsi", "hse", "csi", "lsi", "lse", + }; + size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name); + struct clk **clk_deps; + int i; + + clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL); + if (!clk_deps) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) { + struct clk *clk = of_clk_get_by_name(dev_of_node(dev), + clock_deps_name[i]); + + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT) + return PTR_ERR(clk); + } else { + /* Device gets a reference count on the clock */ + clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk)); + clk_put(clk); + } + } + + return 0; +} + +static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret = get_clock_deps(dev); + + if (!ret) + ret = stm32mp1_rcc_init(dev); + + return ret; +} + +static int stm32mp1_rcc_clocks_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *child, *np = dev_of_node(dev); + + for_each_available_child_of_node(np, child) + 
of_clk_del_provider(child); + + return 0; +} + +static struct platform_driver stm32mp13_rcc_clocks_driver = { + .driver = { + .name = "stm32mp13_rcc", + .of_match_table = stm32mp13_match_data, + }, + .probe = stm32mp1_rcc_clocks_probe, + .remove = stm32mp1_rcc_clocks_remove, +}; + +static int __init stm32mp13_clocks_init(void) +{ + return platform_driver_register(&stm32mp13_rcc_clocks_driver); +} +core_initcall(stm32mp13_clocks_init); diff --git a/drivers/clk/stm32/reset-stm32.c b/drivers/clk/stm32/reset-stm32.c new file mode 100644 index 000000000000..040870130e4b --- /dev/null +++ b/drivers/clk/stm32/reset-stm32.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics. + */ + +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "clk-stm32-core.h" + +#define STM32_RESET_ID_MASK GENMASK(15, 0) + +struct stm32_reset_data { + /* reset lock */ + spinlock_t lock; + struct reset_controller_dev rcdev; + void __iomem *membase; + u32 clear_offset; +}; + +static inline struct stm32_reset_data * +to_stm32_reset_data(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct stm32_reset_data, rcdev); +} + +static int stm32_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, bool assert) +{ + struct stm32_reset_data *data = to_stm32_reset_data(rcdev); + int reg_width = sizeof(u32); + int bank = id / (reg_width * BITS_PER_BYTE); + int offset = id % (reg_width * BITS_PER_BYTE); + + if (data->clear_offset) { + void __iomem *addr; + + addr = data->membase + (bank * reg_width); + if (!assert) + addr += data->clear_offset; + + writel(BIT(offset), addr); + + } else { + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&data->lock, flags); + + reg = readl(data->membase + (bank * reg_width)); + + if (assert) + reg |= BIT(offset); + else + reg &= ~BIT(offset); + + writel(reg, data->membase + (bank * reg_width)); + + spin_unlock_irqrestore(&data->lock, flags); + } + + return 0; +} + +static int stm32_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return stm32_reset_update(rcdev, id, true); +} + +static int stm32_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return stm32_reset_update(rcdev, id, false); +} + +static int stm32_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct stm32_reset_data *data = to_stm32_reset_data(rcdev); + int reg_width = sizeof(u32); + int bank = id / (reg_width * BITS_PER_BYTE); + int offset = id % (reg_width * BITS_PER_BYTE); + u32 reg; + + reg = readl(data->membase + (bank * reg_width)); + + return !!(reg & BIT(offset)); +} + +static const struct reset_control_ops stm32_reset_ops = { + .assert = stm32_reset_assert, + .deassert = stm32_reset_deassert, + .status = stm32_reset_status, +}; + +int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match, + void __iomem *base) +{ + const struct stm32_rcc_match_data *data = match->data; + struct stm32_reset_data *reset_data = NULL; + + data = match->data; + + reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL); + if (!reset_data) + return -ENOMEM; + + reset_data->membase = base; + reset_data->rcdev.owner = THIS_MODULE; + reset_data->rcdev.ops = &stm32_reset_ops; + reset_data->rcdev.of_node = dev_of_node(dev); + 
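	/*
	 * Reset IDs handed in by consumers encode bank and bit as in
	 * stm32_reset_update() above (bank = id / 32, bit = id % 32), so
	 * nr_resets below bounds the 16-bit ID space (STM32_RESET_ID_MASK)
	 * rather than counting individual wired lines.
	 */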
reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK; + reset_data->clear_offset = data->clear_offset; + + return reset_controller_register(&reset_data->rcdev); +} diff --git a/drivers/clk/stm32/reset-stm32.h b/drivers/clk/stm32/reset-stm32.h new file mode 100644 index 000000000000..6eb6ea4b55ab --- /dev/null +++ b/drivers/clk/stm32/reset-stm32.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) STMicroelectronics 2022 - All Rights Reserved + * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics. + */ + +int stm32_rcc_reset_init(struct device *dev, const struct of_device_id *match, + void __iomem *base); diff --git a/drivers/clk/stm32/stm32mp13_rcc.h b/drivers/clk/stm32/stm32mp13_rcc.h new file mode 100644 index 000000000000..a82512ae08f2 --- /dev/null +++ b/drivers/clk/stm32/stm32mp13_rcc.h @@ -0,0 +1,1748 @@ +/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */ +/* + * Copyright (C) 2020, STMicroelectronics - All Rights Reserved + * + * Configuration settings for the STM32MP13x CPU + */ + +#ifndef STM32MP13_RCC_H +#define STM32MP13_RCC_H +/* RCC registers */ +#define RCC_SECCFGR 0x0 +#define RCC_MP_SREQSETR 0x100 +#define RCC_MP_SREQCLRR 0x104 +#define RCC_MP_APRSTCR 0x108 +#define RCC_MP_APRSTSR 0x10c +#define RCC_PWRLPDLYCR 0x110 +#define RCC_MP_GRSTCSETR 0x114 +#define RCC_BR_RSTSCLRR 0x118 +#define RCC_MP_RSTSSETR 0x11c +#define RCC_MP_RSTSCLRR 0x120 +#define RCC_MP_IWDGFZSETR 0x124 +#define RCC_MP_IWDGFZCLRR 0x128 +#define RCC_MP_CIER 0x200 +#define RCC_MP_CIFR 0x204 +#define RCC_BDCR 0x400 +#define RCC_RDLSICR 0x404 +#define RCC_OCENSETR 0x420 +#define RCC_OCENCLRR 0x424 +#define RCC_OCRDYR 0x428 +#define RCC_HSICFGR 0x440 +#define RCC_CSICFGR 0x444 +#define RCC_MCO1CFGR 0x460 +#define RCC_MCO2CFGR 0x464 +#define RCC_DBGCFGR 0x468 +#define RCC_RCK12SELR 0x480 +#define RCC_RCK3SELR 0x484 +#define RCC_RCK4SELR 0x488 +#define RCC_PLL1CR 0x4a0 +#define RCC_PLL1CFGR1 0x4a4 +#define RCC_PLL1CFGR2 0x4a8 +#define RCC_PLL1FRACR 0x4ac +#define RCC_PLL1CSGR 0x4b0 +#define RCC_PLL2CR 0x4d0 +#define RCC_PLL2CFGR1 0x4d4 +#define RCC_PLL2CFGR2 0x4d8 +#define RCC_PLL2FRACR 0x4dc +#define RCC_PLL2CSGR 0x4e0 +#define RCC_PLL3CR 0x500 +#define RCC_PLL3CFGR1 0x504 +#define RCC_PLL3CFGR2 0x508 +#define RCC_PLL3FRACR 0x50c +#define RCC_PLL3CSGR 0x510 +#define RCC_PLL4CR 0x520 +#define RCC_PLL4CFGR1 0x524 +#define RCC_PLL4CFGR2 0x528 +#define RCC_PLL4FRACR 0x52c +#define RCC_PLL4CSGR 0x530 +#define RCC_MPCKSELR 0x540 +#define RCC_ASSCKSELR 0x544 +#define RCC_MSSCKSELR 0x548 +#define RCC_CPERCKSELR 0x54c +#define RCC_RTCDIVR 0x560 +#define RCC_MPCKDIVR 0x564 +#define RCC_AXIDIVR 0x568 +#define RCC_MLAHBDIVR 0x56c +#define RCC_APB1DIVR 0x570 +#define RCC_APB2DIVR 0x574 +#define RCC_APB3DIVR 0x578 +#define RCC_APB4DIVR 0x57c +#define RCC_APB5DIVR 0x580 +#define RCC_APB6DIVR 0x584 +#define RCC_TIMG1PRER 0x5a0 +#define RCC_TIMG2PRER 0x5a4 +#define RCC_TIMG3PRER 0x5a8 +#define RCC_DDRITFCR 0x5c0 +#define RCC_I2C12CKSELR 0x600 +#define RCC_I2C345CKSELR 0x604 +#define RCC_SPI2S1CKSELR 0x608 +#define RCC_SPI2S23CKSELR 0x60c +#define RCC_SPI45CKSELR 0x610 +#define RCC_UART12CKSELR 0x614 +#define RCC_UART35CKSELR 0x618 +#define RCC_UART4CKSELR 0x61c +#define RCC_UART6CKSELR 0x620 +#define RCC_UART78CKSELR 0x624 +#define RCC_LPTIM1CKSELR 0x628 +#define RCC_LPTIM23CKSELR 0x62c +#define RCC_LPTIM45CKSELR 0x630 +#define RCC_SAI1CKSELR 0x634 +#define RCC_SAI2CKSELR 0x638 +#define RCC_FDCANCKSELR 0x63c +#define RCC_SPDIFCKSELR 0x640 +#define RCC_ADC12CKSELR 0x644 
+#define RCC_SDMMC12CKSELR 0x648 +#define RCC_ETH12CKSELR 0x64c +#define RCC_USBCKSELR 0x650 +#define RCC_QSPICKSELR 0x654 +#define RCC_FMCCKSELR 0x658 +#define RCC_RNG1CKSELR 0x65c +#define RCC_STGENCKSELR 0x660 +#define RCC_DCMIPPCKSELR 0x664 +#define RCC_SAESCKSELR 0x668 +#define RCC_APB1RSTSETR 0x6a0 +#define RCC_APB1RSTCLRR 0x6a4 +#define RCC_APB2RSTSETR 0x6a8 +#define RCC_APB2RSTCLRR 0x6ac +#define RCC_APB3RSTSETR 0x6b0 +#define RCC_APB3RSTCLRR 0x6b4 +#define RCC_APB4RSTSETR 0x6b8 +#define RCC_APB4RSTCLRR 0x6bc +#define RCC_APB5RSTSETR 0x6c0 +#define RCC_APB5RSTCLRR 0x6c4 +#define RCC_APB6RSTSETR 0x6c8 +#define RCC_APB6RSTCLRR 0x6cc +#define RCC_AHB2RSTSETR 0x6d0 +#define RCC_AHB2RSTCLRR 0x6d4 +#define RCC_AHB4RSTSETR 0x6e0 +#define RCC_AHB4RSTCLRR 0x6e4 +#define RCC_AHB5RSTSETR 0x6e8 +#define RCC_AHB5RSTCLRR 0x6ec +#define RCC_AHB6RSTSETR 0x6f0 +#define RCC_AHB6RSTCLRR 0x6f4 +#define RCC_MP_APB1ENSETR 0x700 +#define RCC_MP_APB1ENCLRR 0x704 +#define RCC_MP_APB2ENSETR 0x708 +#define RCC_MP_APB2ENCLRR 0x70c +#define RCC_MP_APB3ENSETR 0x710 +#define RCC_MP_APB3ENCLRR 0x714 +#define RCC_MP_S_APB3ENSETR 0x718 +#define RCC_MP_S_APB3ENCLRR 0x71c +#define RCC_MP_NS_APB3ENSETR 0x720 +#define RCC_MP_NS_APB3ENCLRR 0x724 +#define RCC_MP_APB4ENSETR 0x728 +#define RCC_MP_APB4ENCLRR 0x72c +#define RCC_MP_S_APB4ENSETR 0x730 +#define RCC_MP_S_APB4ENCLRR 0x734 +#define RCC_MP_NS_APB4ENSETR 0x738 +#define RCC_MP_NS_APB4ENCLRR 0x73c +#define RCC_MP_APB5ENSETR 0x740 +#define RCC_MP_APB5ENCLRR 0x744 +#define RCC_MP_APB6ENSETR 0x748 +#define RCC_MP_APB6ENCLRR 0x74c +#define RCC_MP_AHB2ENSETR 0x750 +#define RCC_MP_AHB2ENCLRR 0x754 +#define RCC_MP_AHB4ENSETR 0x760 +#define RCC_MP_AHB4ENCLRR 0x764 +#define RCC_MP_S_AHB4ENSETR 0x768 +#define RCC_MP_S_AHB4ENCLRR 0x76c +#define RCC_MP_NS_AHB4ENSETR 0x770 +#define RCC_MP_NS_AHB4ENCLRR 0x774 +#define RCC_MP_AHB5ENSETR 0x778 +#define RCC_MP_AHB5ENCLRR 0x77c +#define RCC_MP_AHB6ENSETR 0x780 +#define RCC_MP_AHB6ENCLRR 0x784 +#define RCC_MP_S_AHB6ENSETR 0x788 +#define RCC_MP_S_AHB6ENCLRR 0x78c +#define RCC_MP_NS_AHB6ENSETR 0x790 +#define RCC_MP_NS_AHB6ENCLRR 0x794 +#define RCC_MP_APB1LPENSETR 0x800 +#define RCC_MP_APB1LPENCLRR 0x804 +#define RCC_MP_APB2LPENSETR 0x808 +#define RCC_MP_APB2LPENCLRR 0x80c +#define RCC_MP_APB3LPENSETR 0x810 +#define RCC_MP_APB3LPENCLRR 0x814 +#define RCC_MP_S_APB3LPENSETR 0x818 +#define RCC_MP_S_APB3LPENCLRR 0x81c +#define RCC_MP_NS_APB3LPENSETR 0x820 +#define RCC_MP_NS_APB3LPENCLRR 0x824 +#define RCC_MP_APB4LPENSETR 0x828 +#define RCC_MP_APB4LPENCLRR 0x82c +#define RCC_MP_S_APB4LPENSETR 0x830 +#define RCC_MP_S_APB4LPENCLRR 0x834 +#define RCC_MP_NS_APB4LPENSETR 0x838 +#define RCC_MP_NS_APB4LPENCLRR 0x83c +#define RCC_MP_APB5LPENSETR 0x840 +#define RCC_MP_APB5LPENCLRR 0x844 +#define RCC_MP_APB6LPENSETR 0x848 +#define RCC_MP_APB6LPENCLRR 0x84c +#define RCC_MP_AHB2LPENSETR 0x850 +#define RCC_MP_AHB2LPENCLRR 0x854 +#define RCC_MP_AHB4LPENSETR 0x858 +#define RCC_MP_AHB4LPENCLRR 0x85c +#define RCC_MP_S_AHB4LPENSETR 0x868 +#define RCC_MP_S_AHB4LPENCLRR 0x86c +#define RCC_MP_NS_AHB4LPENSETR 0x870 +#define RCC_MP_NS_AHB4LPENCLRR 0x874 +#define RCC_MP_AHB5LPENSETR 0x878 +#define RCC_MP_AHB5LPENCLRR 0x87c +#define RCC_MP_AHB6LPENSETR 0x880 +#define RCC_MP_AHB6LPENCLRR 0x884 +#define RCC_MP_S_AHB6LPENSETR 0x888 +#define RCC_MP_S_AHB6LPENCLRR 0x88c +#define RCC_MP_NS_AHB6LPENSETR 0x890 +#define RCC_MP_NS_AHB6LPENCLRR 0x894 +#define RCC_MP_S_AXIMLPENSETR 0x898 +#define RCC_MP_S_AXIMLPENCLRR 0x89c +#define RCC_MP_NS_AXIMLPENSETR 0x8a0 +#define 
RCC_MP_NS_AXIMLPENCLRR 0x8a4 +#define RCC_MP_MLAHBLPENSETR 0x8a8 +#define RCC_MP_MLAHBLPENCLRR 0x8ac +#define RCC_APB3SECSR 0x8c0 +#define RCC_APB4SECSR 0x8c4 +#define RCC_APB5SECSR 0x8c8 +#define RCC_APB6SECSR 0x8cc +#define RCC_AHB2SECSR 0x8d0 +#define RCC_AHB4SECSR 0x8d4 +#define RCC_AHB5SECSR 0x8d8 +#define RCC_AHB6SECSR 0x8dc +#define RCC_VERR 0xff4 +#define RCC_IDR 0xff8 +#define RCC_SIDR 0xffc + +/* RCC_SECCFGR register fields */ +#define RCC_SECCFGR_HSISEC 0 +#define RCC_SECCFGR_CSISEC 1 +#define RCC_SECCFGR_HSESEC 2 +#define RCC_SECCFGR_LSISEC 3 +#define RCC_SECCFGR_LSESEC 4 +#define RCC_SECCFGR_PLL12SEC 8 +#define RCC_SECCFGR_PLL3SEC 9 +#define RCC_SECCFGR_PLL4SEC 10 +#define RCC_SECCFGR_MPUSEC 11 +#define RCC_SECCFGR_AXISEC 12 +#define RCC_SECCFGR_MLAHBSEC 13 +#define RCC_SECCFGR_APB3DIVSEC 16 +#define RCC_SECCFGR_APB4DIVSEC 17 +#define RCC_SECCFGR_APB5DIVSEC 18 +#define RCC_SECCFGR_APB6DIVSEC 19 +#define RCC_SECCFGR_TIMG3SEC 20 +#define RCC_SECCFGR_CPERSEC 21 +#define RCC_SECCFGR_MCO1SEC 22 +#define RCC_SECCFGR_MCO2SEC 23 +#define RCC_SECCFGR_STPSEC 24 +#define RCC_SECCFGR_RSTSEC 25 +#define RCC_SECCFGR_PWRSEC 31 + +/* RCC_MP_SREQSETR register fields */ +#define RCC_MP_SREQSETR_STPREQ_P0 BIT(0) + +/* RCC_MP_SREQCLRR register fields */ +#define RCC_MP_SREQCLRR_STPREQ_P0 BIT(0) + +/* RCC_MP_APRSTCR register fields */ +#define RCC_MP_APRSTCR_RDCTLEN BIT(0) +#define RCC_MP_APRSTCR_RSTTO_MASK GENMASK(14, 8) +#define RCC_MP_APRSTCR_RSTTO_SHIFT 8 + +/* RCC_MP_APRSTSR register fields */ +#define RCC_MP_APRSTSR_RSTTOV_MASK GENMASK(14, 8) +#define RCC_MP_APRSTSR_RSTTOV_SHIFT 8 + +/* RCC_PWRLPDLYCR register fields */ +#define RCC_PWRLPDLYCR_PWRLP_DLY_MASK GENMASK(21, 0) +#define RCC_PWRLPDLYCR_PWRLP_DLY_SHIFT 0 + +/* RCC_MP_GRSTCSETR register fields */ +#define RCC_MP_GRSTCSETR_MPSYSRST BIT(0) +#define RCC_MP_GRSTCSETR_MPUP0RST BIT(4) + +/* RCC_BR_RSTSCLRR register fields */ +#define RCC_BR_RSTSCLRR_PORRSTF BIT(0) +#define RCC_BR_RSTSCLRR_BORRSTF BIT(1) +#define RCC_BR_RSTSCLRR_PADRSTF BIT(2) +#define RCC_BR_RSTSCLRR_HCSSRSTF BIT(3) +#define RCC_BR_RSTSCLRR_VCORERSTF BIT(4) +#define RCC_BR_RSTSCLRR_VCPURSTF BIT(5) +#define RCC_BR_RSTSCLRR_MPSYSRSTF BIT(6) +#define RCC_BR_RSTSCLRR_IWDG1RSTF BIT(8) +#define RCC_BR_RSTSCLRR_IWDG2RSTF BIT(9) +#define RCC_BR_RSTSCLRR_MPUP0RSTF BIT(13) + +/* RCC_MP_RSTSSETR register fields */ +#define RCC_MP_RSTSSETR_PORRSTF BIT(0) +#define RCC_MP_RSTSSETR_BORRSTF BIT(1) +#define RCC_MP_RSTSSETR_PADRSTF BIT(2) +#define RCC_MP_RSTSSETR_HCSSRSTF BIT(3) +#define RCC_MP_RSTSSETR_VCORERSTF BIT(4) +#define RCC_MP_RSTSSETR_VCPURSTF BIT(5) +#define RCC_MP_RSTSSETR_MPSYSRSTF BIT(6) +#define RCC_MP_RSTSSETR_IWDG1RSTF BIT(8) +#define RCC_MP_RSTSSETR_IWDG2RSTF BIT(9) +#define RCC_MP_RSTSSETR_STP2RSTF BIT(10) +#define RCC_MP_RSTSSETR_STDBYRSTF BIT(11) +#define RCC_MP_RSTSSETR_CSTDBYRSTF BIT(12) +#define RCC_MP_RSTSSETR_MPUP0RSTF BIT(13) +#define RCC_MP_RSTSSETR_SPARE BIT(15) + +/* RCC_MP_RSTSCLRR register fields */ +#define RCC_MP_RSTSCLRR_PORRSTF BIT(0) +#define RCC_MP_RSTSCLRR_BORRSTF BIT(1) +#define RCC_MP_RSTSCLRR_PADRSTF BIT(2) +#define RCC_MP_RSTSCLRR_HCSSRSTF BIT(3) +#define RCC_MP_RSTSCLRR_VCORERSTF BIT(4) +#define RCC_MP_RSTSCLRR_VCPURSTF BIT(5) +#define RCC_MP_RSTSCLRR_MPSYSRSTF BIT(6) +#define RCC_MP_RSTSCLRR_IWDG1RSTF BIT(8) +#define RCC_MP_RSTSCLRR_IWDG2RSTF BIT(9) +#define RCC_MP_RSTSCLRR_STP2RSTF BIT(10) +#define RCC_MP_RSTSCLRR_STDBYRSTF BIT(11) +#define RCC_MP_RSTSCLRR_CSTDBYRSTF BIT(12) +#define RCC_MP_RSTSCLRR_MPUP0RSTF BIT(13) +#define 
RCC_MP_RSTSCLRR_SPARE BIT(15) + +/* RCC_MP_IWDGFZSETR register fields */ +#define RCC_MP_IWDGFZSETR_FZ_IWDG1 BIT(0) +#define RCC_MP_IWDGFZSETR_FZ_IWDG2 BIT(1) + +/* RCC_MP_IWDGFZCLRR register fields */ +#define RCC_MP_IWDGFZCLRR_FZ_IWDG1 BIT(0) +#define RCC_MP_IWDGFZCLRR_FZ_IWDG2 BIT(1) + +/* RCC_MP_CIER register fields */ +#define RCC_MP_CIER_LSIRDYIE BIT(0) +#define RCC_MP_CIER_LSERDYIE BIT(1) +#define RCC_MP_CIER_HSIRDYIE BIT(2) +#define RCC_MP_CIER_HSERDYIE BIT(3) +#define RCC_MP_CIER_CSIRDYIE BIT(4) +#define RCC_MP_CIER_PLL1DYIE BIT(8) +#define RCC_MP_CIER_PLL2DYIE BIT(9) +#define RCC_MP_CIER_PLL3DYIE BIT(10) +#define RCC_MP_CIER_PLL4DYIE BIT(11) +#define RCC_MP_CIER_LSECSSIE BIT(16) +#define RCC_MP_CIER_WKUPIE BIT(20) + +/* RCC_MP_CIFR register fields */ +#define RCC_MP_CIFR_LSIRDYF BIT(0) +#define RCC_MP_CIFR_LSERDYF BIT(1) +#define RCC_MP_CIFR_HSIRDYF BIT(2) +#define RCC_MP_CIFR_HSERDYF BIT(3) +#define RCC_MP_CIFR_CSIRDYF BIT(4) +#define RCC_MP_CIFR_PLL1DYF BIT(8) +#define RCC_MP_CIFR_PLL2DYF BIT(9) +#define RCC_MP_CIFR_PLL3DYF BIT(10) +#define RCC_MP_CIFR_PLL4DYF BIT(11) +#define RCC_MP_CIFR_LSECSSF BIT(16) +#define RCC_MP_CIFR_WKUPF BIT(20) + +/* RCC_BDCR register fields */ +#define RCC_BDCR_LSEON BIT(0) +#define RCC_BDCR_LSEBYP BIT(1) +#define RCC_BDCR_LSERDY BIT(2) +#define RCC_BDCR_DIGBYP BIT(3) +#define RCC_BDCR_LSEDRV_MASK GENMASK(5, 4) +#define RCC_BDCR_LSECSSON BIT(8) +#define RCC_BDCR_LSECSSD BIT(9) +#define RCC_BDCR_RTCSRC_MASK GENMASK(17, 16) +#define RCC_BDCR_RTCCKEN BIT(20) +#define RCC_BDCR_VSWRST BIT(31) +#define RCC_BDCR_LSEDRV_SHIFT 4 +#define RCC_BDCR_RTCSRC_SHIFT 16 + +/* RCC_RDLSICR register fields */ +#define RCC_RDLSICR_LSION BIT(0) +#define RCC_RDLSICR_LSIRDY BIT(1) +#define RCC_RDLSICR_MRD_MASK GENMASK(20, 16) +#define RCC_RDLSICR_EADLY_MASK GENMASK(26, 24) +#define RCC_RDLSICR_SPARE_MASK GENMASK(31, 27) +#define RCC_RDLSICR_MRD_SHIFT 16 +#define RCC_RDLSICR_EADLY_SHIFT 24 +#define RCC_RDLSICR_SPARE_SHIFT 27 + +/* RCC_OCENSETR register fields */ +#define RCC_OCENSETR_HSION BIT(0) +#define RCC_OCENSETR_HSIKERON BIT(1) +#define RCC_OCENSETR_CSION BIT(4) +#define RCC_OCENSETR_CSIKERON BIT(5) +#define RCC_OCENSETR_DIGBYP BIT(7) +#define RCC_OCENSETR_HSEON BIT(8) +#define RCC_OCENSETR_HSEKERON BIT(9) +#define RCC_OCENSETR_HSEBYP BIT(10) +#define RCC_OCENSETR_HSECSSON BIT(11) + +/* RCC_OCENCLRR register fields */ +#define RCC_OCENCLRR_HSION BIT(0) +#define RCC_OCENCLRR_HSIKERON BIT(1) +#define RCC_OCENCLRR_CSION BIT(4) +#define RCC_OCENCLRR_CSIKERON BIT(5) +#define RCC_OCENCLRR_DIGBYP BIT(7) +#define RCC_OCENCLRR_HSEON BIT(8) +#define RCC_OCENCLRR_HSEKERON BIT(9) +#define RCC_OCENCLRR_HSEBYP BIT(10) + +/* RCC_OCRDYR register fields */ +#define RCC_OCRDYR_HSIRDY BIT(0) +#define RCC_OCRDYR_HSIDIVRDY BIT(2) +#define RCC_OCRDYR_CSIRDY BIT(4) +#define RCC_OCRDYR_HSERDY BIT(8) +#define RCC_OCRDYR_MPUCKRDY BIT(23) +#define RCC_OCRDYR_AXICKRDY BIT(24) + +/* RCC_HSICFGR register fields */ +#define RCC_HSICFGR_HSIDIV_MASK GENMASK(1, 0) +#define RCC_HSICFGR_HSITRIM_MASK GENMASK(14, 8) +#define RCC_HSICFGR_HSICAL_MASK GENMASK(27, 16) +#define RCC_HSICFGR_HSIDIV_SHIFT 0 +#define RCC_HSICFGR_HSITRIM_SHIFT 8 +#define RCC_HSICFGR_HSICAL_SHIFT 16 + +/* RCC_CSICFGR register fields */ +#define RCC_CSICFGR_CSITRIM_MASK GENMASK(12, 8) +#define RCC_CSICFGR_CSICAL_MASK GENMASK(23, 16) +#define RCC_CSICFGR_CSITRIM_SHIFT 8 +#define RCC_CSICFGR_CSICAL_SHIFT 16 + +/* RCC_MCO1CFGR register fields */ +#define RCC_MCO1CFGR_MCO1SEL_MASK GENMASK(2, 0) +#define RCC_MCO1CFGR_MCO1DIV_MASK 
GENMASK(7, 4) +#define RCC_MCO1CFGR_MCO1ON BIT(12) +#define RCC_MCO1CFGR_MCO1SEL_SHIFT 0 +#define RCC_MCO1CFGR_MCO1DIV_SHIFT 4 + +/* RCC_MCO2CFGR register fields */ +#define RCC_MCO2CFGR_MCO2SEL_MASK GENMASK(2, 0) +#define RCC_MCO2CFGR_MCO2DIV_MASK GENMASK(7, 4) +#define RCC_MCO2CFGR_MCO2ON BIT(12) +#define RCC_MCO2CFGR_MCO2SEL_SHIFT 0 +#define RCC_MCO2CFGR_MCO2DIV_SHIFT 4 + +/* RCC_DBGCFGR register fields */ +#define RCC_DBGCFGR_TRACEDIV_MASK GENMASK(2, 0) +#define RCC_DBGCFGR_DBGCKEN BIT(8) +#define RCC_DBGCFGR_TRACECKEN BIT(9) +#define RCC_DBGCFGR_DBGRST BIT(12) +#define RCC_DBGCFGR_TRACEDIV_SHIFT 0 + +/* RCC_RCK12SELR register fields */ +#define RCC_RCK12SELR_PLL12SRC_MASK GENMASK(1, 0) +#define RCC_RCK12SELR_PLL12SRCRDY BIT(31) +#define RCC_RCK12SELR_PLL12SRC_SHIFT 0 + +/* RCC_RCK3SELR register fields */ +#define RCC_RCK3SELR_PLL3SRC_MASK GENMASK(1, 0) +#define RCC_RCK3SELR_PLL3SRCRDY BIT(31) +#define RCC_RCK3SELR_PLL3SRC_SHIFT 0 + +/* RCC_RCK4SELR register fields */ +#define RCC_RCK4SELR_PLL4SRC_MASK GENMASK(1, 0) +#define RCC_RCK4SELR_PLL4SRCRDY BIT(31) +#define RCC_RCK4SELR_PLL4SRC_SHIFT 0 + +/* RCC_PLL1CR register fields */ +#define RCC_PLL1CR_PLLON BIT(0) +#define RCC_PLL1CR_PLL1RDY BIT(1) +#define RCC_PLL1CR_SSCG_CTRL BIT(2) +#define RCC_PLL1CR_DIVPEN BIT(4) +#define RCC_PLL1CR_DIVQEN BIT(5) +#define RCC_PLL1CR_DIVREN BIT(6) + +/* RCC_PLL1CFGR1 register fields */ +#define RCC_PLL1CFGR1_DIVN_MASK GENMASK(8, 0) +#define RCC_PLL1CFGR1_DIVM1_MASK GENMASK(21, 16) +#define RCC_PLL1CFGR1_DIVN_SHIFT 0 +#define RCC_PLL1CFGR1_DIVM1_SHIFT 16 + +/* RCC_PLL1CFGR2 register fields */ +#define RCC_PLL1CFGR2_DIVP_MASK GENMASK(6, 0) +#define RCC_PLL1CFGR2_DIVQ_MASK GENMASK(14, 8) +#define RCC_PLL1CFGR2_DIVR_MASK GENMASK(22, 16) +#define RCC_PLL1CFGR2_DIVP_SHIFT 0 +#define RCC_PLL1CFGR2_DIVQ_SHIFT 8 +#define RCC_PLL1CFGR2_DIVR_SHIFT 16 + +/* RCC_PLL1FRACR register fields */ +#define RCC_PLL1FRACR_FRACV_MASK GENMASK(15, 3) +#define RCC_PLL1FRACR_FRACLE BIT(16) +#define RCC_PLL1FRACR_FRACV_SHIFT 3 + +/* RCC_PLL1CSGR register fields */ +#define RCC_PLL1CSGR_MOD_PER_MASK GENMASK(12, 0) +#define RCC_PLL1CSGR_TPDFN_DIS BIT(13) +#define RCC_PLL1CSGR_RPDFN_DIS BIT(14) +#define RCC_PLL1CSGR_SSCG_MODE BIT(15) +#define RCC_PLL1CSGR_INC_STEP_MASK GENMASK(30, 16) +#define RCC_PLL1CSGR_MOD_PER_SHIFT 0 +#define RCC_PLL1CSGR_INC_STEP_SHIFT 16 + +/* RCC_PLL2CR register fields */ +#define RCC_PLL2CR_PLLON BIT(0) +#define RCC_PLL2CR_PLL2RDY BIT(1) +#define RCC_PLL2CR_SSCG_CTRL BIT(2) +#define RCC_PLL2CR_DIVPEN BIT(4) +#define RCC_PLL2CR_DIVQEN BIT(5) +#define RCC_PLL2CR_DIVREN BIT(6) + +/* RCC_PLL2CFGR1 register fields */ +#define RCC_PLL2CFGR1_DIVN_MASK GENMASK(8, 0) +#define RCC_PLL2CFGR1_DIVM2_MASK GENMASK(21, 16) +#define RCC_PLL2CFGR1_DIVN_SHIFT 0 +#define RCC_PLL2CFGR1_DIVM2_SHIFT 16 + +/* RCC_PLL2CFGR2 register fields */ +#define RCC_PLL2CFGR2_DIVP_MASK GENMASK(6, 0) +#define RCC_PLL2CFGR2_DIVQ_MASK GENMASK(14, 8) +#define RCC_PLL2CFGR2_DIVR_MASK GENMASK(22, 16) +#define RCC_PLL2CFGR2_DIVP_SHIFT 0 +#define RCC_PLL2CFGR2_DIVQ_SHIFT 8 +#define RCC_PLL2CFGR2_DIVR_SHIFT 16 + +/* RCC_PLL2FRACR register fields */ +#define RCC_PLL2FRACR_FRACV_MASK GENMASK(15, 3) +#define RCC_PLL2FRACR_FRACLE BIT(16) +#define RCC_PLL2FRACR_FRACV_SHIFT 3 + +/* RCC_PLL2CSGR register fields */ +#define RCC_PLL2CSGR_MOD_PER_MASK GENMASK(12, 0) +#define RCC_PLL2CSGR_TPDFN_DIS BIT(13) +#define RCC_PLL2CSGR_RPDFN_DIS BIT(14) +#define RCC_PLL2CSGR_SSCG_MODE BIT(15) +#define RCC_PLL2CSGR_INC_STEP_MASK GENMASK(30, 16) +#define 
RCC_PLL2CSGR_MOD_PER_SHIFT 0 +#define RCC_PLL2CSGR_INC_STEP_SHIFT 16 + +/* RCC_PLL3CR register fields */ +#define RCC_PLL3CR_PLLON BIT(0) +#define RCC_PLL3CR_PLL3RDY BIT(1) +#define RCC_PLL3CR_SSCG_CTRL BIT(2) +#define RCC_PLL3CR_DIVPEN BIT(4) +#define RCC_PLL3CR_DIVQEN BIT(5) +#define RCC_PLL3CR_DIVREN BIT(6) + +/* RCC_PLL3CFGR1 register fields */ +#define RCC_PLL3CFGR1_DIVN_MASK GENMASK(8, 0) +#define RCC_PLL3CFGR1_DIVM3_MASK GENMASK(21, 16) +#define RCC_PLL3CFGR1_IFRGE_MASK GENMASK(25, 24) +#define RCC_PLL3CFGR1_DIVN_SHIFT 0 +#define RCC_PLL3CFGR1_DIVM3_SHIFT 16 +#define RCC_PLL3CFGR1_IFRGE_SHIFT 24 + +/* RCC_PLL3CFGR2 register fields */ +#define RCC_PLL3CFGR2_DIVP_MASK GENMASK(6, 0) +#define RCC_PLL3CFGR2_DIVQ_MASK GENMASK(14, 8) +#define RCC_PLL3CFGR2_DIVR_MASK GENMASK(22, 16) +#define RCC_PLL3CFGR2_DIVP_SHIFT 0 +#define RCC_PLL3CFGR2_DIVQ_SHIFT 8 +#define RCC_PLL3CFGR2_DIVR_SHIFT 16 + +/* RCC_PLL3FRACR register fields */ +#define RCC_PLL3FRACR_FRACV_MASK GENMASK(15, 3) +#define RCC_PLL3FRACR_FRACLE BIT(16) +#define RCC_PLL3FRACR_FRACV_SHIFT 3 + +/* RCC_PLL3CSGR register fields */ +#define RCC_PLL3CSGR_MOD_PER_MASK GENMASK(12, 0) +#define RCC_PLL3CSGR_TPDFN_DIS BIT(13) +#define RCC_PLL3CSGR_RPDFN_DIS BIT(14) +#define RCC_PLL3CSGR_SSCG_MODE BIT(15) +#define RCC_PLL3CSGR_INC_STEP_MASK GENMASK(30, 16) +#define RCC_PLL3CSGR_MOD_PER_SHIFT 0 +#define RCC_PLL3CSGR_INC_STEP_SHIFT 16 + +/* RCC_PLL4CR register fields */ +#define RCC_PLL4CR_PLLON BIT(0) +#define RCC_PLL4CR_PLL4RDY BIT(1) +#define RCC_PLL4CR_SSCG_CTRL BIT(2) +#define RCC_PLL4CR_DIVPEN BIT(4) +#define RCC_PLL4CR_DIVQEN BIT(5) +#define RCC_PLL4CR_DIVREN BIT(6) + +/* RCC_PLL4CFGR1 register fields */ +#define RCC_PLL4CFGR1_DIVN_MASK GENMASK(8, 0) +#define RCC_PLL4CFGR1_DIVM4_MASK GENMASK(21, 16) +#define RCC_PLL4CFGR1_IFRGE_MASK GENMASK(25, 24) +#define RCC_PLL4CFGR1_DIVN_SHIFT 0 +#define RCC_PLL4CFGR1_DIVM4_SHIFT 16 +#define RCC_PLL4CFGR1_IFRGE_SHIFT 24 + +/* RCC_PLL4CFGR2 register fields */ +#define RCC_PLL4CFGR2_DIVP_MASK GENMASK(6, 0) +#define RCC_PLL4CFGR2_DIVQ_MASK GENMASK(14, 8) +#define RCC_PLL4CFGR2_DIVR_MASK GENMASK(22, 16) +#define RCC_PLL4CFGR2_DIVP_SHIFT 0 +#define RCC_PLL4CFGR2_DIVQ_SHIFT 8 +#define RCC_PLL4CFGR2_DIVR_SHIFT 16 + +/* RCC_PLL4FRACR register fields */ +#define RCC_PLL4FRACR_FRACV_MASK GENMASK(15, 3) +#define RCC_PLL4FRACR_FRACLE BIT(16) +#define RCC_PLL4FRACR_FRACV_SHIFT 3 + +/* RCC_PLL4CSGR register fields */ +#define RCC_PLL4CSGR_MOD_PER_MASK GENMASK(12, 0) +#define RCC_PLL4CSGR_TPDFN_DIS BIT(13) +#define RCC_PLL4CSGR_RPDFN_DIS BIT(14) +#define RCC_PLL4CSGR_SSCG_MODE BIT(15) +#define RCC_PLL4CSGR_INC_STEP_MASK GENMASK(30, 16) +#define RCC_PLL4CSGR_MOD_PER_SHIFT 0 +#define RCC_PLL4CSGR_INC_STEP_SHIFT 16 + +/* RCC_MPCKSELR register fields */ +#define RCC_MPCKSELR_MPUSRC_MASK GENMASK(1, 0) +#define RCC_MPCKSELR_MPUSRCRDY BIT(31) +#define RCC_MPCKSELR_MPUSRC_SHIFT 0 + +/* RCC_ASSCKSELR register fields */ +#define RCC_ASSCKSELR_AXISSRC_MASK GENMASK(2, 0) +#define RCC_ASSCKSELR_AXISSRCRDY BIT(31) +#define RCC_ASSCKSELR_AXISSRC_SHIFT 0 + +/* RCC_MSSCKSELR register fields */ +#define RCC_MSSCKSELR_MLAHBSSRC_MASK GENMASK(1, 0) +#define RCC_MSSCKSELR_MLAHBSSRCRDY BIT(31) +#define RCC_MSSCKSELR_MLAHBSSRC_SHIFT 0 + +/* RCC_CPERCKSELR register fields */ +#define RCC_CPERCKSELR_CKPERSRC_MASK GENMASK(1, 0) +#define RCC_CPERCKSELR_CKPERSRC_SHIFT 0 + +/* RCC_RTCDIVR register fields */ +#define RCC_RTCDIVR_RTCDIV_MASK GENMASK(5, 0) +#define RCC_RTCDIVR_RTCDIV_SHIFT 0 + +/* RCC_MPCKDIVR register fields */ +#define 
RCC_MPCKDIVR_MPUDIV_MASK GENMASK(3, 0) +#define RCC_MPCKDIVR_MPUDIVRDY BIT(31) +#define RCC_MPCKDIVR_MPUDIV_SHIFT 0 + +/* RCC_AXIDIVR register fields */ +#define RCC_AXIDIVR_AXIDIV_MASK GENMASK(2, 0) +#define RCC_AXIDIVR_AXIDIVRDY BIT(31) +#define RCC_AXIDIVR_AXIDIV_SHIFT 0 + +/* RCC_MLAHBDIVR register fields */ +#define RCC_MLAHBDIVR_MLAHBDIV_MASK GENMASK(3, 0) +#define RCC_MLAHBDIVR_MLAHBDIVRDY BIT(31) +#define RCC_MLAHBDIVR_MLAHBDIV_SHIFT 0 + +/* RCC_APB1DIVR register fields */ +#define RCC_APB1DIVR_APB1DIV_MASK GENMASK(2, 0) +#define RCC_APB1DIVR_APB1DIVRDY BIT(31) +#define RCC_APB1DIVR_APB1DIV_SHIFT 0 + +/* RCC_APB2DIVR register fields */ +#define RCC_APB2DIVR_APB2DIV_MASK GENMASK(2, 0) +#define RCC_APB2DIVR_APB2DIVRDY BIT(31) +#define RCC_APB2DIVR_APB2DIV_SHIFT 0 + +/* RCC_APB3DIVR register fields */ +#define RCC_APB3DIVR_APB3DIV_MASK GENMASK(2, 0) +#define RCC_APB3DIVR_APB3DIVRDY BIT(31) +#define RCC_APB3DIVR_APB3DIV_SHIFT 0 + +/* RCC_APB4DIVR register fields */ +#define RCC_APB4DIVR_APB4DIV_MASK GENMASK(2, 0) +#define RCC_APB4DIVR_APB4DIVRDY BIT(31) +#define RCC_APB4DIVR_APB4DIV_SHIFT 0 + +/* RCC_APB5DIVR register fields */ +#define RCC_APB5DIVR_APB5DIV_MASK GENMASK(2, 0) +#define RCC_APB5DIVR_APB5DIVRDY BIT(31) +#define RCC_APB5DIVR_APB5DIV_SHIFT 0 + +/* RCC_APB6DIVR register fields */ +#define RCC_APB6DIVR_APB6DIV_MASK GENMASK(2, 0) +#define RCC_APB6DIVR_APB6DIVRDY BIT(31) +#define RCC_APB6DIVR_APB6DIV_SHIFT 0 + +/* RCC_TIMG1PRER register fields */ +#define RCC_TIMG1PRER_TIMG1PRE BIT(0) +#define RCC_TIMG1PRER_TIMG1PRERDY BIT(31) + +/* RCC_TIMG2PRER register fields */ +#define RCC_TIMG2PRER_TIMG2PRE BIT(0) +#define RCC_TIMG2PRER_TIMG2PRERDY BIT(31) + +/* RCC_TIMG3PRER register fields */ +#define RCC_TIMG3PRER_TIMG3PRE BIT(0) +#define RCC_TIMG3PRER_TIMG3PRERDY BIT(31) + +/* RCC_DDRITFCR register fields */ +#define RCC_DDRITFCR_DDRC1EN BIT(0) +#define RCC_DDRITFCR_DDRC1LPEN BIT(1) +#define RCC_DDRITFCR_DDRPHYCEN BIT(4) +#define RCC_DDRITFCR_DDRPHYCLPEN BIT(5) +#define RCC_DDRITFCR_DDRCAPBEN BIT(6) +#define RCC_DDRITFCR_DDRCAPBLPEN BIT(7) +#define RCC_DDRITFCR_AXIDCGEN BIT(8) +#define RCC_DDRITFCR_DDRPHYCAPBEN BIT(9) +#define RCC_DDRITFCR_DDRPHYCAPBLPEN BIT(10) +#define RCC_DDRITFCR_KERDCG_DLY_MASK GENMASK(13, 11) +#define RCC_DDRITFCR_DDRCAPBRST BIT(14) +#define RCC_DDRITFCR_DDRCAXIRST BIT(15) +#define RCC_DDRITFCR_DDRCORERST BIT(16) +#define RCC_DDRITFCR_DPHYAPBRST BIT(17) +#define RCC_DDRITFCR_DPHYRST BIT(18) +#define RCC_DDRITFCR_DPHYCTLRST BIT(19) +#define RCC_DDRITFCR_DDRCKMOD_MASK GENMASK(22, 20) +#define RCC_DDRITFCR_GSKPMOD BIT(23) +#define RCC_DDRITFCR_GSKPCTRL BIT(24) +#define RCC_DDRITFCR_DFILP_WIDTH_MASK GENMASK(27, 25) +#define RCC_DDRITFCR_GSKP_DUR_MASK GENMASK(31, 28) +#define RCC_DDRITFCR_KERDCG_DLY_SHIFT 11 +#define RCC_DDRITFCR_DDRCKMOD_SHIFT 20 +#define RCC_DDRITFCR_DFILP_WIDTH_SHIFT 25 +#define RCC_DDRITFCR_GSKP_DUR_SHIFT 28 + +/* RCC_I2C12CKSELR register fields */ +#define RCC_I2C12CKSELR_I2C12SRC_MASK GENMASK(2, 0) +#define RCC_I2C12CKSELR_I2C12SRC_SHIFT 0 + +/* RCC_I2C345CKSELR register fields */ +#define RCC_I2C345CKSELR_I2C3SRC_MASK GENMASK(2, 0) +#define RCC_I2C345CKSELR_I2C4SRC_MASK GENMASK(5, 3) +#define RCC_I2C345CKSELR_I2C5SRC_MASK GENMASK(8, 6) +#define RCC_I2C345CKSELR_I2C3SRC_SHIFT 0 +#define RCC_I2C345CKSELR_I2C4SRC_SHIFT 3 +#define RCC_I2C345CKSELR_I2C5SRC_SHIFT 6 + +/* RCC_SPI2S1CKSELR register fields */ +#define RCC_SPI2S1CKSELR_SPI1SRC_MASK GENMASK(2, 0) +#define RCC_SPI2S1CKSELR_SPI1SRC_SHIFT 0 + +/* RCC_SPI2S23CKSELR register fields */ 
+#define RCC_SPI2S23CKSELR_SPI23SRC_MASK GENMASK(2, 0) +#define RCC_SPI2S23CKSELR_SPI23SRC_SHIFT 0 + +/* RCC_SPI45CKSELR register fields */ +#define RCC_SPI45CKSELR_SPI4SRC_MASK GENMASK(2, 0) +#define RCC_SPI45CKSELR_SPI5SRC_MASK GENMASK(5, 3) +#define RCC_SPI45CKSELR_SPI4SRC_SHIFT 0 +#define RCC_SPI45CKSELR_SPI5SRC_SHIFT 3 + +/* RCC_UART12CKSELR register fields */ +#define RCC_UART12CKSELR_UART1SRC_MASK GENMASK(2, 0) +#define RCC_UART12CKSELR_UART2SRC_MASK GENMASK(5, 3) +#define RCC_UART12CKSELR_UART1SRC_SHIFT 0 +#define RCC_UART12CKSELR_UART2SRC_SHIFT 3 + +/* RCC_UART35CKSELR register fields */ +#define RCC_UART35CKSELR_UART35SRC_MASK GENMASK(2, 0) +#define RCC_UART35CKSELR_UART35SRC_SHIFT 0 + +/* RCC_UART4CKSELR register fields */ +#define RCC_UART4CKSELR_UART4SRC_MASK GENMASK(2, 0) +#define RCC_UART4CKSELR_UART4SRC_SHIFT 0 + +/* RCC_UART6CKSELR register fields */ +#define RCC_UART6CKSELR_UART6SRC_MASK GENMASK(2, 0) +#define RCC_UART6CKSELR_UART6SRC_SHIFT 0 + +/* RCC_UART78CKSELR register fields */ +#define RCC_UART78CKSELR_UART78SRC_MASK GENMASK(2, 0) +#define RCC_UART78CKSELR_UART78SRC_SHIFT 0 + +/* RCC_LPTIM1CKSELR register fields */ +#define RCC_LPTIM1CKSELR_LPTIM1SRC_MASK GENMASK(2, 0) +#define RCC_LPTIM1CKSELR_LPTIM1SRC_SHIFT 0 + +/* RCC_LPTIM23CKSELR register fields */ +#define RCC_LPTIM23CKSELR_LPTIM2SRC_MASK GENMASK(2, 0) +#define RCC_LPTIM23CKSELR_LPTIM3SRC_MASK GENMASK(5, 3) +#define RCC_LPTIM23CKSELR_LPTIM2SRC_SHIFT 0 +#define RCC_LPTIM23CKSELR_LPTIM3SRC_SHIFT 3 + +/* RCC_LPTIM45CKSELR register fields */ +#define RCC_LPTIM45CKSELR_LPTIM45SRC_MASK GENMASK(2, 0) +#define RCC_LPTIM45CKSELR_LPTIM45SRC_SHIFT 0 + +/* RCC_SAI1CKSELR register fields */ +#define RCC_SAI1CKSELR_SAI1SRC_MASK GENMASK(2, 0) +#define RCC_SAI1CKSELR_SAI1SRC_SHIFT 0 + +/* RCC_SAI2CKSELR register fields */ +#define RCC_SAI2CKSELR_SAI2SRC_MASK GENMASK(2, 0) +#define RCC_SAI2CKSELR_SAI2SRC_SHIFT 0 + +/* RCC_FDCANCKSELR register fields */ +#define RCC_FDCANCKSELR_FDCANSRC_MASK GENMASK(1, 0) +#define RCC_FDCANCKSELR_FDCANSRC_SHIFT 0 + +/* RCC_SPDIFCKSELR register fields */ +#define RCC_SPDIFCKSELR_SPDIFSRC_MASK GENMASK(1, 0) +#define RCC_SPDIFCKSELR_SPDIFSRC_SHIFT 0 + +/* RCC_ADC12CKSELR register fields */ +#define RCC_ADC12CKSELR_ADC1SRC_MASK GENMASK(1, 0) +#define RCC_ADC12CKSELR_ADC2SRC_MASK GENMASK(3, 2) +#define RCC_ADC12CKSELR_ADC1SRC_SHIFT 0 +#define RCC_ADC12CKSELR_ADC2SRC_SHIFT 2 + +/* RCC_SDMMC12CKSELR register fields */ +#define RCC_SDMMC12CKSELR_SDMMC1SRC_MASK GENMASK(2, 0) +#define RCC_SDMMC12CKSELR_SDMMC2SRC_MASK GENMASK(5, 3) +#define RCC_SDMMC12CKSELR_SDMMC1SRC_SHIFT 0 +#define RCC_SDMMC12CKSELR_SDMMC2SRC_SHIFT 3 + +/* RCC_ETH12CKSELR register fields */ +#define RCC_ETH12CKSELR_ETH1SRC_MASK GENMASK(1, 0) +#define RCC_ETH12CKSELR_ETH1PTPDIV_MASK GENMASK(7, 4) +#define RCC_ETH12CKSELR_ETH2SRC_MASK GENMASK(9, 8) +#define RCC_ETH12CKSELR_ETH2PTPDIV_MASK GENMASK(15, 12) +#define RCC_ETH12CKSELR_ETH1SRC_SHIFT 0 +#define RCC_ETH12CKSELR_ETH1PTPDIV_SHIFT 4 +#define RCC_ETH12CKSELR_ETH2SRC_SHIFT 8 +#define RCC_ETH12CKSELR_ETH2PTPDIV_SHIFT 12 + +/* RCC_USBCKSELR register fields */ +#define RCC_USBCKSELR_USBPHYSRC_MASK GENMASK(1, 0) +#define RCC_USBCKSELR_USBOSRC BIT(4) +#define RCC_USBCKSELR_USBPHYSRC_SHIFT 0 + +/* RCC_QSPICKSELR register fields */ +#define RCC_QSPICKSELR_QSPISRC_MASK GENMASK(1, 0) +#define RCC_QSPICKSELR_QSPISRC_SHIFT 0 + +/* RCC_FMCCKSELR register fields */ +#define RCC_FMCCKSELR_FMCSRC_MASK GENMASK(1, 0) +#define RCC_FMCCKSELR_FMCSRC_SHIFT 0 + +/* RCC_RNG1CKSELR register fields */ 
+#define RCC_RNG1CKSELR_RNG1SRC_MASK GENMASK(1, 0) +#define RCC_RNG1CKSELR_RNG1SRC_SHIFT 0 + +/* RCC_STGENCKSELR register fields */ +#define RCC_STGENCKSELR_STGENSRC_MASK GENMASK(1, 0) +#define RCC_STGENCKSELR_STGENSRC_SHIFT 0 + +/* RCC_DCMIPPCKSELR register fields */ +#define RCC_DCMIPPCKSELR_DCMIPPSRC_MASK GENMASK(1, 0) +#define RCC_DCMIPPCKSELR_DCMIPPSRC_SHIFT 0 + +/* RCC_SAESCKSELR register fields */ +#define RCC_SAESCKSELR_SAESSRC_MASK GENMASK(1, 0) +#define RCC_SAESCKSELR_SAESSRC_SHIFT 0 + +/* RCC_APB1RSTSETR register fields */ +#define RCC_APB1RSTSETR_TIM2RST BIT(0) +#define RCC_APB1RSTSETR_TIM3RST BIT(1) +#define RCC_APB1RSTSETR_TIM4RST BIT(2) +#define RCC_APB1RSTSETR_TIM5RST BIT(3) +#define RCC_APB1RSTSETR_TIM6RST BIT(4) +#define RCC_APB1RSTSETR_TIM7RST BIT(5) +#define RCC_APB1RSTSETR_LPTIM1RST BIT(9) +#define RCC_APB1RSTSETR_SPI2RST BIT(11) +#define RCC_APB1RSTSETR_SPI3RST BIT(12) +#define RCC_APB1RSTSETR_USART3RST BIT(15) +#define RCC_APB1RSTSETR_UART4RST BIT(16) +#define RCC_APB1RSTSETR_UART5RST BIT(17) +#define RCC_APB1RSTSETR_UART7RST BIT(18) +#define RCC_APB1RSTSETR_UART8RST BIT(19) +#define RCC_APB1RSTSETR_I2C1RST BIT(21) +#define RCC_APB1RSTSETR_I2C2RST BIT(22) +#define RCC_APB1RSTSETR_SPDIFRST BIT(26) + +/* RCC_APB1RSTCLRR register fields */ +#define RCC_APB1RSTCLRR_TIM2RST BIT(0) +#define RCC_APB1RSTCLRR_TIM3RST BIT(1) +#define RCC_APB1RSTCLRR_TIM4RST BIT(2) +#define RCC_APB1RSTCLRR_TIM5RST BIT(3) +#define RCC_APB1RSTCLRR_TIM6RST BIT(4) +#define RCC_APB1RSTCLRR_TIM7RST BIT(5) +#define RCC_APB1RSTCLRR_LPTIM1RST BIT(9) +#define RCC_APB1RSTCLRR_SPI2RST BIT(11) +#define RCC_APB1RSTCLRR_SPI3RST BIT(12) +#define RCC_APB1RSTCLRR_USART3RST BIT(15) +#define RCC_APB1RSTCLRR_UART4RST BIT(16) +#define RCC_APB1RSTCLRR_UART5RST BIT(17) +#define RCC_APB1RSTCLRR_UART7RST BIT(18) +#define RCC_APB1RSTCLRR_UART8RST BIT(19) +#define RCC_APB1RSTCLRR_I2C1RST BIT(21) +#define RCC_APB1RSTCLRR_I2C2RST BIT(22) +#define RCC_APB1RSTCLRR_SPDIFRST BIT(26) + +/* RCC_APB2RSTSETR register fields */ +#define RCC_APB2RSTSETR_TIM1RST BIT(0) +#define RCC_APB2RSTSETR_TIM8RST BIT(1) +#define RCC_APB2RSTSETR_SPI1RST BIT(8) +#define RCC_APB2RSTSETR_USART6RST BIT(13) +#define RCC_APB2RSTSETR_SAI1RST BIT(16) +#define RCC_APB2RSTSETR_SAI2RST BIT(17) +#define RCC_APB2RSTSETR_DFSDMRST BIT(20) +#define RCC_APB2RSTSETR_FDCANRST BIT(24) + +/* RCC_APB2RSTCLRR register fields */ +#define RCC_APB2RSTCLRR_TIM1RST BIT(0) +#define RCC_APB2RSTCLRR_TIM8RST BIT(1) +#define RCC_APB2RSTCLRR_SPI1RST BIT(8) +#define RCC_APB2RSTCLRR_USART6RST BIT(13) +#define RCC_APB2RSTCLRR_SAI1RST BIT(16) +#define RCC_APB2RSTCLRR_SAI2RST BIT(17) +#define RCC_APB2RSTCLRR_DFSDMRST BIT(20) +#define RCC_APB2RSTCLRR_FDCANRST BIT(24) + +/* RCC_APB3RSTSETR register fields */ +#define RCC_APB3RSTSETR_LPTIM2RST BIT(0) +#define RCC_APB3RSTSETR_LPTIM3RST BIT(1) +#define RCC_APB3RSTSETR_LPTIM4RST BIT(2) +#define RCC_APB3RSTSETR_LPTIM5RST BIT(3) +#define RCC_APB3RSTSETR_SYSCFGRST BIT(11) +#define RCC_APB3RSTSETR_VREFRST BIT(13) +#define RCC_APB3RSTSETR_DTSRST BIT(16) +#define RCC_APB3RSTSETR_PMBCTRLRST BIT(17) + +/* RCC_APB3RSTCLRR register fields */ +#define RCC_APB3RSTCLRR_LPTIM2RST BIT(0) +#define RCC_APB3RSTCLRR_LPTIM3RST BIT(1) +#define RCC_APB3RSTCLRR_LPTIM4RST BIT(2) +#define RCC_APB3RSTCLRR_LPTIM5RST BIT(3) +#define RCC_APB3RSTCLRR_SYSCFGRST BIT(11) +#define RCC_APB3RSTCLRR_VREFRST BIT(13) +#define RCC_APB3RSTCLRR_DTSRST BIT(16) +#define RCC_APB3RSTCLRR_PMBCTRLRST BIT(17) + +/* RCC_APB4RSTSETR register fields */ +#define RCC_APB4RSTSETR_LTDCRST BIT(0) 
+#define RCC_APB4RSTSETR_DCMIPPRST BIT(1) +#define RCC_APB4RSTSETR_DDRPERFMRST BIT(8) +#define RCC_APB4RSTSETR_USBPHYRST BIT(16) + +/* RCC_APB4RSTCLRR register fields */ +#define RCC_APB4RSTCLRR_LTDCRST BIT(0) +#define RCC_APB4RSTCLRR_DCMIPPRST BIT(1) +#define RCC_APB4RSTCLRR_DDRPERFMRST BIT(8) +#define RCC_APB4RSTCLRR_USBPHYRST BIT(16) + +/* RCC_APB5RSTSETR register fields */ +#define RCC_APB5RSTSETR_STGENRST BIT(20) + +/* RCC_APB5RSTCLRR register fields */ +#define RCC_APB5RSTCLRR_STGENRST BIT(20) + +/* RCC_APB6RSTSETR register fields */ +#define RCC_APB6RSTSETR_USART1RST BIT(0) +#define RCC_APB6RSTSETR_USART2RST BIT(1) +#define RCC_APB6RSTSETR_SPI4RST BIT(2) +#define RCC_APB6RSTSETR_SPI5RST BIT(3) +#define RCC_APB6RSTSETR_I2C3RST BIT(4) +#define RCC_APB6RSTSETR_I2C4RST BIT(5) +#define RCC_APB6RSTSETR_I2C5RST BIT(6) +#define RCC_APB6RSTSETR_TIM12RST BIT(7) +#define RCC_APB6RSTSETR_TIM13RST BIT(8) +#define RCC_APB6RSTSETR_TIM14RST BIT(9) +#define RCC_APB6RSTSETR_TIM15RST BIT(10) +#define RCC_APB6RSTSETR_TIM16RST BIT(11) +#define RCC_APB6RSTSETR_TIM17RST BIT(12) + +/* RCC_APB6RSTCLRR register fields */ +#define RCC_APB6RSTCLRR_USART1RST BIT(0) +#define RCC_APB6RSTCLRR_USART2RST BIT(1) +#define RCC_APB6RSTCLRR_SPI4RST BIT(2) +#define RCC_APB6RSTCLRR_SPI5RST BIT(3) +#define RCC_APB6RSTCLRR_I2C3RST BIT(4) +#define RCC_APB6RSTCLRR_I2C4RST BIT(5) +#define RCC_APB6RSTCLRR_I2C5RST BIT(6) +#define RCC_APB6RSTCLRR_TIM12RST BIT(7) +#define RCC_APB6RSTCLRR_TIM13RST BIT(8) +#define RCC_APB6RSTCLRR_TIM14RST BIT(9) +#define RCC_APB6RSTCLRR_TIM15RST BIT(10) +#define RCC_APB6RSTCLRR_TIM16RST BIT(11) +#define RCC_APB6RSTCLRR_TIM17RST BIT(12) + +/* RCC_AHB2RSTSETR register fields */ +#define RCC_AHB2RSTSETR_DMA1RST BIT(0) +#define RCC_AHB2RSTSETR_DMA2RST BIT(1) +#define RCC_AHB2RSTSETR_DMAMUX1RST BIT(2) +#define RCC_AHB2RSTSETR_DMA3RST BIT(3) +#define RCC_AHB2RSTSETR_DMAMUX2RST BIT(4) +#define RCC_AHB2RSTSETR_ADC1RST BIT(5) +#define RCC_AHB2RSTSETR_ADC2RST BIT(6) +#define RCC_AHB2RSTSETR_USBORST BIT(8) + +/* RCC_AHB2RSTCLRR register fields */ +#define RCC_AHB2RSTCLRR_DMA1RST BIT(0) +#define RCC_AHB2RSTCLRR_DMA2RST BIT(1) +#define RCC_AHB2RSTCLRR_DMAMUX1RST BIT(2) +#define RCC_AHB2RSTCLRR_DMA3RST BIT(3) +#define RCC_AHB2RSTCLRR_DMAMUX2RST BIT(4) +#define RCC_AHB2RSTCLRR_ADC1RST BIT(5) +#define RCC_AHB2RSTCLRR_ADC2RST BIT(6) +#define RCC_AHB2RSTCLRR_USBORST BIT(8) + +/* RCC_AHB4RSTSETR register fields */ +#define RCC_AHB4RSTSETR_GPIOARST BIT(0) +#define RCC_AHB4RSTSETR_GPIOBRST BIT(1) +#define RCC_AHB4RSTSETR_GPIOCRST BIT(2) +#define RCC_AHB4RSTSETR_GPIODRST BIT(3) +#define RCC_AHB4RSTSETR_GPIOERST BIT(4) +#define RCC_AHB4RSTSETR_GPIOFRST BIT(5) +#define RCC_AHB4RSTSETR_GPIOGRST BIT(6) +#define RCC_AHB4RSTSETR_GPIOHRST BIT(7) +#define RCC_AHB4RSTSETR_GPIOIRST BIT(8) +#define RCC_AHB4RSTSETR_TSCRST BIT(15) + +/* RCC_AHB4RSTCLRR register fields */ +#define RCC_AHB4RSTCLRR_GPIOARST BIT(0) +#define RCC_AHB4RSTCLRR_GPIOBRST BIT(1) +#define RCC_AHB4RSTCLRR_GPIOCRST BIT(2) +#define RCC_AHB4RSTCLRR_GPIODRST BIT(3) +#define RCC_AHB4RSTCLRR_GPIOERST BIT(4) +#define RCC_AHB4RSTCLRR_GPIOFRST BIT(5) +#define RCC_AHB4RSTCLRR_GPIOGRST BIT(6) +#define RCC_AHB4RSTCLRR_GPIOHRST BIT(7) +#define RCC_AHB4RSTCLRR_GPIOIRST BIT(8) +#define RCC_AHB4RSTCLRR_TSCRST BIT(15) + +/* RCC_AHB5RSTSETR register fields */ +#define RCC_AHB5RSTSETR_PKARST BIT(2) +#define RCC_AHB5RSTSETR_SAESRST BIT(3) +#define RCC_AHB5RSTSETR_CRYP1RST BIT(4) +#define RCC_AHB5RSTSETR_HASH1RST BIT(5) +#define RCC_AHB5RSTSETR_RNG1RST BIT(6) +#define 
RCC_AHB5RSTSETR_AXIMCRST BIT(16) + +/* RCC_AHB5RSTCLRR register fields */ +#define RCC_AHB5RSTCLRR_PKARST BIT(2) +#define RCC_AHB5RSTCLRR_SAESRST BIT(3) +#define RCC_AHB5RSTCLRR_CRYP1RST BIT(4) +#define RCC_AHB5RSTCLRR_HASH1RST BIT(5) +#define RCC_AHB5RSTCLRR_RNG1RST BIT(6) +#define RCC_AHB5RSTCLRR_AXIMCRST BIT(16) + +/* RCC_AHB6RSTSETR register fields */ +#define RCC_AHB6RSTSETR_MDMARST BIT(0) +#define RCC_AHB6RSTSETR_MCERST BIT(1) +#define RCC_AHB6RSTSETR_ETH1MACRST BIT(10) +#define RCC_AHB6RSTSETR_FMCRST BIT(12) +#define RCC_AHB6RSTSETR_QSPIRST BIT(14) +#define RCC_AHB6RSTSETR_SDMMC1RST BIT(16) +#define RCC_AHB6RSTSETR_SDMMC2RST BIT(17) +#define RCC_AHB6RSTSETR_CRC1RST BIT(20) +#define RCC_AHB6RSTSETR_USBHRST BIT(24) +#define RCC_AHB6RSTSETR_ETH2MACRST BIT(30) + +/* RCC_AHB6RSTCLRR register fields */ +#define RCC_AHB6RSTCLRR_MDMARST BIT(0) +#define RCC_AHB6RSTCLRR_MCERST BIT(1) +#define RCC_AHB6RSTCLRR_ETH1MACRST BIT(10) +#define RCC_AHB6RSTCLRR_FMCRST BIT(12) +#define RCC_AHB6RSTCLRR_QSPIRST BIT(14) +#define RCC_AHB6RSTCLRR_SDMMC1RST BIT(16) +#define RCC_AHB6RSTCLRR_SDMMC2RST BIT(17) +#define RCC_AHB6RSTCLRR_CRC1RST BIT(20) +#define RCC_AHB6RSTCLRR_USBHRST BIT(24) +#define RCC_AHB6RSTCLRR_ETH2MACRST BIT(30) + +/* RCC_MP_APB1ENSETR register fields */ +#define RCC_MP_APB1ENSETR_TIM2EN BIT(0) +#define RCC_MP_APB1ENSETR_TIM3EN BIT(1) +#define RCC_MP_APB1ENSETR_TIM4EN BIT(2) +#define RCC_MP_APB1ENSETR_TIM5EN BIT(3) +#define RCC_MP_APB1ENSETR_TIM6EN BIT(4) +#define RCC_MP_APB1ENSETR_TIM7EN BIT(5) +#define RCC_MP_APB1ENSETR_LPTIM1EN BIT(9) +#define RCC_MP_APB1ENSETR_SPI2EN BIT(11) +#define RCC_MP_APB1ENSETR_SPI3EN BIT(12) +#define RCC_MP_APB1ENSETR_USART3EN BIT(15) +#define RCC_MP_APB1ENSETR_UART4EN BIT(16) +#define RCC_MP_APB1ENSETR_UART5EN BIT(17) +#define RCC_MP_APB1ENSETR_UART7EN BIT(18) +#define RCC_MP_APB1ENSETR_UART8EN BIT(19) +#define RCC_MP_APB1ENSETR_I2C1EN BIT(21) +#define RCC_MP_APB1ENSETR_I2C2EN BIT(22) +#define RCC_MP_APB1ENSETR_SPDIFEN BIT(26) + +/* RCC_MP_APB1ENCLRR register fields */ +#define RCC_MP_APB1ENCLRR_TIM2EN BIT(0) +#define RCC_MP_APB1ENCLRR_TIM3EN BIT(1) +#define RCC_MP_APB1ENCLRR_TIM4EN BIT(2) +#define RCC_MP_APB1ENCLRR_TIM5EN BIT(3) +#define RCC_MP_APB1ENCLRR_TIM6EN BIT(4) +#define RCC_MP_APB1ENCLRR_TIM7EN BIT(5) +#define RCC_MP_APB1ENCLRR_LPTIM1EN BIT(9) +#define RCC_MP_APB1ENCLRR_SPI2EN BIT(11) +#define RCC_MP_APB1ENCLRR_SPI3EN BIT(12) +#define RCC_MP_APB1ENCLRR_USART3EN BIT(15) +#define RCC_MP_APB1ENCLRR_UART4EN BIT(16) +#define RCC_MP_APB1ENCLRR_UART5EN BIT(17) +#define RCC_MP_APB1ENCLRR_UART7EN BIT(18) +#define RCC_MP_APB1ENCLRR_UART8EN BIT(19) +#define RCC_MP_APB1ENCLRR_I2C1EN BIT(21) +#define RCC_MP_APB1ENCLRR_I2C2EN BIT(22) +#define RCC_MP_APB1ENCLRR_SPDIFEN BIT(26) + +/* RCC_MP_APB2ENSETR register fields */ +#define RCC_MP_APB2ENSETR_TIM1EN BIT(0) +#define RCC_MP_APB2ENSETR_TIM8EN BIT(1) +#define RCC_MP_APB2ENSETR_SPI1EN BIT(8) +#define RCC_MP_APB2ENSETR_USART6EN BIT(13) +#define RCC_MP_APB2ENSETR_SAI1EN BIT(16) +#define RCC_MP_APB2ENSETR_SAI2EN BIT(17) +#define RCC_MP_APB2ENSETR_DFSDMEN BIT(20) +#define RCC_MP_APB2ENSETR_ADFSDMEN BIT(21) +#define RCC_MP_APB2ENSETR_FDCANEN BIT(24) + +/* RCC_MP_APB2ENCLRR register fields */ +#define RCC_MP_APB2ENCLRR_TIM1EN BIT(0) +#define RCC_MP_APB2ENCLRR_TIM8EN BIT(1) +#define RCC_MP_APB2ENCLRR_SPI1EN BIT(8) +#define RCC_MP_APB2ENCLRR_USART6EN BIT(13) +#define RCC_MP_APB2ENCLRR_SAI1EN BIT(16) +#define RCC_MP_APB2ENCLRR_SAI2EN BIT(17) +#define RCC_MP_APB2ENCLRR_DFSDMEN BIT(20) +#define RCC_MP_APB2ENCLRR_ADFSDMEN BIT(21) 
+#define RCC_MP_APB2ENCLRR_FDCANEN BIT(24) + +/* RCC_MP_APB3ENSETR register fields */ +#define RCC_MP_APB3ENSETR_LPTIM2EN BIT(0) +#define RCC_MP_APB3ENSETR_LPTIM3EN BIT(1) +#define RCC_MP_APB3ENSETR_LPTIM4EN BIT(2) +#define RCC_MP_APB3ENSETR_LPTIM5EN BIT(3) +#define RCC_MP_APB3ENSETR_VREFEN BIT(13) +#define RCC_MP_APB3ENSETR_DTSEN BIT(16) +#define RCC_MP_APB3ENSETR_PMBCTRLEN BIT(17) +#define RCC_MP_APB3ENSETR_HDPEN BIT(20) + +/* RCC_MP_APB3ENCLRR register fields */ +#define RCC_MP_APB3ENCLRR_LPTIM2EN BIT(0) +#define RCC_MP_APB3ENCLRR_LPTIM3EN BIT(1) +#define RCC_MP_APB3ENCLRR_LPTIM4EN BIT(2) +#define RCC_MP_APB3ENCLRR_LPTIM5EN BIT(3) +#define RCC_MP_APB3ENCLRR_VREFEN BIT(13) +#define RCC_MP_APB3ENCLRR_DTSEN BIT(16) +#define RCC_MP_APB3ENCLRR_PMBCTRLEN BIT(17) +#define RCC_MP_APB3ENCLRR_HDPEN BIT(20) + +/* RCC_MP_S_APB3ENSETR register fields */ +#define RCC_MP_S_APB3ENSETR_SYSCFGEN BIT(0) + +/* RCC_MP_S_APB3ENCLRR register fields */ +#define RCC_MP_S_APB3ENCLRR_SYSCFGEN BIT(0) + +/* RCC_MP_NS_APB3ENSETR register fields */ +#define RCC_MP_NS_APB3ENSETR_SYSCFGEN BIT(0) + +/* RCC_MP_NS_APB3ENCLRR register fields */ +#define RCC_MP_NS_APB3ENCLRR_SYSCFGEN BIT(0) + +/* RCC_MP_APB4ENSETR register fields */ +#define RCC_MP_APB4ENSETR_DCMIPPEN BIT(1) +#define RCC_MP_APB4ENSETR_DDRPERFMEN BIT(8) +#define RCC_MP_APB4ENSETR_IWDG2APBEN BIT(15) +#define RCC_MP_APB4ENSETR_USBPHYEN BIT(16) +#define RCC_MP_APB4ENSETR_STGENROEN BIT(20) + +/* RCC_MP_APB4ENCLRR register fields */ +#define RCC_MP_APB4ENCLRR_DCMIPPEN BIT(1) +#define RCC_MP_APB4ENCLRR_DDRPERFMEN BIT(8) +#define RCC_MP_APB4ENCLRR_IWDG2APBEN BIT(15) +#define RCC_MP_APB4ENCLRR_USBPHYEN BIT(16) +#define RCC_MP_APB4ENCLRR_STGENROEN BIT(20) + +/* RCC_MP_S_APB4ENSETR register fields */ +#define RCC_MP_S_APB4ENSETR_LTDCEN BIT(0) + +/* RCC_MP_S_APB4ENCLRR register fields */ +#define RCC_MP_S_APB4ENCLRR_LTDCEN BIT(0) + +/* RCC_MP_NS_APB4ENSETR register fields */ +#define RCC_MP_NS_APB4ENSETR_LTDCEN BIT(0) + +/* RCC_MP_NS_APB4ENCLRR register fields */ +#define RCC_MP_NS_APB4ENCLRR_LTDCEN BIT(0) + +/* RCC_MP_APB5ENSETR register fields */ +#define RCC_MP_APB5ENSETR_RTCAPBEN BIT(8) +#define RCC_MP_APB5ENSETR_TZCEN BIT(11) +#define RCC_MP_APB5ENSETR_ETZPCEN BIT(13) +#define RCC_MP_APB5ENSETR_IWDG1APBEN BIT(15) +#define RCC_MP_APB5ENSETR_BSECEN BIT(16) +#define RCC_MP_APB5ENSETR_STGENCEN BIT(20) + +/* RCC_MP_APB5ENCLRR register fields */ +#define RCC_MP_APB5ENCLRR_RTCAPBEN BIT(8) +#define RCC_MP_APB5ENCLRR_TZCEN BIT(11) +#define RCC_MP_APB5ENCLRR_ETZPCEN BIT(13) +#define RCC_MP_APB5ENCLRR_IWDG1APBEN BIT(15) +#define RCC_MP_APB5ENCLRR_BSECEN BIT(16) +#define RCC_MP_APB5ENCLRR_STGENCEN BIT(20) + +/* RCC_MP_APB6ENSETR register fields */ +#define RCC_MP_APB6ENSETR_USART1EN BIT(0) +#define RCC_MP_APB6ENSETR_USART2EN BIT(1) +#define RCC_MP_APB6ENSETR_SPI4EN BIT(2) +#define RCC_MP_APB6ENSETR_SPI5EN BIT(3) +#define RCC_MP_APB6ENSETR_I2C3EN BIT(4) +#define RCC_MP_APB6ENSETR_I2C4EN BIT(5) +#define RCC_MP_APB6ENSETR_I2C5EN BIT(6) +#define RCC_MP_APB6ENSETR_TIM12EN BIT(7) +#define RCC_MP_APB6ENSETR_TIM13EN BIT(8) +#define RCC_MP_APB6ENSETR_TIM14EN BIT(9) +#define RCC_MP_APB6ENSETR_TIM15EN BIT(10) +#define RCC_MP_APB6ENSETR_TIM16EN BIT(11) +#define RCC_MP_APB6ENSETR_TIM17EN BIT(12) + +/* RCC_MP_APB6ENCLRR register fields */ +#define RCC_MP_APB6ENCLRR_USART1EN BIT(0) +#define RCC_MP_APB6ENCLRR_USART2EN BIT(1) +#define RCC_MP_APB6ENCLRR_SPI4EN BIT(2) +#define RCC_MP_APB6ENCLRR_SPI5EN BIT(3) +#define RCC_MP_APB6ENCLRR_I2C3EN BIT(4) +#define RCC_MP_APB6ENCLRR_I2C4EN BIT(5) 
+#define RCC_MP_APB6ENCLRR_I2C5EN BIT(6) +#define RCC_MP_APB6ENCLRR_TIM12EN BIT(7) +#define RCC_MP_APB6ENCLRR_TIM13EN BIT(8) +#define RCC_MP_APB6ENCLRR_TIM14EN BIT(9) +#define RCC_MP_APB6ENCLRR_TIM15EN BIT(10) +#define RCC_MP_APB6ENCLRR_TIM16EN BIT(11) +#define RCC_MP_APB6ENCLRR_TIM17EN BIT(12) + +/* RCC_MP_AHB2ENSETR register fields */ +#define RCC_MP_AHB2ENSETR_DMA1EN BIT(0) +#define RCC_MP_AHB2ENSETR_DMA2EN BIT(1) +#define RCC_MP_AHB2ENSETR_DMAMUX1EN BIT(2) +#define RCC_MP_AHB2ENSETR_DMA3EN BIT(3) +#define RCC_MP_AHB2ENSETR_DMAMUX2EN BIT(4) +#define RCC_MP_AHB2ENSETR_ADC1EN BIT(5) +#define RCC_MP_AHB2ENSETR_ADC2EN BIT(6) +#define RCC_MP_AHB2ENSETR_USBOEN BIT(8) + +/* RCC_MP_AHB2ENCLRR register fields */ +#define RCC_MP_AHB2ENCLRR_DMA1EN BIT(0) +#define RCC_MP_AHB2ENCLRR_DMA2EN BIT(1) +#define RCC_MP_AHB2ENCLRR_DMAMUX1EN BIT(2) +#define RCC_MP_AHB2ENCLRR_DMA3EN BIT(3) +#define RCC_MP_AHB2ENCLRR_DMAMUX2EN BIT(4) +#define RCC_MP_AHB2ENCLRR_ADC1EN BIT(5) +#define RCC_MP_AHB2ENCLRR_ADC2EN BIT(6) +#define RCC_MP_AHB2ENCLRR_USBOEN BIT(8) + +/* RCC_MP_AHB4ENSETR register fields */ +#define RCC_MP_AHB4ENSETR_TSCEN BIT(15) + +/* RCC_MP_AHB4ENCLRR register fields */ +#define RCC_MP_AHB4ENCLRR_TSCEN BIT(15) + +/* RCC_MP_S_AHB4ENSETR register fields */ +#define RCC_MP_S_AHB4ENSETR_GPIOAEN BIT(0) +#define RCC_MP_S_AHB4ENSETR_GPIOBEN BIT(1) +#define RCC_MP_S_AHB4ENSETR_GPIOCEN BIT(2) +#define RCC_MP_S_AHB4ENSETR_GPIODEN BIT(3) +#define RCC_MP_S_AHB4ENSETR_GPIOEEN BIT(4) +#define RCC_MP_S_AHB4ENSETR_GPIOFEN BIT(5) +#define RCC_MP_S_AHB4ENSETR_GPIOGEN BIT(6) +#define RCC_MP_S_AHB4ENSETR_GPIOHEN BIT(7) +#define RCC_MP_S_AHB4ENSETR_GPIOIEN BIT(8) + +/* RCC_MP_S_AHB4ENCLRR register fields */ +#define RCC_MP_S_AHB4ENCLRR_GPIOAEN BIT(0) +#define RCC_MP_S_AHB4ENCLRR_GPIOBEN BIT(1) +#define RCC_MP_S_AHB4ENCLRR_GPIOCEN BIT(2) +#define RCC_MP_S_AHB4ENCLRR_GPIODEN BIT(3) +#define RCC_MP_S_AHB4ENCLRR_GPIOEEN BIT(4) +#define RCC_MP_S_AHB4ENCLRR_GPIOFEN BIT(5) +#define RCC_MP_S_AHB4ENCLRR_GPIOGEN BIT(6) +#define RCC_MP_S_AHB4ENCLRR_GPIOHEN BIT(7) +#define RCC_MP_S_AHB4ENCLRR_GPIOIEN BIT(8) + +/* RCC_MP_NS_AHB4ENSETR register fields */ +#define RCC_MP_NS_AHB4ENSETR_GPIOAEN BIT(0) +#define RCC_MP_NS_AHB4ENSETR_GPIOBEN BIT(1) +#define RCC_MP_NS_AHB4ENSETR_GPIOCEN BIT(2) +#define RCC_MP_NS_AHB4ENSETR_GPIODEN BIT(3) +#define RCC_MP_NS_AHB4ENSETR_GPIOEEN BIT(4) +#define RCC_MP_NS_AHB4ENSETR_GPIOFEN BIT(5) +#define RCC_MP_NS_AHB4ENSETR_GPIOGEN BIT(6) +#define RCC_MP_NS_AHB4ENSETR_GPIOHEN BIT(7) +#define RCC_MP_NS_AHB4ENSETR_GPIOIEN BIT(8) + +/* RCC_MP_NS_AHB4ENCLRR register fields */ +#define RCC_MP_NS_AHB4ENCLRR_GPIOAEN BIT(0) +#define RCC_MP_NS_AHB4ENCLRR_GPIOBEN BIT(1) +#define RCC_MP_NS_AHB4ENCLRR_GPIOCEN BIT(2) +#define RCC_MP_NS_AHB4ENCLRR_GPIODEN BIT(3) +#define RCC_MP_NS_AHB4ENCLRR_GPIOEEN BIT(4) +#define RCC_MP_NS_AHB4ENCLRR_GPIOFEN BIT(5) +#define RCC_MP_NS_AHB4ENCLRR_GPIOGEN BIT(6) +#define RCC_MP_NS_AHB4ENCLRR_GPIOHEN BIT(7) +#define RCC_MP_NS_AHB4ENCLRR_GPIOIEN BIT(8) + +/* RCC_MP_AHB5ENSETR register fields */ +#define RCC_MP_AHB5ENSETR_PKAEN BIT(2) +#define RCC_MP_AHB5ENSETR_SAESEN BIT(3) +#define RCC_MP_AHB5ENSETR_CRYP1EN BIT(4) +#define RCC_MP_AHB5ENSETR_HASH1EN BIT(5) +#define RCC_MP_AHB5ENSETR_RNG1EN BIT(6) +#define RCC_MP_AHB5ENSETR_BKPSRAMEN BIT(8) +#define RCC_MP_AHB5ENSETR_AXIMCEN BIT(16) + +/* RCC_MP_AHB5ENCLRR register fields */ +#define RCC_MP_AHB5ENCLRR_PKAEN BIT(2) +#define RCC_MP_AHB5ENCLRR_SAESEN BIT(3) +#define RCC_MP_AHB5ENCLRR_CRYP1EN BIT(4) +#define RCC_MP_AHB5ENCLRR_HASH1EN BIT(5) +#define 
RCC_MP_AHB5ENCLRR_RNG1EN BIT(6) +#define RCC_MP_AHB5ENCLRR_BKPSRAMEN BIT(8) +#define RCC_MP_AHB5ENCLRR_AXIMCEN BIT(16) + +/* RCC_MP_AHB6ENSETR register fields */ +#define RCC_MP_AHB6ENSETR_MCEEN BIT(1) +#define RCC_MP_AHB6ENSETR_ETH1CKEN BIT(7) +#define RCC_MP_AHB6ENSETR_ETH1TXEN BIT(8) +#define RCC_MP_AHB6ENSETR_ETH1RXEN BIT(9) +#define RCC_MP_AHB6ENSETR_ETH1MACEN BIT(10) +#define RCC_MP_AHB6ENSETR_FMCEN BIT(12) +#define RCC_MP_AHB6ENSETR_QSPIEN BIT(14) +#define RCC_MP_AHB6ENSETR_SDMMC1EN BIT(16) +#define RCC_MP_AHB6ENSETR_SDMMC2EN BIT(17) +#define RCC_MP_AHB6ENSETR_CRC1EN BIT(20) +#define RCC_MP_AHB6ENSETR_USBHEN BIT(24) +#define RCC_MP_AHB6ENSETR_ETH2CKEN BIT(27) +#define RCC_MP_AHB6ENSETR_ETH2TXEN BIT(28) +#define RCC_MP_AHB6ENSETR_ETH2RXEN BIT(29) +#define RCC_MP_AHB6ENSETR_ETH2MACEN BIT(30) + +/* RCC_MP_AHB6ENCLRR register fields */ +#define RCC_MP_AHB6ENCLRR_MCEEN BIT(1) +#define RCC_MP_AHB6ENCLRR_ETH1CKEN BIT(7) +#define RCC_MP_AHB6ENCLRR_ETH1TXEN BIT(8) +#define RCC_MP_AHB6ENCLRR_ETH1RXEN BIT(9) +#define RCC_MP_AHB6ENCLRR_ETH1MACEN BIT(10) +#define RCC_MP_AHB6ENCLRR_FMCEN BIT(12) +#define RCC_MP_AHB6ENCLRR_QSPIEN BIT(14) +#define RCC_MP_AHB6ENCLRR_SDMMC1EN BIT(16) +#define RCC_MP_AHB6ENCLRR_SDMMC2EN BIT(17) +#define RCC_MP_AHB6ENCLRR_CRC1EN BIT(20) +#define RCC_MP_AHB6ENCLRR_USBHEN BIT(24) +#define RCC_MP_AHB6ENCLRR_ETH2CKEN BIT(27) +#define RCC_MP_AHB6ENCLRR_ETH2TXEN BIT(28) +#define RCC_MP_AHB6ENCLRR_ETH2RXEN BIT(29) +#define RCC_MP_AHB6ENCLRR_ETH2MACEN BIT(30) + +/* RCC_MP_S_AHB6ENSETR register fields */ +#define RCC_MP_S_AHB6ENSETR_MDMAEN BIT(0) + +/* RCC_MP_S_AHB6ENCLRR register fields */ +#define RCC_MP_S_AHB6ENCLRR_MDMAEN BIT(0) + +/* RCC_MP_NS_AHB6ENSETR register fields */ +#define RCC_MP_NS_AHB6ENSETR_MDMAEN BIT(0) + +/* RCC_MP_NS_AHB6ENCLRR register fields */ +#define RCC_MP_NS_AHB6ENCLRR_MDMAEN BIT(0) + +/* RCC_MP_APB1LPENSETR register fields */ +#define RCC_MP_APB1LPENSETR_TIM2LPEN BIT(0) +#define RCC_MP_APB1LPENSETR_TIM3LPEN BIT(1) +#define RCC_MP_APB1LPENSETR_TIM4LPEN BIT(2) +#define RCC_MP_APB1LPENSETR_TIM5LPEN BIT(3) +#define RCC_MP_APB1LPENSETR_TIM6LPEN BIT(4) +#define RCC_MP_APB1LPENSETR_TIM7LPEN BIT(5) +#define RCC_MP_APB1LPENSETR_LPTIM1LPEN BIT(9) +#define RCC_MP_APB1LPENSETR_SPI2LPEN BIT(11) +#define RCC_MP_APB1LPENSETR_SPI3LPEN BIT(12) +#define RCC_MP_APB1LPENSETR_USART3LPEN BIT(15) +#define RCC_MP_APB1LPENSETR_UART4LPEN BIT(16) +#define RCC_MP_APB1LPENSETR_UART5LPEN BIT(17) +#define RCC_MP_APB1LPENSETR_UART7LPEN BIT(18) +#define RCC_MP_APB1LPENSETR_UART8LPEN BIT(19) +#define RCC_MP_APB1LPENSETR_I2C1LPEN BIT(21) +#define RCC_MP_APB1LPENSETR_I2C2LPEN BIT(22) +#define RCC_MP_APB1LPENSETR_SPDIFLPEN BIT(26) + +/* RCC_MP_APB1LPENCLRR register fields */ +#define RCC_MP_APB1LPENCLRR_TIM2LPEN BIT(0) +#define RCC_MP_APB1LPENCLRR_TIM3LPEN BIT(1) +#define RCC_MP_APB1LPENCLRR_TIM4LPEN BIT(2) +#define RCC_MP_APB1LPENCLRR_TIM5LPEN BIT(3) +#define RCC_MP_APB1LPENCLRR_TIM6LPEN BIT(4) +#define RCC_MP_APB1LPENCLRR_TIM7LPEN BIT(5) +#define RCC_MP_APB1LPENCLRR_LPTIM1LPEN BIT(9) +#define RCC_MP_APB1LPENCLRR_SPI2LPEN BIT(11) +#define RCC_MP_APB1LPENCLRR_SPI3LPEN BIT(12) +#define RCC_MP_APB1LPENCLRR_USART3LPEN BIT(15) +#define RCC_MP_APB1LPENCLRR_UART4LPEN BIT(16) +#define RCC_MP_APB1LPENCLRR_UART5LPEN BIT(17) +#define RCC_MP_APB1LPENCLRR_UART7LPEN BIT(18) +#define RCC_MP_APB1LPENCLRR_UART8LPEN BIT(19) +#define RCC_MP_APB1LPENCLRR_I2C1LPEN BIT(21) +#define RCC_MP_APB1LPENCLRR_I2C2LPEN BIT(22) +#define RCC_MP_APB1LPENCLRR_SPDIFLPEN BIT(26) + +/* RCC_MP_APB2LPENSETR register fields 
*/ +#define RCC_MP_APB2LPENSETR_TIM1LPEN BIT(0) +#define RCC_MP_APB2LPENSETR_TIM8LPEN BIT(1) +#define RCC_MP_APB2LPENSETR_SPI1LPEN BIT(8) +#define RCC_MP_APB2LPENSETR_USART6LPEN BIT(13) +#define RCC_MP_APB2LPENSETR_SAI1LPEN BIT(16) +#define RCC_MP_APB2LPENSETR_SAI2LPEN BIT(17) +#define RCC_MP_APB2LPENSETR_DFSDMLPEN BIT(20) +#define RCC_MP_APB2LPENSETR_ADFSDMLPEN BIT(21) +#define RCC_MP_APB2LPENSETR_FDCANLPEN BIT(24) + +/* RCC_MP_APB2LPENCLRR register fields */ +#define RCC_MP_APB2LPENCLRR_TIM1LPEN BIT(0) +#define RCC_MP_APB2LPENCLRR_TIM8LPEN BIT(1) +#define RCC_MP_APB2LPENCLRR_SPI1LPEN BIT(8) +#define RCC_MP_APB2LPENCLRR_USART6LPEN BIT(13) +#define RCC_MP_APB2LPENCLRR_SAI1LPEN BIT(16) +#define RCC_MP_APB2LPENCLRR_SAI2LPEN BIT(17) +#define RCC_MP_APB2LPENCLRR_DFSDMLPEN BIT(20) +#define RCC_MP_APB2LPENCLRR_ADFSDMLPEN BIT(21) +#define RCC_MP_APB2LPENCLRR_FDCANLPEN BIT(24) + +/* RCC_MP_APB3LPENSETR register fields */ +#define RCC_MP_APB3LPENSETR_LPTIM2LPEN BIT(0) +#define RCC_MP_APB3LPENSETR_LPTIM3LPEN BIT(1) +#define RCC_MP_APB3LPENSETR_LPTIM4LPEN BIT(2) +#define RCC_MP_APB3LPENSETR_LPTIM5LPEN BIT(3) +#define RCC_MP_APB3LPENSETR_VREFLPEN BIT(13) +#define RCC_MP_APB3LPENSETR_DTSLPEN BIT(16) +#define RCC_MP_APB3LPENSETR_PMBCTRLLPEN BIT(17) + +/* RCC_MP_APB3LPENCLRR register fields */ +#define RCC_MP_APB3LPENCLRR_LPTIM2LPEN BIT(0) +#define RCC_MP_APB3LPENCLRR_LPTIM3LPEN BIT(1) +#define RCC_MP_APB3LPENCLRR_LPTIM4LPEN BIT(2) +#define RCC_MP_APB3LPENCLRR_LPTIM5LPEN BIT(3) +#define RCC_MP_APB3LPENCLRR_VREFLPEN BIT(13) +#define RCC_MP_APB3LPENCLRR_DTSLPEN BIT(16) +#define RCC_MP_APB3LPENCLRR_PMBCTRLLPEN BIT(17) + +/* RCC_MP_S_APB3LPENSETR register fields */ +#define RCC_MP_S_APB3LPENSETR_SYSCFGLPEN BIT(0) + +/* RCC_MP_S_APB3LPENCLRR register fields */ +#define RCC_MP_S_APB3LPENCLRR_SYSCFGLPEN BIT(0) + +/* RCC_MP_NS_APB3LPENSETR register fields */ +#define RCC_MP_NS_APB3LPENSETR_SYSCFGLPEN BIT(0) + +/* RCC_MP_NS_APB3LPENCLRR register fields */ +#define RCC_MP_NS_APB3LPENCLRR_SYSCFGLPEN BIT(0) + +/* RCC_MP_APB4LPENSETR register fields */ +#define RCC_MP_APB4LPENSETR_DCMIPPLPEN BIT(1) +#define RCC_MP_APB4LPENSETR_DDRPERFMLPEN BIT(8) +#define RCC_MP_APB4LPENSETR_IWDG2APBLPEN BIT(15) +#define RCC_MP_APB4LPENSETR_USBPHYLPEN BIT(16) +#define RCC_MP_APB4LPENSETR_STGENROLPEN BIT(20) +#define RCC_MP_APB4LPENSETR_STGENROSTPEN BIT(21) + +/* RCC_MP_APB4LPENCLRR register fields */ +#define RCC_MP_APB4LPENCLRR_DCMIPPLPEN BIT(1) +#define RCC_MP_APB4LPENCLRR_DDRPERFMLPEN BIT(8) +#define RCC_MP_APB4LPENCLRR_IWDG2APBLPEN BIT(15) +#define RCC_MP_APB4LPENCLRR_USBPHYLPEN BIT(16) +#define RCC_MP_APB4LPENCLRR_STGENROLPEN BIT(20) +#define RCC_MP_APB4LPENCLRR_STGENROSTPEN BIT(21) + +/* RCC_MP_S_APB4LPENSETR register fields */ +#define RCC_MP_S_APB4LPENSETR_LTDCLPEN BIT(0) + +/* RCC_MP_S_APB4LPENCLRR register fields */ +#define RCC_MP_S_APB4LPENCLRR_LTDCLPEN BIT(0) + +/* RCC_MP_NS_APB4LPENSETR register fields */ +#define RCC_MP_NS_APB4LPENSETR_LTDCLPEN BIT(0) + +/* RCC_MP_NS_APB4LPENCLRR register fields */ +#define RCC_MP_NS_APB4LPENCLRR_LTDCLPEN BIT(0) + +/* RCC_MP_APB5LPENSETR register fields */ +#define RCC_MP_APB5LPENSETR_RTCAPBLPEN BIT(8) +#define RCC_MP_APB5LPENSETR_TZCLPEN BIT(11) +#define RCC_MP_APB5LPENSETR_ETZPCLPEN BIT(13) +#define RCC_MP_APB5LPENSETR_IWDG1APBLPEN BIT(15) +#define RCC_MP_APB5LPENSETR_BSECLPEN BIT(16) +#define RCC_MP_APB5LPENSETR_STGENCLPEN BIT(20) +#define RCC_MP_APB5LPENSETR_STGENCSTPEN BIT(21) + +/* RCC_MP_APB5LPENCLRR register fields */ +#define RCC_MP_APB5LPENCLRR_RTCAPBLPEN BIT(8) +#define 
RCC_MP_APB5LPENCLRR_TZCLPEN BIT(11) +#define RCC_MP_APB5LPENCLRR_ETZPCLPEN BIT(13) +#define RCC_MP_APB5LPENCLRR_IWDG1APBLPEN BIT(15) +#define RCC_MP_APB5LPENCLRR_BSECLPEN BIT(16) +#define RCC_MP_APB5LPENCLRR_STGENCLPEN BIT(20) +#define RCC_MP_APB5LPENCLRR_STGENCSTPEN BIT(21) + +/* RCC_MP_APB6LPENSETR register fields */ +#define RCC_MP_APB6LPENSETR_USART1LPEN BIT(0) +#define RCC_MP_APB6LPENSETR_USART2LPEN BIT(1) +#define RCC_MP_APB6LPENSETR_SPI4LPEN BIT(2) +#define RCC_MP_APB6LPENSETR_SPI5LPEN BIT(3) +#define RCC_MP_APB6LPENSETR_I2C3LPEN BIT(4) +#define RCC_MP_APB6LPENSETR_I2C4LPEN BIT(5) +#define RCC_MP_APB6LPENSETR_I2C5LPEN BIT(6) +#define RCC_MP_APB6LPENSETR_TIM12LPEN BIT(7) +#define RCC_MP_APB6LPENSETR_TIM13LPEN BIT(8) +#define RCC_MP_APB6LPENSETR_TIM14LPEN BIT(9) +#define RCC_MP_APB6LPENSETR_TIM15LPEN BIT(10) +#define RCC_MP_APB6LPENSETR_TIM16LPEN BIT(11) +#define RCC_MP_APB6LPENSETR_TIM17LPEN BIT(12) + +/* RCC_MP_APB6LPENCLRR register fields */ +#define RCC_MP_APB6LPENCLRR_USART1LPEN BIT(0) +#define RCC_MP_APB6LPENCLRR_USART2LPEN BIT(1) +#define RCC_MP_APB6LPENCLRR_SPI4LPEN BIT(2) +#define RCC_MP_APB6LPENCLRR_SPI5LPEN BIT(3) +#define RCC_MP_APB6LPENCLRR_I2C3LPEN BIT(4) +#define RCC_MP_APB6LPENCLRR_I2C4LPEN BIT(5) +#define RCC_MP_APB6LPENCLRR_I2C5LPEN BIT(6) +#define RCC_MP_APB6LPENCLRR_TIM12LPEN BIT(7) +#define RCC_MP_APB6LPENCLRR_TIM13LPEN BIT(8) +#define RCC_MP_APB6LPENCLRR_TIM14LPEN BIT(9) +#define RCC_MP_APB6LPENCLRR_TIM15LPEN BIT(10) +#define RCC_MP_APB6LPENCLRR_TIM16LPEN BIT(11) +#define RCC_MP_APB6LPENCLRR_TIM17LPEN BIT(12) + +/* RCC_MP_AHB2LPENSETR register fields */ +#define RCC_MP_AHB2LPENSETR_DMA1LPEN BIT(0) +#define RCC_MP_AHB2LPENSETR_DMA2LPEN BIT(1) +#define RCC_MP_AHB2LPENSETR_DMAMUX1LPEN BIT(2) +#define RCC_MP_AHB2LPENSETR_DMA3LPEN BIT(3) +#define RCC_MP_AHB2LPENSETR_DMAMUX2LPEN BIT(4) +#define RCC_MP_AHB2LPENSETR_ADC1LPEN BIT(5) +#define RCC_MP_AHB2LPENSETR_ADC2LPEN BIT(6) +#define RCC_MP_AHB2LPENSETR_USBOLPEN BIT(8) + +/* RCC_MP_AHB2LPENCLRR register fields */ +#define RCC_MP_AHB2LPENCLRR_DMA1LPEN BIT(0) +#define RCC_MP_AHB2LPENCLRR_DMA2LPEN BIT(1) +#define RCC_MP_AHB2LPENCLRR_DMAMUX1LPEN BIT(2) +#define RCC_MP_AHB2LPENCLRR_DMA3LPEN BIT(3) +#define RCC_MP_AHB2LPENCLRR_DMAMUX2LPEN BIT(4) +#define RCC_MP_AHB2LPENCLRR_ADC1LPEN BIT(5) +#define RCC_MP_AHB2LPENCLRR_ADC2LPEN BIT(6) +#define RCC_MP_AHB2LPENCLRR_USBOLPEN BIT(8) + +/* RCC_MP_AHB4LPENSETR register fields */ +#define RCC_MP_AHB4LPENSETR_TSCLPEN BIT(15) + +/* RCC_MP_AHB4LPENCLRR register fields */ +#define RCC_MP_AHB4LPENCLRR_TSCLPEN BIT(15) + +/* RCC_MP_S_AHB4LPENSETR register fields */ +#define RCC_MP_S_AHB4LPENSETR_GPIOALPEN BIT(0) +#define RCC_MP_S_AHB4LPENSETR_GPIOBLPEN BIT(1) +#define RCC_MP_S_AHB4LPENSETR_GPIOCLPEN BIT(2) +#define RCC_MP_S_AHB4LPENSETR_GPIODLPEN BIT(3) +#define RCC_MP_S_AHB4LPENSETR_GPIOELPEN BIT(4) +#define RCC_MP_S_AHB4LPENSETR_GPIOFLPEN BIT(5) +#define RCC_MP_S_AHB4LPENSETR_GPIOGLPEN BIT(6) +#define RCC_MP_S_AHB4LPENSETR_GPIOHLPEN BIT(7) +#define RCC_MP_S_AHB4LPENSETR_GPIOILPEN BIT(8) + +/* RCC_MP_S_AHB4LPENCLRR register fields */ +#define RCC_MP_S_AHB4LPENCLRR_GPIOALPEN BIT(0) +#define RCC_MP_S_AHB4LPENCLRR_GPIOBLPEN BIT(1) +#define RCC_MP_S_AHB4LPENCLRR_GPIOCLPEN BIT(2) +#define RCC_MP_S_AHB4LPENCLRR_GPIODLPEN BIT(3) +#define RCC_MP_S_AHB4LPENCLRR_GPIOELPEN BIT(4) +#define RCC_MP_S_AHB4LPENCLRR_GPIOFLPEN BIT(5) +#define RCC_MP_S_AHB4LPENCLRR_GPIOGLPEN BIT(6) +#define RCC_MP_S_AHB4LPENCLRR_GPIOHLPEN BIT(7) +#define RCC_MP_S_AHB4LPENCLRR_GPIOILPEN BIT(8) + +/* RCC_MP_NS_AHB4LPENSETR 
register fields */ +#define RCC_MP_NS_AHB4LPENSETR_GPIOALPEN BIT(0) +#define RCC_MP_NS_AHB4LPENSETR_GPIOBLPEN BIT(1) +#define RCC_MP_NS_AHB4LPENSETR_GPIOCLPEN BIT(2) +#define RCC_MP_NS_AHB4LPENSETR_GPIODLPEN BIT(3) +#define RCC_MP_NS_AHB4LPENSETR_GPIOELPEN BIT(4) +#define RCC_MP_NS_AHB4LPENSETR_GPIOFLPEN BIT(5) +#define RCC_MP_NS_AHB4LPENSETR_GPIOGLPEN BIT(6) +#define RCC_MP_NS_AHB4LPENSETR_GPIOHLPEN BIT(7) +#define RCC_MP_NS_AHB4LPENSETR_GPIOILPEN BIT(8) + +/* RCC_MP_NS_AHB4LPENCLRR register fields */ +#define RCC_MP_NS_AHB4LPENCLRR_GPIOALPEN BIT(0) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOBLPEN BIT(1) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOCLPEN BIT(2) +#define RCC_MP_NS_AHB4LPENCLRR_GPIODLPEN BIT(3) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOELPEN BIT(4) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOFLPEN BIT(5) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOGLPEN BIT(6) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOHLPEN BIT(7) +#define RCC_MP_NS_AHB4LPENCLRR_GPIOILPEN BIT(8) + +/* RCC_MP_AHB5LPENSETR register fields */ +#define RCC_MP_AHB5LPENSETR_PKALPEN BIT(2) +#define RCC_MP_AHB5LPENSETR_SAESLPEN BIT(3) +#define RCC_MP_AHB5LPENSETR_CRYP1LPEN BIT(4) +#define RCC_MP_AHB5LPENSETR_HASH1LPEN BIT(5) +#define RCC_MP_AHB5LPENSETR_RNG1LPEN BIT(6) +#define RCC_MP_AHB5LPENSETR_BKPSRAMLPEN BIT(8) + +/* RCC_MP_AHB5LPENCLRR register fields */ +#define RCC_MP_AHB5LPENCLRR_PKALPEN BIT(2) +#define RCC_MP_AHB5LPENCLRR_SAESLPEN BIT(3) +#define RCC_MP_AHB5LPENCLRR_CRYP1LPEN BIT(4) +#define RCC_MP_AHB5LPENCLRR_HASH1LPEN BIT(5) +#define RCC_MP_AHB5LPENCLRR_RNG1LPEN BIT(6) +#define RCC_MP_AHB5LPENCLRR_BKPSRAMLPEN BIT(8) + +/* RCC_MP_AHB6LPENSETR register fields */ +#define RCC_MP_AHB6LPENSETR_MCELPEN BIT(1) +#define RCC_MP_AHB6LPENSETR_ETH1CKLPEN BIT(7) +#define RCC_MP_AHB6LPENSETR_ETH1TXLPEN BIT(8) +#define RCC_MP_AHB6LPENSETR_ETH1RXLPEN BIT(9) +#define RCC_MP_AHB6LPENSETR_ETH1MACLPEN BIT(10) +#define RCC_MP_AHB6LPENSETR_ETH1STPEN BIT(11) +#define RCC_MP_AHB6LPENSETR_FMCLPEN BIT(12) +#define RCC_MP_AHB6LPENSETR_QSPILPEN BIT(14) +#define RCC_MP_AHB6LPENSETR_SDMMC1LPEN BIT(16) +#define RCC_MP_AHB6LPENSETR_SDMMC2LPEN BIT(17) +#define RCC_MP_AHB6LPENSETR_CRC1LPEN BIT(20) +#define RCC_MP_AHB6LPENSETR_USBHLPEN BIT(24) +#define RCC_MP_AHB6LPENSETR_ETH2CKLPEN BIT(27) +#define RCC_MP_AHB6LPENSETR_ETH2TXLPEN BIT(28) +#define RCC_MP_AHB6LPENSETR_ETH2RXLPEN BIT(29) +#define RCC_MP_AHB6LPENSETR_ETH2MACLPEN BIT(30) +#define RCC_MP_AHB6LPENSETR_ETH2STPEN BIT(31) + +/* RCC_MP_AHB6LPENCLRR register fields */ +#define RCC_MP_AHB6LPENCLRR_MCELPEN BIT(1) +#define RCC_MP_AHB6LPENCLRR_ETH1CKLPEN BIT(7) +#define RCC_MP_AHB6LPENCLRR_ETH1TXLPEN BIT(8) +#define RCC_MP_AHB6LPENCLRR_ETH1RXLPEN BIT(9) +#define RCC_MP_AHB6LPENCLRR_ETH1MACLPEN BIT(10) +#define RCC_MP_AHB6LPENCLRR_ETH1STPEN BIT(11) +#define RCC_MP_AHB6LPENCLRR_FMCLPEN BIT(12) +#define RCC_MP_AHB6LPENCLRR_QSPILPEN BIT(14) +#define RCC_MP_AHB6LPENCLRR_SDMMC1LPEN BIT(16) +#define RCC_MP_AHB6LPENCLRR_SDMMC2LPEN BIT(17) +#define RCC_MP_AHB6LPENCLRR_CRC1LPEN BIT(20) +#define RCC_MP_AHB6LPENCLRR_USBHLPEN BIT(24) +#define RCC_MP_AHB6LPENCLRR_ETH2CKLPEN BIT(27) +#define RCC_MP_AHB6LPENCLRR_ETH2TXLPEN BIT(28) +#define RCC_MP_AHB6LPENCLRR_ETH2RXLPEN BIT(29) +#define RCC_MP_AHB6LPENCLRR_ETH2MACLPEN BIT(30) +#define RCC_MP_AHB6LPENCLRR_ETH2STPEN BIT(31) + +/* RCC_MP_S_AHB6LPENSETR register fields */ +#define RCC_MP_S_AHB6LPENSETR_MDMALPEN BIT(0) + +/* RCC_MP_S_AHB6LPENCLRR register fields */ +#define RCC_MP_S_AHB6LPENCLRR_MDMALPEN BIT(0) + +/* RCC_MP_NS_AHB6LPENSETR register fields */ +#define RCC_MP_NS_AHB6LPENSETR_MDMALPEN 
BIT(0) + +/* RCC_MP_NS_AHB6LPENCLRR register fields */ +#define RCC_MP_NS_AHB6LPENCLRR_MDMALPEN BIT(0) + +/* RCC_MP_S_AXIMLPENSETR register fields */ +#define RCC_MP_S_AXIMLPENSETR_SYSRAMLPEN BIT(0) + +/* RCC_MP_S_AXIMLPENCLRR register fields */ +#define RCC_MP_S_AXIMLPENCLRR_SYSRAMLPEN BIT(0) + +/* RCC_MP_NS_AXIMLPENSETR register fields */ +#define RCC_MP_NS_AXIMLPENSETR_SYSRAMLPEN BIT(0) + +/* RCC_MP_NS_AXIMLPENCLRR register fields */ +#define RCC_MP_NS_AXIMLPENCLRR_SYSRAMLPEN BIT(0) + +/* RCC_MP_MLAHBLPENSETR register fields */ +#define RCC_MP_MLAHBLPENSETR_SRAM1LPEN BIT(0) +#define RCC_MP_MLAHBLPENSETR_SRAM2LPEN BIT(1) +#define RCC_MP_MLAHBLPENSETR_SRAM3LPEN BIT(2) + +/* RCC_MP_MLAHBLPENCLRR register fields */ +#define RCC_MP_MLAHBLPENCLRR_SRAM1LPEN BIT(0) +#define RCC_MP_MLAHBLPENCLRR_SRAM2LPEN BIT(1) +#define RCC_MP_MLAHBLPENCLRR_SRAM3LPEN BIT(2) + +/* RCC_APB3SECSR register fields */ +#define RCC_APB3SECSR_LPTIM2SECF 0 +#define RCC_APB3SECSR_LPTIM3SECF 1 +#define RCC_APB3SECSR_VREFSECF 13 + +/* RCC_APB4SECSR register fields */ +#define RCC_APB4SECSR_DCMIPPSECF 1 +#define RCC_APB4SECSR_USBPHYSECF 16 + +/* RCC_APB5SECSR register fields */ +#define RCC_APB5SECSR_RTCSECF 8 +#define RCC_APB5SECSR_TZCSECF 11 +#define RCC_APB5SECSR_ETZPCSECF 13 +#define RCC_APB5SECSR_IWDG1SECF 15 +#define RCC_APB5SECSR_BSECSECF 16 +#define RCC_APB5SECSR_STGENCSECF_MASK GENMASK(21, 20) +#define RCC_APB5SECSR_STGENCSECF 20 +#define RCC_APB5SECSR_STGENROSECF 21 + +/* RCC_APB6SECSR register fields */ +#define RCC_APB6SECSR_USART1SECF 0 +#define RCC_APB6SECSR_USART2SECF 1 +#define RCC_APB6SECSR_SPI4SECF 2 +#define RCC_APB6SECSR_SPI5SECF 3 +#define RCC_APB6SECSR_I2C3SECF 4 +#define RCC_APB6SECSR_I2C4SECF 5 +#define RCC_APB6SECSR_I2C5SECF 6 +#define RCC_APB6SECSR_TIM12SECF 7 +#define RCC_APB6SECSR_TIM13SECF 8 +#define RCC_APB6SECSR_TIM14SECF 9 +#define RCC_APB6SECSR_TIM15SECF 10 +#define RCC_APB6SECSR_TIM16SECF 11 +#define RCC_APB6SECSR_TIM17SECF 12 + +/* RCC_AHB2SECSR register fields */ +#define RCC_AHB2SECSR_DMA3SECF 3 +#define RCC_AHB2SECSR_DMAMUX2SECF 4 +#define RCC_AHB2SECSR_ADC1SECF 5 +#define RCC_AHB2SECSR_ADC2SECF 6 +#define RCC_AHB2SECSR_USBOSECF 8 + +/* RCC_AHB4SECSR register fields */ +#define RCC_AHB4SECSR_TSCSECF 15 + +/* RCC_AHB5SECSR register fields */ +#define RCC_AHB5SECSR_PKASECF 2 +#define RCC_AHB5SECSR_SAESSECF 3 +#define RCC_AHB5SECSR_CRYP1SECF 4 +#define RCC_AHB5SECSR_HASH1SECF 5 +#define RCC_AHB5SECSR_RNG1SECF 6 +#define RCC_AHB5SECSR_BKPSRAMSECF 8 + +/* RCC_AHB6SECSR register fields */ +#define RCC_AHB6SECSR_MCESECF 1 +#define RCC_AHB6SECSR_FMCSECF 12 +#define RCC_AHB6SECSR_QSPISECF 14 +#define RCC_AHB6SECSR_SDMMC1SECF 16 +#define RCC_AHB6SECSR_SDMMC2SECF 17 + +#define RCC_AHB6SECSR_ETH1SECF_MASK GENMASK(11, 7) +#define RCC_AHB6SECSR_ETH2SECF_MASK GENMASK(31, 27) +#define RCC_AHB6SECSR_ETH1SECF_SHIFT 7 +#define RCC_AHB6SECSR_ETH2SECF_SHIFT 27 + +#define RCC_AHB6SECSR_ETH1CKSECF 7 +#define RCC_AHB6SECSR_ETH1TXSECF 8 +#define RCC_AHB6SECSR_ETH1RXSECF 9 +#define RCC_AHB6SECSR_ETH1MACSECF 10 +#define RCC_AHB6SECSR_ETH1STPSECF 11 + +#define RCC_AHB6SECSR_ETH2CKSECF 27 +#define RCC_AHB6SECSR_ETH2TXSECF 28 +#define RCC_AHB6SECSR_ETH2RXSECF 29 +#define RCC_AHB6SECSR_ETH2MACSECF 30 +#define RCC_AHB6SECSR_ETH2STPSECF 31 + +/* RCC_VERR register fields */ +#define RCC_VERR_MINREV_MASK GENMASK(3, 0) +#define RCC_VERR_MAJREV_MASK GENMASK(7, 4) +#define RCC_VERR_MINREV_SHIFT 0 +#define RCC_VERR_MAJREV_SHIFT 4 + +/* RCC_IDR register fields */ +#define RCC_IDR_ID_MASK GENMASK(31, 0) +#define 
RCC_IDR_ID_SHIFT 0 + +/* RCC_SIDR register fields */ +#define RCC_SIDR_SID_MASK GENMASK(31, 0) +#define RCC_SIDR_SID_SHIFT 0 + +#endif /* STM32MP13_RCC_H */ + diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c index 712e103382d8..29a8c710ae06 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c @@ -98,6 +98,8 @@ static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1", 0x1cc, BIT(0), 0); static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1", 0x1ec, BIT(0), 0); +static SUNXI_CCU_GATE(r_apb1_rtc_clk, "r-apb1-rtc", "r-apb1", + 0x20c, BIT(0), CLK_IGNORE_UNUSED); /* Information of IR(RX) mod clock is gathered from BSP source code */ static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" }; @@ -147,6 +149,7 @@ static struct ccu_common *sun50i_h616_r_ccu_clks[] = { &r_apb2_i2c_clk.common, &r_apb2_rsb_clk.common, &r_apb1_ir_clk.common, + &r_apb1_rtc_clk.common, &ir_clk.common, }; @@ -164,6 +167,7 @@ static struct clk_hw_onecell_data sun50i_h6_r_hw_clks = { [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw, [CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw, [CLK_R_APB1_W1] = &r_apb1_w1_clk.common.hw, + [CLK_R_APB1_RTC] = &r_apb1_rtc_clk.common.hw, [CLK_IR] = &ir_clk.common.hw, [CLK_W1] = &w1_clk.common.hw, }, @@ -179,6 +183,7 @@ static struct clk_hw_onecell_data sun50i_h616_r_hw_clks = { [CLK_R_APB2_I2C] = &r_apb2_i2c_clk.common.hw, [CLK_R_APB2_RSB] = &r_apb2_rsb_clk.common.hw, [CLK_R_APB1_IR] = &r_apb1_ir_clk.common.hw, + [CLK_R_APB1_RTC] = &r_apb1_rtc_clk.common.hw, [CLK_IR] = &ir_clk.common.hw, }, .num = CLK_NUMBER, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h index 7e290b840803..10e9b66afc6a 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.h @@ -14,6 +14,6 @@ #define CLK_R_APB2 3 -#define CLK_NUMBER (CLK_R_APB2_RSB + 1) +#define CLK_NUMBER (CLK_R_APB1_RTC + 1) #endif /* _CCU_SUN50I_H6_R_H */ diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c index 49a2474cf314..21e918582aa5 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c @@ -704,6 +704,13 @@ static CLK_FIXED_FACTOR_HWS(pll_periph0_2x_clk, "pll-periph0-2x", pll_periph0_parents, 1, 2, 0); +static const struct clk_hw *pll_periph0_2x_hws[] = { + &pll_periph0_2x_clk.hw +}; + +static CLK_FIXED_FACTOR_HWS(pll_system_32k_clk, "pll-system-32k", + pll_periph0_2x_hws, 36621, 1, 0); + static const struct clk_hw *pll_periph1_parents[] = { &pll_periph1_clk.common.hw }; @@ -852,6 +859,7 @@ static struct clk_hw_onecell_data sun50i_h616_hw_clks = { [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw, [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw, [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw, + [CLK_PLL_SYSTEM_32K] = &pll_system_32k_clk.hw, [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw, [CLK_PLL_PERIPH1_2X] = &pll_periph1_2x_clk.hw, [CLK_PLL_GPU] = &pll_gpu_clk.common.hw, diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h index dd671b413f22..fdd2f4d5103f 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.h @@ -51,6 +51,6 @@ #define CLK_BUS_DRAM 56 -#define CLK_NUMBER (CLK_BUS_HDCP + 1) +#define CLK_NUMBER (CLK_PLL_SYSTEM_32K + 1) #endif /* _CCU_SUN50I_H616_H_ */ diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c index 6ecf18f71c32..3748a39dae7c 100644 --- 
a/drivers/clk/tegra/clk-bpmp.c +++ b/drivers/clk/tegra/clk-bpmp.c @@ -164,15 +164,18 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw, return response.rate; } -static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int tegra_bpmp_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *rate_req) { struct tegra_bpmp_clk *clk = to_tegra_bpmp_clk(hw); struct cmd_clk_round_rate_response response; struct cmd_clk_round_rate_request request; struct tegra_bpmp_clk_message msg; + unsigned long rate; int err; + rate = min(max(rate_req->rate, rate_req->min_rate), rate_req->max_rate); + memset(&request, 0, sizeof(request)); request.rate = min_t(u64, rate, S64_MAX); @@ -188,7 +191,9 @@ static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate, if (err < 0) return err; - return response.rate; + rate_req->rate = (unsigned long)response.rate; + + return 0; } static int tegra_bpmp_clk_set_parent(struct clk_hw *hw, u8 index) @@ -290,7 +295,7 @@ static const struct clk_ops tegra_bpmp_clk_rate_ops = { .unprepare = tegra_bpmp_clk_unprepare, .is_prepared = tegra_bpmp_clk_is_prepared, .recalc_rate = tegra_bpmp_clk_recalc_rate, - .round_rate = tegra_bpmp_clk_round_rate, + .determine_rate = tegra_bpmp_clk_determine_rate, .set_rate = tegra_bpmp_clk_set_rate, }; @@ -299,7 +304,7 @@ static const struct clk_ops tegra_bpmp_clk_mux_rate_ops = { .unprepare = tegra_bpmp_clk_unprepare, .is_prepared = tegra_bpmp_clk_is_prepared, .recalc_rate = tegra_bpmp_clk_recalc_rate, - .round_rate = tegra_bpmp_clk_round_rate, + .determine_rate = tegra_bpmp_clk_determine_rate, .set_parent = tegra_bpmp_clk_set_parent, .get_parent = tegra_bpmp_clk_get_parent, .set_rate = tegra_bpmp_clk_set_rate, @@ -448,15 +453,29 @@ static int tegra_bpmp_probe_clocks(struct tegra_bpmp *bpmp, return count; } +static unsigned int +tegra_bpmp_clk_id_to_index(const struct tegra_bpmp_clk_info *clocks, + unsigned int num_clocks, unsigned int id) +{ + unsigned int i; + + for (i = 0; i < num_clocks; i++) + if (clocks[i].id == id) + return i; + + return UINT_MAX; +} + static const struct tegra_bpmp_clk_info * tegra_bpmp_clk_find(const struct tegra_bpmp_clk_info *clocks, unsigned int num_clocks, unsigned int id) { unsigned int i; - for (i = 0; i < num_clocks; i++) - if (clocks[i].id == id) - return &clocks[i]; + i = tegra_bpmp_clk_id_to_index(clocks, num_clocks, id); + + if (i < num_clocks) + return &clocks[i]; return NULL; } @@ -539,31 +558,57 @@ tegra_bpmp_clk_register(struct tegra_bpmp *bpmp, return clk; } +static void tegra_bpmp_register_clocks_one(struct tegra_bpmp *bpmp, + struct tegra_bpmp_clk_info *infos, + unsigned int i, + unsigned int count) +{ + unsigned int j; + struct tegra_bpmp_clk_info *info; + struct tegra_bpmp_clk *clk; + + if (bpmp->clocks[i]) { + /* already registered */ + return; + } + + info = &infos[i]; + for (j = 0; j < info->num_parents; ++j) { + unsigned int p_id = info->parents[j]; + unsigned int p_i = tegra_bpmp_clk_id_to_index(infos, count, + p_id); + if (p_i < count) + tegra_bpmp_register_clocks_one(bpmp, infos, p_i, count); + } + + clk = tegra_bpmp_clk_register(bpmp, info, infos, count); + if (IS_ERR(clk)) { + dev_err(bpmp->dev, + "failed to register clock %u (%s): %ld\n", + info->id, info->name, PTR_ERR(clk)); + /* intentionally store the error pointer to + * bpmp->clocks[i] to avoid re-attempting the + * registration later + */ + } + + bpmp->clocks[i] = clk; +} + static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp, 
struct tegra_bpmp_clk_info *infos, unsigned int count) { - struct tegra_bpmp_clk *clk; unsigned int i; bpmp->num_clocks = count; - bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(clk), GFP_KERNEL); + bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL); if (!bpmp->clocks) return -ENOMEM; for (i = 0; i < count; i++) { - struct tegra_bpmp_clk_info *info = &infos[i]; - - clk = tegra_bpmp_clk_register(bpmp, info, infos, count); - if (IS_ERR(clk)) { - dev_err(bpmp->dev, - "failed to register clock %u (%s): %ld\n", - info->id, info->name, PTR_ERR(clk)); - continue; - } - - bpmp->clocks[i] = clk; + tegra_bpmp_register_clocks_one(bpmp, infos, i, count); } return 0; diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c index 6144447f86c6..41433927b55c 100644 --- a/drivers/clk/tegra/clk-dfll.c +++ b/drivers/clk/tegra/clk-dfll.c @@ -271,6 +271,7 @@ struct tegra_dfll { struct clk *ref_clk; struct clk *i2c_clk; struct clk *dfll_clk; + struct reset_control *dfll_rst; struct reset_control *dvco_rst; unsigned long ref_rate; unsigned long i2c_clk_rate; @@ -666,7 +667,7 @@ static int dfll_force_output(struct tegra_dfll *td, unsigned int out_sel) } /** - * dfll_load_lut - load the voltage lookup table + * dfll_load_i2c_lut - load the voltage lookup table * @td: struct tegra_dfll * * * Load the voltage-to-PMIC register value lookup table into the DFLL @@ -897,7 +898,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td, } /** - * tegra_dfll_request_rate - set the next rate for the DFLL to tune to + * dfll_request_rate - set the next rate for the DFLL to tune to * @td: DFLL instance * @rate: clock rate to target * @@ -1005,7 +1006,7 @@ static void dfll_set_open_loop_config(struct tegra_dfll *td) } /** - * tegra_dfll_lock - switch from open-loop to closed-loop mode + * dfll_lock - switch from open-loop to closed-loop mode * @td: DFLL instance * * Switch from OPEN_LOOP state to CLOSED_LOOP state. Returns 0 upon success, @@ -1046,7 +1047,7 @@ static int dfll_lock(struct tegra_dfll *td) } /** - * tegra_dfll_unlock - switch from closed-loop to open-loop mode + * dfll_unlock - switch from closed-loop to open-loop mode * @td: DFLL instance * * Switch from CLOSED_LOOP state to OPEN_LOOP state. 
Returns 0 upon success, @@ -1464,6 +1465,7 @@ static int dfll_init(struct tegra_dfll *td) return -EINVAL; } + reset_control_deassert(td->dfll_rst); reset_control_deassert(td->dvco_rst); ret = clk_prepare(td->ref_clk); @@ -1509,6 +1511,7 @@ di_err1: clk_unprepare(td->ref_clk); reset_control_assert(td->dvco_rst); + reset_control_assert(td->dfll_rst); return ret; } @@ -1530,6 +1533,7 @@ int tegra_dfll_suspend(struct device *dev) } reset_control_assert(td->dvco_rst); + reset_control_assert(td->dfll_rst); return 0; } @@ -1548,6 +1552,7 @@ int tegra_dfll_resume(struct device *dev) { struct tegra_dfll *td = dev_get_drvdata(dev); + reset_control_deassert(td->dfll_rst); reset_control_deassert(td->dvco_rst); pm_runtime_get_sync(td->dev); @@ -1951,6 +1956,12 @@ int tegra_dfll_register(struct platform_device *pdev, td->soc = soc; + td->dfll_rst = devm_reset_control_get_optional(td->dev, "dfll"); + if (IS_ERR(td->dfll_rst)) { + dev_err(td->dev, "couldn't get dfll reset\n"); + return PTR_ERR(td->dfll_rst); + } + td->dvco_rst = devm_reset_control_get(td->dev, "dvco"); if (IS_ERR(td->dvco_rst)) { dev_err(td->dev, "couldn't get dvco reset\n"); @@ -2087,6 +2098,7 @@ struct tegra_dfll_soc_data *tegra_dfll_unregister(struct platform_device *pdev) clk_unprepare(td->i2c_clk); reset_control_assert(td->dvco_rst); + reset_control_assert(td->dfll_rst); return td->soc; } diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c index 064066e9e85b..617360e20d86 100644 --- a/drivers/clk/ti/clkctrl.c +++ b/drivers/clk/ti/clkctrl.c @@ -232,8 +232,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, void *data) { struct omap_clkctrl_provider *provider = data; - struct omap_clkctrl_clk *entry; - bool found = false; + struct omap_clkctrl_clk *entry = NULL, *iter; if (clkspec->args_count != 2) return ERR_PTR(-EINVAL); @@ -241,15 +240,15 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, pr_debug("%s: looking for %x:%x\n", __func__, clkspec->args[0], clkspec->args[1]); - list_for_each_entry(entry, &provider->clocks, node) { - if (entry->reg_offset == clkspec->args[0] && - entry->bit_offset == clkspec->args[1]) { - found = true; + list_for_each_entry(iter, &provider->clocks, node) { + if (iter->reg_offset == clkspec->args[0] && + iter->bit_offset == clkspec->args[1]) { + entry = iter; break; } } - if (!found) + if (!entry) return ERR_PTR(-EINVAL); return entry->clk; diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 8d60319be368..779b9900f636 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -255,7 +255,7 @@ int __init ti_clk_add_component(struct device_node *node, struct clk_hw *hw, return -EINVAL; } - parent_names = kzalloc((sizeof(char *) * num_parents), GFP_KERNEL); + parent_names = kcalloc(num_parents, sizeof(char *), GFP_KERNEL); if (!parent_names) return -ENOMEM; diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c index 937b6bb82b30..4deb37f19a7c 100644 --- a/drivers/clk/ux500/clk-prcmu.c +++ b/drivers/clk/ux500/clk-prcmu.c @@ -14,27 +14,28 @@ #include "clk.h" #define to_clk_prcmu(_hw) container_of(_hw, struct clk_prcmu, hw) +#define to_clk_prcmu_clkout(_hw) container_of(_hw, struct clk_prcmu_clkout, hw) struct clk_prcmu { struct clk_hw hw; u8 cg_sel; - int is_prepared; - int is_enabled; int opp_requested; }; +struct clk_prcmu_clkout { + struct clk_hw hw; + u8 clkout_id; + u8 source; + u8 divider; +}; + /* PRCMU clock operations. 
*/ static int clk_prcmu_prepare(struct clk_hw *hw) { - int ret; struct clk_prcmu *clk = to_clk_prcmu(hw); - ret = prcmu_request_clock(clk->cg_sel, true); - if (!ret) - clk->is_prepared = 1; - - return ret; + return prcmu_request_clock(clk->cg_sel, true); } static void clk_prcmu_unprepare(struct clk_hw *hw) @@ -42,34 +43,7 @@ static void clk_prcmu_unprepare(struct clk_hw *hw) struct clk_prcmu *clk = to_clk_prcmu(hw); if (prcmu_request_clock(clk->cg_sel, false)) pr_err("clk_prcmu: %s failed to disable %s.\n", __func__, - clk_hw_get_name(hw)); - else - clk->is_prepared = 0; -} - -static int clk_prcmu_is_prepared(struct clk_hw *hw) -{ - struct clk_prcmu *clk = to_clk_prcmu(hw); - return clk->is_prepared; -} - -static int clk_prcmu_enable(struct clk_hw *hw) -{ - struct clk_prcmu *clk = to_clk_prcmu(hw); - clk->is_enabled = 1; - return 0; -} - -static void clk_prcmu_disable(struct clk_hw *hw) -{ - struct clk_prcmu *clk = to_clk_prcmu(hw); - clk->is_enabled = 0; -} - -static int clk_prcmu_is_enabled(struct clk_hw *hw) -{ - struct clk_prcmu *clk = to_clk_prcmu(hw); - return clk->is_enabled; + clk_hw_get_name(hw)); } static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw, @@ -118,7 +92,6 @@ static int clk_prcmu_opp_prepare(struct clk_hw *hw) return err; } - clk->is_prepared = 1; return 0; } @@ -137,8 +110,6 @@ static void clk_prcmu_opp_unprepare(struct clk_hw *hw) (char *)clk_hw_get_name(hw)); clk->opp_requested = 0; } - - clk->is_prepared = 0; } static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw) @@ -163,7 +134,6 @@ static int clk_prcmu_opp_volt_prepare(struct clk_hw *hw) return err; } - clk->is_prepared = 1; return 0; } @@ -181,17 +151,11 @@ static void clk_prcmu_opp_volt_unprepare(struct clk_hw *hw) prcmu_request_ape_opp_100_voltage(false); clk->opp_requested = 0; } - - clk->is_prepared = 0; } static const struct clk_ops clk_prcmu_scalable_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, - .is_prepared = clk_prcmu_is_prepared, - .enable = clk_prcmu_enable, - .disable = clk_prcmu_disable, - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, @@ -200,57 +164,43 @@ static const struct clk_ops clk_prcmu_scalable_ops = { static const struct clk_ops clk_prcmu_gate_ops = { .prepare = clk_prcmu_prepare, .unprepare = clk_prcmu_unprepare, - .is_prepared = clk_prcmu_is_prepared, - .enable = clk_prcmu_enable, - .disable = clk_prcmu_disable, - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static const struct clk_ops clk_prcmu_scalable_rate_ops = { - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; static const struct clk_ops clk_prcmu_rate_ops = { - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static const struct clk_ops clk_prcmu_opp_gate_ops = { .prepare = clk_prcmu_opp_prepare, .unprepare = clk_prcmu_opp_unprepare, - .is_prepared = clk_prcmu_is_prepared, - .enable = clk_prcmu_enable, - .disable = clk_prcmu_disable, - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, }; static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = { .prepare = clk_prcmu_opp_volt_prepare, .unprepare = clk_prcmu_opp_volt_unprepare, - .is_prepared = clk_prcmu_is_prepared, - .enable = clk_prcmu_enable, - .disable = clk_prcmu_disable, - .is_enabled = clk_prcmu_is_enabled, .recalc_rate = clk_prcmu_recalc_rate, .round_rate = 
clk_prcmu_round_rate, .set_rate = clk_prcmu_set_rate, }; -static struct clk *clk_reg_prcmu(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags, - const struct clk_ops *clk_prcmu_ops) +static struct clk_hw *clk_reg_prcmu(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags, + const struct clk_ops *clk_prcmu_ops) { struct clk_prcmu *clk; struct clk_init_data clk_prcmu_init; - struct clk *clk_reg; + int ret; if (!name) { pr_err("clk_prcmu: %s invalid arguments passed\n", __func__); @@ -262,8 +212,6 @@ static struct clk *clk_reg_prcmu(const char *name, return ERR_PTR(-ENOMEM); clk->cg_sel = cg_sel; - clk->is_prepared = 1; - clk->is_enabled = 1; clk->opp_requested = 0; /* "rate" can be used for changing the initial frequency */ if (rate) @@ -276,11 +224,11 @@ static struct clk *clk_reg_prcmu(const char *name, clk_prcmu_init.num_parents = (parent_name ? 1 : 0); clk->hw.init = &clk_prcmu_init; - clk_reg = clk_register(NULL, &clk->hw); - if (IS_ERR_OR_NULL(clk_reg)) + ret = clk_hw_register(NULL, &clk->hw); + if (ret) goto free_clk; - return clk_reg; + return &clk->hw; free_clk: kfree(clk); @@ -288,59 +236,165 @@ free_clk: return ERR_PTR(-ENOMEM); } -struct clk *clk_reg_prcmu_scalable(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_scalable(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_scalable_ops); } -struct clk *clk_reg_prcmu_gate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_gate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_gate_ops); } -struct clk *clk_reg_prcmu_scalable_rate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_scalable_rate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_scalable_rate_ops); } -struct clk *clk_reg_prcmu_rate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_rate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_rate_ops); } -struct clk *clk_reg_prcmu_opp_gate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_opp_gate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, 0, flags, &clk_prcmu_opp_gate_ops); } -struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags) +struct clk_hw *clk_reg_prcmu_opp_volt_scalable(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags) { return clk_reg_prcmu(name, parent_name, cg_sel, rate, flags, &clk_prcmu_opp_volt_scalable_ops); } + +/* The clkout (external) clock is special and needs special ops */ + +static int clk_prcmu_clkout_prepare(struct clk_hw *hw) +{ + struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw);
+ + return prcmu_config_clkout(clk->clkout_id, clk->source, clk->divider); +} + +static void clk_prcmu_clkout_unprepare(struct clk_hw *hw) +{ + struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw); + int ret; + + /* The clkout clock is disabled by dividing by 0 */ + ret = prcmu_config_clkout(clk->clkout_id, clk->source, 0); + if (ret) + pr_err("clk_prcmu: %s failed to disable %s\n", __func__, + clk_hw_get_name(hw)); +} + +static unsigned long clk_prcmu_clkout_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw); + + return (parent_rate / clk->divider); +} + +static u8 clk_prcmu_clkout_get_parent(struct clk_hw *hw) +{ + struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw); + + return clk->source; +} + +static int clk_prcmu_clkout_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_prcmu_clkout *clk = to_clk_prcmu_clkout(hw); + + clk->source = index; + /* Make sure the change reaches the hardware immediately */ + if (clk_hw_is_prepared(hw)) + return clk_prcmu_clkout_prepare(hw); + return 0; +} + +static const struct clk_ops clk_prcmu_clkout_ops = { + .prepare = clk_prcmu_clkout_prepare, + .unprepare = clk_prcmu_clkout_unprepare, + .recalc_rate = clk_prcmu_clkout_recalc_rate, + .get_parent = clk_prcmu_clkout_get_parent, + .set_parent = clk_prcmu_clkout_set_parent, +}; + +struct clk_hw *clk_reg_prcmu_clkout(const char *name, + const char * const *parent_names, + int num_parents, + u8 source, u8 divider) + +{ + struct clk_prcmu_clkout *clk; + struct clk_init_data clk_prcmu_clkout_init; + u8 clkout_id; + int ret; + + if (!name) { + pr_err("clk_prcmu_clkout: %s invalid arguments passed\n", __func__); + return ERR_PTR(-EINVAL); + } + + if (!strcmp(name, "clkout1")) + clkout_id = 0; + else if (!strcmp(name, "clkout2")) + clkout_id = 1; + else { + pr_err("clk_prcmu_clkout: %s bad clock name\n", __func__); + return ERR_PTR(-EINVAL); + } + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return ERR_PTR(-ENOMEM); + + clk->clkout_id = clkout_id; + clk->source = source; + clk->divider = divider; + + clk_prcmu_clkout_init.name = name; + clk_prcmu_clkout_init.ops = &clk_prcmu_clkout_ops; + clk_prcmu_clkout_init.flags = CLK_GET_RATE_NOCACHE; + clk_prcmu_clkout_init.parent_names = parent_names; + clk_prcmu_clkout_init.num_parents = num_parents; + clk->hw.init = &clk_prcmu_clkout_init; + + ret = clk_hw_register(NULL, &clk->hw); + if (ret) + goto free_clkout; + + return &clk->hw; +free_clkout: + kfree(clk); + pr_err("clk_prcmu_clkout: %s failed to register clk\n", __func__); + return ERR_PTR(-ENOMEM); +} diff --git a/drivers/clk/ux500/clk.h b/drivers/clk/ux500/clk.h index 40cd9fc95b8b..91003cf8003c 100644 --- a/drivers/clk/ux500/clk.h +++ b/drivers/clk/ux500/clk.h @@ -13,6 +13,7 @@ #include <linux/types.h> struct clk; +struct clk_hw; struct clk *clk_reg_prcc_pclk(const char *name, const char *parent_name, @@ -26,38 +27,43 @@ struct clk *clk_reg_prcc_kclk(const char *name, u32 cg_sel, unsigned long flags); -struct clk *clk_reg_prcmu_scalable(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags); - -struct clk *clk_reg_prcmu_gate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags); - -struct clk *clk_reg_prcmu_scalable_rate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags); - -struct clk *clk_reg_prcmu_rate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags); - -struct clk 
*clk_reg_prcmu_opp_gate(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long flags); - -struct clk *clk_reg_prcmu_opp_volt_scalable(const char *name, - const char *parent_name, - u8 cg_sel, - unsigned long rate, - unsigned long flags); +struct clk_hw *clk_reg_prcmu_scalable(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_gate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_scalable_rate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_rate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_opp_gate(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_opp_volt_scalable(const char *name, + const char *parent_name, + u8 cg_sel, + unsigned long rate, + unsigned long flags); + +struct clk_hw *clk_reg_prcmu_clkout(const char *name, + const char * const *parent_names, + int num_parents, + u8 source, u8 divider); struct clk *clk_reg_sysctrl_gate(struct device *dev, const char *name, diff --git a/drivers/clk/ux500/reset-prcc.c b/drivers/clk/ux500/reset-prcc.c index fcd5d042806a..f7e48941fbc7 100644 --- a/drivers/clk/ux500/reset-prcc.c +++ b/drivers/clk/ux500/reset-prcc.c @@ -58,7 +58,7 @@ static void __iomem *u8500_prcc_reset_base(struct u8500_prcc_reset *ur, prcc_num = id / PRCC_PERIPHS_PER_CLUSTER; index = prcc_num_to_index(prcc_num); - if (index > ARRAY_SIZE(ur->base)) + if (index >= ARRAY_SIZE(ur->base)) return NULL; return ur->base[index]; diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index e86ed2eec3fd..8e2f6c65db2a 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -15,9 +15,9 @@ #include "prcc.h" #include "reset-prcc.h" -static struct clk *prcmu_clk[PRCMU_NUM_CLKS]; static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER]; static struct clk *prcc_kclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER]; +static struct clk_hw *clkout_clk[2]; #define PRCC_SHOW(clk, base, bit) \ clk[(base * PRCC_PERIPHS_PER_CLUSTER) + bit] @@ -46,6 +46,82 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec, return PRCC_SHOW(clk_data, base, bit); } +static struct clk_hw_onecell_data u8500_prcmu_hw_clks = { + .hws = { + /* + * This assignment makes sure the dynamic array + * gets the right size. 
+ */ + [PRCMU_NUM_CLKS] = NULL, + }, + .num = PRCMU_NUM_CLKS, +}; + +/* Essentially names for the first PRCMU_CLKSRC_* defines */ +static const char * const u8500_clkout_parents[] = { + "clk38m_to_clkgen", + "aclk", + /* Just called "sysclk" in documentation */ + "ab8500_sysclk", + "lcdclk", + "sdmmcclk", + "tvclk", + "timclk", + /* CLK009 is not implemented, add it if you need it */ + "clk009", +}; + +static struct clk_hw *ux500_clkout_get(struct of_phandle_args *clkspec, + void *data) +{ + u32 id, source, divider; + struct clk_hw *clkout; + + if (clkspec->args_count != 3) + return ERR_PTR(-EINVAL); + + id = clkspec->args[0]; + source = clkspec->args[1]; + divider = clkspec->args[2]; + + if (id > 1) { + pr_err("%s: invalid clkout ID %d\n", __func__, id); + return ERR_PTR(-EINVAL); + } + + if (clkout_clk[id]) { + pr_info("%s: clkout%d already registered, not reconfiguring\n", + __func__, id + 1); + return clkout_clk[id]; + } + + if (source > 7) { + pr_err("%s: invalid source ID %d\n", __func__, source); + return ERR_PTR(-EINVAL); + } + + if (divider == 0 || divider > 63) { + pr_err("%s: invalid divider %d\n", __func__, divider); + return ERR_PTR(-EINVAL); + } + + pr_debug("registering clkout%d with source %d and divider %d\n", + id + 1, source, divider); + + clkout = clk_reg_prcmu_clkout(id ? "clkout2" : "clkout1", + u8500_clkout_parents, + ARRAY_SIZE(u8500_clkout_parents), + source, divider); + if (IS_ERR(clkout)) { + pr_err("failed to register clkout%d\n", id + 1); + return ERR_CAST(clkout); + } + + clkout_clk[id] = clkout; + + return clkout; +} + static void u8500_clk_init(struct device_node *np) { struct prcmu_fw_version *fw_version; @@ -77,19 +153,29 @@ static void u8500_clk_init(struct device_node *np) } /* Clock sources */ - clk = clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0, - CLK_IGNORE_UNUSED); - prcmu_clk[PRCMU_PLLSOC0] = clk; + u8500_prcmu_hw_clks.hws[PRCMU_PLLSOC0] = + clk_reg_prcmu_gate("soc0_pll", NULL, PRCMU_PLLSOC0, + CLK_IGNORE_UNUSED); - clk = clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1, - CLK_IGNORE_UNUSED); - prcmu_clk[PRCMU_PLLSOC1] = clk; + u8500_prcmu_hw_clks.hws[PRCMU_PLLSOC1] = + clk_reg_prcmu_gate("soc1_pll", NULL, PRCMU_PLLSOC1, + CLK_IGNORE_UNUSED); - clk = clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR, - CLK_IGNORE_UNUSED); - prcmu_clk[PRCMU_PLLDDR] = clk; + u8500_prcmu_hw_clks.hws[PRCMU_PLLDDR] = + clk_reg_prcmu_gate("ddr_pll", NULL, PRCMU_PLLDDR, + CLK_IGNORE_UNUSED); - /* FIXME: Add sys, ulp and int clocks here. */ + /* + * Read-only clocks that only return their current rate, only used + * as parents to other clocks and not visible in the device tree. + * clk38m_to_clkgen is the same as the SYSCLK, i.e. the root clock. 
+ */ + clk_reg_prcmu_rate("clk38m_to_clkgen", NULL, PRCMU_SYSCLK, + CLK_IGNORE_UNUSED); + clk_reg_prcmu_rate("aclk", NULL, PRCMU_ACLK, + CLK_IGNORE_UNUSED); + + /* TODO: add CLK009 if needed */ rtc_clk = clk_register_fixed_rate(NULL, "rtc32k", "NULL", CLK_IGNORE_UNUSED, @@ -113,146 +199,106 @@ static void u8500_clk_init(struct device_node *np) } if (sgaclk_parent) - clk = clk_reg_prcmu_gate("sgclk", sgaclk_parent, - PRCMU_SGACLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_SGACLK] = + clk_reg_prcmu_gate("sgclk", sgaclk_parent, + PRCMU_SGACLK, 0); else - clk = clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0); - prcmu_clk[PRCMU_SGACLK] = clk; - - clk = clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0); - prcmu_clk[PRCMU_UARTCLK] = clk; - - clk = clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0); - prcmu_clk[PRCMU_MSP02CLK] = clk; - - clk = clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0); - prcmu_clk[PRCMU_MSP1CLK] = clk; - - clk = clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0); - prcmu_clk[PRCMU_I2CCLK] = clk; - - clk = clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0); - prcmu_clk[PRCMU_SLIMCLK] = clk; - - clk = clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0); - prcmu_clk[PRCMU_PER1CLK] = clk; - - clk = clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0); - prcmu_clk[PRCMU_PER2CLK] = clk; - - clk = clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0); - prcmu_clk[PRCMU_PER3CLK] = clk; - - clk = clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0); - prcmu_clk[PRCMU_PER5CLK] = clk; - - clk = clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0); - prcmu_clk[PRCMU_PER6CLK] = clk; - - clk = clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0); - prcmu_clk[PRCMU_PER7CLK] = clk; - - clk = clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_LCDCLK] = clk; - - clk = clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0); - prcmu_clk[PRCMU_BMLCLK] = clk; - - clk = clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_HSITXCLK] = clk; - - clk = clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_HSIRXCLK] = clk; - - clk = clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_HDMICLK] = clk; - - clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0); - prcmu_clk[PRCMU_APEATCLK] = clk; - - clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_APETRACECLK] = clk; - - clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0); - prcmu_clk[PRCMU_MCDECLK] = clk; - - clk = clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0); - prcmu_clk[PRCMU_IPI2CCLK] = clk; - - clk = clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0); - prcmu_clk[PRCMU_DSIALTCLK] = clk; - - clk = clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0); - prcmu_clk[PRCMU_DMACLK] = clk; - - clk = clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0); - prcmu_clk[PRCMU_B2R2CLK] = clk; - - clk = clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0, - CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_TVCLK] = clk; - - clk = clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0); - prcmu_clk[PRCMU_SSPCLK] = clk; - - clk = clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0); - prcmu_clk[PRCMU_RNGCLK] = clk; - - clk = clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0); - prcmu_clk[PRCMU_UICCCLK] = clk; - - clk = clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0); - 
prcmu_clk[PRCMU_TIMCLK] = clk; - - clk = clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0); - prcmu_clk[PRCMU_SYSCLK] = clk; - - clk = clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, PRCMU_SDMMCCLK, - 100000000, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_SDMMCCLK] = clk; - - clk = clk_reg_prcmu_scalable("dsi_pll", "hdmiclk", - PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_PLLDSI] = clk; - - clk = clk_reg_prcmu_scalable("dsi0clk", "dsi_pll", - PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_DSI0CLK] = clk; - - clk = clk_reg_prcmu_scalable("dsi1clk", "dsi_pll", - PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_DSI1CLK] = clk; - - clk = clk_reg_prcmu_scalable("dsi0escclk", "tvclk", - PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_DSI0ESCCLK] = clk; - - clk = clk_reg_prcmu_scalable("dsi1escclk", "tvclk", - PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_DSI1ESCCLK] = clk; - - clk = clk_reg_prcmu_scalable("dsi2escclk", "tvclk", - PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE); - prcmu_clk[PRCMU_DSI2ESCCLK] = clk; - - clk = clk_reg_prcmu_scalable_rate("armss", NULL, - PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED); - prcmu_clk[PRCMU_ARMSS] = clk; + u8500_prcmu_hw_clks.hws[PRCMU_SGACLK] = + clk_reg_prcmu_gate("sgclk", NULL, PRCMU_SGACLK, 0); + + u8500_prcmu_hw_clks.hws[PRCMU_UARTCLK] = + clk_reg_prcmu_gate("uartclk", NULL, PRCMU_UARTCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_MSP02CLK] = + clk_reg_prcmu_gate("msp02clk", NULL, PRCMU_MSP02CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_MSP1CLK] = + clk_reg_prcmu_gate("msp1clk", NULL, PRCMU_MSP1CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_I2CCLK] = + clk_reg_prcmu_gate("i2cclk", NULL, PRCMU_I2CCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_SLIMCLK] = + clk_reg_prcmu_gate("slimclk", NULL, PRCMU_SLIMCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER1CLK] = + clk_reg_prcmu_gate("per1clk", NULL, PRCMU_PER1CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER2CLK] = + clk_reg_prcmu_gate("per2clk", NULL, PRCMU_PER2CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER3CLK] = + clk_reg_prcmu_gate("per3clk", NULL, PRCMU_PER3CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER5CLK] = + clk_reg_prcmu_gate("per5clk", NULL, PRCMU_PER5CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER6CLK] = + clk_reg_prcmu_gate("per6clk", NULL, PRCMU_PER6CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_PER7CLK] = + clk_reg_prcmu_gate("per7clk", NULL, PRCMU_PER7CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_LCDCLK] = + clk_reg_prcmu_scalable("lcdclk", NULL, PRCMU_LCDCLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_BMLCLK] = + clk_reg_prcmu_opp_gate("bmlclk", NULL, PRCMU_BMLCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_HSITXCLK] = + clk_reg_prcmu_scalable("hsitxclk", NULL, PRCMU_HSITXCLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_HSIRXCLK] = + clk_reg_prcmu_scalable("hsirxclk", NULL, PRCMU_HSIRXCLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_HDMICLK] = + clk_reg_prcmu_scalable("hdmiclk", NULL, PRCMU_HDMICLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_APEATCLK] = + clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_APETRACECLK] = + clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_MCDECLK] = + clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_IPI2CCLK] = + clk_reg_prcmu_opp_gate("ipi2cclk", NULL, PRCMU_IPI2CCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_DSIALTCLK] = + clk_reg_prcmu_gate("dsialtclk", NULL, PRCMU_DSIALTCLK, 0); + 
u8500_prcmu_hw_clks.hws[PRCMU_DMACLK] = + clk_reg_prcmu_gate("dmaclk", NULL, PRCMU_DMACLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_B2R2CLK] = + clk_reg_prcmu_gate("b2r2clk", NULL, PRCMU_B2R2CLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_TVCLK] = + clk_reg_prcmu_scalable("tvclk", NULL, PRCMU_TVCLK, 0, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_SSPCLK] = + clk_reg_prcmu_gate("sspclk", NULL, PRCMU_SSPCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_RNGCLK] = + clk_reg_prcmu_gate("rngclk", NULL, PRCMU_RNGCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_UICCCLK] = + clk_reg_prcmu_gate("uiccclk", NULL, PRCMU_UICCCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_TIMCLK] = + clk_reg_prcmu_gate("timclk", NULL, PRCMU_TIMCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_SYSCLK] = + clk_reg_prcmu_gate("ab8500_sysclk", NULL, PRCMU_SYSCLK, 0); + u8500_prcmu_hw_clks.hws[PRCMU_SDMMCCLK] = + clk_reg_prcmu_opp_volt_scalable("sdmmcclk", NULL, + PRCMU_SDMMCCLK, 100000000, + CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_PLLDSI] = + clk_reg_prcmu_scalable("dsi_pll", "hdmiclk", + PRCMU_PLLDSI, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_DSI0CLK] = + clk_reg_prcmu_scalable("dsi0clk", "dsi_pll", + PRCMU_DSI0CLK, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_DSI1CLK] = + clk_reg_prcmu_scalable("dsi1clk", "dsi_pll", + PRCMU_DSI1CLK, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_DSI0ESCCLK] = + clk_reg_prcmu_scalable("dsi0escclk", "tvclk", + PRCMU_DSI0ESCCLK, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_DSI1ESCCLK] = + clk_reg_prcmu_scalable("dsi1escclk", "tvclk", + PRCMU_DSI1ESCCLK, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_DSI2ESCCLK] = + clk_reg_prcmu_scalable("dsi2escclk", "tvclk", + PRCMU_DSI2ESCCLK, 0, CLK_SET_RATE_GATE); + u8500_prcmu_hw_clks.hws[PRCMU_ARMSS] = + clk_reg_prcmu_scalable_rate("armss", NULL, + PRCMU_ARMSS, 0, CLK_IGNORE_UNUSED); twd_clk = clk_register_fixed_factor(NULL, "smp_twd", "armss", CLK_IGNORE_UNUSED, 1, 2); - /* - * FIXME: Add special handled PRCMU clocks here: - * 1. clkout0yuv, use PRCMU as parent + need regulator + pinctrl. - * 2. 
ab9540_clkout1yuv, see clkout0yuv - */ - /* PRCC P-clocks */ clk = clk_reg_prcc_pclk("p1_pclk0", "per1clk", bases[CLKRST1_INDEX], BIT(0), 0); @@ -546,13 +592,13 @@ static void u8500_clk_init(struct device_node *np) PRCC_KCLK_STORE(clk, 6, 0); for_each_child_of_node(np, child) { - static struct clk_onecell_data clk_data; + if (of_node_name_eq(child, "prcmu-clock")) + of_clk_add_hw_provider(child, of_clk_hw_onecell_get, + &u8500_prcmu_hw_clks); + + if (of_node_name_eq(child, "clkout-clock")) + of_clk_add_hw_provider(child, ux500_clkout_get, NULL); - if (of_node_name_eq(child, "prcmu-clock")) { - clk_data.clks = prcmu_clk; - clk_data.clk_num = ARRAY_SIZE(prcmu_clk); - of_clk_add_provider(child, of_clk_src_onecell_get, &clk_data); - } if (of_node_name_eq(child, "prcc-periph-clock")) of_clk_add_provider(child, ux500_twocell_get, prcc_pclk); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index d092c9bb4ba3..24eaf0ec344d 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -61,6 +61,8 @@ static struct cppc_workaround_oem_info wa_info[] = { } }; +static struct cpufreq_driver cppc_cpufreq_driver; + #ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE /* Frequency invariance support */ @@ -75,7 +77,6 @@ struct cppc_freq_invariance { static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv); static struct kthread_worker *kworker_fie; -static struct cpufreq_driver cppc_cpufreq_driver; static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu); static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data, struct cppc_perf_fb_ctrs *fb_ctrs_t0, @@ -440,6 +441,14 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) } return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; } +#else +static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) +{ + return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; +} +#endif + +#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL) static DEFINE_PER_CPU(unsigned int, efficiency_class); static void cppc_cpufreq_register_em(struct cpufreq_policy *policy); @@ -620,21 +629,12 @@ static void cppc_cpufreq_register_em(struct cpufreq_policy *policy) } #else - -static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu) -{ - return cppc_get_transition_latency(cpu) / NSEC_PER_USEC; -} static int populate_efficiency_class(void) { return 0; } -static void cppc_cpufreq_register_em(struct cpufreq_policy *policy) -{ -} #endif - static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu) { struct cppc_cpudata *cpu_data; diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index 866163883b48..37a1eb20f5ba 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -8,18 +8,22 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpumask.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/regulator/consumer.h> -#include <linux/slab.h> -#include <linux/thermal.h> -#define MIN_VOLT_SHIFT (100000) -#define MAX_VOLT_SHIFT (200000) -#define MAX_VOLT_LIMIT (1150000) -#define VOLT_TOL (10000) +struct mtk_cpufreq_platform_data { + int min_volt_shift; + int max_volt_shift; + int proc_max_volt; + int sram_min_volt; + int sram_max_volt; + bool ccifreq_supported; +}; /* * The struct mtk_cpu_dvfs_info holds necessary information for doing CPU DVFS @@ -35,6 
+39,7 @@ struct mtk_cpu_dvfs_info { struct cpumask cpus; struct device *cpu_dev; + struct device *cci_dev; struct regulator *proc_reg; struct regulator *sram_reg; struct clk *cpu_clk; @@ -42,8 +47,20 @@ struct mtk_cpu_dvfs_info { struct list_head list_head; int intermediate_voltage; bool need_voltage_tracking; + int vproc_on_boot; + int pre_vproc; + /* Avoid race condition for regulators between notify and policy */ + struct mutex reg_lock; + struct notifier_block opp_nb; + unsigned int opp_cpu; + unsigned long current_freq; + const struct mtk_cpufreq_platform_data *soc_data; + int vtrack_max; + bool ccifreq_bound; }; +static struct platform_device *cpufreq_pdev; + static LIST_HEAD(dvfs_info_list); static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) @@ -61,142 +78,123 @@ static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu) static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info, int new_vproc) { + const struct mtk_cpufreq_platform_data *soc_data = info->soc_data; struct regulator *proc_reg = info->proc_reg; struct regulator *sram_reg = info->sram_reg; - int old_vproc, old_vsram, new_vsram, vsram, vproc, ret; - - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); - return old_vproc; - } - /* Vsram should not exceed the maximum allowed voltage of SoC. */ - new_vsram = min(new_vproc + MIN_VOLT_SHIFT, MAX_VOLT_LIMIT); - - if (old_vproc < new_vproc) { - /* - * When scaling up voltages, Vsram and Vproc scale up step - * by step. At each step, set Vsram to (Vproc + 200mV) first, - * then set Vproc to (Vsram - 100mV). - * Keep doing it until Vsram and Vproc hit target voltages. - */ - do { - old_vsram = regulator_get_voltage(sram_reg); - if (old_vsram < 0) { - pr_err("%s: invalid Vsram value: %d\n", - __func__, old_vsram); - return old_vsram; - } - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", - __func__, old_vproc); - return old_vproc; - } - - vsram = min(new_vsram, old_vproc + MAX_VOLT_SHIFT); + int pre_vproc, pre_vsram, new_vsram, vsram, vproc, ret; + int retry = info->vtrack_max; + + pre_vproc = regulator_get_voltage(proc_reg); + if (pre_vproc < 0) { + dev_err(info->cpu_dev, + "invalid Vproc value: %d\n", pre_vproc); + return pre_vproc; + } - if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { - vsram = MAX_VOLT_LIMIT; + pre_vsram = regulator_get_voltage(sram_reg); + if (pre_vsram < 0) { + dev_err(info->cpu_dev, "invalid Vsram value: %d\n", pre_vsram); + return pre_vsram; + } - /* - * If the target Vsram hits the maximum voltage, - * try to set the exact voltage value first. 
- */ - ret = regulator_set_voltage(sram_reg, vsram, - vsram); - if (ret) - ret = regulator_set_voltage(sram_reg, - vsram - VOLT_TOL, - vsram); + new_vsram = clamp(new_vproc + soc_data->min_volt_shift, + soc_data->sram_min_volt, soc_data->sram_max_volt); - vproc = new_vproc; - } else { - ret = regulator_set_voltage(sram_reg, vsram, - vsram + VOLT_TOL); + do { + if (pre_vproc <= new_vproc) { + vsram = clamp(pre_vproc + soc_data->max_volt_shift, + soc_data->sram_min_volt, new_vsram); + ret = regulator_set_voltage(sram_reg, vsram, + soc_data->sram_max_volt); - vproc = vsram - MIN_VOLT_SHIFT; - } if (ret) return ret; + if (vsram == soc_data->sram_max_volt || + new_vsram == soc_data->sram_min_volt) + vproc = new_vproc; + else + vproc = vsram - soc_data->min_volt_shift; + ret = regulator_set_voltage(proc_reg, vproc, - vproc + VOLT_TOL); + soc_data->proc_max_volt); if (ret) { - regulator_set_voltage(sram_reg, old_vsram, - old_vsram); + regulator_set_voltage(sram_reg, pre_vsram, + soc_data->sram_max_volt); return ret; } - } while (vproc < new_vproc || vsram < new_vsram); - } else if (old_vproc > new_vproc) { - /* - * When scaling down voltages, Vsram and Vproc scale down step - * by step. At each step, set Vproc to (Vsram - 200mV) first, - * then set Vproc to (Vproc + 100mV). - * Keep doing it until Vsram and Vproc hit target voltages. - */ - do { - old_vproc = regulator_get_voltage(proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", - __func__, old_vproc); - return old_vproc; - } - old_vsram = regulator_get_voltage(sram_reg); - if (old_vsram < 0) { - pr_err("%s: invalid Vsram value: %d\n", - __func__, old_vsram); - return old_vsram; - } - - vproc = max(new_vproc, old_vsram - MAX_VOLT_SHIFT); + } else if (pre_vproc > new_vproc) { + vproc = max(new_vproc, + pre_vsram - soc_data->max_volt_shift); ret = regulator_set_voltage(proc_reg, vproc, - vproc + VOLT_TOL); + soc_data->proc_max_volt); if (ret) return ret; if (vproc == new_vproc) vsram = new_vsram; else - vsram = max(new_vsram, vproc + MIN_VOLT_SHIFT); - - if (vsram + VOLT_TOL >= MAX_VOLT_LIMIT) { - vsram = MAX_VOLT_LIMIT; - - /* - * If the target Vsram hits the maximum voltage, - * try to set the exact voltage value first. 
- */ - ret = regulator_set_voltage(sram_reg, vsram, - vsram); - if (ret) - ret = regulator_set_voltage(sram_reg, - vsram - VOLT_TOL, - vsram); - } else { - ret = regulator_set_voltage(sram_reg, vsram, - vsram + VOLT_TOL); - } + vsram = max(new_vsram, + vproc + soc_data->min_volt_shift); + ret = regulator_set_voltage(sram_reg, vsram, + soc_data->sram_max_volt); if (ret) { - regulator_set_voltage(proc_reg, old_vproc, - old_vproc); + regulator_set_voltage(proc_reg, pre_vproc, + soc_data->proc_max_volt); return ret; } - } while (vproc > new_vproc + VOLT_TOL || - vsram > new_vsram + VOLT_TOL); - } + } + + pre_vproc = vproc; + pre_vsram = vsram; + + if (--retry < 0) { + dev_err(info->cpu_dev, + "over loop count, failed to set voltage\n"); + return -EINVAL; + } + } while (vproc != new_vproc || vsram != new_vsram); return 0; } static int mtk_cpufreq_set_voltage(struct mtk_cpu_dvfs_info *info, int vproc) { + const struct mtk_cpufreq_platform_data *soc_data = info->soc_data; + int ret; + if (info->need_voltage_tracking) - return mtk_cpufreq_voltage_tracking(info, vproc); + ret = mtk_cpufreq_voltage_tracking(info, vproc); else - return regulator_set_voltage(info->proc_reg, vproc, - vproc + VOLT_TOL); + ret = regulator_set_voltage(info->proc_reg, vproc, + soc_data->proc_max_volt); + if (!ret) + info->pre_vproc = vproc; + + return ret; +} + +static bool is_ccifreq_ready(struct mtk_cpu_dvfs_info *info) +{ + struct device_link *sup_link; + + if (info->ccifreq_bound) + return true; + + sup_link = device_link_add(info->cpu_dev, info->cci_dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!sup_link) { + dev_err(info->cpu_dev, "cpu%d: sup_link is NULL\n", info->opp_cpu); + return false; + } + + if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND) + return false; + + info->ccifreq_bound = true; + + return true; } static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, @@ -208,219 +206,367 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, struct mtk_cpu_dvfs_info *info = policy->driver_data; struct device *cpu_dev = info->cpu_dev; struct dev_pm_opp *opp; - long freq_hz, old_freq_hz; - int vproc, old_vproc, inter_vproc, target_vproc, ret; + long freq_hz, pre_freq_hz; + int vproc, pre_vproc, inter_vproc, target_vproc, ret; inter_vproc = info->intermediate_voltage; - old_freq_hz = clk_get_rate(cpu_clk); - old_vproc = regulator_get_voltage(info->proc_reg); - if (old_vproc < 0) { - pr_err("%s: invalid Vproc value: %d\n", __func__, old_vproc); - return old_vproc; + pre_freq_hz = clk_get_rate(cpu_clk); + + mutex_lock(&info->reg_lock); + + if (unlikely(info->pre_vproc <= 0)) + pre_vproc = regulator_get_voltage(info->proc_reg); + else + pre_vproc = info->pre_vproc; + + if (pre_vproc < 0) { + dev_err(cpu_dev, "invalid Vproc value: %d\n", pre_vproc); + ret = pre_vproc; + goto out; } freq_hz = freq_table[index].frequency * 1000; opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); if (IS_ERR(opp)) { - pr_err("cpu%d: failed to find OPP for %ld\n", - policy->cpu, freq_hz); - return PTR_ERR(opp); + dev_err(cpu_dev, "cpu%d: failed to find OPP for %ld\n", + policy->cpu, freq_hz); + ret = PTR_ERR(opp); + goto out; } vproc = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); /* + * If MediaTek cci is supported but is not ready, we will use the value + * of max(target cpu voltage, booting voltage) to prevent high frequency + * low voltage crash.
+ */ + if (info->soc_data->ccifreq_supported && !is_ccifreq_ready(info)) + vproc = max(vproc, info->vproc_on_boot); + + /* * If the new voltage or the intermediate voltage is higher than the * current voltage, scale up voltage first. */ - target_vproc = (inter_vproc > vproc) ? inter_vproc : vproc; - if (old_vproc < target_vproc) { + target_vproc = max(inter_vproc, vproc); + if (pre_vproc <= target_vproc) { ret = mtk_cpufreq_set_voltage(info, target_vproc); if (ret) { - pr_err("cpu%d: failed to scale up voltage!\n", - policy->cpu); - mtk_cpufreq_set_voltage(info, old_vproc); - return ret; + dev_err(cpu_dev, + "cpu%d: failed to scale up voltage!\n", policy->cpu); + mtk_cpufreq_set_voltage(info, pre_vproc); + goto out; } } /* Reparent the CPU clock to intermediate clock. */ ret = clk_set_parent(cpu_clk, info->inter_clk); if (ret) { - pr_err("cpu%d: failed to re-parent cpu clock!\n", - policy->cpu); - mtk_cpufreq_set_voltage(info, old_vproc); - WARN_ON(1); - return ret; + dev_err(cpu_dev, + "cpu%d: failed to re-parent cpu clock!\n", policy->cpu); + mtk_cpufreq_set_voltage(info, pre_vproc); + goto out; } /* Set the original PLL to target rate. */ ret = clk_set_rate(armpll, freq_hz); if (ret) { - pr_err("cpu%d: failed to scale cpu clock rate!\n", - policy->cpu); + dev_err(cpu_dev, + "cpu%d: failed to scale cpu clock rate!\n", policy->cpu); clk_set_parent(cpu_clk, armpll); - mtk_cpufreq_set_voltage(info, old_vproc); - return ret; + mtk_cpufreq_set_voltage(info, pre_vproc); + goto out; } /* Set parent of CPU clock back to the original PLL. */ ret = clk_set_parent(cpu_clk, armpll); if (ret) { - pr_err("cpu%d: failed to re-parent cpu clock!\n", - policy->cpu); + dev_err(cpu_dev, + "cpu%d: failed to re-parent cpu clock!\n", policy->cpu); mtk_cpufreq_set_voltage(info, inter_vproc); - WARN_ON(1); - return ret; + goto out; } /* * If the new voltage is lower than the intermediate voltage or the * original voltage, scale down to the new voltage. 
*/ - if (vproc < inter_vproc || vproc < old_vproc) { + if (vproc < inter_vproc || vproc < pre_vproc) { ret = mtk_cpufreq_set_voltage(info, vproc); if (ret) { - pr_err("cpu%d: failed to scale down voltage!\n", - policy->cpu); + dev_err(cpu_dev, + "cpu%d: failed to scale down voltage!\n", policy->cpu); clk_set_parent(cpu_clk, info->inter_clk); - clk_set_rate(armpll, old_freq_hz); + clk_set_rate(armpll, pre_freq_hz); clk_set_parent(cpu_clk, armpll); - return ret; + goto out; } } - return 0; + info->current_freq = freq_hz; + +out: + mutex_unlock(&info->reg_lock); + + return ret; } #define DYNAMIC_POWER "dynamic-power-coefficient" +static int mtk_cpufreq_opp_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct dev_pm_opp *opp = data; + struct dev_pm_opp *new_opp; + struct mtk_cpu_dvfs_info *info; + unsigned long freq, volt; + struct cpufreq_policy *policy; + int ret = 0; + + info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb); + + if (event == OPP_EVENT_ADJUST_VOLTAGE) { + freq = dev_pm_opp_get_freq(opp); + + mutex_lock(&info->reg_lock); + if (info->current_freq == freq) { + volt = dev_pm_opp_get_voltage(opp); + ret = mtk_cpufreq_set_voltage(info, volt); + if (ret) + dev_err(info->cpu_dev, + "failed to scale voltage: %d\n", ret); + } + mutex_unlock(&info->reg_lock); + } else if (event == OPP_EVENT_DISABLE) { + freq = dev_pm_opp_get_freq(opp); + + /* case of current opp item is disabled */ + if (info->current_freq == freq) { + freq = 1; + new_opp = dev_pm_opp_find_freq_ceil(info->cpu_dev, + &freq); + if (IS_ERR(new_opp)) { + dev_err(info->cpu_dev, + "all opp items are disabled\n"); + ret = PTR_ERR(new_opp); + return notifier_from_errno(ret); + } + + dev_pm_opp_put(new_opp); + policy = cpufreq_cpu_get(info->opp_cpu); + if (policy) { + cpufreq_driver_target(policy, freq / 1000, + CPUFREQ_RELATION_L); + cpufreq_cpu_put(policy); + } + } + } + + return notifier_from_errno(ret); +} + +static struct device *of_get_cci(struct device *cpu_dev) +{ + struct device_node *np; + struct platform_device *pdev; + + np = of_parse_phandle(cpu_dev->of_node, "mediatek,cci", 0); + if (IS_ERR_OR_NULL(np)) + return NULL; + + pdev = of_find_device_by_node(np); + of_node_put(np); + if (IS_ERR_OR_NULL(pdev)) + return NULL; + + return &pdev->dev; +} + static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) { struct device *cpu_dev; - struct regulator *proc_reg = ERR_PTR(-ENODEV); - struct regulator *sram_reg = ERR_PTR(-ENODEV); - struct clk *cpu_clk = ERR_PTR(-ENODEV); - struct clk *inter_clk = ERR_PTR(-ENODEV); struct dev_pm_opp *opp; unsigned long rate; int ret; cpu_dev = get_cpu_device(cpu); if (!cpu_dev) { - pr_err("failed to get cpu%d device\n", cpu); + dev_err(cpu_dev, "failed to get cpu%d device\n", cpu); return -ENODEV; } + info->cpu_dev = cpu_dev; - cpu_clk = clk_get(cpu_dev, "cpu"); - if (IS_ERR(cpu_clk)) { - if (PTR_ERR(cpu_clk) == -EPROBE_DEFER) - pr_warn("cpu clk for cpu%d not ready, retry.\n", cpu); - else - pr_err("failed to get cpu clk for cpu%d\n", cpu); - - ret = PTR_ERR(cpu_clk); - return ret; + info->ccifreq_bound = false; + if (info->soc_data->ccifreq_supported) { + info->cci_dev = of_get_cci(info->cpu_dev); + if (IS_ERR_OR_NULL(info->cci_dev)) { + ret = PTR_ERR(info->cci_dev); + dev_err(cpu_dev, "cpu%d: failed to get cci device\n", cpu); + return -ENODEV; + } } - inter_clk = clk_get(cpu_dev, "intermediate"); - if (IS_ERR(inter_clk)) { - if (PTR_ERR(inter_clk) == -EPROBE_DEFER) - pr_warn("intermediate clk for cpu%d not ready, retry.\n", - cpu); - 
else - pr_err("failed to get intermediate clk for cpu%d\n", - cpu); + info->cpu_clk = clk_get(cpu_dev, "cpu"); + if (IS_ERR(info->cpu_clk)) { + ret = PTR_ERR(info->cpu_clk); + return dev_err_probe(cpu_dev, ret, + "cpu%d: failed to get cpu clk\n", cpu); + } - ret = PTR_ERR(inter_clk); + info->inter_clk = clk_get(cpu_dev, "intermediate"); + if (IS_ERR(info->inter_clk)) { + ret = PTR_ERR(info->inter_clk); + dev_err_probe(cpu_dev, ret, + "cpu%d: failed to get intermediate clk\n", cpu); goto out_free_resources; } - proc_reg = regulator_get_optional(cpu_dev, "proc"); - if (IS_ERR(proc_reg)) { - if (PTR_ERR(proc_reg) == -EPROBE_DEFER) - pr_warn("proc regulator for cpu%d not ready, retry.\n", - cpu); - else - pr_err("failed to get proc regulator for cpu%d\n", - cpu); + info->proc_reg = regulator_get_optional(cpu_dev, "proc"); + if (IS_ERR(info->proc_reg)) { + ret = PTR_ERR(info->proc_reg); + dev_err_probe(cpu_dev, ret, + "cpu%d: failed to get proc regulator\n", cpu); + goto out_free_resources; + } - ret = PTR_ERR(proc_reg); + ret = regulator_enable(info->proc_reg); + if (ret) { + dev_warn(cpu_dev, "cpu%d: failed to enable vproc\n", cpu); goto out_free_resources; } /* Both presence and absence of sram regulator are valid cases. */ - sram_reg = regulator_get_exclusive(cpu_dev, "sram"); + info->sram_reg = regulator_get_optional(cpu_dev, "sram"); + if (IS_ERR(info->sram_reg)) + info->sram_reg = NULL; + else { + ret = regulator_enable(info->sram_reg); + if (ret) { + dev_warn(cpu_dev, "cpu%d: failed to enable vsram\n", cpu); + goto out_free_resources; + } + } /* Get OPP-sharing information from "operating-points-v2" bindings */ ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &info->cpus); if (ret) { - pr_err("failed to get OPP-sharing information for cpu%d\n", - cpu); + dev_err(cpu_dev, + "cpu%d: failed to get OPP-sharing information\n", cpu); goto out_free_resources; } ret = dev_pm_opp_of_cpumask_add_table(&info->cpus); if (ret) { - pr_warn("no OPP table for cpu%d\n", cpu); + dev_warn(cpu_dev, "cpu%d: no OPP table\n", cpu); goto out_free_resources; } + ret = clk_prepare_enable(info->cpu_clk); + if (ret) + goto out_free_opp_table; + + ret = clk_prepare_enable(info->inter_clk); + if (ret) + goto out_disable_mux_clock; + + if (info->soc_data->ccifreq_supported) { + info->vproc_on_boot = regulator_get_voltage(info->proc_reg); + if (info->vproc_on_boot < 0) { + dev_err(info->cpu_dev, + "invalid Vproc value: %d\n", info->vproc_on_boot); + goto out_disable_inter_clock; + } + } + /* Search a safe voltage for intermediate frequency. */ - rate = clk_get_rate(inter_clk); + rate = clk_get_rate(info->inter_clk); opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); if (IS_ERR(opp)) { - pr_err("failed to get intermediate opp for cpu%d\n", cpu); + dev_err(cpu_dev, "cpu%d: failed to get intermediate opp\n", cpu); ret = PTR_ERR(opp); - goto out_free_opp_table; + goto out_disable_inter_clock; } info->intermediate_voltage = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); - info->cpu_dev = cpu_dev; - info->proc_reg = proc_reg; - info->sram_reg = IS_ERR(sram_reg) ? 
NULL : sram_reg; - info->cpu_clk = cpu_clk; - info->inter_clk = inter_clk; + mutex_init(&info->reg_lock); + info->current_freq = clk_get_rate(info->cpu_clk); + + info->opp_cpu = cpu; + info->opp_nb.notifier_call = mtk_cpufreq_opp_notifier; + ret = dev_pm_opp_register_notifier(cpu_dev, &info->opp_nb); + if (ret) { + dev_err(cpu_dev, "cpu%d: failed to register opp notifier\n", cpu); + goto out_disable_inter_clock; + } /* * If SRAM regulator is present, software "voltage tracking" is needed * for this CPU power domain. */ - info->need_voltage_tracking = !IS_ERR(sram_reg); + info->need_voltage_tracking = (info->sram_reg != NULL); + + /* + * We assume the minimum voltage is 0 and track the target voltage using + * min_volt_shift in each iteration. + * The vtrack_max is 3 times the expected iteration count. + */ + info->vtrack_max = 3 * DIV_ROUND_UP(max(info->soc_data->sram_max_volt, + info->soc_data->proc_max_volt), + info->soc_data->min_volt_shift); return 0; +out_disable_inter_clock: + clk_disable_unprepare(info->inter_clk); + +out_disable_mux_clock: + clk_disable_unprepare(info->cpu_clk); + out_free_opp_table: dev_pm_opp_of_cpumask_remove_table(&info->cpus); out_free_resources: - if (!IS_ERR(proc_reg)) - regulator_put(proc_reg); - if (!IS_ERR(sram_reg)) - regulator_put(sram_reg); - if (!IS_ERR(cpu_clk)) - clk_put(cpu_clk); - if (!IS_ERR(inter_clk)) - clk_put(inter_clk); + if (regulator_is_enabled(info->proc_reg)) + regulator_disable(info->proc_reg); + if (info->sram_reg && regulator_is_enabled(info->sram_reg)) + regulator_disable(info->sram_reg); + + if (!IS_ERR(info->proc_reg)) + regulator_put(info->proc_reg); + if (!IS_ERR(info->sram_reg)) + regulator_put(info->sram_reg); + if (!IS_ERR(info->cpu_clk)) + clk_put(info->cpu_clk); + if (!IS_ERR(info->inter_clk)) + clk_put(info->inter_clk); return ret; } static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info) { - if (!IS_ERR(info->proc_reg)) + if (!IS_ERR(info->proc_reg)) { + regulator_disable(info->proc_reg); regulator_put(info->proc_reg); - if (!IS_ERR(info->sram_reg)) + } + if (!IS_ERR(info->sram_reg)) { + regulator_disable(info->sram_reg); regulator_put(info->sram_reg); - if (!IS_ERR(info->cpu_clk)) + } + if (!IS_ERR(info->cpu_clk)) { + clk_disable_unprepare(info->cpu_clk); clk_put(info->cpu_clk); - if (!IS_ERR(info->inter_clk)) + } + if (!IS_ERR(info->inter_clk)) { + clk_disable_unprepare(info->inter_clk); clk_put(info->inter_clk); + } dev_pm_opp_of_cpumask_remove_table(&info->cpus); + dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb); } static int mtk_cpufreq_init(struct cpufreq_policy *policy) @@ -432,14 +578,15 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy) info = mtk_cpu_dvfs_info_lookup(policy->cpu); if (!info) { pr_err("dvfs info for cpu%d is not initialized.\n", - policy->cpu); + policy->cpu); return -EINVAL; } ret = dev_pm_opp_init_cpufreq_table(info->cpu_dev, &freq_table); if (ret) { - pr_err("failed to init cpufreq table for cpu%d: %d\n", - policy->cpu, ret); + dev_err(info->cpu_dev, + "failed to init cpufreq table for cpu%d: %d\n", + policy->cpu, ret); return ret; } @@ -476,9 +623,17 @@ static struct cpufreq_driver mtk_cpufreq_driver = { static int mtk_cpufreq_probe(struct platform_device *pdev) { + const struct mtk_cpufreq_platform_data *data; struct mtk_cpu_dvfs_info *info, *tmp; int cpu, ret; + data = dev_get_platdata(&pdev->dev); + if (!data) { + dev_err(&pdev->dev, + "failed to get mtk cpufreq platform data\n"); + return -ENODEV; + } + for_each_possible_cpu(cpu) { info =
mtk_cpu_dvfs_info_lookup(cpu); if (info) @@ -490,6 +645,7 @@ static int mtk_cpufreq_probe(struct platform_device *pdev) goto release_dvfs_info_list; } + info->soc_data = data; ret = mtk_cpu_dvfs_info_init(info, cpu); if (ret) { dev_err(&pdev->dev, @@ -525,20 +681,47 @@ static struct platform_driver mtk_cpufreq_platdrv = { .probe = mtk_cpufreq_probe, }; +static const struct mtk_cpufreq_platform_data mt2701_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 200000, + .proc_max_volt = 1150000, + .sram_min_volt = 0, + .sram_max_volt = 1150000, + .ccifreq_supported = false, +}; + +static const struct mtk_cpufreq_platform_data mt8183_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 200000, + .proc_max_volt = 1150000, + .sram_min_volt = 0, + .sram_max_volt = 1150000, + .ccifreq_supported = true, +}; + +static const struct mtk_cpufreq_platform_data mt8186_platform_data = { + .min_volt_shift = 100000, + .max_volt_shift = 250000, + .proc_max_volt = 1118750, + .sram_min_volt = 850000, + .sram_max_volt = 1118750, + .ccifreq_supported = true, +}; + /* List of machines supported by this driver */ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { - { .compatible = "mediatek,mt2701", }, - { .compatible = "mediatek,mt2712", }, - { .compatible = "mediatek,mt7622", }, - { .compatible = "mediatek,mt7623", }, - { .compatible = "mediatek,mt8167", }, - { .compatible = "mediatek,mt817x", }, - { .compatible = "mediatek,mt8173", }, - { .compatible = "mediatek,mt8176", }, - { .compatible = "mediatek,mt8183", }, - { .compatible = "mediatek,mt8365", }, - { .compatible = "mediatek,mt8516", }, - + { .compatible = "mediatek,mt2701", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt2712", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt7622", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt7623", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt8167", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt817x", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt8173", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt8176", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt8183", .data = &mt8183_platform_data }, + { .compatible = "mediatek,mt8186", .data = &mt8186_platform_data }, + { .compatible = "mediatek,mt8365", .data = &mt2701_platform_data }, + { .compatible = "mediatek,mt8516", .data = &mt2701_platform_data }, { } }; MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines); @@ -547,7 +730,7 @@ static int __init mtk_cpufreq_driver_init(void) { struct device_node *np; const struct of_device_id *match; - struct platform_device *pdev; + const struct mtk_cpufreq_platform_data *data; int err; np = of_find_node_by_path("/"); @@ -560,6 +743,7 @@ static int __init mtk_cpufreq_driver_init(void) pr_debug("Machine is not compatible with mtk-cpufreq\n"); return -ENODEV; } + data = match->data; err = platform_driver_register(&mtk_cpufreq_platdrv); if (err) @@ -571,16 +755,24 @@ static int __init mtk_cpufreq_driver_init(void) * and the device registration codes are put here to handle defer * probing. 
*/ - pdev = platform_device_register_simple("mtk-cpufreq", -1, NULL, 0); - if (IS_ERR(pdev)) { + cpufreq_pdev = platform_device_register_data(NULL, "mtk-cpufreq", -1, + data, sizeof(*data)); + if (IS_ERR(cpufreq_pdev)) { pr_err("failed to register mtk-cpufreq platform device\n"); platform_driver_unregister(&mtk_cpufreq_platdrv); - return PTR_ERR(pdev); + return PTR_ERR(cpufreq_pdev); } return 0; } -device_initcall(mtk_cpufreq_driver_init); +module_init(mtk_cpufreq_driver_init) + +static void __exit mtk_cpufreq_driver_exit(void) +{ + platform_device_unregister(cpufreq_pdev); + platform_driver_unregister(&mtk_cpufreq_platdrv); +} +module_exit(mtk_cpufreq_driver_exit) MODULE_DESCRIPTION("MediaTek CPUFreq driver"); MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>"); diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index ac381db25dbe..2a6a98764a8c 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved + * Copyright (c) 2020 - 2022, NVIDIA CORPORATION. All rights reserved */ #include <linux/cpu.h> @@ -24,6 +24,17 @@ #define CPUFREQ_TBL_STEP_HZ (50 * KHZ * KHZ) #define MAX_CNT ~0U +#define NDIV_MASK 0x1FF + +#define CORE_OFFSET(cpu) (cpu * 8) +#define CMU_CLKS_BASE 0x2000 +#define SCRATCH_FREQ_CORE_REG(data, cpu) (data->regs + CMU_CLKS_BASE + CORE_OFFSET(cpu)) + +#define MMCRAB_CLUSTER_BASE(cl) (0x30000 + (cl * 0x10000)) +#define CLUSTER_ACTMON_BASE(data, cl) \ + (data->regs + (MMCRAB_CLUSTER_BASE(cl) + data->soc->actmon_cntr_base)) +#define CORE_ACTMON_CNTR_REG(data, cl, cpu) (CLUSTER_ACTMON_BASE(data, cl) + CORE_OFFSET(cpu)) + /* cpufreq transisition latency */ #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */ @@ -35,12 +46,6 @@ enum cluster { MAX_CLUSTERS, }; -struct tegra194_cpufreq_data { - void __iomem *regs; - size_t num_clusters; - struct cpufreq_frequency_table **tables; -}; - struct tegra_cpu_ctr { u32 cpu; u32 coreclk_cnt, last_coreclk_cnt; @@ -52,13 +57,127 @@ struct read_counters_work { struct tegra_cpu_ctr c; }; +struct tegra_cpufreq_ops { + void (*read_counters)(struct tegra_cpu_ctr *c); + void (*set_cpu_ndiv)(struct cpufreq_policy *policy, u64 ndiv); + void (*get_cpu_cluster_id)(u32 cpu, u32 *cpuid, u32 *clusterid); + int (*get_cpu_ndiv)(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv); +}; + +struct tegra_cpufreq_soc { + struct tegra_cpufreq_ops *ops; + int maxcpus_per_cluster; + phys_addr_t actmon_cntr_base; +}; + +struct tegra194_cpufreq_data { + void __iomem *regs; + size_t num_clusters; + struct cpufreq_frequency_table **tables; + const struct tegra_cpufreq_soc *soc; +}; + static struct workqueue_struct *read_counters_wq; -static void get_cpu_cluster(void *cluster) +static void tegra_get_cpu_mpidr(void *mpidr) +{ + *((u64 *)mpidr) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; +} + +static void tegra234_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) +{ + u64 mpidr; + + smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true); + + if (cpuid) + *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 1); + if (clusterid) + *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 2); +} + +static int tegra234_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) { - u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + void __iomem *freq_core_reg; + u64 mpidr_id; + + /* use physical id to get address of per core 
frequency register */ + mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; + freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + + *ndiv = readl(freq_core_reg) & NDIV_MASK; + + return 0; +} - *((uint32_t *)cluster) = MPIDR_AFFINITY_LEVEL(mpidr, 1); +static void tegra234_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) +{ + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + void __iomem *freq_core_reg; + u32 cpu, cpuid, clusterid; + u64 mpidr_id; + + for_each_cpu_and(cpu, policy->cpus, cpu_online_mask) { + data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); + + /* use physical id to get address of per core frequency register */ + mpidr_id = (clusterid * data->soc->maxcpus_per_cluster) + cpuid; + freq_core_reg = SCRATCH_FREQ_CORE_REG(data, mpidr_id); + + writel(ndiv, freq_core_reg); + } +} + +/* + * This register provides access to two counter values with a single + * 64-bit read. The counter values are used to determine the average + * actual frequency a core has run at over a period of time. + * [63:32] PLLP counter: Counts at fixed frequency (408 MHz) + * [31:0] Core clock counter: Counts on every core clock cycle + */ +static void tegra234_read_counters(struct tegra_cpu_ctr *c) +{ + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); + void __iomem *actmon_reg; + u32 cpuid, clusterid; + u64 val; + + data->soc->ops->get_cpu_cluster_id(c->cpu, &cpuid, &clusterid); + actmon_reg = CORE_ACTMON_CNTR_REG(data, clusterid, cpuid); + + val = readq(actmon_reg); + c->last_refclk_cnt = upper_32_bits(val); + c->last_coreclk_cnt = lower_32_bits(val); + udelay(US_DELAY); + val = readq(actmon_reg); + c->refclk_cnt = upper_32_bits(val); + c->coreclk_cnt = lower_32_bits(val); +} + +static struct tegra_cpufreq_ops tegra234_cpufreq_ops = { + .read_counters = tegra234_read_counters, + .get_cpu_cluster_id = tegra234_get_cpu_cluster_id, + .get_cpu_ndiv = tegra234_get_cpu_ndiv, + .set_cpu_ndiv = tegra234_set_cpu_ndiv, +}; + +const struct tegra_cpufreq_soc tegra234_cpufreq_soc = { + .ops = &tegra234_cpufreq_ops, + .actmon_cntr_base = 0x9000, + .maxcpus_per_cluster = 4, +}; + +static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid) +{ + u64 mpidr; + + smp_call_function_single(cpu, tegra_get_cpu_mpidr, &mpidr, true); + + if (cpuid) + *cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0); + if (clusterid) + *clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1); } /* @@ -85,11 +204,24 @@ static inline u32 map_ndiv_to_freq(struct mrq_cpu_ndiv_limits_response return nltbl->ref_clk_hz / KHZ * ndiv / (nltbl->pdiv * nltbl->mdiv); } +static void tegra194_read_counters(struct tegra_cpu_ctr *c) +{ + u64 val; + + val = read_freq_feedback(); + c->last_refclk_cnt = lower_32_bits(val); + c->last_coreclk_cnt = upper_32_bits(val); + udelay(US_DELAY); + val = read_freq_feedback(); + c->refclk_cnt = lower_32_bits(val); + c->coreclk_cnt = upper_32_bits(val); +} + static void tegra_read_counters(struct work_struct *work) { + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); struct read_counters_work *read_counters_work; struct tegra_cpu_ctr *c; - u64 val; /* * ref_clk_counter(32 bit counter) runs on constant clk, @@ -107,13 +239,7 @@ static void tegra_read_counters(struct work_struct *work) work); c = &read_counters_work->c; - val = read_freq_feedback(); - c->last_refclk_cnt = lower_32_bits(val); - c->last_coreclk_cnt = upper_32_bits(val); - udelay(US_DELAY); - val = read_freq_feedback(); - c->refclk_cnt = lower_32_bits(val); - c->coreclk_cnt = upper_32_bits(val); 
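/*
 * A minimal sketch of how the two counter samples captured by a
 * read_counters() callback turn into a frequency estimate: the core-clock
 * delta is scaled by the fixed 408 MHz reference delta.  The helper name is
 * an illustrative placeholder, not the driver's API; the driver's own
 * conversion is done by tegra194_calculate_speed().  Needs <linux/math64.h>
 * for div64_u64().
 */
static inline u32 sketch_rate_khz(const struct tegra_cpu_ctr *c)
{
	const u32 ref_khz = 408 * 1000;	/* PLLP reference counter rate */
	u32 delta_ref, delta_core;

	/* 32-bit unsigned subtraction also copes with a single wrap-around */
	delta_ref = c->refclk_cnt - c->last_refclk_cnt;
	delta_core = c->coreclk_cnt - c->last_coreclk_cnt;

	if (!delta_ref)
		return 0;

	return (u32)div64_u64((u64)delta_core * ref_khz, delta_ref);
}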
+ data->soc->ops->read_counters(c); } /* @@ -177,7 +303,7 @@ static unsigned int tegra194_calculate_speed(u32 cpu) return (rate_mhz * KHZ); /* in KHz */ } -static void get_cpu_ndiv(void *ndiv) +static void tegra194_get_cpu_ndiv_sysreg(void *ndiv) { u64 ndiv_val; @@ -186,30 +312,43 @@ static void get_cpu_ndiv(void *ndiv) *(u64 *)ndiv = ndiv_val; } -static void set_cpu_ndiv(void *data) +static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv) +{ + int ret; + + ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true); + + return ret; +} + +static void tegra194_set_cpu_ndiv_sysreg(void *data) { - struct cpufreq_frequency_table *tbl = data; - u64 ndiv_val = (u64)tbl->driver_data; + u64 ndiv_val = *(u64 *)data; asm volatile("msr s3_0_c15_c0_4, %0" : : "r" (ndiv_val)); } +static void tegra194_set_cpu_ndiv(struct cpufreq_policy *policy, u64 ndiv) +{ + on_each_cpu_mask(policy->cpus, tegra194_set_cpu_ndiv_sysreg, &ndiv, true); +} + static unsigned int tegra194_get_speed(u32 cpu) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); struct cpufreq_frequency_table *pos; + u32 cpuid, clusterid; unsigned int rate; u64 ndiv; int ret; - u32 cl; - smp_call_function_single(cpu, get_cpu_cluster, &cl, true); + data->soc->ops->get_cpu_cluster_id(cpu, &cpuid, &clusterid); /* reconstruct actual cpu freq using counters */ rate = tegra194_calculate_speed(cpu); /* get last written ndiv value */ - ret = smp_call_function_single(cpu, get_cpu_ndiv, &ndiv, true); + ret = data->soc->ops->get_cpu_ndiv(cpu, cpuid, clusterid, &ndiv); if (WARN_ON_ONCE(ret)) return rate; @@ -219,7 +358,7 @@ static unsigned int tegra194_get_speed(u32 cpu) * to the last written ndiv value from freq_table. This is * done to return consistent value. */ - cpufreq_for_each_valid_entry(pos, data->tables[cl]) { + cpufreq_for_each_valid_entry(pos, data->tables[clusterid]) { if (pos->driver_data != ndiv) continue; @@ -237,19 +376,22 @@ static unsigned int tegra194_get_speed(u32 cpu) static int tegra194_cpufreq_init(struct cpufreq_policy *policy) { struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); - u32 cpu; - u32 cl; + int maxcpus_per_cluster = data->soc->maxcpus_per_cluster; + u32 start_cpu, cpu; + u32 clusterid; - smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true); + data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid); - if (cl >= data->num_clusters || !data->tables[cl]) + if (clusterid >= data->num_clusters || !data->tables[clusterid]) return -EINVAL; + start_cpu = rounddown(policy->cpu, maxcpus_per_cluster); /* set same policy for all cpus in a cluster */ - for (cpu = (cl * 2); cpu < ((cl + 1) * 2); cpu++) - cpumask_set_cpu(cpu, policy->cpus); - - policy->freq_table = data->tables[cl]; + for (cpu = start_cpu; cpu < (start_cpu + maxcpus_per_cluster); cpu++) { + if (cpu_possible(cpu)) + cpumask_set_cpu(cpu, policy->cpus); + } + policy->freq_table = data->tables[clusterid]; policy->cpuinfo.transition_latency = TEGRA_CPUFREQ_TRANSITION_LATENCY; return 0; @@ -259,13 +401,14 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { struct cpufreq_frequency_table *tbl = policy->freq_table + index; + struct tegra194_cpufreq_data *data = cpufreq_get_driver_data(); /* * Each core writes frequency in per core register. Then both cores * in a cluster run at same frequency which is the maximum frequency * request out of the values requested by both cores in that cluster. 
*/ - on_each_cpu_mask(policy->cpus, set_cpu_ndiv, tbl, true); + data->soc->ops->set_cpu_ndiv(policy, (u64)tbl->driver_data); return 0; } @@ -280,6 +423,18 @@ static struct cpufreq_driver tegra194_cpufreq_driver = { .attr = cpufreq_generic_attr, }; +static struct tegra_cpufreq_ops tegra194_cpufreq_ops = { + .read_counters = tegra194_read_counters, + .get_cpu_cluster_id = tegra194_get_cpu_cluster_id, + .get_cpu_ndiv = tegra194_get_cpu_ndiv, + .set_cpu_ndiv = tegra194_set_cpu_ndiv, +}; + +const struct tegra_cpufreq_soc tegra194_cpufreq_soc = { + .ops = &tegra194_cpufreq_ops, + .maxcpus_per_cluster = 2, +}; + static void tegra194_cpufreq_free_resources(void) { destroy_workqueue(read_counters_wq); @@ -359,6 +514,7 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp, static int tegra194_cpufreq_probe(struct platform_device *pdev) { + const struct tegra_cpufreq_soc *soc; struct tegra194_cpufreq_data *data; struct tegra_bpmp *bpmp; int err, i; @@ -367,12 +523,28 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + soc = of_device_get_match_data(&pdev->dev); + + if (soc->ops && soc->maxcpus_per_cluster) { + data->soc = soc; + } else { + dev_err(&pdev->dev, "soc data missing\n"); + return -EINVAL; + } + data->num_clusters = MAX_CLUSTERS; data->tables = devm_kcalloc(&pdev->dev, data->num_clusters, sizeof(*data->tables), GFP_KERNEL); if (!data->tables) return -ENOMEM; + if (soc->actmon_cntr_base) { + /* mmio registers are used for frequency request and re-construction */ + data->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + } + platform_set_drvdata(pdev, data); bpmp = tegra_bpmp_get(&pdev->dev); @@ -416,10 +588,10 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev) } static const struct of_device_id tegra194_cpufreq_of_match[] = { - { .compatible = "nvidia,tegra194-ccplex", }, + { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc }, + { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc }, { /* sentinel */ } }; -MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match); static struct platform_driver tegra194_ccplex_driver = { .driver = { diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 7b2d138bc83e..ee99c02c84e8 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -216,9 +216,9 @@ config CRYPTO_AES_S390 config CRYPTO_CHACHA_S390 tristate "ChaCha20 stream cipher" depends on S390 - select CRYPTO_ALGAPI select CRYPTO_SKCIPHER - select CRYPTO_CHACHA20 + select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_ARCH_HAVE_LIB_CHACHA help This is the s390 SIMD implementation of the ChaCha20 stream cipher (RFC 7539). diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 0a4fff23d272..f81703a86b98 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_CRYPTO_DEV_ALLWINNER) += allwinner/ obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o +# __init ordering requires atmel-i2c being before atmel-ecc and atmel-sha204a. 
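# A short background sketch for the ordering comment above, based on generic
# Kbuild/initcall behaviour rather than on anything in this patch: when these
# objects are built in, module_init() expands to device_initcall(), and
# initcalls at the same level run in link order, i.e. in the order of the
# obj-$(...) lines below.  Listing atmel-i2c.o first therefore guarantees its
# __init code has run before atmel-ecc and atmel-sha204a initialize.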
obj-$(CONFIG_CRYPTO_DEV_ATMEL_I2C) += atmel-i2c.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_ECC) += atmel-ecc.o obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA204A) += atmel-sha204a.o diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c index dec79fa3ebaf..10fe9f73a5fb 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c @@ -20,7 +20,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); u32 mode = ctx->mode; - void *backup_iv = NULL; /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ u32 rx_cnt = SS_RX_DEFAULT; u32 tx_cnt = 0; @@ -48,10 +47,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) } if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { - backup_iv = kzalloc(ivsize, GFP_KERNEL); - if (!backup_iv) - return -ENOMEM; - scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0); + scatterwalk_map_and_copy(ctx->backup_iv, areq->src, + areq->cryptlen - ivsize, ivsize, 0); } if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { @@ -134,8 +131,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) if (areq->iv) { if (mode & SS_DECRYPTION) { - memcpy(areq->iv, backup_iv, ivsize); - kfree_sensitive(backup_iv); + memcpy(areq->iv, ctx->backup_iv, ivsize); + memzero_explicit(ctx->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, ivsize, 0); @@ -199,7 +196,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) unsigned int ileft = areq->cryptlen; unsigned int oleft = areq->cryptlen; unsigned int todo; - void *backup_iv = NULL; struct sg_mapping_iter mi, mo; unsigned long pi = 0, po = 0; /* progress for in and out */ bool miter_err; @@ -244,10 +240,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) return sun4i_ss_cipher_poll_fallback(areq); if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { - backup_iv = kzalloc(ivsize, GFP_KERNEL); - if (!backup_iv) - return -ENOMEM; - scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0); + scatterwalk_map_and_copy(ctx->backup_iv, areq->src, + areq->cryptlen - ivsize, ivsize, 0); } if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { @@ -384,8 +378,8 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) } if (areq->iv) { if (mode & SS_DECRYPTION) { - memcpy(areq->iv, backup_iv, ivsize); - kfree_sensitive(backup_iv); + memcpy(areq->iv, ctx->backup_iv, ivsize); + memzero_explicit(ctx->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, ivsize, 0); diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h index 0fee6f4e2d90..ba59c7a48825 100644 --- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h +++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss.h @@ -183,6 +183,7 @@ struct sun4i_tfm_ctx { struct sun4i_cipher_req_ctx { u32 mode; + u8 backup_iv[AES_BLOCK_SIZE]; struct skcipher_request fallback_req; // keep at the end }; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c index 35e3cadccac2..74b4e910a38d 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c @@ -25,26 
+25,62 @@ static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct scatterlist *sg; + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ce_alg_template *algt; + unsigned int todo, len; + + algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); + + if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || + sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { + algt->stat_fb_maxsg++; + return true; + } - if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG) + if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) { + algt->stat_fb_leniv++; return true; + } - if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) + if (areq->cryptlen == 0) { + algt->stat_fb_len0++; return true; + } - if (areq->cryptlen == 0 || areq->cryptlen % 16) + if (areq->cryptlen % 16) { + algt->stat_fb_mod16++; return true; + } + len = areq->cryptlen; sg = areq->src; while (sg) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_srcali++; + return true; + } + todo = min(len, sg->length); + if (todo % 4) { + algt->stat_fb_srclen++; return true; + } + len -= todo; sg = sg_next(sg); } + + len = areq->cryptlen; sg = areq->dst; while (sg) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_dstali++; + return true; + } + todo = min(len, sg->length); + if (todo % 4) { + algt->stat_fb_dstlen++; return true; + } + len -= todo; sg = sg_next(sg); } return false; @@ -94,6 +130,8 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req int nr_sgs = 0; int nr_sgd = 0; int err = 0; + int ns = sg_nents_for_len(areq->src, areq->cryptlen); + int nd = sg_nents_for_len(areq->dst, areq->cryptlen); algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher); @@ -152,23 +190,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { rctx->ivlen = ivsize; - rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA); - if (!rctx->bounce_iv) { - err = -ENOMEM; - goto theend_key; - } if (rctx->op_dir & CE_DECRYPTION) { - rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL); - if (!rctx->backup_iv) { - err = -ENOMEM; - goto theend_key; - } offset = areq->cryptlen - ivsize; - scatterwalk_map_and_copy(rctx->backup_iv, areq->src, + scatterwalk_map_and_copy(chan->backup_iv, areq->src, offset, ivsize, 0); } - memcpy(rctx->bounce_iv, areq->iv, ivsize); - rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen, + memcpy(chan->bounce_iv, areq->iv, ivsize); + rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, rctx->addr_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); @@ -179,8 +207,7 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req } if (areq->src == areq->dst) { - nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), - DMA_BIDIRECTIONAL); + nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; @@ -188,15 +215,13 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req } nr_sgd = nr_sgs; } else { - nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); + nr_sgs = 
dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } - nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst), - DMA_FROM_DEVICE); + nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); if (nr_sgd <= 0 || nr_sgd > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; @@ -241,14 +266,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), - DMA_BIDIRECTIONAL); + dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); } else { if (nr_sgs > 0) - dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst), - DMA_FROM_DEVICE); + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); } theend_iv: @@ -257,16 +279,15 @@ theend_iv: dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { - memcpy(areq->iv, rctx->backup_iv, ivsize); - kfree_sensitive(rctx->backup_iv); + memcpy(areq->iv, chan->backup_iv, ivsize); + memzero_explicit(chan->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } - kfree(rctx->bounce_iv); + memzero_explicit(chan->bounce_iv, ivsize); } -theend_key: dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); theend: @@ -322,13 +343,13 @@ static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_r dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { - memcpy(areq->iv, rctx->backup_iv, ivsize); - kfree_sensitive(rctx->backup_iv); + memcpy(areq->iv, chan->backup_iv, ivsize); + memzero_explicit(chan->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } - kfree(rctx->bounce_iv); + memzero_explicit(chan->bounce_iv, ivsize); } dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); @@ -398,10 +419,9 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm) sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm); - - dev_info(op->ce->dev, "Fallback for %s is %s\n", - crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); + memcpy(algt->fbname, + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), + CRYPTO_MAX_ALG_NAME); op->enginectx.op.do_one_request = sun8i_ce_cipher_run; op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c index d8623c7e0d1d..9f6594699835 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c @@ -283,7 +283,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, @@ -310,7 +310,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = 
CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, @@ -336,7 +336,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, @@ -363,7 +363,7 @@ static struct sun8i_ce_alg_template ce_algs[] = { .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | - CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | + CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, @@ -595,19 +595,47 @@ static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) continue; switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: - seq_printf(seq, "%s %s %lu %lu\n", + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ce_algs[i].alg.skcipher.base.cra_driver_name, ce_algs[i].alg.skcipher.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ce_algs[i].fbname); + seq_printf(seq, "\tFallback due to 0 length: %lu\n", + ce_algs[i].stat_fb_len0); + seq_printf(seq, "\tFallback due to length !mod16: %lu\n", + ce_algs[i].stat_fb_mod16); + seq_printf(seq, "\tFallback due to length < IV: %lu\n", + ce_algs[i].stat_fb_leniv); + seq_printf(seq, "\tFallback due to source alignment: %lu\n", + ce_algs[i].stat_fb_srcali); + seq_printf(seq, "\tFallback due to dest alignment: %lu\n", + ce_algs[i].stat_fb_dstali); + seq_printf(seq, "\tFallback due to source length: %lu\n", + ce_algs[i].stat_fb_srclen); + seq_printf(seq, "\tFallback due to dest length: %lu\n", + ce_algs[i].stat_fb_dstlen); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ce_algs[i].stat_fb_maxsg); break; case CRYPTO_ALG_TYPE_AHASH: - seq_printf(seq, "%s %s %lu %lu\n", + seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ce_algs[i].alg.hash.halg.base.cra_driver_name, ce_algs[i].alg.hash.halg.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ce_algs[i].fbname); + seq_printf(seq, "\tFallback due to 0 length: %lu\n", + ce_algs[i].stat_fb_len0); + seq_printf(seq, "\tFallback due to length: %lu\n", + ce_algs[i].stat_fb_srclen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ce_algs[i].stat_fb_srcali); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ce_algs[i].stat_fb_maxsg); break; case CRYPTO_ALG_TYPE_RNG: - seq_printf(seq, "%s %s %lu %lu\n", + seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n", ce_algs[i].alg.rng.base.cra_driver_name, ce_algs[i].alg.rng.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_bytes); @@ -673,6 +701,18 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce) err = -ENOMEM; goto error_engine; } + ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ce->chanlist[i].bounce_iv) { + err = -ENOMEM; + goto error_engine; + } + ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, + GFP_KERNEL); + if (!ce->chanlist[i].backup_iv) { + err = -ENOMEM; + goto error_engine; + } } return 0; error_engine: diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index 
859b7522faaa..8b5b9b9d04c3 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -50,9 +50,9 @@ int sun8i_ce_hash_crainit(struct crypto_tfm *tfm) sizeof(struct sun8i_ce_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - dev_info(op->ce->dev, "Fallback for %s is %s\n", - crypto_tfm_alg_driver_name(tfm), - crypto_tfm_alg_driver_name(&op->fallback_tfm->base)); + memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), + CRYPTO_MAX_ALG_NAME); + err = pm_runtime_get_sync(op->ce->dev); if (err < 0) goto error_pm; @@ -199,17 +199,32 @@ static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ce_alg_template *algt; struct scatterlist *sg; - if (areq->nbytes == 0) + algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); + + if (areq->nbytes == 0) { + algt->stat_fb_len0++; return true; + } /* we need to reserve one SG for padding one */ - if (sg_nents(areq->src) > MAX_SG - 1) + if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) { + algt->stat_fb_maxsg++; return true; + } sg = areq->src; while (sg) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) + if (sg->length % 4) { + algt->stat_fb_srclen++; return true; + } + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_srcali++; + return true; + } sg = sg_next(sg); } return false; @@ -229,7 +244,7 @@ int sun8i_ce_hash_digest(struct ahash_request *areq) if (sun8i_ce_hash_need_fallback(areq)) return sun8i_ce_hash_digest_fb(areq); - nr_sgs = sg_nents(areq->src); + nr_sgs = sg_nents_for_len(areq->src, areq->nbytes); if (nr_sgs > MAX_SG - 1) return sun8i_ce_hash_digest_fb(areq); @@ -248,6 +263,64 @@ int sun8i_ce_hash_digest(struct ahash_request *areq) return crypto_transfer_hash_request_to_engine(engine, areq); } +static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) +{ + u64 fill, min_fill, j, k; + __be64 *bebits; + __le64 *lebits; + + j = padi; + buf[j++] = cpu_to_le32(0x80); + + if (bs == 64) { + fill = 64 - (byte_count % 64); + min_fill = 2 * sizeof(u32) + sizeof(u32); + } else { + fill = 128 - (byte_count % 128); + min_fill = 4 * sizeof(u32) + sizeof(u32); + } + + if (fill < min_fill) + fill += bs; + + k = j; + j += (fill - min_fill) / sizeof(u32); + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + for (; k < j; k++) + buf[k] = 0; + + if (le) { + /* MD5 */ + lebits = (__le64 *)&buf[j]; + *lebits = cpu_to_le64(byte_count << 3); + j += 2; + } else { + if (bs == 64) { + /* sha1 sha224 sha256 */ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } else { + /* sha384 sha512*/ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count >> 61); + j += 2; + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } + } + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + + return j; +} + int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); @@ -266,14 +339,11 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) __le32 *bf; void *buf = NULL; int j, i, todo; - int nbw = 0; - u64 fill, min_fill; - __be64 *bebits; - __le64 *lebits; void *result = 
NULL; u64 bs; int digestsize; dma_addr_t addr_res, addr_pad; + int ns = sg_nents_for_len(areq->src, areq->nbytes); algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash); ce = algt->ce; @@ -318,7 +388,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) cet->t_sym_ctl = 0; cet->t_asym_ctl = 0; - nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); + nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; @@ -348,44 +418,25 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) byte_count = areq->nbytes; j = 0; - bf[j++] = cpu_to_le32(0x80); - - if (bs == 64) { - fill = 64 - (byte_count % 64); - min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); - } else { - fill = 128 - (byte_count % 128); - min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); - } - - if (fill < min_fill) - fill += bs; - - j += (fill - min_fill) / sizeof(u32); switch (algt->ce_algo_id) { case CE_ID_HASH_MD5: - lebits = (__le64 *)&bf[j]; - *lebits = cpu_to_le64(byte_count << 3); - j += 2; + j = hash_pad(bf, 2 * bs, j, byte_count, true, bs); break; case CE_ID_HASH_SHA1: case CE_ID_HASH_SHA224: case CE_ID_HASH_SHA256: - bebits = (__be64 *)&bf[j]; - *bebits = cpu_to_be64(byte_count << 3); - j += 2; + j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); break; case CE_ID_HASH_SHA384: case CE_ID_HASH_SHA512: - bebits = (__be64 *)&bf[j]; - *bebits = cpu_to_be64(byte_count >> 61); - j += 2; - bebits = (__be64 *)&bf[j]; - *bebits = cpu_to_be64(byte_count << 3); - j += 2; + j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); break; } + if (!j) { + err = -EINVAL; + goto theend; + } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); cet->t_src[i].addr = cpu_to_le32(addr_pad); @@ -406,8 +457,7 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); + dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index b3a9bbfb8831..b3cc43ea6c8a 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -108,11 +108,9 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, goto err_dst; } - err = pm_runtime_get_sync(ce->dev); - if (err < 0) { - pm_runtime_put_noidle(ce->dev); + err = pm_runtime_resume_and_get(ce->dev); + if (err < 0) goto err_pm; - } mutex_lock(&ce->rnglock); chan = &ce->chanlist[flow]; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h index 624a5926f21f..8177aaba4434 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h @@ -186,6 +186,8 @@ struct ce_task { * @status: set to 1 by interrupt if task is done * @t_phy: Physical address of task * @tl: pointer to the current ce_task for this flow + * @backup_iv: buffer which contain the next IV to store + * @bounce_iv: buffer which contain the IV * @stat_req: number of request done by this flow */ struct sun8i_ce_flow { @@ -195,6 +197,8 @@ struct sun8i_ce_flow { dma_addr_t t_phy; int timeout; struct ce_task *tl; + void *backup_iv; 
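	/*
	 * Descriptive note on the two per-flow IV buffers (backup_iv above,
	 * bounce_iv below): both are AES_BLOCK_SIZE bytes and are allocated
	 * once in sun8i_ce_allocate_chanlist(), bounce_iv with GFP_DMA because
	 * it is handed to dma_map_single(), so the cipher path no longer needs
	 * a kzalloc() and kfree_sensitive() per request.
	 */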
+ void *bounce_iv; #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG unsigned long stat_req; #endif @@ -241,8 +245,6 @@ struct sun8i_ce_dev { * struct sun8i_cipher_req_ctx - context for a skcipher request * @op_dir: direction (encrypt vs decrypt) for this request * @flow: the flow to use for this request - * @backup_iv: buffer which contain the next IV to store - * @bounce_iv: buffer which contain the IV * @ivlen: size of bounce_iv * @nr_sgs: The number of source SG (as given by dma_map_sg()) * @nr_sgd: The number of destination SG (as given by dma_map_sg()) @@ -253,8 +255,6 @@ struct sun8i_ce_dev { struct sun8i_cipher_req_ctx { u32 op_dir; int flow; - void *backup_iv; - void *bounce_iv; unsigned int ivlen; int nr_sgs; int nr_sgd; @@ -333,11 +333,18 @@ struct sun8i_ce_alg_template { struct ahash_alg hash; struct rng_alg rng; } alg; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG unsigned long stat_req; unsigned long stat_fb; unsigned long stat_bytes; -#endif + unsigned long stat_fb_maxsg; + unsigned long stat_fb_leniv; + unsigned long stat_fb_len0; + unsigned long stat_fb_mod16; + unsigned long stat_fb_srcali; + unsigned long stat_fb_srclen; + unsigned long stat_fb_dstali; + unsigned long stat_fb_dstlen; + char fbname[CRYPTO_MAX_ALG_NAME]; }; int sun8i_ce_enqueue(struct crypto_async_request *areq, u32 type); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c index 554e400d41ca..5bb950182026 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c @@ -22,34 +22,53 @@ static bool sun8i_ss_need_fallback(struct skcipher_request *areq) { + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct skcipher_alg *alg = crypto_skcipher_alg(tfm); + struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); struct scatterlist *in_sg = areq->src; struct scatterlist *out_sg = areq->dst; struct scatterlist *sg; + unsigned int todo, len; - if (areq->cryptlen == 0 || areq->cryptlen % 16) + if (areq->cryptlen == 0 || areq->cryptlen % 16) { + algt->stat_fb_len++; return true; + } - if (sg_nents(areq->src) > 8 || sg_nents(areq->dst) > 8) + if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 || + sg_nents_for_len(areq->dst, areq->cryptlen) > 8) { + algt->stat_fb_sgnum++; return true; + } + len = areq->cryptlen; sg = areq->src; while (sg) { - if ((sg->length % 16) != 0) - return true; - if ((sg_dma_len(sg) % 16) != 0) + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; return true; - if (!IS_ALIGNED(sg->offset, 16)) + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; return true; + } + len -= todo; sg = sg_next(sg); } + len = areq->cryptlen; sg = areq->dst; while (sg) { - if ((sg->length % 16) != 0) - return true; - if ((sg_dma_len(sg) % 16) != 0) + todo = min(len, sg->length); + if ((todo % 16) != 0) { + algt->stat_fb_sglen++; return true; - if (!IS_ALIGNED(sg->offset, 16)) + } + if (!IS_ALIGNED(sg->offset, 16)) { + algt->stat_fb_align++; return true; + } + len -= todo; sg = sg_next(sg); } @@ -93,6 +112,68 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) return err; } +static int sun8i_ss_setup_ivs(struct skcipher_request *areq) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); + struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); + struct sun8i_ss_dev *ss = op->ss; + struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); + struct scatterlist *sg = 
areq->src; + unsigned int todo, offset; + unsigned int len = areq->cryptlen; + unsigned int ivsize = crypto_skcipher_ivsize(tfm); + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; + int i = 0; + u32 a; + int err; + + rctx->ivlen = ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + offset = areq->cryptlen - ivsize; + scatterwalk_map_and_copy(sf->biv, areq->src, offset, + ivsize, 0); + } + + /* we need to copy all IVs from source in case DMA is bi-directionnal */ + while (sg && len) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } + if (i == 0) + memcpy(sf->iv[0], areq->iv, ivsize); + a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, a)) { + memzero_explicit(sf->iv[i], ivsize); + dev_err(ss->dev, "Cannot DMA MAP IV\n"); + err = -EFAULT; + goto dma_iv_error; + } + rctx->p_iv[i] = a; + /* we need to setup all others IVs only in the decrypt way */ + if (rctx->op_dir & SS_ENCRYPTION) + return 0; + todo = min(len, sg_dma_len(sg)); + len -= todo; + i++; + if (i < MAX_SG) { + offset = sg->length - ivsize; + scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); + } + rctx->niv = i; + sg = sg_next(sg); + } + + return 0; +dma_iv_error: + i--; + while (i >= 0) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + } + return err; +} + static int sun8i_ss_cipher(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); @@ -101,12 +182,14 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; + struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; struct scatterlist *sg; unsigned int todo, len, offset, ivsize; - void *backup_iv = NULL; int nr_sgs = 0; int nr_sgd = 0; int err = 0; + int nsgs = sg_nents_for_len(areq->src, areq->cryptlen); + int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen); int i; algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher); @@ -134,34 +217,12 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { - rctx->ivlen = ivsize; - rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA); - if (!rctx->biv) { - err = -ENOMEM; + err = sun8i_ss_setup_ivs(areq); + if (err) goto theend_key; - } - if (rctx->op_dir & SS_DECRYPTION) { - backup_iv = kzalloc(ivsize, GFP_KERNEL); - if (!backup_iv) { - err = -ENOMEM; - goto theend_key; - } - offset = areq->cryptlen - ivsize; - scatterwalk_map_and_copy(backup_iv, areq->src, offset, - ivsize, 0); - } - memcpy(rctx->biv, areq->iv, ivsize); - rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen, - DMA_TO_DEVICE); - if (dma_mapping_error(ss->dev, rctx->p_iv)) { - dev_err(ss->dev, "Cannot DMA MAP IV\n"); - err = -ENOMEM; - goto theend_iv; - } } if (areq->src == areq->dst) { - nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), - DMA_BIDIRECTIONAL); + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); if (nr_sgs <= 0 || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; @@ -169,15 +230,13 @@ static int sun8i_ss_cipher(struct skcipher_request *areq) } nr_sgd = nr_sgs; } else { - nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); + nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg 
number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } - nr_sgd = dma_map_sg(ss->dev, areq->dst, sg_nents(areq->dst), - DMA_FROM_DEVICE); + nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); if (nr_sgd <= 0 || nr_sgd > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; @@ -233,31 +292,26 @@ sgd_next: theend_sgs: if (areq->src == areq->dst) { - dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), - DMA_BIDIRECTIONAL); + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); } else { - dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->dst, sg_nents(areq->dst), - DMA_FROM_DEVICE); + dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); + dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); } theend_iv: - if (rctx->p_iv) - dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen, - DMA_TO_DEVICE); - if (areq->iv && ivsize > 0) { - if (rctx->biv) { - offset = areq->cryptlen - ivsize; - if (rctx->op_dir & SS_DECRYPTION) { - memcpy(areq->iv, backup_iv, ivsize); - kfree_sensitive(backup_iv); - } else { - scatterwalk_map_and_copy(areq->iv, areq->dst, offset, - ivsize, 0); - } - kfree(rctx->biv); + for (i = 0; i < rctx->niv; i++) { + dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); + memzero_explicit(sf->iv[i], ivsize); + } + + offset = areq->cryptlen - ivsize; + if (rctx->op_dir & SS_DECRYPTION) { + memcpy(areq->iv, sf->biv, ivsize); + memzero_explicit(sf->biv, ivsize); + } else { + scatterwalk_map_and_copy(areq->iv, areq->dst, offset, + ivsize, 0); } } @@ -349,9 +403,9 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm) crypto_skcipher_reqsize(op->fallback_tfm); - dev_info(op->ss->dev, "Fallback for %s is %s\n", - crypto_tfm_alg_driver_name(&sktfm->base), - crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm))); + memcpy(algt->fbname, + crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), + CRYPTO_MAX_ALG_NAME); op->enginectx.op.do_one_request = sun8i_ss_handle_cipher_request; op->enginectx.op.prepare_request = NULL; diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c index 319fe3279a71..98593a0cff69 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c @@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx const char *name) { int flow = rctx->flow; + unsigned int ivlen = rctx->ivlen; u32 v = SS_START; int i; @@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx mutex_lock(&ss->mlock); writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); - if (i == 0) { - if (rctx->p_iv) - writel(rctx->p_iv, ss->base + SS_IV_ADR_REG); - } else { - if (rctx->biv) { - if (rctx->op_dir == SS_ENCRYPTION) - writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); + if (ivlen) { + if (rctx->op_dir == SS_ENCRYPTION) { + if (i == 0) + writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); else - writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG); + writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); + } else { + writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); } } @@ -409,6 +409,37 @@ static struct sun8i_ss_alg_template ss_algs[] = { } } }, +{ .type = CRYPTO_ALG_TYPE_AHASH, + .ss_algo_id = SS_ID_HASH_SHA1, + .alg.hash = { + .init = 
sun8i_ss_hash_init, + .update = sun8i_ss_hash_update, + .final = sun8i_ss_hash_final, + .finup = sun8i_ss_hash_finup, + .digest = sun8i_ss_hash_digest, + .export = sun8i_ss_hash_export, + .import = sun8i_ss_hash_import, + .setkey = sun8i_ss_hmac_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac-sha1-sun8i-ss", + .cra_priority = 300, + .cra_alignmask = 3, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), + .cra_module = THIS_MODULE, + .cra_init = sun8i_ss_hash_crainit, + .cra_exit = sun8i_ss_hash_craexit, + } + } + } +}, #endif }; @@ -430,6 +461,17 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) ss_algs[i].alg.skcipher.base.cra_driver_name, ss_algs[i].alg.skcipher.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); + + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); break; case CRYPTO_ALG_TYPE_RNG: seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", @@ -442,6 +484,16 @@ static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) ss_algs[i].alg.hash.halg.base.cra_driver_name, ss_algs[i].alg.hash.halg.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); + seq_printf(seq, "\tLast fallback is: %s\n", + ss_algs[i].fbname); + seq_printf(seq, "\tFallback due to length: %lu\n", + ss_algs[i].stat_fb_len); + seq_printf(seq, "\tFallback due to SG length: %lu\n", + ss_algs[i].stat_fb_sglen); + seq_printf(seq, "\tFallback due to alignment: %lu\n", + ss_algs[i].stat_fb_align); + seq_printf(seq, "\tFallback due to SG numbers: %lu\n", + ss_algs[i].stat_fb_sgnum); break; } } @@ -464,7 +516,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) */ static int allocate_flows(struct sun8i_ss_dev *ss) { - int i, err; + int i, j, err; ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), GFP_KERNEL); @@ -474,6 +526,28 @@ static int allocate_flows(struct sun8i_ss_dev *ss) for (i = 0; i < MAXFLOW; i++) { init_completion(&ss->flows[i].complete); + ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].biv) + goto error_engine; + + for (j = 0; j < MAX_SG; j++) { + ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].iv[j]) + goto error_engine; + } + + /* the padding could be up to two block. 
*/ + ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].pad) + goto error_engine; + ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE, + GFP_KERNEL | GFP_DMA); + if (!ss->flows[i].result) + goto error_engine; + ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); if (!ss->flows[i].engine) { dev_err(ss->dev, "Cannot allocate engine\n"); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c index 1a71ed49d233..ac417a6b39e5 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c @@ -14,11 +14,99 @@ #include <linux/pm_runtime.h> #include <linux/scatterlist.h> #include <crypto/internal/hash.h> +#include <crypto/hmac.h> +#include <crypto/scatterwalk.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/md5.h> #include "sun8i-ss.h" +static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, + unsigned int keylen) +{ + struct crypto_shash *xtfm; + struct shash_desc *sdesc; + size_t len; + int ret = 0; + + xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); + if (!xtfm) + return -ENOMEM; + + len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); + sdesc = kmalloc(len, GFP_KERNEL); + if (!sdesc) { + ret = -ENOMEM; + goto err_hashkey_sdesc; + } + sdesc->tfm = xtfm; + + ret = crypto_shash_init(sdesc); + if (ret) { + dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret); + goto err_hashkey; + } + ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key); + if (ret) + dev_err(tfmctx->ss->dev, "shash finup error\n"); +err_hashkey: + kfree(sdesc); +err_hashkey_sdesc: + crypto_free_shash(xtfm); + return ret; +} + +int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen) +{ + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash); + struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg); + struct sun8i_ss_alg_template *algt; + int digestsize, i; + int bs = crypto_ahash_blocksize(ahash); + int ret; + + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + digestsize = algt->alg.hash.halg.digestsize; + + if (keylen > bs) { + ret = sun8i_ss_hashkey(tfmctx, key, keylen); + if (ret) + return ret; + tfmctx->keylen = digestsize; + } else { + tfmctx->keylen = keylen; + memcpy(tfmctx->key, key, keylen); + } + + tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + if (!tfmctx->ipad) + return -ENOMEM; + tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA); + if (!tfmctx->opad) { + ret = -ENOMEM; + goto err_opad; + } + + memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen); + memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen); + memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen); + for (i = 0; i < bs; i++) { + tfmctx->ipad[i] ^= HMAC_IPAD_VALUE; + tfmctx->opad[i] ^= HMAC_OPAD_VALUE; + } + + ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen); + if (!ret) + return 0; + + memzero_explicit(tfmctx->key, keylen); + kfree_sensitive(tfmctx->opad); +err_opad: + kfree_sensitive(tfmctx->ipad); + return ret; +} + int sun8i_ss_hash_crainit(struct crypto_tfm *tfm) { struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm); @@ -50,9 +138,8 @@ int sun8i_ss_hash_crainit(struct crypto_tfm *tfm) sizeof(struct sun8i_ss_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); - dev_info(op->ss->dev, "Fallback for %s is %s\n", - crypto_tfm_alg_driver_name(tfm), - crypto_tfm_alg_driver_name(&op->fallback_tfm->base)); + 
memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME); + err = pm_runtime_get_sync(op->ss->dev); if (err < 0) goto error_pm; @@ -67,6 +154,9 @@ void sun8i_ss_hash_craexit(struct crypto_tfm *tfm) { struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm); + kfree_sensitive(tfmctx->ipad); + kfree_sensitive(tfmctx->opad); + crypto_free_ahash(tfmctx->fallback_tfm); pm_runtime_put_sync_suspend(tfmctx->ss->dev); } @@ -258,23 +348,48 @@ static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss, static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) { + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); + struct sun8i_ss_alg_template *algt; struct scatterlist *sg; - if (areq->nbytes == 0) + algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); + + if (areq->nbytes == 0) { + algt->stat_fb_len++; + return true; + } + + if (areq->nbytes >= MAX_PAD_SIZE - 64) { + algt->stat_fb_len++; return true; + } + /* we need to reserve one SG for the padding one */ - if (sg_nents(areq->src) > MAX_SG - 1) + if (sg_nents(areq->src) > MAX_SG - 1) { + algt->stat_fb_sgnum++; return true; + } + sg = areq->src; while (sg) { /* SS can operate hash only on full block size * since SS support only MD5,sha1,sha224 and sha256, blocksize * is always 64 - * TODO: handle request if last SG is not len%64 - * but this will need to copy data on a new SG of size=64 */ - if (sg->length % 64 || !IS_ALIGNED(sg->offset, sizeof(u32))) + /* Only the last block could be bounced to the pad buffer */ + if (sg->length % 64 && sg_next(sg)) { + algt->stat_fb_sglen++; + return true; + } + if (!IS_ALIGNED(sg->offset, sizeof(u32))) { + algt->stat_fb_align++; + return true; + } + if (sg->length % 4) { + algt->stat_fb_sglen++; return true; + } sg = sg_next(sg); } return false; @@ -288,21 +403,11 @@ int sun8i_ss_hash_digest(struct ahash_request *areq) struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct crypto_engine *engine; - struct scatterlist *sg; - int nr_sgs, e, i; + int e; if (sun8i_ss_hash_need_fallback(areq)) return sun8i_ss_hash_digest_fb(areq); - nr_sgs = sg_nents(areq->src); - if (nr_sgs > MAX_SG - 1) - return sun8i_ss_hash_digest_fb(areq); - - for_each_sg(areq->src, sg, nr_sgs, i) { - if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) - return sun8i_ss_hash_digest_fb(areq); - } - algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); ss = algt->ss; @@ -313,6 +418,64 @@ int sun8i_ss_hash_digest(struct ahash_request *areq) return crypto_transfer_hash_request_to_engine(engine, areq); } +static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) +{ + u64 fill, min_fill, j, k; + __be64 *bebits; + __le64 *lebits; + + j = padi; + buf[j++] = cpu_to_le32(0x80); + + if (bs == 64) { + fill = 64 - (byte_count % 64); + min_fill = 2 * sizeof(u32) + sizeof(u32); + } else { + fill = 128 - (byte_count % 128); + min_fill = 4 * sizeof(u32) + sizeof(u32); + } + + if (fill < min_fill) + fill += bs; + + k = j; + j += (fill - min_fill) / sizeof(u32); + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + for (; k < j; k++) + buf[k] = 0; + + if (le) { + /* MD5 */ + lebits = (__le64 *)&buf[j]; + *lebits = cpu_to_le64(byte_count << 3); + j += 2; + } else { + if (bs == 64) { + /* sha1 sha224 sha256 */ + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } else { + /* sha384 sha512*/ + bebits = 
(__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count >> 61); + j += 2; + bebits = (__be64 *)&buf[j]; + *bebits = cpu_to_be64(byte_count << 3); + j += 2; + } + } + if (j * 4 > bufsize) { + pr_err("%s OVERFLOW %llu\n", __func__, j); + return 0; + } + + return j; +} + /* sun8i_ss_hash_run - run an ahash request * Send the data of the request to the SS along with an extra SG with padding */ @@ -320,20 +483,26 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct scatterlist *sg; + int bs = crypto_ahash_blocksize(tfm); int nr_sgs, err, digestsize; unsigned int len; - u64 fill, min_fill, byte_count; + u64 byte_count; void *pad, *result; - int j, i, todo; - __be64 *bebits; - __le64 *lebits; - dma_addr_t addr_res, addr_pad; + int j, i, k, todo; + dma_addr_t addr_res, addr_pad, addr_xpad; __le32 *bf; + /* HMAC step: + * 0: normal hashing + * 1: IPAD + * 2: OPAD + */ + int hmac = 0; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash); ss = algt->ss; @@ -342,18 +511,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; - /* the padding could be up to two block. */ - pad = kzalloc(algt->alg.hash.halg.base.cra_blocksize * 2, GFP_KERNEL | GFP_DMA); - if (!pad) - return -ENOMEM; + result = ss->flows[rctx->flow].result; + pad = ss->flows[rctx->flow].pad; bf = (__le32 *)pad; - result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); - if (!result) { - kfree(pad); - return -ENOMEM; - } - for (i = 0; i < MAX_SG; i++) { rctx->t_dst[i].addr = 0; rctx->t_dst[i].len = 0; @@ -376,17 +537,33 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) if (dma_mapping_error(ss->dev, addr_res)) { dev_err(ss->dev, "DMA map dest\n"); err = -EINVAL; - goto theend; + goto err_dma_result; } + j = 0; len = areq->nbytes; - for_each_sg(areq->src, sg, nr_sgs, i) { - rctx->t_src[i].addr = sg_dma_address(sg); + sg = areq->src; + i = 0; + while (len > 0 && sg) { + if (sg_dma_len(sg) == 0) { + sg = sg_next(sg); + continue; + } todo = min(len, sg_dma_len(sg)); - rctx->t_src[i].len = todo / 4; - len -= todo; - rctx->t_dst[i].addr = addr_res; - rctx->t_dst[i].len = digestsize / 4; + /* only the last SG could be with a size not modulo64 */ + if (todo % 64 == 0) { + rctx->t_src[i].addr = sg_dma_address(sg); + rctx->t_src[i].len = todo / 4; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; + len -= todo; + } else { + scatterwalk_map_and_copy(bf, sg, 0, todo, 0); + j += todo / 4; + len -= todo; + } + sg = sg_next(sg); + i++; } if (len > 0) { dev_err(ss->dev, "remaining len %d\n", len); @@ -394,55 +571,135 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) goto theend; } + if (j > 0) + i--; + +retry: byte_count = areq->nbytes; - j = 0; - bf[j++] = cpu_to_le32(0x80); + if (tfmctx->keylen && hmac == 0) { + hmac = 1; + /* shift all SG one slot up, to free slot 0 for IPAD */ + for (k = 6; k >= 0; k--) { + rctx->t_src[k + 1].addr = rctx->t_src[k].addr; + rctx->t_src[k + 1].len = rctx->t_src[k].len; + rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr; + rctx->t_dst[k + 1].len = rctx->t_dst[k].len; + } + 
addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, addr_xpad)) { + dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + i++; + byte_count = areq->nbytes + bs; + } + if (tfmctx->keylen && hmac == 2) { + for (i = 0; i < MAX_SG; i++) { + rctx->t_src[i].addr = 0; + rctx->t_src[i].len = 0; + rctx->t_dst[i].addr = 0; + rctx->t_dst[i].len = 0; + } - fill = 64 - (byte_count % 64); - min_fill = 3 * sizeof(u32); + addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); + if (dma_mapping_error(ss->dev, addr_res)) { + dev_err(ss->dev, "Fail to create DMA mapping of result\n"); + err = -EINVAL; + goto err_dma_result; + } + addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); + if (dma_mapping_error(ss->dev, addr_xpad)) { + dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); + goto err_dma_xpad; + } + rctx->t_src[0].addr = addr_xpad; + rctx->t_src[0].len = bs / 4; - if (fill < min_fill) - fill += 64; + memcpy(bf, result, digestsize); + j = digestsize / 4; + i = 1; + byte_count = digestsize + bs; - j += (fill - min_fill) / sizeof(u32); + rctx->t_dst[0].addr = addr_res; + rctx->t_dst[0].len = digestsize / 4; + } switch (algt->ss_algo_id) { case SS_ID_HASH_MD5: - lebits = (__le64 *)&bf[j]; - *lebits = cpu_to_le64(byte_count << 3); - j += 2; + j = hash_pad(bf, 4096, j, byte_count, true, bs); break; case SS_ID_HASH_SHA1: case SS_ID_HASH_SHA224: case SS_ID_HASH_SHA256: - bebits = (__be64 *)&bf[j]; - *bebits = cpu_to_be64(byte_count << 3); - j += 2; + j = hash_pad(bf, 4096, j, byte_count, false, bs); break; } + if (!j) { + err = -EINVAL; + goto theend; + } addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE); - rctx->t_src[i].addr = addr_pad; - rctx->t_src[i].len = j; - rctx->t_dst[i].addr = addr_res; - rctx->t_dst[i].len = digestsize / 4; if (dma_mapping_error(ss->dev, addr_pad)) { dev_err(ss->dev, "DMA error on padding SG\n"); err = -EINVAL; - goto theend; + goto err_dma_pad; } + rctx->t_src[i].addr = addr_pad; + rctx->t_src[i].len = j; + rctx->t_dst[i].addr = addr_res; + rctx->t_dst[i].len = digestsize / 4; err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); + /* + * mini helper for checking dma map/unmap + * flow start for hmac = 0 (and HMAC = 1) + * HMAC = 0 + * MAP src + * MAP res + * + * retry: + * if hmac then hmac = 1 + * MAP xpad (ipad) + * if hmac == 2 + * MAP res + * MAP xpad (opad) + * MAP pad + * ACTION! 
+ * UNMAP pad + * if hmac + * UNMAP xpad + * UNMAP res + * if hmac < 2 + * UNMAP SRC + * + * if hmac = 1 then hmac = 2 goto retry + */ + dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE); - dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), - DMA_TO_DEVICE); + +err_dma_pad: + if (hmac > 0) + dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE); +err_dma_xpad: dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE); +err_dma_result: + if (hmac < 2) + dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), + DMA_TO_DEVICE); + if (hmac == 1 && !err) { + hmac = 2; + goto retry; + } - memcpy(areq->result, result, algt->alg.hash.halg.digestsize); + if (!err) + memcpy(areq->result, result, algt->alg.hash.halg.digestsize); theend: - kfree(pad); - kfree(result); local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c index 246a6782674c..dd677e9ed06f 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c @@ -112,11 +112,9 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, goto err_iv; } - err = pm_runtime_get_sync(ss->dev); - if (err < 0) { - pm_runtime_put_noidle(ss->dev); + err = pm_runtime_resume_and_get(ss->dev); + if (err < 0) goto err_pm; - } err = 0; mutex_lock(&ss->mlock); diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h index 28188685b910..df6f08f6092f 100644 --- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h +++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h @@ -82,6 +82,8 @@ #define PRNG_DATA_SIZE (160 / 8) #define PRNG_SEED_SIZE DIV_ROUND_UP(175, 8) +#define MAX_PAD_SIZE 4096 + /* * struct ss_clock - Describe clocks used by sun8i-ss * @name: Name of clock needed by this variant @@ -121,11 +123,19 @@ struct sginfo { * @complete: completion for the current task on this flow * @status: set to 1 by interrupt if task is done * @stat_req: number of request done by this flow + * @iv: list of IV to use for each step + * @biv: buffer which contain the backuped IV + * @pad: padding buffer for hash operations + * @result: buffer for storing the result of hash operations */ struct sun8i_ss_flow { struct crypto_engine *engine; struct completion complete; int status; + u8 *iv[MAX_SG]; + u8 *biv; + void *pad; + void *result; #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG unsigned long stat_req; #endif @@ -164,28 +174,28 @@ struct sun8i_ss_dev { * @t_src: list of mapped SGs with their size * @t_dst: list of mapped SGs with their size * @p_key: DMA address of the key - * @p_iv: DMA address of the IV + * @p_iv: DMA address of the IVs + * @niv: Number of IVs DMA mapped * @method: current algorithm for this request * @op_mode: op_mode for this request * @op_dir: direction (encrypt vs decrypt) for this request * @flow: the flow to use for this request - * @ivlen: size of biv + * @ivlen: size of IVs * @keylen: keylen for this request - * @biv: buffer which contain the IV * @fallback_req: request struct for invoking the fallback skcipher TFM */ struct sun8i_cipher_req_ctx { struct sginfo t_src[MAX_SG]; struct sginfo t_dst[MAX_SG]; u32 p_key; - u32 p_iv; + u32 p_iv[MAX_SG]; + int niv; u32 method; u32 op_mode; u32 op_dir; int flow; unsigned int ivlen; unsigned int keylen; - void *biv; struct skcipher_request fallback_req; // keep at the end }; @@ -229,6 +239,10 @@ struct sun8i_ss_hash_tfm_ctx { struct crypto_engine_ctx 
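/*
 * Sketch of the textbook two-pass HMAC construction that the hmac = 1
 * (IPAD) / hmac = 2 (OPAD) retry flow in sun8i_ss_hash_run() above drives
 * in hardware: the inner hash covers (key ^ 0x36..36) || message, the
 * outer hash covers (key ^ 0x5c..5c) || inner digest. Standalone C with a
 * generic hash callback, not the driver's code; bs <= 128 and ds <= 64
 * assumed, and scratch must hold bs + max(mlen, ds) bytes.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef void (*hash_fn)(const uint8_t *msg, size_t len, uint8_t *digest);

static void hmac_two_pass(hash_fn hash, size_t bs, size_t ds,
                          const uint8_t *key, size_t klen,
                          const uint8_t *msg, size_t mlen,
                          uint8_t *out, uint8_t *scratch)
{
    uint8_t k[128] = { 0 };      /* key zero-padded to one block */
    uint8_t inner[64];
    size_t i;

    if (klen > bs)
        hash(key, klen, k);      /* over-long keys are hashed down first */
    else
        memcpy(k, key, klen);

    for (i = 0; i < bs; i++)     /* pass 1: H((K ^ ipad) || msg) */
        scratch[i] = k[i] ^ 0x36;
    memcpy(scratch + bs, msg, mlen);
    hash(scratch, bs + mlen, inner);

    for (i = 0; i < bs; i++)     /* pass 2: H((K ^ opad) || inner) */
        scratch[i] = k[i] ^ 0x5c;
    memcpy(scratch + bs, inner, ds);
    hash(scratch, bs + ds, out);
}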
enginectx; struct crypto_ahash *fallback_tfm; struct sun8i_ss_dev *ss; + u8 *ipad; + u8 *opad; + u8 key[SHA256_BLOCK_SIZE]; + int keylen; }; /* @@ -269,11 +283,14 @@ struct sun8i_ss_alg_template { struct rng_alg rng; struct ahash_alg hash; } alg; -#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG unsigned long stat_req; unsigned long stat_fb; unsigned long stat_bytes; -#endif + unsigned long stat_fb_len; + unsigned long stat_fb_sglen; + unsigned long stat_fb_align; + unsigned long stat_fb_sgnum; + char fbname[CRYPTO_MAX_ALG_NAME]; }; int sun8i_ss_enqueue(struct crypto_async_request *areq, u32 type); @@ -306,3 +323,5 @@ int sun8i_ss_hash_update(struct ahash_request *areq); int sun8i_ss_hash_finup(struct ahash_request *areq); int sun8i_ss_hash_digest(struct ahash_request *areq); int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq); +int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, + unsigned int keylen); diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c index 333fbefbbccb..59a57279e77b 100644 --- a/drivers/crypto/atmel-ecc.c +++ b/drivers/crypto/atmel-ecc.c @@ -398,7 +398,7 @@ static int __init atmel_ecc_init(void) static void __exit atmel_ecc_exit(void) { - flush_scheduled_work(); + atmel_i2c_flush_queue(); i2c_del_driver(&atmel_ecc_driver); } diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c index 6fd3e969211d..81ce09bedda8 100644 --- a/drivers/crypto/atmel-i2c.c +++ b/drivers/crypto/atmel-i2c.c @@ -263,6 +263,8 @@ static void atmel_i2c_work_handler(struct work_struct *work) work_data->cbk(work_data, work_data->areq, status); } +static struct workqueue_struct *atmel_wq; + void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq, int status), @@ -272,10 +274,16 @@ void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, work_data->areq = areq; INIT_WORK(&work_data->work, atmel_i2c_work_handler); - schedule_work(&work_data->work); + queue_work(atmel_wq, &work_data->work); } EXPORT_SYMBOL(atmel_i2c_enqueue); +void atmel_i2c_flush_queue(void) +{ + flush_workqueue(atmel_wq); +} +EXPORT_SYMBOL(atmel_i2c_flush_queue); + static inline size_t atmel_i2c_wake_token_sz(u32 bus_clk_rate) { u32 no_of_bits = DIV_ROUND_UP(TWLO_USEC * bus_clk_rate, USEC_PER_SEC); @@ -364,14 +372,24 @@ int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, i2c_priv); - ret = device_sanity_check(client); - if (ret) - return ret; - - return 0; + return device_sanity_check(client); } EXPORT_SYMBOL(atmel_i2c_probe); +static int __init atmel_i2c_init(void) +{ + atmel_wq = alloc_workqueue("atmel_wq", 0, 0); + return atmel_wq ? 
0 : -ENOMEM; +} + +static void __exit atmel_i2c_exit(void) +{ + destroy_workqueue(atmel_wq); +} + +module_init(atmel_i2c_init); +module_exit(atmel_i2c_exit); + MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>"); MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h index 63b97b104f16..48929efe2a5b 100644 --- a/drivers/crypto/atmel-i2c.h +++ b/drivers/crypto/atmel-i2c.h @@ -173,6 +173,7 @@ void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data, void (*cbk)(struct atmel_i2c_work_data *work_data, void *areq, int status), void *areq); +void atmel_i2c_flush_queue(void); int atmel_i2c_send_receive(struct i2c_client *client, struct atmel_i2c_cmd *cmd); diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index c96c14e7dab1..e4087bdd2475 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -121,23 +121,24 @@ static int atmel_sha204a_remove(struct i2c_client *client) struct atmel_i2c_client_priv *i2c_priv = i2c_get_clientdata(client); if (atomic_read(&i2c_priv->tfm_count)) { - dev_err(&client->dev, "Device is busy\n"); - return -EBUSY; + dev_emerg(&client->dev, "Device is busy, will remove it anyhow\n"); + return 0; } - if (i2c_priv->hwrng.priv) - kfree((void *)i2c_priv->hwrng.priv); + kfree((void *)i2c_priv->hwrng.priv); return 0; } static const struct of_device_id atmel_sha204a_dt_ids[] = { + { .compatible = "atmel,atsha204", }, { .compatible = "atmel,atsha204a", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_sha204a_dt_ids); static const struct i2c_device_id atmel_sha204a_id[] = { + { "atsha204", 0 }, { "atsha204a", 0 }, { /* sentinel */ } }; @@ -159,7 +160,7 @@ static int __init atmel_sha204a_init(void) static void __exit atmel_sha204a_exit(void) { - flush_scheduled_work(); + atmel_i2c_flush_queue(); i2c_del_driver(&atmel_sha204a_driver); } diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index ea9f8b1ae981..ec6a9e6ad4d2 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -151,6 +151,14 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API Selecting this will register the SEC4 hardware rng to the hw_random API for supplying the kernel entropy pool. +config CRYPTO_DEV_FSL_CAAM_PRNG_API + bool "Register Pseudo random number generation implementation with Crypto API" + default y + select CRYPTO_RNG + help + Selecting this will register the SEC hardware prng to + the Crypto API. 
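/*
 * Consumer-side sketch (not part of this patch) of how a kernel user would
 * reach the PRNG that this option registers with the Crypto API: the
 * algorithm is exposed under the generic "stdrng" name (see caamprng.c
 * below), so the usual crypto_rng calls apply. Error handling trimmed.
 */
#include <crypto/rng.h>

static int fill_with_hw_prng(u8 *buf, unsigned int len)
{
    struct crypto_rng *rng;
    int ret;

    rng = crypto_alloc_rng("stdrng", 0, 0);  /* highest-priority stdrng */
    if (IS_ERR(rng))
        return PTR_ERR(rng);

    ret = crypto_rng_reset(rng, NULL, 0);    /* the CAAM PRNG takes no seed */
    if (!ret)
        ret = crypto_rng_get_bytes(rng, buf, len);

    crypto_free_rng(rng);
    return ret;
}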
+ config CRYPTO_DEV_FSL_CAAM_BLOB_GEN bool diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile index 25f7ae5a4642..acf1b197eb84 100644 --- a/drivers/crypto/caam/Makefile +++ b/drivers/crypto/caam/Makefile @@ -20,6 +20,7 @@ caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o +caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API) += caamprng.o caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_BLOB_GEN) += blob_gen.o diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c new file mode 100644 index 000000000000..4839e66300a2 --- /dev/null +++ b/drivers/crypto/caam/caamprng.c @@ -0,0 +1,235 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver to expose SEC4 PRNG via crypto RNG API + * + * Copyright 2022 NXP + * + */ + +#include <linux/completion.h> +#include <crypto/internal/rng.h> +#include "compat.h" +#include "regs.h" +#include "intern.h" +#include "desc_constr.h" +#include "jr.h" +#include "error.h" + +/* + * Length of used descriptors, see caam_init_desc() + */ +#define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ + \ + CAAM_CMD_SZ + \ + CAAM_CMD_SZ + CAAM_PTR_SZ_MAX) + +/* prng per-device context */ +struct caam_prng_ctx { + int err; + struct completion done; +}; + +struct caam_prng_alg { + struct rng_alg rng; + bool registered; +}; + +static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err, + void *context) +{ + struct caam_prng_ctx *jctx = context; + + jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0; + + complete(&jctx->done); +} + +static u32 *caam_init_reseed_desc(u32 *desc) +{ + init_job_desc(desc, 0); /* + 1 cmd_sz */ + /* Generate random bytes: + 1 cmd_sz */ + append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | + OP_ALG_AS_FINALIZE); + + print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS, + 16, 4, desc, desc_bytes(desc), 1); + + return desc; +} + +static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len) +{ + init_job_desc(desc, 0); /* + 1 cmd_sz */ + /* Generate random bytes: + 1 cmd_sz */ + append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); + /* Store bytes: + 1 cmd_sz + caam_ptr_sz */ + append_fifo_store(desc, dst_dma, + len, FIFOST_TYPE_RNGSTORE); + + print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS, + 16, 4, desc, desc_bytes(desc), 1); + + return desc; +} + +static int caam_prng_generate(struct crypto_rng *tfm, + const u8 *src, unsigned int slen, + u8 *dst, unsigned int dlen) +{ + struct caam_prng_ctx ctx; + struct device *jrdev; + dma_addr_t dst_dma; + u32 *desc; + u8 *buf; + int ret; + + buf = kzalloc(dlen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + jrdev = caam_jr_alloc(); + ret = PTR_ERR_OR_ZERO(jrdev); + if (ret) { + pr_err("Job Ring Device allocation failed\n"); + kfree(buf); + return ret; + } + + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); + if (!desc) { + ret = -ENOMEM; + goto out1; + } + + dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE); + if (dma_mapping_error(jrdev, dst_dma)) { + dev_err(jrdev, "Failed to map destination buffer memory\n"); + ret = -ENOMEM; + goto out; + } + + init_completion(&ctx.done); + ret = caam_jr_enqueue(jrdev, + caam_init_prng_desc(desc, dst_dma, dlen), + caam_prng_done, &ctx); + + if (ret == -EINPROGRESS) { + wait_for_completion(&ctx.done); + ret = 
ctx.err; + } + + dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE); + + if (!ret) + memcpy(dst, buf, dlen); +out: + kfree(desc); +out1: + caam_jr_free(jrdev); + kfree(buf); + return ret; +} + +static void caam_prng_exit(struct crypto_tfm *tfm) {} + +static int caam_prng_init(struct crypto_tfm *tfm) +{ + return 0; +} + +static int caam_prng_seed(struct crypto_rng *tfm, + const u8 *seed, unsigned int slen) +{ + struct caam_prng_ctx ctx; + struct device *jrdev; + u32 *desc; + int ret; + + if (slen) { + pr_err("Seed length should be zero\n"); + return -EINVAL; + } + + jrdev = caam_jr_alloc(); + ret = PTR_ERR_OR_ZERO(jrdev); + if (ret) { + pr_err("Job Ring Device allocation failed\n"); + return ret; + } + + desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA); + if (!desc) { + caam_jr_free(jrdev); + return -ENOMEM; + } + + init_completion(&ctx.done); + ret = caam_jr_enqueue(jrdev, + caam_init_reseed_desc(desc), + caam_prng_done, &ctx); + + if (ret == -EINPROGRESS) { + wait_for_completion(&ctx.done); + ret = ctx.err; + } + + kfree(desc); + caam_jr_free(jrdev); + return ret; +} + +static struct caam_prng_alg caam_prng_alg = { + .rng = { + .generate = caam_prng_generate, + .seed = caam_prng_seed, + .seedsize = 0, + .base = { + .cra_name = "stdrng", + .cra_driver_name = "prng-caam", + .cra_priority = 500, + .cra_ctxsize = sizeof(struct caam_prng_ctx), + .cra_module = THIS_MODULE, + .cra_init = caam_prng_init, + .cra_exit = caam_prng_exit, + }, + } +}; + +void caam_prng_unregister(void *data) +{ + if (caam_prng_alg.registered) + crypto_unregister_rng(&caam_prng_alg.rng); +} + +int caam_prng_register(struct device *ctrldev) +{ + struct caam_drv_private *priv = dev_get_drvdata(ctrldev); + u32 rng_inst; + int ret = 0; + + /* Check for available RNG blocks before registration */ + if (priv->era < 10) + rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) & + CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; + else + rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK; + + if (!rng_inst) { + dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n"); + return ret; + } + + ret = crypto_register_rng(&caam_prng_alg.rng); + if (ret) { + dev_err(ctrldev, + "couldn't register rng crypto alg: %d\n", + ret); + return ret; + } + + caam_prng_alg.registered = true; + + dev_info(ctrldev, + "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name); + + return 0; +} diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 38c4d88a9d03..32253a064d0f 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -609,6 +609,13 @@ static bool check_version(struct fsl_mc_version *mc_version, u32 major, } #endif +static bool needs_entropy_delay_adjustment(void) +{ + if (of_machine_is_compatible("fsl,imx6sx")) + return true; + return false; +} + /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { @@ -868,6 +875,8 @@ static int caam_probe(struct platform_device *pdev) * Also, if a handle was instantiated, do not change * the TRNG parameters. */ + if (needs_entropy_delay_adjustment()) + ent_delay = 12000; if (!(ctrlpriv->rng4_sh_init || inst_handles)) { dev_info(dev, "Entropy delay = %u\n", @@ -884,6 +893,15 @@ static int caam_probe(struct platform_device *pdev) */ ret = instantiate_rng(dev, inst_handles, gen_sk); + /* + * Entropy delay is determined via TRNG characterization. + * TRNG characterization is run across different voltages + * and temperatures. 
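/*
 * Rough accounting for CAAM_PRNG_MAX_DESC_LEN used above (based on the
 * "+ 1 cmd_sz" notes in caam_init_prng_desc(); CAAM_CMD_SZ is one command
 * word, CAAM_PTR_SZ_MAX the largest pointer a descriptor may carry):
 *
 *   init_job_desc()     -> 1 * CAAM_CMD_SZ   (job header)
 *   append_operation()  -> 1 * CAAM_CMD_SZ   (RNG OPERATION command)
 *   append_fifo_store() -> 1 * CAAM_CMD_SZ + CAAM_PTR_SZ_MAX
 *   ------------------------------------------------------------------
 *   total                  3 * CAAM_CMD_SZ + CAAM_PTR_SZ_MAX
 *
 * which matches the buffer size kzalloc()'d for the descriptor; the reseed
 * descriptor built by caam_init_reseed_desc() is shorter and fits in the
 * same allocation.
 */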
+ * If worst case value for ent_dly is identified, + * the loop can be skipped for that platform. + */ + if (needs_entropy_delay_adjustment()) + break; if (ret == -EAGAIN) /* * if here, the loop will rerun, diff --git a/drivers/crypto/caam/intern.h b/drivers/crypto/caam/intern.h index e92210e2ab76..572cf66c887a 100644 --- a/drivers/crypto/caam/intern.h +++ b/drivers/crypto/caam/intern.h @@ -186,6 +186,21 @@ static inline void caam_rng_exit(struct device *dev) {} #endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */ +#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API + +int caam_prng_register(struct device *dev); +void caam_prng_unregister(void *data); + +#else + +static inline int caam_prng_register(struct device *dev) +{ + return 0; +} + +static inline void caam_prng_unregister(void *data) {} +#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */ + #ifdef CONFIG_CAAM_QI int caam_qi_algapi_init(struct device *dev); diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index 7f2b1101f567..724fdec18bf9 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -39,6 +39,7 @@ static void register_algs(struct caam_drv_private_jr *jrpriv, caam_algapi_hash_init(dev); caam_pkc_init(dev); jrpriv->hwrng = !caam_rng_init(dev); + caam_prng_register(dev); caam_qi_algapi_init(dev); algs_unlock: @@ -53,7 +54,7 @@ static void unregister_algs(void) goto algs_unlock; caam_qi_algapi_exit(); - + caam_prng_unregister(NULL); caam_pkc_exit(); caam_algapi_hash_exit(); caam_algapi_exit(); diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c index 6c61817996a3..432a61aca0c5 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_main.c +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c @@ -269,15 +269,17 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) struct nitrox_device *nitrox_get_first_device(void) { - struct nitrox_device *ndev; + struct nitrox_device *ndev = NULL, *iter; mutex_lock(&devlist_lock); - list_for_each_entry(ndev, &ndevlist, list) { - if (nitrox_ready(ndev)) + list_for_each_entry(iter, &ndevlist, list) { + if (nitrox_ready(iter)) { + ndev = iter; break; + } } mutex_unlock(&devlist_lock); - if (&ndev->list == &ndevlist) + if (!ndev) return NULL; refcount_inc(&ndev->refcnt); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index ae7b44599914..c9c741ac8442 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -70,17 +70,23 @@ static unsigned int psp_get_capability(struct psp_device *psp) */ if (val == 0xffffffff) { dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n"); - return 0; + return -ENODEV; } + psp->capability = val; + + /* Detect if TSME and SME are both enabled */ + if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && + psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) && + cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) + dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n"); - return val; + return 0; } -static int psp_check_sev_support(struct psp_device *psp, - unsigned int capability) +static int psp_check_sev_support(struct psp_device *psp) { /* Check if device supports SEV feature */ - if (!(capability & 1)) { + if (!(psp->capability & PSP_CAPABILITY_SEV)) { dev_dbg(psp->dev, "psp does not support SEV\n"); return -ENODEV; } @@ -88,11 +94,10 @@ static int psp_check_sev_support(struct psp_device *psp, return 0; } -static int 
psp_check_tee_support(struct psp_device *psp, - unsigned int capability) +static int psp_check_tee_support(struct psp_device *psp) { /* Check if device supports TEE feature */ - if (!(capability & 2)) { + if (!(psp->capability & PSP_CAPABILITY_TEE)) { dev_dbg(psp->dev, "psp does not support TEE\n"); return -ENODEV; } @@ -100,30 +105,17 @@ static int psp_check_tee_support(struct psp_device *psp, return 0; } -static int psp_check_support(struct psp_device *psp, - unsigned int capability) -{ - int sev_support = psp_check_sev_support(psp, capability); - int tee_support = psp_check_tee_support(psp, capability); - - /* Return error if device neither supports SEV nor TEE */ - if (sev_support && tee_support) - return -ENODEV; - - return 0; -} - -static int psp_init(struct psp_device *psp, unsigned int capability) +static int psp_init(struct psp_device *psp) { int ret; - if (!psp_check_sev_support(psp, capability)) { + if (!psp_check_sev_support(psp)) { ret = sev_dev_init(psp); if (ret) return ret; } - if (!psp_check_tee_support(psp, capability)) { + if (!psp_check_tee_support(psp)) { ret = tee_dev_init(psp); if (ret) return ret; @@ -136,7 +128,6 @@ int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; struct psp_device *psp; - unsigned int capability; int ret; ret = -ENOMEM; @@ -155,11 +146,7 @@ int psp_dev_init(struct sp_device *sp) psp->io_regs = sp->io_map; - capability = psp_get_capability(psp); - if (!capability) - goto e_disable; - - ret = psp_check_support(psp, capability); + ret = psp_get_capability(psp); if (ret) goto e_disable; @@ -174,7 +161,7 @@ int psp_dev_init(struct sp_device *sp) goto e_err; } - ret = psp_init(psp, capability); + ret = psp_init(psp); if (ret) goto e_irq; diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index ef38e4135d81..d528eb04c3ef 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -45,6 +45,8 @@ struct psp_device { void *sev_data; void *tee_data; + + unsigned int capability; }; void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, @@ -57,4 +59,24 @@ void psp_clear_tee_irq_handler(struct psp_device *psp); struct psp_device *psp_get_master_device(void); +#define PSP_CAPABILITY_SEV BIT(0) +#define PSP_CAPABILITY_TEE BIT(1) +#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7) + +#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8 +/* + * The PSP doesn't directly store these bits in the capability register + * but instead copies them from the results of query command. + * + * The offsets from the query command are below, and shifted when used. 
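/*
 * Worked example of the bit layout the comment above describes: the
 * PSP_SECURITY_* query bits are copied into the capability register
 * starting at bit PSP_CAPABILITY_PSP_SECURITY_OFFSET, so TSME status
 * (bit 5 of the query result) is tested at bit 5 + 8 = 13. Standalone
 * userspace C mirroring the TSME check added to psp_get_capability().
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)                              (1u << (n))
#define PSP_CAPABILITY_PSP_SECURITY_OFFSET  8
#define PSP_SECURITY_TSME_STATUS            BIT(5)

static int tsme_enabled(uint32_t capability)
{
    return !!(capability &
              (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET));
}

int main(void)
{
    uint32_t cap = BIT(7) | BIT(13); /* security reporting + TSME active */

    printf("TSME %s\n", tsme_enabled(cap) ? "enabled" : "disabled");
    return 0;
}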
+ */ +#define PSP_SECURITY_FUSED_PART BIT(0) +#define PSP_SECURITY_DEBUG_LOCK_ON BIT(2) +#define PSP_SECURITY_TSME_STATUS BIT(5) +#define PSP_SECURITY_ANTI_ROLLBACK_STATUS BIT(7) +#define PSP_SECURITY_RPMC_PRODUCTION_ENABLED BIT(8) +#define PSP_SECURITY_RPMC_SPIROM_AVAILABLE BIT(9) +#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10) +#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11) + #endif /* __PSP_DEV_H */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 6ab93dfd478a..799b476fc3e8 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -23,6 +23,7 @@ #include <linux/gfp.h> #include <linux/cpufeature.h> #include <linux/fs.h> +#include <linux/fs_struct.h> #include <asm/smp.h> @@ -170,6 +171,31 @@ static void *sev_fw_alloc(unsigned long len) return page_address(page); } +static struct file *open_file_as_root(const char *filename, int flags, umode_t mode) +{ + struct file *fp; + struct path root; + struct cred *cred; + const struct cred *old_cred; + + task_lock(&init_task); + get_fs_root(init_task.fs, &root); + task_unlock(&init_task); + + cred = prepare_creds(); + if (!cred) + return ERR_PTR(-ENOMEM); + cred->fsuid = GLOBAL_ROOT_UID; + old_cred = override_creds(cred); + + fp = file_open_root(&root, filename, flags, mode); + path_put(&root); + + revert_creds(old_cred); + + return fp; +} + static int sev_read_init_ex_file(void) { struct sev_device *sev = psp_master->sev_data; @@ -181,7 +207,7 @@ static int sev_read_init_ex_file(void) if (!sev_init_ex_buffer) return -EOPNOTSUPP; - fp = filp_open(init_ex_path, O_RDONLY, 0); + fp = open_file_as_root(init_ex_path, O_RDONLY, 0); if (IS_ERR(fp)) { int ret = PTR_ERR(fp); @@ -217,7 +243,7 @@ static void sev_write_init_ex_file(void) if (!sev_init_ex_buffer) return; - fp = filp_open(init_ex_path, O_CREAT | O_WRONLY, 0600); + fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); if (IS_ERR(fp)) { dev_err(sev->dev, "SEV: could not open file for write, error %ld\n", @@ -435,7 +461,7 @@ static int __sev_platform_init_locked(int *error) * initialization function should succeed by replacing the state * with a reset state. */ - dev_dbg(sev->dev, "SEV: retrying INIT command"); + dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. 
Retrying once to reset PSP SEV state."); rc = init_function(&psp_ret); } if (error) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 88c672ad27e4..b5970ae54d0e 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -32,6 +32,67 @@ struct sp_pci { }; static struct sp_device *sp_dev_master; +#define attribute_show(name, def) \ +static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct sp_device *sp = dev_get_drvdata(d); \ + struct psp_device *psp = sp->psp_data; \ + int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \ + return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \ +} + +attribute_show(fused_part, FUSED_PART) +static DEVICE_ATTR_RO(fused_part); +attribute_show(debug_lock_on, DEBUG_LOCK_ON) +static DEVICE_ATTR_RO(debug_lock_on); +attribute_show(tsme_status, TSME_STATUS) +static DEVICE_ATTR_RO(tsme_status); +attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS) +static DEVICE_ATTR_RO(anti_rollback_status); +attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED) +static DEVICE_ATTR_RO(rpmc_production_enabled); +attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE) +static DEVICE_ATTR_RO(rpmc_spirom_available); +attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE) +static DEVICE_ATTR_RO(hsp_tpm_available); +attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED) +static DEVICE_ATTR_RO(rom_armor_enforced); + +static struct attribute *psp_attrs[] = { + &dev_attr_fused_part.attr, + &dev_attr_debug_lock_on.attr, + &dev_attr_tsme_status.attr, + &dev_attr_anti_rollback_status.attr, + &dev_attr_rpmc_production_enabled.attr, + &dev_attr_rpmc_spirom_available.attr, + &dev_attr_hsp_tpm_available.attr, + &dev_attr_rom_armor_enforced.attr, + NULL +}; + +static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct sp_device *sp = dev_get_drvdata(dev); + struct psp_device *psp = sp->psp_data; + + if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING)) + return 0444; + + return 0; +} + +static struct attribute_group psp_attr_group = { + .attrs = psp_attrs, + .is_visible = psp_security_is_visible, +}; + +static const struct attribute_group *psp_groups[] = { + &psp_attr_group, + NULL, +}; + static int sp_get_msix_irqs(struct sp_device *sp) { struct sp_pci *sp_pci = sp->dev_specific; @@ -391,6 +452,7 @@ static struct pci_driver sp_pci_driver = { .remove = sp_pci_remove, .shutdown = sp_pci_shutdown, .driver.pm = &sp_pci_pm_ops, + .dev_groups = psp_groups, }; int sp_pci_init(void) diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c index 11e0278c8631..6140e4927322 100644 --- a/drivers/crypto/ccree/cc_buffer_mgr.c +++ b/drivers/crypto/ccree/cc_buffer_mgr.c @@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx, req_ctx->mlli_params.mlli_dma_addr); } - dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); - dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); - if (src != dst) { - dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); + dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); + dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); + } else { + dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL); + dev_dbg(dev, "Unmapped req->src=%pK\n", 
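/*
 * What one instance of the attribute_show() macro in sp-pci.c above
 * expands to, written out for tsme_status (illustrative expansion only;
 * the driver does not carry a second copy):
 */
static ssize_t tsme_status_show(struct device *d,
                                struct device_attribute *attr, char *buf)
{
    struct sp_device *sp = dev_get_drvdata(d);
    struct psp_device *psp = sp->psp_data;
    int bit = PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET;

    return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0);
}
/*
 * DEVICE_ATTR_RO(tsme_status) then provides dev_attr_tsme_status, and
 * because the group is wired up through .dev_groups the file appears in
 * the PCI device's sysfs directory (roughly
 * /sys/bus/pci/devices/<bdf>/tsme_status) whenever psp_security_is_visible()
 * sees the security-reporting capability bit.
 */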
sg_virt(src)); } } @@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, u32 dummy = 0; int rc = 0; u32 mapped_nents = 0; + int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; mlli_params->curr_pool = NULL; @@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } /* Map the src SGL */ - rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents, + rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; @@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, } } else { /* Map the dst sg */ - rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) @@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) struct aead_req_ctx *areq_ctx = aead_request_ctx(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct cc_drvdata *drvdata = dev_get_drvdata(dev); + int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); if (areq_ctx->mac_buf_dma_addr) { dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, @@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req) sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); - dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, - DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); - dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, - DMA_BIDIRECTIONAL); + dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); } if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && @@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata, else size_for_map -= authsize; - rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); @@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) size_to_map += authsize; } - rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL, + rc = cc_map_sg(dev, req->src, size_to_map, + (req->src != req->dst ? 
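/*
 * The mapping rule the ccree changes above apply to cipher and AEAD
 * scatterlists: out-of-place requests stream the source to the device and
 * the destination back from it, while in-place requests need one
 * bidirectional mapping. Minimal sketch using the kernel DMA API directly,
 * not the driver's cc_map_sg() wrapper.
 */
static int map_req_sgls(struct device *dev, struct scatterlist *src,
                        struct scatterlist *dst, int src_nents, int dst_nents)
{
    if (src == dst)
        return dma_map_sg(dev, src, src_nents, DMA_BIDIRECTIONAL) ? 0 : -ENOMEM;

    if (!dma_map_sg(dev, src, src_nents, DMA_TO_DEVICE))
        return -ENOMEM;

    if (!dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
        return -ENOMEM;
    }

    return 0;
}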
DMA_TO_DEVICE : DMA_BIDIRECTIONAL), &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 790fa9058a36..7d1bee86d581 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c @@ -529,24 +529,26 @@ static int init_cc_resources(struct platform_device *plat_dev) goto post_req_mgr_err; } - /* Allocate crypto algs */ - rc = cc_cipher_alloc(new_drvdata); + /* hash must be allocated first due to use of send_request_init() + * and dependency of AEAD on it + */ + rc = cc_hash_alloc(new_drvdata); if (rc) { - dev_err(dev, "cc_cipher_alloc failed\n"); + dev_err(dev, "cc_hash_alloc failed\n"); goto post_buf_mgr_err; } - /* hash must be allocated before aead since hash exports APIs */ - rc = cc_hash_alloc(new_drvdata); + /* Allocate crypto algs */ + rc = cc_cipher_alloc(new_drvdata); if (rc) { - dev_err(dev, "cc_hash_alloc failed\n"); - goto post_cipher_err; + dev_err(dev, "cc_cipher_alloc failed\n"); + goto post_hash_err; } rc = cc_aead_alloc(new_drvdata); if (rc) { dev_err(dev, "cc_aead_alloc failed\n"); - goto post_hash_err; + goto post_cipher_err; } /* If we got here and FIPS mode is enabled @@ -558,10 +560,10 @@ static int init_cc_resources(struct platform_device *plat_dev) pm_runtime_put(dev); return 0; -post_hash_err: - cc_hash_free(new_drvdata); post_cipher_err: cc_cipher_free(new_drvdata); +post_hash_err: + cc_hash_free(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: @@ -593,8 +595,8 @@ static void cleanup_cc_resources(struct platform_device *plat_dev) (struct cc_drvdata *)platform_get_drvdata(plat_dev); cc_aead_free(drvdata); - cc_hash_free(drvdata); cc_cipher_free(drvdata); + cc_hash_free(drvdata); cc_buffer_mgr_fini(drvdata); cc_req_mgr_fini(drvdata); cc_fips_fini(drvdata); diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index e572f9982d4e..27e1fa912063 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -26,6 +26,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA1 select CRYPTO_SHA256 select CRYPTO_SHA512 + select CRYPTO_SM4 depends on PCI && PCI_MSI depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index 36ab30e9e654..9d529df0eab9 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -36,6 +36,12 @@ #define HPRE_DATA_WUSER_CFG 0x301040 #define HPRE_INT_MASK 0x301400 #define HPRE_INT_STATUS 0x301800 +#define HPRE_HAC_INT_MSK 0x301400 +#define HPRE_HAC_RAS_CE_ENB 0x301410 +#define HPRE_HAC_RAS_NFE_ENB 0x301414 +#define HPRE_HAC_RAS_FE_ENB 0x301418 +#define HPRE_HAC_INT_SET 0x301500 +#define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 #define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 @@ -107,6 +113,15 @@ #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 +#define HPRE_DFX_BASE 0x301000 +#define HPRE_DFX_COMMON1 0x301400 +#define HPRE_DFX_COMMON2 0x301A00 +#define HPRE_DFX_CORE 0x302000 +#define HPRE_DFX_BASE_LEN 0x55 +#define HPRE_DFX_COMMON1_LEN 0x41 +#define HPRE_DFX_COMMON2_LEN 0xE +#define HPRE_DFX_CORE_LEN 0x43 + static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { @@ -192,28 +207,32 @@ static const u64 
hpre_cluster_offsets[] = { }; static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = { - {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, - {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, - {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, - {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, - {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, + {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, + {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, + {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, + {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, + {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, }; static const struct debugfs_reg32 hpre_com_dfx_regs[] = { - {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, - {"AXQOS ", HPRE_VFG_AXQOS}, - {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, - {"QM_ARUSR_MCFG1 ", QM_ARUSER_M_CFG_1}, - {"QM_AWUSR_MCFG1 ", QM_AWUSER_M_CFG_1}, - {"BD_ENDIAN ", HPRE_BD_ENDIAN}, - {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, - {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, - {"POISON_BYPASS ", HPRE_POISON_BYPASS}, - {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, - {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, - {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, - {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, - {"INT_STATUS ", HPRE_INT_STATUS}, + {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, + {"AXQOS ", HPRE_VFG_AXQOS}, + {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, + {"BD_ENDIAN ", HPRE_BD_ENDIAN}, + {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, + {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, + {"POISON_BYPASS ", HPRE_POISON_BYPASS}, + {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, + {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, + {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, + {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, + {"INT_STATUS ", HPRE_INT_STATUS}, + {"INT_MASK ", HPRE_HAC_INT_MSK}, + {"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB}, + {"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB}, + {"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB}, + {"INT_SET ", HPRE_HAC_INT_SET}, + {"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM}, }; static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { @@ -226,6 +245,53 @@ static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { "invalid_req_cnt" }; +/* define the HPRE's dfx regs region and region length */ +static struct dfx_diff_registers hpre_diff_regs[] = { + { + .reg_offset = HPRE_DFX_BASE, + .reg_len = HPRE_DFX_BASE_LEN, + }, { + .reg_offset = HPRE_DFX_COMMON1, + .reg_len = HPRE_DFX_COMMON1_LEN, + }, { + .reg_offset = HPRE_DFX_COMMON2, + .reg_len = HPRE_DFX_COMMON2_LEN, + }, { + .reg_offset = HPRE_DFX_CORE, + .reg_len = HPRE_DFX_CORE_LEN, + }, +}; + +static int hpre_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(hpre_diff_regs)); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs); + +static int hpre_com_regs_show(struct seq_file *s, void *unused) +{ + hisi_qm_regs_dump(s, s->private); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); + +static int hpre_cluster_regs_show(struct seq_file *s, void *unused) +{ + hisi_qm_regs_dump(s, s->private); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); + static const struct kernel_param_ops hpre_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -779,24 +845,6 @@ static int hpre_debugfs_atomic64_set(void *data, u64 val) DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, hpre_debugfs_atomic64_set, "%llu\n"); -static int hpre_com_regs_show(struct seq_file *s, void *unused) -{ - hisi_qm_regs_dump(s, s->private); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); - -static int hpre_cluster_regs_show(struct 
seq_file *s, void *unused) -{ - hisi_qm_regs_dump(s, s->private); - - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); - static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { @@ -895,6 +943,7 @@ static int hpre_ctrl_debug_init(struct hisi_qm *qm) static void hpre_dfx_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; struct dentry *parent; @@ -906,6 +955,10 @@ static void hpre_dfx_debug_init(struct hisi_qm *qm) debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], &hpre_atomic64_ops); } + + if (qm->fun_type == QM_HW_PF && hpre_regs) + debugfs_create_file("diff_regs", 0444, parent, + qm, &hpre_diff_regs_fops); } static int hpre_debugfs_init(struct hisi_qm *qm) @@ -918,6 +971,13 @@ static int hpre_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; + ret = hisi_qm_diff_regs_init(qm, hpre_diff_regs, + ARRAY_SIZE(hpre_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init HPRE diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { @@ -931,12 +991,16 @@ static int hpre_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); +debugfs_remove: debugfs_remove_recursive(qm->debug.debug_root); return ret; } static void hpre_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); } @@ -969,6 +1033,82 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return hisi_qm_init(qm); } +static int hpre_show_last_regs_init(struct hisi_qm *qm) +{ + int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); + u8 clusters_num = hpre_cluster_num(qm); + struct qm_debug *debug = &qm->debug; + void __iomem *io_base; + int i, j, idx; + + debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num + + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < com_dfx_regs_num; i++) + debug->last_words[i] = readl_relaxed(qm->io_base + + hpre_com_dfx_regs[i].offset); + + for (i = 0; i < clusters_num; i++) { + io_base = qm->io_base + hpre_cluster_offsets[i]; + for (j = 0; j < cluster_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; + debug->last_words[idx] = readl_relaxed( + io_base + hpre_cluster_dfx_regs[j].offset); + } + } + + return 0; +} + +static void hpre_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void hpre_show_last_dfx_regs(struct hisi_qm *qm) +{ + int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); + u8 clusters_num = hpre_cluster_num(qm); + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + void __iomem *io_base; + int i, j, idx; + u32 val; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + /* dumps last word of the debugging registers during controller reset */ + for (i = 0; i < com_dfx_regs_num; i++) { + val = readl_relaxed(qm->io_base + 
hpre_com_dfx_regs[i].offset); + if (debug->last_words[i] != val) + pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n", + hpre_com_dfx_regs[i].name, debug->last_words[i], val); + } + + for (i = 0; i < clusters_num; i++) { + io_base = qm->io_base + hpre_cluster_offsets[i]; + for (j = 0; j < cluster_dfx_regs_num; j++) { + val = readl_relaxed(io_base + + hpre_cluster_dfx_regs[j].offset); + idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; + if (debug->last_words[idx] != val) + pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n", + i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val); + } + } +} + static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hpre_hw_error *err = hpre_hw_errors; @@ -1027,6 +1167,7 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .open_axi_master_ooo = hpre_open_axi_master_ooo, .open_sva_prefetch = hpre_open_sva_prefetch, .close_sva_prefetch = hpre_close_sva_prefetch, + .show_last_dfx_regs = hpre_show_last_dfx_regs, .err_info_init = hpre_err_info_init, }; @@ -1044,8 +1185,11 @@ static int hpre_pf_probe_init(struct hpre *hpre) qm->err_ini = &hpre_err_ini; qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); + ret = hpre_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); - return 0; + return ret; } static int hpre_probe_init(struct hpre *hpre) @@ -1131,6 +1275,7 @@ err_with_qm_start: hisi_qm_stop(qm, QM_NORMAL); err_with_err_init: + hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_with_qm_init: @@ -1161,6 +1306,7 @@ static void hpre_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) { hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; + hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); } diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 009132333d2b..b4ca2eb034d7 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -253,7 +253,15 @@ #define QM_QOS_MAX_CIR_U 6 #define QM_QOS_MAX_CIR_S 11 #define QM_QOS_VAL_MAX_LEN 32 - +#define QM_DFX_BASE 0x0100000 +#define QM_DFX_STATE1 0x0104000 +#define QM_DFX_STATE2 0x01040C8 +#define QM_DFX_COMMON 0x0000 +#define QM_DFX_BASE_LEN 0x5A +#define QM_DFX_STATE1_LEN 0x2E +#define QM_DFX_STATE2_LEN 0x11 +#define QM_DFX_COMMON_LEN 0xC3 +#define QM_DFX_REGS_LEN 4UL #define QM_AUTOSUSPEND_DELAY 3000 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ @@ -467,6 +475,23 @@ static const struct hisi_qm_hw_error qm_hw_error[] = { { /* sentinel */ } }; +/* define the QM's dfx regs region and region length */ +static struct dfx_diff_registers qm_diff_regs[] = { + { + .reg_offset = QM_DFX_BASE, + .reg_len = QM_DFX_BASE_LEN, + }, { + .reg_offset = QM_DFX_STATE1, + .reg_len = QM_DFX_STATE1_LEN, + }, { + .reg_offset = QM_DFX_STATE2, + .reg_len = QM_DFX_STATE2_LEN, + }, { + .reg_offset = QM_DFX_COMMON, + .reg_len = QM_DFX_COMMON_LEN, + }, +}; + static const char * const qm_db_timeout[] = { "sq", "cq", "eq", "aeq", }; @@ -687,13 +712,13 @@ static void qm_mb_write(struct hisi_qm *qm, const void *src) if (!IS_ENABLED(CONFIG_ARM64)) { memcpy_toio(fun_base, src, 16); - wmb(); + dma_wmb(); return; } asm volatile("ldp %0, %1, %3\n" "stp %0, %1, %2\n" - "dsb sy\n" + "dmb oshst\n" : "=&r" (tmp0), "=&r" (tmp1), "+Q" (*((char __iomem *)fun_base)) @@ -982,7 +1007,7 @@ static void qm_set_qp_disable(struct hisi_qp *qp, int offset) *addr = 1; /* make sure setup is completed */ - mb(); + smp_wmb(); } static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) @@ -1625,6 
+1650,156 @@ static int qm_regs_show(struct seq_file *s, void *unused) DEFINE_SHOW_ATTRIBUTE(qm_regs); +static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm, + const struct dfx_diff_registers *cregs, int reg_len) +{ + struct dfx_diff_registers *diff_regs; + u32 j, base_offset; + int i; + + diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL); + if (!diff_regs) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < reg_len; i++) { + if (!cregs[i].reg_len) + continue; + + diff_regs[i].reg_offset = cregs[i].reg_offset; + diff_regs[i].reg_len = cregs[i].reg_len; + diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len, + GFP_KERNEL); + if (!diff_regs[i].regs) + goto alloc_error; + + for (j = 0; j < diff_regs[i].reg_len; j++) { + base_offset = diff_regs[i].reg_offset + + j * QM_DFX_REGS_LEN; + diff_regs[i].regs[j] = readl(qm->io_base + base_offset); + } + } + + return diff_regs; + +alloc_error: + while (i > 0) { + i--; + kfree(diff_regs[i].regs); + } + kfree(diff_regs); + return ERR_PTR(-ENOMEM); +} + +static void dfx_regs_uninit(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len) +{ + int i; + + /* Setting the pointer is NULL to prevent double free */ + for (i = 0; i < reg_len; i++) { + kfree(dregs[i].regs); + dregs[i].regs = NULL; + } + kfree(dregs); + dregs = NULL; +} + +/** + * hisi_qm_diff_regs_init() - Allocate memory for registers. + * @qm: device qm handle. + * @dregs: diff registers handle. + * @reg_len: diff registers region length. + */ +int hisi_qm_diff_regs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, int reg_len) +{ + if (!qm || !dregs || reg_len <= 0) + return -EINVAL; + + if (qm->fun_type != QM_HW_PF) + return 0; + + qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + if (IS_ERR(qm->debug.qm_diff_regs)) + return PTR_ERR(qm->debug.qm_diff_regs); + + qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len); + if (IS_ERR(qm->debug.acc_diff_regs)) { + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + return PTR_ERR(qm->debug.acc_diff_regs); + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init); + +/** + * hisi_qm_diff_regs_uninit() - Free memory for registers. + * @qm: device qm handle. + * @reg_len: diff registers region length. + */ +void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len) +{ + if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF) + return; + + dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len); + dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); +} +EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit); + +/** + * hisi_qm_acc_diff_regs_dump() - Dump registers's value. + * @qm: device qm handle. + * @s: Debugfs file handle. + * @dregs: diff registers handle. + * @regs_len: diff registers region length. 
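/*
 * The idea behind the dfx "diff regs" support added above, reduced to a
 * standalone sketch: snapshot a register region once, then on dump re-read
 * it and print only the words that changed. read_reg() stands in for
 * readl(qm->io_base + offset); 4 plays the role of QM_DFX_REGS_LEN.
 * Illustrative only, not the driver code.
 */
#include <stdint.h>
#include <stdio.h>

struct diff_region {
    uint32_t base;       /* first register offset of the region */
    uint32_t count;      /* number of 32-bit registers in it */
    uint32_t *snapshot;  /* values captured at init time */
};

static void diff_region_snapshot(struct diff_region *r,
                                 uint32_t (*read_reg)(uint32_t off))
{
    uint32_t i;

    for (i = 0; i < r->count; i++)
        r->snapshot[i] = read_reg(r->base + i * 4);
}

static void diff_region_dump(const struct diff_region *r,
                             uint32_t (*read_reg)(uint32_t off))
{
    uint32_t i, off, val;

    for (i = 0; i < r->count; i++) {
        off = r->base + i * 4;
        val = read_reg(off);
        if (val != r->snapshot[i])   /* report only what changed */
            printf("0x%08x = 0x%08x ---> 0x%08x\n",
                   off, r->snapshot[i], val);
    }
}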
+ */ +void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, + struct dfx_diff_registers *dregs, int regs_len) +{ + u32 j, val, base_offset; + int i, ret; + + if (!qm || !s || !dregs || regs_len <= 0) + return; + + ret = hisi_qm_get_dfx_access(qm); + if (ret) + return; + + down_read(&qm->qps_lock); + for (i = 0; i < regs_len; i++) { + if (!dregs[i].reg_len) + continue; + + for (j = 0; j < dregs[i].reg_len; j++) { + base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN; + val = readl(qm->io_base + base_offset); + if (val != dregs[i].regs[j]) + seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n", + base_offset, dregs[i].regs[j], val); + } + } + up_read(&qm->qps_lock); + + hisi_qm_put_dfx_access(qm); +} +EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump); + +static int qm_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs, + ARRAY_SIZE(qm_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); + static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *pos) { @@ -2660,7 +2835,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating * qp memory fails. */ -struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) +static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) { struct hisi_qp *qp; int ret; @@ -2678,7 +2853,6 @@ struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) return qp; } -EXPORT_SYMBOL_GPL(hisi_qm_create_qp); /** * hisi_qm_release_qp() - Release a qp back to its qm. @@ -2686,7 +2860,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_create_qp); * * This function releases the resource of a qp. */ -void hisi_qm_release_qp(struct hisi_qp *qp) +static void hisi_qm_release_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; @@ -2704,7 +2878,6 @@ void hisi_qm_release_qp(struct hisi_qp *qp) qm_pm_put_sync(qm); } -EXPORT_SYMBOL_GPL(hisi_qm_release_qp); static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { @@ -3053,9 +3226,17 @@ static void qm_qp_event_notifier(struct hisi_qp *qp) wake_up_interruptible(&qp->uacce_q->wait); } + /* This function returns free number of qp in qm. */ static int hisi_qm_get_available_instances(struct uacce_device *uacce) { - return hisi_qm_get_free_qp_num(uacce->priv); + struct hisi_qm *qm = uacce->priv; + int ret; + + down_read(&qm->qps_lock); + ret = qm->qp_num - qm->qp_in_used; + up_read(&qm->qps_lock); + + return ret; } static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) @@ -3367,24 +3548,6 @@ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) } EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); -/** - * hisi_qm_get_free_qp_num() - Get free number of qp in qm. - * @qm: The qm which want to get free qp. - * - * This function return free number of qp in qm. 
- */ -int hisi_qm_get_free_qp_num(struct hisi_qm *qm) -{ - int ret; - - down_read(&qm->qps_lock); - ret = qm->qp_num - qm->qp_in_used; - up_read(&qm->qps_lock); - - return ret; -} -EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num); - static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) { struct device *dev = &qm->pdev->dev; @@ -3498,6 +3661,17 @@ static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) writel(state, qm->io_base + QM_VF_STATE); } +static void qm_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + kfree(debug->qm_last_words); + debug->qm_last_words = NULL; +} + /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. @@ -3509,6 +3683,8 @@ void hisi_qm_uninit(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; + qm_last_regs_uninit(qm); + qm_cmd_uninit(qm); kfree(qm->factor); down_write(&qm->qps_lock); @@ -3550,7 +3726,7 @@ EXPORT_SYMBOL_GPL(hisi_qm_uninit); * * qm hw v1 does not support this interface. */ -int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) +static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) { if (!base || !number) return -EINVAL; @@ -3562,7 +3738,6 @@ int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) return qm->ops->get_vft(qm, base, number); } -EXPORT_SYMBOL_GPL(hisi_qm_get_vft); /** * hisi_qm_set_vft() - Set vft to a qm. @@ -4484,6 +4659,7 @@ static void hisi_qm_set_algqos_init(struct hisi_qm *qm) */ void hisi_qm_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs; struct qm_dfx *dfx = &qm->debug.dfx; struct dentry *qm_d; void *data; @@ -4499,6 +4675,10 @@ void hisi_qm_debug_init(struct hisi_qm *qm) qm_create_debugfs_file(qm, qm->debug.qm_d, i); } + if (qm_regs) + debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, + qm, &qm_diff_regs_fops); + debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); @@ -5181,6 +5361,24 @@ static int qm_controller_reset_done(struct hisi_qm *qm) return 0; } +static void qm_show_last_dfx_regs(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + u32 val; + int i; + + if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) + return; + + for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) { + val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); + if (debug->qm_last_words[i] != val) + pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", + qm_dfx_regs[i].name, debug->qm_last_words[i], val); + } +} + static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; @@ -5196,6 +5394,10 @@ static int qm_controller_reset(struct hisi_qm *qm) return ret; } + qm_show_last_dfx_regs(qm); + if (qm->err_ini->show_last_dfx_regs) + qm->err_ini->show_last_dfx_regs(qm); + ret = qm_soft_reset(qm); if (ret) { pci_err(pdev, "Controller reset failed (%d)\n", ret); @@ -5906,6 +6108,26 @@ err_alloc_qdma: return ret; } +static void qm_last_regs_init(struct hisi_qm *qm) +{ + int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs); + struct qm_debug *debug = &qm->debug; + int i; + + if (qm->fun_type == QM_HW_VF) + return; + + debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), + GFP_KERNEL); + if (!debug->qm_last_words) + return; + + for (i = 0; i < dfx_regs_num; i++) { + debug->qm_last_words[i] = readl_relaxed(qm->io_base + + qm_dfx_regs[i].offset); + } +} + /** * 
hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. @@ -5958,6 +6180,8 @@ int hisi_qm_init(struct hisi_qm *qm) qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); + qm_last_regs_init(qm); + return 0; err_alloc_uacce: diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index a91635c348b5..6eebe739893c 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2113,7 +2113,6 @@ static int sec_skcipher_decrypt(struct skcipher_request *sk_req) .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ - CRYPTO_ALG_ALLOCATES_MEMORY |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ @@ -2366,7 +2365,6 @@ static int sec_aead_decrypt(struct aead_request *a_req) .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ - CRYPTO_ALG_ALLOCATES_MEMORY |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index 92fae706bdb2..4d85d2cbf376 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -110,6 +110,15 @@ #define SEC_SQE_MASK_LEN 48 #define SEC_SHAPER_TYPE_RATE 400 +#define SEC_DFX_BASE 0x301000 +#define SEC_DFX_CORE 0x302100 +#define SEC_DFX_COMMON1 0x301600 +#define SEC_DFX_COMMON2 0x301C00 +#define SEC_DFX_BASE_LEN 0x9D +#define SEC_DFX_CORE_LEN 0x32B +#define SEC_DFX_COMMON1_LEN 0x45 +#define SEC_DFX_COMMON2_LEN 0xBA + struct sec_hw_error { u32 int_msk; const char *msg; @@ -226,6 +235,34 @@ static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_BD_SAA8 ", 0x301C40}, }; +/* define the SEC's dfx regs region and region length */ +static struct dfx_diff_registers sec_diff_regs[] = { + { + .reg_offset = SEC_DFX_BASE, + .reg_len = SEC_DFX_BASE_LEN, + }, { + .reg_offset = SEC_DFX_COMMON1, + .reg_len = SEC_DFX_COMMON1_LEN, + }, { + .reg_offset = SEC_DFX_COMMON2, + .reg_len = SEC_DFX_COMMON2_LEN, + }, { + .reg_offset = SEC_DFX_CORE, + .reg_len = SEC_DFX_CORE_LEN, + }, +}; + +static int sec_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(sec_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(sec_diff_regs); + static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); @@ -729,6 +766,7 @@ DEFINE_SHOW_ATTRIBUTE(sec_regs); static int sec_core_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs; struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; @@ -749,6 +787,9 @@ static int sec_core_debug_init(struct hisi_qm *qm) if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops); + if (qm->fun_type == QM_HW_PF && sec_regs) + debugfs_create_file("diff_regs", 0444, tmp_d, + qm, &sec_diff_regs_fops); for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + @@ -790,6 +831,14 @@ static int sec_debugfs_init(struct hisi_qm *qm) sec_debugfs_root); qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; + + 
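/*
 * Context for dropping CRYPTO_ALG_ALLOCATES_MEMORY in sec_crypto.c above:
 * callers that cannot tolerate allocations on the data path request
 * algorithms with that flag set in the lookup mask (and clear in the
 * type), so hisi_sec2 only becomes eligible for such users once the flag
 * is gone. Sketch of that kind of request; whether a given caller should
 * do this is outside this patch.
 */
#include <crypto/skcipher.h>

static struct crypto_skcipher *alloc_non_allocating_cipher(const char *name)
{
    /* mask bit set, type bit clear: only algs without the flag match */
    return crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}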
ret = hisi_qm_diff_regs_init(qm, sec_diff_regs, + ARRAY_SIZE(sec_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init SEC diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); ret = sec_debug_init(qm); @@ -799,15 +848,66 @@ static int sec_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); +debugfs_remove: debugfs_remove_recursive(sec_debugfs_root); return ret; } static void sec_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); } +static int sec_show_last_regs_init(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + int i; + + debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs), + sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) + debug->last_words[i] = readl_relaxed(qm->io_base + + sec_dfx_regs[i].offset); + + return 0; +} + +static void sec_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void sec_show_last_dfx_regs(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + struct pci_dev *pdev = qm->pdev; + u32 val; + int i; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + /* dumps last word of the debugging registers during controller reset */ + for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) { + val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset); + if (val != debug->last_words[i]) + pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", + sec_dfx_regs[i].name, debug->last_words[i], val); + } +} + static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct sec_hw_error *errs = sec_hw_errors; @@ -874,6 +974,7 @@ static const struct hisi_qm_err_ini sec_err_ini = { .open_axi_master_ooo = sec_open_axi_master_ooo, .open_sva_prefetch = sec_open_sva_prefetch, .close_sva_prefetch = sec_close_sva_prefetch, + .show_last_dfx_regs = sec_show_last_dfx_regs, .err_info_init = sec_err_info_init, }; @@ -892,8 +993,11 @@ static int sec_pf_probe_init(struct sec_dev *sec) sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); + ret = sec_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); - return 0; + return ret; } static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) @@ -1067,6 +1171,7 @@ err_qm_stop: sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: + sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); @@ -1091,6 +1196,7 @@ static void sec_remove(struct pci_dev *pdev) if (qm->fun_type == QM_HW_PF) sec_debug_regs_clear(qm); + sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c index f7efc02b065f..2b6f2281cfd6 100644 --- a/drivers/crypto/hisilicon/sgl.c +++ b/drivers/crypto/hisilicon/sgl.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. 
*/ +#include <linux/align.h> #include <linux/dma-mapping.h> #include <linux/hisi_acc_qm.h> #include <linux/module.h> @@ -64,8 +65,9 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) return ERR_PTR(-EINVAL); - sgl_size = sizeof(struct acc_hw_sge) * sge_nr + - sizeof(struct hisi_acc_hw_sgl); + sgl_size = ALIGN(sizeof(struct acc_hw_sge) * sge_nr + + sizeof(struct hisi_acc_hw_sgl), + HISI_ACC_SGL_ALIGN_SIZE); /* * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1), diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 9520a4113c81..67869513e48c 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -521,7 +521,7 @@ static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *ctx, static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *ctx) { hisi_qm_stop_qp(ctx->qp); - hisi_qm_release_qp(ctx->qp); + hisi_qm_free_qps(&ctx->qp, 1); } static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 4534e1e107d1..9c925e9c0a2d 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -49,14 +49,18 @@ #define HZIP_QM_IDEL_STATUS 0x3040e4 -#define HZIP_CORE_DEBUG_COMP_0 0x302000 -#define HZIP_CORE_DEBUG_COMP_1 0x303000 -#define HZIP_CORE_DEBUG_DECOMP_0 0x304000 -#define HZIP_CORE_DEBUG_DECOMP_1 0x305000 -#define HZIP_CORE_DEBUG_DECOMP_2 0x306000 -#define HZIP_CORE_DEBUG_DECOMP_3 0x307000 -#define HZIP_CORE_DEBUG_DECOMP_4 0x308000 -#define HZIP_CORE_DEBUG_DECOMP_5 0x309000 +#define HZIP_CORE_DFX_BASE 0x301000 +#define HZIP_CLOCK_GATED_CONTL 0X301004 +#define HZIP_CORE_DFX_COMP_0 0x302000 +#define HZIP_CORE_DFX_COMP_1 0x303000 +#define HZIP_CORE_DFX_DECOMP_0 0x304000 +#define HZIP_CORE_DFX_DECOMP_1 0x305000 +#define HZIP_CORE_DFX_DECOMP_2 0x306000 +#define HZIP_CORE_DFX_DECOMP_3 0x307000 +#define HZIP_CORE_DFX_DECOMP_4 0x308000 +#define HZIP_CORE_DFX_DECOMP_5 0x309000 +#define HZIP_CORE_REGS_BASE_LEN 0xB0 +#define HZIP_CORE_REGS_DFX_LEN 0x28 #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 @@ -230,6 +234,64 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, }; +static const struct debugfs_reg32 hzip_com_dfx_regs[] = { + {"HZIP_CLOCK_GATE_CTRL ", 0x301004}, + {"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160}, + {"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164}, + {"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168}, + {"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C}, +}; + +static const struct debugfs_reg32 hzip_dump_dfx_regs[] = { + {"HZIP_GET_BD_NUM ", 0x00ull}, + {"HZIP_GET_RIGHT_BD ", 0x04ull}, + {"HZIP_GET_ERROR_BD ", 0x08ull}, + {"HZIP_DONE_BD_NUM ", 0x0cull}, + {"HZIP_MAX_DELAY ", 0x20ull}, +}; + +/* define the ZIP's dfx regs region and region length */ +static struct dfx_diff_registers hzip_diff_regs[] = { + { + .reg_offset = HZIP_CORE_DFX_BASE, + .reg_len = HZIP_CORE_REGS_BASE_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_COMP_0, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_COMP_1, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_0, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_1, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_2, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + 
.reg_offset = HZIP_CORE_DFX_DECOMP_3, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_4, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, { + .reg_offset = HZIP_CORE_DFX_DECOMP_5, + .reg_len = HZIP_CORE_REGS_DFX_LEN, + }, +}; + +static int hzip_diff_regs_show(struct seq_file *s, void *unused) +{ + struct hisi_qm *qm = s->private; + + hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, + ARRAY_SIZE(hzip_diff_regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs); static const struct kernel_param_ops zip_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -621,6 +683,7 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm) static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) { + struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs; struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); struct hisi_zip_dfx *dfx = &zip->dfx; struct dentry *tmp_dir; @@ -634,6 +697,10 @@ static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) 0644, tmp_dir, data, &zip_atomic64_ops); } + + if (qm->fun_type == QM_HW_PF && hzip_regs) + debugfs_create_file("diff_regs", 0444, tmp_dir, + qm, &hzip_diff_regs_fops); } static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) @@ -666,6 +733,13 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; + ret = hisi_qm_diff_regs_init(qm, hzip_diff_regs, + ARRAY_SIZE(hzip_diff_regs)); + if (ret) { + dev_warn(dev, "Failed to init ZIP diff regs!\n"); + goto debugfs_remove; + } + hisi_qm_debug_init(qm); if (qm->fun_type == QM_HW_PF) { @@ -679,6 +753,8 @@ static int hisi_zip_debugfs_init(struct hisi_qm *qm) return 0; failed_to_create: + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); +debugfs_remove: debugfs_remove_recursive(hzip_debugfs_root); return ret; } @@ -703,6 +779,8 @@ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) static void hisi_zip_debugfs_exit(struct hisi_qm *qm) { + hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); + debugfs_remove_recursive(qm->debug.debug_root); if (qm->fun_type == QM_HW_PF) { @@ -711,6 +789,87 @@ static void hisi_zip_debugfs_exit(struct hisi_qm *qm) } } +static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) +{ + int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); + struct qm_debug *debug = &qm->debug; + void __iomem *io_base; + int i, j, idx; + + debug->last_words = kcalloc(core_dfx_regs_num * HZIP_CORE_NUM + + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); + if (!debug->last_words) + return -ENOMEM; + + for (i = 0; i < com_dfx_regs_num; i++) { + io_base = qm->io_base + hzip_com_dfx_regs[i].offset; + debug->last_words[i] = readl_relaxed(io_base); + } + + for (i = 0; i < HZIP_CORE_NUM; i++) { + io_base = qm->io_base + core_offsets[i]; + for (j = 0; j < core_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * core_dfx_regs_num + j; + debug->last_words[idx] = readl_relaxed( + io_base + hzip_dump_dfx_regs[j].offset); + } + } + + return 0; +} + +static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm) +{ + struct qm_debug *debug = &qm->debug; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + kfree(debug->last_words); + debug->last_words = NULL; +} + +static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) +{ + int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); + int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); + 
struct qm_debug *debug = &qm->debug; + char buf[HZIP_BUF_SIZE]; + void __iomem *base; + int i, j, idx; + u32 val; + + if (qm->fun_type == QM_HW_VF || !debug->last_words) + return; + + for (i = 0; i < com_dfx_regs_num; i++) { + val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset); + if (debug->last_words[i] != val) + pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n", + hzip_com_dfx_regs[i].name, debug->last_words[i], val); + } + + for (i = 0; i < HZIP_CORE_NUM; i++) { + if (i < HZIP_COMP_CORE_NUM) + scnprintf(buf, sizeof(buf), "Comp_core-%d", i); + else + scnprintf(buf, sizeof(buf), "Decomp_core-%d", + i - HZIP_COMP_CORE_NUM); + base = qm->io_base + core_offsets[i]; + + pci_info(qm->pdev, "==>%s:\n", buf); + /* dump last word for dfx regs during control resetting */ + for (j = 0; j < core_dfx_regs_num; j++) { + idx = com_dfx_regs_num + i * core_dfx_regs_num + j; + val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset); + if (debug->last_words[idx] != val) + pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n", + hzip_dump_dfx_regs[j].name, debug->last_words[idx], val); + } + } +} + static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hisi_zip_hw_error *err = zip_hw_error; @@ -798,6 +957,7 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .open_sva_prefetch = hisi_zip_open_sva_prefetch, .close_sva_prefetch = hisi_zip_close_sva_prefetch, + .show_last_dfx_regs = hisi_zip_show_last_dfx_regs, .err_info_init = hisi_zip_err_info_init, }; @@ -805,6 +965,7 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) { struct hisi_qm *qm = &hisi_zip->qm; struct hisi_zip_ctrl *ctrl; + int ret; ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) @@ -820,7 +981,11 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); - return 0; + ret = hisi_zip_show_last_regs_init(qm); + if (ret) + pci_err(qm->pdev, "Failed to init last word regs!\n"); + + return ret; } static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) @@ -964,6 +1129,7 @@ err_qm_stop: hisi_qm_stop(qm, QM_NORMAL); err_dev_err_uninit: + hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_qm_uninit: @@ -985,6 +1151,7 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); + hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); hisi_zip_qm_uninit(qm); } diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 9ff885d50edf..9b1a158aec29 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -1997,3 +1997,12 @@ MODULE_AUTHOR("Igal Liberman <igall@marvell.com>"); MODULE_DESCRIPTION("Support for SafeXcel cryptographic engines: EIP97 & EIP197"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS(CRYPTO_INTERNAL); + +MODULE_FIRMWARE("ifpp.bin"); +MODULE_FIRMWARE("ipue.bin"); +MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin"); +MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin"); +MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin"); +MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin"); +MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin"); +MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin"); diff --git a/drivers/crypto/keembay/keembay-ocs-aes-core.c b/drivers/crypto/keembay/keembay-ocs-aes-core.c index e2a39fdaf623..9953f5590ac4 100644 --- a/drivers/crypto/keembay/keembay-ocs-aes-core.c 
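The qm_last_regs_init()/qm_show_last_dfx_regs() pair in the QM core and the matching sec_show_last_regs_*() and hisi_zip_show_last_regs_*() helpers above all share one shape: snapshot a fixed list of DFX registers into debug->last_words at probe time, then on controller reset re-read the list and log only the entries whose value changed. A condensed sketch of that shape with a hypothetical one-entry register list (foo_* names are illustrative; the qm fields and print format are as used above, and the keembay-ocs-aes-core.c diff resumes below):

/* assumes the same headers as the drivers above (hisi_acc_qm.h, debugfs.h,
 * io.h, pci.h, slab.h); foo_dfx_regs stands in for the real DFX lists */
static const struct debugfs_reg32 foo_dfx_regs[] = {
	{"FOO_STATUS ", 0x0},
};

static int foo_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(foo_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	/* remember every DFX register value at probe time */
	for (i = 0; i < ARRAY_SIZE(foo_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     foo_dfx_regs[i].offset);

	return 0;
}

static void foo_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* on controller reset, report only the registers that moved */
	for (i = 0; i < ARRAY_SIZE(foo_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + foo_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n",
				 foo_dfx_regs[i].name, debug->last_words[i], val);
	}
}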
+++ b/drivers/crypto/keembay/keembay-ocs-aes-core.c @@ -1598,7 +1598,6 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ocs_aes_dev *aes_dev; - struct resource *aes_mem; int rc; aes_dev = devm_kzalloc(dev, sizeof(*aes_dev), GFP_KERNEL); @@ -1616,13 +1615,7 @@ static int kmb_ocs_aes_probe(struct platform_device *pdev) } /* Get base register address. */ - aes_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!aes_mem) { - dev_err(dev, "Could not retrieve io mem resource\n"); - return -ENODEV; - } - - aes_dev->base_reg = devm_ioremap_resource(&pdev->dev, aes_mem); + aes_dev->base_reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(aes_dev->base_reg)) return PTR_ERR(aes_dev->base_reg); diff --git a/drivers/crypto/marvell/cesa/cipher.c b/drivers/crypto/marvell/cesa/cipher.c index b739d3b873dc..c6f2fa753b7c 100644 --- a/drivers/crypto/marvell/cesa/cipher.c +++ b/drivers/crypto/marvell/cesa/cipher.c @@ -624,7 +624,6 @@ struct skcipher_alg mv_cesa_ecb_des3_ede_alg = { .decrypt = mv_cesa_ecb_des3_ede_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, - .ivsize = DES3_EDE_BLOCK_SIZE, .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "mv-ecb-des3-ede", diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index f8f8542ce3e4..67530e90bbfe 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -896,7 +896,6 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, struct crypto_authenc_key_param *param; int enckeylen = 0, authkeylen = 0; struct rtattr *rta = (void *)key; - int status; if (!RTA_OK(rta, keylen)) return -EINVAL; @@ -938,11 +937,7 @@ static int otx2_cpt_aead_cbc_aes_sha_setkey(struct crypto_aead *cipher, ctx->enc_key_len = enckeylen; ctx->auth_key_len = authkeylen; - status = aead_hmac_init(cipher); - if (status) - return status; - - return 0; + return aead_hmac_init(cipher); } static int otx2_cpt_aead_ecb_null_sha_setkey(struct crypto_aead *cipher, diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c index 32a036ada5d0..f418817c0f43 100644 --- a/drivers/crypto/nx/nx-common-powernv.c +++ b/drivers/crypto/nx/nx-common-powernv.c @@ -827,7 +827,7 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, goto err_out; vas_init_rx_win_attr(&rxattr, coproc->ct); - rxattr.rx_fifo = (void *)rx_fifo; + rxattr.rx_fifo = rx_fifo; rxattr.rx_fifo_size = fifo_size; rxattr.lnotify_lpid = lpid; rxattr.lnotify_pid = pid; diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index fa4c350c1bf9..181fa1c8b3c7 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -14,6 +14,7 @@ static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, + { PCI_VDEVICE(INTEL, ADF_401XX_PCI_DEVICE_ID), }, { } }; MODULE_DEVICE_TABLE(pci, adf_pci_tbl); @@ -75,13 +76,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - /* Temporarily set the number of crypto instances to zero to avoid - * registering the crypto algorithms. 
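Earlier in this diff, the kmb_ocs_aes_probe() hunk folds platform_get_resource() plus devm_ioremap_resource() into a single devm_platform_ioremap_resource() call, dropping the local struct resource and the manual -ENODEV path along the way. A minimal sketch of the resulting probe shape (foo_probe() is hypothetical; the qat_4xxx adf_drv.c hunk continues below):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;

	/* looks up IORESOURCE_MEM index 0, requests and ioremaps it */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... rest of probe ... */
	return 0;
}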
- * This will be removed when the algorithms will support the - * CRYPTO_TFM_REQ_MAY_BACKLOG flag - */ - instances = 0; - for (i = 0; i < instances; i++) { val = i; bank = i * 2; diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c index b941fe3713ff..50d5afa26a9b 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -78,19 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void) return thrd_to_arb_map; } -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR])->virt_addr; - - /* Enable bundle and misc interrupts */ - ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF0_MASK_OFFSET, - ADF_C3XXX_SMIA0_MASK); - ADF_CSR_WR(addr, ADF_C3XXX_SMIAPF1_MASK_OFFSET, - ADF_C3XXX_SMIA1_MASK); -} - static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) { adf_gen2_cfg_iov_thds(accel_dev, enable, @@ -133,7 +120,7 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data) hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; + hw_data->enable_ints = adf_gen2_enable_ints; hw_data->reset_device = adf_reset_flr; hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h index 1b86f828725c..336a06f11dbd 100644 --- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h +++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h @@ -13,10 +13,6 @@ #define ADF_C3XXX_ACCELERATORS_MASK 0x7 #define ADF_C3XXX_ACCELENGINES_MASK 0x3F #define ADF_C3XXX_ETR_MAX_BANKS 16 -#define ADF_C3XXX_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) -#define ADF_C3XXX_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) -#define ADF_C3XXX_SMIA0_MASK 0xFFFF -#define ADF_C3XXX_SMIA1_MASK 0x1 #define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC /* AE to function mapping */ diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c index b1eac2f81faa..c00386fe6587 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c @@ -80,19 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void) return thrd_to_arb_map; } -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr; - - /* Enable bundle and misc interrupts */ - ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, - ADF_C62X_SMIA0_MASK); - ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, - ADF_C62X_SMIA1_MASK); -} - static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) { adf_gen2_cfg_iov_thds(accel_dev, enable, @@ -135,7 +122,7 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data) hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; + hw_data->enable_ints = adf_gen2_enable_ints; hw_data->reset_device = adf_reset_flr; hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer; hw_data->disable_iov = adf_disable_sriov; diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h index 68c3436bd3aa..008c0a3a9769 100644 --- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h +++ 
b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h @@ -13,10 +13,6 @@ #define ADF_C62X_ACCELERATORS_MASK 0x1F #define ADF_C62X_ACCELENGINES_MASK 0x3FF #define ADF_C62X_ETR_MAX_BANKS 16 -#define ADF_C62X_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) -#define ADF_C62X_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) -#define ADF_C62X_SMIA0_MASK 0xFFFF -#define ADF_C62X_SMIA1_MASK 0x1 #define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC /* AE to function mapping */ diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index f25a6c8edfc7..04f058acc4d3 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \ qat_crypto.o \ qat_algs.o \ qat_asym_algs.o \ + qat_algs_send.o \ qat_uclo.o \ qat_hal.o diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h index a03c6cf72331..ede6458c9dbf 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h @@ -19,6 +19,8 @@ #define ADF_4XXX_DEVICE_NAME "4xxx" #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 +#define ADF_401XX_PCI_DEVICE_ID 0x4942 +#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 @@ -152,9 +154,9 @@ struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); u32 (*get_vf2pf_offset)(u32 i); - u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr); void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask); - void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask); + void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr); + u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr); int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg, u32 pfvf_offset, struct mutex *csr_lock); struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev, diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h index e8c9b77c0d66..0464fa257929 100644 --- a/drivers/crypto/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/qat/qat_common/adf_common_drv.h @@ -195,10 +195,8 @@ bool adf_misc_wq_queue_work(struct work_struct *work); #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); -void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - u32 vf_mask); -void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, - u32 vf_mask); +void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask); +void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 vf_nr); int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev); @@ -217,14 +215,6 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } -static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) -{ -} - -static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev) -{ -} - static inline int adf_init_pf_wq(void) { return 0; @@ -243,10 +233,6 @@ static inline void adf_exit_vf_wq(void) { } -static inline void adf_flush_vf_wq(struct adf_accel_dev *accel_dev) -{ -} 
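The adf_pfvf_ops rework above drops get_vf2pf_sources() and splits the old disable_vf2pf_interrupts() hook in two: disable_all_vf2pf_interrupts() for SR-IOV teardown, and disable_pending_vf2pf_interrupts(), which masks the currently pending VF sources and returns them in one step so the interrupt handler no longer reads the source register separately. A rough sketch of the intended caller flow (locking and op names follow the adf_isr.c hunk later in this diff; the per-VF handling is driver specific and only indicated by a comment):

/* sketch only, assumes the qat_common driver headers are in scope */
static bool foo_handle_vf2pf_int(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long vf_mask;
	int i;

	/* mask the pending VF2PF sources and fetch them in one step,
	 * serialized against adf_enable_vf2pf_interrupts() */
	spin_lock(&accel_dev->pf.vf2pf_ints_lock);
	vf_mask = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr);
	spin_unlock(&accel_dev->pf.vf2pf_ints_lock);

	if (!vf_mask)
		return false;

	for_each_set_bit(i, &vf_mask, BITS_PER_LONG)
		; /* queue the bottom half for VF i, then re-enable its irq */

	return true;
}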
- #endif static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c index 57035b7dd4b2..d1884547b5a1 100644 --- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c @@ -98,6 +98,19 @@ void adf_gen2_get_arb_info(struct arb_info *arb_info) } EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info); +void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr = adf_get_pmisc_base(accel_dev); + u32 val; + + val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1; + + /* Enable bundle and misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val); + ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK); +} +EXPORT_SYMBOL_GPL(adf_gen2_enable_ints); + static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) { return BUILD_RING_BASE_ADDR(addr, size); diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h index f2e0451b11c0..e4bc07529be4 100644 --- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h +++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h @@ -145,6 +145,11 @@ do { \ #define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10) #define ADF_GEN2_ERRSSMSH_EN BIT(3) +/* Interrupts */ +#define ADF_GEN2_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) +#define ADF_GEN2_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) +#define ADF_GEN2_SMIA1_MASK 0x1 + u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self); u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self); void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev); @@ -153,6 +158,7 @@ void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable, void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info); void adf_gen2_get_arb_info(struct arb_info *arb_info); +void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev); u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev); void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); diff --git a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c index 1a9072aac2ca..70ef11963938 100644 --- a/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c +++ b/drivers/crypto/qat/qat_common/adf_gen2_pfvf.c @@ -13,8 +13,9 @@ #include "adf_pfvf_utils.h" /* VF2PF interrupts */ +#define ADF_GEN2_VF_MSK 0xFFFF #define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9) -#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9) +#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & ADF_GEN2_VF_MSK) << 9) #define ADF_GEN2_PF_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04)) #define ADF_GEN2_VF_PF2VF_OFFSET 0x200 @@ -50,43 +51,60 @@ static u32 adf_gen2_vf_get_pfvf_offset(u32 i) return ADF_GEN2_VF_PF2VF_OFFSET; } -static u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr) -{ - u32 errsou3, errmsk3, vf_int_mask; - - /* Get the interrupt sources triggered by VFs */ - errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3); - vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3); - - /* To avoid adding duplicate entries to work queue, clear - * vf_int_mask_sets bits that are already masked in ERRMSK register. 
- */ - errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3); - vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3); - - return vf_int_mask; -} - -static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, - u32 vf_mask) +static void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) { /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */ - if (vf_mask & 0xFFFF) { + if (vf_mask & ADF_GEN2_VF_MSK) { u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3) & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask); ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val); } } -static void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, - u32 vf_mask) +static void adf_gen2_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr) { /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */ - if (vf_mask & 0xFFFF) { - u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3) - | ADF_GEN2_ERR_MSK_VF2PF(vf_mask); - ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val); - } + u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3) + | ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val); +} + +static u32 adf_gen2_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) +{ + u32 sources, disabled, pending; + u32 errsou3, errmsk3; + + /* Get the interrupt sources triggered by VFs */ + errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3); + sources = ADF_GEN2_ERR_REG_VF2PF(errsou3); + + if (!sources) + return 0; + + /* Get the already disabled interrupts */ + errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3); + disabled = ADF_GEN2_ERR_REG_VF2PF(errmsk3); + + pending = sources & ~disabled; + if (!pending) + return 0; + + /* Due to HW limitations, when disabling the interrupts, we can't + * just disable the requested sources, as this would lead to missed + * interrupts if ERRSOU3 changes just before writing to ERRMSK3. + * To work around it, disable all and re-enable only the sources that + * are not in vf_mask and were not already disabled. Re-enabling will + * trigger a new interrupt for the sources that have changed in the + * meantime, if any. 
+ */ + errmsk3 |= ADF_GEN2_ERR_MSK_VF2PF(ADF_GEN2_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + + errmsk3 &= ADF_GEN2_ERR_MSK_VF2PF(sources | disabled); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + + /* Return the sources of the (new) interrupt(s) */ + return pending; } static u32 gen2_csr_get_int_bit(enum gen2_csr_pos offset) @@ -362,9 +380,9 @@ void adf_gen2_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops) pfvf_ops->enable_comms = adf_enable_pf2vf_comms; pfvf_ops->get_pf2vf_offset = adf_gen2_pf_get_pfvf_offset; pfvf_ops->get_vf2pf_offset = adf_gen2_pf_get_pfvf_offset; - pfvf_ops->get_vf2pf_sources = adf_gen2_get_vf2pf_sources; pfvf_ops->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts; - pfvf_ops->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts; + pfvf_ops->disable_all_vf2pf_interrupts = adf_gen2_disable_all_vf2pf_interrupts; + pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen2_disable_pending_vf2pf_interrupts; pfvf_ops->send_msg = adf_gen2_pf2vf_send; pfvf_ops->recv_msg = adf_gen2_vf2pf_recv; } diff --git a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c index d80d493a7756..8e8efe93f3ee 100644 --- a/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c +++ b/drivers/crypto/qat/qat_common/adf_gen4_pfvf.c @@ -15,6 +15,7 @@ /* VF2PF interrupt source registers */ #define ADF_4XXX_VM2PF_SOU 0x41A180 #define ADF_4XXX_VM2PF_MSK 0x41A1C0 +#define ADF_GEN4_VF_MSK 0xFFFF #define ADF_PFVF_GEN4_MSGTYPE_SHIFT 2 #define ADF_PFVF_GEN4_MSGTYPE_MASK 0x3F @@ -36,32 +37,48 @@ static u32 adf_gen4_pf_get_vf2pf_offset(u32 i) return ADF_4XXX_VM2PF_OFFSET(i); } -static u32 adf_gen4_get_vf2pf_sources(void __iomem *pmisc_addr) +static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) { - u32 sou, mask; + u32 val; - sou = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU); - mask = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK); - - return sou & ~mask; + val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask; + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val); } -static void adf_gen4_enable_vf2pf_interrupts(void __iomem *pmisc_addr, - u32 vf_mask) +static void adf_gen4_disable_all_vf2pf_interrupts(void __iomem *pmisc_addr) { - unsigned int val; - - val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) & ~vf_mask; - ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val); + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK); } -static void adf_gen4_disable_vf2pf_interrupts(void __iomem *pmisc_addr, - u32 vf_mask) +static u32 adf_gen4_disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) { - unsigned int val; + u32 sources, disabled, pending; + + /* Get the interrupt sources triggered by VFs */ + sources = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_SOU); + if (!sources) + return 0; + + /* Get the already disabled interrupts */ + disabled = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK); + + pending = sources & ~disabled; + if (!pending) + return 0; + + /* Due to HW limitations, when disabling the interrupts, we can't + * just disable the requested sources, as this would lead to missed + * interrupts if VM2PF_SOU changes just before writing to VM2PF_MSK. + * To work around it, disable all and re-enable only the sources that + * are not in vf_mask and were not already disabled. Re-enabling will + * trigger a new interrupt for the sources that have changed in the + * meantime, if any. 
+ */ + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, ADF_GEN4_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, disabled | sources); - val = ADF_CSR_RD(pmisc_addr, ADF_4XXX_VM2PF_MSK) | vf_mask; - ADF_CSR_WR(pmisc_addr, ADF_4XXX_VM2PF_MSK, val); + /* Return the sources of the (new) interrupt(s) */ + return pending; } static int adf_gen4_pfvf_send(struct adf_accel_dev *accel_dev, @@ -96,10 +113,16 @@ static struct pfvf_message adf_gen4_pfvf_recv(struct adf_accel_dev *accel_dev, u32 pfvf_offset, u8 compat_ver) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + struct pfvf_message msg = { 0 }; u32 csr_val; /* Read message from the CSR */ csr_val = ADF_CSR_RD(pmisc_addr, pfvf_offset); + if (!(csr_val & ADF_PFVF_INT)) { + dev_info(&GET_DEV(accel_dev), + "Spurious PFVF interrupt, msg 0x%.8x. Ignored\n", csr_val); + return msg; + } /* We can now acknowledge the message reception by clearing the * interrupt bit @@ -115,9 +138,9 @@ void adf_gen4_init_pf_pfvf_ops(struct adf_pfvf_ops *pfvf_ops) pfvf_ops->enable_comms = adf_enable_pf2vf_comms; pfvf_ops->get_pf2vf_offset = adf_gen4_pf_get_pf2vf_offset; pfvf_ops->get_vf2pf_offset = adf_gen4_pf_get_vf2pf_offset; - pfvf_ops->get_vf2pf_sources = adf_gen4_get_vf2pf_sources; pfvf_ops->enable_vf2pf_interrupts = adf_gen4_enable_vf2pf_interrupts; - pfvf_ops->disable_vf2pf_interrupts = adf_gen4_disable_vf2pf_interrupts; + pfvf_ops->disable_all_vf2pf_interrupts = adf_gen4_disable_all_vf2pf_interrupts; + pfvf_ops->disable_pending_vf2pf_interrupts = adf_gen4_disable_pending_vf2pf_interrupts; pfvf_ops->send_msg = adf_gen4_pfvf_send; pfvf_ops->recv_msg = adf_gen4_pfvf_recv; } diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c index a35149f8bf1e..ad9e135b8560 100644 --- a/drivers/crypto/qat/qat_common/adf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_isr.c @@ -66,42 +66,39 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags); } -void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask) +void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); unsigned long flags; spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags); - GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask); + GET_PFVF_OPS(accel_dev)->disable_all_vf2pf_interrupts(pmisc_addr); spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags); } -static void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, - u32 vf_mask) +static u32 adf_disable_pending_vf2pf_interrupts(struct adf_accel_dev *accel_dev) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 pending; spin_lock(&accel_dev->pf.vf2pf_ints_lock); - GET_PFVF_OPS(accel_dev)->disable_vf2pf_interrupts(pmisc_addr, vf_mask); + pending = GET_PFVF_OPS(accel_dev)->disable_pending_vf2pf_interrupts(pmisc_addr); spin_unlock(&accel_dev->pf.vf2pf_ints_lock); + + return pending; } static bool adf_handle_vf2pf_int(struct adf_accel_dev *accel_dev) { - void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); bool irq_handled = false; unsigned long vf_mask; - /* Get the interrupt sources triggered by VFs */ - vf_mask = GET_PFVF_OPS(accel_dev)->get_vf2pf_sources(pmisc_addr); - + /* Get the interrupt sources triggered by VFs, except for those already disabled */ + vf_mask = adf_disable_pending_vf2pf_interrupts(accel_dev); if (vf_mask) { struct adf_accel_vf_info *vf_info; int i; - /* 
Disable VF2PF interrupts for VFs with pending ints */ - adf_disable_vf2pf_interrupts_irq(accel_dev, vf_mask); - /* * Handle VF2PF interrupt unless the VF is malicious and * is attempting to flood the host OS with VF2PF interrupts. diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h index 9c37a2661392..204a42438992 100644 --- a/drivers/crypto/qat/qat_common/adf_pfvf_msg.h +++ b/drivers/crypto/qat/qat_common/adf_pfvf_msg.h @@ -8,8 +8,8 @@ /* * PF<->VF Gen2 Messaging format * - * The PF has an array of 32-bit PF2VF registers, one for each VF. The - * PF can access all these registers; each VF can access only the one + * The PF has an array of 32-bit PF2VF registers, one for each VF. The + * PF can access all these registers while each VF can access only the one * register associated with that particular VF. * * The register functionally is split into two parts: diff --git a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c index 588352de1ef0..388e58bcbcaf 100644 --- a/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/qat/qat_common/adf_pfvf_pf_proto.c @@ -154,7 +154,7 @@ static struct pfvf_message handle_blkmsg_req(struct adf_accel_vf_info *vf_info, if (FIELD_GET(ADF_VF2PF_BLOCK_CRC_REQ_MASK, req.data)) { dev_dbg(&GET_DEV(vf_info->accel_dev), "BlockMsg of type %d for CRC over %d bytes received from VF%d\n", - blk_type, blk_byte, vf_info->vf_nr); + blk_type, blk_byte + 1, vf_info->vf_nr); if (!adf_pf2vf_blkmsg_get_data(vf_info, blk_type, blk_byte, byte_max, &resp_data, @@ -242,7 +242,9 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, "VersionRequest received from VF%d (vers %d) to PF (vers %d)\n", vf_nr, vf_compat_ver, ADF_PFVF_COMPAT_THIS_VERSION); - if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) + if (vf_compat_ver == 0) + compat = ADF_PF2VF_VF_INCOMPATIBLE; + else if (vf_compat_ver <= ADF_PFVF_COMPAT_THIS_VERSION) compat = ADF_PF2VF_VF_COMPATIBLE; else compat = ADF_PF2VF_VF_COMPAT_UNKNOWN; diff --git a/drivers/crypto/qat/qat_common/adf_sriov.c b/drivers/crypto/qat/qat_common/adf_sriov.c index b960bca1f9d2..f38b2ffde146 100644 --- a/drivers/crypto/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/qat/qat_common/adf_sriov.c @@ -3,7 +3,6 @@ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/device.h> -#include <linux/iommu.h> #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_pfvf_pf_msg.h" @@ -74,8 +73,7 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) hw_data->configure_iov_threads(accel_dev, true); /* Enable VF to PF interrupts for all VFs */ - if (hw_data->pfvf_ops.get_pf2vf_offset) - adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1); + adf_enable_vf2pf_interrupts(accel_dev, BIT_ULL(totalvfs) - 1); /* * Due to the hardware design, when SR-IOV and the ring arbiter @@ -104,22 +102,18 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) return; - if (hw_data->pfvf_ops.get_pf2vf_offset) - adf_pf2vf_notify_restarting(accel_dev); - + adf_pf2vf_notify_restarting(accel_dev); pci_disable_sriov(accel_to_pci_dev(accel_dev)); /* Disable VF to PF interrupts */ - if (hw_data->pfvf_ops.get_pf2vf_offset) - adf_disable_vf2pf_interrupts(accel_dev, GENMASK(31, 0)); + adf_disable_all_vf2pf_interrupts(accel_dev); /* Clear Valid bits in AE Thread to PCIe Function Mapping */ if (hw_data->configure_iov_threads) hw_data->configure_iov_threads(accel_dev, false); - for (i = 
0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) { + for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) mutex_destroy(&vf->pf2vf_lock); - } kfree(accel_dev->pf.vf_info); accel_dev->pf.vf_info = NULL; @@ -176,7 +170,7 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) return -EFAULT; } - if (!iommu_present(&pci_bus_type)) + if (!device_iommu_mapped(&pdev->dev)) dev_warn(&pdev->dev, "IOMMU should be enabled for SR-IOV to work correctly\n"); if (accel_dev->pf.vf_info) { diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 8ba28409fb74..630d0483c4e0 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -8,6 +8,9 @@ #include "adf_cfg.h" #include "adf_common_drv.h" +#define ADF_MAX_RING_THRESHOLD 80 +#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100) + static inline u32 adf_modulo(u32 data, u32 shift) { u32 div = data >> shift; @@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) bank->irq_mask); } +bool adf_ring_nearly_full(struct adf_etr_ring_data *ring) +{ + return atomic_read(ring->inflights) > ring->threshold; +} + int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); @@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + int max_inflights; u32 ring_num; int ret; @@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); ring->head = 0; ring->tail = 0; + max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); + ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD); atomic_set(ring->inflights, 0); ret = adf_init_ring(ring); if (ret) diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 2c95f1697c76..e6ef6f9b7691 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr); +bool adf_ring_nearly_full(struct adf_etr_ring_data *ring); int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); void adf_remove_ring(struct adf_etr_ring_data *ring); #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index 501bcf0f1809..8b2c92ba7ca1 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -22,6 +22,7 @@ struct adf_etr_ring_data { spinlock_t lock; /* protects ring data struct */ u16 head; u16 tail; + u32 threshold; u8 ring_number; u8 ring_size; u8 msg_size; diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c index 86c3bd0c9c2b..8c95fcd8e64b 100644 --- a/drivers/crypto/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c @@ -70,6 +70,7 @@ static void adf_dev_stop_async(struct work_struct *work) container_of(work, struct adf_vf_stop_data, work); struct adf_accel_dev *accel_dev = stop_data->accel_dev; + 
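The adf_transport.c hunk above reserves headroom on each ring: ADF_MAX_RING_THRESHOLD is 80, each ring's threshold is set to 80% of its maximum in-flight requests, and adf_ring_nearly_full() reports true once the inflight counter exceeds that threshold, which the backlog logic added later in this diff uses as its cue to queue a request instead of submitting it. A small worked example, assuming a ring whose ADF_MAX_INFLIGHTS() evaluates to 256 (the adf_vf_isr.c hunk resumes below):

/* threshold arithmetic from the hunk above, with an assumed ring
 * capacity of 256 in-flight requests for illustration */
#define ADF_MAX_RING_THRESHOLD		80
#define ADF_PERCENT(tot, percent)	(((tot) * (percent)) / 100)

/* 256 * 80 / 100 = 204: once more than 204 requests are in flight,
 * adf_ring_nearly_full() returns true and new work is backlogged */
static const unsigned int foo_example_threshold =
	ADF_PERCENT(256, ADF_MAX_RING_THRESHOLD);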
adf_dev_restarting_notify(accel_dev); adf_dev_stop(accel_dev); adf_dev_shutdown(accel_dev); diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index f998ed58457c..148edbe379e3 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -17,7 +17,7 @@ #include <crypto/xts.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" -#include "adf_transport.h" +#include "qat_algs_send.h" #include "adf_common_drv.h" #include "qat_crypto.h" #include "icp_qat_hw.h" @@ -46,19 +46,6 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; -struct qat_alg_buf { - u32 len; - u32 resrvd; - u64 addr; -} __packed; - -struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; - struct qat_alg_buf bufers[]; -} __packed __aligned(64); - /* Common content descriptor */ struct qat_alg_cd { union { @@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, bl->bufers[i].len, DMA_BIDIRECTIONAL); dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - kfree(bl); + + if (!qat_req->buf.sgl_src_valid) + kfree(bl); + if (blp != blpout) { /* If out of place operation dma unmap only data */ int bufless = blout->num_bufs - blout->num_mapped_bufs; @@ -704,14 +694,17 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, DMA_BIDIRECTIONAL); } dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); - kfree(blout); + + if (!qat_req->buf.sgl_dst_valid) + kfree(blout); } } static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct scatterlist *sgl, struct scatterlist *sglout, - struct qat_crypto_request *qat_req) + struct qat_crypto_request *qat_req, + gfp_t flags) { struct device *dev = &GET_DEV(inst->accel_dev); int i, sg_nctr = 0; @@ -721,15 +714,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dma_addr_t blp = DMA_MAPPING_ERROR; dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; - size_t sz_out, sz = struct_size(bufl, bufers, n + 1); + size_t sz_out, sz = struct_size(bufl, bufers, n); + int node = dev_to_node(&GET_DEV(inst->accel_dev)); if (unlikely(!n)) return -EINVAL; - bufl = kzalloc_node(sz, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!bufl)) - return -ENOMEM; + qat_req->buf.sgl_src_valid = false; + qat_req->buf.sgl_dst_valid = false; + + if (n > QAT_MAX_BUFF_DESC) { + bufl = kzalloc_node(sz, flags, node); + if (unlikely(!bufl)) + return -ENOMEM; + } else { + bufl = &qat_req->buf.sgl_src.sgl_hdr; + memset(bufl, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_src_valid = true; + } for_each_sg(sgl, sg, n, i) bufl->bufers[i].addr = DMA_MAPPING_ERROR; @@ -760,12 +762,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf *bufers; n = sg_nents(sglout); - sz_out = struct_size(buflout, bufers, n + 1); + sz_out = struct_size(buflout, bufers, n); sg_nctr = 0; - buflout = kzalloc_node(sz_out, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!buflout)) - goto err_in; + + if (n > QAT_MAX_BUFF_DESC) { + buflout = kzalloc_node(sz_out, flags, node); + if (unlikely(!buflout)) + goto err_in; + } else { + buflout = &qat_req->buf.sgl_dst.sgl_hdr; + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_dst_valid = true; + } bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) @@ -810,7 +818,9 @@ err_out: dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, DMA_BIDIRECTIONAL); - kfree(buflout); + + if 
(!qat_req->buf.sgl_dst_valid) + kfree(buflout); err_in: if (!dma_mapping_error(dev, blp)) @@ -823,7 +833,8 @@ err_in: bufl->bufers[i].len, DMA_BIDIRECTIONAL); - kfree(bufl); + if (!qat_req->buf.sgl_src_valid) + kfree(bufl); dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM; @@ -925,8 +936,25 @@ void qat_alg_callback(void *resp) struct icp_qat_fw_la_resp *qat_resp = resp; struct qat_crypto_request *qat_req = (void *)(__force long)qat_resp->opaque_data; + struct qat_instance_backlog *backlog = qat_req->alg_req.backlog; qat_req->cb(qat_resp, qat_req); + + qat_alg_send_backlog(backlog); +} + +static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req, + struct qat_crypto_instance *inst, + struct crypto_async_request *base) +{ + struct qat_alg_req *alg_req = &qat_req->alg_req; + + alg_req->fw_req = (u32 *)&qat_req->req; + alg_req->tx_ring = inst->sym_tx; + alg_req->base = base; + alg_req->backlog = &inst->backlog; + + return qat_alg_send_message(alg_req); } static int qat_alg_aead_dec(struct aead_request *areq) @@ -939,14 +967,15 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; int digst_size = crypto_aead_authsize(aead_tfm); - int ret, ctr = 0; + gfp_t f = qat_algs_alloc_flags(&areq->base); + int ret; u32 cipher_len; cipher_len = areq->cryptlen - digst_size; if (cipher_len % AES_BLOCK_SIZE != 0) return -EINVAL; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); if (unlikely(ret)) return ret; @@ -965,15 +994,12 @@ static int qat_alg_aead_dec(struct aead_request *areq) auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; - do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; } static int qat_alg_aead_enc(struct aead_request *areq) @@ -984,14 +1010,15 @@ static int qat_alg_aead_enc(struct aead_request *areq) struct qat_crypto_request *qat_req = aead_request_ctx(areq); struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_auth_req_params *auth_param; + gfp_t f = qat_algs_alloc_flags(&areq->base); struct icp_qat_fw_la_bulk_req *msg; u8 *iv = areq->iv; - int ret, ctr = 0; + int ret; if (areq->cryptlen % AES_BLOCK_SIZE != 0) return -EINVAL; - ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req, f); if (unlikely(ret)) return ret; @@ -1013,15 +1040,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + areq->cryptlen; - do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; } static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx, @@ -1173,13 +1196,14 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct 
qat_crypto_request *qat_req = skcipher_request_ctx(req); struct icp_qat_fw_la_cipher_req_params *cipher_param; + gfp_t f = qat_algs_alloc_flags(&req->base); struct icp_qat_fw_la_bulk_req *msg; - int ret, ctr = 0; + int ret; if (req->cryptlen == 0) return 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); if (unlikely(ret)) return ret; @@ -1198,15 +1222,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) qat_alg_set_req_iv(qat_req); - do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; } static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) @@ -1242,13 +1262,14 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm); struct qat_crypto_request *qat_req = skcipher_request_ctx(req); struct icp_qat_fw_la_cipher_req_params *cipher_param; + gfp_t f = qat_algs_alloc_flags(&req->base); struct icp_qat_fw_la_bulk_req *msg; - int ret, ctr = 0; + int ret; if (req->cryptlen == 0) return 0; - ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req); + ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req, f); if (unlikely(ret)) return ret; @@ -1268,15 +1289,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) qat_alg_set_req_iv(qat_req); qat_alg_update_iv(qat_req); - do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; } static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c new file mode 100644 index 000000000000..ff5b4347f783 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs_send.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2022 Intel Corporation */ +#include "adf_transport.h" +#include "qat_algs_send.h" +#include "qat_crypto.h" + +#define ADF_MAX_RETRIES 20 + +static int qat_alg_send_message_retry(struct qat_alg_req *req) +{ + int ret = 0, ctr = 0; + + do { + ret = adf_send_message(req->tx_ring, req->fw_req); + } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES); + + if (ret == -EAGAIN) + return -ENOSPC; + + return -EINPROGRESS; +} + +void qat_alg_send_backlog(struct qat_instance_backlog *backlog) +{ + struct qat_alg_req *req, *tmp; + + spin_lock_bh(&backlog->lock); + list_for_each_entry_safe(req, tmp, &backlog->list, list) { + if (adf_send_message(req->tx_ring, req->fw_req)) { + /* The HW ring is full. Do nothing. + * qat_alg_send_backlog() will be invoked again by + * another callback. 
+ */ + break; + } + list_del(&req->list); + req->base->complete(req->base, -EINPROGRESS); + } + spin_unlock_bh(&backlog->lock); +} + +static void qat_alg_backlog_req(struct qat_alg_req *req, + struct qat_instance_backlog *backlog) +{ + INIT_LIST_HEAD(&req->list); + + spin_lock_bh(&backlog->lock); + list_add_tail(&req->list, &backlog->list); + spin_unlock_bh(&backlog->lock); +} + +static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) +{ + struct qat_instance_backlog *backlog = req->backlog; + struct adf_etr_ring_data *tx_ring = req->tx_ring; + u32 *fw_req = req->fw_req; + + /* If any request is already backlogged, then add to backlog list */ + if (!list_empty(&backlog->list)) + goto enqueue; + + /* If ring is nearly full, then add to backlog list */ + if (adf_ring_nearly_full(tx_ring)) + goto enqueue; + + /* If adding request to HW ring fails, then add to backlog list */ + if (adf_send_message(tx_ring, fw_req)) + goto enqueue; + + return -EINPROGRESS; + +enqueue: + qat_alg_backlog_req(req, backlog); + + return -EBUSY; +} + +int qat_alg_send_message(struct qat_alg_req *req) +{ + u32 flags = req->base->flags; + + if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + return qat_alg_send_message_maybacklog(req); + else + return qat_alg_send_message_retry(req); +} diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h new file mode 100644 index 000000000000..5ce9f4f69d8f --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs_send.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef QAT_ALGS_SEND_H +#define QAT_ALGS_SEND_H + +#include "qat_crypto.h" + +int qat_alg_send_message(struct qat_alg_req *req); +void qat_alg_send_backlog(struct qat_instance_backlog *backlog); + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index b0b78445418b..16d97db9ea15 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -12,6 +12,7 @@ #include <crypto/scatterwalk.h> #include "icp_qat_fw_pke.h" #include "adf_accel_devices.h" +#include "qat_algs_send.h" #include "adf_transport.h" #include "adf_common_drv.h" #include "qat_crypto.h" @@ -135,8 +136,23 @@ struct qat_asym_request { } areq; int err; void (*cb)(struct icp_qat_fw_pke_resp *resp); + struct qat_alg_req alg_req; } __aligned(64); +static int qat_alg_send_asym_message(struct qat_asym_request *qat_req, + struct qat_crypto_instance *inst, + struct crypto_async_request *base) +{ + struct qat_alg_req *alg_req = &qat_req->alg_req; + + alg_req->fw_req = (u32 *)&qat_req->req; + alg_req->tx_ring = inst->pke_tx; + alg_req->base = base; + alg_req->backlog = &inst->backlog; + + return qat_alg_send_message(alg_req); +} + static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) { struct qat_asym_request *req = (void *)(__force long)resp->opaque; @@ -148,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 
0 : -EINVAL; if (areq->src) { - if (req->src_align) - dma_free_coherent(dev, req->ctx.dh->p_size, - req->src_align, req->in.dh.in.b); - else - dma_unmap_single(dev, req->in.dh.in.b, - req->ctx.dh->p_size, DMA_TO_DEVICE); + dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size, + DMA_TO_DEVICE); + kfree_sensitive(req->src_align); } areq->dst_len = req->ctx.dh->p_size; if (req->dst_align) { scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, areq->dst_len, 1); - - dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align, - req->out.dh.r); - } else { - dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, - DMA_FROM_DEVICE); + kfree_sensitive(req->dst_align); } + dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, + DMA_FROM_DEVICE); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, @@ -213,8 +224,10 @@ static int qat_dh_compute_value(struct kpp_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(kpp_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + gfp_t flags = qat_algs_alloc_flags(&req->base); int n_input_params = 0; + u8 *vaddr; + int ret; if (unlikely(!ctx->xa)) return -EINVAL; @@ -223,6 +236,10 @@ static int qat_dh_compute_value(struct kpp_request *req) req->dst_len = ctx->p_size; return -EOVERFLOW; } + + if (req->src_len > ctx->p_size) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -271,27 +288,24 @@ static int qat_dh_compute_value(struct kpp_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->p_size) { qat_req->src_align = NULL; - qat_req->in.dh.in.b = dma_map_single(dev, - sg_virt(req->src), - req->src_len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, - qat_req->in.dh.in.b))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->p_size - req->src_len; - qat_req->src_align = dma_alloc_coherent(dev, - ctx->p_size, - &qat_req->in.dh.in.b, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->p_size, flags); if (unlikely(!qat_req->src_align)) return ret; scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + + vaddr = qat_req->src_align; } + + qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b))) + goto unmap_src; } /* * dst can be of any size in valid range, but HW expects it to be the @@ -302,20 +316,18 @@ static int qat_dh_compute_value(struct kpp_request *req) */ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) { qat_req->dst_align = NULL; - qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE); - - if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) - goto unmap_src; - + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, - &qat_req->out.dh.r, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->p_size, flags); if (unlikely(!qat_req->dst_align)) goto unmap_src; + + vaddr = qat_req->dst_align; } + qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) + goto unmap_dst; qat_req->in.dh.in_tab[n_input_params] = 0; qat_req->out.dh.out_tab[1] = 0; @@ -338,13 +350,13 @@ static int qat_dh_compute_value(struct kpp_request *req) msg->input_param_count = n_input_params; msg->output_param_count = 1; - do { - ret = 
adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100); + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all; - if (!ret) - return -EINPROGRESS; + return ret; +unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_dh_output_params), @@ -355,23 +367,17 @@ unmap_in_params: sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->p_size, qat_req->dst_align, - qat_req->out.dh.r); - else - if (!dma_mapping_error(dev, qat_req->out.dh.r)) - dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, - DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.dh.r)) + dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, + DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: if (req->src) { - if (qat_req->src_align) - dma_free_coherent(dev, ctx->p_size, qat_req->src_align, - qat_req->in.dh.in.b); - else - if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) - dma_unmap_single(dev, qat_req->in.dh.in.b, - ctx->p_size, - DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) + dma_unmap_single(dev, qat_req->in.dh.in.b, + ctx->p_size, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); } return ret; } @@ -420,14 +426,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx) { if (ctx->g) { + memset(ctx->g, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g); ctx->g = NULL; } if (ctx->xa) { + memset(ctx->xa, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa); ctx->xa = NULL; } if (ctx->p) { + memset(ctx->p, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); ctx->p = NULL; } @@ -510,25 +519,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp) err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 
0 : -EINVAL; - if (req->src_align) - dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align, - req->in.rsa.enc.m); - else - dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, - DMA_TO_DEVICE); + kfree_sensitive(req->src_align); + + dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, + DMA_TO_DEVICE); areq->dst_len = req->ctx.rsa->key_sz; if (req->dst_align) { scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, areq->dst_len, 1); - dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align, - req->out.rsa.enc.c); - } else { - dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, - DMA_FROM_DEVICE); + kfree_sensitive(req->dst_align); } + dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, + DMA_FROM_DEVICE); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, @@ -542,8 +548,11 @@ void qat_alg_asym_callback(void *_resp) { struct icp_qat_fw_pke_resp *resp = _resp; struct qat_asym_request *areq = (void *)(__force long)resp->opaque; + struct qat_instance_backlog *backlog = areq->alg_req.backlog; areq->cb(resp); + + qat_alg_send_backlog(backlog); } #define PKE_RSA_EP_512 0x1c161b21 @@ -642,7 +651,9 @@ static int qat_rsa_enc(struct akcipher_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + gfp_t flags = qat_algs_alloc_flags(&req->base); + u8 *vaddr; + int ret; if (unlikely(!ctx->n || !ctx->e)) return -EINVAL; @@ -651,6 +662,10 @@ static int qat_rsa_enc(struct akcipher_request *req) req->dst_len = ctx->key_sz; return -EOVERFLOW; } + + if (req->src_len > ctx->key_sz) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -679,40 +694,39 @@ static int qat_rsa_enc(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src), - req->src_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.enc.m, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->key_sz, flags); if (unlikely(!qat_req->src_align)) return ret; scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + vaddr = qat_req->src_align; } - if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { - qat_req->dst_align = NULL; - qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) - goto unmap_src; + qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) + goto unmap_src; + if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { + qat_req->dst_align = NULL; + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.enc.c, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->key_sz, flags); if (unlikely(!qat_req->dst_align)) goto unmap_src; - + vaddr = qat_req->dst_align; } + + qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) + 
goto unmap_dst; + qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, @@ -732,13 +746,14 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; - do { - ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100); - if (!ret) - return -EINPROGRESS; + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all; + return ret; + +unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), @@ -749,21 +764,15 @@ unmap_in_params: sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.rsa.enc.c); - else - if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) - dma_unmap_single(dev, qat_req->out.rsa.enc.c, - ctx->key_sz, DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) + dma_unmap_single(dev, qat_req->out.rsa.enc.c, + ctx->key_sz, DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.rsa.enc.m); - else - if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) - dma_unmap_single(dev, qat_req->in.rsa.enc.m, - ctx->key_sz, DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) + dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); return ret; } @@ -776,7 +785,9 @@ static int qat_rsa_dec(struct akcipher_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + gfp_t flags = qat_algs_alloc_flags(&req->base); + u8 *vaddr; + int ret; if (unlikely(!ctx->n || !ctx->d)) return -EINVAL; @@ -785,6 +796,10 @@ static int qat_rsa_dec(struct akcipher_request *req) req->dst_len = ctx->key_sz; return -EOVERFLOW; } + + if (req->src_len > ctx->key_sz) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -823,40 +838,37 @@ static int qat_rsa_dec(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), - req->dst_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->key_sz - req->src_len; - qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.dec.c, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->key_sz, flags); if (unlikely(!qat_req->src_align)) return ret; scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + vaddr = qat_req->src_align; } - if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { - qat_req->dst_align = NULL; - qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) - goto unmap_src; + qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) + goto unmap_src; + if (sg_is_last(req->dst) && req->dst_len == 
ctx->key_sz) { + qat_req->dst_align = NULL; + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.dec.m, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->key_sz, flags); if (unlikely(!qat_req->dst_align)) goto unmap_src; - + vaddr = qat_req->dst_align; } + qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) + goto unmap_dst; if (ctx->crt_mode) qat_req->in.rsa.in_tab[6] = 0; @@ -884,13 +896,14 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->input_param_count = 3; msg->output_param_count = 1; - do { - ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100); - if (!ret) - return -EINPROGRESS; + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all; + + return ret; +unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), @@ -901,21 +914,15 @@ unmap_in_params: sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.rsa.dec.m); - else - if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) - dma_unmap_single(dev, qat_req->out.rsa.dec.m, - ctx->key_sz, DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) + dma_unmap_single(dev, qat_req->out.rsa.dec.m, + ctx->key_sz, DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.rsa.dec.c); - else - if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) - dma_unmap_single(dev, qat_req->in.rsa.dec.c, - ctx->key_sz, DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) + dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); return ret; } @@ -1233,18 +1240,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev); - if (ctx->n) - dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); - if (ctx->e) - dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); - if (ctx->d) { - memset(ctx->d, '\0', ctx->key_sz); - dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); - } + qat_rsa_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); - ctx->n = NULL; - ctx->e = NULL; - ctx->d = NULL; } static struct akcipher_alg rsa = { diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 67c9588e89df..9341d892533a 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -161,13 +161,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - /* Temporarily set the number of crypto instances to zero to avoid - * registering the crypto algorithms. 
- * This will be removed when the algorithms will support the - * CRYPTO_TFM_REQ_MAY_BACKLOG flag - */ - instances = 0; - for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); @@ -353,6 +346,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) &inst->pke_rx); if (ret) goto err; + + INIT_LIST_HEAD(&inst->backlog.list); + spin_lock_init(&inst->backlog.lock); } return 0; err: diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index b6a4c95ae003..df3c738ce323 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -9,6 +9,19 @@ #include "adf_accel_devices.h" #include "icp_qat_fw_la.h" +struct qat_instance_backlog { + struct list_head list; + spinlock_t lock; /* protects backlog list */ +}; + +struct qat_alg_req { + u32 *fw_req; + struct adf_etr_ring_data *tx_ring; + struct crypto_async_request *base; + struct list_head list; + struct qat_instance_backlog *backlog; +}; + struct qat_crypto_instance { struct adf_etr_ring_data *sym_tx; struct adf_etr_ring_data *sym_rx; @@ -19,8 +32,29 @@ struct qat_crypto_instance { unsigned long state; int id; atomic_t refctr; + struct qat_instance_backlog backlog; }; +#define QAT_MAX_BUFF_DESC 4 + +struct qat_alg_buf { + u32 len; + u32 resrvd; + u64 addr; +} __packed; + +struct qat_alg_buf_list { + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed; + +struct qat_alg_fixed_buf_list { + struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; +} __packed __aligned(64); + struct qat_crypto_request_buffs { struct qat_alg_buf_list *bl; dma_addr_t blp; @@ -28,6 +62,10 @@ struct qat_crypto_request_buffs { dma_addr_t bloutp; size_t sz; size_t sz_out; + bool sgl_src_valid; + bool sgl_dst_valid; + struct qat_alg_fixed_buf_list sgl_src; + struct qat_alg_fixed_buf_list sgl_dst; }; struct qat_crypto_request; @@ -53,6 +91,7 @@ struct qat_crypto_request { u8 iv[AES_BLOCK_SIZE]; }; bool encryption; + struct qat_alg_req alg_req; }; static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) @@ -70,4 +109,9 @@ static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) return true; } +static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) +{ + return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; +} + #endif diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c index 4bfd8f3566f7..7bba35280dac 100644 --- a/drivers/crypto/qat/qat_common/qat_hal.c +++ b/drivers/crypto/qat/qat_common/qat_hal.c @@ -695,6 +695,7 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, handle->pci_dev = pci_info->pci_dev; switch (handle->pci_dev->device) { case ADF_4XXX_PCI_DEVICE_ID: + case ADF_401XX_PCI_DEVICE_ID: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c index 6356402a2c9e..0fe5a474aa45 100644 --- a/drivers/crypto/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/qat/qat_common/qat_uclo.c @@ -519,7 +519,7 @@ qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, return NULL; } -static unsigned int +static int qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, struct icp_qat_uof_image *image) { @@ -731,6 +731,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) case PCI_DEVICE_ID_INTEL_QAT_C3XXX: return ICP_QAT_AC_C3XXX_DEV_TYPE; case ADF_4XXX_PCI_DEVICE_ID: + case ADF_401XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index 09599fe4d2f3..cb3bdd3618fb 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -7,6 +7,8 @@ #include "adf_dh895xcc_hw_data.h" #include "icp_qat_hw.h" +#define ADF_DH895XCC_VF_MSK 0xFFFFFFFF + /* Worker thread to service arbiter mappings */ static const u32 thrd_to_arb_map[ADF_DH895XCC_MAX_ACCELENGINES] = { 0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666, @@ -58,17 +60,24 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | - ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_COMPRESSION; /* Read accelerator capabilities mask */ pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses); - if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) + /* A set bit in legfuses means the feature is OFF in this SKU */ + if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } if (legfuses & ICP_ACCEL_MASK_PKE_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; - if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) + if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) { capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; @@ -100,43 +109,6 @@ static const u32 *adf_get_arbiter_mapping(void) return thrd_to_arb_map; } -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr; - - /* Enable bundle and misc interrupts */ - ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET, - accel_dev->pf.vf_info ? 
0 : - BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1); - ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET, - ADF_DH895XCC_SMIA1_MASK); -} - -static u32 get_vf2pf_sources(void __iomem *pmisc_bar) -{ - u32 errsou3, errmsk3, errsou5, errmsk5, vf_int_mask; - - /* Get the interrupt sources triggered by VFs */ - errsou3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU3); - vf_int_mask = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3); - - /* To avoid adding duplicate entries to work queue, clear - * vf_int_mask_sets bits that are already masked in ERRMSK register. - */ - errmsk3 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK3); - vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3); - - /* Do the same for ERRSOU5 */ - errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5); - errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5); - vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5); - vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5); - - return vf_int_mask; -} - static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) { /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */ @@ -150,27 +122,71 @@ static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) if (vf_mask >> 16) { u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5) & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask); - ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val); } } -static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask) +static void disable_all_vf2pf_interrupts(void __iomem *pmisc_addr) { + u32 val; + /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */ - if (vf_mask & 0xFFFF) { - u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3) - | ADF_DH895XCC_ERR_MSK_VF2PF_L(vf_mask); - ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val); - } + val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3) + | ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val); /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */ - if (vf_mask >> 16) { - u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5) - | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask); + val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5) + | ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val); +} - ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val); - } +static u32 disable_pending_vf2pf_interrupts(void __iomem *pmisc_addr) +{ + u32 sources, pending, disabled; + u32 errsou3, errmsk3; + u32 errsou5, errmsk5; + + /* Get the interrupt sources triggered by VFs */ + errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3); + errsou5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU5); + sources = ADF_DH895XCC_ERR_REG_VF2PF_L(errsou3) + | ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5); + + if (!sources) + return 0; + + /* Get the already disabled interrupts */ + errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3); + errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5); + disabled = ADF_DH895XCC_ERR_REG_VF2PF_L(errmsk3) + | ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5); + + pending = sources & ~disabled; + if (!pending) + return 0; + + /* Due to HW limitations, when disabling the interrupts, we can't + * just disable the requested sources, as this would lead to missed + * interrupts if sources changes just before writing to ERRMSK3 and + * ERRMSK5. + * To work around it, disable all and re-enable only the sources that + * are not in vf_mask and were not already disabled. Re-enabling will + * trigger a new interrupt for the sources that have changed in the + * meantime, if any. 
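+ *
+ * Worked example (illustrative only): if ERRSOU3 reports only VF3 as
+ * pending and nothing was masked beforehand, then sources = BIT(3),
+ * disabled = 0 and pending = BIT(3). All 32 VF mask bits are first set
+ * in ERRMSK3/ERRMSK5, after which every VF bit except VF3's is cleared
+ * again, so a VF that raised an interrupt in between still triggers a
+ * fresh one, while VF3's source stays masked and is reported back to
+ * the caller via the returned pending mask.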
+ */ + errmsk3 |= ADF_DH895XCC_ERR_MSK_VF2PF_L(ADF_DH895XCC_VF_MSK); + errmsk5 |= ADF_DH895XCC_ERR_MSK_VF2PF_U(ADF_DH895XCC_VF_MSK); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); + + errmsk3 &= ADF_DH895XCC_ERR_MSK_VF2PF_L(sources | disabled); + errmsk5 &= ADF_DH895XCC_ERR_MSK_VF2PF_U(sources | disabled); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, errmsk3); + ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, errmsk5); + + /* Return the sources of the (new) interrupt(s) */ + return pending; } static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable) @@ -215,14 +231,14 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data) hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; + hw_data->enable_ints = adf_gen2_enable_ints; hw_data->reset_device = adf_reset_sbr; hw_data->disable_iov = adf_disable_sriov; adf_gen2_init_pf_pfvf_ops(&hw_data->pfvf_ops); - hw_data->pfvf_ops.get_vf2pf_sources = get_vf2pf_sources; hw_data->pfvf_ops.enable_vf2pf_interrupts = enable_vf2pf_interrupts; - hw_data->pfvf_ops.disable_vf2pf_interrupts = disable_vf2pf_interrupts; + hw_data->pfvf_ops.disable_all_vf2pf_interrupts = disable_all_vf2pf_interrupts; + hw_data->pfvf_ops.disable_pending_vf2pf_interrupts = disable_pending_vf2pf_interrupts; adf_gen2_init_hw_csr_ops(&hw_data->csr_ops); } diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h index aa17272a1507..7b674bbe4192 100644 --- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h +++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h @@ -19,10 +19,6 @@ #define ADF_DH895XCC_ACCELERATORS_MASK 0x3F #define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF #define ADF_DH895XCC_ETR_MAX_BANKS 32 -#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28) -#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30) -#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF -#define ADF_DH895XCC_SMIA1_MASK 0x1 /* Masks for VF2PF interrupts */ #define ADF_DH895XCC_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9) diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index 51b58e57153f..6957a125b447 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -2379,6 +2379,7 @@ static const struct of_device_id of_match[] = { { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, }, { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, }, { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, }, + { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, }, {}, }; MODULE_DEVICE_TABLE(of, of_match); diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 25c9f825b8b5..c9ad6c213090 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -1709,7 +1709,7 @@ static void common_nonsnoop_hash_unmap(struct device *dev, struct talitos_desc *desc2 = (struct talitos_desc *) (edesc->buf + edesc->dma_len); - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); + unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE); if (desc->next_desc && desc->ptr[5].ptr != desc2->ptr[5].ptr) unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); @@ -1721,8 +1721,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev, talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); /* When using hashctx-in, must unmap it. 
*/ - if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) - unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], + if (from_talitos_ptr_len(&desc->ptr[1], is_sec1)) + unmap_single_talitos_ptr(dev, &desc->ptr[1], DMA_TO_DEVICE); else if (desc->next_desc) unmap_single_talitos_ptr(dev, &desc2->ptr[1], @@ -1736,8 +1736,8 @@ static void common_nonsnoop_hash_unmap(struct device *dev, dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, DMA_BIDIRECTIONAL); - if (edesc->desc.next_desc) - dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc), + if (desc->next_desc) + dma_unmap_single(dev, be32_to_cpu(desc->next_desc), TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); } diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 5157c118d642..265ef3e96fdd 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -877,9 +877,7 @@ static int hash_dma_final(struct ahash_request *req) __func__); goto out; } - } - - if (!req_ctx->updated) { + } else { ret = hash_setconfiguration(device_data, &ctx->config); if (ret) { dev_err(device_data->dev, diff --git a/drivers/crypto/vmx/Makefile b/drivers/crypto/vmx/Makefile index 709670d2b553..2560cfea1dec 100644 --- a/drivers/crypto/vmx/Makefile +++ b/drivers/crypto/vmx/Makefile @@ -2,21 +2,10 @@ obj-$(CONFIG_CRYPTO_DEV_VMX_ENCRYPT) += vmx-crypto.o vmx-crypto-objs := vmx.o aesp8-ppc.o ghashp8-ppc.o aes.o aes_cbc.o aes_ctr.o aes_xts.o ghash.o -ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) -override flavour := linux-ppc64le -else -override flavour := linux-ppc64 -endif - -quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $(<) $(flavour) > $(@) +quiet_cmd_perl = PERL $@ + cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@ targets += aesp8-ppc.S ghashp8-ppc.S -$(obj)/aesp8-ppc.S: $(src)/aesp8-ppc.pl FORCE - $(call if_changed,perl) - -$(obj)/ghashp8-ppc.S: $(src)/ghashp8-ppc.pl FORCE +$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE $(call if_changed,perl) - -clean-files := aesp8-ppc.S ghashp8-ppc.S diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig index b88ab956bb7c..f64e3984689f 100644 --- a/drivers/cxl/Kconfig +++ b/drivers/cxl/Kconfig @@ -98,4 +98,8 @@ config CXL_PORT default CXL_BUS tristate +config CXL_SUSPEND + def_bool y + depends on SUSPEND && CXL_MEM + endif diff --git a/drivers/cxl/Makefile b/drivers/cxl/Makefile index ce267ef11d93..a78270794150 100644 --- a/drivers/cxl/Makefile +++ b/drivers/cxl/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_CXL_BUS) += core/ +obj-y += core/ obj-$(CONFIG_CXL_PCI) += cxl_pci.o obj-$(CONFIG_CXL_MEM) += cxl_mem.o obj-$(CONFIG_CXL_ACPI) += cxl_acpi.o diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c index d15a6aec0331..40286f5df812 100644 --- a/drivers/cxl/acpi.c +++ b/drivers/cxl/acpi.c @@ -275,6 +275,13 @@ static int add_root_nvdimm_bridge(struct device *match, void *data) return 1; } +static struct lock_class_key cxl_root_key; + +static void cxl_acpi_lock_reset_class(void *dev) +{ + device_lock_reset_class(dev); +} + static int cxl_acpi_probe(struct platform_device *pdev) { int rc; @@ -283,6 +290,12 @@ static int cxl_acpi_probe(struct platform_device *pdev) struct acpi_device *adev = ACPI_COMPANION(host); struct cxl_cfmws_context ctx; + device_lock_set_class(&pdev->dev, &cxl_root_key); + rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class, + &pdev->dev); + if (rc) + return rc; + root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); if 
(IS_ERR(root_port)) return PTR_ERR(root_port); diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index 6d37cd78b151..9d35085d25af 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CXL_BUS) += cxl_core.o +obj-$(CONFIG_CXL_SUSPEND) += suspend.o ccflags-y += -I$(srctree)/drivers/cxl cxl_core-y := port.o diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index be61a0d8016b..54f434733b56 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -35,6 +35,7 @@ static bool cxl_raw_allow_all; .flags = _flags, \ } +#define CXL_VARIABLE_PAYLOAD ~0U /* * This table defines the supported mailbox commands for the driver. This table * is made up of a UAPI structure. Non-negative values as parameters in the @@ -44,26 +45,26 @@ static bool cxl_raw_allow_all; static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE), #ifdef CONFIG_CXL_MEM_RAW_COMMANDS - CXL_CMD(RAW, ~0, ~0, 0), + CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0), #endif - CXL_CMD(GET_SUPPORTED_LOGS, 0, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), CXL_CMD(GET_FW_INFO, 0, 0x50, 0), CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0), - CXL_CMD(GET_LSA, 0x8, ~0, 0), + CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), - CXL_CMD(GET_LOG, 0x18, ~0, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), - CXL_CMD(SET_LSA, ~0, 0, 0), + CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, ~0, 0), + CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(INJECT_POISON, 0x8, 0, 0), CXL_CMD(CLEAR_POISON, 0x48, 0, 0), CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, ~0, 0), + CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), }; /* @@ -127,6 +128,17 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) return NULL; } +static const char *cxl_mem_opcode_to_name(u16 opcode) +{ + struct cxl_mem_command *c; + + c = cxl_mem_find_command(opcode); + if (!c) + return NULL; + + return cxl_command_names[c->info.id].name; +} + /** * cxl_mbox_send_cmd() - Send a mailbox command to a device. * @cxlds: The device data for the operation @@ -136,7 +148,7 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) * @out: Caller allocated buffer for the output. * @out_size: Expected size of output. * - * Context: Any context. Will acquire and release mbox_mutex. + * Context: Any context. * Return: * * %>=0 - Number of bytes returned in @out. * * %-E2BIG - Payload is too large for hardware. @@ -169,17 +181,17 @@ int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, if (rc) return rc; - /* TODO: Map return code to proper kernel style errno */ - if (mbox_cmd.return_code != CXL_MBOX_SUCCESS) - return -ENXIO; + if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS) + return cxl_mbox_cmd_rc2errno(&mbox_cmd); /* * Variable sized commands can't be validated and so it's up to the * caller to do that if they wish. 
*/ - if (cmd->info.size_out >= 0 && mbox_cmd.size_out != out_size) - return -EIO; - + if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) { + if (mbox_cmd.size_out != out_size) + return -EIO; + } return 0; } EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL); @@ -208,75 +220,122 @@ static bool cxl_mem_raw_command_allowed(u16 opcode) } /** - * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. - * @cxlds: The device data for the operation - * @send_cmd: &struct cxl_send_command copied in from userspace. - * @out_cmd: Sanitized and populated &struct cxl_mem_command. + * cxl_payload_from_user_allowed() - Check contents of in_payload. + * @opcode: The mailbox command opcode. + * @payload_in: Pointer to the input payload passed in from user space. * * Return: - * * %0 - @out_cmd is ready to send. - * * %-ENOTTY - Invalid command specified. - * * %-EINVAL - Reserved fields or invalid values were used. - * * %-ENOMEM - Input or output buffer wasn't sized properly. - * * %-EPERM - Attempted to use a protected command. - * * %-EBUSY - Kernel has claimed exclusive access to this opcode + * * true - payload_in passes check for @opcode. + * * false - payload_in contains invalid or unsupported values. * - * The result of this command is a fully validated command in @out_cmd that is - * safe to send to the hardware. + * The driver may inspect payload contents before sending a mailbox + * command from user space to the device. The intent is to reject + * commands with input payloads that are known to be unsafe. This + * check is not intended to replace the users careful selection of + * mailbox command parameters and makes no guarantee that the user + * command will succeed, nor that it is appropriate. * - * See handle_mailbox_cmd_from_user() + * The specific checks are determined by the opcode. */ -static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds, - const struct cxl_send_command *send_cmd, - struct cxl_mem_command *out_cmd) +static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) { - const struct cxl_command_info *info; - struct cxl_mem_command *c; + switch (opcode) { + case CXL_MBOX_OP_SET_PARTITION_INFO: { + struct cxl_mbox_set_partition_info *pi = payload_in; - if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) - return -ENOTTY; + if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG) + return false; + break; + } + default: + break; + } + return true; +} - /* - * The user can never specify an input payload larger than what hardware - * supports, but output can be arbitrarily large (simply write out as - * much data as the hardware provides). 
- */ - if (send_cmd->in.size > cxlds->payload_size) +static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox, + struct cxl_dev_state *cxlds, u16 opcode, + size_t in_size, size_t out_size, u64 in_payload) +{ + *mbox = (struct cxl_mbox_cmd) { + .opcode = opcode, + .size_in = in_size, + }; + + if (in_size) { + mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload), + in_size); + if (IS_ERR(mbox->payload_in)) + return PTR_ERR(mbox->payload_in); + + if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) { + dev_dbg(cxlds->dev, "%s: input payload not allowed\n", + cxl_mem_opcode_to_name(opcode)); + kvfree(mbox->payload_in); + return -EBUSY; + } + } + + /* Prepare to handle a full payload for variable sized output */ + if (out_size == CXL_VARIABLE_PAYLOAD) + mbox->size_out = cxlds->payload_size; + else + mbox->size_out = out_size; + + if (mbox->size_out) { + mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL); + if (!mbox->payload_out) { + kvfree(mbox->payload_in); + return -ENOMEM; + } + } + return 0; +} + +static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox) +{ + kvfree(mbox->payload_in); + kvfree(mbox->payload_out); +} + +static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd, + const struct cxl_send_command *send_cmd, + struct cxl_dev_state *cxlds) +{ + if (send_cmd->raw.rsvd) return -EINVAL; /* - * Checks are bypassed for raw commands but a WARN/taint will occur - * later in the callchain + * Unlike supported commands, the output size of RAW commands + * gets passed along without further checking, so it must be + * validated here. */ - if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) { - const struct cxl_mem_command temp = { - .info = { - .id = CXL_MEM_COMMAND_ID_RAW, - .flags = 0, - .size_in = send_cmd->in.size, - .size_out = send_cmd->out.size, - }, - .opcode = send_cmd->raw.opcode - }; + if (send_cmd->out.size > cxlds->payload_size) + return -EINVAL; - if (send_cmd->raw.rsvd) - return -EINVAL; + if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) + return -EPERM; - /* - * Unlike supported commands, the output size of RAW commands - * gets passed along without further checking, so it must be - * validated here. 
- */ - if (send_cmd->out.size > cxlds->payload_size) - return -EINVAL; + dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n"); - if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode)) - return -EPERM; + *mem_cmd = (struct cxl_mem_command) { + .info = { + .id = CXL_MEM_COMMAND_ID_RAW, + .size_in = send_cmd->in.size, + .size_out = send_cmd->out.size, + }, + .opcode = send_cmd->raw.opcode + }; - memcpy(out_cmd, &temp, sizeof(temp)); + return 0; +} - return 0; - } +static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd, + const struct cxl_send_command *send_cmd, + struct cxl_dev_state *cxlds) +{ + struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id]; + const struct cxl_command_info *info = &c->info; if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK) return -EINVAL; @@ -287,10 +346,6 @@ static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds, if (send_cmd->in.rsvd || send_cmd->out.rsvd) return -EINVAL; - /* Convert user's command into the internal representation */ - c = &cxl_mem_commands[send_cmd->id]; - info = &c->info; - /* Check that the command is enabled for hardware */ if (!test_bit(info->id, cxlds->enabled_cmds)) return -ENOTTY; @@ -300,22 +355,74 @@ static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds, return -EBUSY; /* Check the input buffer is the expected size */ - if (info->size_in >= 0 && info->size_in != send_cmd->in.size) + if (info->size_in != send_cmd->in.size) return -ENOMEM; /* Check the output buffer is at least large enough */ - if (info->size_out >= 0 && send_cmd->out.size < info->size_out) + if (send_cmd->out.size < info->size_out) return -ENOMEM; - memcpy(out_cmd, c, sizeof(*c)); - out_cmd->info.size_in = send_cmd->in.size; + *mem_cmd = (struct cxl_mem_command) { + .info = { + .id = info->id, + .flags = info->flags, + .size_in = send_cmd->in.size, + .size_out = send_cmd->out.size, + }, + .opcode = c->opcode + }; + + return 0; +} + +/** + * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND. + * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd. + * @cxlds: The device data for the operation + * @send_cmd: &struct cxl_send_command copied in from userspace. + * + * Return: + * * %0 - @out_cmd is ready to send. + * * %-ENOTTY - Invalid command specified. + * * %-EINVAL - Reserved fields or invalid values were used. + * * %-ENOMEM - Input or output buffer wasn't sized properly. + * * %-EPERM - Attempted to use a protected command. + * * %-EBUSY - Kernel has claimed exclusive access to this opcode + * + * The result of this command is a fully validated command in @mbox_cmd that is + * safe to send to the hardware. + */ +static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd, + struct cxl_dev_state *cxlds, + const struct cxl_send_command *send_cmd) +{ + struct cxl_mem_command mem_cmd; + int rc; + + if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX) + return -ENOTTY; + /* - * XXX: out_cmd->info.size_out will be controlled by the driver, and the - * specified number of bytes @send_cmd->out.size will be copied back out - * to userspace. + * The user can never specify an input payload larger than what hardware + * supports, but output can be arbitrarily large (simply write out as + * much data as the hardware provides). 
*/ + if (send_cmd->in.size > cxlds->payload_size) + return -EINVAL; - return 0; + /* Sanitize and construct a cxl_mem_command */ + if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW) + rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds); + else + rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds); + + if (rc) + return rc; + + /* Sanitize and construct a cxl_mbox_cmd */ + return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode, + mem_cmd.info.size_in, mem_cmd.info.size_out, + send_cmd->in.payload); } int cxl_query_cmd(struct cxl_memdev *cxlmd, @@ -355,8 +462,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, /** * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace. * @cxlds: The device data for the operation - * @cmd: The validated command. - * @in_payload: Pointer to userspace's input payload. + * @mbox_cmd: The validated mailbox command. * @out_payload: Pointer to userspace's output payload. * @size_out: (Input) Max payload size to copy out. * (Output) Payload size hardware generated. @@ -371,51 +477,27 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd, * * %-EINTR - Mailbox acquisition interrupted. * * %-EXXX - Transaction level failures. * - * Creates the appropriate mailbox command and dispatches it on behalf of a - * userspace request. The input and output payloads are copied between - * userspace. + * Dispatches a mailbox command on behalf of a userspace request. + * The output payload is copied to userspace. * * See cxl_send_cmd(). */ static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds, - const struct cxl_mem_command *cmd, - u64 in_payload, u64 out_payload, - s32 *size_out, u32 *retval) + struct cxl_mbox_cmd *mbox_cmd, + u64 out_payload, s32 *size_out, + u32 *retval) { struct device *dev = cxlds->dev; - struct cxl_mbox_cmd mbox_cmd = { - .opcode = cmd->opcode, - .size_in = cmd->info.size_in, - .size_out = cmd->info.size_out, - }; int rc; - if (cmd->info.size_out) { - mbox_cmd.payload_out = kvzalloc(cmd->info.size_out, GFP_KERNEL); - if (!mbox_cmd.payload_out) - return -ENOMEM; - } - - if (cmd->info.size_in) { - mbox_cmd.payload_in = vmemdup_user(u64_to_user_ptr(in_payload), - cmd->info.size_in); - if (IS_ERR(mbox_cmd.payload_in)) { - kvfree(mbox_cmd.payload_out); - return PTR_ERR(mbox_cmd.payload_in); - } - } - dev_dbg(dev, "Submitting %s command for user\n" "\topcode: %x\n" - "\tsize: %ub\n", - cxl_command_names[cmd->info.id].name, mbox_cmd.opcode, - cmd->info.size_in); - - dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW, - "raw command path used\n"); + "\tsize: %zx\n", + cxl_mem_opcode_to_name(mbox_cmd->opcode), + mbox_cmd->opcode, mbox_cmd->size_in); - rc = cxlds->mbox_send(cxlds, &mbox_cmd); + rc = cxlds->mbox_send(cxlds, mbox_cmd); if (rc) goto out; @@ -424,22 +506,21 @@ static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds, * to userspace. While the payload may have written more output than * this it will have to be ignored. 
*/ - if (mbox_cmd.size_out) { - dev_WARN_ONCE(dev, mbox_cmd.size_out > *size_out, + if (mbox_cmd->size_out) { + dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out, "Invalid return size\n"); if (copy_to_user(u64_to_user_ptr(out_payload), - mbox_cmd.payload_out, mbox_cmd.size_out)) { + mbox_cmd->payload_out, mbox_cmd->size_out)) { rc = -EFAULT; goto out; } } - *size_out = mbox_cmd.size_out; - *retval = mbox_cmd.return_code; + *size_out = mbox_cmd->size_out; + *retval = mbox_cmd->return_code; out: - kvfree(mbox_cmd.payload_in); - kvfree(mbox_cmd.payload_out); + cxl_mbox_cmd_dtor(mbox_cmd); return rc; } @@ -448,7 +529,7 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) struct cxl_dev_state *cxlds = cxlmd->cxlds; struct device *dev = &cxlmd->dev; struct cxl_send_command send; - struct cxl_mem_command c; + struct cxl_mbox_cmd mbox_cmd; int rc; dev_dbg(dev, "Send IOCTL\n"); @@ -456,17 +537,12 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s) if (copy_from_user(&send, s, sizeof(send))) return -EFAULT; - rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c); + rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send); if (rc) return rc; - /* Prepare to handle a full payload for variable sized output */ - if (c.info.size_out < 0) - c.info.size_out = cxlds->payload_size; - - rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload, - send.out.payload, &send.out.size, - &send.retval); + rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload, + &send.out.size, &send.retval); if (rc) return rc; diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 1f76b28f9826..f7cdcd33504a 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -228,6 +228,8 @@ static void detach_memdev(struct work_struct *work) put_device(&cxlmd->dev); } +static struct lock_class_key cxl_memdev_key; + static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds, const struct file_operations *fops) { @@ -247,6 +249,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds, dev = &cxlmd->dev; device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_memdev_key); dev->parent = cxlds->dev; dev->bus = &cxl_bus_type; dev->devt = MKDEV(cxl_mem_major, cxlmd->id); diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c9a494d6976a..c4c99ff7b55e 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -1,8 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2021 Intel Corporation. All rights reserved. */ +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/device.h> +#include <linux/delay.h> #include <linux/pci.h> #include <cxlpci.h> +#include <cxlmem.h> #include <cxl.h> #include "core.h" @@ -13,6 +16,10 @@ * a set of helpers for CXL interactions which occur via PCIe. */ +static unsigned short media_ready_timeout = 60; +module_param(media_ready_timeout, ushort, 0644); +MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready"); + struct cxl_walk_context { struct pci_bus *bus; struct cxl_port *port; @@ -94,3 +101,360 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port) return ctx.count; } EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL); + +/* + * Wait up to @media_ready_timeout for the device to report memory + * active. 
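+ *
+ * The implementation below polls the DVSEC Range 1 Size Low register
+ * once per second for Memory_Active and then confirms the memory device
+ * status register reports the device ready. media_ready_timeout is a
+ * writable module parameter, so the wait can be shortened for bring-up,
+ * e.g. (assuming pci.o is linked into the cxl_core module) by booting
+ * with cxl_core.media_ready_timeout=10 or writing to
+ * /sys/module/cxl_core/parameters/media_ready_timeout.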
+ */ +int cxl_await_media_ready(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + bool active = false; + u64 md_status; + int rc, i; + + for (i = media_ready_timeout; i; i--) { + u32 temp; + + rc = pci_read_config_dword( + pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); + if (rc) + return rc; + + active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp); + if (active) + break; + msleep(1000); + } + + if (!active) { + dev_err(&pdev->dev, + "timeout awaiting memory active after %d seconds\n", + media_ready_timeout); + return -ETIMEDOUT; + } + + md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); + if (!CXLMDEV_READY(md_status)) + return -EIO; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_await_media_ready, CXL); + +static int wait_for_valid(struct cxl_dev_state *cxlds) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec, rc; + u32 val; + + /* + * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high + * and Size Low registers are valid. Must be set within 1 second of + * deassertion of reset to CXL device. Likely it is already set by the + * time this runs, but otherwise give a 1.5 second timeout in case of + * clock skew. + */ + rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); + if (rc) + return rc; + + if (val & CXL_DVSEC_MEM_INFO_VALID) + return 0; + + msleep(1500); + + rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); + if (rc) + return rc; + + if (val & CXL_DVSEC_MEM_INFO_VALID) + return 0; + + return -ETIMEDOUT; +} + +static int cxl_set_mem_enable(struct cxl_dev_state *cxlds, u16 val) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + int d = cxlds->cxl_dvsec; + u16 ctrl; + int rc; + + rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); + if (rc < 0) + return rc; + + if ((ctrl & CXL_DVSEC_MEM_ENABLE) == val) + return 1; + ctrl &= ~CXL_DVSEC_MEM_ENABLE; + ctrl |= val; + + rc = pci_write_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, ctrl); + if (rc < 0) + return rc; + + return 0; +} + +static void clear_mem_enable(void *cxlds) +{ + cxl_set_mem_enable(cxlds, 0); +} + +static int devm_cxl_enable_mem(struct device *host, struct cxl_dev_state *cxlds) +{ + int rc; + + rc = cxl_set_mem_enable(cxlds, CXL_DVSEC_MEM_ENABLE); + if (rc < 0) + return rc; + if (rc > 0) + return 0; + return devm_add_action_or_reset(host, clear_mem_enable, cxlds); +} + +static bool range_contains(struct range *r1, struct range *r2) +{ + return r1->start <= r2->start && r1->end >= r2->end; +} + +/* require dvsec ranges to be covered by a locked platform window */ +static int dvsec_range_allowed(struct device *dev, void *arg) +{ + struct range *dev_range = arg; + struct cxl_decoder *cxld; + struct range root_range; + + if (!is_root_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + + if (!(cxld->flags & CXL_DECODER_F_LOCK)) + return 0; + if (!(cxld->flags & CXL_DECODER_F_RAM)) + return 0; + + root_range = (struct range) { + .start = cxld->platform_res.start, + .end = cxld->platform_res.end, + }; + + return range_contains(&root_range, dev_range); +} + +static void disable_hdm(void *_cxlhdm) +{ + u32 global_ctrl; + struct cxl_hdm *cxlhdm = _cxlhdm; + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + + global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + writel(global_ctrl & ~CXL_HDM_DECODER_ENABLE, + hdm + CXL_HDM_DECODER_CTRL_OFFSET); +} + +static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm) +{ + void __iomem *hdm = 
cxlhdm->regs.hdm_decoder; + u32 global_ctrl; + + global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + writel(global_ctrl | CXL_HDM_DECODER_ENABLE, + hdm + CXL_HDM_DECODER_CTRL_OFFSET); + + return devm_add_action_or_reset(host, disable_hdm, cxlhdm); +} + +static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds, + struct cxl_hdm *cxlhdm, + struct cxl_endpoint_dvsec_info *info) +{ + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + struct cxl_port *port = cxlhdm->port; + struct device *dev = cxlds->dev; + struct cxl_port *root; + int i, rc, allowed; + u32 global_ctrl; + + global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET); + + /* + * If the HDM Decoder Capability is already enabled then assume + * that some other agent like platform firmware set it up. + */ + if (global_ctrl & CXL_HDM_DECODER_ENABLE) { + rc = devm_cxl_enable_mem(&port->dev, cxlds); + if (rc) + return false; + return true; + } + + root = to_cxl_port(port->dev.parent); + while (!is_cxl_root(root) && is_cxl_port(root->dev.parent)) + root = to_cxl_port(root->dev.parent); + if (!is_cxl_root(root)) { + dev_err(dev, "Failed to acquire root port for HDM enable\n"); + return false; + } + + for (i = 0, allowed = 0; info->mem_enabled && i < info->ranges; i++) { + struct device *cxld_dev; + + cxld_dev = device_find_child(&root->dev, &info->dvsec_range[i], + dvsec_range_allowed); + if (!cxld_dev) { + dev_dbg(dev, "DVSEC Range%d denied by platform\n", i); + continue; + } + dev_dbg(dev, "DVSEC Range%d allowed by platform\n", i); + put_device(cxld_dev); + allowed++; + } + + if (!allowed) { + cxl_set_mem_enable(cxlds, 0); + info->mem_enabled = 0; + } + + /* + * Per CXL 2.0 Section 8.1.3.8.3 and 8.1.3.8.4 DVSEC CXL Range 1 Base + * [High,Low] when HDM operation is enabled the range register values + * are ignored by the device, but the spec also recommends matching the + * DVSEC Range 1,2 to HDM Decoder Range 0,1. So, non-zero info->ranges + * are expected even though Linux does not require or maintain that + * match. If at least one DVSEC range is enabled and allowed, skip HDM + * Decoder Capability Enable. + */ + if (info->mem_enabled) + return false; + + rc = devm_cxl_enable_hdm(&port->dev, cxlhdm); + if (rc) + return false; + + rc = devm_cxl_enable_mem(&port->dev, cxlds); + if (rc) + return false; + + return true; +} + +/** + * cxl_hdm_decode_init() - Setup HDM decoding for the endpoint + * @cxlds: Device state + * @cxlhdm: Mapped HDM decoder Capability + * + * Try to enable the endpoint's HDM Decoder Capability + */ +int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm) +{ + struct pci_dev *pdev = to_pci_dev(cxlds->dev); + struct cxl_endpoint_dvsec_info info = { 0 }; + int hdm_count, rc, i, ranges = 0; + struct device *dev = &pdev->dev; + int d = cxlds->cxl_dvsec; + u16 cap, ctrl; + + if (!d) { + dev_dbg(dev, "No DVSEC Capability\n"); + return -ENXIO; + } + + rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap); + if (rc) + return rc; + + rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); + if (rc) + return rc; + + if (!(cap & CXL_DVSEC_MEM_CAPABLE)) { + dev_dbg(dev, "Not MEM Capable\n"); + return -ENXIO; + } + + /* + * It is not allowed by spec for MEM.capable to be set and have 0 legacy + * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this + * driver is for a spec defined class code which must be CXL.mem + * capable, there is no point in continuing to enable CXL.mem. 
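+ *
+ * For example, an endpoint advertising a single decoder carries an HDM
+ * count field of 1 and passes this check; a field of 0, or any value
+ * above 2, fails with -EINVAL before the range registers are read
+ * below.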
+ */ + hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap); + if (!hdm_count || hdm_count > 2) + return -EINVAL; + + rc = wait_for_valid(cxlds); + if (rc) { + dev_dbg(dev, "Failure awaiting MEM_INFO_VALID (%d)\n", rc); + return rc; + } + + /* + * The current DVSEC values are moot if the memory capability is + * disabled, and they will remain moot after the HDM Decoder + * capability is enabled. + */ + info.mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl); + if (!info.mem_enabled) + goto hdm_init; + + for (i = 0; i < hdm_count; i++) { + u64 base, size; + u32 temp; + + rc = pci_read_config_dword( + pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp); + if (rc) + return rc; + + size = (u64)temp << 32; + + rc = pci_read_config_dword( + pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp); + if (rc) + return rc; + + size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; + + rc = pci_read_config_dword( + pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp); + if (rc) + return rc; + + base = (u64)temp << 32; + + rc = pci_read_config_dword( + pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp); + if (rc) + return rc; + + base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; + + info.dvsec_range[i] = (struct range) { + .start = base, + .end = base + size - 1 + }; + + if (size) + ranges++; + } + + info.ranges = ranges; + + /* + * If DVSEC ranges are being used instead of HDM decoder registers there + * is no use in trying to manage those. + */ +hdm_init: + if (!__cxl_hdm_decode_init(cxlds, cxlhdm, &info)) { + dev_err(dev, + "Legacy range registers configuration prevents HDM operation.\n"); + return -EBUSY; + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 8de240c4d96b..bec7cfb54ebf 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -80,6 +80,8 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd) } EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL); +static struct lock_class_key cxl_nvdimm_bridge_key; + static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port) { struct cxl_nvdimm_bridge *cxl_nvb; @@ -99,6 +101,7 @@ static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port) cxl_nvb->port = port; cxl_nvb->state = CXL_NVB_NEW; device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key); device_set_pm_not_required(dev); dev->parent = &port->dev; dev->bus = &cxl_bus_type; @@ -121,10 +124,10 @@ static void unregister_nvb(void *_cxl_nvb) * work to flush. Once the state has been changed to 'dead' then no new * work can be queued by user-triggered bind. 
*/ - cxl_device_lock(&cxl_nvb->dev); + device_lock(&cxl_nvb->dev); flush = cxl_nvb->state != CXL_NVB_NEW; cxl_nvb->state = CXL_NVB_DEAD; - cxl_device_unlock(&cxl_nvb->dev); + device_unlock(&cxl_nvb->dev); /* * Even though the device core will trigger device_release_driver() @@ -214,6 +217,8 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev) } EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL); +static struct lock_class_key cxl_nvdimm_key; + static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) { struct cxl_nvdimm *cxl_nvd; @@ -226,6 +231,7 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) dev = &cxl_nvd->dev; cxl_nvd->cxlmd = cxlmd; device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_nvdimm_key); device_set_pm_not_required(dev); dev->parent = &cxlmd->dev; dev->bus = &cxl_bus_type; diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 2ab1ba4499b3..ea60abda6500 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -312,10 +312,10 @@ static void cxl_port_release(struct device *dev) struct cxl_port *port = to_cxl_port(dev); struct cxl_ep *ep, *_e; - cxl_device_lock(dev); + device_lock(dev); list_for_each_entry_safe(ep, _e, &port->endpoints, list) cxl_ep_release(ep); - cxl_device_unlock(dev); + device_unlock(dev); ida_free(&cxl_port_ida, port->id); kfree(port); } @@ -391,6 +391,8 @@ static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) return devm_add_action_or_reset(host, cxl_unlink_uport, port); } +static struct lock_class_key cxl_port_key; + static struct cxl_port *cxl_port_alloc(struct device *uport, resource_size_t component_reg_phys, struct cxl_port *parent_port) @@ -415,9 +417,10 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, * description. 
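The lock_class_key additions in these cxl/core hunks give each CXL object type its own lockdep class, and port locks additionally get a subclass derived from the port's depth (see the cxl_port_alloc() change just below), so parent-then-child nesting of device_lock() no longer looks like a self-deadlock. A rough user-space analog of that ordering rule, using a thread-local "deepest depth held" check instead of lockdep; the node type and assertion scheme are invented for illustration:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
	int depth;                     /* 0 = root, children are depth + 1 */
};

static _Thread_local int held_depth = -1;  /* deepest depth currently held */

static void node_lock(struct node *n)
{
	/* Enforce parent-before-child ordering: only ever lock downward. */
	assert(n->depth > held_depth);
	pthread_mutex_lock(&n->lock);
	held_depth = n->depth;
}

static void node_unlock(struct node *n)
{
	/* Assumes strictly nested lock/unlock, as in the sketch below. */
	held_depth = n->depth - 1;
	pthread_mutex_unlock(&n->lock);
}

int main(void)
{
	struct node root  = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct node child = { PTHREAD_MUTEX_INITIALIZER, 1 };

	node_lock(&root);
	node_lock(&child);             /* fine: strictly deeper than root */
	node_unlock(&child);
	node_unlock(&root);
	printf("ordered lock/unlock ok\n");
	return 0;
}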
*/ dev = &port->dev; - if (parent_port) + if (parent_port) { dev->parent = &parent_port->dev; - else + port->depth = parent_port->depth + 1; + } else dev->parent = uport; port->uport = uport; @@ -427,6 +430,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, INIT_LIST_HEAD(&port->endpoints); device_initialize(dev); + lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); device_set_pm_not_required(dev); dev->bus = &cxl_bus_type; dev->type = &cxl_port_type; @@ -457,8 +461,6 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, if (IS_ERR(port)) return port; - if (parent_port) - port->depth = parent_port->depth + 1; dev = &port->dev; if (is_cxl_memdev(uport)) rc = dev_set_name(dev, "endpoint%d", port->id); @@ -554,7 +556,7 @@ static int match_root_child(struct device *dev, const void *match) return 0; port = to_cxl_port(dev); - cxl_device_lock(dev); + device_lock(dev); list_for_each_entry(dport, &port->dports, list) { iter = match; while (iter) { @@ -564,7 +566,7 @@ static int match_root_child(struct device *dev, const void *match) } } out: - cxl_device_unlock(dev); + device_unlock(dev); return !!iter; } @@ -623,13 +625,13 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new) static void cond_cxl_root_lock(struct cxl_port *port) { if (is_cxl_root(port)) - cxl_device_lock(&port->dev); + device_lock(&port->dev); } static void cond_cxl_root_unlock(struct cxl_port *port) { if (is_cxl_root(port)) - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); } static void cxl_dport_remove(void *data) @@ -736,15 +738,15 @@ static int add_ep(struct cxl_port *port, struct cxl_ep *new) { struct cxl_ep *dup; - cxl_device_lock(&port->dev); + device_lock(&port->dev); if (port->dead) { - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); return -ENXIO; } dup = find_ep(port, new->ep); if (!dup) list_add_tail(&new->list, &port->endpoints); - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); return dup ? -EEXIST : 0; } @@ -854,12 +856,12 @@ static void delete_endpoint(void *data) goto out; parent = &parent_port->dev; - cxl_device_lock(parent); + device_lock(parent); if (parent->driver && endpoint->uport) { devm_release_action(parent, cxl_unlink_uport, endpoint); devm_release_action(parent, unregister_port, endpoint); } - cxl_device_unlock(parent); + device_unlock(parent); put_device(parent); out: put_device(&endpoint->dev); @@ -920,7 +922,7 @@ static void cxl_detach_ep(void *data) } parent_port = to_cxl_port(port->dev.parent); - cxl_device_lock(&parent_port->dev); + device_lock(&parent_port->dev); if (!parent_port->dev.driver) { /* * The bottom-up race to delete the port lost to a @@ -928,12 +930,12 @@ static void cxl_detach_ep(void *data) * parent_port ->remove() will have cleaned up all * descendants. */ - cxl_device_unlock(&parent_port->dev); + device_unlock(&parent_port->dev); put_device(&port->dev); continue; } - cxl_device_lock(&port->dev); + device_lock(&port->dev); ep = find_ep(port, &cxlmd->dev); dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", ep ? 
dev_name(ep->ep) : "", dev_name(&port->dev)); @@ -948,7 +950,7 @@ static void cxl_detach_ep(void *data) port->dead = true; list_splice_init(&port->dports, &reap_dports); } - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); if (!list_empty(&reap_dports)) { dev_dbg(&cxlmd->dev, "delete %s\n", @@ -956,7 +958,7 @@ static void cxl_detach_ep(void *data) delete_switch_port(port, &reap_dports); } put_device(&port->dev); - cxl_device_unlock(&parent_port->dev); + device_unlock(&parent_port->dev); } } @@ -1004,7 +1006,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, return -EAGAIN; } - cxl_device_lock(&parent_port->dev); + device_lock(&parent_port->dev); if (!parent_port->dev.driver) { dev_warn(&cxlmd->dev, "port %s:%s disabled, failed to enumerate CXL.mem\n", @@ -1022,7 +1024,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, get_device(&port->dev); } out: - cxl_device_unlock(&parent_port->dev); + device_unlock(&parent_port->dev); if (IS_ERR(port)) rc = PTR_ERR(port); @@ -1133,14 +1135,14 @@ struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port, { struct cxl_dport *dport; - cxl_device_lock(&port->dev); + device_lock(&port->dev); list_for_each_entry(dport, &port->dports, list) if (dport->dport == dev) { - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); return dport; } - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); return NULL; } EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL); @@ -1173,6 +1175,8 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, return rc; } +static struct lock_class_key cxl_decoder_key; + /** * cxl_decoder_alloc - Allocate a new CXL decoder * @port: owning port of this decoder @@ -1214,6 +1218,7 @@ static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, seqlock_init(&cxld->target_lock); dev = &cxld->dev; device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_decoder_key); device_set_pm_not_required(dev); dev->parent = &port->dev; dev->bus = &cxl_bus_type; @@ -1379,9 +1384,9 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map) port = to_cxl_port(cxld->dev.parent); - cxl_device_lock(&port->dev); + device_lock(&port->dev); rc = cxl_decoder_add_locked(cxld, target_map); - cxl_device_unlock(&port->dev); + device_unlock(&port->dev); return rc; } @@ -1452,14 +1457,7 @@ static int cxl_bus_probe(struct device *dev) { int rc; - /* - * Take the CXL nested lock since the driver core only holds - * @dev->mutex and not @dev->lockdep_mutex. - */ - cxl_nested_lock(dev); rc = to_cxl_drv(dev->driver)->probe(dev); - cxl_nested_unlock(dev); - dev_dbg(dev, "probe: %d\n", rc); return rc; } @@ -1468,10 +1466,8 @@ static void cxl_bus_remove(struct device *dev) { struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); - cxl_nested_lock(dev); if (cxl_drv->remove) cxl_drv->remove(dev); - cxl_nested_unlock(dev); } static struct workqueue_struct *cxl_bus_wq; diff --git a/drivers/cxl/core/suspend.c b/drivers/cxl/core/suspend.c new file mode 100644 index 000000000000..a5984d96ea1d --- /dev/null +++ b/drivers/cxl/core/suspend.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation. All rights reserved. 
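The new drivers/cxl/core/suspend.c that follows is just an atomic use count that lets the rest of the kernel ask whether any CXL memory is in active use before suspending. A tiny stand-alone sketch of the same gate using C11 atomics; the function names mirror the kernel ones but the main() harness is invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int mem_active;

static bool cxl_mem_active(void)     { return atomic_load(&mem_active) != 0; }
static void cxl_mem_active_inc(void) { atomic_fetch_add(&mem_active, 1); }
static void cxl_mem_active_dec(void) { atomic_fetch_sub(&mem_active, 1); }

int main(void)
{
	printf("suspend allowed: %s\n", cxl_mem_active() ? "no" : "yes");
	cxl_mem_active_inc();           /* a memdev binds */
	printf("suspend allowed: %s\n", cxl_mem_active() ? "no" : "yes");
	cxl_mem_active_dec();           /* the memdev unbinds */
	printf("suspend allowed: %s\n", cxl_mem_active() ? "no" : "yes");
	return 0;
}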
*/ +#include <linux/atomic.h> +#include <linux/export.h> +#include "cxlmem.h" + +static atomic_t mem_active; + +bool cxl_mem_active(void) +{ + return atomic_read(&mem_active) != 0; +} + +void cxl_mem_active_inc(void) +{ + atomic_inc(&mem_active); +} +EXPORT_SYMBOL_NS_GPL(cxl_mem_active_inc, CXL); + +void cxl_mem_active_dec(void) +{ + atomic_dec(&mem_active); +} +EXPORT_SYMBOL_NS_GPL(cxl_mem_active_dec, CXL); diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index 990b6670222e..140dc3278cde 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -405,82 +405,4 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd); #define __mock static #endif -#ifdef CONFIG_PROVE_CXL_LOCKING -enum cxl_lock_class { - CXL_ANON_LOCK, - CXL_NVDIMM_LOCK, - CXL_NVDIMM_BRIDGE_LOCK, - CXL_PORT_LOCK, - /* - * Be careful to add new lock classes here, CXL_PORT_LOCK is - * extended by the port depth, so a maximum CXL port topology - * depth would need to be defined first. - */ -}; - -static inline void cxl_nested_lock(struct device *dev) -{ - if (is_cxl_port(dev)) { - struct cxl_port *port = to_cxl_port(dev); - - mutex_lock_nested(&dev->lockdep_mutex, - CXL_PORT_LOCK + port->depth); - } else if (is_cxl_decoder(dev)) { - struct cxl_port *port = to_cxl_port(dev->parent); - - /* - * A decoder is the immediate child of a port, so set - * its lock class equal to other child device siblings. - */ - mutex_lock_nested(&dev->lockdep_mutex, - CXL_PORT_LOCK + port->depth + 1); - } else if (is_cxl_nvdimm_bridge(dev)) - mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_BRIDGE_LOCK); - else if (is_cxl_nvdimm(dev)) - mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_LOCK); - else - mutex_lock_nested(&dev->lockdep_mutex, CXL_ANON_LOCK); -} - -static inline void cxl_nested_unlock(struct device *dev) -{ - mutex_unlock(&dev->lockdep_mutex); -} - -static inline void cxl_device_lock(struct device *dev) -{ - /* - * For double lock errors the lockup will happen before lockdep - * warns at cxl_nested_lock(), so assert explicitly. 
- */ - lockdep_assert_not_held(&dev->lockdep_mutex); - - device_lock(dev); - cxl_nested_lock(dev); -} - -static inline void cxl_device_unlock(struct device *dev) -{ - cxl_nested_unlock(dev); - device_unlock(dev); -} -#else -static inline void cxl_nested_lock(struct device *dev) -{ -} - -static inline void cxl_nested_unlock(struct device *dev) -{ -} - -static inline void cxl_device_lock(struct device *dev) -{ - device_lock(dev); -} - -static inline void cxl_device_unlock(struct device *dev) -{ - device_unlock(dev); -} -#endif #endif /* __CXL_H__ */ diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h index 5d33ce24fe09..60d10ee1e7fc 100644 --- a/drivers/cxl/cxlmem.h +++ b/drivers/cxl/cxlmem.h @@ -85,10 +85,61 @@ struct cxl_mbox_cmd { size_t size_in; size_t size_out; u16 return_code; -#define CXL_MBOX_SUCCESS 0 }; /* + * Per CXL 2.0 Section 8.2.8.4.5.1 + */ +#define CMD_CMD_RC_TABLE \ + C(SUCCESS, 0, NULL), \ + C(BACKGROUND, -ENXIO, "background cmd started successfully"), \ + C(INPUT, -ENXIO, "cmd input was invalid"), \ + C(UNSUPPORTED, -ENXIO, "cmd is not supported"), \ + C(INTERNAL, -ENXIO, "internal device error"), \ + C(RETRY, -ENXIO, "temporary error, retry once"), \ + C(BUSY, -ENXIO, "ongoing background operation"), \ + C(MEDIADISABLED, -ENXIO, "media access is disabled"), \ + C(FWINPROGRESS, -ENXIO, "one FW package can be transferred at a time"), \ + C(FWOOO, -ENXIO, "FW package content was transferred out of order"), \ + C(FWAUTH, -ENXIO, "FW package authentication failed"), \ + C(FWSLOT, -ENXIO, "FW slot is not supported for requested operation"), \ + C(FWROLLBACK, -ENXIO, "rolled back to the previous active FW"), \ + C(FWRESET, -ENXIO, "FW failed to activate, needs cold reset"), \ + C(HANDLE, -ENXIO, "one or more Event Record Handles were invalid"), \ + C(PADDR, -ENXIO, "physical address specified is invalid"), \ + C(POISONLMT, -ENXIO, "poison injection limit has been reached"), \ + C(MEDIAFAILURE, -ENXIO, "permanent issue with the media"), \ + C(ABORT, -ENXIO, "background cmd was aborted by device"), \ + C(SECURITY, -ENXIO, "not valid in the current security state"), \ + C(PASSPHRASE, -ENXIO, "phrase doesn't match current set passphrase"), \ + C(MBUNSUPPORTED, -ENXIO, "unsupported on the mailbox it was issued on"),\ + C(PAYLOADLEN, -ENXIO, "invalid payload length") + +#undef C +#define C(a, b, c) CXL_MBOX_CMD_RC_##a +enum { CMD_CMD_RC_TABLE }; +#undef C +#define C(a, b, c) { b, c } +struct cxl_mbox_cmd_rc { + int err; + const char *desc; +}; + +static const +struct cxl_mbox_cmd_rc cxl_mbox_cmd_rctable[] ={ CMD_CMD_RC_TABLE }; +#undef C + +static inline const char *cxl_mbox_cmd_rc2str(struct cxl_mbox_cmd *mbox_cmd) +{ + return cxl_mbox_cmd_rctable[mbox_cmd->return_code].desc; +} + +static inline int cxl_mbox_cmd_rc2errno(struct cxl_mbox_cmd *mbox_cmd) +{ + return cxl_mbox_cmd_rctable[mbox_cmd->return_code].err; +} + +/* * CXL 2.0 - Memory capacity multiplier * See Section 8.2.9.5 * @@ -141,7 +192,6 @@ struct cxl_endpoint_dvsec_info { * @info: Cached DVSEC information about the device. * @serial: PCIe Device Serial Number * @mbox_send: @dev specific transport for transmitting mailbox commands - * @wait_media_ready: @dev specific method to await media ready * * See section 8.2.9.5.2 Capacity Configuration and Label Storage for * details on capacity parameters. 
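The CMD_CMD_RC_TABLE above is an X-macro: one table expands, under different definitions of C(), first into the enum of return codes and then into the parallel errno/description array, so the two can never drift apart. A self-contained sketch of the trick with a small made-up table:

#include <stdio.h>

#define RC_TABLE \
	C(SUCCESS, 0,   "ok"), \
	C(INPUT,   -22, "invalid input"), \
	C(BUSY,    -16, "device busy")

/* First expansion: enum of symbolic names. */
#define C(name, err, desc) RC_##name
enum { RC_TABLE };
#undef C

/* Second expansion: parallel table of errno + description. */
#define C(name, err, desc) { err, desc }
static const struct { int err; const char *desc; } rc_table[] = { RC_TABLE };
#undef C

int main(void)
{
	printf("RC_BUSY -> err %d, \"%s\"\n",
	       rc_table[RC_BUSY].err, rc_table[RC_BUSY].desc);
	return 0;
}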
@@ -172,11 +222,9 @@ struct cxl_dev_state { u64 next_persistent_bytes; resource_size_t component_reg_phys; - struct cxl_endpoint_dvsec_info info; u64 serial; int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd); - int (*wait_media_ready)(struct cxl_dev_state *cxlds); }; enum cxl_opcode { @@ -262,6 +310,13 @@ struct cxl_mbox_set_lsa { u8 data[]; } __packed; +struct cxl_mbox_set_partition_info { + __le64 volatile_capacity; + u8 flags; +} __packed; + +#define CXL_SET_PARTITION_IMMEDIATE_FLAG BIT(0) + /** * struct cxl_mem_command - Driver representation of a memory device command * @info: Command information as it exists for the UAPI @@ -290,11 +345,23 @@ struct cxl_mem_command { int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in, size_t in_size, void *out, size_t out_size); int cxl_dev_state_identify(struct cxl_dev_state *cxlds); +int cxl_await_media_ready(struct cxl_dev_state *cxlds); int cxl_enumerate_cmds(struct cxl_dev_state *cxlds); int cxl_mem_create_range_info(struct cxl_dev_state *cxlds); struct cxl_dev_state *cxl_dev_state_create(struct device *dev); void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds); void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds); +#ifdef CONFIG_CXL_SUSPEND +void cxl_mem_active_inc(void); +void cxl_mem_active_dec(void); +#else +static inline void cxl_mem_active_inc(void) +{ +} +static inline void cxl_mem_active_dec(void) +{ +} +#endif struct cxl_hdm { struct cxl_component_regs regs; diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h index 329e7ea3f36a..fce1c11729c2 100644 --- a/drivers/cxl/cxlpci.h +++ b/drivers/cxl/cxlpci.h @@ -72,4 +72,6 @@ static inline resource_size_t cxl_regmap_to_base(struct pci_dev *pdev, } int devm_cxl_port_enumerate_dports(struct cxl_port *port); +struct cxl_dev_state; +int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm); #endif /* __CXL_PCI_H__ */ diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c index 49a4b1c47299..c310f1fd3db0 100644 --- a/drivers/cxl/mem.c +++ b/drivers/cxl/mem.c @@ -24,27 +24,6 @@ * in higher level operations. */ -static int wait_for_media(struct cxl_memdev *cxlmd) -{ - struct cxl_dev_state *cxlds = cxlmd->cxlds; - struct cxl_endpoint_dvsec_info *info = &cxlds->info; - int rc; - - if (!info->mem_enabled) - return -EBUSY; - - rc = cxlds->wait_media_ready(cxlds); - if (rc) - return rc; - - /* - * We know the device is active, and enabled, if any ranges are non-zero - * we'll need to check later before adding the port since that owns the - * HDM decoder registers. - */ - return 0; -} - static int create_endpoint(struct cxl_memdev *cxlmd, struct cxl_port *parent_port) { @@ -67,72 +46,14 @@ static int create_endpoint(struct cxl_memdev *cxlmd, return cxl_endpoint_autoremove(cxlmd, endpoint); } -/** - * cxl_dvsec_decode_init() - Setup HDM decoding for the endpoint - * @cxlds: Device state - * - * Additionally, enables global HDM decoding. Warning: don't call this outside - * of probe. Once probe is complete, the port driver owns all access to the HDM - * decoder registers. - * - * Returns: false if DVSEC Ranges are being used instead of HDM - * decoders, or if it can not be determined if DVSEC Ranges are in use. - * Otherwise, returns true. 
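The cxlmem.h hunk above guards cxl_mem_active_inc()/cxl_mem_active_dec() behind CONFIG_CXL_SUSPEND and supplies empty static inline stubs otherwise, so callers never need an #ifdef of their own. The pattern in miniature; CONFIG_DEMO_FEATURE and the feature_* names are stand-ins, not real Kconfig symbols:

#include <stdio.h>

/* Build with -DCONFIG_DEMO_FEATURE for the real implementation; without it
 * the calls compile down to empty inlines and a constant. */
#ifdef CONFIG_DEMO_FEATURE
static int feature_count;
static inline void feature_inc(void)    { feature_count++; }
static inline int  feature_active(void) { return feature_count; }
#else
static inline void feature_inc(void)    { }
static inline int  feature_active(void) { return 0; }
#endif

int main(void)
{
	feature_inc();                  /* always legal to call */
	printf("active: %d\n", feature_active());
	return 0;
}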
- */ -__mock bool cxl_dvsec_decode_init(struct cxl_dev_state *cxlds) +static void enable_suspend(void *data) { - struct cxl_endpoint_dvsec_info *info = &cxlds->info; - struct cxl_register_map map; - struct cxl_component_reg_map *cmap = &map.component_map; - bool global_enable, do_hdm_init = false; - void __iomem *crb; - u32 global_ctrl; - - /* map hdm decoder */ - crb = ioremap(cxlds->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); - if (!crb) { - dev_dbg(cxlds->dev, "Failed to map component registers\n"); - return false; - } - - cxl_probe_component_regs(cxlds->dev, crb, cmap); - if (!cmap->hdm_decoder.valid) { - dev_dbg(cxlds->dev, "Invalid HDM decoder registers\n"); - goto out; - } - - global_ctrl = readl(crb + cmap->hdm_decoder.offset + - CXL_HDM_DECODER_CTRL_OFFSET); - global_enable = global_ctrl & CXL_HDM_DECODER_ENABLE; - if (!global_enable && info->ranges) { - dev_dbg(cxlds->dev, - "DVSEC ranges already programmed and HDM decoders not enabled.\n"); - goto out; - } - - do_hdm_init = true; - - /* - * Permanently (for this boot at least) opt the device into HDM - * operation. Individual HDM decoders still need to be enabled after - * this point. - */ - if (!global_enable) { - dev_dbg(cxlds->dev, "Enabling HDM decode\n"); - writel(global_ctrl | CXL_HDM_DECODER_ENABLE, - crb + cmap->hdm_decoder.offset + - CXL_HDM_DECODER_CTRL_OFFSET); - } - -out: - iounmap(crb); - return do_hdm_init; + cxl_mem_active_dec(); } static int cxl_mem_probe(struct device *dev) { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); - struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_port *parent_port; int rc; @@ -147,44 +68,6 @@ static int cxl_mem_probe(struct device *dev) if (work_pending(&cxlmd->detach_work)) return -EBUSY; - rc = wait_for_media(cxlmd); - if (rc) { - dev_err(dev, "Media not active (%d)\n", rc); - return rc; - } - - /* - * If DVSEC ranges are being used instead of HDM decoder registers there - * is no use in trying to manage those. - */ - if (!cxl_dvsec_decode_init(cxlds)) { - struct cxl_endpoint_dvsec_info *info = &cxlds->info; - int i; - - /* */ - for (i = 0; i < 2; i++) { - u64 base, size; - - /* - * Give a nice warning to the user that BIOS has really - * botched things for them if it didn't place DVSEC - * ranges in the memory map. - */ - base = info->dvsec_range[i].start; - size = range_len(&info->dvsec_range[i]); - if (size && !region_intersects(base, size, - IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE)) { - dev_err(dev, - "DVSEC range %#llx-%#llx must be reserved by BIOS, but isn't\n", - base, base + size - 1); - } - } - dev_err(dev, - "Active DVSEC range registers in use. 
Will not bind.\n"); - return -EBUSY; - } - rc = devm_cxl_enumerate_ports(cxlmd); if (rc) return rc; @@ -195,19 +78,36 @@ static int cxl_mem_probe(struct device *dev) return -ENXIO; } - cxl_device_lock(&parent_port->dev); + device_lock(&parent_port->dev); if (!parent_port->dev.driver) { dev_err(dev, "CXL port topology %s not enabled\n", dev_name(&parent_port->dev)); rc = -ENXIO; - goto out; + goto unlock; } rc = create_endpoint(cxlmd, parent_port); -out: - cxl_device_unlock(&parent_port->dev); +unlock: + device_unlock(&parent_port->dev); put_device(&parent_port->dev); - return rc; + if (rc) + return rc; + + /* + * The kernel may be operating out of CXL memory on this device, + * there is no spec defined way to determine whether this device + * preserves contents over suspend, and there is no simple way + * to arrange for the suspend image to avoid CXL memory which + * would setup a circular dependency between PCI resume and save + * state restoration. + * + * TODO: support suspend when all the regions this device is + * hosting are locked and covered by the system address map, + * i.e. platform firmware owns restoring the HDM configuration + * that it locked. + */ + cxl_mem_active_inc(); + return devm_add_action_or_reset(dev, enable_suspend, NULL); } static struct cxl_driver cxl_mem_driver = { diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c index 3f2182d66829..5a0ae46d4989 100644 --- a/drivers/cxl/pci.c +++ b/drivers/cxl/pci.c @@ -48,8 +48,7 @@ */ static unsigned short mbox_ready_timeout = 60; module_param(mbox_ready_timeout, ushort, 0644); -MODULE_PARM_DESC(mbox_ready_timeout, - "seconds to wait for mailbox ready / memory active status"); +MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready"); static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds) { @@ -177,9 +176,10 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds, mbox_cmd->return_code = FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg); - if (mbox_cmd->return_code != 0) { - dev_dbg(dev, "Mailbox operation had an error\n"); - return 0; + if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) { + dev_dbg(dev, "Mailbox operation had an error: %s\n", + cxl_mbox_cmd_rc2str(mbox_cmd)); + return 0; /* completed but caller must check return_code */ } /* #7 */ @@ -386,164 +386,6 @@ static int cxl_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type, return rc; } -static int wait_for_valid(struct cxl_dev_state *cxlds) -{ - struct pci_dev *pdev = to_pci_dev(cxlds->dev); - int d = cxlds->cxl_dvsec, rc; - u32 val; - - /* - * Memory_Info_Valid: When set, indicates that the CXL Range 1 Size high - * and Size Low registers are valid. Must be set within 1 second of - * deassertion of reset to CXL device. Likely it is already set by the - * time this runs, but otherwise give a 1.5 second timeout in case of - * clock skew. - */ - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - msleep(1500); - - rc = pci_read_config_dword(pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &val); - if (rc) - return rc; - - if (val & CXL_DVSEC_MEM_INFO_VALID) - return 0; - - return -ETIMEDOUT; -} - -/* - * Wait up to @mbox_ready_timeout for the device to report memory - * active. 
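cxl_await_media_ready() earlier in this patch and the wait_for_media_ready() being removed here share the same shape: poll a status bit once a second, for a module-parameter number of seconds, then give up with -ETIMEDOUT. A user-space sketch of that bounded polling loop; poll_status() is an invented stand-in for reading CXL_DVSEC_MEM_ACTIVE out of config space:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls;

/* Fake status source: pretend the device turns ready on the third poll. */
static bool poll_status(void)
{
	return ++polls >= 3;
}

static int await_ready(int timeout_secs)
{
	int i;

	for (i = timeout_secs; i; i--) {
		if (poll_status())
			return 0;
		sleep(1);               /* the driver uses msleep(1000) */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	int rc = await_ready(60);

	printf("await_ready: %d after %d polls\n", rc, polls);
	return 0;
}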
- */ -static int wait_for_media_ready(struct cxl_dev_state *cxlds) -{ - struct pci_dev *pdev = to_pci_dev(cxlds->dev); - int d = cxlds->cxl_dvsec; - bool active = false; - u64 md_status; - int rc, i; - - rc = wait_for_valid(cxlds); - if (rc) - return rc; - - for (i = mbox_ready_timeout; i; i--) { - u32 temp; - - rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp); - if (rc) - return rc; - - active = FIELD_GET(CXL_DVSEC_MEM_ACTIVE, temp); - if (active) - break; - msleep(1000); - } - - if (!active) { - dev_err(&pdev->dev, - "timeout awaiting memory active after %d seconds\n", - mbox_ready_timeout); - return -ETIMEDOUT; - } - - md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET); - if (!CXLMDEV_READY(md_status)) - return -EIO; - - return 0; -} - -static int cxl_dvsec_ranges(struct cxl_dev_state *cxlds) -{ - struct cxl_endpoint_dvsec_info *info = &cxlds->info; - struct pci_dev *pdev = to_pci_dev(cxlds->dev); - int d = cxlds->cxl_dvsec; - int hdm_count, rc, i; - u16 cap, ctrl; - - if (!d) - return -ENXIO; - - rc = pci_read_config_word(pdev, d + CXL_DVSEC_CAP_OFFSET, &cap); - if (rc) - return rc; - - rc = pci_read_config_word(pdev, d + CXL_DVSEC_CTRL_OFFSET, &ctrl); - if (rc) - return rc; - - if (!(cap & CXL_DVSEC_MEM_CAPABLE)) - return -ENXIO; - - /* - * It is not allowed by spec for MEM.capable to be set and have 0 legacy - * HDM decoders (values > 2 are also undefined as of CXL 2.0). As this - * driver is for a spec defined class code which must be CXL.mem - * capable, there is no point in continuing to enable CXL.mem. - */ - hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap); - if (!hdm_count || hdm_count > 2) - return -EINVAL; - - rc = wait_for_valid(cxlds); - if (rc) - return rc; - - info->mem_enabled = FIELD_GET(CXL_DVSEC_MEM_ENABLE, ctrl); - - for (i = 0; i < hdm_count; i++) { - u64 base, size; - u32 temp; - - rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_SIZE_HIGH(i), &temp); - if (rc) - return rc; - - size = (u64)temp << 32; - - rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(i), &temp); - if (rc) - return rc; - - size |= temp & CXL_DVSEC_MEM_SIZE_LOW_MASK; - - rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_BASE_HIGH(i), &temp); - if (rc) - return rc; - - base = (u64)temp << 32; - - rc = pci_read_config_dword( - pdev, d + CXL_DVSEC_RANGE_BASE_LOW(i), &temp); - if (rc) - return rc; - - base |= temp & CXL_DVSEC_MEM_BASE_LOW_MASK; - - info->dvsec_range[i] = (struct range) { - .start = base, - .end = base + size - 1 - }; - - if (size) - info->ranges++; - } - - return 0; -} - static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct cxl_register_map map; @@ -573,8 +415,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_warn(&pdev->dev, "Device DVSEC not present, skip CXL.mem init\n"); - cxlds->wait_media_ready = wait_for_media_ready; - rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map); if (rc) return rc; @@ -610,11 +450,6 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (rc) return rc; - rc = cxl_dvsec_ranges(cxlds); - if (rc) - dev_warn(&pdev->dev, - "Failed to get DVSEC range information (%d)\n", rc); - cxlmd = devm_cxl_add_memdev(cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 15ad666ab03e..bbeef91e637e 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -43,7 +43,7 @@ static int cxl_nvdimm_probe(struct device *dev) if (!cxl_nvb) return 
-ENXIO; - cxl_device_lock(&cxl_nvb->dev); + device_lock(&cxl_nvb->dev); if (!cxl_nvb->nvdimm_bus) { rc = -ENXIO; goto out; @@ -68,7 +68,7 @@ static int cxl_nvdimm_probe(struct device *dev) dev_set_drvdata(dev, nvdimm); rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm); out: - cxl_device_unlock(&cxl_nvb->dev); + device_unlock(&cxl_nvb->dev); put_device(&cxl_nvb->dev); return rc; @@ -233,7 +233,7 @@ static void cxl_nvb_update_state(struct work_struct *work) struct nvdimm_bus *victim_bus = NULL; bool release = false, rescan = false; - cxl_device_lock(&cxl_nvb->dev); + device_lock(&cxl_nvb->dev); switch (cxl_nvb->state) { case CXL_NVB_ONLINE: if (!online_nvdimm_bus(cxl_nvb)) { @@ -251,7 +251,7 @@ static void cxl_nvb_update_state(struct work_struct *work) default: break; } - cxl_device_unlock(&cxl_nvb->dev); + device_unlock(&cxl_nvb->dev); if (release) device_release_driver(&cxl_nvb->dev); @@ -327,9 +327,9 @@ static int cxl_nvdimm_bridge_reset(struct device *dev, void *data) return 0; cxl_nvb = to_cxl_nvdimm_bridge(dev); - cxl_device_lock(dev); + device_lock(dev); cxl_nvb->state = CXL_NVB_NEW; - cxl_device_unlock(dev); + device_unlock(dev); return 0; } @@ -344,7 +344,6 @@ static __init int cxl_pmem_init(void) { int rc; - set_bit(CXL_MEM_COMMAND_ID_SET_PARTITION_INFO, exclusive_cmds); set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds); set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds); diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c index d420da5fc39c..3cf308f114c4 100644 --- a/drivers/cxl/port.c +++ b/drivers/cxl/port.c @@ -36,14 +36,8 @@ static int cxl_port_probe(struct device *dev) struct cxl_hdm *cxlhdm; int rc; - if (is_cxl_endpoint(port)) { - struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); - get_device(&cxlmd->dev); - rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd); - if (rc) - return rc; - } else { + if (!is_cxl_endpoint(port)) { rc = devm_cxl_port_enumerate_dports(port); if (rc < 0) return rc; @@ -55,6 +49,26 @@ static int cxl_port_probe(struct device *dev) if (IS_ERR(cxlhdm)) return PTR_ERR(cxlhdm); + if (is_cxl_endpoint(port)) { + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + get_device(&cxlmd->dev); + rc = devm_add_action_or_reset(dev, schedule_detach, cxlmd); + if (rc) + return rc; + + rc = cxl_hdm_decode_init(cxlds, cxlhdm); + if (rc) + return rc; + + rc = cxl_await_media_ready(cxlds); + if (rc) { + dev_err(dev, "Media not active (%d)\n", rc); + return rc; + } + } + rc = devm_cxl_enumerate_decoders(cxlhdm); if (rc) { dev_err(dev, "Couldn't enumerate decoders (%d)\n", rc); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0211e6f7b47a..50a08b2ec247 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -117,6 +117,7 @@ enum dax_device_flags { * @dax_dev: a dax_device instance representing the logical memory range * @pgoff: offset in pages from the start of the device to translate * @nr_pages: number of consecutive pages caller can handle relative to @pfn + * @mode: indicator on normal access or recovery write * @kaddr: output parameter that returns a virtual address mapping of pfn * @pfn: output parameter that returns an absolute pfn translation of @pgoff * @@ -124,7 +125,7 @@ enum dax_device_flags { * pages accessible at the device relative @pgoff. 
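The DAX hunks that follow add a mode argument to dax_direct_access() and a new dax_recovery_write() that simply returns 0 when a driver does not provide the op. A small sketch of that optional-callback dispatch pattern; struct dev_ops, do_recovery_write() and pmem_recovery_write() are illustrative names, not the real struct dax_operations:

#include <stddef.h>
#include <stdio.h>

struct dev_ops {
	/* Optional: drivers that cannot recover simply leave this NULL. */
	size_t (*recovery_write)(void *addr, size_t bytes);
};

static size_t do_recovery_write(const struct dev_ops *ops,
				void *addr, size_t bytes)
{
	if (!ops->recovery_write)
		return 0;               /* graceful "nothing recovered" fallback */
	return ops->recovery_write(addr, bytes);
}

static size_t pmem_recovery_write(void *addr, size_t bytes)
{
	(void)addr;
	return bytes;                   /* pretend the whole range was repaired */
}

int main(void)
{
	char buf[64];
	struct dev_ops plain = { 0 };
	struct dev_ops pmem  = { .recovery_write = pmem_recovery_write };

	printf("plain: %zu bytes\n", do_recovery_write(&plain, buf, sizeof(buf)));
	printf("pmem:  %zu bytes\n", do_recovery_write(&pmem, buf, sizeof(buf)));
	return 0;
}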
*/ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, - void **kaddr, pfn_t *pfn) + enum dax_access_mode mode, void **kaddr, pfn_t *pfn) { long avail; @@ -138,7 +139,7 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, return -EINVAL; avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages, - kaddr, pfn); + mode, kaddr, pfn); if (!avail) return -ERANGE; return min(avail, nr_pages); @@ -194,6 +195,15 @@ int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, } EXPORT_SYMBOL_GPL(dax_zero_page_range); +size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *iter) +{ + if (!dax_dev->ops->recovery_write) + return 0; + return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter); +} +EXPORT_SYMBOL_GPL(dax_recovery_write); + #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d5de3f77d3aa..487ed4ddc3be 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -163,7 +163,7 @@ config DMA_SUN4I config DMA_SUN6I tristate "Allwinner A31 SoCs DMA support" - depends on MACH_SUN6I || MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST + depends on ARCH_SUNXI || COMPILE_TEST depends on RESET_CONTROLLER select DMA_ENGINE select DMA_VIRTUAL_CHANNELS @@ -629,6 +629,18 @@ config TXX9_DMAC Support the TXx9 SoC internal DMA controller. This can be integrated in chips such as the Toshiba TX4927/38/39. +config TEGRA186_GPC_DMA + tristate "NVIDIA Tegra GPC DMA support" + depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT + depends on IOMMU_API + select DMA_ENGINE + help + Support for the NVIDIA Tegra General Purpose Central DMA controller. + The DMA controller has multiple DMA channels which can be configured + for different peripherals like UART, SPI, etc which are on APB bus. + This DMA controller transfers data from memory to peripheral FIFO + or vice versa. It also supports memory to memory data transfer. + config TEGRA20_APB_DMA tristate "NVIDIA Tegra20 APB DMA support" depends on ARCH_TEGRA || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 616d926cf2a5..2f1b87ffd7ab 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o obj-$(CONFIG_SPRD_DMA) += sprd-dma.o obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o +obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index a24882ba3764..a4a794e62ac2 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1535,14 +1535,6 @@ static void pl08x_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(to_virt_chan(chan)); } -static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( - struct dma_chan *chan, unsigned long flags) -{ - struct dma_async_tx_descriptor *retval = NULL; - - return retval; -} - /* * Code accessing dma_async_is_complete() in a tight loop may give problems. 
* If slaves are relying on interrupts to signal completion this function @@ -2760,7 +2752,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.dev = &adev->dev; pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; - pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; pl08x->memcpy.device_issue_pending = pl08x_issue_pending; pl08x->memcpy.device_config = pl08x_config; @@ -2787,8 +2778,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.dev = &adev->dev; pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; - pl08x->slave.device_prep_dma_interrupt = - pl08x_prep_dma_interrupt; pl08x->slave.device_tx_status = pl08x_dma_tx_status; pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 30ae36124b1d..5a50423b7378 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -942,6 +942,7 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, struct at_desc *desc; void __iomem *vaddr; dma_addr_t paddr; + char fill_pattern; dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__, &dest, value, len, flags); @@ -963,7 +964,14 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, __func__); return NULL; } - *(u32*)vaddr = value; + + /* Only the first byte of value is to be used according to dmaengine */ + fill_pattern = (char)value; + + *(u32*)vaddr = (fill_pattern << 24) | + (fill_pattern << 16) | + (fill_pattern << 8) | + fill_pattern; desc = atc_create_memset_desc(chan, paddr, dest, len); if (!desc) { diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index def564d1e8fa..3e9d726504e2 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1202,6 +1202,7 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, unsigned long flags; size_t ublen; u32 dwidth; + char pattern; /* * WARNING: The channel configuration is set here since there is no * dmaengine_slave_config call in this case. 
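The at_hdmac change above and the matching at_xdmac change just below both replicate the low byte of the dmaengine memset value argument across a 32-bit word before handing it to the controller, since only the first byte of the value is meaningful to prep_dma_memset(). The idiom in isolation; memset_fill_word() is a made-up helper name:

#include <stdint.h>
#include <stdio.h>

/* Only the low byte of 'value' is defined by the dmaengine API, so a
 * controller that fills a word at a time needs it in every byte lane. */
static uint32_t memset_fill_word(int value)
{
	uint8_t pattern = (uint8_t)value;

	return ((uint32_t)pattern << 24) |
	       ((uint32_t)pattern << 16) |
	       ((uint32_t)pattern << 8)  |
	       pattern;
}

int main(void)
{
	printf("0x%08x\n", memset_fill_word(0xab));   /* 0xabababab */
	printf("0x%08x\n", memset_fill_word(0x1234)); /* only 0x34 used: 0x34343434 */
	return 0;
}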
Moreover we don't know the @@ -1244,10 +1245,16 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); + /* Only the first byte of value is to be used according to dmaengine */ + pattern = (char)value; + ublen = len >> dwidth; desc->lld.mbr_da = dst_addr; - desc->lld.mbr_ds = value; + desc->lld.mbr_ds = (pattern << 24) | + (pattern << 16) | + (pattern << 8) | + pattern; desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 | AT_XDMAC_MBR_UBC_NDEN | AT_XDMAC_MBR_UBC_NSEN diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c index 8c42e5ca00a9..1822a7034630 100644 --- a/drivers/dma/bestcomm/bestcomm.c +++ b/drivers/dma/bestcomm/bestcomm.c @@ -17,7 +17,9 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/irq.h> diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index fc513eb2b289..e2ec540e6519 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -8,6 +8,7 @@ #include <linux/clk.h> #include <linux/dmapool.h> +#include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> @@ -911,6 +912,14 @@ static int jz4780_dma_probe(struct platform_device *pdev) dd = &jzdma->dma_device; + /* + * The real segment size limit is dependent on the size unit selected + * for the transfer. Because the size unit is selected automatically + * and may be as small as 1 byte, use a safe limit of 2^24-1 bytes to + * ensure the 24-bit transfer count in the descriptor cannot overflow. + */ + dma_set_max_seg_size(dev, 0xffffff); + dma_cap_set(DMA_MEMCPY, dd->cap_mask); dma_cap_set(DMA_SLAVE, dd->cap_mask); dma_cap_set(DMA_CYCLIC, dd->cap_mask); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 2cfa8458b51b..e80feeea0e01 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1053,9 +1053,7 @@ static int __dma_async_device_channel_register(struct dma_device *device, * When the chan_id is a negative value, we are dynamically adding * the channel. Otherwise we are static enumerating. */ - mutex_lock(&device->chan_mutex); chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); - mutex_unlock(&device->chan_mutex); if (chan->chan_id < 0) { pr_err("%s: unable to alloc ida for chan: %d\n", __func__, chan->chan_id); @@ -1078,9 +1076,7 @@ static int __dma_async_device_channel_register(struct dma_device *device, return 0; err_out_ida: - mutex_lock(&device->chan_mutex); ida_free(&device->chan_ida, chan->chan_id); - mutex_unlock(&device->chan_mutex); err_free_dev: kfree(chan->dev); err_free_local: @@ -1113,9 +1109,7 @@ static void __dma_async_device_channel_unregister(struct dma_device *device, device->chancnt--; chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); - mutex_lock(&device->chan_mutex); ida_free(&device->chan_ida, chan->chan_id); - mutex_unlock(&device->chan_mutex); device_unregister(&chan->dev->device); free_percpu(chan->local); } @@ -1250,7 +1244,6 @@ int dma_async_device_register(struct dma_device *device) if (rc != 0) return rc; - mutex_init(&device->chan_mutex); ida_init(&device->chan_ida); /* represent channels in sysfs. 
Probably want devs too */ diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index f696246f57fd..0a2168a4ccb0 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -675,10 +675,16 @@ static int dmatest_func(void *data) /* * src and dst buffers are freed by ourselves below */ - if (params->polled) + if (params->polled) { flags = DMA_CTRL_ACK; - else - flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + } else { + if (dma_has_cap(DMA_INTERRUPT, dev->cap_mask)) { + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; + } else { + pr_err("Channel does not support interrupt!\n"); + goto err_pq_array; + } + } ktime = ktime_get(); while (!(kthread_should_stop() || @@ -906,6 +912,7 @@ error_unmap_continue: runtime = ktime_to_us(ktime); ret = 0; +err_pq_array: kfree(dma_pq); err_srcs_array: kfree(srcs); diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index db25f9b7778c..a9828ddd6d06 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig @@ -16,6 +16,15 @@ config DW_DMAC Support the Synopsys DesignWare AHB DMA controller. This can be integrated in chips such as the Intel Cherrytrail. +config RZN1_DMAMUX + tristate "Renesas RZ/N1 DMAMUX driver" + depends on DW_DMAC + depends on ARCH_RZN1 || COMPILE_TEST + help + Support the Renesas RZ/N1 DMAMUX which is located in front of + the Synopsys DesignWare AHB DMA controller located on Renesas + SoCs. + config DW_DMAC_PCI tristate "Synopsys DesignWare AHB DMA PCI driver" depends on PCI diff --git a/drivers/dma/dw/Makefile b/drivers/dma/dw/Makefile index a6f358ad8591..e1796015f213 100644 --- a/drivers/dma/dw/Makefile +++ b/drivers/dma/dw/Makefile @@ -9,3 +9,5 @@ dw_dmac-$(CONFIG_OF) += of.o obj-$(CONFIG_DW_DMAC_PCI) += dw_dmac_pci.o dw_dmac_pci-y := pci.o + +obj-$(CONFIG_RZN1_DMAMUX) += rzn1-dmamux.o diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index 246118955877..47f2292dba98 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -137,6 +137,7 @@ static void dw_shutdown(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,dma-spear1340", .data = &dw_dma_chip_pdata }, + { .compatible = "renesas,rzn1-dma", .data = &dw_dma_chip_pdata }, {} }; MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c new file mode 100644 index 000000000000..11d254e450b0 --- /dev/null +++ b/drivers/dma/dw/rzn1-dmamux.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 Schneider-Electric + * Author: Miquel Raynal <miquel.raynal@bootlin.com + * Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com> + */ +#include <linux/bitops.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/slab.h> +#include <linux/soc/renesas/r9a06g032-sysctrl.h> +#include <linux/types.h> + +#define RNZ1_DMAMUX_NCELLS 6 +#define RZN1_DMAMUX_MAX_LINES 64 +#define RZN1_DMAMUX_LINES_PER_CTLR 16 + +struct rzn1_dmamux_data { + struct dma_router dmarouter; + DECLARE_BITMAP(used_chans, 2 * RZN1_DMAMUX_LINES_PER_CTLR); +}; + +struct rzn1_dmamux_map { + unsigned int req_idx; +}; + +static void rzn1_dmamux_free(struct device *dev, void *route_data) +{ + struct rzn1_dmamux_data *dmamux = dev_get_drvdata(dev); + struct rzn1_dmamux_map *map = route_data; + + dev_dbg(dev, "Unmapping DMAMUX request %u\n", map->req_idx); + + clear_bit(map->req_idx, dmamux->used_chans); + + kfree(map); +} + +static void *rzn1_dmamux_route_allocate(struct 
of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct rzn1_dmamux_data *dmamux = platform_get_drvdata(pdev); + struct rzn1_dmamux_map *map; + unsigned int dmac_idx, chan, val; + u32 mask; + int ret; + + if (dma_spec->args_count != RNZ1_DMAMUX_NCELLS) + return ERR_PTR(-EINVAL); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return ERR_PTR(-ENOMEM); + + chan = dma_spec->args[0]; + map->req_idx = dma_spec->args[4]; + val = dma_spec->args[5]; + dma_spec->args_count -= 2; + + if (chan >= RZN1_DMAMUX_LINES_PER_CTLR) { + dev_err(&pdev->dev, "Invalid DMA request line: %u\n", chan); + ret = -EINVAL; + goto free_map; + } + + if (map->req_idx >= RZN1_DMAMUX_MAX_LINES || + (map->req_idx % RZN1_DMAMUX_LINES_PER_CTLR) != chan) { + dev_err(&pdev->dev, "Invalid MUX request line: %u\n", map->req_idx); + ret = -EINVAL; + goto free_map; + } + + dmac_idx = map->req_idx >= RZN1_DMAMUX_LINES_PER_CTLR ? 1 : 0; + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", dmac_idx); + if (!dma_spec->np) { + dev_err(&pdev->dev, "Can't get DMA master\n"); + ret = -EINVAL; + goto free_map; + } + + dev_dbg(&pdev->dev, "Mapping DMAMUX request %u to DMAC%u request %u\n", + map->req_idx, dmac_idx, chan); + + if (test_and_set_bit(map->req_idx, dmamux->used_chans)) { + ret = -EBUSY; + goto free_map; + } + + mask = BIT(map->req_idx); + ret = r9a06g032_sysctrl_set_dmamux(mask, val ? mask : 0); + if (ret) + goto clear_bitmap; + + return map; + +clear_bitmap: + clear_bit(map->req_idx, dmamux->used_chans); +free_map: + kfree(map); + + return ERR_PTR(ret); +} + +static const struct of_device_id rzn1_dmac_match[] = { + { .compatible = "renesas,rzn1-dma" }, + {} +}; + +static int rzn1_dmamux_probe(struct platform_device *pdev) +{ + struct device_node *mux_node = pdev->dev.of_node; + const struct of_device_id *match; + struct device_node *dmac_node; + struct rzn1_dmamux_data *dmamux; + + dmamux = devm_kzalloc(&pdev->dev, sizeof(*dmamux), GFP_KERNEL); + if (!dmamux) + return -ENOMEM; + + dmac_node = of_parse_phandle(mux_node, "dma-masters", 0); + if (!dmac_node) + return dev_err_probe(&pdev->dev, -ENODEV, "Can't get DMA master node\n"); + + match = of_match_node(rzn1_dmac_match, dmac_node); + of_node_put(dmac_node); + if (!match) + return dev_err_probe(&pdev->dev, -EINVAL, "DMA master is not supported\n"); + + dmamux->dmarouter.dev = &pdev->dev; + dmamux->dmarouter.route_free = rzn1_dmamux_free; + + platform_set_drvdata(pdev, dmamux); + + return of_dma_router_register(mux_node, rzn1_dmamux_route_allocate, + &dmamux->dmarouter); +} + +static const struct of_device_id rzn1_dmamux_match[] = { + { .compatible = "renesas,rzn1-dmamux" }, + {} +}; + +static struct platform_driver rzn1_dmamux_driver = { + .driver = { + .name = "renesas,rzn1-dmamux", + .of_match_table = rzn1_dmamux_match, + }, + .probe = rzn1_dmamux_probe, +}; +module_platform_driver(rzn1_dmamux_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com"); +MODULE_DESCRIPTION("Renesas RZ/N1 DMAMUX driver"); diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 98f9ee70362e..971ff5f9ae84 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -132,7 +132,7 @@ struct ep93xx_dma_desc { /** * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel * @chan: dmaengine API channel - * @edma: pointer to to the engine device + * @edma: pointer to the engine device * @regs: memory mapped registers * @irq: interrupt number of 
the channel * @clk: clock used by this channel diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index b9b2b4a4124e..c2808fd081d6 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -99,7 +99,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) ctx->wq = wq; filp->private_data = ctx; - if (device_pasid_enabled(idxd)) { + if (device_user_pasid_enabled(idxd)) { sva = iommu_sva_bind_device(dev, current->mm, NULL); if (IS_ERR(sva)) { rc = PTR_ERR(sva); @@ -152,7 +152,7 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) if (wq_shared(wq)) { idxd_device_drain_pasid(idxd, ctx->pasid); } else { - if (device_pasid_enabled(idxd)) { + if (device_user_pasid_enabled(idxd)) { /* The wq disable in the disable pasid function will drain the wq */ rc = idxd_wq_disable_pasid(wq); if (rc < 0) @@ -314,7 +314,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_USER; - rc = __drv_enable_wq(wq); + rc = drv_enable_wq(wq); if (rc < 0) goto err; @@ -329,7 +329,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) return 0; err_cdev: - __drv_disable_wq(wq); + drv_disable_wq(wq); err: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); @@ -342,7 +342,7 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); idxd_wq_del_cdev(wq); - __drv_disable_wq(wq); + drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); } @@ -369,10 +369,16 @@ int idxd_cdev_register(void) rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, ictx[i].name); if (rc) - return rc; + goto err_free_chrdev_region; } return 0; + +err_free_chrdev_region: + for (i--; i >= 0; i--) + unregister_chrdev_region(ictx[i].devt, MINORMASK); + + return rc; } void idxd_cdev_remove(void) diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index f652da6ab47d..ff0ea60051f0 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -184,7 +184,7 @@ int idxd_wq_enable(struct idxd_wq *wq) if (wq->state == IDXD_WQ_ENABLED) { dev_dbg(dev, "WQ %d already enabled\n", wq->id); - return -ENXIO; + return 0; } idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status); @@ -299,24 +299,46 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd) } } -int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) +static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv) { struct idxd_device *idxd = wq->idxd; - int rc; union wqcfg wqcfg; unsigned int offset; - rc = idxd_wq_disable(wq, false); - if (rc < 0) - return rc; + offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX); + spin_lock(&idxd->dev_lock); + wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset); + wqcfg.priv = priv; + wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX]; + iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset); + spin_unlock(&idxd->dev_lock); +} + +static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid) +{ + struct idxd_device *idxd = wq->idxd; + union wqcfg wqcfg; + unsigned int offset; offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX); spin_lock(&idxd->dev_lock); wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset); wqcfg.pasid_en = 1; wqcfg.pasid = pasid; + wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX]; iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset); spin_unlock(&idxd->dev_lock); +} + +int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid) +{ + int rc; + + rc = idxd_wq_disable(wq, false); + if (rc < 
0) + return rc; + + __idxd_wq_set_pasid_locked(wq, pasid); rc = idxd_wq_enable(wq); if (rc < 0) @@ -555,19 +577,15 @@ int idxd_device_disable(struct idxd_device *idxd) return -ENXIO; } - spin_lock(&idxd->dev_lock); idxd_device_clear_state(idxd); - idxd->state = IDXD_DEV_DISABLED; - spin_unlock(&idxd->dev_lock); return 0; } void idxd_device_reset(struct idxd_device *idxd) { idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL); - spin_lock(&idxd->dev_lock); idxd_device_clear_state(idxd); - idxd->state = IDXD_DEV_DISABLED; + spin_lock(&idxd->dev_lock); idxd_unmask_error_interrupts(idxd); spin_unlock(&idxd->dev_lock); } @@ -694,15 +712,16 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) { int i; - lockdep_assert_held(&idxd->dev_lock); for (i = 0; i < idxd->max_wqs; i++) { struct idxd_wq *wq = idxd->wqs[i]; + mutex_lock(&wq->wq_lock); if (wq->state == IDXD_WQ_ENABLED) { idxd_wq_disable_cleanup(wq); wq->state = IDXD_WQ_DISABLED; } idxd_wq_device_reset_cleanup(wq); + mutex_unlock(&wq->wq_lock); } } @@ -711,9 +730,12 @@ void idxd_device_clear_state(struct idxd_device *idxd) if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) return; + idxd_device_wqs_clear_state(idxd); + spin_lock(&idxd->dev_lock); idxd_groups_clear_state(idxd); idxd_engines_clear_state(idxd); - idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_DISABLED; + spin_unlock(&idxd->dev_lock); } static void idxd_group_config_write(struct idxd_group *group) @@ -799,7 +821,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq) */ for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wq_offset = WQCFG_OFFSET(idxd, wq->id, i); - wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset); + wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset); } if (wq->size == 0 && wq->type != IDXD_WQT_NONE) @@ -815,14 +837,8 @@ static int idxd_wq_config_write(struct idxd_wq *wq) if (wq_dedicated(wq)) wq->wqcfg->mode = 1; - if (device_pasid_enabled(idxd)) { - wq->wqcfg->pasid_en = 1; - if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq)) - wq->wqcfg->pasid = idxd->pasid; - } - /* - * Here the priv bit is set depending on the WQ type. priv = 1 if the + * The WQ priv bit is set depending on the WQ type. priv = 1 if the * WQ type is kernel to indicate privileged access. This setting only * matters for dedicated WQ. According to the DSA spec: * If the WQ is in dedicated mode, WQ PASID Enable is 1, and the @@ -832,7 +848,6 @@ static int idxd_wq_config_write(struct idxd_wq *wq) * In the case of a dedicated kernel WQ that is not able to support * the PASID cap, then the configuration will be rejected. 
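The __idxd_wq_set_priv_locked()/__idxd_wq_set_pasid_locked() helpers added above follow the classic recipe for touching one field of a live configuration register: take the device lock, read the current word, change only the bits of interest, mirror the result into the driver's shadow copy, and write it back. A user-space sketch of that read-modify-write discipline; the lock, register and shadow word here are simulated, not the WQCFG layout:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define PRIV_BIT  (1u << 0)
#define PASID_EN  (1u << 1)

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t hw_reg = PASID_EN;  /* stand-in for ioread32/iowrite32 */
static uint32_t shadow;             /* driver's cached copy of the word */

static void set_priv_locked(int priv)
{
	uint32_t val;

	pthread_mutex_lock(&dev_lock);
	val = hw_reg;                   /* read */
	if (priv)
		val |= PRIV_BIT;        /* modify only the priv bit */
	else
		val &= ~PRIV_BIT;
	shadow = val;                   /* keep the cached copy in sync */
	hw_reg = val;                   /* write back */
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	set_priv_locked(1);
	printf("reg=0x%x shadow=0x%x\n", (unsigned)hw_reg, (unsigned)shadow);
	return 0;
}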
*/ - wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL); if (wq_dedicated(wq) && wq->wqcfg->pasid_en && !idxd_device_pasid_priv_enabled(idxd) && wq->type == IDXD_WQT_KERNEL) { @@ -953,7 +968,7 @@ static int idxd_wqs_setup(struct idxd_device *idxd) if (!wq->group) continue; - if (wq_shared(wq) && !device_swq_supported(idxd)) { + if (wq_shared(wq) && !wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT; dev_warn(dev, "No shared wq support but configured.\n"); return -EINVAL; @@ -1018,6 +1033,9 @@ static int idxd_wq_load_config(struct idxd_wq *wq) wq->priority = wq->wqcfg->priority; + wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift; + wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift; + for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]); @@ -1161,7 +1179,9 @@ void idxd_wq_free_irq(struct idxd_wq *wq) struct idxd_device *idxd = wq->idxd; struct idxd_irq_entry *ie = &wq->ie; - synchronize_irq(ie->vector); + if (wq->type != IDXD_WQT_KERNEL) + return; + free_irq(ie->vector, ie); idxd_flush_pending_descs(ie); if (idxd->request_int_handles) @@ -1180,6 +1200,9 @@ int idxd_wq_request_irq(struct idxd_wq *wq) struct idxd_irq_entry *ie; int rc; + if (wq->type != IDXD_WQT_KERNEL) + return 0; + ie = &wq->ie; ie->vector = pci_irq_vector(pdev, ie->id); ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID; @@ -1211,7 +1234,7 @@ err_irq: return rc; } -int __drv_enable_wq(struct idxd_wq *wq) +int drv_enable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1245,7 +1268,7 @@ int __drv_enable_wq(struct idxd_wq *wq) /* Shared WQ checks */ if (wq_shared(wq)) { - if (!device_swq_supported(idxd)) { + if (!wq_shared_supported(wq)) { idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM; dev_dbg(dev, "PASID not enabled and shared wq.\n"); goto err; @@ -1265,6 +1288,29 @@ int __drv_enable_wq(struct idxd_wq *wq) } } + /* + * In the event that the WQ is configurable for pasid and priv bits. + * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit. + * However, for non-kernel wq, the driver should only set the pasid_en bit for + * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and + * pasid_en later on so there is no need to setup. + */ + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + int priv = 0; + + if (wq_pasid_enabled(wq)) { + if (is_idxd_wq_kernel(wq) || wq_shared(wq)) { + u32 pasid = wq_dedicated(wq) ? 
idxd->pasid : 0; + + __idxd_wq_set_pasid_locked(wq, pasid); + } + } + + if (is_idxd_wq_kernel(wq)) + priv = 1; + __idxd_wq_set_priv_locked(wq, priv); + } + rc = 0; spin_lock(&idxd->dev_lock); if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) @@ -1289,8 +1335,36 @@ int __drv_enable_wq(struct idxd_wq *wq) } wq->client_count = 0; + + rc = idxd_wq_request_irq(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; + dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); + goto err_irq; + } + + rc = idxd_wq_alloc_resources(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; + dev_dbg(dev, "WQ resource alloc failed\n"); + goto err_res_alloc; + } + + rc = idxd_wq_init_percpu_ref(wq); + if (rc < 0) { + idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; + dev_dbg(dev, "percpu_ref setup failed\n"); + goto err_ref; + } + return 0; +err_ref: + idxd_wq_free_resources(wq); +err_res_alloc: + idxd_wq_free_irq(wq); +err_irq: + idxd_wq_unmap_portal(wq); err_map_portal: rc = idxd_wq_disable(wq, false); if (rc < 0) @@ -1299,17 +1373,7 @@ err: return rc; } -int drv_enable_wq(struct idxd_wq *wq) -{ - int rc; - - mutex_lock(&wq->wq_lock); - rc = __drv_enable_wq(wq); - mutex_unlock(&wq->wq_lock); - return rc; -} - -void __drv_disable_wq(struct idxd_wq *wq) +void drv_disable_wq(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct device *dev = &idxd->pdev->dev; @@ -1320,21 +1384,16 @@ void __drv_disable_wq(struct idxd_wq *wq) dev_warn(dev, "Clients has claim on wq %d: %d\n", wq->id, idxd_wq_refcount(wq)); + idxd_wq_free_resources(wq); idxd_wq_unmap_portal(wq); - idxd_wq_drain(wq); + idxd_wq_free_irq(wq); idxd_wq_reset(wq); - + percpu_ref_exit(&wq->wq_active); + wq->type = IDXD_WQT_NONE; wq->client_count = 0; } -void drv_disable_wq(struct idxd_wq *wq) -{ - mutex_lock(&wq->wq_lock); - __drv_disable_wq(wq); - mutex_unlock(&wq->wq_lock); -} - int idxd_device_drv_probe(struct idxd_dev *idxd_dev) { struct idxd_device *idxd = idxd_dev_to_idxd(idxd_dev); diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index bfff59617d04..e0874cb4721c 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -88,6 +88,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq, } static struct dma_async_tx_descriptor * +idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP, + 0, 0, 0, desc->compl_dma, desc_flags); + desc->txd.flags = flags; + return &desc->txd; +} + +static struct dma_async_tx_descriptor * idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { @@ -193,10 +214,12 @@ int idxd_register_dma_device(struct idxd_device *idxd) INIT_LIST_HEAD(&dma->channels); dma->dev = dev; + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma_cap_set(DMA_PRIVATE, dma->cap_mask); dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask); dma->device_release = idxd_dma_release; + dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt; if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; @@ -227,7 +250,7 @@ void idxd_unregister_dma_device(struct idxd_device *idxd) dma_async_device_unregister(&idxd->idxd_dma->dma); } -int 
idxd_register_dma_channel(struct idxd_wq *wq) +static int idxd_register_dma_channel(struct idxd_wq *wq) { struct idxd_device *idxd = wq->idxd; struct dma_device *dma = &idxd->idxd_dma->dma; @@ -264,7 +287,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq) return 0; } -void idxd_unregister_dma_channel(struct idxd_wq *wq) +static void idxd_unregister_dma_channel(struct idxd_wq *wq) { struct idxd_dma_chan *idxd_chan = wq->idxd_chan; struct dma_chan *chan = &idxd_chan->chan; @@ -290,34 +313,13 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_KERNEL; - rc = idxd_wq_request_irq(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR; - dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc); - goto err_irq; - } - - rc = __drv_enable_wq(wq); + rc = drv_enable_wq(wq); if (rc < 0) { dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc); rc = -ENXIO; goto err; } - rc = idxd_wq_alloc_resources(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR; - dev_dbg(dev, "WQ resource alloc failed\n"); - goto err_res_alloc; - } - - rc = idxd_wq_init_percpu_ref(wq); - if (rc < 0) { - idxd->cmd_status = IDXD_SCMD_PERCPU_ERR; - dev_dbg(dev, "percpu_ref setup failed\n"); - goto err_ref; - } - rc = idxd_register_dma_channel(wq); if (rc < 0) { idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR; @@ -330,15 +332,8 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev) return 0; err_dma: - __idxd_wq_quiesce(wq); - percpu_ref_exit(&wq->wq_active); -err_ref: - idxd_wq_free_resources(wq); -err_res_alloc: - __drv_disable_wq(wq); + drv_disable_wq(wq); err: - idxd_wq_free_irq(wq); -err_irq: wq->type = IDXD_WQT_NONE; mutex_unlock(&wq->wq_lock); return rc; @@ -351,11 +346,7 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev) mutex_lock(&wq->wq_lock); __idxd_wq_quiesce(wq); idxd_unregister_dma_channel(wq); - idxd_wq_free_resources(wq); - __drv_disable_wq(wq); - percpu_ref_exit(&wq->wq_active); - idxd_wq_free_irq(wq); - wq->type = IDXD_WQT_NONE; + drv_disable_wq(wq); mutex_unlock(&wq->wq_lock); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index da72eb15f610..fed0dfc1eaa8 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -239,6 +239,7 @@ enum idxd_device_flag { IDXD_FLAG_CONFIGURABLE = 0, IDXD_FLAG_CMD_RUNNING, IDXD_FLAG_PASID_ENABLED, + IDXD_FLAG_USER_PASID_ENABLED, }; struct idxd_dma_dev { @@ -469,9 +470,20 @@ static inline bool device_pasid_enabled(struct idxd_device *idxd) return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } -static inline bool device_swq_supported(struct idxd_device *idxd) +static inline bool device_user_pasid_enabled(struct idxd_device *idxd) { - return (support_enqcmd && device_pasid_enabled(idxd)); + return test_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); +} + +static inline bool wq_pasid_enabled(struct idxd_wq *wq) +{ + return (is_idxd_wq_kernel(wq) && device_pasid_enabled(wq->idxd)) || + (is_idxd_wq_user(wq) && device_user_pasid_enabled(wq->idxd)); +} + +static inline bool wq_shared_supported(struct idxd_wq *wq) +{ + return (support_enqcmd && wq_pasid_enabled(wq)); } enum idxd_portal_prot { @@ -559,9 +571,7 @@ void idxd_unregister_idxd_drv(void); int idxd_device_drv_probe(struct idxd_dev *idxd_dev); void idxd_device_drv_remove(struct idxd_dev *idxd_dev); int drv_enable_wq(struct idxd_wq *wq); -int __drv_enable_wq(struct idxd_wq *wq); void drv_disable_wq(struct idxd_wq *wq); -void __drv_disable_wq(struct idxd_wq *wq); int 
idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd); @@ -602,8 +612,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc); /* dmaengine */ int idxd_register_dma_device(struct idxd_device *idxd); void idxd_unregister_dma_device(struct idxd_device *idxd); -int idxd_register_dma_channel(struct idxd_wq *wq); -void idxd_unregister_dma_channel(struct idxd_wq *wq); void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); void idxd_dma_complete_txd(struct idxd_desc *desc, enum idxd_complete_type comp_type, bool free_desc); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 993a5dcca24f..355fb3ef4cbf 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -512,18 +512,15 @@ static int idxd_probe(struct idxd_device *idxd) dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) { - rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA); - if (rc == 0) { - rc = idxd_enable_system_pasid(idxd); - if (rc < 0) { - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); - dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc); - } else { - set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); - } - } else { - dev_warn(dev, "Unable to turn on SVA feature.\n"); - } + if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) + dev_warn(dev, "Unable to turn on user SVA feature.\n"); + else + set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags); + + if (idxd_enable_system_pasid(idxd)) + dev_warn(dev, "No in-kernel DMA with PASID.\n"); + else + set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags); } else if (!sva) { dev_warn(dev, "User forced SVA off via module param.\n"); } @@ -561,7 +558,8 @@ static int idxd_probe(struct idxd_device *idxd) err: if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); return rc; } @@ -574,7 +572,8 @@ static void idxd_cleanup(struct idxd_device *idxd) idxd_cleanup_internals(idxd); if (device_pasid_enabled(idxd)) idxd_disable_system_pasid(idxd); - iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA); } static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -691,7 +690,8 @@ static void idxd_remove(struct pci_dev *pdev) free_irq(irq_entry->vector, irq_entry); pci_free_irq_vectors(pdev); pci_iounmap(pdev, idxd->reg_base); - iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); + if (device_user_pasid_enabled(idxd)) + iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); pci_disable_device(pdev); destroy_workqueue(idxd->wq); perfmon_pmu_remove(idxd); diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index aa642aecdc0b..02449aa9c454 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -353,6 +353,7 @@ union wqcfg { } __packed; #define WQCFG_PASID_IDX 2 +#define WQCFG_PRIVL_IDX 2 #define WQCFG_OCCUP_IDX 6 #define WQCFG_OCCUP_MASK 0xffff diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index dfd549685c46..3f262a57441b 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -588,7 +588,7 @@ static ssize_t wq_mode_store(struct device *dev, if (sysfs_streq(buf, "dedicated")) { set_bit(WQ_FLAG_DEDICATED, &wq->flags); wq->threshold = 0; - 
} else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) { + } else if (sysfs_streq(buf, "shared")) { clear_bit(WQ_FLAG_DEDICATED, &wq->flags); } else { return -EINVAL; @@ -832,6 +832,7 @@ static ssize_t wq_name_store(struct device *dev, size_t count) { struct idxd_wq *wq = confdev_to_wq(dev); + char *input, *pos; if (wq->state != IDXD_WQ_DISABLED) return -EPERM; @@ -846,9 +847,14 @@ static ssize_t wq_name_store(struct device *dev, if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd)) return -EOPNOTSUPP; + input = kstrndup(buf, count, GFP_KERNEL); + if (!input) + return -ENOMEM; + + pos = strim(input); memset(wq->name, 0, WQ_NAME_SIZE + 1); - strncpy(wq->name, buf, WQ_NAME_SIZE); - strreplace(wq->name, '\n', '\0'); + sprintf(wq->name, "%s", pos); + kfree(input); return count; } diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c index 41ef9f15d3d5..f8847c48ba03 100644 --- a/drivers/dma/mediatek/mtk-cqdma.c +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -751,7 +751,6 @@ static int mtk_cqdma_probe(struct platform_device *pdev) struct mtk_cqdma_device *cqdma; struct mtk_cqdma_vchan *vc; struct dma_device *dd; - struct resource *res; int err; u32 i; @@ -824,13 +823,10 @@ static int mtk_cqdma_probe(struct platform_device *pdev) return PTR_ERR(cqdma->pc[i]->base); /* allocate IRQ resource */ - res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!res) { - dev_err(&pdev->dev, "No irq resource for %s\n", - dev_name(&pdev->dev)); - return -EINVAL; - } - cqdma->pc[i]->irq = res->start; + err = platform_get_irq(pdev, i); + if (err < 0) + return err; + cqdma->pc[i]->irq = err; err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, mtk_cqdma_irq, 0, dev_name(&pdev->dev), diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c index 6ad8afbb95f2..9ebd9231f62f 100644 --- a/drivers/dma/mediatek/mtk-hsdma.c +++ b/drivers/dma/mediatek/mtk-hsdma.c @@ -601,7 +601,7 @@ static void mtk_hsdma_free_rooms_in_ring(struct mtk_hsdma_device *hsdma) cb->flag = 0; } - cb->vd = 0; + cb->vd = NULL; /* * Recycle the RXD with the helper WRITE_ONCE that can ensure @@ -923,13 +923,10 @@ static int mtk_hsdma_probe(struct platform_device *pdev) return PTR_ERR(hsdma->clk); } - res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!res) { - dev_err(&pdev->dev, "No irq resource for %s\n", - dev_name(&pdev->dev)); - return -EINVAL; - } - hsdma->irq = res->start; + err = platform_get_irq(pdev, 0); + if (err < 0) + return err; + hsdma->irq = err; refcount_set(&hsdma->pc_refcnt, 0); spin_lock_init(&hsdma->lock); diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 5a53d7fcef01..e8d71b35593e 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -1043,13 +1043,17 @@ static int mmp_pdma_probe(struct platform_device *op) return PTR_ERR(pdev->base); of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); - if (of_id) - of_property_read_u32(pdev->dev->of_node, "#dma-channels", - &dma_channels); - else if (pdata && pdata->dma_channels) + if (of_id) { + /* Parse new and deprecated dma-channels properties */ + if (of_property_read_u32(pdev->dev->of_node, "dma-channels", + &dma_channels)) + of_property_read_u32(pdev->dev->of_node, "#dma-channels", + &dma_channels); + } else if (pdata && pdata->dma_channels) { dma_channels = pdata->dma_channels; - else + } else { dma_channels = 32; /* default 32 channel */ + } pdev->dma_channels = dma_channels; for (i = 0; i < dma_channels; i++) { diff --git a/drivers/dma/mv_xor_v2.c 
b/drivers/dma/mv_xor_v2.c index 9c8b4084ba2f..f10b29034da1 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -591,14 +591,14 @@ static void mv_xor_v2_tasklet(struct tasklet_struct *t) dma_run_dependencies(&next_pending_sw_desc->async_tx); /* Lock the channel */ - spin_lock_bh(&xor_dev->lock); + spin_lock(&xor_dev->lock); /* add the SW descriptor to the free descriptors list */ list_add(&next_pending_sw_desc->free_list, &xor_dev->free_sw_desc); /* Release the channel */ - spin_unlock_bh(&xor_dev->lock); + spin_unlock(&xor_dev->lock); /* increment the next descriptor */ pending_ptr++; diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 9c52c57919c6..a7063e9cd551 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1294,7 +1294,7 @@ static int nbpf_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct nbpf_device *nbpf; struct dma_device *dma_dev; - struct resource *iomem, *irq_res; + struct resource *iomem; const struct nbpf_config *cfg; int num_channels; int ret, irq, eirq, i; @@ -1335,13 +1335,11 @@ static int nbpf_probe(struct platform_device *pdev) nbpf->config = cfg; for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); - if (!irq_res) - break; - - for (irq = irq_res->start; irq <= irq_res->end; - irq++, irqs++) - irqbuf[irqs] = irq; + irq = platform_get_irq_optional(pdev, i); + if (irq < 0 && irq != -ENXIO) + return irq; + if (irq > 0) + irqbuf[irqs++] = irq; } /* diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c index 1ffcb5ca9788..12725fa1655f 100644 --- a/drivers/dma/plx_dma.c +++ b/drivers/dma/plx_dma.c @@ -137,7 +137,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) struct plx_dma_desc *desc; u32 flags; - spin_lock_bh(&plxdev->ring_lock); + spin_lock(&plxdev->ring_lock); while (plxdev->tail != plxdev->head) { desc = plx_dma_get_desc(plxdev, plxdev->tail); @@ -165,7 +165,7 @@ static void plx_dma_process_desc(struct plx_dma_dev *plxdev) plxdev->tail++; } - spin_unlock_bh(&plxdev->ring_lock); + spin_unlock(&plxdev->ring_lock); } static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c index daafea5bc35d..377da23012ac 100644 --- a/drivers/dma/ptdma/ptdma-dev.c +++ b/drivers/dma/ptdma/ptdma-dev.c @@ -100,6 +100,7 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, struct pt_passthru_engine *pt_engine) { struct ptdma_desc desc; + struct pt_device *pt = container_of(cmd_q, struct pt_device, cmd_q); cmd_q->cmd_error = 0; cmd_q->total_pt_ops++; @@ -111,17 +112,12 @@ int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, desc.dst_lo = lower_32_bits(pt_engine->dst_dma); desc.dw5.dst_hi = upper_32_bits(pt_engine->dst_dma); - return pt_core_execute_cmd(&desc, cmd_q); -} - -static inline void pt_core_disable_queue_interrupts(struct pt_device *pt) -{ - iowrite32(0, pt->cmd_q.reg_control + 0x000C); -} + if (cmd_q->int_en) + pt_core_enable_queue_interrupts(pt); + else + pt_core_disable_queue_interrupts(pt); -static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) -{ - iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); + return pt_core_execute_cmd(&desc, cmd_q); } static void pt_do_cmd_complete(unsigned long data) @@ -144,14 +140,10 @@ static void pt_do_cmd_complete(unsigned long data) cmd->pt_cmd_callback(cmd->data, cmd->ret); } -static irqreturn_t pt_core_irq_handler(int irq, void *data) +void pt_check_status_trans(struct pt_device 
*pt, struct pt_cmd_queue *cmd_q) { - struct pt_device *pt = data; - struct pt_cmd_queue *cmd_q = &pt->cmd_q; u32 status; - pt_core_disable_queue_interrupts(pt); - pt->total_interrupts++; status = ioread32(cmd_q->reg_control + 0x0010); if (status) { cmd_q->int_status = status; @@ -162,11 +154,21 @@ static irqreturn_t pt_core_irq_handler(int irq, void *data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - /* Acknowledge the interrupt */ + /* Acknowledge the completion */ iowrite32(status, cmd_q->reg_control + 0x0010); - pt_core_enable_queue_interrupts(pt); pt_do_cmd_complete((ulong)&pt->tdata); } +} + +static irqreturn_t pt_core_irq_handler(int irq, void *data) +{ + struct pt_device *pt = data; + struct pt_cmd_queue *cmd_q = &pt->cmd_q; + + pt_core_disable_queue_interrupts(pt); + pt->total_interrupts++; + pt_check_status_trans(pt, cmd_q); + pt_core_enable_queue_interrupts(pt); return IRQ_HANDLED; } diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c index 91b93e8d9779..cc22d162ce25 100644 --- a/drivers/dma/ptdma/ptdma-dmaengine.c +++ b/drivers/dma/ptdma/ptdma-dmaengine.c @@ -171,6 +171,7 @@ static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan, vchan_tx_prep(&chan->vc, &desc->vd, flags); desc->pt = chan->pt; + desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT); desc->issued_to_hw = 0; desc->status = DMA_IN_PROGRESS; @@ -257,6 +258,17 @@ static void pt_issue_pending(struct dma_chan *dma_chan) pt_cmd_callback(desc, 0); } +static enum dma_status +pt_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct pt_device *pt = to_pt_chan(c)->pt; + struct pt_cmd_queue *cmd_q = &pt->cmd_q; + + pt_check_status_trans(pt, cmd_q); + return dma_cookie_status(c, cookie, txstate); +} + static int pt_pause(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); @@ -291,8 +303,10 @@ static int pt_terminate_all(struct dma_chan *dma_chan) { struct pt_dma_chan *chan = to_pt_chan(dma_chan); unsigned long flags; + struct pt_cmd_queue *cmd_q = &chan->pt->cmd_q; LIST_HEAD(head); + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010); spin_lock_irqsave(&chan->vc.lock, flags); vchan_get_all_descriptors(&chan->vc, &head); spin_unlock_irqrestore(&chan->vc.lock, flags); @@ -362,7 +376,7 @@ int pt_dmaengine_register(struct pt_device *pt) dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy; dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt; dma_dev->device_issue_pending = pt_issue_pending; - dma_dev->device_tx_status = dma_cookie_status; + dma_dev->device_tx_status = pt_tx_status; dma_dev->device_pause = pt_pause; dma_dev->device_resume = pt_resume; dma_dev->device_terminate_all = pt_terminate_all; diff --git a/drivers/dma/ptdma/ptdma.h b/drivers/dma/ptdma/ptdma.h index afbf192c9230..d093c43b7d13 100644 --- a/drivers/dma/ptdma/ptdma.h +++ b/drivers/dma/ptdma/ptdma.h @@ -206,6 +206,9 @@ struct pt_cmd_queue { unsigned int active; unsigned int suspended; + /* Interrupt flag */ + bool int_en; + /* Register addresses for queue */ void __iomem *reg_control; u32 qcontrol; /* Cached control register */ @@ -318,7 +321,17 @@ void pt_core_destroy(struct pt_device *pt); int pt_core_perform_passthru(struct pt_cmd_queue *cmd_q, struct pt_passthru_engine *pt_engine); +void pt_check_status_trans(struct pt_device *pt, struct pt_cmd_queue *cmd_q); void pt_start_queue(struct pt_cmd_queue *cmd_q); void pt_stop_queue(struct pt_cmd_queue *cmd_q); +static inline void 
pt_core_disable_queue_interrupts(struct pt_device *pt) +{ + iowrite32(0, pt->cmd_q.reg_control + 0x000C); +} + +static inline void pt_core_enable_queue_interrupts(struct pt_device *pt) +{ + iowrite32(SUPPORTED_INTERRUPTS, pt->cmd_q.reg_control + 0x000C); +} #endif diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 6078cc81892e..e7034f6f3994 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1365,10 +1365,17 @@ static int pxad_probe(struct platform_device *op) of_id = of_match_device(pxad_dt_ids, &op->dev); if (of_id) { - of_property_read_u32(op->dev.of_node, "#dma-channels", - &dma_channels); - ret = of_property_read_u32(op->dev.of_node, "#dma-requests", + /* Parse new and deprecated dma-channels properties */ + if (of_property_read_u32(op->dev.of_node, "dma-channels", + &dma_channels)) + of_property_read_u32(op->dev.of_node, "#dma-channels", + &dma_channels); + /* Parse new and deprecated dma-requests properties */ + ret = of_property_read_u32(op->dev.of_node, "dma-requests", &nb_requestors); + if (ret) + ret = of_property_read_u32(op->dev.of_node, "#dma-requests", + &nb_requestors); if (ret) { dev_warn(pdev->slave.dev, "#dma-requests set to default 32 as missing in OF: %d", diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c index 94f3648f7483..8f0c9c4e2efd 100644 --- a/drivers/dma/qcom/gpi.c +++ b/drivers/dma/qcom/gpi.c @@ -1754,10 +1754,14 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc, tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN); tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE); - if (spi->cmd == SPI_RX) + if (spi->cmd == SPI_RX) { tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB); - else + } else if (spi->cmd == SPI_TX) { + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); + } else { /* SPI_DUPLEX */ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN); + tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK); + } } /* create the dma tre */ @@ -2148,6 +2152,7 @@ static int gpi_probe(struct platform_device *pdev) { struct gpi_dev *gpi_dev; unsigned int i; + u32 ee_offset; int ret; gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL); @@ -2175,6 +2180,9 @@ static int gpi_probe(struct platform_device *pdev) return ret; } + ee_offset = (uintptr_t)device_get_match_data(gpi_dev->dev); + gpi_dev->ee_base = gpi_dev->ee_base - ee_offset; + gpi_dev->ev_factor = EV_FACTOR; ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64)); @@ -2278,9 +2286,12 @@ static int gpi_probe(struct platform_device *pdev) } static const struct of_device_id gpi_of_match[] = { - { .compatible = "qcom,sdm845-gpi-dma" }, - { .compatible = "qcom,sm8150-gpi-dma" }, - { .compatible = "qcom,sm8250-gpi-dma" }, + { .compatible = "qcom,sc7280-gpi-dma", .data = (void *)0x10000 }, + { .compatible = "qcom,sdm845-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8150-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8250-gpi-dma", .data = (void *)0x0 }, + { .compatible = "qcom,sm8350-gpi-dma", .data = (void *)0x10000 }, + { .compatible = "qcom,sm8450-gpi-dma", .data = (void *)0x10000 }, { }, }; MODULE_DEVICE_TABLE(of, gpi_of_match); diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c index 51587cf8196b..210f1a9eb441 100644 --- a/drivers/dma/qcom/hidma.c +++ b/drivers/dma/qcom/hidma.c @@ -431,6 +431,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, struct hidma_desc *mdesc = NULL; struct hidma_dev *mdma = mchan->dmadev; unsigned long irqflags; + u64 byte_pattern, 
fill_pattern; /* Get free descriptor */ spin_lock_irqsave(&mchan->lock, irqflags); @@ -443,9 +444,19 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value, if (!mdesc) return NULL; + byte_pattern = (char)value; + fill_pattern = (byte_pattern << 56) | + (byte_pattern << 48) | + (byte_pattern << 40) | + (byte_pattern << 32) | + (byte_pattern << 24) | + (byte_pattern << 16) | + (byte_pattern << 8) | + byte_pattern; + mdesc->desc.flags = flags; hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, - value, dest, len, flags, + fill_pattern, dest, len, flags, HIDMA_TRE_MEMSET); /* Place descriptor in prepared list */ diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index f12606aeff87..db5a4ef76077 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -482,23 +482,30 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma) static int sf_pdma_probe(struct platform_device *pdev) { struct sf_pdma *pdma; - struct sf_pdma_chan *chan; struct resource *res; - int len, chans; - int ret; + int ret, n_chans; const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; - chans = PDMA_NR_CH; - len = sizeof(*pdma) + sizeof(*chan) * chans; - pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); + ret = of_property_read_u32(pdev->dev.of_node, "dma-channels", &n_chans); + if (ret) { + /* backwards-compatibility for no dma-channels property */ + dev_dbg(&pdev->dev, "set number of channels to default value: 4\n"); + n_chans = PDMA_MAX_NR_CH; + } else if (n_chans > PDMA_MAX_NR_CH) { + dev_err(&pdev->dev, "the number of channels exceeds the maximum\n"); + return -EINVAL; + } + + pdma = devm_kzalloc(&pdev->dev, struct_size(pdma, chans, n_chans), + GFP_KERNEL); if (!pdma) return -ENOMEM; - pdma->n_chans = chans; + pdma->n_chans = n_chans; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pdma->membase = devm_ioremap_resource(&pdev->dev, res); @@ -556,7 +563,7 @@ static int sf_pdma_remove(struct platform_device *pdev) struct sf_pdma_chan *ch; int i; - for (i = 0; i < PDMA_NR_CH; i++) { + for (i = 0; i < pdma->n_chans; i++) { ch = &pdma->chans[i]; devm_free_irq(&pdev->dev, ch->txirq, ch); @@ -574,6 +581,7 @@ static int sf_pdma_remove(struct platform_device *pdev) static const struct of_device_id sf_pdma_dt_ids[] = { { .compatible = "sifive,fu540-c000-pdma" }, + { .compatible = "sifive,pdma0" }, {}, }; MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids); diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h index 0c20167b097d..dcb3687bd5da 100644 --- a/drivers/dma/sf-pdma/sf-pdma.h +++ b/drivers/dma/sf-pdma/sf-pdma.h @@ -22,11 +22,7 @@ #include "../dmaengine.h" #include "../virt-dma.h" -#define PDMA_NR_CH 4 - -#if (PDMA_NR_CH != 4) -#error "Please define PDMA_NR_CH to 4" -#endif +#define PDMA_MAX_NR_CH 4 #define PDMA_BASE_ADDR 0x3000000 #define PDMA_CHAN_OFFSET 0x1000 @@ -118,7 +114,7 @@ struct sf_pdma { void __iomem *membase; void __iomem *mappedbase; u32 n_chans; - struct sf_pdma_chan chans[PDMA_NR_CH]; + struct sf_pdma_chan chans[]; }; #endif /* _SF_PDMA_H */ diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index b35d705f79e7..c0b2997ab7fd 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -50,7 +50,7 @@ config RENESAS_USB_DMAC config RZ_DMAC tristate "Renesas RZ/{G2L,V2L} DMA Controller" - depends on ARCH_R9A07G044 || 
ARCH_R9A07G054 || COMPILE_TEST + depends on ARCH_RZG2L || COMPILE_TEST select RENESAS_DMA select DMA_VIRTUAL_CHANNELS help diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 7f158ef5672d..2138b80435ab 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1117,7 +1117,11 @@ static int sprd_dma_probe(struct platform_device *pdev) u32 chn_count; int ret, i; - ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count); + /* Parse new and deprecated dma-channels properties */ + ret = device_property_read_u32(&pdev->dev, "dma-channels", &chn_count); + if (ret) + ret = device_property_read_u32(&pdev->dev, "#dma-channels", + &chn_count); if (ret) { dev_err(&pdev->dev, "get dma channels count failed\n"); return ret; diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index d2365fab1b7a..adb25a11c70f 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -208,6 +208,7 @@ struct stm32_dma_chan { u32 threshold; u32 mem_burst; u32 mem_width; + enum dma_status status; }; struct stm32_dma_device { @@ -485,6 +486,7 @@ static void stm32_dma_stop(struct stm32_dma_chan *chan) } chan->busy = false; + chan->status = DMA_COMPLETE; } static int stm32_dma_terminate_all(struct dma_chan *c) @@ -535,6 +537,13 @@ static void stm32_dma_dump_reg(struct stm32_dma_chan *chan) dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr); } +static void stm32_dma_sg_inc(struct stm32_dma_chan *chan) +{ + chan->next_sg++; + if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs)) + chan->next_sg = 0; +} + static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan); static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) @@ -575,7 +584,7 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar); stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr); - chan->next_sg++; + stm32_dma_sg_inc(chan); /* Clear interrupt status if it is there */ status = stm32_dma_irq_status(chan); @@ -588,11 +597,11 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan) stm32_dma_dump_reg(chan); /* Start DMA */ + chan->busy = true; + chan->status = DMA_IN_PROGRESS; reg->dma_scr |= STM32_DMA_SCR_EN; stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr); - chan->busy = true; - dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); } @@ -605,41 +614,131 @@ static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) id = chan->id; dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); - if (dma_scr & STM32_DMA_SCR_DBM) { - if (chan->next_sg == chan->desc->num_sgs) - chan->next_sg = 0; + sg_req = &chan->desc->sg_req[chan->next_sg]; - sg_req = &chan->desc->sg_req[chan->next_sg]; + if (dma_scr & STM32_DMA_SCR_CT) { + dma_sm0ar = sg_req->chan_reg.dma_sm0ar; + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); + dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", + stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); + } else { + dma_sm1ar = sg_req->chan_reg.dma_sm1ar; + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar); + dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", + stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); + } +} - if (dma_scr & STM32_DMA_SCR_CT) { - dma_sm0ar = sg_req->chan_reg.dma_sm0ar; - stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar); - dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n", - stm32_dma_read(dmadev, STM32_DMA_SM0AR(id))); - } else { - dma_sm1ar = sg_req->chan_reg.dma_sm1ar; - stm32_dma_write(dmadev, 
STM32_DMA_SM1AR(id), dma_sm1ar); - dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n", - stm32_dma_read(dmadev, STM32_DMA_SM1AR(id))); - } +static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan) +{ + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + u32 dma_scr; + + /* + * Read and store current remaining data items and peripheral/memory addresses to be + * updated on resume + */ + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id)); + /* + * Transfer can be paused while between a previous resume and reconfiguration on transfer + * complete. If transfer is cyclic and CIRC and DBM have been deactivated for resume, need + * to set it here in SCR backup to ensure a good reconfiguration on transfer complete. + */ + if (chan->desc && chan->desc->cyclic) { + if (chan->desc->num_sgs == 1) + dma_scr |= STM32_DMA_SCR_CIRC; + else + dma_scr |= STM32_DMA_SCR_DBM; + } + chan->chan_reg.dma_scr = dma_scr; + + /* + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, otherwise + * on resume NDTR autoreload value will be wrong (lower than the initial period length) + */ + if (chan->desc && chan->desc->cyclic) { + dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC); + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + } + + chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); + + dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); +} + +static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan) +{ + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + struct stm32_dma_sg_req *sg_req; + u32 dma_scr, status, id; + + id = chan->id; + dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + + /* Clear interrupt status if it is there */ + status = stm32_dma_irq_status(chan); + if (status) + stm32_dma_irq_clear(chan, status); + + if (!chan->next_sg) + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; + else + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; + + /* Reconfigure NDTR with the initial value */ + stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr); + + /* Restore SPAR */ + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar); + + /* Restore SM0AR/SM1AR whatever DBM/CT as they may have been modified */ + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar); + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar); + + /* Reactivate CIRC/DBM if needed */ + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) { + dma_scr |= STM32_DMA_SCR_DBM; + /* Restore CT */ + if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT) + dma_scr &= ~STM32_DMA_SCR_CT; + else + dma_scr |= STM32_DMA_SCR_CT; + } else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) { + dma_scr |= STM32_DMA_SCR_CIRC; } + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + + stm32_dma_configure_next_sg(chan); + + stm32_dma_dump_reg(chan); + + dma_scr |= STM32_DMA_SCR_EN; + stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr); + + dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan); } -static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan) +static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr) { - if (chan->desc) { - if (chan->desc->cyclic) { - vchan_cyclic_callback(&chan->desc->vdesc); - chan->next_sg++; + if (!chan->desc) + return; + + if (chan->desc->cyclic) { + vchan_cyclic_callback(&chan->desc->vdesc); + stm32_dma_sg_inc(chan); + /* cyclic while CIRC/DBM 
disable => post resume reconfiguration needed */ + if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))) + stm32_dma_post_resume_reconfigure(chan); + else if (scr & STM32_DMA_SCR_DBM) stm32_dma_configure_next_sg(chan); - } else { - chan->busy = false; - if (chan->next_sg == chan->desc->num_sgs) { - vchan_cookie_complete(&chan->desc->vdesc); - chan->desc = NULL; - } - stm32_dma_start_transfer(chan); + } else { + chan->busy = false; + chan->status = DMA_COMPLETE; + if (chan->next_sg == chan->desc->num_sgs) { + vchan_cookie_complete(&chan->desc->vdesc); + chan->desc = NULL; } + stm32_dma_start_transfer(chan); } } @@ -675,8 +774,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) if (status & STM32_DMA_TCI) { stm32_dma_irq_clear(chan, STM32_DMA_TCI); - if (scr & STM32_DMA_SCR_TCIE) - stm32_dma_handle_chan_done(chan); + if (scr & STM32_DMA_SCR_TCIE) { + if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN)) + stm32_dma_handle_chan_paused(chan); + else + stm32_dma_handle_chan_done(chan, scr); + } status &= ~STM32_DMA_TCI; } @@ -711,6 +814,107 @@ static void stm32_dma_issue_pending(struct dma_chan *c) spin_unlock_irqrestore(&chan->vchan.lock, flags); } +static int stm32_dma_pause(struct dma_chan *c) +{ + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); + unsigned long flags; + int ret; + + if (chan->status != DMA_IN_PROGRESS) + return -EPERM; + + spin_lock_irqsave(&chan->vchan.lock, flags); + ret = stm32_dma_disable_chan(chan); + /* + * A transfer complete flag is set to indicate the end of transfer due to the stream + * interruption, so wait for interrupt + */ + if (!ret) + chan->status = DMA_PAUSED; + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + return ret; +} + +static int stm32_dma_resume(struct dma_chan *c) +{ + struct stm32_dma_chan *chan = to_stm32_dma_chan(c); + struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); + struct stm32_dma_chan_reg chan_reg = chan->chan_reg; + u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar; + struct stm32_dma_sg_req *sg_req; + unsigned long flags; + + if (chan->status != DMA_PAUSED) + return -EPERM; + + scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + if (WARN_ON(scr & STM32_DMA_SCR_EN)) + return -EPERM; + + spin_lock_irqsave(&chan->vchan.lock, flags); + + /* sg_reg[prev_sg] contains original ndtr, sm0ar and sm1ar before pausing the transfer */ + if (!chan->next_sg) + sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1]; + else + sg_req = &chan->desc->sg_req[chan->next_sg - 1]; + + ndtr = sg_req->chan_reg.dma_sndtr; + offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr); + spar = sg_req->chan_reg.dma_spar; + sm0ar = sg_req->chan_reg.dma_sm0ar; + sm1ar = sg_req->chan_reg.dma_sm1ar; + + /* + * The peripheral and/or memory addresses have to be updated in order to adjust the + * address pointers. Need to check increment. + */ + if (chan_reg.dma_scr & STM32_DMA_SCR_PINC) + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset); + else + stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar); + + if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC)) + offset = 0; + + /* + * In case of DBM, the current target could be SM1AR. + * Need to temporarily deactivate CIRC/DBM to finish the current transfer, so + * SM0AR becomes the current target and must be updated with SM1AR + offset if CT=1. 
+ */ + if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT)) + stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset); + else + stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset); + + /* NDTR must be restored otherwise internal HW counter won't be correctly reset */ + stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr); + + /* + * Need to temporarily deactivate CIRC/DBM until next Transfer Complete interrupt, + * otherwise NDTR autoreload value will be wrong (lower than the initial period length) + */ + if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)) + chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM); + + if (chan_reg.dma_scr & STM32_DMA_SCR_DBM) + stm32_dma_configure_next_sg(chan); + + stm32_dma_dump_reg(chan); + + /* The stream may then be re-enabled to restart transfer from the point it was stopped */ + chan->status = DMA_IN_PROGRESS; + chan_reg.dma_scr |= STM32_DMA_SCR_EN; + stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr); + + spin_unlock_irqrestore(&chan->vchan.lock, flags); + + dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan); + + return 0; +} + static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan, enum dma_transfer_direction direction, enum dma_slave_buswidth *buswidth, @@ -978,10 +1182,12 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic( } /* Enable Circular mode or double buffer mode */ - if (buf_len == period_len) + if (buf_len == period_len) { chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC; - else + } else { chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM; + chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT; + } /* Clear periph ctrl if client set it */ chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL; @@ -1091,24 +1297,36 @@ static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan) { struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan); struct stm32_dma_sg_req *sg_req; - u32 dma_scr, dma_smar, id; + u32 dma_scr, dma_smar, id, period_len; id = chan->id; dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id)); + /* In cyclic CIRC but not DBM, CT is not used */ if (!(dma_scr & STM32_DMA_SCR_DBM)) return true; sg_req = &chan->desc->sg_req[chan->next_sg]; + period_len = sg_req->len; + /* DBM - take care of a previous pause/resume not yet post reconfigured */ if (dma_scr & STM32_DMA_SCR_CT) { dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)); - return (dma_smar == sg_req->chan_reg.dma_sm0ar); + /* + * If transfer has been pause/resumed, + * SM0AR is in the range of [SM0AR:SM0AR+period_len] + */ + return (dma_smar >= sg_req->chan_reg.dma_sm0ar && + dma_smar < sg_req->chan_reg.dma_sm0ar + period_len); } dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)); - - return (dma_smar == sg_req->chan_reg.dma_sm1ar); + /* + * If transfer has been pause/resumed, + * SM1AR is in the range of [SM1AR:SM1AR+period_len] + */ + return (dma_smar >= sg_req->chan_reg.dma_sm1ar && + dma_smar < sg_req->chan_reg.dma_sm1ar + period_len); } static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, @@ -1148,7 +1366,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan, residue = stm32_dma_get_remaining_bytes(chan); - if (!stm32_dma_is_current_sg(chan)) { + if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) { n_sg++; if (n_sg == chan->desc->num_sgs) n_sg = 0; @@ -1188,7 +1406,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c, u32 residue = 0; status = dma_cookie_status(c, cookie, state); - if (status == DMA_COMPLETE || 
!state) + if (status == DMA_COMPLETE) + return status; + + status = chan->status; + + if (!state) return status; spin_lock_irqsave(&chan->vchan.lock, flags); @@ -1377,6 +1600,8 @@ static int stm32_dma_probe(struct platform_device *pdev) dd->device_prep_slave_sg = stm32_dma_prep_slave_sg; dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic; dd->device_config = stm32_dma_slave_config; + dd->device_pause = stm32_dma_pause; + dd->device_resume = stm32_dma_resume; dd->device_terminate_all = stm32_dma_terminate_all; dd->device_synchronize = stm32_dma_synchronize; dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | @@ -1482,7 +1707,7 @@ static int stm32_dma_runtime_resume(struct device *dev) #endif #ifdef CONFIG_PM_SLEEP -static int stm32_dma_suspend(struct device *dev) +static int stm32_dma_pm_suspend(struct device *dev) { struct stm32_dma_device *dmadev = dev_get_drvdata(dev); int id, ret, scr; @@ -1506,14 +1731,14 @@ static int stm32_dma_suspend(struct device *dev) return 0; } -static int stm32_dma_resume(struct device *dev) +static int stm32_dma_pm_resume(struct device *dev) { return pm_runtime_force_resume(dev); } #endif static const struct dev_pm_ops stm32_dma_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume) + SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume) SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend, stm32_dma_runtime_resume, NULL) }; diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c index d5d55732adba..eee0c5aa5fb5 100644 --- a/drivers/dma/stm32-dmamux.c +++ b/drivers/dma/stm32-dmamux.c @@ -267,7 +267,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev) ret = PTR_ERR(rst); if (ret == -EPROBE_DEFER) goto err_clk; - } else { + } else if (count > 1) { /* Don't reset if there is only one dma-master */ reset_control_assert(rst); udelay(2); reset_control_deassert(rst); diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index 6f57ff0e7b37..caf0cce8f528 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -34,7 +34,6 @@ #include "virt-dma.h" #define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */ -#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */ /* MDMA Channel x interrupt/status register */ #define STM32_MDMA_CISR(x) (0x40 + 0x40 * (x)) /* x = 0..62 */ @@ -73,6 +72,7 @@ #define STM32_MDMA_CCR_WEX BIT(14) #define STM32_MDMA_CCR_HEX BIT(13) #define STM32_MDMA_CCR_BEX BIT(12) +#define STM32_MDMA_CCR_SM BIT(8) #define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6) #define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n)) #define STM32_MDMA_CCR_TCIE BIT(5) @@ -168,7 +168,7 @@ #define STM32_MDMA_MAX_BUF_LEN 128 #define STM32_MDMA_MAX_BLOCK_LEN 65536 -#define STM32_MDMA_MAX_CHANNELS 63 +#define STM32_MDMA_MAX_CHANNELS 32 #define STM32_MDMA_MAX_REQUESTS 256 #define STM32_MDMA_MAX_BURST 128 #define STM32_MDMA_VERY_HIGH_PRIORITY 0x3 @@ -248,6 +248,7 @@ struct stm32_mdma_device { u32 nr_channels; u32 nr_requests; u32 nr_ahb_addr_masks; + u32 chan_reserved; struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS]; u32 ahb_addr_masks[]; }; @@ -1317,26 +1318,16 @@ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan) static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) { struct stm32_mdma_device *dmadev = devid; - struct stm32_mdma_chan *chan = devid; + struct stm32_mdma_chan *chan; u32 reg, id, ccr, ien, status; /* Find out which channel generates the interrupt */ status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0); - if (status) { - id = __ffs(status); - } 
else { - status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1); - if (!status) { - dev_dbg(mdma2dev(dmadev), "spurious it\n"); - return IRQ_NONE; - } - id = __ffs(status); - /* - * As GISR0 provides status for channel id from 0 to 31, - * so GISR1 provides status for channel id from 32 to 62 - */ - id += 32; + if (!status) { + dev_dbg(mdma2dev(dmadev), "spurious it\n"); + return IRQ_NONE; } + id = __ffs(status); chan = &dmadev->chan[id]; if (!chan) { @@ -1354,9 +1345,12 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid) if (!(status & ien)) { spin_unlock(&chan->vchan.lock); - dev_warn(chan2dev(chan), - "spurious it (status=0x%04x, ien=0x%04x)\n", - status, ien); + if (chan->busy) + dev_warn(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); + else + dev_dbg(chan2dev(chan), + "spurious it (status=0x%04x, ien=0x%04x)\n", status, ien); return IRQ_NONE; } @@ -1456,10 +1450,23 @@ static void stm32_mdma_free_chan_resources(struct dma_chan *c) chan->desc_pool = NULL; } +static bool stm32_mdma_filter_fn(struct dma_chan *c, void *fn_param) +{ + struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c); + struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan); + + /* Check if chan is marked Secure */ + if (dmadev->chan_reserved & BIT(chan->id)) + return false; + + return true; +} + static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct stm32_mdma_device *dmadev = ofdma->of_dma_data; + dma_cap_mask_t mask = dmadev->ddev.cap_mask; struct stm32_mdma_chan *chan; struct dma_chan *c; struct stm32_mdma_chan_config config; @@ -1485,7 +1492,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, return NULL; } - c = dma_get_any_slave_channel(&dmadev->ddev); + c = __dma_request_channel(&mask, stm32_mdma_filter_fn, &config, ofdma->of_node); if (!c) { dev_err(mdma2dev(dmadev), "No more channels available\n"); return NULL; @@ -1615,6 +1622,10 @@ static int stm32_mdma_probe(struct platform_device *pdev) for (i = 0; i < dmadev->nr_channels; i++) { chan = &dmadev->chan[i]; chan->id = i; + + if (stm32_mdma_read(dmadev, STM32_MDMA_CCR(i)) & STM32_MDMA_CCR_SM) + dmadev->chan_reserved |= BIT(i); + chan->vchan.desc_free = stm32_mdma_desc_free; vchan_init(&chan->vchan, dd); } diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 5cadd4d2b824..b7557f437936 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -90,6 +90,14 @@ #define DMA_CHAN_CUR_PARA 0x1c +/* + * LLI address mangling + * + * The LLI link physical address is also mangled, but we avoid dealing + * with that by allocating LLIs from the DMA32 zone. 
+ */ +#define SRC_HIGH_ADDR(x) (((x) & 0x3U) << 16) +#define DST_HIGH_ADDR(x) (((x) & 0x3U) << 18) /* * Various hardware related defines @@ -132,6 +140,7 @@ struct sun6i_dma_config { u32 dst_burst_lengths; u32 src_addr_widths; u32 dst_addr_widths; + bool has_high_addr; bool has_mbus_clk; }; @@ -241,9 +250,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, struct sun6i_pchan *pchan) { - phys_addr_t reg = virt_to_phys(pchan->base); - - dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" + dev_dbg(sdev->slave.dev, "Chan %d reg:\n" "\t___en(%04x): \t0x%08x\n" "\tpause(%04x): \t0x%08x\n" "\tstart(%04x): \t0x%08x\n" @@ -252,7 +259,7 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, "\t__dst(%04x): \t0x%08x\n" "\tcount(%04x): \t0x%08x\n" "\t_para(%04x): \t0x%08x\n\n", - pchan->idx, ®, + pchan->idx, DMA_CHAN_ENABLE, readl(pchan->base + DMA_CHAN_ENABLE), DMA_CHAN_PAUSE, @@ -385,17 +392,16 @@ static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, } static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, - struct sun6i_dma_lli *lli) + struct sun6i_dma_lli *v_lli, + dma_addr_t p_lli) { - phys_addr_t p_lli = virt_to_phys(lli); - dev_dbg(chan2dev(&vchan->vc.chan), - "\n\tdesc: p - %pa v - 0x%p\n" + "\n\tdesc:\tp - %pad v - 0x%p\n" "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", - &p_lli, lli, - lli->cfg, lli->src, lli->dst, - lli->len, lli->para, lli->p_lli_next); + &p_lli, v_lli, + v_lli->cfg, v_lli->src, v_lli->dst, + v_lli->len, v_lli->para, v_lli->p_lli_next); } static void sun6i_dma_free_desc(struct virt_dma_desc *vd) @@ -445,7 +451,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) pchan->desc = to_sun6i_desc(&desc->tx); pchan->done = NULL; - sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); + sun6i_dma_dump_lli(vchan, pchan->desc->v_lli, pchan->desc->p_lli); irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; @@ -626,6 +632,18 @@ static int set_config(struct sun6i_dma_dev *sdev, return 0; } +static inline void sun6i_dma_set_addr(struct sun6i_dma_dev *sdev, + struct sun6i_dma_lli *v_lli, + dma_addr_t src, dma_addr_t dst) +{ + v_lli->src = lower_32_bits(src); + v_lli->dst = lower_32_bits(dst); + + if (sdev->cfg->has_high_addr) + v_lli->para |= SRC_HIGH_ADDR(upper_32_bits(src)) | + DST_HIGH_ADDR(upper_32_bits(dst)); +} + static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) @@ -648,16 +666,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( if (!txd) return NULL; - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) { dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); goto err_txd_free; } - v_lli->src = src; - v_lli->dst = dest; v_lli->len = len; v_lli->para = NORMAL_WAIT; + sun6i_dma_set_addr(sdev, v_lli, src, dest); burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); @@ -670,7 +687,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); - sun6i_dma_dump_lli(vchan, v_lli); + sun6i_dma_dump_lli(vchan, v_lli, p_lli); return vchan_tx_prep(&vchan->vc, &txd->vd, flags); @@ -708,7 +725,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( return NULL; for_each_sg(sgl, sg, sg_len, i) { - v_lli 
= dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) goto err_lli_free; @@ -716,8 +733,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( v_lli->para = NORMAL_WAIT; if (dir == DMA_MEM_TO_DEV) { - v_lli->src = sg_dma_address(sg); - v_lli->dst = sconfig->dst_addr; + sun6i_dma_set_addr(sdev, v_lli, + sg_dma_address(sg), + sconfig->dst_addr); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); @@ -729,8 +747,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( sg_dma_len(sg), flags); } else { - v_lli->src = sconfig->src_addr; - v_lli->dst = sg_dma_address(sg); + sun6i_dma_set_addr(sdev, v_lli, + sconfig->src_addr, + sg_dma_address(sg)); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); @@ -746,14 +765,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( } dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - sun6i_dma_dump_lli(vchan, prev); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) + sun6i_dma_dump_lli(vchan, v_lli, p_lli); return vchan_tx_prep(&vchan->vc, &txd->vd, flags); err_lli_free: - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) + dma_pool_free(sdev->pool, v_lli, p_lli); kfree(txd); return NULL; } @@ -787,7 +808,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( return NULL; for (i = 0; i < periods; i++) { - v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + v_lli = dma_pool_alloc(sdev->pool, GFP_DMA32 | GFP_NOWAIT, &p_lli); if (!v_lli) { dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); goto err_lli_free; @@ -797,14 +818,16 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( v_lli->para = NORMAL_WAIT; if (dir == DMA_MEM_TO_DEV) { - v_lli->src = buf_addr + period_len * i; - v_lli->dst = sconfig->dst_addr; + sun6i_dma_set_addr(sdev, v_lli, + buf_addr + period_len * i, + sconfig->dst_addr); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, DRQ_SDRAM, vchan->port); sdev->cfg->set_mode(&v_lli->cfg, LINEAR_MODE, IO_MODE); } else { - v_lli->src = sconfig->src_addr; - v_lli->dst = buf_addr + period_len * i; + sun6i_dma_set_addr(sdev, v_lli, + sconfig->src_addr, + buf_addr + period_len * i); v_lli->cfg = lli_cfg; sdev->cfg->set_drq(&v_lli->cfg, vchan->port, DRQ_SDRAM); sdev->cfg->set_mode(&v_lli->cfg, IO_MODE, LINEAR_MODE); @@ -820,8 +843,9 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); err_lli_free: - for (prev = txd->v_lli; prev; prev = prev->v_lli_next) - dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + for (p_lli = txd->p_lli, v_lli = txd->v_lli; v_lli; + p_lli = v_lli->p_lli_next, v_lli = v_lli->v_lli_next) + dma_pool_free(sdev->pool, v_lli, p_lli); kfree(txd); return NULL; } @@ -1174,8 +1198,6 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = { }; /* - * TODO: Add support for more than 4g physical addressing. - * * The A100 binding uses the number of dma channels from the * device tree node. 
*/ @@ -1194,6 +1216,7 @@ static struct sun6i_dma_config sun50i_a100_dma_cfg = { BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES), + .has_high_addr = true, .has_mbus_clk = true, }; @@ -1248,6 +1271,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg }, { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg }, + { .compatible = "allwinner,sun20i-d1-dma", .data = &sun50i_a100_dma_cfg }, { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg }, { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg }, { .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg }, diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c new file mode 100644 index 000000000000..05cd451f541d --- /dev/null +++ b/drivers/dma/tegra186-gpc-dma.c @@ -0,0 +1,1498 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DMA driver for NVIDIA Tegra GPC DMA controller. + * + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. + */ + +#include <linux/bitfield.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/iommu.h> +#include <linux/iopoll.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> +#include <dt-bindings/memory/tegra186-mc.h> +#include "virt-dma.h" + +/* CSR register */ +#define TEGRA_GPCDMA_CHAN_CSR 0x00 +#define TEGRA_GPCDMA_CSR_ENB BIT(31) +#define TEGRA_GPCDMA_CSR_IE_EOC BIT(30) +#define TEGRA_GPCDMA_CSR_ONCE BIT(27) + +#define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24) +#define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0) +#define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1) +#define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2) +#define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \ + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3) + +#define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21) +#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0) +#define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1) +#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2) +#define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3) +#define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4) +#define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \ + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6) + +#define TEGRA_GPCDMA_CSR_REQ_SEL_MASK GENMASK(20, 16) +#define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \ + FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4) +#define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15) +#define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10) + +/* STATUS register */ +#define TEGRA_GPCDMA_CHAN_STATUS 0x004 +#define TEGRA_GPCDMA_STATUS_BUSY BIT(31) +#define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30) +#define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28) +#define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27) +#define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26) +#define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25) +#define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24) +#define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23) +#define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21) +#define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20) + +#define 
TEGRA_GPCDMA_CHAN_CSRE 0x008 +#define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31) + +/* Source address */ +#define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C + +/* Destination address */ +#define TEGRA_GPCDMA_CHAN_DST_PTR 0x010 + +/* High address pointer */ +#define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014 +#define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0) +#define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16) + +/* MC sequence register */ +#define TEGRA_GPCDMA_CHAN_MCSEQ 0x18 +#define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31) +#define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25) +#define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23) +#define TEGRA_GPCDMA_MCSEQ_BURST_2 \ + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0) +#define TEGRA_GPCDMA_MCSEQ_BURST_16 \ + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3) +#define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20) +#define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17) +#define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0 + +#define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7) +#define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0) + +/* MMIO sequence register */ +#define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c +#define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \ + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \ + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1) +#define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \ + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2) +#define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27) +#define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23 +#define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U +#define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U +#define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \ + (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT) +#define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19) +#define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16) +#define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7) + +/* Channel WCOUNT */ +#define TEGRA_GPCDMA_CHAN_WCOUNT 0x20 + +/* Transfer count */ +#define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24 + +/* DMA byte count status */ +#define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28 + +/* Error Status Register */ +#define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30 +#define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8 +#define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF +#define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \ + ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \ + TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK) +#define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF +#define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE +#define TEGRA_DMA_PERIPH_ID_ERR 0xD +#define TEGRA_DMA_STREAM_ID_ERR 0xC +#define TEGRA_DMA_MC_SLAVE_ERR 0xB +#define TEGRA_DMA_MMIO_SLAVE_ERR 0xA + +/* Fixed Pattern */ +#define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34 + +#define TEGRA_GPCDMA_CHAN_TZ 0x38 +#define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0) +#define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1) + +#define TEGRA_GPCDMA_CHAN_SPARE 0x3c +#define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16) + +/* + * If any burst is in flight and DMA paused then this is the time to complete + * on-flight burst and update DMA status register. + */ +#define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20 +#define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100 + +/* Channel base address offset from GPCDMA base address */ +#define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000 + +struct tegra_dma; +struct tegra_dma_channel; + +/* + * tegra_dma_chip_data Tegra chip specific DMA data + * @nr_channels: Number of channels available in the controller. + * @channel_reg_size: Channel register size. 
+ * @max_dma_count: Maximum DMA transfer count supported by DMA controller. + * @hw_support_pause: DMA HW engine support pause of the channel. + */ +struct tegra_dma_chip_data { + bool hw_support_pause; + unsigned int nr_channels; + unsigned int channel_reg_size; + unsigned int max_dma_count; + int (*terminate)(struct tegra_dma_channel *tdc); +}; + +/* DMA channel registers */ +struct tegra_dma_channel_regs { + u32 csr; + u32 src_ptr; + u32 dst_ptr; + u32 high_addr_ptr; + u32 mc_seq; + u32 mmio_seq; + u32 wcount; + u32 fixed_pattern; +}; + +/* + * tegra_dma_sg_req: DMA request details to configure hardware. This + * contains the details for one transfer to configure DMA hw. + * The client's request for data transfer can be broken into multiple + * sub-transfer as per requester details and hw support. This sub transfer + * get added as an array in Tegra DMA desc which manages the transfer details. + */ +struct tegra_dma_sg_req { + unsigned int len; + struct tegra_dma_channel_regs ch_regs; +}; + +/* + * tegra_dma_desc: Tegra DMA descriptors which uses virt_dma_desc to + * manage client request and keep track of transfer status, callbacks + * and request counts etc. + */ +struct tegra_dma_desc { + bool cyclic; + unsigned int bytes_req; + unsigned int bytes_xfer; + unsigned int sg_idx; + unsigned int sg_count; + struct virt_dma_desc vd; + struct tegra_dma_channel *tdc; + struct tegra_dma_sg_req sg_req[]; +}; + +/* + * tegra_dma_channel: Channel specific information + */ +struct tegra_dma_channel { + bool config_init; + char name[30]; + enum dma_transfer_direction sid_dir; + int id; + int irq; + int slave_id; + struct tegra_dma *tdma; + struct virt_dma_chan vc; + struct tegra_dma_desc *dma_desc; + struct dma_slave_config dma_sconfig; + unsigned int stream_id; + unsigned long chan_base_offset; +}; + +/* + * tegra_dma: Tegra DMA specific information + */ +struct tegra_dma { + const struct tegra_dma_chip_data *chip_data; + unsigned long sid_m2d_reserved; + unsigned long sid_d2m_reserved; + void __iomem *base_addr; + struct device *dev; + struct dma_device dma_dev; + struct reset_control *rst; + struct tegra_dma_channel channels[]; +}; + +static inline void tdc_write(struct tegra_dma_channel *tdc, + u32 reg, u32 val) +{ + writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); +} + +static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) +{ + return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg); +} + +static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) +{ + return container_of(dc, struct tegra_dma_channel, vc.chan); +} + +static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct tegra_dma_desc, vd); +} + +static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) +{ + return tdc->vc.chan.device->dev; +} + +static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc) +{ + dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n", + tdc->id, tdc->name); + dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR) + ); + dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT), + tdc_read(tdc, 
TEGRA_GPCDMA_CHAN_XFER_COUNT), + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS) + ); + dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n", + tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS)); +} + +static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc, + enum dma_transfer_direction direction) +{ + struct tegra_dma *tdma = tdc->tdma; + int sid = tdc->slave_id; + + if (!is_slave_direction(direction)) + return 0; + + switch (direction) { + case DMA_MEM_TO_DEV: + if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) { + dev_err(tdma->dev, "slave id already in use\n"); + return -EINVAL; + } + break; + case DMA_DEV_TO_MEM: + if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) { + dev_err(tdma->dev, "slave id already in use\n"); + return -EINVAL; + } + break; + default: + break; + } + + tdc->sid_dir = direction; + + return 0; +} + +static void tegra_dma_sid_free(struct tegra_dma_channel *tdc) +{ + struct tegra_dma *tdma = tdc->tdma; + int sid = tdc->slave_id; + + switch (tdc->sid_dir) { + case DMA_MEM_TO_DEV: + clear_bit(sid, &tdma->sid_m2d_reserved); + break; + case DMA_DEV_TO_MEM: + clear_bit(sid, &tdma->sid_d2m_reserved); + break; + default: + break; + } + + tdc->sid_dir = DMA_TRANS_NONE; +} + +static void tegra_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct tegra_dma_desc, vd)); +} + +static int tegra_dma_slave_config(struct dma_chan *dc, + struct dma_slave_config *sconfig) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); + tdc->config_init = true; + + return 0; +} + +static int tegra_dma_pause(struct tegra_dma_channel *tdc) +{ + int ret; + u32 val; + + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); + val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); + + /* Wait until busy bit is de-asserted */ + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, + val, + !(val & TEGRA_GPCDMA_STATUS_BUSY), + TEGRA_GPCDMA_BURST_COMPLETE_TIME, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + + if (ret) { + dev_err(tdc2dev(tdc), "DMA pause timed out\n"); + tegra_dma_dump_chan_regs(tdc); + } + + return ret; +} + +static int tegra_dma_device_pause(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + int ret; + + if (!tdc->tdma->chip_data->hw_support_pause) + return -ENOSYS; + + spin_lock_irqsave(&tdc->vc.lock, flags); + ret = tegra_dma_pause(tdc); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return ret; +} + +static void tegra_dma_resume(struct tegra_dma_channel *tdc) +{ + u32 val; + + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); + val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); +} + +static int tegra_dma_device_resume(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + + if (!tdc->tdma->chip_data->hw_support_pause) + return -ENOSYS; + + spin_lock_irqsave(&tdc->vc.lock, flags); + tegra_dma_resume(tdc); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return 0; +} + +static void tegra_dma_disable(struct tegra_dma_channel *tdc) +{ + u32 csr, status; + + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); + + /* Disable interrupts */ + csr &= ~TEGRA_GPCDMA_CSR_IE_EOC; + + /* Disable DMA */ + csr &= ~TEGRA_GPCDMA_CSR_ENB; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); + + /* Clear interrupt status if it is there */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (status & 
TEGRA_GPCDMA_STATUS_ISE_EOC) { + dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status); + } +} + +static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_channel_regs *ch_regs; + int ret; + u32 val; + + dma_desc->sg_idx++; + + /* Reset the sg index for cyclic transfers */ + if (dma_desc->sg_idx == dma_desc->sg_count) + dma_desc->sg_idx = 0; + + /* Configure next transfer immediately after DMA is busy */ + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, + val, + (val & TEGRA_GPCDMA_STATUS_BUSY), 0, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + if (ret) + return; + + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); + + /* Start DMA */ + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); +} + +static void tegra_dma_start(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_channel_regs *ch_regs; + struct virt_dma_desc *vdesc; + + if (!dma_desc) { + vdesc = vchan_next_desc(&tdc->vc); + if (!vdesc) + return; + + dma_desc = vd_to_tegra_dma_desc(vdesc); + list_del(&vdesc->node); + dma_desc->tdc = tdc; + tdc->dma_desc = dma_desc; + + tegra_dma_resume(tdc); + } + + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr); + + /* Start DMA */ + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); +} + +static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc) +{ + vchan_cookie_complete(&tdc->dma_desc->vd); + + tegra_dma_sid_free(tdc); + tdc->dma_desc = NULL; +} + +static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, + unsigned int err_status) +{ + switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) { + case TEGRA_DMA_BM_FIFO_FULL_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d bm fifo full\n", tdc->id); + break; + + case TEGRA_DMA_PERIPH_FIFO_FULL_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d peripheral fifo full\n", tdc->id); + break; + + case TEGRA_DMA_PERIPH_ID_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d illegal peripheral id\n", tdc->id); + break; + + case TEGRA_DMA_STREAM_ID_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d illegal stream id\n", tdc->id); + break; + + case TEGRA_DMA_MC_SLAVE_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d mc slave error\n", tdc->id); + break; + + case TEGRA_DMA_MMIO_SLAVE_ERR: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d mmio slave error\n", tdc->id); + break; + + default: + dev_err(tdc->tdma->dev, + "GPCDMA CH%d security violation %x\n", tdc->id, + err_status); + } +} + +static irqreturn_t tegra_dma_isr(int irq, void *dev_id) 
+{ + struct tegra_dma_channel *tdc = dev_id; + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_sg_req *sg_req; + u32 status; + + /* Check channel error status register */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS); + if (status) { + tegra_dma_chan_decode_error(tdc, status); + tegra_dma_dump_chan_regs(tdc); + tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF); + } + + spin_lock(&tdc->vc.lock); + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC)) + goto irq_done; + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, + TEGRA_GPCDMA_STATUS_ISE_EOC); + + if (!dma_desc) + goto irq_done; + + sg_req = dma_desc->sg_req; + dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len; + + if (dma_desc->cyclic) { + vchan_cyclic_callback(&dma_desc->vd); + tegra_dma_configure_next_sg(tdc); + } else { + dma_desc->sg_idx++; + if (dma_desc->sg_idx == dma_desc->sg_count) + tegra_dma_xfer_complete(tdc); + else + tegra_dma_start(tdc); + } + +irq_done: + spin_unlock(&tdc->vc.lock); + return IRQ_HANDLED; +} + +static void tegra_dma_issue_pending(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + + if (tdc->dma_desc) + return; + + spin_lock_irqsave(&tdc->vc.lock, flags); + if (vchan_issue_pending(&tdc->vc)) + tegra_dma_start(tdc); + + /* + * For cyclic DMA transfers, program the second + * transfer parameters as soon as the first DMA + * transfer is started inorder for the DMA + * controller to trigger the second transfer + * with the correct parameters. + */ + if (tdc->dma_desc && tdc->dma_desc->cyclic) + tegra_dma_configure_next_sg(tdc); + + spin_unlock_irqrestore(&tdc->vc.lock, flags); +} + +static int tegra_dma_stop_client(struct tegra_dma_channel *tdc) +{ + int ret; + u32 status, csr; + + /* + * Change the client associated with the DMA channel + * to stop DMA engine from starting any more bursts for + * the given client and wait for in flight bursts to complete + */ + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); + csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK); + csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED; + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); + + /* Wait for in flight data transfer to finish */ + udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME); + + /* If TX/RX path is still active wait till it becomes + * inactive + */ + + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + + tdc->chan_base_offset + + TEGRA_GPCDMA_CHAN_STATUS, + status, + !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX | + TEGRA_GPCDMA_STATUS_CHANNEL_RX)), + 5, + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); + if (ret) { + dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n"); + tegra_dma_dump_chan_regs(tdc); + } + + return ret; +} + +static int tegra_dma_terminate_all(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned long flags; + LIST_HEAD(head); + int err; + + spin_lock_irqsave(&tdc->vc.lock, flags); + + if (tdc->dma_desc) { + err = tdc->tdma->chip_data->terminate(tdc); + if (err) { + spin_unlock_irqrestore(&tdc->vc.lock, flags); + return err; + } + + tegra_dma_disable(tdc); + tdc->dma_desc = NULL; + } + + tegra_dma_sid_free(tdc); + vchan_get_all_descriptors(&tdc->vc, &head); + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + vchan_dma_desc_free_list(&tdc->vc, &head); + + return 0; +} + +static int tegra_dma_get_residual(struct tegra_dma_channel *tdc) +{ + struct tegra_dma_desc *dma_desc = tdc->dma_desc; + struct tegra_dma_sg_req *sg_req = dma_desc->sg_req; + unsigned 
int bytes_xfer, residual; + u32 wcount = 0, status; + + wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT); + + /* + * Set wcount = 0 if EOC bit is set. The transfer would have + * already completed and the CHAN_XFER_COUNT could have updated + * for the next transfer, specifically in case of cyclic transfers. + */ + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) + wcount = 0; + + bytes_xfer = dma_desc->bytes_xfer + + sg_req[dma_desc->sg_idx].len - (wcount * 4); + + residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req); + + return residual; +} + +static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_desc *dma_desc; + struct virt_dma_desc *vd; + unsigned int residual; + unsigned long flags; + enum dma_status ret; + + ret = dma_cookie_status(dc, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_irqsave(&tdc->vc.lock, flags); + vd = vchan_find_desc(&tdc->vc, cookie); + if (vd) { + dma_desc = vd_to_tegra_dma_desc(vd); + residual = dma_desc->bytes_req; + dma_set_residue(txstate, residual); + } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) { + residual = tegra_dma_get_residual(tdc); + dma_set_residue(txstate, residual); + } else { + dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie); + } + spin_unlock_irqrestore(&tdc->vc.lock, flags); + + return ret; +} + +static inline int get_bus_width(struct tegra_dma_channel *tdc, + enum dma_slave_buswidth slave_bw) +{ + switch (slave_bw) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32; + default: + dev_err(tdc2dev(tdc), "given slave bus width is not supported\n"); + return -EINVAL; + } +} + +static unsigned int get_burst_size(struct tegra_dma_channel *tdc, + u32 burst_size, enum dma_slave_buswidth slave_bw, + int len) +{ + unsigned int burst_mmio_width, burst_byte; + + /* + * burst_size from client is in terms of the bus_width. + * convert that into words. + * If burst_size is not specified from client, then use + * len to calculate the optimum burst size + */ + burst_byte = burst_size ? 
burst_size * slave_bw : len; + burst_mmio_width = burst_byte / 4; + + if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN) + return 0; + + burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX); + + return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width); +} + +static int get_transfer_param(struct tegra_dma_channel *tdc, + enum dma_transfer_direction direction, + u32 *apb_addr, + u32 *mmio_seq, + u32 *csr, + unsigned int *burst_size, + enum dma_slave_buswidth *slave_bw) +{ + switch (direction) { + case DMA_MEM_TO_DEV: + *apb_addr = tdc->dma_sconfig.dst_addr; + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); + *burst_size = tdc->dma_sconfig.dst_maxburst; + *slave_bw = tdc->dma_sconfig.dst_addr_width; + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC; + return 0; + case DMA_DEV_TO_MEM: + *apb_addr = tdc->dma_sconfig.src_addr; + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); + *burst_size = tdc->dma_sconfig.src_maxburst; + *slave_bw = tdc->dma_sconfig.src_addr_width; + *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC; + return 0; + default: + dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); + } + + return -EINVAL; +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value, + size_t len, unsigned long flags) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; + struct tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + u32 csr, mc_seq; + + if ((len & 3) || (dest & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + return NULL; + } + + /* Set DMA mode to fixed pattern */ + csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT; + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + /* Configure default priority weight for the channel */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program outstanding MC requests */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Set burst size */ + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = len; + dma_desc->sg_count = 1; + sg_req = dma_desc->sg_req; + + sg_req[0].ch_regs.src_ptr = 0; + sg_req[0].ch_regs.dst_ptr = dest; + sg_req[0].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); + sg_req[0].ch_regs.fixed_pattern = value; + /* Word count reg takes value as (N +1) words */ + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); + sg_req[0].ch_regs.csr = csr; + sg_req[0].ch_regs.mmio_seq = 0; + sg_req[0].ch_regs.mc_seq = mc_seq; + sg_req[0].len = len; + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_memcpy(struct dma_chan *dc, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct 
tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + unsigned int max_dma_count; + u32 csr, mc_seq; + + max_dma_count = tdc->tdma->chip_data->max_dma_count; + if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + return NULL; + } + + /* Set DMA mode to memory to memory transfer */ + csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + /* Configure default priority weight for the channel */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) | + (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); + + /* Set the address wrapping */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program outstanding MC requests */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Set burst size */ + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = len; + dma_desc->sg_count = 1; + sg_req = dma_desc->sg_req; + + sg_req[0].ch_regs.src_ptr = src; + sg_req[0].ch_regs.dst_ptr = dest; + sg_req[0].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32)); + sg_req[0].ch_regs.high_addr_ptr |= + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); + /* Word count reg takes value as (N +1) words */ + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); + sg_req[0].ch_regs.csr = csr; + sg_req[0].ch_regs.mmio_seq = 0; + sg_req[0].ch_regs.mc_seq = mc_seq; + sg_req[0].len = len; + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0; + struct tegra_dma_sg_req *sg_req; + struct tegra_dma_desc *dma_desc; + struct scatterlist *sg; + u32 burst_size; + unsigned int i; + int ret; + + if (!tdc->config_init) { + dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); + return NULL; + } + if (sg_len < 1) { + dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); + return NULL; + } + + ret = tegra_dma_sid_reserve(tdc, direction); + if (ret) + return NULL; + + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, + &burst_size, &slave_bw); + if (ret < 0) + return NULL; + + /* Enable once or continuous mode */ + csr |= TEGRA_GPCDMA_CSR_ONCE; + /* Program the slave id in requestor select */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Configure default priority weight for the channel*/ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + + mc_seq = 
tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping on both MC and MMIO side */ + + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); + + /* Program 2 MC outstanding requests by default. */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + + /* Setting MC burst size depending on MMIO burst size */ + if (burst_size == 64) + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + else + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; + + dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->sg_count = sg_len; + sg_req = dma_desc->sg_req; + + /* Make transfer requests */ + for_each_sg(sgl, sg, sg_len, i) { + u32 len; + dma_addr_t mem; + + mem = sg_dma_address(sg); + len = sg_dma_len(sg); + + if ((len & 3) || (mem & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), + "DMA length/memory address is not supported\n"); + kfree(dma_desc); + return NULL; + } + + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); + dma_desc->bytes_req += len; + + if (direction == DMA_MEM_TO_DEV) { + sg_req[i].ch_regs.src_ptr = mem; + sg_req[i].ch_regs.dst_ptr = apb_ptr; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); + } else if (direction == DMA_DEV_TO_MEM) { + sg_req[i].ch_regs.src_ptr = apb_ptr; + sg_req[i].ch_regs.dst_ptr = mem; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); + } + + /* + * Word count register takes input in words. Writing a value + * of N into word count register means a req of (N+1) words. + */ + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); + sg_req[i].ch_regs.csr = csr; + sg_req[i].ch_regs.mmio_seq = mmio_seq; + sg_req[i].ch_regs.mc_seq = mc_seq; + sg_req[i].len = len; + } + + dma_desc->cyclic = false; + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static struct dma_async_tx_descriptor * +tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction direction, + unsigned long flags) +{ + enum dma_slave_buswidth slave_bw = DMA_SLAVE_BUSWIDTH_UNDEFINED; + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; + unsigned int max_dma_count, len, period_count, i; + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma_desc *dma_desc; + struct tegra_dma_sg_req *sg_req; + dma_addr_t mem = buf_addr; + int ret; + + if (!buf_len || !period_len) { + dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); + return NULL; + } + + if (!tdc->config_init) { + dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); + return NULL; + } + + ret = tegra_dma_sid_reserve(tdc, direction); + if (ret) + return NULL; + + /* + * We only support cycle transfer when buf_len is multiple of + * period_len. 
+ */ + if (buf_len % period_len) { + dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); + return NULL; + } + + len = period_len; + max_dma_count = tdc->tdma->chip_data->max_dma_count; + if ((len & 3) || (buf_addr & 3) || len > max_dma_count) { + dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); + return NULL; + } + + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, + &burst_size, &slave_bw); + if (ret < 0) + return NULL; + + /* Enable once or continuous mode */ + csr &= ~TEGRA_GPCDMA_CSR_ONCE; + /* Program the slave id in requestor select */ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); + /* Enable IRQ mask */ + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; + /* Configure default priority weight for the channel*/ + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); + + /* Enable the DMA interrupt */ + if (flags & DMA_PREP_INTERRUPT) + csr |= TEGRA_GPCDMA_CSR_IE_EOC; + + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); + + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + /* retain stream-id and clean rest */ + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; + + /* Set the address wrapping on both MC and MMIO side */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); + + /* Program 2 MC outstanding requests by default. */ + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); + /* Setting MC burst size depending on MMIO burst size */ + if (burst_size == 64) + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; + else + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; + + period_count = buf_len / period_len; + dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count), + GFP_NOWAIT); + if (!dma_desc) + return NULL; + + dma_desc->bytes_req = buf_len; + dma_desc->sg_count = period_count; + sg_req = dma_desc->sg_req; + + /* Split transfer equal to period size */ + for (i = 0; i < period_count; i++) { + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); + if (direction == DMA_MEM_TO_DEV) { + sg_req[i].ch_regs.src_ptr = mem; + sg_req[i].ch_regs.dst_ptr = apb_ptr; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); + } else if (direction == DMA_DEV_TO_MEM) { + sg_req[i].ch_regs.src_ptr = apb_ptr; + sg_req[i].ch_regs.dst_ptr = mem; + sg_req[i].ch_regs.high_addr_ptr = + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); + } + /* + * Word count register takes input in words. Writing a value + * of N into word count register means a req of (N+1) words. 
+ */ + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); + sg_req[i].ch_regs.csr = csr; + sg_req[i].ch_regs.mmio_seq = mmio_seq; + sg_req[i].ch_regs.mc_seq = mc_seq; + sg_req[i].len = len; + + mem += len; + } + + dma_desc->cyclic = true; + + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); +} + +static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + int ret; + + ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc); + if (ret) { + dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name); + return ret; + } + + dma_cookie_init(&tdc->vc.chan); + tdc->config_init = false; + return 0; +} + +static void tegra_dma_chan_synchronize(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + synchronize_irq(tdc->irq); + vchan_synchronize(&tdc->vc); +} + +static void tegra_dma_free_chan_resources(struct dma_chan *dc) +{ + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); + + tegra_dma_terminate_all(dc); + synchronize_irq(tdc->irq); + + tasklet_kill(&tdc->vc.task); + tdc->config_init = false; + tdc->slave_id = -1; + tdc->sid_dir = DMA_TRANS_NONE; + free_irq(tdc->irq, tdc); + + vchan_free_chan_resources(&tdc->vc); +} + +static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct tegra_dma *tdma = ofdma->of_dma_data; + struct tegra_dma_channel *tdc; + struct dma_chan *chan; + + chan = dma_get_any_slave_channel(&tdma->dma_dev); + if (!chan) + return NULL; + + tdc = to_tegra_dma_chan(chan); + tdc->slave_id = dma_spec->args[0]; + + return chan; +} + +static const struct tegra_dma_chip_data tegra186_dma_chip_data = { + .nr_channels = 31, + .channel_reg_size = SZ_64K, + .max_dma_count = SZ_1G, + .hw_support_pause = false, + .terminate = tegra_dma_stop_client, +}; + +static const struct tegra_dma_chip_data tegra194_dma_chip_data = { + .nr_channels = 31, + .channel_reg_size = SZ_64K, + .max_dma_count = SZ_1G, + .hw_support_pause = true, + .terminate = tegra_dma_pause, +}; + +static const struct of_device_id tegra_dma_of_match[] = { + { + .compatible = "nvidia,tegra186-gpcdma", + .data = &tegra186_dma_chip_data, + }, { + .compatible = "nvidia,tegra194-gpcdma", + .data = &tegra194_dma_chip_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, tegra_dma_of_match); + +static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id) +{ + unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); + + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK); + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); + + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id); + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id); + + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val); + return 0; +} + +static int tegra_dma_probe(struct platform_device *pdev) +{ + const struct tegra_dma_chip_data *cdata = NULL; + struct iommu_fwspec *iommu_spec; + unsigned int stream_id, i; + struct tegra_dma *tdma; + int ret; + + cdata = of_device_get_match_data(&pdev->dev); + + tdma = devm_kzalloc(&pdev->dev, + struct_size(tdma, channels, cdata->nr_channels), + GFP_KERNEL); + if (!tdma) + return -ENOMEM; + + tdma->dev = &pdev->dev; + tdma->chip_data = cdata; + platform_set_drvdata(pdev, tdma); + + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tdma->base_addr)) + return PTR_ERR(tdma->base_addr); + + tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, 
"gpcdma"); + if (IS_ERR(tdma->rst)) { + return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst), + "Missing controller reset\n"); + } + reset_control_reset(tdma->rst); + + tdma->dma_dev.dev = &pdev->dev; + + iommu_spec = dev_iommu_fwspec_get(&pdev->dev); + if (!iommu_spec) { + dev_err(&pdev->dev, "Missing iommu stream-id\n"); + return -EINVAL; + } + stream_id = iommu_spec->ids[0] & 0xffff; + + INIT_LIST_HEAD(&tdma->dma_dev.channels); + for (i = 0; i < cdata->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + tdc->irq = platform_get_irq(pdev, i); + if (tdc->irq < 0) + return tdc->irq; + + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + + i * cdata->channel_reg_size; + snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); + tdc->tdma = tdma; + tdc->id = i; + tdc->slave_id = -1; + + vchan_init(&tdc->vc, &tdma->dma_dev); + tdc->vc.desc_free = tegra_dma_desc_free; + + /* program stream-id for this channel */ + tegra_dma_program_sid(tdc, stream_id); + tdc->stream_id = stream_id; + } + + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask); + dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); + + /* + * Only word aligned transfers are supported. Set the copy + * alignment shift. + */ + tdma->dma_dev.copy_align = 2; + tdma->dma_dev.fill_align = 2; + tdma->dma_dev.device_alloc_chan_resources = + tegra_dma_alloc_chan_resources; + tdma->dma_dev.device_free_chan_resources = + tegra_dma_free_chan_resources; + tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; + tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy; + tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset; + tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; + tdma->dma_dev.device_config = tegra_dma_slave_config; + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; + tdma->dma_dev.device_tx_status = tegra_dma_tx_status; + tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; + tdma->dma_dev.device_pause = tegra_dma_device_pause; + tdma->dma_dev.device_resume = tegra_dma_device_resume; + tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize; + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + ret = dma_async_device_register(&tdma->dma_dev); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, + "GPC DMA driver registration failed\n"); + return ret; + } + + ret = of_dma_controller_register(pdev->dev.of_node, + tegra_dma_of_xlate, tdma); + if (ret < 0) { + dev_err_probe(&pdev->dev, ret, + "GPC DMA OF registration failed\n"); + + dma_async_device_unregister(&tdma->dma_dev); + return ret; + } + + dev_info(&pdev->dev, "GPC DMA driver register %d channels\n", + cdata->nr_channels); + + return 0; +} + +static int tegra_dma_remove(struct platform_device *pdev) +{ + struct tegra_dma *tdma = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&tdma->dma_dev); + + return 0; +} + +static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + unsigned int i; + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + if (tdc->dma_desc) { + dev_err(tdma->dev, "channel %u busy\n", i); + return -EBUSY; + } + } + + return 0; +} + +static int __maybe_unused tegra_dma_pm_resume(struct device *dev) +{ + struct tegra_dma 
*tdma = dev_get_drvdata(dev); + unsigned int i; + + reset_control_reset(tdma->rst); + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + + tegra_dma_program_sid(tdc, tdc->stream_id); + } + + return 0; +} + +static const struct dev_pm_ops tegra_dma_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) +}; + +static struct platform_driver tegra_dma_driver = { + .driver = { + .name = "tegra-gpcdma", + .pm = &tegra_dma_dev_pm_ops, + .of_match_table = tegra_dma_of_match, + }, + .probe = tegra_dma_probe, + .remove = tegra_dma_remove, +}; + +module_platform_driver(tegra_dma_driver); + +MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver"); +MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>"); +MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c index 062bd9bd4de0..695915dba707 100644 --- a/drivers/dma/ti/cppi41.c +++ b/drivers/dma/ti/cppi41.c @@ -1105,8 +1105,12 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->qmgr_num_pend = glue_info->qmgr_num_pend; cdd->first_completion_queue = glue_info->first_completion_queue; + /* Parse new and deprecated dma-channels properties */ ret = of_property_read_u32(dev->of_node, - "#dma-channels", &cdd->n_chans); + "dma-channels", &cdd->n_chans); + if (ret) + ret = of_property_read_u32(dev->of_node, + "#dma-channels", &cdd->n_chans); if (ret) goto err_get_n_chans; diff --git a/drivers/dma/ti/k3-psil-am62.c b/drivers/dma/ti/k3-psil-am62.c index d431e2033237..2b6fd6e37c61 100644 --- a/drivers/dma/ti/k3-psil-am62.c +++ b/drivers/dma/ti/k3-psil-am62.c @@ -70,10 +70,10 @@ /* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ static struct psil_ep am62_src_ep_map[] = { /* SAUL */ - PSIL_SAUL(0x7500, 20, 35, 8, 35, 0), - PSIL_SAUL(0x7501, 21, 35, 8, 36, 0), - PSIL_SAUL(0x7502, 22, 43, 8, 43, 0), - PSIL_SAUL(0x7503, 23, 43, 8, 44, 0), + PSIL_SAUL(0x7504, 20, 35, 8, 35, 0), + PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), + PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), + PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), /* PDMA_MAIN0 - SPI0-3 */ PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4303), diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 7aa63b652027..dc299ab36818 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -229,7 +229,7 @@ struct zynqmp_dma_chan { bool is_dmacoherent; struct tasklet_struct tasklet; bool idle; - u32 desc_size; + size_t desc_size; bool err; u32 bus_width; u32 src_burst_len; @@ -486,7 +486,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan) } chan->desc_pool_v = dma_alloc_coherent(chan->dev, - (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS), + (2 * ZYNQMP_DMA_DESC_SIZE(chan) * + ZYNQMP_DMA_NUM_DESCS), &chan->desc_pool_p, GFP_KERNEL); if (!chan->desc_pool_v) return -ENOMEM; @@ -1077,7 +1078,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(zdev->dev, ZDMA_PM_TIMEOUT); pm_runtime_use_autosuspend(zdev->dev); pm_runtime_enable(zdev->dev); - pm_runtime_get_sync(zdev->dev); + ret = pm_runtime_resume_and_get(zdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "device wakeup failed.\n"); + pm_runtime_disable(zdev->dev); + } if (!pm_runtime_enabled(zdev->dev)) { ret = zynqmp_dma_runtime_resume(zdev->dev); if (ret) @@ -1093,7 +1098,11 @@ static int zynqmp_dma_probe(struct platform_device *pdev) p->dst_addr_widths = BIT(zdev->chan->bus_width / 
8); p->src_addr_widths = BIT(zdev->chan->bus_width / 8); - dma_async_device_register(&zdev->common); + ret = dma_async_device_register(&zdev->common); + if (ret) { + dev_err(zdev->dev, "failed to register the dma device\n"); + goto free_chan_resources; + } ret = of_dma_controller_register(pdev->dev.of_node, of_zynqmp_dma_xlate, zdev); diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index 099e358d2491..e342a6dc4c6c 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -19,6 +19,7 @@ #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> +#include <linux/pinctrl/pinconf-generic.h> #include <linux/regmap.h> #include "../pinctrl/core.h" @@ -706,7 +707,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev) struct device_node *pctlnp = of_get_parent(np); struct pinctrl_dev *pctldev = NULL; struct rockchip_pin_bank *bank = NULL; - struct rockchip_pin_output_deferred *cfg; + struct rockchip_pin_deferred *cfg; static int gpio; int id, ret; @@ -747,15 +748,27 @@ static int rockchip_gpio_probe(struct platform_device *pdev) return ret; } - while (!list_empty(&bank->deferred_output)) { - cfg = list_first_entry(&bank->deferred_output, - struct rockchip_pin_output_deferred, head); + while (!list_empty(&bank->deferred_pins)) { + cfg = list_first_entry(&bank->deferred_pins, + struct rockchip_pin_deferred, head); list_del(&cfg->head); - ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg); - if (ret) - dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg); - + switch (cfg->param) { + case PIN_CONFIG_OUTPUT: + ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg); + if (ret) + dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, + cfg->arg); + break; + case PIN_CONFIG_INPUT_ENABLE: + ret = rockchip_gpio_direction_input(&bank->gpio_chip, cfg->pin); + if (ret) + dev_warn(dev, "setting input pin %u failed\n", cfg->pin); + break; + default: + dev_warn(dev, "unknown deferred config param %d\n", cfg->param); + break; + } kfree(cfg); } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index b7ac07f43b95..3d6c3ffd5576 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -930,6 +930,11 @@ static int of_gpiochip_add_pin_range(struct gpio_chip *chip) if (!np) return 0; + if (!of_property_read_bool(np, "gpio-ranges") && + chip->of_gpio_ranges_fallback) { + return chip->of_gpio_ranges_fallback(chip, np); + } + group_names = of_find_property(np, group_names_propname, NULL); for (;; index++) { diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c index e82b815f83a6..27f4fcb058f9 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c @@ -123,8 +123,11 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe, if (fb->format->format != DRM_FORMAT_XRGB8888) return -EINVAL; - if (fb->pitches[0] * fb->height > hv->fb_size) + if (fb->pitches[0] * fb->height > hv->fb_size) { + drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n", + current->comm, fb->width, fb->height, fb->pitches[0], hv->fb_size); return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c index c0155c6271bf..76a182a9a765 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_proto.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_proto.c @@ -18,16 +18,16 @@ #define 
SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major)) #define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff) #define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16) + +/* Support for VERSION_WIN7 is removed. #define is retained for reference. */ #define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0) #define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2) #define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5) -#define SYNTHVID_DEPTH_WIN7 16 #define SYNTHVID_DEPTH_WIN8 32 -#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024) +#define SYNTHVID_WIDTH_WIN8 1600 +#define SYNTHVID_HEIGHT_WIN8 1200 #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024) -#define SYNTHVID_WIDTH_MAX_WIN7 1600 -#define SYNTHVID_HEIGHT_MAX_WIN7 1200 enum pipe_msg_type { PIPE_MSG_INVALID, @@ -496,12 +496,6 @@ int hyperv_connect_vsp(struct hv_device *hdev) case VERSION_WIN8: case VERSION_WIN8_1: ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN8); - if (!ret) - break; - fallthrough; - case VERSION_WS2008: - case VERSION_WIN7: - ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN7); break; default: ret = hyperv_negotiate_version(hdev, SYNTHVID_VERSION_WIN10); @@ -513,18 +507,15 @@ int hyperv_connect_vsp(struct hv_device *hdev) goto error; } - if (hv->synthvid_version == SYNTHVID_VERSION_WIN7) - hv->screen_depth = SYNTHVID_DEPTH_WIN7; - else - hv->screen_depth = SYNTHVID_DEPTH_WIN8; + hv->screen_depth = SYNTHVID_DEPTH_WIN8; if (hyperv_version_ge(hv->synthvid_version, SYNTHVID_VERSION_WIN10)) { ret = hyperv_get_supported_resolution(hdev); if (ret) drm_err(dev, "Failed to get supported resolution from host, use default\n"); } else { - hv->screen_width_max = SYNTHVID_WIDTH_MAX_WIN7; - hv->screen_height_max = SYNTHVID_HEIGHT_MAX_WIN7; + hv->screen_width_max = SYNTHVID_WIDTH_WIN8; + hv->screen_height_max = SYNTHVID_HEIGHT_WIN8; } hv->mmio_megabytes = hdev->channel->offermsg.offer.mmio_megabytes; diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 9c5cc2800975..b4f69364f9a1 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -51,7 +51,7 @@ static int preallocated_oos_pages = 8192; static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn) { - struct kvm *kvm = vgpu->kvm; + struct kvm *kvm = vgpu->vfio_device.kvm; int idx; bool ret; @@ -1185,7 +1185,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, if (!vgpu->attached) return -EINVAL; - pfn = gfn_to_pfn(vgpu->kvm, ops->get_pfn(entry)); + pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry)); if (is_error_noslot_pfn(pfn)) return -EINVAL; return PageTransHuge(pfn_to_page(pfn)); diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 03ecffc2ba56..aee1a45da74b 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -227,11 +227,7 @@ struct intel_vgpu { struct mutex cache_lock; struct notifier_block iommu_notifier; - struct notifier_block group_notifier; - struct kvm *kvm; - struct work_struct release_work; atomic_t released; - struct vfio_group *vfio_group; struct kvm_page_track_notifier_node track_node; #define NR_BKT (1 << 18) @@ -732,7 +728,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa, { if (!vgpu->attached) return -ESRCH; - return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false); + return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false); } /** @@ -750,7 +746,7 @@ static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu, { if (!vgpu->attached) return -ESRCH; - return 
vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true); + return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true); } void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu); diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0787ba5c301f..e2f6c56ab342 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -228,8 +228,6 @@ static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt) } } -static void intel_vgpu_release_work(struct work_struct *work); - static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size) { @@ -243,7 +241,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, for (npage = 0; npage < total_pages; npage++) { unsigned long cur_gfn = gfn + npage; - ret = vfio_group_unpin_pages(vgpu->vfio_group, &cur_gfn, 1); + ret = vfio_unpin_pages(&vgpu->vfio_device, &cur_gfn, 1); drm_WARN_ON(&i915->drm, ret != 1); } } @@ -266,8 +264,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long cur_gfn = gfn + npage; unsigned long pfn; - ret = vfio_group_pin_pages(vgpu->vfio_group, &cur_gfn, 1, - IOMMU_READ | IOMMU_WRITE, &pfn); + ret = vfio_pin_pages(&vgpu->vfio_device, &cur_gfn, 1, + IOMMU_READ | IOMMU_WRITE, &pfn); if (ret != 1) { gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", cur_gfn, ret); @@ -761,23 +759,6 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static int intel_vgpu_group_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct intel_vgpu *vgpu = - container_of(nb, struct intel_vgpu, group_notifier); - - /* the only action we care about */ - if (action == VFIO_GROUP_NOTIFY_SET_KVM) { - vgpu->kvm = data; - - if (!data) - schedule_work(&vgpu->release_work); - } - - return NOTIFY_OK; -} - static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) { struct intel_vgpu *itr; @@ -789,7 +770,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu) if (!itr->attached) continue; - if (vgpu->kvm == itr->kvm) { + if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) { ret = true; goto out; } @@ -804,61 +785,44 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); unsigned long events; int ret; - struct vfio_group *vfio_group; vgpu->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier; - vgpu->group_notifier.notifier_call = intel_vgpu_group_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, &events, - &vgpu->iommu_notifier); + ret = vfio_register_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, &events, + &vgpu->iommu_notifier); if (ret != 0) { gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", ret); goto out; } - events = VFIO_GROUP_NOTIFY_SET_KVM; - ret = vfio_register_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, &events, - &vgpu->group_notifier); - if (ret != 0) { - gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", - ret); - goto undo_iommu; - } - - vfio_group = - vfio_group_get_external_user_from_dev(vgpu->vfio_device.dev); - if (IS_ERR_OR_NULL(vfio_group)) { - ret = !vfio_group ? 
-EFAULT : PTR_ERR(vfio_group); - gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n"); - goto undo_register; - } - vgpu->vfio_group = vfio_group; - ret = -EEXIST; if (vgpu->attached) - goto undo_group; + goto undo_iommu; ret = -ESRCH; - if (!vgpu->kvm || vgpu->kvm->mm != current->mm) { + if (!vgpu->vfio_device.kvm || + vgpu->vfio_device.kvm->mm != current->mm) { gvt_vgpu_err("KVM is required to use Intel vGPU\n"); - goto undo_group; + goto undo_iommu; } + kvm_get_kvm(vgpu->vfio_device.kvm); + ret = -EEXIST; if (__kvmgt_vgpu_exist(vgpu)) - goto undo_group; + goto undo_iommu; vgpu->attached = true; - kvm_get_kvm(vgpu->kvm); kvmgt_protect_table_init(vgpu); gvt_cache_init(vgpu); vgpu->track_node.track_write = kvmgt_page_track_write; vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot; - kvm_page_track_register_notifier(vgpu->kvm, &vgpu->track_node); + kvm_page_track_register_notifier(vgpu->vfio_device.kvm, + &vgpu->track_node); debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs, &vgpu->nr_cache_entries); @@ -868,17 +832,9 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev) atomic_set(&vgpu->released, 0); return 0; -undo_group: - vfio_group_put_external_user(vgpu->vfio_group); - vgpu->vfio_group = NULL; - -undo_register: - vfio_unregister_notifier(vfio_dev->dev, VFIO_GROUP_NOTIFY, - &vgpu->group_notifier); - undo_iommu: - vfio_unregister_notifier(vfio_dev->dev, VFIO_IOMMU_NOTIFY, - &vgpu->iommu_notifier); + vfio_unregister_notifier(vfio_dev, VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); out: return ret; } @@ -894,8 +850,9 @@ static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu) } } -static void __intel_vgpu_release(struct intel_vgpu *vgpu) +static void intel_vgpu_close_device(struct vfio_device *vfio_dev) { + struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev); struct drm_i915_private *i915 = vgpu->gvt->gt->i915; int ret; @@ -907,41 +864,24 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) intel_gvt_release_vgpu(vgpu); - ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_IOMMU_NOTIFY, - &vgpu->iommu_notifier); + ret = vfio_unregister_notifier(&vgpu->vfio_device, VFIO_IOMMU_NOTIFY, + &vgpu->iommu_notifier); drm_WARN(&i915->drm, ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); - ret = vfio_unregister_notifier(vgpu->vfio_device.dev, VFIO_GROUP_NOTIFY, - &vgpu->group_notifier); - drm_WARN(&i915->drm, ret, - "vfio_unregister_notifier for group failed: %d\n", ret); - debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs)); - kvm_page_track_unregister_notifier(vgpu->kvm, &vgpu->track_node); - kvm_put_kvm(vgpu->kvm); + kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm, + &vgpu->track_node); kvmgt_protect_table_destroy(vgpu); gvt_cache_destroy(vgpu); intel_vgpu_release_msi_eventfd_ctx(vgpu); - vfio_group_put_external_user(vgpu->vfio_group); - vgpu->kvm = NULL; vgpu->attached = false; -} - -static void intel_vgpu_close_device(struct vfio_device *vfio_dev) -{ - __intel_vgpu_release(vfio_dev_to_vgpu(vfio_dev)); -} - -static void intel_vgpu_release_work(struct work_struct *work) -{ - struct intel_vgpu *vgpu = - container_of(work, struct intel_vgpu, release_work); - __intel_vgpu_release(vgpu); + if (vgpu->vfio_device.kvm) + kvm_put_kvm(vgpu->vfio_device.kvm); } static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar) @@ -1690,7 +1630,6 @@ static int intel_vgpu_probe(struct mdev_device *mdev) return PTR_ERR(vgpu); } - INIT_WORK(&vgpu->release_work, intel_vgpu_release_work); 
vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev, &intel_vgpu_dev_ops); @@ -1728,7 +1667,7 @@ static struct mdev_driver intel_vgpu_mdev_driver = { int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) { - struct kvm *kvm = info->kvm; + struct kvm *kvm = info->vfio_device.kvm; struct kvm_memory_slot *slot; int idx; @@ -1758,7 +1697,7 @@ out: int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) { - struct kvm *kvm = info->kvm; + struct kvm *kvm = info->vfio_device.kvm; struct kvm_memory_slot *slot; int idx; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index dc5c35210c16..56f7e06c673e 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -1022,11 +1022,13 @@ void vmbus_close(struct vmbus_channel *channel) EXPORT_SYMBOL_GPL(vmbus_close); /** - * vmbus_sendpacket() - Send the specified buffer on the given channel + * vmbus_sendpacket_getid() - Send the specified buffer on the given channel * @channel: Pointer to vmbus_channel structure * @buffer: Pointer to the buffer you want to send the data from. * @bufferlen: Maximum size of what the buffer holds. * @requestid: Identifier of the request + * @trans_id: Identifier of the transaction associated to this request, if + * the send is successful; undefined, otherwise. * @type: Type of packet that is being sent e.g. negotiate, time * packet etc. * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED @@ -1036,8 +1038,8 @@ EXPORT_SYMBOL_GPL(vmbus_close); * * Mainly used by Hyper-V drivers. */ -int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u64 requestid, +int vmbus_sendpacket_getid(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u64 requestid, u64 *trans_id, enum vmbus_packet_type type, u32 flags) { struct vmpacket_descriptor desc; @@ -1063,7 +1065,31 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_len = (packetlen_aligned - packetlen); - return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid); + return hv_ringbuffer_write(channel, bufferlist, num_vecs, requestid, trans_id); +} +EXPORT_SYMBOL(vmbus_sendpacket_getid); + +/** + * vmbus_sendpacket() - Send the specified buffer on the given channel + * @channel: Pointer to vmbus_channel structure + * @buffer: Pointer to the buffer you want to send the data from. + * @bufferlen: Maximum size of what the buffer holds. + * @requestid: Identifier of the request + * @type: Type of packet that is being sent e.g. negotiate, time + * packet etc. + * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED + * + * Sends data in @buffer directly to Hyper-V via the vmbus. + * This will send the data unparsed to Hyper-V. + * + * Mainly used by Hyper-V drivers. 
+ */ +int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u64 requestid, + enum vmbus_packet_type type, u32 flags) +{ + return vmbus_sendpacket_getid(channel, buffer, bufferlen, + requestid, NULL, type, flags); } EXPORT_SYMBOL(vmbus_sendpacket); @@ -1122,7 +1148,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_len = (packetlen_aligned - packetlen); - return hv_ringbuffer_write(channel, bufferlist, 3, requestid); + return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL); } EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); @@ -1160,7 +1186,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, bufferlist[2].iov_base = &aligned_data; bufferlist[2].iov_len = (packetlen_aligned - packetlen); - return hv_ringbuffer_write(channel, bufferlist, 3, requestid); + return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL); } EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc); @@ -1226,12 +1252,12 @@ u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) if (!channel->rqstor_size) return VMBUS_NO_RQSTOR; - spin_lock_irqsave(&rqstor->req_lock, flags); + lock_requestor(channel, flags); current_id = rqstor->next_request_id; /* Requestor array is full */ if (current_id >= rqstor->size) { - spin_unlock_irqrestore(&rqstor->req_lock, flags); + unlock_requestor(channel, flags); return VMBUS_RQST_ERROR; } @@ -1241,27 +1267,23 @@ u64 vmbus_next_request_id(struct vmbus_channel *channel, u64 rqst_addr) /* The already held spin lock provides atomicity */ bitmap_set(rqstor->req_bitmap, current_id, 1); - spin_unlock_irqrestore(&rqstor->req_lock, flags); + unlock_requestor(channel, flags); /* * Cannot return an ID of 0, which is reserved for an unsolicited - * message from Hyper-V. + * message from Hyper-V; Hyper-V does not acknowledge (respond to) + * VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED requests with ID of + * 0 sent by the guest. */ return current_id + 1; } EXPORT_SYMBOL_GPL(vmbus_next_request_id); -/* - * vmbus_request_addr - Returns the memory address stored at @trans_id - * in @rqstor. Uses a spin lock to avoid race conditions. - * @channel: Pointer to the VMbus channel struct - * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's - * next request id. 
- */ -u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id) +/* As in vmbus_request_addr_match() but without the requestor lock */ +u64 __vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id, + u64 rqst_addr) { struct vmbus_requestor *rqstor = &channel->requestor; - unsigned long flags; u64 req_addr; /* Check rqstor has been initialized */ @@ -1270,27 +1292,61 @@ u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id) /* Hyper-V can send an unsolicited message with ID of 0 */ if (!trans_id) - return trans_id; - - spin_lock_irqsave(&rqstor->req_lock, flags); + return VMBUS_RQST_ERROR; /* Data corresponding to trans_id is stored at trans_id - 1 */ trans_id--; /* Invalid trans_id */ - if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) { - spin_unlock_irqrestore(&rqstor->req_lock, flags); + if (trans_id >= rqstor->size || !test_bit(trans_id, rqstor->req_bitmap)) return VMBUS_RQST_ERROR; - } req_addr = rqstor->req_arr[trans_id]; - rqstor->req_arr[trans_id] = rqstor->next_request_id; - rqstor->next_request_id = trans_id; + if (rqst_addr == VMBUS_RQST_ADDR_ANY || req_addr == rqst_addr) { + rqstor->req_arr[trans_id] = rqstor->next_request_id; + rqstor->next_request_id = trans_id; - /* The already held spin lock provides atomicity */ - bitmap_clear(rqstor->req_bitmap, trans_id, 1); + /* The already held spin lock provides atomicity */ + bitmap_clear(rqstor->req_bitmap, trans_id, 1); + } - spin_unlock_irqrestore(&rqstor->req_lock, flags); return req_addr; } +EXPORT_SYMBOL_GPL(__vmbus_request_addr_match); + +/* + * vmbus_request_addr_match - Clears/removes @trans_id from the @channel's + * requestor, provided the memory address stored at @trans_id equals @rqst_addr + * (or provided @rqst_addr matches the sentinel value VMBUS_RQST_ADDR_ANY). + * + * Returns the memory address stored at @trans_id, or VMBUS_RQST_ERROR if + * @trans_id is not contained in the requestor. + * + * Acquires and releases the requestor spin lock. + */ +u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id, + u64 rqst_addr) +{ + unsigned long flags; + u64 req_addr; + + lock_requestor(channel, flags); + req_addr = __vmbus_request_addr_match(channel, trans_id, rqst_addr); + unlock_requestor(channel, flags); + + return req_addr; +} +EXPORT_SYMBOL_GPL(vmbus_request_addr_match); + +/* + * vmbus_request_addr - Returns the memory address stored at @trans_id + * in @rqstor. Uses a spin lock to avoid race conditions. + * @channel: Pointer to the VMbus channel struct + * @trans_id: Request id sent back from Hyper-V. Becomes the requestor's + * next request id. + */ +u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id) +{ + return vmbus_request_addr_match(channel, trans_id, VMBUS_RQST_ADDR_ANY); +} EXPORT_SYMBOL_GPL(vmbus_request_addr); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 85a2142c9384..b60f13481bdc 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -152,6 +152,7 @@ static const struct { { HV_AVMA1_GUID }, { HV_AVMA2_GUID }, { HV_RDV_GUID }, + { HV_IMC_GUID }, }; /* @@ -442,7 +443,7 @@ void hv_process_channel_removal(struct vmbus_channel *channel) /* * Upon suspend, an in-use hv_sock channel is removed from the array of * channels and the relid is invalidated. After hibernation, when the - * user-space appplication destroys the channel, it's unnecessary and + * user-space application destroys the channel, it's unnecessary and * unsafe to remove the channel from the array of channels. 
See also * the inline comments before the call of vmbus_release_relid() below. */ @@ -713,15 +714,13 @@ static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn) static int next_numa_node_id; /* - * Starting with Win8, we can statically distribute the incoming - * channel interrupt load by binding a channel to VCPU. + * We can statically distribute the incoming channel interrupt load + * by binding a channel to VCPU. * - * For pre-win8 hosts or non-performance critical channels we assign the - * VMBUS_CONNECT_CPU. - * - * Starting with win8, performance critical channels will be distributed - * evenly among all the available NUMA nodes. Once the node is assigned, - * we will assign the CPU based on a simple round robin scheme. + * For non-performance critical channels we assign the VMBUS_CONNECT_CPU. + * Performance critical channels will be distributed evenly among all + * the available NUMA nodes. Once the node is assigned, we will assign + * the CPU based on a simple round robin scheme. */ static void init_vp_index(struct vmbus_channel *channel) { @@ -732,13 +731,10 @@ static void init_vp_index(struct vmbus_channel *channel) u32 target_cpu; int numa_node; - if ((vmbus_proto_version == VERSION_WS2008) || - (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) || + if (!perf_chn || !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { /* - * Prior to win8, all channel interrupts are - * delivered on VMBUS_CONNECT_CPU. - * Also if the channel is not a performance critical + * If the channel is not a performance critical * channel, bind it to VMBUS_CONNECT_CPU. * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. @@ -931,11 +927,9 @@ static void vmbus_setup_channel_state(struct vmbus_channel *channel, */ channel->sig_event = VMBUS_EVENT_CONNECTION_ID; - if (vmbus_proto_version != VERSION_WS2008) { - channel->is_dedicated_interrupt = - (offer->is_dedicated_interrupt != 0); - channel->sig_event = offer->connection_id; - } + channel->is_dedicated_interrupt = + (offer->is_dedicated_interrupt != 0); + channel->sig_event = offer->connection_id; memcpy(&channel->offermsg, offer, sizeof(struct vmbus_channel_offer_channel)); @@ -975,13 +969,17 @@ find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer) return channel; } -static bool vmbus_is_valid_device(const guid_t *guid) +static bool vmbus_is_valid_offer(const struct vmbus_channel_offer_channel *offer) { + const guid_t *guid = &offer->offer.if_type; u16 i; if (!hv_is_isolation_supported()) return true; + if (is_hvsock_offer(offer)) + return true; + for (i = 0; i < ARRAY_SIZE(vmbus_devs); i++) { if (guid_equal(guid, &vmbus_devs[i].guid)) return vmbus_devs[i].allowed_in_isolated; @@ -1003,7 +1001,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) trace_vmbus_onoffer(offer); - if (!vmbus_is_valid_device(&offer->offer.if_type)) { + if (!vmbus_is_valid_offer(offer)) { pr_err_ratelimited("Invalid offer %d from the host supporting isolation\n", offer->child_relid); atomic_dec(&vmbus_connection.offer_in_progress); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index a3d8be8d6cfb..6218bbf6863a 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -47,6 +47,8 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version); /* * Table of VMBus versions listed from newest to oldest. + * VERSION_WIN7 and VERSION_WS2008 are no longer supported in + * Linux guests and are not listed. 
*/ static __u32 vmbus_versions[] = { VERSION_WIN10_V5_3, @@ -56,9 +58,7 @@ static __u32 vmbus_versions[] = { VERSION_WIN10_V4_1, VERSION_WIN10, VERSION_WIN8_1, - VERSION_WIN8, - VERSION_WIN7, - VERSION_WS2008 + VERSION_WIN8 }; /* diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index 3248b48f37f6..91e8a72eee14 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1842,7 +1842,7 @@ static int balloon_probe(struct hv_device *dev, ret = balloon_connect_vsp(dev); if (ret != 0) - return ret; + goto connect_error; enable_page_reporting(); dm_device.state = DM_INITIALIZED; @@ -1861,6 +1861,7 @@ probe_error: dm_device.thread = NULL; disable_page_reporting(); vmbus_close(dev->channel); +connect_error: #ifdef CONFIG_MEMORY_HOTPLUG unregister_memory_notifier(&hv_memory_nb); restore_online_page_callback(&hv_online_page); @@ -1882,12 +1883,21 @@ static int balloon_remove(struct hv_device *dev) cancel_work_sync(&dm->ha_wrk.wrk); kthread_stop(dm->thread); - disable_page_reporting(); - vmbus_close(dev->channel); + + /* + * This is to handle the case when balloon_resume() + * call has failed and some cleanup has been done as + * a part of the error handling. + */ + if (dm_device.state != DM_INIT_ERROR) { + disable_page_reporting(); + vmbus_close(dev->channel); #ifdef CONFIG_MEMORY_HOTPLUG - unregister_memory_notifier(&hv_memory_nb); - restore_online_page_callback(&hv_online_page); + unregister_memory_notifier(&hv_memory_nb); + restore_online_page_callback(&hv_online_page); #endif + } + spin_lock_irqsave(&dm_device.ha_lock, flags); list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) { list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) { @@ -1948,6 +1958,7 @@ close_channel: vmbus_close(dev->channel); out: dm_device.state = DM_INIT_ERROR; + disable_page_reporting(); #ifdef CONFIG_MEMORY_HOTPLUG unregister_memory_notifier(&hv_memory_nb); restore_online_page_callback(&hv_online_page); diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 6b45c22bb717..4f5b824b16cf 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -181,7 +181,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info); int hv_ringbuffer_write(struct vmbus_channel *channel, const struct kvec *kv_list, u32 kv_count, - u64 requestid); + u64 requestid, u64 *trans_id); int hv_ringbuffer_read(struct vmbus_channel *channel, void *buffer, u32 buflen, u32 *buffer_actual_len, diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 3d215d9dec43..59a4aa86d1f3 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -283,7 +283,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info) /* Write to the ring buffer. */ int hv_ringbuffer_write(struct vmbus_channel *channel, const struct kvec *kv_list, u32 kv_count, - u64 requestid) + u64 requestid, u64 *trans_id) { int i; u32 bytes_avail_towrite; @@ -294,7 +294,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, unsigned long flags; struct hv_ring_buffer_info *outring_info = &channel->outbound; struct vmpacket_descriptor *desc = kv_list[0].iov_base; - u64 rqst_id = VMBUS_NO_RQSTOR; + u64 __trans_id, rqst_id = VMBUS_NO_RQSTOR; if (channel->rescind) return -ENODEV; @@ -353,7 +353,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, } } desc = hv_get_ring_buffer(outring_info) + old_write; - desc->trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? requestid : rqst_id; + __trans_id = (rqst_id == VMBUS_NO_RQSTOR) ? 
requestid : rqst_id; + /* + * Ensure the compiler doesn't generate code that reads the value of + * the transaction ID from the ring buffer, which is shared with the + * Hyper-V host and subject to being changed at any time. + */ + WRITE_ONCE(desc->trans_id, __trans_id); + if (trans_id) + *trans_id = __trans_id; /* Set previous packet start */ prev_indices = hv_get_ring_bufferindices(outring_info); @@ -421,7 +429,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel, memcpy(buffer, (const char *)desc + offset, packetlen); /* Advance ring index to next packet descriptor */ - __hv_pkt_iter_next(channel, desc, true); + __hv_pkt_iter_next(channel, desc); /* Notify host of update */ hv_pkt_iter_close(channel); @@ -457,22 +465,6 @@ static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi) } /* - * Get first vmbus packet without copying it out of the ring buffer - */ -struct vmpacket_descriptor *hv_pkt_iter_first_raw(struct vmbus_channel *channel) -{ - struct hv_ring_buffer_info *rbi = &channel->inbound; - - hv_debug_delay_test(channel, MESSAGE_DELAY); - - if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor)) - return NULL; - - return (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index); -} -EXPORT_SYMBOL_GPL(hv_pkt_iter_first_raw); - -/* * Get first vmbus packet from ring buffer after read_index * * If ring buffer is empty, returns NULL and no other action needed. @@ -483,11 +475,14 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel) struct vmpacket_descriptor *desc, *desc_copy; u32 bytes_avail, pkt_len, pkt_offset; - desc = hv_pkt_iter_first_raw(channel); - if (!desc) + hv_debug_delay_test(channel, MESSAGE_DELAY); + + bytes_avail = hv_pkt_iter_avail(rbi); + if (bytes_avail < sizeof(struct vmpacket_descriptor)) return NULL; + bytes_avail = min(rbi->pkt_buffer_size, bytes_avail); - bytes_avail = min(rbi->pkt_buffer_size, hv_pkt_iter_avail(rbi)); + desc = (struct vmpacket_descriptor *)(hv_get_ring_buffer(rbi) + rbi->priv_read_index); /* * Ensure the compiler does not use references to incoming Hyper-V values (which @@ -534,8 +529,7 @@ EXPORT_SYMBOL_GPL(hv_pkt_iter_first); */ struct vmpacket_descriptor * __hv_pkt_iter_next(struct vmbus_channel *channel, - const struct vmpacket_descriptor *desc, - bool copy) + const struct vmpacket_descriptor *desc) { struct hv_ring_buffer_info *rbi = &channel->inbound; u32 packetlen = desc->len8 << 3; @@ -548,7 +542,7 @@ __hv_pkt_iter_next(struct vmbus_channel *channel, rbi->priv_read_index -= dsize; /* more data? */ - return copy ? hv_pkt_iter_first(channel) : hv_pkt_iter_first_raw(channel); + return hv_pkt_iter_first(channel); } EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 14de17087864..9c1b3620775c 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1263,23 +1263,17 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) unsigned long *recv_int_page; u32 maxbits, relid; - if (vmbus_proto_version < VERSION_WIN8) { - maxbits = MAX_NUM_CHANNELS_SUPPORTED; - recv_int_page = vmbus_connection.recv_int_page; - } else { - /* - * When the host is win8 and beyond, the event page - * can be directly checked to get the id of the channel - * that has the interrupt pending. 
- */ - void *page_addr = hv_cpu->synic_event_page; - union hv_synic_event_flags *event - = (union hv_synic_event_flags *)page_addr + - VMBUS_MESSAGE_SINT; + /* + * The event page can be directly checked to get the id of + * the channel that has the interrupt pending. + */ + void *page_addr = hv_cpu->synic_event_page; + union hv_synic_event_flags *event + = (union hv_synic_event_flags *)page_addr + + VMBUS_MESSAGE_SINT; - maxbits = HV_EVENT_FLAGS_COUNT; - recv_int_page = event->flags; - } + maxbits = HV_EVENT_FLAGS_COUNT; + recv_int_page = event->flags; if (unlikely(!recv_int_page)) return; @@ -1351,40 +1345,10 @@ static void vmbus_isr(void) { struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context); - void *page_addr = hv_cpu->synic_event_page; + void *page_addr; struct hv_message *msg; - union hv_synic_event_flags *event; - bool handled = false; - - if (unlikely(page_addr == NULL)) - return; - - event = (union hv_synic_event_flags *)page_addr + - VMBUS_MESSAGE_SINT; - /* - * Check for events before checking for messages. This is the order - * in which events and messages are checked in Windows guests on - * Hyper-V, and the Windows team suggested we do the same. - */ - - if ((vmbus_proto_version == VERSION_WS2008) || - (vmbus_proto_version == VERSION_WIN7)) { - - /* Since we are a child, we only need to check bit 0 */ - if (sync_test_and_clear_bit(0, event->flags)) - handled = true; - } else { - /* - * Our host is win8 or above. The signaling mechanism - * has changed and we can directly look at the event page. - * If bit n is set then we have an interrup on the channel - * whose id is n. - */ - handled = true; - } - if (handled) - vmbus_chan_sched(hv_cpu); + vmbus_chan_sched(hv_cpu); page_addr = hv_cpu->synic_message_page; msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index b0eae94909f4..c0c35785a0dc 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -656,6 +656,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) unsigned int_addr_flag = 0; struct i2c_msg *m_start = msg; bool is_read; + u8 *dma_buf = NULL; dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); @@ -703,7 +704,17 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) dev->msg = m_start; dev->recv_len_abort = false; + if (dev->use_dma) { + dma_buf = i2c_get_dma_safe_msg_buf(m_start, 1); + if (!dma_buf) { + ret = -ENOMEM; + goto out; + } + dev->buf = dma_buf; + } + ret = at91_do_twi_transfer(dev); + i2c_put_dma_safe_msg_buf(dma_buf, m_start, !ret); ret = (ret < 0) ? 
ret : num; out: diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 805c77143a0f..b4c1ad19cdae 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -760,7 +760,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap) static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, struct i2c_adapter *adap) { - unsigned long time_left; + unsigned long time_left, msg_timeout; u32 reg; id->p_msg = msg; @@ -785,8 +785,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg, else cdns_i2c_msend(id); + /* Minimal time to execute this message */ + msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk); + /* Plus some wiggle room */ + msg_timeout += msecs_to_jiffies(500); + + if (msg_timeout < adap->timeout) + msg_timeout = adap->timeout; + /* Wait for the signal of completion */ - time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout); + time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout); if (time_left == 0) { cdns_i2c_master_reset(adap); dev_err(id->adap.dev.parent, diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index e9d07323c604..9e09db31a937 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -539,10 +539,9 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); - ret = pm_runtime_get_sync(dev->dev); + ret = pm_runtime_resume_and_get(dev->dev); if (ret < 0) { dev_err(dev->dev, "Failed to runtime_get device: %d\n", ret); - pm_runtime_put_noidle(dev->dev); return ret; } @@ -821,10 +820,9 @@ static int davinci_i2c_probe(struct platform_device *pdev) pm_runtime_enable(dev->dev); - r = pm_runtime_get_sync(dev->dev); + r = pm_runtime_resume_and_get(dev->dev); if (r < 0) { dev_err(dev->dev, "failed to runtime_get device: %d\n", r); - pm_runtime_put_noidle(dev->dev); return r; } @@ -898,11 +896,9 @@ static int davinci_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&dev->adapter); - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) return ret; - } davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0); diff --git a/drivers/i2c/busses/i2c-designware-amdpsp.c b/drivers/i2c/busses/i2c-designware-amdpsp.c index 9b37f2b95abc..b624356c945f 100644 --- a/drivers/i2c/busses/i2c-designware-amdpsp.c +++ b/drivers/i2c/busses/i2c-designware-amdpsp.c @@ -16,8 +16,8 @@ #define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC) #define PSP_I2C_REQ_BUS_CMD 0x64 -#define PSP_I2C_REQ_RETRY_CNT 10 -#define PSP_I2C_REQ_RETRY_DELAY_US (50 * USEC_PER_MSEC) +#define PSP_I2C_REQ_RETRY_CNT 400 +#define PSP_I2C_REQ_RETRY_DELAY_US (25 * USEC_PER_MSEC) #define PSP_I2C_REQ_STS_OK 0x0 #define PSP_I2C_REQ_STS_BUS_BUSY 0x1 #define PSP_I2C_REQ_STS_INV_PARAM 0x3 diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 9f8574320eb2..e7d316b1401a 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -266,9 +266,9 @@ int i2c_dw_acpi_configure(struct device *device) * selected speed modes. 
*/ i2c_dw_acpi_params(device, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht); + i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); i2c_dw_acpi_params(device, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht); i2c_dw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht); - i2c_dw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht); switch (t->bus_freq_hz) { case I2C_MAX_STANDARD_MODE_FREQ: diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index c16157ee8c52..6078fa0c0d48 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c @@ -528,6 +528,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, case I2C_SMBUS_BLOCK_PROC_CALL: dev_dbg(dev, "I2C_SMBUS_BLOCK_PROC_CALL\n"); + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) + return -EINVAL; + dma_size = I2C_SMBUS_BLOCK_MAX; desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, 1); desc->wr_len_cmd = data->block[0] + 1; diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 07eb819072c4..61cc5b2462c6 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c @@ -30,18 +30,21 @@ #define REG_TOK_RDATA1 0x1c /* Control register fields */ -#define REG_CTRL_START BIT(0) -#define REG_CTRL_ACK_IGNORE BIT(1) -#define REG_CTRL_STATUS BIT(2) -#define REG_CTRL_ERROR BIT(3) -#define REG_CTRL_CLKDIV GENMASK(21, 12) -#define REG_CTRL_CLKDIVEXT GENMASK(29, 28) - -#define REG_SLV_ADDR GENMASK(7, 0) -#define REG_SLV_SDA_FILTER GENMASK(10, 8) -#define REG_SLV_SCL_FILTER GENMASK(13, 11) -#define REG_SLV_SCL_LOW GENMASK(27, 16) -#define REG_SLV_SCL_LOW_EN BIT(28) +#define REG_CTRL_START BIT(0) +#define REG_CTRL_ACK_IGNORE BIT(1) +#define REG_CTRL_STATUS BIT(2) +#define REG_CTRL_ERROR BIT(3) +#define REG_CTRL_CLKDIV_SHIFT 12 +#define REG_CTRL_CLKDIV_MASK GENMASK(21, REG_CTRL_CLKDIV_SHIFT) +#define REG_CTRL_CLKDIVEXT_SHIFT 28 +#define REG_CTRL_CLKDIVEXT_MASK GENMASK(29, REG_CTRL_CLKDIVEXT_SHIFT) + +#define REG_SLV_ADDR_MASK GENMASK(7, 0) +#define REG_SLV_SDA_FILTER_MASK GENMASK(10, 8) +#define REG_SLV_SCL_FILTER_MASK GENMASK(13, 11) +#define REG_SLV_SCL_LOW_SHIFT 16 +#define REG_SLV_SCL_LOW_MASK GENMASK(27, REG_SLV_SCL_LOW_SHIFT) +#define REG_SLV_SCL_LOW_EN BIT(28) #define I2C_TIMEOUT_MS 500 #define FILTER_DELAY 15 @@ -62,10 +65,6 @@ enum { STATE_WRITE, }; -struct meson_i2c_data { - unsigned char div_factor; -}; - /** * struct meson_i2c - Meson I2C device private data * @@ -83,7 +82,7 @@ struct meson_i2c_data { * @done: Completion used to wait for transfer termination * @tokens: Sequence of tokens to be written to the device * @num_tokens: Number of tokens - * @data: Pointer to the controlller's platform data + * @data: Pointer to the controller's platform data */ struct meson_i2c { struct i2c_adapter adap; @@ -106,6 +105,10 @@ struct meson_i2c { const struct meson_i2c_data *data; }; +struct meson_i2c_data { + void (*set_clk_div)(struct meson_i2c *i2c, unsigned int freq); +}; + static void meson_i2c_set_mask(struct meson_i2c *i2c, int reg, u32 mask, u32 val) { @@ -134,14 +137,62 @@ static void meson_i2c_add_token(struct meson_i2c *i2c, int token) i2c->num_tokens++; } -static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) +static void meson_gxbb_axg_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) +{ + unsigned long clk_rate = clk_get_rate(i2c->clk); + unsigned int div_h, div_l; + + /* According to I2C-BUS Spec 2.1, in FAST-MODE, the minimum LOW period is 1.3uS, and + * minimum HIGH is least 0.6us. 
+ * For 400000 freq, the period is 2.5us. To keep within the specs, give 40% of period to + * HIGH and 60% to LOW. This means HIGH at 1.0us and LOW 1.5us. + * The same applies for Fast-mode plus, where LOW is 0.5us and HIGH is 0.26us. + * Duty = H/(H + L) = 2/5 + */ + if (freq <= I2C_MAX_STANDARD_MODE_FREQ) { + div_h = DIV_ROUND_UP(clk_rate, freq); + div_l = DIV_ROUND_UP(div_h, 4); + div_h = DIV_ROUND_UP(div_h, 2) - FILTER_DELAY; + } else { + div_h = DIV_ROUND_UP(clk_rate * 2, freq * 5) - FILTER_DELAY; + div_l = DIV_ROUND_UP(clk_rate * 3, freq * 5 * 2); + } + + /* clock divider has 12 bits */ + if (div_h > GENMASK(11, 0)) { + dev_err(i2c->dev, "requested bus frequency too low\n"); + div_h = GENMASK(11, 0); + } + if (div_l > GENMASK(11, 0)) { + dev_err(i2c->dev, "requested bus frequency too low\n"); + div_l = GENMASK(11, 0); + } + + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, + FIELD_PREP(REG_CTRL_CLKDIV_MASK, div_h & GENMASK(9, 0))); + + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, + FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div_h >> 10)); + + /* set SCL low delay */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_MASK, + FIELD_PREP(REG_SLV_SCL_LOW_MASK, div_l)); + + /* Enable HIGH/LOW mode */ + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, REG_SLV_SCL_LOW_EN); + + dev_dbg(i2c->dev, "%s: clk %lu, freq %u, divh %u, divl %u\n", __func__, + clk_rate, freq, div_h, div_l); +} + +static void meson6_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) { unsigned long clk_rate = clk_get_rate(i2c->clk); unsigned int div; div = DIV_ROUND_UP(clk_rate, freq); div -= FILTER_DELAY; - div = DIV_ROUND_UP(div, i2c->data->div_factor); + div = DIV_ROUND_UP(div, 4); /* clock divider has 12 bits */ if (div > GENMASK(11, 0)) { @@ -149,11 +200,11 @@ static void meson_i2c_set_clk_div(struct meson_i2c *i2c, unsigned int freq) div = GENMASK(11, 0); } - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV, - FIELD_PREP(REG_CTRL_CLKDIV, div & GENMASK(9, 0))); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIV_MASK, + FIELD_PREP(REG_CTRL_CLKDIV_MASK, div & GENMASK(9, 0))); - meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT, - FIELD_PREP(REG_CTRL_CLKDIVEXT, div >> 10)); + meson_i2c_set_mask(i2c, REG_CTRL, REG_CTRL_CLKDIVEXT_MASK, + FIELD_PREP(REG_CTRL_CLKDIVEXT_MASK, div >> 10)); /* Disable HIGH/LOW mode */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_SCL_LOW_EN, 0); @@ -292,8 +343,8 @@ static void meson_i2c_do_start(struct meson_i2c *i2c, struct i2c_msg *msg) TOKEN_SLAVE_ADDR_WRITE; - meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR, - FIELD_PREP(REG_SLV_ADDR, msg->addr << 1)); + meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, REG_SLV_ADDR_MASK, + FIELD_PREP(REG_SLV_ADDR_MASK, msg->addr << 1)); meson_i2c_add_token(i2c, TOKEN_START); meson_i2c_add_token(i2c, token); @@ -467,9 +518,13 @@ static int meson_i2c_probe(struct platform_device *pdev) /* Disable filtering */ meson_i2c_set_mask(i2c, REG_SLAVE_ADDR, - REG_SLV_SDA_FILTER | REG_SLV_SCL_FILTER, 0); + REG_SLV_SDA_FILTER_MASK | REG_SLV_SCL_FILTER_MASK, 0); - meson_i2c_set_clk_div(i2c, timings.bus_freq_hz); + if (!i2c->data->set_clk_div) { + clk_disable_unprepare(i2c->clk); + return -EINVAL; + } + i2c->data->set_clk_div(i2c, timings.bus_freq_hz); ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { @@ -491,15 +546,15 @@ static int meson_i2c_remove(struct platform_device *pdev) } static const struct meson_i2c_data i2c_meson6_data = { - .div_factor = 4, + .set_clk_div = meson6_i2c_set_clk_div, }; static const struct 
meson_i2c_data i2c_gxbb_data = { - .div_factor = 4, + .set_clk_div = meson_gxbb_axg_i2c_set_clk_div, }; static const struct meson_i2c_data i2c_axg_data = { - .div_factor = 3, + .set_clk_div = meson_gxbb_axg_i2c_set_clk_div, }; static const struct of_device_id meson_i2c_match[] = { diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index f651d3e124d6..bdecb78bfc26 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1177,7 +1177,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, int left_num = num; struct mtk_i2c *i2c = i2c_get_adapdata(adap); - ret = clk_bulk_prepare_enable(I2C_MT65XX_CLK_MAX, i2c->clocks); + ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks); if (ret) return ret; @@ -1231,7 +1231,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap, ret = num; err_exit: - clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks); return ret; } @@ -1412,7 +1412,7 @@ static int mtk_i2c_probe(struct platform_device *pdev) return ret; } mtk_i2c_init_hw(i2c); - clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks); ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, @@ -1439,6 +1439,8 @@ static int mtk_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&i2c->adap); + clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + return 0; } @@ -1448,6 +1450,7 @@ static int mtk_i2c_suspend_noirq(struct device *dev) struct mtk_i2c *i2c = dev_get_drvdata(dev); i2c_mark_adapter_suspended(&i2c->adap); + clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); return 0; } @@ -1465,7 +1468,7 @@ static int mtk_i2c_resume_noirq(struct device *dev) mtk_i2c_init_hw(i2c); - clk_bulk_disable_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + clk_bulk_disable(I2C_MT65XX_CLK_MAX, i2c->clocks); i2c_mark_adapter_resumed(&i2c->adap); diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c index 901f0fb04fee..cfe6de8175dd 100644 --- a/drivers/i2c/busses/i2c-mt7621.c +++ b/drivers/i2c/busses/i2c-mt7621.c @@ -270,18 +270,15 @@ static void mtk_i2c_init(struct mtk_i2c *i2c) static int mtk_i2c_probe(struct platform_device *pdev) { - struct resource *res; struct mtk_i2c *i2c; struct i2c_adapter *adap; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL); if (!i2c) return -ENOMEM; - i2c->base = devm_ioremap_resource(&pdev->dev, res); + i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(i2c->base)) return PTR_ERR(i2c->base); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 71aad029425d..5960ccde6574 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -314,6 +314,7 @@ struct npcm_i2c { u64 rec_fail_cnt; u64 nack_cnt; u64 timeout_cnt; + u64 tx_complete_cnt; }; static inline void npcm_i2c_select_bank(struct npcm_i2c *bus, @@ -359,14 +360,14 @@ static int npcm_i2c_get_SCL(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); - return !!(I2CCTL3_SCL_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); + return !!(I2CCTL3_SCL_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static int npcm_i2c_get_SDA(struct i2c_adapter *_adap) { struct npcm_i2c *bus = container_of(_adap, struct npcm_i2c, adap); - return !!(I2CCTL3_SDA_LVL & ioread32(bus->reg + NPCM_I2CCTL3)); + return 
!!(I2CCTL3_SDA_LVL & ioread8(bus->reg + NPCM_I2CCTL3)); } static inline u16 npcm_i2c_get_index(struct npcm_i2c *bus) @@ -563,6 +564,15 @@ static inline void npcm_i2c_nack(struct npcm_i2c *bus) iowrite8(val, bus->reg + NPCM_I2CCTL1); } +static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) +{ + u8 val; + + /* Clear NEGACK, STASTR and BER bits */ + val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; + iowrite8(val, bus->reg + NPCM_I2CST); +} + #if IS_ENABLED(CONFIG_I2C_SLAVE) static void npcm_i2c_slave_int_enable(struct npcm_i2c *bus, bool enable) { @@ -642,8 +652,8 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) iowrite8(NPCM_I2CCST_BB, bus->reg + NPCM_I2CCST); iowrite8(0xFF, bus->reg + NPCM_I2CST); - /* Clear EOB bit */ - iowrite8(NPCM_I2CCST3_EO_BUSY, bus->reg + NPCM_I2CCST3); + /* Clear and disable EOB */ + npcm_i2c_eob_int(bus, false); /* Clear all fifo bits: */ iowrite8(NPCM_I2CFIF_CTS_CLR_FIFO, bus->reg + NPCM_I2CFIF_CTS); @@ -655,6 +665,9 @@ static void npcm_i2c_reset(struct npcm_i2c *bus) } #endif + /* clear status bits for spurious interrupts */ + npcm_i2c_clear_master_status(bus); + bus->state = I2C_IDLE; } @@ -684,6 +697,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus, switch (op_status) { case I2C_MASTER_DONE_IND: bus->cmd_err = bus->msgs_num; + if (bus->tx_complete_cnt < ULLONG_MAX) + bus->tx_complete_cnt++; fallthrough; case I2C_BLOCK_BYTES_ERR_IND: /* Master tx finished and all transmit bytes were sent */ @@ -815,15 +830,6 @@ static void npcm_i2c_read_fifo(struct npcm_i2c *bus, u8 bytes_in_fifo) } } -static inline void npcm_i2c_clear_master_status(struct npcm_i2c *bus) -{ - u8 val; - - /* Clear NEGACK, STASTR and BER bits */ - val = NPCM_I2CST_BER | NPCM_I2CST_NEGACK | NPCM_I2CST_STASTR; - iowrite8(val, bus->reg + NPCM_I2CST); -} - static void npcm_i2c_master_abort(struct npcm_i2c *bus) { /* Only current master is allowed to issue a stop condition */ @@ -1231,7 +1237,16 @@ static irqreturn_t npcm_i2c_int_slave_handler(struct npcm_i2c *bus) ret = IRQ_HANDLED; } /* SDAST */ - return ret; + /* + * if irq is not one of the above, make sure EOB is disabled and all + * status bits are cleared. + */ + if (ret == IRQ_NONE) { + npcm_i2c_eob_int(bus, false); + npcm_i2c_clear_master_status(bus); + } + + return IRQ_HANDLED; } static int npcm_i2c_reg_slave(struct i2c_client *client) @@ -1467,6 +1482,9 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) npcm_i2c_eob_int(bus, false); npcm_i2c_master_stop(bus); + /* Clear SDA Status bit (by reading dummy byte) */ + npcm_i2c_rd_byte(bus); + /* * The bus is released from stall only after the SW clears * NEGACK bit. Then a Stop condition is sent. 
@@ -1474,6 +1492,8 @@ static void npcm_i2c_irq_handle_nack(struct npcm_i2c *bus) npcm_i2c_clear_master_status(bus); readx_poll_timeout_atomic(ioread8, bus->reg + NPCM_I2CCST, val, !(val & NPCM_I2CCST_BUSY), 10, 200); + /* verify no status bits are still set after bus is released */ + npcm_i2c_clear_master_status(bus); } bus->state = I2C_IDLE; @@ -1672,10 +1692,10 @@ static int npcm_i2c_recovery_tgclk(struct i2c_adapter *_adap) int iter = 27; if ((npcm_i2c_get_SDA(_adap) == 1) && (npcm_i2c_get_SCL(_adap) == 1)) { - dev_dbg(bus->dev, "bus%d recovery skipped, bus not stuck", - bus->num); + dev_dbg(bus->dev, "bus%d-0x%x recovery skipped, bus not stuck", + bus->num, bus->dest_addr); npcm_i2c_reset(bus); - return status; + return 0; } npcm_i2c_int_enable(bus, false); @@ -1909,6 +1929,7 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, bus_freq_hz < I2C_FREQ_MIN_HZ || bus_freq_hz > I2C_FREQ_MAX_HZ) return -EINVAL; + npcm_i2c_int_enable(bus, false); npcm_i2c_disable(bus); /* Configure FIFO mode : */ @@ -1937,10 +1958,17 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode, val = (val | NPCM_I2CCTL1_NMINTE) & ~NPCM_I2CCTL1_RWS; iowrite8(val, bus->reg + NPCM_I2CCTL1); - npcm_i2c_int_enable(bus, true); - npcm_i2c_reset(bus); + /* check HW is OK: SDA and SCL should be high at this point. */ + if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) { + dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num); + dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap), + npcm_i2c_get_SCL(&bus->adap)); + return -ENXIO; + } + + npcm_i2c_int_enable(bus, true); return 0; } @@ -1988,10 +2016,14 @@ static irqreturn_t npcm_i2c_bus_irq(int irq, void *dev_id) #if IS_ENABLED(CONFIG_I2C_SLAVE) if (bus->slave) { bus->master_or_slave = I2C_SLAVE; - return npcm_i2c_int_slave_handler(bus); + if (npcm_i2c_int_slave_handler(bus)) + return IRQ_HANDLED; } #endif - return IRQ_NONE; + /* clear status bits for spurious interrupts */ + npcm_i2c_clear_master_status(bus); + + return IRQ_HANDLED; } static bool npcm_i2c_master_start_xmit(struct npcm_i2c *bus, @@ -2047,8 +2079,7 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, u16 nwrite, nread; u8 *write_data, *read_data; u8 slave_addr; - int timeout; - int ret = 0; + unsigned long timeout; bool read_block = false; bool read_PEC = false; u8 bus_busy; @@ -2099,13 +2130,13 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, * 9: bits per transaction (including the ack/nack) */ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite); - timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec)); + timeout = max_t(unsigned long, bus->adap.timeout, usecs_to_jiffies(timeout_usec)); if (nwrite >= 32 * 1024 || nread >= 32 * 1024) { dev_err(bus->dev, "i2c%d buffer too big\n", bus->num); return -EINVAL; } - time_left = jiffies + msecs_to_jiffies(DEFAULT_STALL_COUNT) + 1; + time_left = jiffies + timeout + 1; do { /* * we must clear slave address immediately when the bus is not @@ -2138,12 +2169,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, bus->read_block_use = read_block; reinit_completion(&bus->cmd_complete); - if (!npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, - write_data, read_data, read_PEC, - read_block)) - ret = -EBUSY; - if (ret != -EBUSY) { + npcm_i2c_int_enable(bus, true); + + if (npcm_i2c_master_start_xmit(bus, slave_addr, nwrite, nread, + 
write_data, read_data, read_PEC, + read_block)) { time_left = wait_for_completion_timeout(&bus->cmd_complete, timeout); @@ -2157,26 +2188,31 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, } } } - ret = bus->cmd_err; /* if there was BER, check if need to recover the bus: */ if (bus->cmd_err == -EAGAIN) - ret = i2c_recover_bus(adap); + bus->cmd_err = i2c_recover_bus(adap); /* * After any type of error, check if LAST bit is still set, * due to a HW issue. * It cannot be cleared without resetting the module. */ - if (bus->cmd_err && - (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL))) + else if (bus->cmd_err && + (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL))) npcm_i2c_reset(bus); + /* after any xfer, successful or not, stall and EOB must be disabled */ + npcm_i2c_stall_after_start(bus, false); + npcm_i2c_eob_int(bus, false); + #if IS_ENABLED(CONFIG_I2C_SLAVE) /* reenable slave if it was enabled */ if (bus->slave) iowrite8((bus->slave->addr & 0x7F) | NPCM_I2CADDR_SAEN, bus->reg + NPCM_I2CADDR1); +#else + npcm_i2c_int_enable(bus, false); #endif return bus->cmd_err; } @@ -2223,17 +2259,18 @@ static void npcm_i2c_init_debugfs(struct platform_device *pdev, debugfs_create_u64("rec_succ_cnt", 0444, d, &bus->rec_succ_cnt); debugfs_create_u64("rec_fail_cnt", 0444, d, &bus->rec_fail_cnt); debugfs_create_u64("timeout_cnt", 0444, d, &bus->timeout_cnt); + debugfs_create_u64("tx_complete_cnt", 0444, d, &bus->tx_complete_cnt); bus->debugfs = d; } static int npcm_i2c_probe_bus(struct platform_device *pdev) { - struct npcm_i2c *bus; + struct device_node *np = pdev->dev.of_node; + static struct regmap *gcr_regmap; struct i2c_adapter *adap; + struct npcm_i2c *bus; struct clk *i2c_clk; - static struct regmap *gcr_regmap; - static struct regmap *clk_regmap; int irq; int ret; @@ -2250,15 +2287,14 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) return PTR_ERR(i2c_clk); bus->apb_clk = clk_get_rate(i2c_clk); - gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr"); + gcr_regmap = syscon_regmap_lookup_by_phandle(np, "nuvoton,sys-mgr"); + if (IS_ERR(gcr_regmap)) + gcr_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr"); + if (IS_ERR(gcr_regmap)) return PTR_ERR(gcr_regmap); regmap_write(gcr_regmap, NPCM_I2CSEGCTL, NPCM_I2CSEGCTL_INIT_VAL); - clk_regmap = syscon_regmap_lookup_by_compatible("nuvoton,npcm750-clk"); - if (IS_ERR(clk_regmap)) - return PTR_ERR(clk_regmap); - bus->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(bus->reg)) return PTR_ERR(bus->reg); @@ -2269,7 +2305,7 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) adap = &bus->adap; adap->owner = THIS_MODULE; adap->retries = 3; - adap->timeout = HZ; + adap->timeout = msecs_to_jiffies(35); adap->algo = &npcm_i2c_algo; adap->quirks = &npcm_i2c_quirks; adap->algo_data = bus; diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c index 5241e6f414e9..2e74747eec9c 100644 --- a/drivers/i2c/busses/i2c-powermac.c +++ b/drivers/i2c/busses/i2c-powermac.c @@ -15,7 +15,7 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/of_irq.h> -#include <asm/prom.h> + #include <asm/pmac_low_i2c.h> MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 5b920f0fc7dd..6ac402ea58fb 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -727,16 +727,14 @@ 
static int setup_gpi_dma(struct geni_i2c_dev *gi2c) if (IS_ERR(gi2c->tx_c)) { ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->tx_c), "Failed to get tx DMA ch\n"); - if (ret < 0) - goto err_tx; + goto err_tx; } gi2c->rx_c = dma_request_chan(gi2c->se.dev, "rx"); if (IS_ERR(gi2c->rx_c)) { ret = dev_err_probe(gi2c->se.dev, PTR_ERR(gi2c->rx_c), "Failed to get rx DMA ch\n"); - if (ret < 0) - goto err_rx; + goto err_rx; } dev_dbg(gi2c->se.dev, "Grabbed GPI dma channels\n"); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 0db3d7559066..6e7be9d9f504 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -45,44 +45,44 @@ #define ICDMAER 0x3c /* DMA enable (Gen3) */ /* ICSCR */ -#define SDBS (1 << 3) /* slave data buffer select */ -#define SIE (1 << 2) /* slave interface enable */ -#define GCAE (1 << 1) /* general call address enable */ -#define FNA (1 << 0) /* forced non acknowledgment */ +#define SDBS BIT(3) /* slave data buffer select */ +#define SIE BIT(2) /* slave interface enable */ +#define GCAE BIT(1) /* general call address enable */ +#define FNA BIT(0) /* forced non acknowledgment */ /* ICMCR */ -#define MDBS (1 << 7) /* non-fifo mode switch */ -#define FSCL (1 << 6) /* override SCL pin */ -#define FSDA (1 << 5) /* override SDA pin */ -#define OBPC (1 << 4) /* override pins */ -#define MIE (1 << 3) /* master if enable */ -#define TSBE (1 << 2) -#define FSB (1 << 1) /* force stop bit */ -#define ESG (1 << 0) /* enable start bit gen */ +#define MDBS BIT(7) /* non-fifo mode switch */ +#define FSCL BIT(6) /* override SCL pin */ +#define FSDA BIT(5) /* override SDA pin */ +#define OBPC BIT(4) /* override pins */ +#define MIE BIT(3) /* master if enable */ +#define TSBE BIT(2) +#define FSB BIT(1) /* force stop bit */ +#define ESG BIT(0) /* enable start bit gen */ /* ICSSR (also for ICSIER) */ -#define GCAR (1 << 6) /* general call received */ -#define STM (1 << 5) /* slave transmit mode */ -#define SSR (1 << 4) /* stop received */ -#define SDE (1 << 3) /* slave data empty */ -#define SDT (1 << 2) /* slave data transmitted */ -#define SDR (1 << 1) /* slave data received */ -#define SAR (1 << 0) /* slave addr received */ +#define GCAR BIT(6) /* general call received */ +#define STM BIT(5) /* slave transmit mode */ +#define SSR BIT(4) /* stop received */ +#define SDE BIT(3) /* slave data empty */ +#define SDT BIT(2) /* slave data transmitted */ +#define SDR BIT(1) /* slave data received */ +#define SAR BIT(0) /* slave addr received */ /* ICMSR (also for ICMIE) */ -#define MNR (1 << 6) /* nack received */ -#define MAL (1 << 5) /* arbitration lost */ -#define MST (1 << 4) /* sent a stop */ -#define MDE (1 << 3) -#define MDT (1 << 2) -#define MDR (1 << 1) -#define MAT (1 << 0) /* slave addr xfer done */ +#define MNR BIT(6) /* nack received */ +#define MAL BIT(5) /* arbitration lost */ +#define MST BIT(4) /* sent a stop */ +#define MDE BIT(3) +#define MDT BIT(2) +#define MDR BIT(1) +#define MAT BIT(0) /* slave addr xfer done */ /* ICDMAER */ -#define RSDMAE (1 << 3) /* DMA Slave Received Enable */ -#define TSDMAE (1 << 2) /* DMA Slave Transmitted Enable */ -#define RMDMAE (1 << 1) /* DMA Master Received Enable */ -#define TMDMAE (1 << 0) /* DMA Master Transmitted Enable */ +#define RSDMAE BIT(3) /* DMA Slave Received Enable */ +#define TSDMAE BIT(2) /* DMA Slave Transmitted Enable */ +#define RMDMAE BIT(1) /* DMA Master Received Enable */ +#define TMDMAE BIT(0) /* DMA Master Transmitted Enable */ /* ICFBSCR */ #define TCYC17 0x0f /* 17*Tcyc 
delay 1st bit between SDA and SCL */ @@ -97,17 +97,15 @@ #define RCAR_IRQ_RECV (MNR | MAL | MST | MAT | MDR) #define RCAR_IRQ_STOP (MST) -#define RCAR_IRQ_ACK_SEND (~(MAT | MDE) & 0x7F) -#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0x7F) - -#define ID_LAST_MSG (1 << 0) -#define ID_FIRST_MSG (1 << 1) -#define ID_DONE (1 << 2) -#define ID_ARBLOST (1 << 3) -#define ID_NACK (1 << 4) +#define ID_LAST_MSG BIT(0) +#define ID_REP_AFTER_RD BIT(1) +#define ID_DONE BIT(2) +#define ID_ARBLOST BIT(3) +#define ID_NACK BIT(4) +#define ID_EPROTO BIT(5) /* persistent flags */ -#define ID_P_HOST_NOTIFY BIT(28) -#define ID_P_REP_AFTER_RD BIT(29) +#define ID_P_NOT_ATOMIC BIT(28) +#define ID_P_HOST_NOTIFY BIT(29) #define ID_P_NO_RXDMA BIT(30) /* HW forbids RXDMA sometimes */ #define ID_P_PM_BLOCKED BIT(31) #define ID_P_MASK GENMASK(31, 28) @@ -141,7 +139,6 @@ struct rcar_i2c_priv { enum dma_data_direction dma_direction; struct reset_control *rstc; - bool atomic_xfer; int irq; struct i2c_client *host_notify_client; @@ -160,6 +157,11 @@ static u32 rcar_i2c_read(struct rcar_i2c_priv *priv, int reg) return readl(priv->io + reg); } +static void rcar_i2c_clear_irq(struct rcar_i2c_priv *priv, u32 val) +{ + writel(~val & 0x7f, priv->io + ICMSR); +} + static int rcar_i2c_get_scl(struct i2c_adapter *adap) { struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); @@ -330,41 +332,46 @@ scgd_find: return 0; } +/* + * We don't have a test case but the HW engineers say that the write order of + * ICMSR and ICMCR depends on whether we issue START or REP_START. So, ICMSR + * handling is outside of this function. First messages clear ICMSR before this + * function, interrupt handlers clear the relevant bits after this function. + */ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv) { int read = !!rcar_i2c_is_recv(priv); + bool rep_start = !(priv->flags & ID_REP_AFTER_RD); priv->pos = 0; + priv->flags &= ID_P_MASK; + if (priv->msgs_left == 1) priv->flags |= ID_LAST_MSG; rcar_i2c_write(priv, ICMAR, i2c_8bit_addr_from_msg(priv->msg)); - if (!priv->atomic_xfer) + if (priv->flags & ID_P_NOT_ATOMIC) rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND); - /* - * We don't have a test case but the HW engineers say that the write order - * of ICMSR and ICMCR depends on whether we issue START or REP_START. Since - * it didn't cause a drawback for me, let's rather be safe than sorry. 
- */ - if (priv->flags & ID_FIRST_MSG) { - rcar_i2c_write(priv, ICMSR, 0); + if (rep_start) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); - } else { - if (priv->flags & ID_P_REP_AFTER_RD) - priv->flags &= ~ID_P_REP_AFTER_RD; - else - rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); - rcar_i2c_write(priv, ICMSR, 0); - } +} + +static void rcar_i2c_first_msg(struct rcar_i2c_priv *priv, + struct i2c_msg *msgs, int num) +{ + priv->msg = msgs; + priv->msgs_left = num; + rcar_i2c_write(priv, ICMSR, 0); /* must be before preparing msg */ + rcar_i2c_prepare_msg(priv); } static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv) { priv->msg++; priv->msgs_left--; - priv->flags &= ID_P_MASK; rcar_i2c_prepare_msg(priv); + /* ICMSR handling must come afterwards in the irq handler */ } static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate) @@ -413,7 +420,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) int len; /* Do various checks to see if DMA is feasible at all */ - if (priv->atomic_xfer || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN || + if (!(priv->flags & ID_P_NOT_ATOMIC) || IS_ERR(chan) || msg->len < RCAR_MIN_DMA_LEN || !(msg->flags & I2C_M_DMA_SAFE) || (read && priv->flags & ID_P_NO_RXDMA)) return false; @@ -475,11 +482,15 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv) static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr) { struct i2c_msg *msg = priv->msg; + u32 irqs_to_clear = MDE; /* FIXME: sometimes, unknown interrupt happened. Do nothing */ if (!(msr & MDE)) return; + if (msr & MAT) + irqs_to_clear |= MAT; + /* Check if DMA can be enabled and take over */ if (priv->pos == 1 && rcar_i2c_dma(priv)) return; @@ -503,31 +514,32 @@ static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr) * [ICRXTX] -> [SHIFT] -> [I2C bus] */ - if (priv->flags & ID_LAST_MSG) { + if (priv->flags & ID_LAST_MSG) /* * If current msg is the _LAST_ msg, * prepare stop condition here. * ID_DONE will be set on STOP irq. */ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); - } else { + else rcar_i2c_next_msg(priv); - return; - } } - rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND); + rcar_i2c_clear_irq(priv, irqs_to_clear); } static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr) { struct i2c_msg *msg = priv->msg; + bool recv_len_init = priv->pos == 0 && msg->flags & I2C_M_RECV_LEN; + u32 irqs_to_clear = MDR; /* FIXME: sometimes, unknown interrupt happened. Do nothing */ if (!(msr & MDR)) return; if (msr & MAT) { + irqs_to_clear |= MAT; /* * Address transfer phase finished, but no data at this point. * Try to use DMA to receive data. @@ -535,24 +547,41 @@ static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr) rcar_i2c_dma(priv); } else if (priv->pos < msg->len) { /* get received data */ - msg->buf[priv->pos] = rcar_i2c_read(priv, ICRXTX); + u8 data = rcar_i2c_read(priv, ICRXTX); + + msg->buf[priv->pos] = data; + if (recv_len_init) { + if (data == 0 || data > I2C_SMBUS_BLOCK_MAX) { + priv->flags |= ID_DONE | ID_EPROTO; + return; + } + msg->len += msg->buf[0]; + /* Enough data for DMA? */ + if (rcar_i2c_dma(priv)) + return; + /* new length after RECV_LEN now properly initialized */ + recv_len_init = false; + } priv->pos++; } - /* If next received data is the _LAST_, go to new phase. */ - if (priv->pos + 1 == msg->len) { + /* + * If next received data is the _LAST_ and we are not waiting for a new + * length because of RECV_LEN, then go to a new phase. 
+ */ + if (priv->pos + 1 == msg->len && !recv_len_init) { if (priv->flags & ID_LAST_MSG) { rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP); } else { rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START); - priv->flags |= ID_P_REP_AFTER_RD; + priv->flags |= ID_REP_AFTER_RD; } } if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG)) rcar_i2c_next_msg(priv); - else - rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV); + + rcar_i2c_clear_irq(priv, irqs_to_clear); } static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) @@ -641,7 +670,7 @@ static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr) /* Nack */ if (msr & MNR) { /* HW automatically sends STOP after received NACK */ - if (!priv->atomic_xfer) + if (priv->flags & ID_P_NOT_ATOMIC) rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP); priv->flags |= ID_NACK; goto out; @@ -663,7 +692,7 @@ out: if (priv->flags & ID_DONE) { rcar_i2c_write(priv, ICMIER, 0); rcar_i2c_write(priv, ICMSR, 0); - if (!priv->atomic_xfer) + if (priv->flags & ID_P_NOT_ATOMIC) wake_up(&priv->wait); } @@ -676,12 +705,12 @@ static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr) u32 msr; /* Clear START or STOP immediately, except for REPSTART after read */ - if (likely(!(priv->flags & ID_P_REP_AFTER_RD))) + if (likely(!(priv->flags & ID_REP_AFTER_RD))) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); /* Only handle interrupts that are currently enabled */ msr = rcar_i2c_read(priv, ICMSR); - if (!priv->atomic_xfer) + if (priv->flags & ID_P_NOT_ATOMIC) msr &= rcar_i2c_read(priv, ICMIER); return rcar_i2c_irq(irq, priv, msr); @@ -694,14 +723,14 @@ static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr) /* Only handle interrupts that are currently enabled */ msr = rcar_i2c_read(priv, ICMSR); - if (!priv->atomic_xfer) + if (priv->flags & ID_P_NOT_ATOMIC) msr &= rcar_i2c_read(priv, ICMIER); /* * Clear START or STOP immediately, except for REPSTART after read or * if a spurious interrupt was detected. 
*/ - if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr)) + if (likely(!(priv->flags & ID_REP_AFTER_RD) && msr)) rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA); return rcar_i2c_irq(irq, priv, msr); @@ -803,7 +832,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, int i, ret; long time_left; - priv->atomic_xfer = false; + priv->flags |= ID_P_NOT_ATOMIC; pm_runtime_get_sync(dev); @@ -827,11 +856,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, for (i = 0; i < num; i++) rcar_i2c_request_dma(priv, msgs + i); - /* init first message */ - priv->msg = msgs; - priv->msgs_left = num; - priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG; - rcar_i2c_prepare_msg(priv); + rcar_i2c_first_msg(priv, msgs, num); time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE, num * adap->timeout); @@ -847,6 +872,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, ret = -ENXIO; } else if (priv->flags & ID_ARBLOST) { ret = -EAGAIN; + } else if (priv->flags & ID_EPROTO) { + ret = -EPROTO; } else { ret = num - priv->msgs_left; /* The number of transfer */ } @@ -869,7 +896,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap, bool time_left; int ret; - priv->atomic_xfer = true; + priv->flags &= ~ID_P_NOT_ATOMIC; pm_runtime_get_sync(dev); @@ -879,12 +906,7 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap, goto out; rcar_i2c_init(priv); - - /* init first message */ - priv->msg = msgs; - priv->msgs_left = num; - priv->flags = (priv->flags & ID_P_MASK) | ID_FIRST_MSG; - rcar_i2c_prepare_msg(priv); + rcar_i2c_first_msg(priv, msgs, num); j = jiffies + num * adap->timeout; do { @@ -909,6 +931,8 @@ static int rcar_i2c_master_xfer_atomic(struct i2c_adapter *adap, ret = -ENXIO; } else if (priv->flags & ID_ARBLOST) { ret = -EAGAIN; + } else if (priv->flags & ID_EPROTO) { + ret = -EPROTO; } else { ret = num - priv->msgs_left; /* The number of transfer */ } @@ -975,7 +999,7 @@ static u32 rcar_i2c_func(struct i2c_adapter *adap) * I2C_M_IGNORE_NAK (automatically sends STOP after NAK) */ u32 func = I2C_FUNC_I2C | I2C_FUNC_SLAVE | - (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + (I2C_FUNC_SMBUS_EMUL_ALL & ~I2C_FUNC_SMBUS_QUICK); if (priv->flags & ID_P_HOST_NOTIFY) func |= I2C_FUNC_SMBUS_HOST_NOTIFY; @@ -1063,8 +1087,10 @@ static int rcar_i2c_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_sync(dev); ret = rcar_i2c_clock_calculate(priv); - if (ret < 0) - goto out_pm_put; + if (ret < 0) { + pm_runtime_put(dev); + goto out_pm_disable; + } rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ @@ -1093,19 +1119,19 @@ static int rcar_i2c_probe(struct platform_device *pdev) ret = platform_get_irq(pdev, 0); if (ret < 0) - goto out_pm_disable; + goto out_pm_put; priv->irq = ret; ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv); if (ret < 0) { dev_err(dev, "cannot get irq %d\n", priv->irq); - goto out_pm_disable; + goto out_pm_put; } platform_set_drvdata(pdev, priv); ret = i2c_add_numbered_adapter(adap); if (ret < 0) - goto out_pm_disable; + goto out_pm_put; if (priv->flags & ID_P_HOST_NOTIFY) { priv->host_notify_client = i2c_new_slave_host_notify_device(adap); @@ -1122,7 +1148,8 @@ static int rcar_i2c_probe(struct platform_device *pdev) out_del_device: i2c_del_adapter(&priv->adap); out_pm_put: - pm_runtime_put(dev); + if (priv->flags & ID_P_PM_BLOCKED) + pm_runtime_put(dev); out_pm_disable: pm_runtime_disable(dev); return ret; diff --git a/drivers/i2c/busses/i2c-xiic.c 
b/drivers/i2c/busses/i2c-xiic.c index ffefe3c482e9..9a1c3f8b7048 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -78,24 +78,23 @@ struct xiic_i2c { bool singlemaster; }; - #define XIIC_MSB_OFFSET 0 -#define XIIC_REG_OFFSET (0x100+XIIC_MSB_OFFSET) +#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET) /* * Register offsets in bytes from RegisterBase. Three is added to the * base offset to access LSB (IBM style) of the word */ -#define XIIC_CR_REG_OFFSET (0x00+XIIC_REG_OFFSET) /* Control Register */ -#define XIIC_SR_REG_OFFSET (0x04+XIIC_REG_OFFSET) /* Status Register */ -#define XIIC_DTR_REG_OFFSET (0x08+XIIC_REG_OFFSET) /* Data Tx Register */ -#define XIIC_DRR_REG_OFFSET (0x0C+XIIC_REG_OFFSET) /* Data Rx Register */ -#define XIIC_ADR_REG_OFFSET (0x10+XIIC_REG_OFFSET) /* Address Register */ -#define XIIC_TFO_REG_OFFSET (0x14+XIIC_REG_OFFSET) /* Tx FIFO Occupancy */ -#define XIIC_RFO_REG_OFFSET (0x18+XIIC_REG_OFFSET) /* Rx FIFO Occupancy */ -#define XIIC_TBA_REG_OFFSET (0x1C+XIIC_REG_OFFSET) /* 10 Bit Address reg */ -#define XIIC_RFD_REG_OFFSET (0x20+XIIC_REG_OFFSET) /* Rx FIFO Depth reg */ -#define XIIC_GPO_REG_OFFSET (0x24+XIIC_REG_OFFSET) /* Output Register */ +#define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET) /* Control Register */ +#define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET) /* Status Register */ +#define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET) /* Data Tx Register */ +#define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET) /* Data Rx Register */ +#define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET) /* Address Register */ +#define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET) /* Tx FIFO Occupancy */ +#define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET) /* Rx FIFO Occupancy */ +#define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET) /* 10 Bit Address reg */ +#define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */ +#define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */ /* Control Register masks */ #define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */ @@ -233,18 +232,21 @@ static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg) static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask) { u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); + xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask); } static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask) { u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET); + xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask); } static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask) { u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET); + xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask); } @@ -355,7 +357,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c) while (len--) { u16 data = i2c->tx_msg->buf[i2c->tx_pos++]; - if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) { + + if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) { /* last message in transfer -> STOP */ data |= XIIC_TX_DYN_STOP_MASK; dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__); @@ -381,6 +384,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id) int xfer_more = 0; int wakeup_req = 0; int wakeup_code = 0; + int ret; /* Get the interrupt Status from the IPIF. There is no clearing of * interrupts in the IPIF. Interrupts must be cleared at the source. 
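The xiic_fill_tx_fifo() hunk above only tidies the last-byte condition; the underlying trick is that the transmit path builds a 16-bit word in which an extra bit can request a dynamic STOP, so the final byte of the final message carries the stop request with it. A rough sketch of that idea, with the bit position assumed here (the driver's real mask is XIIC_TX_DYN_STOP_MASK):

#include <linux/types.h>

#define TX_DYN_STOP     0x0200  /* assumed bit; see XIIC_TX_DYN_STOP_MASK in the driver */

static u16 xiic_tx_word(u8 byte, bool last_byte_of_last_msg)
{
        u16 data = byte;

        if (last_byte_of_last_msg)
                data |= TX_DYN_STOP;    /* controller generates STOP after this byte */

        return data;
}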
@@ -401,8 +405,8 @@ static irqreturn_t xiic_process(int irq, void *dev_id) /* Service requesting interrupt */ if ((pend & XIIC_INTR_ARB_LOST_MASK) || - ((pend & XIIC_INTR_TX_ERROR_MASK) && - !(pend & XIIC_INTR_RX_FULL_MASK))) { + ((pend & XIIC_INTR_TX_ERROR_MASK) && + !(pend & XIIC_INTR_RX_FULL_MASK))) { /* bus arbritration lost, or... * Transmit error _OR_ RX completed * if this happens when RX_FULL is not set @@ -415,7 +419,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id) * fifos and the next message is a TX with len 0 (only addr) * reset the IP instead of just flush fifos */ - xiic_reinit(i2c); + ret = xiic_reinit(i2c); + if (!ret) + dev_dbg(i2c->adap.dev.parent, "reinit failed\n"); if (i2c->rx_msg) { wakeup_req = 1; @@ -462,24 +468,6 @@ static irqreturn_t xiic_process(int irq, void *dev_id) } } } - if (pend & XIIC_INTR_BNB_MASK) { - /* IIC bus has transitioned to not busy */ - clr |= XIIC_INTR_BNB_MASK; - - /* The bus is not busy, disable BusNotBusy interrupt */ - xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK); - - if (!i2c->tx_msg) - goto out; - - wakeup_req = 1; - - if (i2c->nmsgs == 1 && !i2c->rx_msg && - xiic_tx_space(i2c) == 0) - wakeup_code = STATE_DONE; - else - wakeup_code = STATE_ERROR; - } if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) { /* Transmit register/FIFO is empty or ½ empty */ @@ -516,6 +504,26 @@ static irqreturn_t xiic_process(int irq, void *dev_id) */ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK); } + + if (pend & XIIC_INTR_BNB_MASK) { + /* IIC bus has transitioned to not busy */ + clr |= XIIC_INTR_BNB_MASK; + + /* The bus is not busy, disable BusNotBusy interrupt */ + xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK); + + if (!i2c->tx_msg) + goto out; + + wakeup_req = 1; + + if (i2c->nmsgs == 1 && !i2c->rx_msg && + xiic_tx_space(i2c) == 0) + wakeup_code = STATE_DONE; + else + wakeup_code = STATE_ERROR; + } + out: dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr); @@ -570,7 +578,7 @@ static int xiic_busy(struct xiic_i2c *i2c) static void xiic_start_recv(struct xiic_i2c *i2c) { - u8 rx_watermark; + u16 rx_watermark; struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; /* Clear and enable Rx full interrupt. 
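The xiic_start_recv() change that begins just above and continues below widens rx_watermark from u8 to u16, so msg->len is clamped to the FIFO depth before the value is narrowed, rather than being truncated first; only the clamped result is cast to u8 when it is written to the 8-bit RX FIFO depth register. The shape of that fix, assuming a FIFO depth of 16 (the driver's constant is IIC_RX_FIFO_DEPTH) and a non-zero message length, as in the driver:

#include <linux/kernel.h>

#define RX_FIFO_DEPTH   16      /* assumed depth; the driver uses IIC_RX_FIFO_DEPTH */

/* Clamp in the wider type first, then narrow for the 8-bit depth register. */
static u8 rx_fifo_watermark(u16 msg_len)
{
        u16 watermark = min_t(u16, msg_len, RX_FIFO_DEPTH);

        return (u8)(watermark - 1);     /* register is programmed as depth - 1 */
}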
*/ @@ -585,7 +593,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c) rx_watermark = msg->len; if (rx_watermark > IIC_RX_FIFO_DEPTH) rx_watermark = IIC_RX_FIFO_DEPTH; - xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1); + xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, (u8)(rx_watermark - 1)); if (!(msg->flags & I2C_M_NOSTART)) /* write the address */ @@ -638,6 +646,7 @@ static void xiic_start_send(struct xiic_i2c *i2c) static void __xiic_start_xfer(struct xiic_i2c *i2c) { int fifo_space = xiic_tx_fifo_space(i2c); + dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n", __func__, i2c->tx_msg, fifo_space); @@ -739,7 +748,6 @@ static const struct i2c_adapter xiic_adapter = { .quirks = &xiic_quirks, }; - static int xiic_i2c_probe(struct platform_device *pdev) { struct xiic_i2c *i2c; @@ -899,6 +907,7 @@ static const struct dev_pm_ops xiic_dev_pm_ops = { SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend, xiic_i2c_runtime_resume, NULL) }; + static struct platform_driver xiic_i2c_driver = { .probe = xiic_i2c_probe, .remove = xiic_i2c_remove, @@ -914,4 +923,3 @@ module_platform_driver(xiic_i2c_driver); MODULE_AUTHOR("info@mocean-labs.com"); MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("platform:"DRIVER_NAME); diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 8c01123dc4ed..6aef5ce43cc1 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -768,13 +768,8 @@ static int i3c_hci_probe(struct platform_device *pdev) static int i3c_hci_remove(struct platform_device *pdev) { struct i3c_hci *hci = platform_get_drvdata(pdev); - int ret; - ret = i3c_master_unregister(&hci->master); - if (ret) - return ret; - - return 0; + return i3c_master_unregister(&hci->master); } static const __maybe_unused struct of_device_id i3c_hci_of_match[] = { diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index 7550dad64ecf..d6e9ed74cdcf 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -1597,12 +1597,11 @@ static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev) static int __maybe_unused svc_i3c_runtime_resume(struct device *dev) { struct svc_i3c_master *master = dev_get_drvdata(dev); - int ret = 0; pinctrl_pm_select_default_state(dev); svc_i3c_master_prepare_clks(master); - return ret; + return 0; } static const struct dev_pm_ops svc_i3c_pm_ops = { diff --git a/drivers/input/input.c b/drivers/input/input.c index e5a668ce884d..1365c9dfb5f2 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -1793,8 +1793,6 @@ EXPORT_SYMBOL(input_reset_device); static int input_inhibit_device(struct input_dev *dev) { - int ret = 0; - mutex_lock(&dev->mutex); if (dev->inhibited) @@ -1816,7 +1814,7 @@ static int input_inhibit_device(struct input_dev *dev) out: mutex_unlock(&dev->mutex); - return ret; + return 0; } static int input_uninhibit_device(struct input_dev *dev) diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 3b23078bc7b5..505a032e2786 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -399,4 +399,15 @@ config JOYSTICK_N64 Say Y here if you want enable support for the four built-in controller ports on the Nintendo 64 console. 
+config JOYSTICK_SENSEHAT + tristate "Raspberry Pi Sense HAT joystick" + depends on INPUT && I2C + select MFD_SIMPLE_MFD_I2C + help + Say Y here if you want to enable the driver for the + the Raspberry Pi Sense HAT. + + To compile this driver as a module, choose M here: the + module will be called sensehat_joystick. + endif diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile index 5174b8aba2dd..3937535f0098 100644 --- a/drivers/input/joystick/Makefile +++ b/drivers/input/joystick/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_JOYSTICK_N64) += n64joy.o obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o obj-$(CONFIG_JOYSTICK_QWIIC) += qwiic-joystick.o +obj-$(CONFIG_JOYSTICK_SENSEHAT) += sensehat-joystick.o obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o obj-$(CONFIG_JOYSTICK_SPACEBALL) += spaceball.o obj-$(CONFIG_JOYSTICK_SPACEORB) += spaceorb.o diff --git a/drivers/input/joystick/sensehat-joystick.c b/drivers/input/joystick/sensehat-joystick.c new file mode 100644 index 000000000000..5ad1fe4ff496 --- /dev/null +++ b/drivers/input/joystick/sensehat-joystick.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Raspberry Pi Sense HAT joystick driver + * http://raspberrypi.org + * + * Copyright (C) 2015 Raspberry Pi + * Copyright (C) 2021 Charles Mirabile, Mwesigwa Guma, Joel Savitz + * + * Original Author: Serge Schneider + * Revised for upstream Linux by: Charles Mirabile, Mwesigwa Guma, Joel Savitz + */ + +#include <linux/module.h> +#include <linux/input.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/property.h> + +#define JOYSTICK_SMB_REG 0xf2 + +struct sensehat_joystick { + struct platform_device *pdev; + struct input_dev *keys_dev; + unsigned long prev_states; + struct regmap *regmap; +}; + +static const unsigned int keymap[] = { + BTN_DPAD_DOWN, BTN_DPAD_RIGHT, BTN_DPAD_UP, BTN_SELECT, BTN_DPAD_LEFT, +}; + +static irqreturn_t sensehat_joystick_report(int irq, void *cookie) +{ + struct sensehat_joystick *sensehat_joystick = cookie; + unsigned long curr_states, changes; + unsigned int keys; + int error; + int i; + + error = regmap_read(sensehat_joystick->regmap, JOYSTICK_SMB_REG, &keys); + if (error < 0) { + dev_err(&sensehat_joystick->pdev->dev, + "Failed to read joystick state: %d", error); + return IRQ_NONE; + } + curr_states = keys; + bitmap_xor(&changes, &curr_states, &sensehat_joystick->prev_states, + ARRAY_SIZE(keymap)); + + for_each_set_bit(i, &changes, ARRAY_SIZE(keymap)) + input_report_key(sensehat_joystick->keys_dev, keymap[i], + curr_states & BIT(i)); + + input_sync(sensehat_joystick->keys_dev); + sensehat_joystick->prev_states = keys; + return IRQ_HANDLED; +} + +static int sensehat_joystick_probe(struct platform_device *pdev) +{ + struct sensehat_joystick *sensehat_joystick; + int error, i, irq; + + sensehat_joystick = devm_kzalloc(&pdev->dev, sizeof(*sensehat_joystick), + GFP_KERNEL); + if (!sensehat_joystick) + return -ENOMEM; + + sensehat_joystick->pdev = pdev; + + sensehat_joystick->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!sensehat_joystick->regmap) { + dev_err(&pdev->dev, "unable to get sensehat regmap"); + return -ENODEV; + } + + sensehat_joystick->keys_dev = devm_input_allocate_device(&pdev->dev); + if (!sensehat_joystick->keys_dev) { + dev_err(&pdev->dev, "Could not allocate input device"); + return -ENOMEM; + } + + sensehat_joystick->keys_dev->name = "Raspberry Pi Sense HAT Joystick"; + 
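The interrupt handler above, sensehat_joystick_report(), reports only the keys whose state changed, by XOR-ing the freshly read bitmap against the previous one and walking the differing bits. The same pattern in isolation, with the keymap and previous-state storage supplied by the caller and nkeys assumed to fit in a single long, as in the driver:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/input.h>

static void report_changed_keys(struct input_dev *idev,
                                const unsigned int *keymap, unsigned int nkeys,
                                unsigned long *prev, unsigned long curr)
{
        unsigned long changes;
        unsigned int i;

        /* Bits set in 'changes' are keys whose state differs from last time. */
        bitmap_xor(&changes, &curr, prev, nkeys);

        for_each_set_bit(i, &changes, nkeys)
                input_report_key(idev, keymap[i], curr & BIT(i));

        input_sync(idev);
        *prev = curr;
}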
sensehat_joystick->keys_dev->phys = "sensehat-joystick/input0"; + sensehat_joystick->keys_dev->id.bustype = BUS_I2C; + + __set_bit(EV_KEY, sensehat_joystick->keys_dev->evbit); + __set_bit(EV_REP, sensehat_joystick->keys_dev->evbit); + for (i = 0; i < ARRAY_SIZE(keymap); i++) + __set_bit(keymap[i], sensehat_joystick->keys_dev->keybit); + + error = input_register_device(sensehat_joystick->keys_dev); + if (error) { + dev_err(&pdev->dev, "Could not register input device"); + return error; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Could not retrieve interrupt request"); + return irq; + } + + error = devm_request_threaded_irq(&pdev->dev, irq, + NULL, sensehat_joystick_report, + IRQF_ONESHOT, "keys", + sensehat_joystick); + if (error) { + dev_err(&pdev->dev, "IRQ request failed"); + return error; + } + + return 0; +} + +static const struct of_device_id sensehat_joystick_device_id[] = { + { .compatible = "raspberrypi,sensehat-joystick" }, + {}, +}; +MODULE_DEVICE_TABLE(of, sensehat_joystick_device_id); + +static struct platform_driver sensehat_joystick_driver = { + .probe = sensehat_joystick_probe, + .driver = { + .name = "sensehat-joystick", + .of_match_table = sensehat_joystick_device_id, + }, +}; + +module_platform_driver(sensehat_joystick_driver); + +MODULE_DESCRIPTION("Raspberry Pi Sense HAT joystick driver"); +MODULE_AUTHOR("Charles Mirabile <cmirabil@redhat.com>"); +MODULE_AUTHOR("Serge Schneider <serge@raspberrypi.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c index 2b771c3a5578..166d6023a538 100644 --- a/drivers/input/keyboard/bcm-keypad.c +++ b/drivers/input/keyboard/bcm-keypad.c @@ -183,8 +183,7 @@ static void bcm_kp_stop(const struct bcm_kp *kp) writel(0xFFFFFFFF, kp->base + KPICR0_OFFSET); writel(0xFFFFFFFF, kp->base + KPICR1_OFFSET); - if (kp->clk) - clk_disable_unprepare(kp->clk); + clk_disable_unprepare(kp->clk); } static int bcm_kp_open(struct input_dev *dev) diff --git a/drivers/input/keyboard/clps711x-keypad.c b/drivers/input/keyboard/clps711x-keypad.c index 019dd6ed2c29..939c88655fc0 100644 --- a/drivers/input/keyboard/clps711x-keypad.c +++ b/drivers/input/keyboard/clps711x-keypad.c @@ -95,8 +95,7 @@ static int clps711x_keypad_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - priv->syscon = - syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1"); + priv->syscon = syscon_regmap_lookup_by_phandle(np, "syscon"); if (IS_ERR(priv->syscon)) return PTR_ERR(priv->syscon); diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c index 6534dfca60b4..cc73a149da28 100644 --- a/drivers/input/keyboard/cros_ec_keyb.c +++ b/drivers/input/keyboard/cros_ec_keyb.c @@ -435,10 +435,13 @@ static __maybe_unused int cros_ec_keyb_resume(struct device *dev) * but the ckdev->bs_idev will remain NULL when this function exits. * * @ckdev: The keyboard device + * @expect_buttons_switches: Indicates that EC must report button and/or + * switch events * * Returns 0 if no error or -error upon error. */ -static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) +static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev, + bool expect_buttons_switches) { struct cros_ec_device *ec_dev = ckdev->ec; struct device *dev = ckdev->dev; @@ -465,7 +468,7 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) switches = get_unaligned_le32(&event_data.switches); if (!buttons && !switches) - return 0; + return expect_buttons_switches ? 
-EINVAL : 0; /* * We call the non-matrix buttons/switches 'input1', if present. @@ -516,7 +519,7 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev) } /** - * cros_ec_keyb_register_bs - Register matrix keys + * cros_ec_keyb_register_matrix - Register matrix keys * * Handles all the bits of the keyboard driver related to matrix keys. * @@ -648,12 +651,12 @@ static const struct attribute_group cros_ec_keyb_attr_group = { .attrs = cros_ec_keyb_attrs, }; - static int cros_ec_keyb_probe(struct platform_device *pdev) { struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; struct cros_ec_keyb *ckdev; + bool buttons_switches_only = device_get_match_data(dev); int err; if (!dev->of_node) @@ -667,13 +670,16 @@ static int cros_ec_keyb_probe(struct platform_device *pdev) ckdev->dev = dev; dev_set_drvdata(dev, ckdev); - err = cros_ec_keyb_register_matrix(ckdev); - if (err) { - dev_err(dev, "cannot register matrix inputs: %d\n", err); - return err; + if (!buttons_switches_only) { + err = cros_ec_keyb_register_matrix(ckdev); + if (err) { + dev_err(dev, "cannot register matrix inputs: %d\n", + err); + return err; + } } - err = cros_ec_keyb_register_bs(ckdev); + err = cros_ec_keyb_register_bs(ckdev, buttons_switches_only); if (err) { dev_err(dev, "cannot register non-matrix inputs: %d\n", err); return err; @@ -681,7 +687,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev) err = devm_device_add_group(dev, &cros_ec_keyb_attr_group); if (err) { - dev_err(dev, "failed to create attributes. err=%d\n", err); + dev_err(dev, "failed to create attributes: %d\n", err); return err; } @@ -710,7 +716,8 @@ static int cros_ec_keyb_remove(struct platform_device *pdev) #ifdef CONFIG_OF static const struct of_device_id cros_ec_keyb_of_match[] = { { .compatible = "google,cros-ec-keyb" }, - {}, + { .compatible = "google,cros-ec-keyb-switches", .data = (void *)true }, + {} }; MODULE_DEVICE_TABLE(of, cros_ec_keyb_of_match); #endif diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c index 272a4f1c6e81..7a3b0664ab4f 100644 --- a/drivers/input/keyboard/ep93xx_keypad.c +++ b/drivers/input/keyboard/ep93xx_keypad.c @@ -231,7 +231,6 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) struct ep93xx_keypad *keypad; const struct matrix_keymap_data *keymap_data; struct input_dev *input_dev; - struct resource *res; int err; keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad), GFP_KERNEL); @@ -250,11 +249,7 @@ static int ep93xx_keypad_probe(struct platform_device *pdev) if (keypad->irq < 0) return keypad->irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENXIO; - - keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res); + keypad->mmio_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(keypad->mmio_base)) return PTR_ERR(keypad->mmio_base); diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index d75a8b179a8a..a5dc4ab87fa1 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -131,7 +131,7 @@ static void gpio_keys_quiesce_key(void *data) if (!bdata->gpiod) hrtimer_cancel(&bdata->release_timer); - if (bdata->debounce_use_hrtimer) + else if (bdata->debounce_use_hrtimer) hrtimer_cancel(&bdata->debounce_timer); else cancel_delayed_work_sync(&bdata->work); diff --git a/drivers/input/keyboard/mt6779-keypad.c b/drivers/input/keyboard/mt6779-keypad.c index 0dbbddc7f298..2e7c9187c10f 100644 --- 
a/drivers/input/keyboard/mt6779-keypad.c +++ b/drivers/input/keyboard/mt6779-keypad.c @@ -24,7 +24,6 @@ struct mt6779_keypad { struct regmap *regmap; struct input_dev *input_dev; struct clk *clk; - void __iomem *base; u32 n_rows; u32 n_cols; DECLARE_BITMAP(keymap_state, MTK_KPD_NUM_BITS); @@ -91,6 +90,7 @@ static void mt6779_keypad_clk_disable(void *data) static int mt6779_keypad_pdrv_probe(struct platform_device *pdev) { struct mt6779_keypad *keypad; + void __iomem *base; int irq; u32 debounce; bool wakeup; @@ -100,11 +100,11 @@ static int mt6779_keypad_pdrv_probe(struct platform_device *pdev) if (!keypad) return -ENOMEM; - keypad->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(keypad->base)) - return PTR_ERR(keypad->base); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); - keypad->regmap = devm_regmap_init_mmio(&pdev->dev, keypad->base, + keypad->regmap = devm_regmap_init_mmio(&pdev->dev, base, &mt6779_keypad_regmap_cfg); if (IS_ERR(keypad->regmap)) { dev_err(&pdev->dev, diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c index 4a796bed48ac..15c15c0958b0 100644 --- a/drivers/input/keyboard/sun4i-lradc-keys.c +++ b/drivers/input/keyboard/sun4i-lradc-keys.c @@ -14,6 +14,7 @@ * there are no boards known to use channel 1. */ +#include <linux/clk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/input.h> @@ -22,7 +23,10 @@ #include <linux/module.h> #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> +#include <linux/pm_wakeup.h> #include <linux/regulator/consumer.h> +#include <linux/reset.h> #include <linux/slab.h> #define LRADC_CTRL 0x00 @@ -58,10 +62,12 @@ /* struct lradc_variant - Describe sun4i-a10-lradc-keys hardware variant * @divisor_numerator: The numerator of lradc Vref internally divisor * @divisor_denominator: The denominator of lradc Vref internally divisor + * @has_clock_reset: If the binding requires a clock and reset */ struct lradc_variant { u8 divisor_numerator; u8 divisor_denominator; + bool has_clock_reset; }; static const struct lradc_variant lradc_variant_a10 = { @@ -74,6 +80,12 @@ static const struct lradc_variant r_lradc_variant_a83t = { .divisor_denominator = 4 }; +static const struct lradc_variant lradc_variant_r329 = { + .divisor_numerator = 3, + .divisor_denominator = 4, + .has_clock_reset = true, +}; + struct sun4i_lradc_keymap { u32 voltage; u32 keycode; @@ -83,6 +95,8 @@ struct sun4i_lradc_data { struct device *dev; struct input_dev *input; void __iomem *base; + struct clk *clk; + struct reset_control *reset; struct regulator *vref_supply; struct sun4i_lradc_keymap *chan0_map; const struct lradc_variant *variant; @@ -140,6 +154,14 @@ static int sun4i_lradc_open(struct input_dev *dev) if (error) return error; + error = reset_control_deassert(lradc->reset); + if (error) + goto err_disable_reg; + + error = clk_prepare_enable(lradc->clk); + if (error) + goto err_assert_reset; + lradc->vref = regulator_get_voltage(lradc->vref_supply) * lradc->variant->divisor_numerator / lradc->variant->divisor_denominator; @@ -153,6 +175,13 @@ static int sun4i_lradc_open(struct input_dev *dev) writel(CHAN0_KEYUP_IRQ | CHAN0_KEYDOWN_IRQ, lradc->base + LRADC_INTC); return 0; + +err_assert_reset: + reset_control_assert(lradc->reset); +err_disable_reg: + regulator_disable(lradc->vref_supply); + + return error; } static void sun4i_lradc_close(struct input_dev *dev) @@ -164,6 +193,8 @@ static void sun4i_lradc_close(struct 
input_dev *dev) SAMPLE_RATE(2), lradc->base + LRADC_CTRL); writel(0, lradc->base + LRADC_INTC); + clk_disable_unprepare(lradc->clk); + reset_control_assert(lradc->reset); regulator_disable(lradc->vref_supply); } @@ -226,8 +257,7 @@ static int sun4i_lradc_probe(struct platform_device *pdev) { struct sun4i_lradc_data *lradc; struct device *dev = &pdev->dev; - int i; - int error; + int error, i, irq; lradc = devm_kzalloc(dev, sizeof(struct sun4i_lradc_data), GFP_KERNEL); if (!lradc) @@ -243,6 +273,16 @@ static int sun4i_lradc_probe(struct platform_device *pdev) return -EINVAL; } + if (lradc->variant->has_clock_reset) { + lradc->clk = devm_clk_get(dev, NULL); + if (IS_ERR(lradc->clk)) + return PTR_ERR(lradc->clk); + + lradc->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(lradc->reset)) + return PTR_ERR(lradc->reset); + } + lradc->vref_supply = devm_regulator_get(dev, "vref"); if (IS_ERR(lradc->vref_supply)) return PTR_ERR(lradc->vref_supply); @@ -272,8 +312,11 @@ static int sun4i_lradc_probe(struct platform_device *pdev) if (IS_ERR(lradc->base)) return PTR_ERR(lradc->base); - error = devm_request_irq(dev, platform_get_irq(pdev, 0), - sun4i_lradc_irq, 0, + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + error = devm_request_irq(dev, irq, sun4i_lradc_irq, 0, "sun4i-a10-lradc-keys", lradc); if (error) return error; @@ -282,6 +325,16 @@ static int sun4i_lradc_probe(struct platform_device *pdev) if (error) return error; + if (device_property_read_bool(dev, "wakeup-source")) { + error = dev_pm_set_wake_irq(dev, irq); + if (error) + dev_warn(dev, + "Failed to set IRQ %d as a wake IRQ: %d\n", + irq, error); + else + device_set_wakeup_capable(dev, true); + } + return 0; } @@ -290,6 +343,8 @@ static const struct of_device_id sun4i_lradc_of_match[] = { .data = &lradc_variant_a10 }, { .compatible = "allwinner,sun8i-a83t-r-lradc", .data = &r_lradc_variant_a83t }, + { .compatible = "allwinner,sun50i-r329-lradc", + .data = &lradc_variant_r329 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sun4i_lradc_of_match); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index dd5227cf8696..a18ab7358d8f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -762,6 +762,16 @@ config INPUT_IQS626A To compile this driver as a module, choose M here: the module will be called iqs626a. +config INPUT_IQS7222 + tristate "Azoteq IQS7222A/B/C capacitive touch controller" + depends on I2C + help + Say Y to enable support for the Azoteq IQS7222A/B/C family + of capacitive touch controllers. + + To compile this driver as a module, choose M here: the + module will be called iqs7222. 
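Before the IQS7222 Kconfig entry above and its driver below, the sun4i-lradc hunks deserve a short note: on the new R329 variant the LRADC sits behind a clock and a reset line, so sun4i_lradc_open() now brings things up in order (regulator, reset deassert, clock enable) and unwinds in reverse on failure, while variants without the extra resources simply leave clk and reset as NULL, which the clk and reset APIs treat as no-ops. A condensed sketch of that ordering, with the resources passed in as parameters and an illustrative helper name:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

static int lradc_power_up(struct regulator *vref, struct reset_control *rst,
                          struct clk *clk)
{
        int error;

        error = regulator_enable(vref);
        if (error)
                return error;

        error = reset_control_deassert(rst);    /* NULL rst is accepted */
        if (error)
                goto err_disable_reg;

        error = clk_prepare_enable(clk);        /* NULL clk is accepted */
        if (error)
                goto err_assert_reset;

        return 0;

err_assert_reset:
        reset_control_assert(rst);
err_disable_reg:
        regulator_disable(vref);
        return error;
}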
+ config INPUT_CMA3000 tristate "VTI CMA3000 Tri-axis accelerometer" help diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index b92c53a6b5ae..28dfc444f0a9 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -44,6 +44,7 @@ obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o obj-$(CONFIG_INPUT_IQS269A) += iqs269a.o obj-$(CONFIG_INPUT_IQS626A) += iqs626a.o +obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c new file mode 100644 index 000000000000..6b4138771a3f --- /dev/null +++ b/drivers/input/misc/iqs7222.c @@ -0,0 +1,2446 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Azoteq IQS7222A/B/C Capacitive Touch Controller + * + * Copyright (C) 2022 Jeff LaBundy <jeff@labundy.com> + */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/property.h> +#include <linux/slab.h> +#include <asm/unaligned.h> + +#define IQS7222_PROD_NUM 0x00 +#define IQS7222_PROD_NUM_A 840 +#define IQS7222_PROD_NUM_B 698 +#define IQS7222_PROD_NUM_C 863 + +#define IQS7222_SYS_STATUS 0x10 +#define IQS7222_SYS_STATUS_RESET BIT(3) +#define IQS7222_SYS_STATUS_ATI_ERROR BIT(1) +#define IQS7222_SYS_STATUS_ATI_ACTIVE BIT(0) + +#define IQS7222_CHAN_SETUP_0_REF_MODE_MASK GENMASK(15, 14) +#define IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW BIT(15) +#define IQS7222_CHAN_SETUP_0_REF_MODE_REF BIT(14) +#define IQS7222_CHAN_SETUP_0_CHAN_EN BIT(8) + +#define IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK GENMASK(2, 0) +#define IQS7222_SLDR_SETUP_2_RES_MASK GENMASK(15, 8) +#define IQS7222_SLDR_SETUP_2_RES_SHIFT 8 +#define IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK GENMASK(7, 0) +#define IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK GENMASK(9, 0) + +#define IQS7222_GPIO_SETUP_0_GPIO_EN BIT(0) + +#define IQS7222_SYS_SETUP 0xD0 +#define IQS7222_SYS_SETUP_INTF_MODE_MASK GENMASK(7, 6) +#define IQS7222_SYS_SETUP_INTF_MODE_TOUCH BIT(7) +#define IQS7222_SYS_SETUP_INTF_MODE_EVENT BIT(6) +#define IQS7222_SYS_SETUP_PWR_MODE_MASK GENMASK(5, 4) +#define IQS7222_SYS_SETUP_PWR_MODE_AUTO IQS7222_SYS_SETUP_PWR_MODE_MASK +#define IQS7222_SYS_SETUP_REDO_ATI BIT(2) +#define IQS7222_SYS_SETUP_ACK_RESET BIT(0) + +#define IQS7222_EVENT_MASK_ATI BIT(12) + +#define IQS7222_COMMS_HOLD BIT(0) +#define IQS7222_COMMS_ERROR 0xEEEE +#define IQS7222_COMMS_RETRY_MS 50 +#define IQS7222_COMMS_TIMEOUT_MS 100 +#define IQS7222_RESET_TIMEOUT_MS 250 +#define IQS7222_ATI_TIMEOUT_MS 2000 + +#define IQS7222_MAX_COLS_STAT 8 +#define IQS7222_MAX_COLS_CYCLE 3 +#define IQS7222_MAX_COLS_GLBL 3 +#define IQS7222_MAX_COLS_BTN 3 +#define IQS7222_MAX_COLS_CHAN 6 +#define IQS7222_MAX_COLS_FILT 2 +#define IQS7222_MAX_COLS_SLDR 11 +#define IQS7222_MAX_COLS_GPIO 3 +#define IQS7222_MAX_COLS_SYS 13 + +#define IQS7222_MAX_CHAN 20 +#define IQS7222_MAX_SLDR 2 + +#define IQS7222_NUM_RETRIES 5 +#define IQS7222_REG_OFFSET 0x100 + +enum iqs7222_reg_key_id { + IQS7222_REG_KEY_NONE, + IQS7222_REG_KEY_PROX, + IQS7222_REG_KEY_TOUCH, + IQS7222_REG_KEY_DEBOUNCE, + IQS7222_REG_KEY_TAP, + IQS7222_REG_KEY_AXIAL, + IQS7222_REG_KEY_WHEEL, + IQS7222_REG_KEY_NO_WHEEL, + 
IQS7222_REG_KEY_RESERVED +}; + +enum iqs7222_reg_grp_id { + IQS7222_REG_GRP_STAT, + IQS7222_REG_GRP_CYCLE, + IQS7222_REG_GRP_GLBL, + IQS7222_REG_GRP_BTN, + IQS7222_REG_GRP_CHAN, + IQS7222_REG_GRP_FILT, + IQS7222_REG_GRP_SLDR, + IQS7222_REG_GRP_GPIO, + IQS7222_REG_GRP_SYS, + IQS7222_NUM_REG_GRPS +}; + +static const char * const iqs7222_reg_grp_names[] = { + [IQS7222_REG_GRP_CYCLE] = "cycle", + [IQS7222_REG_GRP_CHAN] = "channel", + [IQS7222_REG_GRP_SLDR] = "slider", + [IQS7222_REG_GRP_GPIO] = "gpio", +}; + +static const unsigned int iqs7222_max_cols[] = { + [IQS7222_REG_GRP_STAT] = IQS7222_MAX_COLS_STAT, + [IQS7222_REG_GRP_CYCLE] = IQS7222_MAX_COLS_CYCLE, + [IQS7222_REG_GRP_GLBL] = IQS7222_MAX_COLS_GLBL, + [IQS7222_REG_GRP_BTN] = IQS7222_MAX_COLS_BTN, + [IQS7222_REG_GRP_CHAN] = IQS7222_MAX_COLS_CHAN, + [IQS7222_REG_GRP_FILT] = IQS7222_MAX_COLS_FILT, + [IQS7222_REG_GRP_SLDR] = IQS7222_MAX_COLS_SLDR, + [IQS7222_REG_GRP_GPIO] = IQS7222_MAX_COLS_GPIO, + [IQS7222_REG_GRP_SYS] = IQS7222_MAX_COLS_SYS, +}; + +static const unsigned int iqs7222_gpio_links[] = { 2, 5, 6, }; + +struct iqs7222_event_desc { + const char *name; + u16 mask; + u16 val; + u16 enable; + enum iqs7222_reg_key_id reg_key; +}; + +static const struct iqs7222_event_desc iqs7222_kp_events[] = { + { + .name = "event-prox", + .enable = BIT(0), + .reg_key = IQS7222_REG_KEY_PROX, + }, + { + .name = "event-touch", + .enable = BIT(1), + .reg_key = IQS7222_REG_KEY_TOUCH, + }, +}; + +static const struct iqs7222_event_desc iqs7222_sl_events[] = { + { .name = "event-press", }, + { + .name = "event-tap", + .mask = BIT(0), + .val = BIT(0), + .enable = BIT(0), + .reg_key = IQS7222_REG_KEY_TAP, + }, + { + .name = "event-swipe-pos", + .mask = BIT(5) | BIT(1), + .val = BIT(1), + .enable = BIT(1), + .reg_key = IQS7222_REG_KEY_AXIAL, + }, + { + .name = "event-swipe-neg", + .mask = BIT(5) | BIT(1), + .val = BIT(5) | BIT(1), + .enable = BIT(1), + .reg_key = IQS7222_REG_KEY_AXIAL, + }, + { + .name = "event-flick-pos", + .mask = BIT(5) | BIT(2), + .val = BIT(2), + .enable = BIT(2), + .reg_key = IQS7222_REG_KEY_AXIAL, + }, + { + .name = "event-flick-neg", + .mask = BIT(5) | BIT(2), + .val = BIT(5) | BIT(2), + .enable = BIT(2), + .reg_key = IQS7222_REG_KEY_AXIAL, + }, +}; + +struct iqs7222_reg_grp_desc { + u16 base; + int num_row; + int num_col; +}; + +struct iqs7222_dev_desc { + u16 prod_num; + u16 fw_major; + u16 fw_minor; + u16 sldr_res; + u16 touch_link; + u16 wheel_enable; + int allow_offset; + int event_offset; + int comms_offset; + struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS]; +}; + +static const struct iqs7222_dev_desc iqs7222_devs[] = { + { + .prod_num = IQS7222_PROD_NUM_A, + .fw_major = 1, + .fw_minor = 12, + .sldr_res = U8_MAX * 16, + .touch_link = 1768, + .allow_offset = 9, + .event_offset = 10, + .comms_offset = 12, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, + .num_row = 1, + .num_col = 8, + }, + [IQS7222_REG_GRP_CYCLE] = { + .base = 0x8000, + .num_row = 7, + .num_col = 3, + }, + [IQS7222_REG_GRP_GLBL] = { + .base = 0x8700, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_BTN] = { + .base = 0x9000, + .num_row = 12, + .num_col = 3, + }, + [IQS7222_REG_GRP_CHAN] = { + .base = 0xA000, + .num_row = 12, + .num_col = 6, + }, + [IQS7222_REG_GRP_FILT] = { + .base = 0xAC00, + .num_row = 1, + .num_col = 2, + }, + [IQS7222_REG_GRP_SLDR] = { + .base = 0xB000, + .num_row = 2, + .num_col = 11, + }, + [IQS7222_REG_GRP_GPIO] = { + .base = 0xC000, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_SYS] = 
{ + .base = IQS7222_SYS_SETUP, + .num_row = 1, + .num_col = 13, + }, + }, + }, + { + .prod_num = IQS7222_PROD_NUM_B, + .fw_major = 1, + .fw_minor = 43, + .event_offset = 10, + .comms_offset = 11, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, + .num_row = 1, + .num_col = 6, + }, + [IQS7222_REG_GRP_CYCLE] = { + .base = 0x8000, + .num_row = 10, + .num_col = 2, + }, + [IQS7222_REG_GRP_GLBL] = { + .base = 0x8A00, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_BTN] = { + .base = 0x9000, + .num_row = 20, + .num_col = 2, + }, + [IQS7222_REG_GRP_CHAN] = { + .base = 0xB000, + .num_row = 20, + .num_col = 4, + }, + [IQS7222_REG_GRP_FILT] = { + .base = 0xC400, + .num_row = 1, + .num_col = 2, + }, + [IQS7222_REG_GRP_SYS] = { + .base = IQS7222_SYS_SETUP, + .num_row = 1, + .num_col = 13, + }, + }, + }, + { + .prod_num = IQS7222_PROD_NUM_B, + .fw_major = 1, + .fw_minor = 27, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, + .num_row = 1, + .num_col = 6, + }, + [IQS7222_REG_GRP_CYCLE] = { + .base = 0x8000, + .num_row = 10, + .num_col = 2, + }, + [IQS7222_REG_GRP_GLBL] = { + .base = 0x8A00, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_BTN] = { + .base = 0x9000, + .num_row = 20, + .num_col = 2, + }, + [IQS7222_REG_GRP_CHAN] = { + .base = 0xB000, + .num_row = 20, + .num_col = 4, + }, + [IQS7222_REG_GRP_FILT] = { + .base = 0xC400, + .num_row = 1, + .num_col = 2, + }, + [IQS7222_REG_GRP_SYS] = { + .base = IQS7222_SYS_SETUP, + .num_row = 1, + .num_col = 10, + }, + }, + }, + { + .prod_num = IQS7222_PROD_NUM_C, + .fw_major = 2, + .fw_minor = 6, + .sldr_res = U16_MAX, + .touch_link = 1686, + .wheel_enable = BIT(3), + .event_offset = 9, + .comms_offset = 10, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, + .num_row = 1, + .num_col = 6, + }, + [IQS7222_REG_GRP_CYCLE] = { + .base = 0x8000, + .num_row = 5, + .num_col = 3, + }, + [IQS7222_REG_GRP_GLBL] = { + .base = 0x8500, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_BTN] = { + .base = 0x9000, + .num_row = 10, + .num_col = 3, + }, + [IQS7222_REG_GRP_CHAN] = { + .base = 0xA000, + .num_row = 10, + .num_col = 6, + }, + [IQS7222_REG_GRP_FILT] = { + .base = 0xAA00, + .num_row = 1, + .num_col = 2, + }, + [IQS7222_REG_GRP_SLDR] = { + .base = 0xB000, + .num_row = 2, + .num_col = 10, + }, + [IQS7222_REG_GRP_GPIO] = { + .base = 0xC000, + .num_row = 3, + .num_col = 3, + }, + [IQS7222_REG_GRP_SYS] = { + .base = IQS7222_SYS_SETUP, + .num_row = 1, + .num_col = 12, + }, + }, + }, + { + .prod_num = IQS7222_PROD_NUM_C, + .fw_major = 1, + .fw_minor = 13, + .sldr_res = U16_MAX, + .touch_link = 1674, + .wheel_enable = BIT(3), + .event_offset = 9, + .comms_offset = 10, + .reg_grps = { + [IQS7222_REG_GRP_STAT] = { + .base = IQS7222_SYS_STATUS, + .num_row = 1, + .num_col = 6, + }, + [IQS7222_REG_GRP_CYCLE] = { + .base = 0x8000, + .num_row = 5, + .num_col = 3, + }, + [IQS7222_REG_GRP_GLBL] = { + .base = 0x8500, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_BTN] = { + .base = 0x9000, + .num_row = 10, + .num_col = 3, + }, + [IQS7222_REG_GRP_CHAN] = { + .base = 0xA000, + .num_row = 10, + .num_col = 6, + }, + [IQS7222_REG_GRP_FILT] = { + .base = 0xAA00, + .num_row = 1, + .num_col = 2, + }, + [IQS7222_REG_GRP_SLDR] = { + .base = 0xB000, + .num_row = 2, + .num_col = 10, + }, + [IQS7222_REG_GRP_GPIO] = { + .base = 0xC000, + .num_row = 1, + .num_col = 3, + }, + [IQS7222_REG_GRP_SYS] = { + .base = IQS7222_SYS_SETUP, + .num_row = 1, + .num_col = 11, + }, + }, + }, +}; + +struct 
iqs7222_prop_desc { + const char *name; + enum iqs7222_reg_grp_id reg_grp; + enum iqs7222_reg_key_id reg_key; + int reg_offset; + int reg_shift; + int reg_width; + int val_pitch; + int val_min; + int val_max; + bool invert; + const char *label; +}; + +static const struct iqs7222_prop_desc iqs7222_props[] = { + { + .name = "azoteq,conv-period", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 0, + .reg_shift = 8, + .reg_width = 8, + .label = "conversion period", + }, + { + .name = "azoteq,conv-frac", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 0, + .reg_shift = 0, + .reg_width = 8, + .label = "conversion frequency fractional divider", + }, + { + .name = "azoteq,rx-float-inactive", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 1, + .reg_shift = 6, + .reg_width = 1, + .invert = true, + }, + { + .name = "azoteq,dead-time-enable", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 1, + .reg_shift = 5, + .reg_width = 1, + }, + { + .name = "azoteq,tx-freq-fosc", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 1, + .reg_shift = 4, + .reg_width = 1, + }, + { + .name = "azoteq,vbias-enable", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 1, + .reg_shift = 3, + .reg_width = 1, + }, + { + .name = "azoteq,sense-mode", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 3, + .val_max = 3, + .label = "sensing mode", + }, + { + .name = "azoteq,iref-enable", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 2, + .reg_shift = 10, + .reg_width = 1, + }, + { + .name = "azoteq,iref-level", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 2, + .reg_shift = 4, + .reg_width = 4, + .label = "current reference level", + }, + { + .name = "azoteq,iref-trim", + .reg_grp = IQS7222_REG_GRP_CYCLE, + .reg_offset = 2, + .reg_shift = 0, + .reg_width = 4, + .label = "current reference trim", + }, + { + .name = "azoteq,rf-filt-enable", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 0, + .reg_shift = 15, + .reg_width = 1, + }, + { + .name = "azoteq,max-counts", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 0, + .reg_shift = 13, + .reg_width = 2, + .label = "maximum counts", + }, + { + .name = "azoteq,auto-mode", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 0, + .reg_shift = 2, + .reg_width = 2, + .label = "number of conversions", + }, + { + .name = "azoteq,ati-frac-div-fine", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 1, + .reg_shift = 9, + .reg_width = 5, + .label = "ATI fine fractional divider", + }, + { + .name = "azoteq,ati-frac-div-coarse", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 5, + .label = "ATI coarse fractional divider", + }, + { + .name = "azoteq,ati-comp-select", + .reg_grp = IQS7222_REG_GRP_GLBL, + .reg_offset = 2, + .reg_shift = 0, + .reg_width = 10, + .label = "ATI compensation selection", + }, + { + .name = "azoteq,ati-band", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 12, + .reg_width = 2, + .label = "ATI band", + }, + { + .name = "azoteq,global-halt", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 11, + .reg_width = 1, + }, + { + .name = "azoteq,invert-enable", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 10, + .reg_width = 1, + }, + { + .name = "azoteq,dual-direction", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 9, + .reg_width = 1, + }, + { + .name = "azoteq,samp-cap-double", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 3, + .reg_width = 1, + }, + { + 
.name = "azoteq,vref-half", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 2, + .reg_width = 1, + }, + { + .name = "azoteq,proj-bias", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 0, + .reg_shift = 0, + .reg_width = 2, + .label = "projected bias current", + }, + { + .name = "azoteq,ati-target", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 1, + .reg_shift = 8, + .reg_width = 8, + .val_pitch = 8, + .label = "ATI target", + }, + { + .name = "azoteq,ati-base", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 1, + .reg_shift = 3, + .reg_width = 5, + .val_pitch = 16, + .label = "ATI base", + }, + { + .name = "azoteq,ati-mode", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 3, + .val_max = 5, + .label = "ATI mode", + }, + { + .name = "azoteq,ati-frac-div-fine", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 2, + .reg_shift = 9, + .reg_width = 5, + .label = "ATI fine fractional divider", + }, + { + .name = "azoteq,ati-frac-mult-coarse", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 2, + .reg_shift = 5, + .reg_width = 4, + .label = "ATI coarse fractional multiplier", + }, + { + .name = "azoteq,ati-frac-div-coarse", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 2, + .reg_shift = 0, + .reg_width = 5, + .label = "ATI coarse fractional divider", + }, + { + .name = "azoteq,ati-comp-div", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 3, + .reg_shift = 11, + .reg_width = 5, + .label = "ATI compensation divider", + }, + { + .name = "azoteq,ati-comp-select", + .reg_grp = IQS7222_REG_GRP_CHAN, + .reg_offset = 3, + .reg_shift = 0, + .reg_width = 10, + .label = "ATI compensation selection", + }, + { + .name = "azoteq,debounce-exit", + .reg_grp = IQS7222_REG_GRP_BTN, + .reg_key = IQS7222_REG_KEY_DEBOUNCE, + .reg_offset = 0, + .reg_shift = 12, + .reg_width = 4, + .label = "debounce exit factor", + }, + { + .name = "azoteq,debounce-enter", + .reg_grp = IQS7222_REG_GRP_BTN, + .reg_key = IQS7222_REG_KEY_DEBOUNCE, + .reg_offset = 0, + .reg_shift = 8, + .reg_width = 4, + .label = "debounce entrance factor", + }, + { + .name = "azoteq,thresh", + .reg_grp = IQS7222_REG_GRP_BTN, + .reg_key = IQS7222_REG_KEY_PROX, + .reg_offset = 0, + .reg_shift = 0, + .reg_width = 8, + .val_max = 127, + .label = "threshold", + }, + { + .name = "azoteq,thresh", + .reg_grp = IQS7222_REG_GRP_BTN, + .reg_key = IQS7222_REG_KEY_TOUCH, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 8, + .label = "threshold", + }, + { + .name = "azoteq,hyst", + .reg_grp = IQS7222_REG_GRP_BTN, + .reg_key = IQS7222_REG_KEY_TOUCH, + .reg_offset = 1, + .reg_shift = 8, + .reg_width = 8, + .label = "hysteresis", + }, + { + .name = "azoteq,lta-beta-lp", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 0, + .reg_shift = 12, + .reg_width = 4, + .label = "low-power mode long-term average beta", + }, + { + .name = "azoteq,lta-beta-np", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 0, + .reg_shift = 8, + .reg_width = 4, + .label = "normal-power mode long-term average beta", + }, + { + .name = "azoteq,counts-beta-lp", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 0, + .reg_shift = 4, + .reg_width = 4, + .label = "low-power mode counts beta", + }, + { + .name = "azoteq,counts-beta-np", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 0, + .reg_shift = 0, + .reg_width = 4, + .label = "normal-power mode counts beta", + }, + { + .name = "azoteq,lta-fast-beta-lp", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 1, + .reg_shift = 4, + .reg_width = 4, + .label = 
"low-power mode long-term average fast beta", + }, + { + .name = "azoteq,lta-fast-beta-np", + .reg_grp = IQS7222_REG_GRP_FILT, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 4, + .label = "normal-power mode long-term average fast beta", + }, + { + .name = "azoteq,lower-cal", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_offset = 0, + .reg_shift = 8, + .reg_width = 8, + .label = "lower calibration", + }, + { + .name = "azoteq,static-beta", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_NO_WHEEL, + .reg_offset = 0, + .reg_shift = 6, + .reg_width = 1, + }, + { + .name = "azoteq,bottom-beta", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_NO_WHEEL, + .reg_offset = 0, + .reg_shift = 3, + .reg_width = 3, + .label = "bottom beta", + }, + { + .name = "azoteq,static-beta", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_WHEEL, + .reg_offset = 0, + .reg_shift = 7, + .reg_width = 1, + }, + { + .name = "azoteq,bottom-beta", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_WHEEL, + .reg_offset = 0, + .reg_shift = 4, + .reg_width = 3, + .label = "bottom beta", + }, + { + .name = "azoteq,bottom-speed", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_offset = 1, + .reg_shift = 8, + .reg_width = 8, + .label = "bottom speed", + }, + { + .name = "azoteq,upper-cal", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 8, + .label = "upper calibration", + }, + { + .name = "azoteq,gesture-max-ms", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_TAP, + .reg_offset = 9, + .reg_shift = 8, + .reg_width = 8, + .val_pitch = 4, + .label = "maximum gesture time", + }, + { + .name = "azoteq,gesture-min-ms", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_TAP, + .reg_offset = 9, + .reg_shift = 3, + .reg_width = 5, + .val_pitch = 4, + .label = "minimum gesture time", + }, + { + .name = "azoteq,gesture-dist", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_AXIAL, + .reg_offset = 10, + .reg_shift = 8, + .reg_width = 8, + .val_pitch = 16, + .label = "gesture distance", + }, + { + .name = "azoteq,gesture-max-ms", + .reg_grp = IQS7222_REG_GRP_SLDR, + .reg_key = IQS7222_REG_KEY_AXIAL, + .reg_offset = 10, + .reg_shift = 0, + .reg_width = 8, + .val_pitch = 4, + .label = "maximum gesture time", + }, + { + .name = "drive-open-drain", + .reg_grp = IQS7222_REG_GRP_GPIO, + .reg_offset = 0, + .reg_shift = 1, + .reg_width = 1, + }, + { + .name = "azoteq,timeout-ati-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 1, + .reg_shift = 0, + .reg_width = 16, + .val_pitch = 500, + .label = "ATI error timeout", + }, + { + .name = "azoteq,rate-ati-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 2, + .reg_shift = 0, + .reg_width = 16, + .label = "ATI report rate", + }, + { + .name = "azoteq,timeout-np-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 3, + .reg_shift = 0, + .reg_width = 16, + .label = "normal-power mode timeout", + }, + { + .name = "azoteq,rate-np-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 4, + .reg_shift = 0, + .reg_width = 16, + .val_max = 3000, + .label = "normal-power mode report rate", + }, + { + .name = "azoteq,timeout-lp-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 5, + .reg_shift = 0, + .reg_width = 16, + .label = "low-power mode timeout", + }, + { + .name = "azoteq,rate-lp-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 6, + .reg_shift = 0, + .reg_width = 16, + .val_max = 3000, + .label = "low-power mode report rate", + }, + { + .name = 
"azoteq,timeout-ulp-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 7, + .reg_shift = 0, + .reg_width = 16, + .label = "ultra-low-power mode timeout", + }, + { + .name = "azoteq,rate-ulp-ms", + .reg_grp = IQS7222_REG_GRP_SYS, + .reg_offset = 8, + .reg_shift = 0, + .reg_width = 16, + .val_max = 3000, + .label = "ultra-low-power mode report rate", + }, +}; + +struct iqs7222_private { + const struct iqs7222_dev_desc *dev_desc; + struct gpio_desc *reset_gpio; + struct gpio_desc *irq_gpio; + struct i2c_client *client; + struct input_dev *keypad; + unsigned int kp_type[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)]; + unsigned int kp_code[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)]; + unsigned int sl_code[IQS7222_MAX_SLDR][ARRAY_SIZE(iqs7222_sl_events)]; + unsigned int sl_axis[IQS7222_MAX_SLDR]; + u16 cycle_setup[IQS7222_MAX_CHAN / 2][IQS7222_MAX_COLS_CYCLE]; + u16 glbl_setup[IQS7222_MAX_COLS_GLBL]; + u16 btn_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_BTN]; + u16 chan_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_CHAN]; + u16 filt_setup[IQS7222_MAX_COLS_FILT]; + u16 sldr_setup[IQS7222_MAX_SLDR][IQS7222_MAX_COLS_SLDR]; + u16 gpio_setup[ARRAY_SIZE(iqs7222_gpio_links)][IQS7222_MAX_COLS_GPIO]; + u16 sys_setup[IQS7222_MAX_COLS_SYS]; +}; + +static u16 *iqs7222_setup(struct iqs7222_private *iqs7222, + enum iqs7222_reg_grp_id reg_grp, int row) +{ + switch (reg_grp) { + case IQS7222_REG_GRP_CYCLE: + return iqs7222->cycle_setup[row]; + + case IQS7222_REG_GRP_GLBL: + return iqs7222->glbl_setup; + + case IQS7222_REG_GRP_BTN: + return iqs7222->btn_setup[row]; + + case IQS7222_REG_GRP_CHAN: + return iqs7222->chan_setup[row]; + + case IQS7222_REG_GRP_FILT: + return iqs7222->filt_setup; + + case IQS7222_REG_GRP_SLDR: + return iqs7222->sldr_setup[row]; + + case IQS7222_REG_GRP_GPIO: + return iqs7222->gpio_setup[row]; + + case IQS7222_REG_GRP_SYS: + return iqs7222->sys_setup; + + default: + return NULL; + } +} + +static int iqs7222_irq_poll(struct iqs7222_private *iqs7222, u16 timeout_ms) +{ + ktime_t irq_timeout = ktime_add_ms(ktime_get(), timeout_ms); + int ret; + + do { + usleep_range(1000, 1100); + + ret = gpiod_get_value_cansleep(iqs7222->irq_gpio); + if (ret < 0) + return ret; + else if (ret > 0) + return 0; + } while (ktime_compare(ktime_get(), irq_timeout) < 0); + + return -EBUSY; +} + +static int iqs7222_hard_reset(struct iqs7222_private *iqs7222) +{ + struct i2c_client *client = iqs7222->client; + int error; + + if (!iqs7222->reset_gpio) + return 0; + + gpiod_set_value_cansleep(iqs7222->reset_gpio, 1); + usleep_range(1000, 1100); + + gpiod_set_value_cansleep(iqs7222->reset_gpio, 0); + + error = iqs7222_irq_poll(iqs7222, IQS7222_RESET_TIMEOUT_MS); + if (error) + dev_err(&client->dev, "Failed to reset device: %d\n", error); + + return error; +} + +static int iqs7222_force_comms(struct iqs7222_private *iqs7222) +{ + u8 msg_buf[] = { 0xFF, 0x00, }; + int ret; + + /* + * The device cannot communicate until it asserts its interrupt (RDY) + * pin. Attempts to do so while RDY is deasserted return an ACK; how- + * ever all write data is ignored, and all read data returns 0xEE. + * + * Unsolicited communication must be preceded by a special force com- + * munication command, after which the device eventually asserts its + * RDY pin and agrees to communicate. + * + * Regardless of whether communication is forced or the result of an + * interrupt, the device automatically deasserts its RDY pin once it + * detects an I2C stop condition, or a timeout expires. 
+ */ + ret = gpiod_get_value_cansleep(iqs7222->irq_gpio); + if (ret < 0) + return ret; + else if (ret > 0) + return 0; + + ret = i2c_master_send(iqs7222->client, msg_buf, sizeof(msg_buf)); + if (ret < (int)sizeof(msg_buf)) { + if (ret >= 0) + ret = -EIO; + + /* + * The datasheet states that the host must wait to retry any + * failed attempt to communicate over I2C. + */ + msleep(IQS7222_COMMS_RETRY_MS); + return ret; + } + + return iqs7222_irq_poll(iqs7222, IQS7222_COMMS_TIMEOUT_MS); +} + +static int iqs7222_read_burst(struct iqs7222_private *iqs7222, + u16 reg, void *val, u16 num_val) +{ + u8 reg_buf[sizeof(__be16)]; + int ret, i; + struct i2c_client *client = iqs7222->client; + struct i2c_msg msg[] = { + { + .addr = client->addr, + .flags = 0, + .len = reg > U8_MAX ? sizeof(reg) : sizeof(u8), + .buf = reg_buf, + }, + { + .addr = client->addr, + .flags = I2C_M_RD, + .len = num_val * sizeof(__le16), + .buf = (u8 *)val, + }, + }; + + if (reg > U8_MAX) + put_unaligned_be16(reg, reg_buf); + else + *reg_buf = (u8)reg; + + /* + * The following loop protects against an edge case in which the RDY + * pin is automatically deasserted just as the read is initiated. In + * that case, the read must be retried using forced communication. + */ + for (i = 0; i < IQS7222_NUM_RETRIES; i++) { + ret = iqs7222_force_comms(iqs7222); + if (ret < 0) + continue; + + ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg)); + if (ret < (int)ARRAY_SIZE(msg)) { + if (ret >= 0) + ret = -EIO; + + msleep(IQS7222_COMMS_RETRY_MS); + continue; + } + + if (get_unaligned_le16(msg[1].buf) == IQS7222_COMMS_ERROR) { + ret = -ENODATA; + continue; + } + + ret = 0; + break; + } + + /* + * The following delay ensures the device has deasserted the RDY pin + * following the I2C stop condition. + */ + usleep_range(50, 100); + + if (ret < 0) + dev_err(&client->dev, + "Failed to read from address 0x%04X: %d\n", reg, ret); + + return ret; +} + +static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val) +{ + __le16 val_buf; + int error; + + error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1); + if (error) + return error; + + *val = le16_to_cpu(val_buf); + + return 0; +} + +static int iqs7222_write_burst(struct iqs7222_private *iqs7222, + u16 reg, const void *val, u16 num_val) +{ + int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8); + int val_len = num_val * sizeof(__le16); + int msg_len = reg_len + val_len; + int ret, i; + struct i2c_client *client = iqs7222->client; + u8 *msg_buf; + + msg_buf = kzalloc(msg_len, GFP_KERNEL); + if (!msg_buf) + return -ENOMEM; + + if (reg > U8_MAX) + put_unaligned_be16(reg, msg_buf); + else + *msg_buf = (u8)reg; + + memcpy(msg_buf + reg_len, val, val_len); + + /* + * The following loop protects against an edge case in which the RDY + * pin is automatically asserted just before the force communication + * command is sent. + * + * In that case, the subsequent I2C stop condition tricks the device + * into preemptively deasserting the RDY pin and the command must be + * sent again. 
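The burst helpers in this driver use a small addressing convention, visible in iqs7222_read_burst() above and iqs7222_write_burst() below: register addresses above 0xFF (the extended groups at 0x8000 and up) are sent as a big-endian 16-bit pointer, smaller ones as a single byte, while the data words themselves are exchanged little-endian. A sketch of just the address prefix, with an illustrative helper name:

#include <linux/kernel.h>
#include <asm/unaligned.h>

/* Build the register-address prefix used ahead of the payload words. */
static int iqs7222_reg_prefix(u16 reg, u8 *buf)
{
        if (reg > U8_MAX) {
                put_unaligned_be16(reg, buf);
                return sizeof(__be16);
        }

        buf[0] = (u8)reg;
        return sizeof(u8);
}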
+ */ + for (i = 0; i < IQS7222_NUM_RETRIES; i++) { + ret = iqs7222_force_comms(iqs7222); + if (ret < 0) + continue; + + ret = i2c_master_send(client, msg_buf, msg_len); + if (ret < msg_len) { + if (ret >= 0) + ret = -EIO; + + msleep(IQS7222_COMMS_RETRY_MS); + continue; + } + + ret = 0; + break; + } + + kfree(msg_buf); + + usleep_range(50, 100); + + if (ret < 0) + dev_err(&client->dev, + "Failed to write to address 0x%04X: %d\n", reg, ret); + + return ret; +} + +static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val) +{ + __le16 val_buf = cpu_to_le16(val); + + return iqs7222_write_burst(iqs7222, reg, &val_buf, 1); +} + +static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222) +{ + struct i2c_client *client = iqs7222->client; + ktime_t ati_timeout; + u16 sys_status = 0; + u16 sys_setup = iqs7222->sys_setup[0] & ~IQS7222_SYS_SETUP_ACK_RESET; + int error, i; + + for (i = 0; i < IQS7222_NUM_RETRIES; i++) { + /* + * Trigger ATI from streaming and normal-power modes so that + * the RDY pin continues to be asserted during ATI. + */ + error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP, + sys_setup | + IQS7222_SYS_SETUP_REDO_ATI); + if (error) + return error; + + ati_timeout = ktime_add_ms(ktime_get(), IQS7222_ATI_TIMEOUT_MS); + + do { + error = iqs7222_irq_poll(iqs7222, + IQS7222_COMMS_TIMEOUT_MS); + if (error) + continue; + + error = iqs7222_read_word(iqs7222, IQS7222_SYS_STATUS, + &sys_status); + if (error) + return error; + + if (sys_status & IQS7222_SYS_STATUS_ATI_ACTIVE) + continue; + + if (sys_status & IQS7222_SYS_STATUS_ATI_ERROR) + break; + + /* + * Use stream-in-touch mode if either slider reports + * absolute position. + */ + sys_setup |= test_bit(EV_ABS, iqs7222->keypad->evbit) + ? IQS7222_SYS_SETUP_INTF_MODE_TOUCH + : IQS7222_SYS_SETUP_INTF_MODE_EVENT; + sys_setup |= IQS7222_SYS_SETUP_PWR_MODE_AUTO; + + return iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP, + sys_setup); + } while (ktime_compare(ktime_get(), ati_timeout) < 0); + + dev_err(&client->dev, + "ATI attempt %d of %d failed with status 0x%02X, %s\n", + i + 1, IQS7222_NUM_RETRIES, (u8)sys_status, + i < IQS7222_NUM_RETRIES ? "retrying..." : "stopping"); + } + + return -ETIMEDOUT; +} + +static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + int comms_offset = dev_desc->comms_offset; + int error, i, j, k; + + /* + * Take advantage of the stop-bit disable function, if available, to + * save the trouble of having to reopen a communication window after + * each burst read or write. 
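The device-specific iqs7222_devs[] tables earlier in the file describe each register group as a base address plus a number of rows and columns; iqs7222_dev_init() below walks them row by row, with successive rows spaced by IQS7222_REG_OFFSET and each row's columns transferred as one burst of 16-bit words. The row addressing reduces to the following sketch; for example, row 3 of the IQS7222A channel group based at 0xA000 would start at 0xA300.

#include <linux/types.h>

#define REG_GROUP_STRIDE        0x100   /* mirrors IQS7222_REG_OFFSET above */

/* Register address of row 'row' within a group starting at 'group_base'. */
static u16 iqs7222_row_addr(u16 group_base, int row)
{
        return group_base + row * REG_GROUP_STRIDE;
}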
+ */ + if (comms_offset) { + u16 comms_setup; + + error = iqs7222_read_word(iqs7222, + IQS7222_SYS_SETUP + comms_offset, + &comms_setup); + if (error) + return error; + + error = iqs7222_write_word(iqs7222, + IQS7222_SYS_SETUP + comms_offset, + comms_setup | IQS7222_COMMS_HOLD); + if (error) + return error; + } + + for (i = 0; i < IQS7222_NUM_REG_GRPS; i++) { + int num_row = dev_desc->reg_grps[i].num_row; + int num_col = dev_desc->reg_grps[i].num_col; + u16 reg = dev_desc->reg_grps[i].base; + __le16 *val_buf; + u16 *val; + + if (!num_col) + continue; + + val = iqs7222_setup(iqs7222, i, 0); + if (!val) + continue; + + val_buf = kcalloc(num_col, sizeof(__le16), GFP_KERNEL); + if (!val_buf) + return -ENOMEM; + + for (j = 0; j < num_row; j++) { + switch (dir) { + case READ: + error = iqs7222_read_burst(iqs7222, reg, + val_buf, num_col); + for (k = 0; k < num_col; k++) + val[k] = le16_to_cpu(val_buf[k]); + break; + + case WRITE: + for (k = 0; k < num_col; k++) + val_buf[k] = cpu_to_le16(val[k]); + error = iqs7222_write_burst(iqs7222, reg, + val_buf, num_col); + break; + + default: + error = -EINVAL; + } + + if (error) + break; + + reg += IQS7222_REG_OFFSET; + val += iqs7222_max_cols[i]; + } + + kfree(val_buf); + + if (error) + return error; + } + + if (comms_offset) { + u16 comms_setup; + + error = iqs7222_read_word(iqs7222, + IQS7222_SYS_SETUP + comms_offset, + &comms_setup); + if (error) + return error; + + error = iqs7222_write_word(iqs7222, + IQS7222_SYS_SETUP + comms_offset, + comms_setup & ~IQS7222_COMMS_HOLD); + if (error) + return error; + } + + if (dir == READ) + return 0; + + return iqs7222_ati_trigger(iqs7222); +} + +static int iqs7222_dev_info(struct iqs7222_private *iqs7222) +{ + struct i2c_client *client = iqs7222->client; + bool prod_num_valid = false; + __le16 dev_id[3]; + int error, i; + + error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id, + ARRAY_SIZE(dev_id)); + if (error) + return error; + + for (i = 0; i < ARRAY_SIZE(iqs7222_devs); i++) { + if (le16_to_cpu(dev_id[0]) != iqs7222_devs[i].prod_num) + continue; + + prod_num_valid = true; + + if (le16_to_cpu(dev_id[1]) < iqs7222_devs[i].fw_major) + continue; + + if (le16_to_cpu(dev_id[2]) < iqs7222_devs[i].fw_minor) + continue; + + iqs7222->dev_desc = &iqs7222_devs[i]; + return 0; + } + + if (prod_num_valid) + dev_err(&client->dev, "Unsupported firmware revision: %u.%u\n", + le16_to_cpu(dev_id[1]), le16_to_cpu(dev_id[2])); + else + dev_err(&client->dev, "Unrecognized product number: %u\n", + le16_to_cpu(dev_id[0])); + + return -EINVAL; +} + +static int iqs7222_gpio_select(struct iqs7222_private *iqs7222, + struct fwnode_handle *child_node, + int child_enable, u16 child_link) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + int num_gpio = dev_desc->reg_grps[IQS7222_REG_GRP_GPIO].num_row; + int error, count, i; + unsigned int gpio_sel[ARRAY_SIZE(iqs7222_gpio_links)]; + + if (!num_gpio) + return 0; + + if (!fwnode_property_present(child_node, "azoteq,gpio-select")) + return 0; + + count = fwnode_property_count_u32(child_node, "azoteq,gpio-select"); + if (count > num_gpio) { + dev_err(&client->dev, "Invalid number of %s GPIOs\n", + fwnode_get_name(child_node)); + return -EINVAL; + } else if (count < 0) { + dev_err(&client->dev, "Failed to count %s GPIOs: %d\n", + fwnode_get_name(child_node), count); + return count; + } + + error = fwnode_property_read_u32_array(child_node, + "azoteq,gpio-select", + gpio_sel, count); + if (error) { + dev_err(&client->dev, 
"Failed to read %s GPIOs: %d\n", + fwnode_get_name(child_node), error); + return error; + } + + for (i = 0; i < count; i++) { + u16 *gpio_setup; + + if (gpio_sel[i] >= num_gpio) { + dev_err(&client->dev, "Invalid %s GPIO: %u\n", + fwnode_get_name(child_node), gpio_sel[i]); + return -EINVAL; + } + + gpio_setup = iqs7222->gpio_setup[gpio_sel[i]]; + + if (gpio_setup[2] && child_link != gpio_setup[2]) { + dev_err(&client->dev, + "Conflicting GPIO %u event types\n", + gpio_sel[i]); + return -EINVAL; + } + + gpio_setup[0] |= IQS7222_GPIO_SETUP_0_GPIO_EN; + gpio_setup[1] |= child_enable; + gpio_setup[2] = child_link; + } + + return 0; +} + +static int iqs7222_parse_props(struct iqs7222_private *iqs7222, + struct fwnode_handle **child_node, + int child_index, + enum iqs7222_reg_grp_id reg_grp, + enum iqs7222_reg_key_id reg_key) +{ + u16 *setup = iqs7222_setup(iqs7222, reg_grp, child_index); + struct i2c_client *client = iqs7222->client; + struct fwnode_handle *reg_grp_node; + char reg_grp_name[16]; + int i; + + switch (reg_grp) { + case IQS7222_REG_GRP_CYCLE: + case IQS7222_REG_GRP_CHAN: + case IQS7222_REG_GRP_SLDR: + case IQS7222_REG_GRP_GPIO: + case IQS7222_REG_GRP_BTN: + /* + * These groups derive a child node and return it to the caller + * for additional group-specific processing. In some cases, the + * child node may have already been derived. + */ + reg_grp_node = *child_node; + if (reg_grp_node) + break; + + snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d", + iqs7222_reg_grp_names[reg_grp], child_index); + + reg_grp_node = device_get_named_child_node(&client->dev, + reg_grp_name); + if (!reg_grp_node) + return 0; + + *child_node = reg_grp_node; + break; + + case IQS7222_REG_GRP_GLBL: + case IQS7222_REG_GRP_FILT: + case IQS7222_REG_GRP_SYS: + /* + * These groups are not organized beneath a child node, nor are + * they subject to any additional processing by the caller. + */ + reg_grp_node = dev_fwnode(&client->dev); + break; + + default: + return -EINVAL; + } + + for (i = 0; i < ARRAY_SIZE(iqs7222_props); i++) { + const char *name = iqs7222_props[i].name; + int reg_offset = iqs7222_props[i].reg_offset; + int reg_shift = iqs7222_props[i].reg_shift; + int reg_width = iqs7222_props[i].reg_width; + int val_pitch = iqs7222_props[i].val_pitch ? : 1; + int val_min = iqs7222_props[i].val_min; + int val_max = iqs7222_props[i].val_max; + bool invert = iqs7222_props[i].invert; + const char *label = iqs7222_props[i].label ? : name; + unsigned int val; + int error; + + if (iqs7222_props[i].reg_grp != reg_grp || + iqs7222_props[i].reg_key != reg_key) + continue; + + /* + * Boolean register fields are one bit wide; they are forcibly + * reset to provide a means to undo changes by a bootloader if + * necessary. + * + * Scalar fields, on the other hand, are left untouched unless + * their corresponding properties are present. 
+ */ + if (reg_width == 1) { + if (invert) + setup[reg_offset] |= BIT(reg_shift); + else + setup[reg_offset] &= ~BIT(reg_shift); + } + + if (!fwnode_property_present(reg_grp_node, name)) + continue; + + if (reg_width == 1) { + if (invert) + setup[reg_offset] &= ~BIT(reg_shift); + else + setup[reg_offset] |= BIT(reg_shift); + + continue; + } + + error = fwnode_property_read_u32(reg_grp_node, name, &val); + if (error) { + dev_err(&client->dev, "Failed to read %s %s: %d\n", + fwnode_get_name(reg_grp_node), label, error); + return error; + } + + if (!val_max) + val_max = GENMASK(reg_width - 1, 0) * val_pitch; + + if (val < val_min || val > val_max) { + dev_err(&client->dev, "Invalid %s %s: %u\n", + fwnode_get_name(reg_grp_node), label, val); + return -EINVAL; + } + + setup[reg_offset] &= ~GENMASK(reg_shift + reg_width - 1, + reg_shift); + setup[reg_offset] |= (val / val_pitch << reg_shift); + } + + return 0; +} + +static int iqs7222_parse_cycle(struct iqs7222_private *iqs7222, int cycle_index) +{ + u16 *cycle_setup = iqs7222->cycle_setup[cycle_index]; + struct i2c_client *client = iqs7222->client; + struct fwnode_handle *cycle_node = NULL; + unsigned int pins[9]; + int error, count, i; + + /* + * Each channel shares a cycle with one other channel; the mapping of + * channels to cycles is fixed. Properties defined for a cycle impact + * both channels tied to the cycle. + */ + error = iqs7222_parse_props(iqs7222, &cycle_node, cycle_index, + IQS7222_REG_GRP_CYCLE, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + if (!cycle_node) + return 0; + + /* + * Unlike channels which are restricted to a select range of CRx pins + * based on channel number, any cycle can claim any of the device's 9 + * CTx pins (CTx0-8). + */ + if (!fwnode_property_present(cycle_node, "azoteq,tx-enable")) + return 0; + + count = fwnode_property_count_u32(cycle_node, "azoteq,tx-enable"); + if (count < 0) { + dev_err(&client->dev, "Failed to count %s CTx pins: %d\n", + fwnode_get_name(cycle_node), count); + return count; + } else if (count > ARRAY_SIZE(pins)) { + dev_err(&client->dev, "Invalid number of %s CTx pins\n", + fwnode_get_name(cycle_node)); + return -EINVAL; + } + + error = fwnode_property_read_u32_array(cycle_node, "azoteq,tx-enable", + pins, count); + if (error) { + dev_err(&client->dev, "Failed to read %s CTx pins: %d\n", + fwnode_get_name(cycle_node), error); + return error; + } + + cycle_setup[1] &= ~GENMASK(7 + ARRAY_SIZE(pins) - 1, 7); + + for (i = 0; i < count; i++) { + if (pins[i] > 8) { + dev_err(&client->dev, "Invalid %s CTx pin: %u\n", + fwnode_get_name(cycle_node), pins[i]); + return -EINVAL; + } + + cycle_setup[1] |= BIT(pins[i] + 7); + } + + return 0; +} + +static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, int chan_index) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + struct fwnode_handle *chan_node = NULL; + int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; + int ext_chan = rounddown(num_chan, 10); + int error, i; + u16 *chan_setup = iqs7222->chan_setup[chan_index]; + u16 *sys_setup = iqs7222->sys_setup; + unsigned int val; + + error = iqs7222_parse_props(iqs7222, &chan_node, chan_index, + IQS7222_REG_GRP_CHAN, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + if (!chan_node) + return 0; + + if (dev_desc->allow_offset) { + sys_setup[dev_desc->allow_offset] |= BIT(chan_index); + if (fwnode_property_present(chan_node, "azoteq,ulp-allow")) + sys_setup[dev_desc->allow_offset] &= 
~BIT(chan_index); + } + + chan_setup[0] |= IQS7222_CHAN_SETUP_0_CHAN_EN; + + /* + * The reference channel function allows for differential measurements + * and is only available in the case of IQS7222A or IQS7222C. + */ + if (dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_col > 4 && + fwnode_property_present(chan_node, "azoteq,ref-select")) { + u16 *ref_setup; + + error = fwnode_property_read_u32(chan_node, "azoteq,ref-select", + &val); + if (error) { + dev_err(&client->dev, + "Failed to read %s reference channel: %d\n", + fwnode_get_name(chan_node), error); + return error; + } + + if (val >= ext_chan) { + dev_err(&client->dev, + "Invalid %s reference channel: %u\n", + fwnode_get_name(chan_node), val); + return -EINVAL; + } + + ref_setup = iqs7222->chan_setup[val]; + + /* + * Configure the current channel as a follower of the selected + * reference channel. + */ + chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_FOLLOW; + chan_setup[4] = val * 42 + 1048; + + if (!fwnode_property_read_u32(chan_node, "azoteq,ref-weight", + &val)) { + if (val > U16_MAX) { + dev_err(&client->dev, + "Invalid %s reference weight: %u\n", + fwnode_get_name(chan_node), val); + return -EINVAL; + } + + chan_setup[5] = val; + } + + /* + * Configure the selected channel as a reference channel which + * serves the current channel. + */ + ref_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_REF; + ref_setup[5] |= BIT(chan_index); + + ref_setup[4] = dev_desc->touch_link; + if (fwnode_property_present(chan_node, "azoteq,use-prox")) + ref_setup[4] -= 2; + } + + if (fwnode_property_present(chan_node, "azoteq,rx-enable")) { + /* + * Each channel can claim up to 4 CRx pins. The first half of + * the channels can use CRx0-3, while the second half can use + * CRx4-7. + */ + unsigned int pins[4]; + int count; + + count = fwnode_property_count_u32(chan_node, + "azoteq,rx-enable"); + if (count < 0) { + dev_err(&client->dev, + "Failed to count %s CRx pins: %d\n", + fwnode_get_name(chan_node), count); + return count; + } else if (count > ARRAY_SIZE(pins)) { + dev_err(&client->dev, + "Invalid number of %s CRx pins\n", + fwnode_get_name(chan_node)); + return -EINVAL; + } + + error = fwnode_property_read_u32_array(chan_node, + "azoteq,rx-enable", + pins, count); + if (error) { + dev_err(&client->dev, + "Failed to read %s CRx pins: %d\n", + fwnode_get_name(chan_node), error); + return error; + } + + chan_setup[0] &= ~GENMASK(4 + ARRAY_SIZE(pins) - 1, 4); + + for (i = 0; i < count; i++) { + int min_crx = chan_index < ext_chan / 2 ? 0 : 4; + + if (pins[i] < min_crx || pins[i] > min_crx + 3) { + dev_err(&client->dev, + "Invalid %s CRx pin: %u\n", + fwnode_get_name(chan_node), pins[i]); + return -EINVAL; + } + + chan_setup[0] |= BIT(pins[i] + 4 - min_crx); + } + } + + for (i = 0; i < ARRAY_SIZE(iqs7222_kp_events); i++) { + const char *event_name = iqs7222_kp_events[i].name; + u16 event_enable = iqs7222_kp_events[i].enable; + struct fwnode_handle *event_node; + + event_node = fwnode_get_named_child_node(chan_node, event_name); + if (!event_node) + continue; + + error = iqs7222_parse_props(iqs7222, &event_node, chan_index, + IQS7222_REG_GRP_BTN, + iqs7222_kp_events[i].reg_key); + if (error) + return error; + + error = iqs7222_gpio_select(iqs7222, event_node, + BIT(chan_index), + dev_desc->touch_link - (i ? 0 : 2)); + if (error) + return error; + + if (!fwnode_property_read_u32(event_node, + "azoteq,timeout-press-ms", + &val)) { + /* + * The IQS7222B employs a global pair of press timeout + * registers as opposed to channel-specific registers. 
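The press-timeout handling that follows this comment packs two 8-bit timeouts (500 ms per LSB) into a single 16-bit register, one byte lane per event. The same packing as a standalone sketch, with hypothetical names; range checking is assumed to have happened already, as the driver does with its U8_MAX * 500 test:

	#include <linux/types.h>

	/*
	 * Sketch only: store a press timeout (milliseconds, 500 ms per LSB)
	 * into byte lane 'event' (0 or 1) of a shared 16-bit setup word.
	 * The caller is expected to have rejected values above 255 * 500 ms.
	 */
	static void example_pack_press_timeout(u16 *setup, unsigned int event,
					       unsigned int timeout_ms)
	{
		*setup &= ~(0xffu << (event * 8));		/* clear the byte lane */
		*setup |= (timeout_ms / 500) << (event * 8);	/* 500 ms resolution */
	}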
+ */ + u16 *setup = dev_desc->reg_grps + [IQS7222_REG_GRP_BTN].num_col > 2 ? + &iqs7222->btn_setup[chan_index][2] : + &sys_setup[9]; + + if (val > U8_MAX * 500) { + dev_err(&client->dev, + "Invalid %s press timeout: %u\n", + fwnode_get_name(chan_node), val); + return -EINVAL; + } + + *setup &= ~(U8_MAX << i * 8); + *setup |= (val / 500 << i * 8); + } + + error = fwnode_property_read_u32(event_node, "linux,code", + &val); + if (error) { + dev_err(&client->dev, "Failed to read %s code: %d\n", + fwnode_get_name(chan_node), error); + return error; + } + + iqs7222->kp_code[chan_index][i] = val; + iqs7222->kp_type[chan_index][i] = EV_KEY; + + if (fwnode_property_present(event_node, "linux,input-type")) { + error = fwnode_property_read_u32(event_node, + "linux,input-type", + &val); + if (error) { + dev_err(&client->dev, + "Failed to read %s input type: %d\n", + fwnode_get_name(chan_node), error); + return error; + } + + if (val != EV_KEY && val != EV_SW) { + dev_err(&client->dev, + "Invalid %s input type: %u\n", + fwnode_get_name(chan_node), val); + return -EINVAL; + } + + iqs7222->kp_type[chan_index][i] = val; + } + + /* + * Reference channels can opt out of event reporting by using + * KEY_RESERVED in place of a true key or switch code. + */ + if (iqs7222->kp_type[chan_index][i] == EV_KEY && + iqs7222->kp_code[chan_index][i] == KEY_RESERVED) + continue; + + input_set_capability(iqs7222->keypad, + iqs7222->kp_type[chan_index][i], + iqs7222->kp_code[chan_index][i]); + + if (!dev_desc->event_offset) + continue; + + sys_setup[dev_desc->event_offset] |= event_enable; + } + + /* + * The following call handles a special pair of properties that apply + * to a channel node, but reside within the button (event) group. + */ + return iqs7222_parse_props(iqs7222, &chan_node, chan_index, + IQS7222_REG_GRP_BTN, + IQS7222_REG_KEY_DEBOUNCE); +} + +static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, int sldr_index) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + struct fwnode_handle *sldr_node = NULL; + int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; + int ext_chan = rounddown(num_chan, 10); + int count, error, reg_offset, i; + u16 *sldr_setup = iqs7222->sldr_setup[sldr_index]; + u16 *sys_setup = iqs7222->sys_setup; + unsigned int chan_sel[4], val; + + error = iqs7222_parse_props(iqs7222, &sldr_node, sldr_index, + IQS7222_REG_GRP_SLDR, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + if (!sldr_node) + return 0; + + /* + * Each slider can be spread across 3 to 4 channels. It is possible to + * select only 2 channels, but doing so prevents the slider from using + * the specified resolution. + */ + count = fwnode_property_count_u32(sldr_node, "azoteq,channel-select"); + if (count < 0) { + dev_err(&client->dev, "Failed to count %s channels: %d\n", + fwnode_get_name(sldr_node), count); + return count; + } else if (count < 3 || count > ARRAY_SIZE(chan_sel)) { + dev_err(&client->dev, "Invalid number of %s channels\n", + fwnode_get_name(sldr_node)); + return -EINVAL; + } + + error = fwnode_property_read_u32_array(sldr_node, + "azoteq,channel-select", + chan_sel, count); + if (error) { + dev_err(&client->dev, "Failed to read %s channels: %d\n", + fwnode_get_name(sldr_node), error); + return error; + } + + /* + * Resolution and top speed, if small enough, are packed into a single + * register. Otherwise, each occupies its own register and the rest of + * the slider-related register addresses are offset by one. 
+ */ + reg_offset = dev_desc->sldr_res < U16_MAX ? 0 : 1; + + sldr_setup[0] |= count; + sldr_setup[3 + reg_offset] &= ~IQS7222_SLDR_SETUP_3_CHAN_SEL_MASK; + + for (i = 0; i < ARRAY_SIZE(chan_sel); i++) { + sldr_setup[5 + reg_offset + i] = 0; + if (i >= count) + continue; + + if (chan_sel[i] >= ext_chan) { + dev_err(&client->dev, "Invalid %s channel: %u\n", + fwnode_get_name(sldr_node), chan_sel[i]); + return -EINVAL; + } + + /* + * The following fields indicate which channels participate in + * the slider, as well as each channel's relative placement. + */ + sldr_setup[3 + reg_offset] |= BIT(chan_sel[i]); + sldr_setup[5 + reg_offset + i] = chan_sel[i] * 42 + 1080; + } + + sldr_setup[4 + reg_offset] = dev_desc->touch_link; + if (fwnode_property_present(sldr_node, "azoteq,use-prox")) + sldr_setup[4 + reg_offset] -= 2; + + if (!fwnode_property_read_u32(sldr_node, "azoteq,slider-size", &val)) { + if (!val || val > dev_desc->sldr_res) { + dev_err(&client->dev, "Invalid %s size: %u\n", + fwnode_get_name(sldr_node), val); + return -EINVAL; + } + + if (reg_offset) { + sldr_setup[3] = val; + } else { + sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_RES_MASK; + sldr_setup[2] |= (val / 16 << + IQS7222_SLDR_SETUP_2_RES_SHIFT); + } + } + + if (!fwnode_property_read_u32(sldr_node, "azoteq,top-speed", &val)) { + if (val > (reg_offset ? U16_MAX : U8_MAX * 4)) { + dev_err(&client->dev, "Invalid %s top speed: %u\n", + fwnode_get_name(sldr_node), val); + return -EINVAL; + } + + if (reg_offset) { + sldr_setup[2] = val; + } else { + sldr_setup[2] &= ~IQS7222_SLDR_SETUP_2_TOP_SPEED_MASK; + sldr_setup[2] |= (val / 4); + } + } + + if (!fwnode_property_read_u32(sldr_node, "linux,axis", &val)) { + u16 sldr_max = sldr_setup[3] - 1; + + if (!reg_offset) { + sldr_max = sldr_setup[2]; + + sldr_max &= IQS7222_SLDR_SETUP_2_RES_MASK; + sldr_max >>= IQS7222_SLDR_SETUP_2_RES_SHIFT; + + sldr_max = sldr_max * 16 - 1; + } + + input_set_abs_params(iqs7222->keypad, val, 0, sldr_max, 0, 0); + iqs7222->sl_axis[sldr_index] = val; + } + + if (dev_desc->wheel_enable) { + sldr_setup[0] &= ~dev_desc->wheel_enable; + if (iqs7222->sl_axis[sldr_index] == ABS_WHEEL) + sldr_setup[0] |= dev_desc->wheel_enable; + } + + for (i = 0; i < ARRAY_SIZE(iqs7222_sl_events); i++) { + const char *event_name = iqs7222_sl_events[i].name; + struct fwnode_handle *event_node; + + /* + * The absence of a register offset means the remaining fields + * in the group represent gesture settings. + */ + if (iqs7222_sl_events[i].enable && !reg_offset) + sldr_setup[9] &= ~iqs7222_sl_events[i].enable; + + event_node = fwnode_get_named_child_node(sldr_node, event_name); + if (!event_node) + continue; + + error = iqs7222_parse_props(iqs7222, &event_node, sldr_index, + IQS7222_REG_GRP_SLDR, + reg_offset ? + IQS7222_REG_KEY_RESERVED : + iqs7222_sl_events[i].reg_key); + if (error) + return error; + + error = fwnode_property_read_u32(event_node, "linux,code", + &val); + if (error) { + dev_err(&client->dev, "Failed to read %s code: %d\n", + fwnode_get_name(sldr_node), error); + return error; + } + + iqs7222->sl_code[sldr_index][i] = val; + input_set_capability(iqs7222->keypad, EV_KEY, val); + + /* + * The press/release event is determined based on whether the + * coordinate field reports 0xFFFF and has no explicit enable + * control. 
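Later, in iqs7222_report(), the slider's touch state is inferred from the coordinate word itself: any in-range position counts as a press, while the out-of-range marker (0xFFFF) counts as a release. A minimal sketch of that inference, with illustrative names:

	#include <linux/input.h>
	#include <linux/types.h>

	/*
	 * Sketch only: report a slider position and the press/release key
	 * derived from it. 'res' is the slider resolution; positions at or
	 * above it (the device reports 0xFFFF when untouched) are treated
	 * as a release.
	 */
	static void example_report_slider(struct input_dev *input,
					  unsigned int axis,
					  unsigned int keycode,
					  u16 pos, u16 res)
	{
		bool touched = pos < res;

		if (touched)
			input_report_abs(input, axis, pos);

		input_report_key(input, keycode, touched);
		input_sync(input);
	}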
+ */ + if (!iqs7222_sl_events[i].enable || reg_offset) + continue; + + sldr_setup[9] |= iqs7222_sl_events[i].enable; + + error = iqs7222_gpio_select(iqs7222, event_node, + iqs7222_sl_events[i].enable, + 1568 + sldr_index * 30); + if (error) + return error; + + if (!dev_desc->event_offset) + continue; + + sys_setup[dev_desc->event_offset] |= BIT(10 + sldr_index); + } + + /* + * The following call handles a special pair of properties that shift + * to make room for a wheel enable control in the case of IQS7222C. + */ + return iqs7222_parse_props(iqs7222, &sldr_node, sldr_index, + IQS7222_REG_GRP_SLDR, + dev_desc->wheel_enable ? + IQS7222_REG_KEY_WHEEL : + IQS7222_REG_KEY_NO_WHEEL); +} + +static int iqs7222_parse_all(struct iqs7222_private *iqs7222) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + const struct iqs7222_reg_grp_desc *reg_grps = dev_desc->reg_grps; + u16 *sys_setup = iqs7222->sys_setup; + int error, i; + + if (dev_desc->event_offset) + sys_setup[dev_desc->event_offset] = IQS7222_EVENT_MASK_ATI; + + for (i = 0; i < reg_grps[IQS7222_REG_GRP_CYCLE].num_row; i++) { + error = iqs7222_parse_cycle(iqs7222, i); + if (error) + return error; + } + + error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_GLBL, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + for (i = 0; i < reg_grps[IQS7222_REG_GRP_GPIO].num_row; i++) { + struct fwnode_handle *gpio_node = NULL; + u16 *gpio_setup = iqs7222->gpio_setup[i]; + int j; + + gpio_setup[0] &= ~IQS7222_GPIO_SETUP_0_GPIO_EN; + gpio_setup[1] = 0; + gpio_setup[2] = 0; + + error = iqs7222_parse_props(iqs7222, &gpio_node, i, + IQS7222_REG_GRP_GPIO, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + if (reg_grps[IQS7222_REG_GRP_GPIO].num_row == 1) + continue; + + /* + * The IQS7222C exposes multiple GPIO and must be informed + * as to which GPIO this group represents. 
+ */ + for (j = 0; j < ARRAY_SIZE(iqs7222_gpio_links); j++) + gpio_setup[0] &= ~BIT(iqs7222_gpio_links[j]); + + gpio_setup[0] |= BIT(iqs7222_gpio_links[i]); + } + + for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) { + u16 *chan_setup = iqs7222->chan_setup[i]; + + chan_setup[0] &= ~IQS7222_CHAN_SETUP_0_REF_MODE_MASK; + chan_setup[0] &= ~IQS7222_CHAN_SETUP_0_CHAN_EN; + + chan_setup[5] = 0; + } + + for (i = 0; i < reg_grps[IQS7222_REG_GRP_CHAN].num_row; i++) { + error = iqs7222_parse_chan(iqs7222, i); + if (error) + return error; + } + + error = iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_FILT, + IQS7222_REG_KEY_NONE); + if (error) + return error; + + for (i = 0; i < reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) { + u16 *sldr_setup = iqs7222->sldr_setup[i]; + + sldr_setup[0] &= ~IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK; + + error = iqs7222_parse_sldr(iqs7222, i); + if (error) + return error; + } + + sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK; + sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK; + + sys_setup[0] |= IQS7222_SYS_SETUP_ACK_RESET; + + return iqs7222_parse_props(iqs7222, NULL, 0, IQS7222_REG_GRP_SYS, + IQS7222_REG_KEY_NONE); +} + +static int iqs7222_report(struct iqs7222_private *iqs7222) +{ + const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; + struct i2c_client *client = iqs7222->client; + int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; + int num_stat = dev_desc->reg_grps[IQS7222_REG_GRP_STAT].num_col; + int error, i, j; + __le16 status[IQS7222_MAX_COLS_STAT]; + + error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status, + num_stat); + if (error) + return error; + + if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_RESET) { + dev_err(&client->dev, "Unexpected device reset\n"); + return iqs7222_dev_init(iqs7222, WRITE); + } + + if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_ATI_ERROR) { + dev_err(&client->dev, "Unexpected ATI error\n"); + return iqs7222_ati_trigger(iqs7222); + } + + if (le16_to_cpu(status[0]) & IQS7222_SYS_STATUS_ATI_ACTIVE) + return 0; + + for (i = 0; i < num_chan; i++) { + u16 *chan_setup = iqs7222->chan_setup[i]; + + if (!(chan_setup[0] & IQS7222_CHAN_SETUP_0_CHAN_EN)) + continue; + + for (j = 0; j < ARRAY_SIZE(iqs7222_kp_events); j++) { + /* + * Proximity state begins at offset 2 and spills into + * offset 3 for devices with more than 16 channels. + * + * Touch state begins at the first offset immediately + * following proximity state. + */ + int k = 2 + j * (num_chan > 16 ? 2 : 1); + u16 state = le16_to_cpu(status[k + i / 16]); + + input_event(iqs7222->keypad, + iqs7222->kp_type[i][j], + iqs7222->kp_code[i][j], + !!(state & BIT(i % 16))); + } + } + + for (i = 0; i < dev_desc->reg_grps[IQS7222_REG_GRP_SLDR].num_row; i++) { + u16 *sldr_setup = iqs7222->sldr_setup[i]; + u16 sldr_pos = le16_to_cpu(status[4 + i]); + u16 state = le16_to_cpu(status[6 + i]); + + if (!(sldr_setup[0] & IQS7222_SLDR_SETUP_0_CHAN_CNT_MASK)) + continue; + + if (sldr_pos < dev_desc->sldr_res) + input_report_abs(iqs7222->keypad, iqs7222->sl_axis[i], + sldr_pos); + + for (j = 0; j < ARRAY_SIZE(iqs7222_sl_events); j++) { + u16 mask = iqs7222_sl_events[j].mask; + u16 val = iqs7222_sl_events[j].val; + + if (!iqs7222_sl_events[j].enable) { + input_report_key(iqs7222->keypad, + iqs7222->sl_code[i][j], + sldr_pos < dev_desc->sldr_res); + continue; + } + + /* + * The remaining offsets represent gesture state, and + * are discarded in the case of IQS7222C because only + * absolute position is reported. 
+ */ + if (num_stat < IQS7222_MAX_COLS_STAT) + continue; + + input_report_key(iqs7222->keypad, + iqs7222->sl_code[i][j], + (state & mask) == val); + } + } + + input_sync(iqs7222->keypad); + + return 0; +} + +static irqreturn_t iqs7222_irq(int irq, void *context) +{ + struct iqs7222_private *iqs7222 = context; + + return iqs7222_report(iqs7222) ? IRQ_NONE : IRQ_HANDLED; +} + +static int iqs7222_probe(struct i2c_client *client) +{ + struct iqs7222_private *iqs7222; + unsigned long irq_flags; + int error, irq; + + iqs7222 = devm_kzalloc(&client->dev, sizeof(*iqs7222), GFP_KERNEL); + if (!iqs7222) + return -ENOMEM; + + i2c_set_clientdata(client, iqs7222); + iqs7222->client = client; + + iqs7222->keypad = devm_input_allocate_device(&client->dev); + if (!iqs7222->keypad) + return -ENOMEM; + + iqs7222->keypad->name = client->name; + iqs7222->keypad->id.bustype = BUS_I2C; + + /* + * The RDY pin behaves as an interrupt, but must also be polled ahead + * of unsolicited I2C communication. As such, it is first opened as a + * GPIO and then passed to gpiod_to_irq() to register the interrupt. + */ + iqs7222->irq_gpio = devm_gpiod_get(&client->dev, "irq", GPIOD_IN); + if (IS_ERR(iqs7222->irq_gpio)) { + error = PTR_ERR(iqs7222->irq_gpio); + dev_err(&client->dev, "Failed to request IRQ GPIO: %d\n", + error); + return error; + } + + iqs7222->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(iqs7222->reset_gpio)) { + error = PTR_ERR(iqs7222->reset_gpio); + dev_err(&client->dev, "Failed to request reset GPIO: %d\n", + error); + return error; + } + + error = iqs7222_hard_reset(iqs7222); + if (error) + return error; + + error = iqs7222_dev_info(iqs7222); + if (error) + return error; + + error = iqs7222_dev_init(iqs7222, READ); + if (error) + return error; + + error = iqs7222_parse_all(iqs7222); + if (error) + return error; + + error = iqs7222_dev_init(iqs7222, WRITE); + if (error) + return error; + + error = iqs7222_report(iqs7222); + if (error) + return error; + + error = input_register_device(iqs7222->keypad); + if (error) { + dev_err(&client->dev, "Failed to register device: %d\n", error); + return error; + } + + irq = gpiod_to_irq(iqs7222->irq_gpio); + if (irq < 0) + return irq; + + irq_flags = gpiod_is_active_low(iqs7222->irq_gpio) ? 
IRQF_TRIGGER_LOW + : IRQF_TRIGGER_HIGH; + irq_flags |= IRQF_ONESHOT; + + error = devm_request_threaded_irq(&client->dev, irq, NULL, iqs7222_irq, + irq_flags, client->name, iqs7222); + if (error) + dev_err(&client->dev, "Failed to request IRQ: %d\n", error); + + return error; +} + +static const struct of_device_id iqs7222_of_match[] = { + { .compatible = "azoteq,iqs7222a" }, + { .compatible = "azoteq,iqs7222b" }, + { .compatible = "azoteq,iqs7222c" }, + { } +}; +MODULE_DEVICE_TABLE(of, iqs7222_of_match); + +static struct i2c_driver iqs7222_i2c_driver = { + .driver = { + .name = "iqs7222", + .of_match_table = iqs7222_of_match, + }, + .probe_new = iqs7222_probe, +}; +module_i2c_driver(iqs7222_i2c_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS7222A/B/C Capacitive Touch Controller"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 89af52498c96..549df01b6ee3 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -9,9 +9,11 @@ #include <linux/input.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/ktime.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reboot.h> @@ -19,6 +21,16 @@ #define PON_REV2 0x01 +#define PON_SUBTYPE 0x05 + +#define PON_SUBTYPE_PRIMARY 0x01 +#define PON_SUBTYPE_SECONDARY 0x02 +#define PON_SUBTYPE_1REG 0x03 +#define PON_SUBTYPE_GEN2_PRIMARY 0x04 +#define PON_SUBTYPE_GEN2_SECONDARY 0x05 +#define PON_SUBTYPE_GEN3_PBS 0x08 +#define PON_SUBTYPE_GEN3_HLOS 0x09 + #define PON_RT_STS 0x10 #define PON_KPDPWR_N_SET BIT(0) #define PON_RESIN_N_SET BIT(1) @@ -45,6 +57,7 @@ struct pm8941_data { unsigned int status_bit; bool supports_ps_hold_poff_config; bool supports_debounce_config; + bool has_pon_pbs; const char *name; const char *phys; }; @@ -53,13 +66,18 @@ struct pm8941_pwrkey { struct device *dev; int irq; u32 baseaddr; + u32 pon_pbs_baseaddr; struct regmap *regmap; struct input_dev *input; unsigned int revision; + unsigned int subtype; struct notifier_block reboot_notifier; u32 code; + u32 sw_debounce_time_us; + ktime_t sw_debounce_end_time; + bool last_status; const struct pm8941_data *data; }; @@ -129,20 +147,76 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data) { struct pm8941_pwrkey *pwrkey = _data; unsigned int sts; - int error; + int err; + + if (pwrkey->sw_debounce_time_us) { + if (ktime_before(ktime_get(), pwrkey->sw_debounce_end_time)) { + dev_dbg(pwrkey->dev, + "ignoring key event received before debounce end %llu us\n", + pwrkey->sw_debounce_end_time); + return IRQ_HANDLED; + } + } - error = regmap_read(pwrkey->regmap, - pwrkey->baseaddr + PON_RT_STS, &sts); - if (error) + err = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_RT_STS, &sts); + if (err) return IRQ_HANDLED; - input_report_key(pwrkey->input, pwrkey->code, - sts & pwrkey->data->status_bit); + sts &= pwrkey->data->status_bit; + + if (pwrkey->sw_debounce_time_us && !sts) + pwrkey->sw_debounce_end_time = ktime_add_us(ktime_get(), + pwrkey->sw_debounce_time_us); + + /* + * Simulate a press event in case a release event occurred without a + * corresponding press event. 
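The pm8941-pwrkey change above filters interrupts entirely in software: a key release arms a dead time, and any event arriving before that deadline is dropped. The bookkeeping, condensed into a sketch with hypothetical names:

	#include <linux/ktime.h>
	#include <linux/types.h>

	struct example_debounce {
		u32 dead_time_us;	/* 0 disables the filter */
		ktime_t end;		/* armed on key release */
	};

	/* Sketch only: return true if this hardware event should be ignored. */
	static bool example_debounce_filter(struct example_debounce *db,
					    bool pressed)
	{
		if (!db->dead_time_us)
			return false;

		if (ktime_before(ktime_get(), db->end))
			return true;	/* still inside the dead time */

		if (!pressed)
			db->end = ktime_add_us(ktime_get(),
					       db->dead_time_us);

		return false;
	}

The dead time itself is derived from the hardware debounce register, as pm8941_pwrkey_sw_debounce_init() below shows.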
+ */ + if (!pwrkey->last_status && !sts) { + input_report_key(pwrkey->input, pwrkey->code, 1); + input_sync(pwrkey->input); + } + pwrkey->last_status = sts; + + input_report_key(pwrkey->input, pwrkey->code, sts); input_sync(pwrkey->input); return IRQ_HANDLED; } +static int pm8941_pwrkey_sw_debounce_init(struct pm8941_pwrkey *pwrkey) +{ + unsigned int val, addr, mask; + int error; + + if (pwrkey->data->has_pon_pbs && !pwrkey->pon_pbs_baseaddr) { + dev_err(pwrkey->dev, + "PON_PBS address missing, can't read HW debounce time\n"); + return 0; + } + + if (pwrkey->pon_pbs_baseaddr) + addr = pwrkey->pon_pbs_baseaddr + PON_DBC_CTL; + else + addr = pwrkey->baseaddr + PON_DBC_CTL; + error = regmap_read(pwrkey->regmap, addr, &val); + if (error) + return error; + + if (pwrkey->subtype >= PON_SUBTYPE_GEN2_PRIMARY) + mask = 0xf; + else + mask = 0x7; + + pwrkey->sw_debounce_time_us = + 2 * USEC_PER_SEC / (1 << (mask - (val & mask))); + + dev_dbg(pwrkey->dev, "SW debounce time = %u us\n", + pwrkey->sw_debounce_time_us); + + return 0; +} + static int __maybe_unused pm8941_pwrkey_suspend(struct device *dev) { struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev); @@ -171,6 +245,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) struct pm8941_pwrkey *pwrkey; bool pull_up; struct device *parent; + struct device_node *regmap_node; + const __be32 *addr; u32 req_delay; int error; @@ -192,8 +268,10 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) pwrkey->data = of_device_get_match_data(&pdev->dev); parent = pdev->dev.parent; + regmap_node = pdev->dev.of_node; pwrkey->regmap = dev_get_regmap(parent, NULL); if (!pwrkey->regmap) { + regmap_node = parent->of_node; /* * We failed to get regmap for parent. Let's see if we are * a child of pon node and read regmap and reg from its @@ -204,15 +282,21 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to locate regmap\n"); return -ENODEV; } + } - error = of_property_read_u32(parent->of_node, - "reg", &pwrkey->baseaddr); - } else { - error = of_property_read_u32(pdev->dev.of_node, "reg", - &pwrkey->baseaddr); + addr = of_get_address(regmap_node, 0, NULL, NULL); + if (!addr) { + dev_err(&pdev->dev, "reg property missing\n"); + return -EINVAL; + } + pwrkey->baseaddr = be32_to_cpup(addr); + + if (pwrkey->data->has_pon_pbs) { + /* PON_PBS base address is optional */ + addr = of_get_address(regmap_node, 1, NULL, NULL); + if (addr) + pwrkey->pon_pbs_baseaddr = be32_to_cpup(addr); } - if (error) - return error; pwrkey->irq = platform_get_irq(pdev, 0); if (pwrkey->irq < 0) @@ -221,7 +305,14 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_REV2, &pwrkey->revision); if (error) { - dev_err(&pdev->dev, "failed to set debounce: %d\n", error); + dev_err(&pdev->dev, "failed to read revision: %d\n", error); + return error; + } + + error = regmap_read(pwrkey->regmap, pwrkey->baseaddr + PON_SUBTYPE, + &pwrkey->subtype); + if (error) { + dev_err(&pdev->dev, "failed to read subtype: %d\n", error); return error; } @@ -259,6 +350,10 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev) } } + error = pm8941_pwrkey_sw_debounce_init(pwrkey); + if (error) + return error; + if (pwrkey->data->pull_up_bit) { error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PULL_CTL, @@ -320,6 +415,7 @@ static const struct pm8941_data pwrkey_data = { .phys = "pm8941_pwrkey/input0", .supports_ps_hold_poff_config = true, .supports_debounce_config = true, 
+ .has_pon_pbs = false, }; static const struct pm8941_data resin_data = { @@ -329,6 +425,7 @@ static const struct pm8941_data resin_data = { .phys = "pm8941_resin/input0", .supports_ps_hold_poff_config = true, .supports_debounce_config = true, + .has_pon_pbs = false, }; static const struct pm8941_data pon_gen3_pwrkey_data = { @@ -337,6 +434,7 @@ static const struct pm8941_data pon_gen3_pwrkey_data = { .phys = "pmic_pwrkey/input0", .supports_ps_hold_poff_config = false, .supports_debounce_config = false, + .has_pon_pbs = true, }; static const struct pm8941_data pon_gen3_resin_data = { @@ -345,6 +443,7 @@ static const struct pm8941_data pon_gen3_resin_data = { .phys = "pmic_resin/input0", .supports_ps_hold_poff_config = false, .supports_debounce_config = false, + .has_pon_pbs = true, }; static const struct of_device_id pm8941_pwr_key_id_table[] = { diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index fe43e5557ed7..cdcb7737c46a 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -205,6 +205,7 @@ static int bbc_beep_probe(struct platform_device *op) info = &state->u.bbc; info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0); + of_node_put(dp); if (!info->clock_freq) goto out_free; diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index 5f868009d35b..d272f1ec27ba 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c @@ -696,7 +696,7 @@ int cypress_init(struct psmouse *psmouse) err_exit: /* * Reset Cypress Trackpad as a standard mouse. Then - * let psmouse driver commmunicating with it as default PS2 mouse. + * let psmouse driver communicating with it as default PS2 mouse. */ cypress_reset(psmouse); diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c index 164f6c757f6b..2a2459b1b4f2 100644 --- a/drivers/input/mouse/psmouse-smbus.c +++ b/drivers/input/mouse/psmouse-smbus.c @@ -26,6 +26,8 @@ struct psmouse_smbus_dev { static LIST_HEAD(psmouse_smbus_list); static DEFINE_MUTEX(psmouse_smbus_mutex); +static struct workqueue_struct *psmouse_smbus_wq; + static void psmouse_smbus_check_adapter(struct i2c_adapter *adapter) { struct psmouse_smbus_dev *smbdev; @@ -161,7 +163,7 @@ static void psmouse_smbus_schedule_remove(struct i2c_client *client) INIT_WORK(&rwork->work, psmouse_smbus_remove_i2c_device); rwork->client = client; - schedule_work(&rwork->work); + queue_work(psmouse_smbus_wq, &rwork->work); } } @@ -305,9 +307,14 @@ int __init psmouse_smbus_module_init(void) { int error; + psmouse_smbus_wq = alloc_workqueue("psmouse-smbus", 0, 0); + if (!psmouse_smbus_wq) + return -ENOMEM; + error = bus_register_notifier(&i2c_bus_type, &psmouse_smbus_notifier); if (error) { pr_err("failed to register i2c bus notifier: %d\n", error); + destroy_workqueue(psmouse_smbus_wq); return error; } @@ -317,5 +324,5 @@ int __init psmouse_smbus_module_init(void) void psmouse_smbus_module_exit(void) { bus_unregister_notifier(&i2c_bus_type, &psmouse_smbus_notifier); - flush_scheduled_work(); + destroy_workqueue(psmouse_smbus_wq); } diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index 42443ffba7c4..ea9eff7c8099 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -366,6 +366,19 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties) } /** + * vmmouse_reset - Disable vmmouse and reset + * + * @psmouse: Pointer to the psmouse struct + * + * Tries to disable vmmouse mode before enter 
suspend. + */ +static void vmmouse_reset(struct psmouse *psmouse) +{ + vmmouse_disable(psmouse); + psmouse_reset(psmouse); +} + +/** * vmmouse_disconnect - Take down vmmouse driver * * @psmouse: Pointer to the psmouse struct @@ -472,6 +485,7 @@ int vmmouse_init(struct psmouse *psmouse) psmouse->protocol_handler = vmmouse_process_byte; psmouse->disconnect = vmmouse_disconnect; psmouse->reconnect = vmmouse_reconnect; + psmouse->cleanup = vmmouse_reset; return 0; diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index 93b328c796c6..c5ce907535ef 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -733,7 +733,6 @@ remove_v4l2: v4l2_device_unregister(&f54->v4l2); remove_wq: cancel_delayed_work_sync(&f54->work); - flush_workqueue(f54->workqueue); destroy_workqueue(f54->workqueue); return ret; } diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 1581f6ef0927..24ec4844a5c3 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -931,8 +931,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data) } msleep(aiptek->curSetting.programmableDelay); - if ((ret = - aiptek_get_report(aiptek, 3, 2, buf, sizeof_buf)) != sizeof_buf) { + if (aiptek_get_report(aiptek, 3, 2, buf, sizeof_buf) != sizeof_buf) { dev_dbg(&aiptek->intf->dev, "aiptek_query failed: returned 0x%02x 0x%02x 0x%02x\n", buf[0], buf[1], buf[2]); diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 72e0b767e1ba..c175d44c52f3 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -337,13 +337,15 @@ static int stmfts_input_open(struct input_dev *dev) struct stmfts_data *sdata = input_get_drvdata(dev); int err; - err = pm_runtime_get_sync(&sdata->client->dev); - if (err < 0) - goto out; + err = pm_runtime_resume_and_get(&sdata->client->dev); + if (err) + return err; err = i2c_smbus_write_byte(sdata->client, STMFTS_MS_MT_SENSE_ON); - if (err) - goto out; + if (err) { + pm_runtime_put_sync(&sdata->client->dev); + return err; + } mutex_lock(&sdata->mutex); sdata->running = true; @@ -366,9 +368,7 @@ static int stmfts_input_open(struct input_dev *dev) "failed to enable touchkey\n"); } -out: - pm_runtime_put_noidle(&sdata->client->dev); - return err; + return 0; } static void stmfts_input_close(struct input_dev *dev) diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 47108ed44fbb..72d0f5e2f651 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -407,6 +407,7 @@ /* IOMMU IVINFO */ #define IOMMU_IVINFO_OFFSET 36 #define IOMMU_IVINFO_EFRSUP BIT(0) +#define IOMMU_IVINFO_DMA_REMAP BIT(1) /* IOMMU Feature Reporting Field (for IVHD type 10h */ #define IOMMU_FEAT_GASUP_SHIFT 6 @@ -449,6 +450,9 @@ extern struct irq_remap_table **irq_lookup_table; /* Interrupt remapping feature used? 
*/ extern bool amd_iommu_irq_remap; +/* IVRS indicates that pre-boot remapping was enabled */ +extern bool amdr_ivrs_remap_support; + /* kmem_cache to get tables with 128 byte alignement */ extern struct kmem_cache *amd_iommu_irq_cache; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 1a3ad58ba846..1d08f87e734b 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -83,7 +83,7 @@ #define ACPI_DEVFLAG_LINT1 0x80 #define ACPI_DEVFLAG_ATSDIS 0x10000000 -#define LOOP_TIMEOUT 100000 +#define LOOP_TIMEOUT 2000000 /* * ACPI table definitions * @@ -181,6 +181,7 @@ u32 amd_iommu_max_pasid __read_mostly = ~0; bool amd_iommu_v2_present __read_mostly; static bool amd_iommu_pc_present __read_mostly; +bool amdr_ivrs_remap_support __read_mostly; bool amd_iommu_force_isolation __read_mostly; @@ -325,6 +326,8 @@ static void __init early_iommu_features_init(struct amd_iommu *iommu, { if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) iommu->features = h->efr_reg; + if (amd_iommu_ivinfo & IOMMU_IVINFO_DMA_REMAP) + amdr_ivrs_remap_support = true; } /* Access to l1 and l2 indexed register spaces */ @@ -1985,8 +1988,7 @@ static int __init amd_iommu_init_pci(void) for_each_iommu(iommu) iommu_flush_all_caches(iommu); - if (!ret) - print_iommu_info(); + print_iommu_info(); out: return ret; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index b47220ac09ea..840831d5d2ad 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1838,20 +1838,10 @@ void amd_iommu_domain_update(struct protection_domain *domain) amd_iommu_domain_flush_complete(domain); } -static void __init amd_iommu_init_dma_ops(void) -{ - if (iommu_default_passthrough() || sme_me_mask) - x86_swiotlb_enable = true; - else - x86_swiotlb_enable = false; -} - int __init amd_iommu_init_api(void) { int err; - amd_iommu_init_dma_ops(); - err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) return err; @@ -2165,6 +2155,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) return (irq_remapping_enabled == 1); case IOMMU_CAP_NOEXEC: return false; + case IOMMU_CAP_PRE_BOOT_PROTECTION: + return amdr_ivrs_remap_support; default: break; } @@ -2274,6 +2266,12 @@ static int amd_iommu_def_domain_type(struct device *dev) return 0; } +static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain) +{ + /* IOMMU_PTE_FC is always set */ + return true; +} + const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, @@ -2296,6 +2294,7 @@ const struct iommu_ops amd_iommu_ops = { .flush_iotlb_all = amd_iommu_flush_iotlb_all, .iotlb_sync = amd_iommu_iotlb_sync, .free = amd_iommu_domain_free, + .enforce_cache_coherency = amd_iommu_enforce_cache_coherency, } }; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index e56b137ceabd..afb3efd565b7 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -956,6 +956,7 @@ static void __exit amd_iommu_v2_exit(void) { struct device_state *dev_state, *next; unsigned long flags; + LIST_HEAD(freelist); if (!amd_iommu_v2_supported()) return; @@ -975,11 +976,20 @@ static void __exit amd_iommu_v2_exit(void) put_device_state(dev_state); list_del(&dev_state->list); - free_device_state(dev_state); + list_add_tail(&dev_state->list, &freelist); } spin_unlock_irqrestore(&state_lock, flags); + /* + * Since free_device_state waits on the count to be zero, + * we need to free dev_state outside the spinlock. 
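The iommu_v2 exit path above relies on a common pattern: detach entries onto a private list while the spinlock is held, then perform the teardown, which may block, only after the lock is released. A generic sketch of that pattern with placeholder types:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct example_item {
		struct list_head list;
		/* ... payload ... */
	};

	/* Sketch only: empty 'head' and release its items outside 'lock'. */
	static void example_drain_and_free(struct list_head *head,
					   spinlock_t *lock)
	{
		struct example_item *item, *next;
		unsigned long flags;
		LIST_HEAD(freelist);

		spin_lock_irqsave(lock, flags);
		list_splice_init(head, &freelist);	/* detach under the lock */
		spin_unlock_irqrestore(lock, flags);

		list_for_each_entry_safe(item, next, &freelist, list) {
			list_del(&item->list);
			kfree(item);	/* blocking teardown would be safe here */
		}
	}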
+ */ + list_for_each_entry_safe(dev_state, next, &freelist, list) { + list_del(&dev_state->list); + free_device_state(dev_state); + } + destroy_workqueue(iommu_wq); } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index c623dae1e115..1ef7bbb4acf3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -6,6 +6,7 @@ #include <linux/mm.h> #include <linux/mmu_context.h> #include <linux/mmu_notifier.h> +#include <linux/sched/mm.h> #include <linux/slab.h> #include "arm-smmu-v3.h" @@ -96,9 +97,14 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) struct arm_smmu_ctx_desc *cd; struct arm_smmu_ctx_desc *ret = NULL; + /* Don't free the mm until we release the ASID */ + mmgrab(mm); + asid = arm64_mm_context_get(mm); - if (!asid) - return ERR_PTR(-ESRCH); + if (!asid) { + err = -ESRCH; + goto out_drop_mm; + } cd = kzalloc(sizeof(*cd), GFP_KERNEL); if (!cd) { @@ -165,6 +171,8 @@ out_free_cd: kfree(cd); out_put_context: arm64_mm_context_put(mm); +out_drop_mm: + mmdrop(mm); return err < 0 ? ERR_PTR(err) : ret; } @@ -173,6 +181,7 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) if (arm_smmu_free_asid(cd)) { /* Unpin ASID */ arm64_mm_context_put(cd->mm); + mmdrop(cd->mm); kfree(cd); } } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 627a3ed5ee8f..88817a3376ef 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3770,6 +3770,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev) /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; if (resource_size(res) < arm_smmu_resource_size(smmu)) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 2c25cce38060..658f3cc83278 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -211,7 +211,8 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) if (of_property_read_bool(np, "calxeda,smmu-secure-config-access")) smmu->impl = &calxeda_impl; - if (of_device_is_compatible(np, "nvidia,tegra194-smmu") || + if (of_device_is_compatible(np, "nvidia,tegra234-smmu") || + of_device_is_compatible(np, "nvidia,tegra194-smmu") || of_device_is_compatible(np, "nvidia,tegra186-smmu")) return nvidia_smmu_impl_init(smmu); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index ba6298c7140e..7820711c4560 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -408,6 +408,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { { .compatible = "qcom,sc7180-smmu-500" }, { .compatible = "qcom,sc7280-smmu-500" }, { .compatible = "qcom,sc8180x-smmu-500" }, + { .compatible = "qcom,sc8280xp-smmu-500" }, { .compatible = "qcom,sdm630-smmu-v2" }, { .compatible = "qcom,sdm845-smmu-500" }, { .compatible = "qcom,sm6125-smmu-500" }, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 568cce590ccc..2ed3594f384e 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1574,6 +1574,9 @@ static int arm_smmu_def_domain_type(struct device *dev) 
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); const struct arm_smmu_impl *impl = cfg->smmu->impl; + if (using_legacy_binding) + return IOMMU_DOMAIN_IDENTITY; + if (impl && impl->def_domain_type) return impl->def_domain_type(dev); @@ -2092,11 +2095,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ioaddr = res->start; - smmu->base = devm_ioremap_resource(dev, res); + smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); + ioaddr = res->start; /* * The resource size should effectively match the value of SMMU_TOP; * stash that temporarily until we know PAGESIZE to validate it with. diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 09f6e1c0f9c0..f90251572a5d 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -20,6 +20,7 @@ #include <linux/iommu.h> #include <linux/iova.h> #include <linux/irq.h> +#include <linux/list_sort.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/pci.h> @@ -414,6 +415,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, return 0; } +static int iommu_dma_ranges_sort(void *priv, const struct list_head *a, + const struct list_head *b) +{ + struct resource_entry *res_a = list_entry(a, typeof(*res_a), node); + struct resource_entry *res_b = list_entry(b, typeof(*res_b), node); + + return res_a->res->start > res_b->res->start; +} + static int iova_reserve_pci_windows(struct pci_dev *dev, struct iova_domain *iovad) { @@ -432,6 +442,7 @@ static int iova_reserve_pci_windows(struct pci_dev *dev, } /* Get reserved DMA windows from host bridge */ + list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort); resource_list_for_each_entry(window, &bridge->dma_ranges) { end = window->res->start - window->offset; resv_iova: @@ -440,7 +451,7 @@ resv_iova: hi = iova_pfn(iovad, end); reserve_iova(iovad, lo, hi); } else if (end < start) { - /* dma_ranges list should be sorted */ + /* DMA ranges should be non-overlapping */ dev_err(&dev->dev, "Failed to reserve IOVA [%pa-%pa]\n", &start, &end); @@ -776,6 +787,7 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; struct page **pages; dma_addr_t iova; + ssize_t ret; if (static_branch_unlikely(&iommu_deferred_attach_enabled) && iommu_deferred_attach(dev, domain)) @@ -813,8 +825,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev, arch_dma_prep_coherent(sg_page(sg), sg->length); } - if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot) - < size) + ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot); + if (ret < 0 || ret < size) goto out_free_sg; sgt->sgl->dma_address = iova; @@ -971,6 +983,11 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, void *padding_start; size_t padding_size, aligned_size; + if (!is_swiotlb_active(dev)) { + dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); + return DMA_MAPPING_ERROR; + } + aligned_size = iova_align(iovad, size); phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, iova_mask(iovad), dir, attrs); @@ -1209,7 +1226,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, * implementation - it knows better than we do. 
*/ ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot); - if (ret < iova_len) + if (ret < 0 || ret < iova_len) goto out_free_iova; return __finalise_sg(dev, sg, nents, iova); diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index fc38b1fba7cf..0d03f837a5d4 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -11,6 +11,9 @@ #include <linux/fsl/guts.h> #include <linux/interrupt.h> #include <linux/genalloc.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> #include <asm/mpc85xx.h> diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 69a4a62dc3b9..94b4589dc67c 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -9,6 +9,7 @@ #include "fsl_pamu_domain.h" +#include <linux/platform_device.h> #include <sysdev/fsl_pci.h> /* diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index ba9a63cac47c..44016594831d 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -533,33 +533,6 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) rcu_read_unlock(); } -static bool domain_update_iommu_snooping(struct intel_iommu *skip) -{ - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - bool ret = true; - - rcu_read_lock(); - for_each_active_iommu(iommu, drhd) { - if (iommu != skip) { - /* - * If the hardware is operating in the scalable mode, - * the snooping control is always supported since we - * always set PASID-table-entry.PGSNP bit if the domain - * is managed outside (UNMANAGED). - */ - if (!sm_supported(iommu) && - !ecap_sc_support(iommu->ecap)) { - ret = false; - break; - } - } - } - rcu_read_unlock(); - - return ret; -} - static int domain_update_iommu_superpage(struct dmar_domain *domain, struct intel_iommu *skip) { @@ -641,7 +614,6 @@ static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) static void domain_update_iommu_cap(struct dmar_domain *domain) { domain_update_iommu_coherency(domain); - domain->iommu_snooping = domain_update_iommu_snooping(NULL); domain->iommu_superpage = domain_update_iommu_superpage(domain, NULL); /* @@ -2460,7 +2432,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu, if (level == 5) flags |= PASID_FLAG_FL5LP; - if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED) + if (domain->force_snooping) flags |= PASID_FLAG_PAGE_SNOOP; return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid, @@ -2474,64 +2446,6 @@ static bool dev_is_real_dma_subdevice(struct device *dev) pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev); } -static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, - int bus, int devfn, - struct device *dev, - struct dmar_domain *domain) -{ - struct device_domain_info *info = dev_iommu_priv_get(dev); - unsigned long flags; - int ret; - - spin_lock_irqsave(&device_domain_lock, flags); - info->domain = domain; - spin_lock(&iommu->lock); - ret = domain_attach_iommu(domain, iommu); - spin_unlock(&iommu->lock); - if (ret) { - spin_unlock_irqrestore(&device_domain_lock, flags); - return NULL; - } - list_add(&info->link, &domain->devices); - spin_unlock_irqrestore(&device_domain_lock, flags); - - /* PASID table is mandatory for a PCI device in scalable mode. 
*/ - if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { - ret = intel_pasid_alloc_table(dev); - if (ret) { - dev_err(dev, "PASID table allocation failed\n"); - dmar_remove_one_dev_info(dev); - return NULL; - } - - /* Setup the PASID entry for requests without PASID: */ - spin_lock_irqsave(&iommu->lock, flags); - if (hw_pass_through && domain_type_is_si(domain)) - ret = intel_pasid_setup_pass_through(iommu, domain, - dev, PASID_RID2PASID); - else if (domain_use_first_level(domain)) - ret = domain_setup_first_level(iommu, domain, dev, - PASID_RID2PASID); - else - ret = intel_pasid_setup_second_level(iommu, domain, - dev, PASID_RID2PASID); - spin_unlock_irqrestore(&iommu->lock, flags); - if (ret) { - dev_err(dev, "Setup RID2PASID failed\n"); - dmar_remove_one_dev_info(dev); - return NULL; - } - } - - if (dev && domain_context_mapping(domain, dev)) { - dev_err(dev, "Domain context map failed\n"); - dmar_remove_one_dev_info(dev); - return NULL; - } - - return domain; -} - static int iommu_domain_identity_map(struct dmar_domain *domain, unsigned long first_vpfn, unsigned long last_vpfn) @@ -2607,17 +2521,62 @@ static int __init si_domain_init(int hw) static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) { - struct dmar_domain *ndomain; + struct device_domain_info *info = dev_iommu_priv_get(dev); struct intel_iommu *iommu; + unsigned long flags; u8 bus, devfn; + int ret; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; - ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); - if (ndomain != domain) - return -EBUSY; + spin_lock_irqsave(&device_domain_lock, flags); + info->domain = domain; + spin_lock(&iommu->lock); + ret = domain_attach_iommu(domain, iommu); + spin_unlock(&iommu->lock); + if (ret) { + spin_unlock_irqrestore(&device_domain_lock, flags); + return ret; + } + list_add(&info->link, &domain->devices); + spin_unlock_irqrestore(&device_domain_lock, flags); + + /* PASID table is mandatory for a PCI device in scalable mode. 
*/ + if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) { + ret = intel_pasid_alloc_table(dev); + if (ret) { + dev_err(dev, "PASID table allocation failed\n"); + dmar_remove_one_dev_info(dev); + return ret; + } + + /* Setup the PASID entry for requests without PASID: */ + spin_lock_irqsave(&iommu->lock, flags); + if (hw_pass_through && domain_type_is_si(domain)) + ret = intel_pasid_setup_pass_through(iommu, domain, + dev, PASID_RID2PASID); + else if (domain_use_first_level(domain)) + ret = domain_setup_first_level(iommu, domain, dev, + PASID_RID2PASID); + else + ret = intel_pasid_setup_second_level(iommu, domain, + dev, PASID_RID2PASID); + spin_unlock_irqrestore(&iommu->lock, flags); + if (ret) { + dev_err(dev, "Setup RID2PASID failed\n"); + dmar_remove_one_dev_info(dev); + return ret; + } + } + + ret = domain_context_mapping(domain, dev); + if (ret) { + dev_err(dev, "Domain context map failed\n"); + dmar_remove_one_dev_info(dev); + return ret; + } return 0; } @@ -3607,12 +3566,7 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru) iommu->name); return -ENXIO; } - if (!ecap_sc_support(iommu->ecap) && - domain_update_iommu_snooping(iommu)) { - pr_warn("%s: Doesn't support snooping.\n", - iommu->name); - return -ENXIO; - } + sp = domain_update_iommu_superpage(NULL, iommu) - 1; if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { pr_warn("%s: Doesn't support large page.\n", @@ -4304,7 +4258,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) domain->agaw = width_to_agaw(adjust_width); domain->iommu_coherency = false; - domain->iommu_snooping = false; domain->iommu_superpage = 0; domain->max_addr = 0; @@ -4369,6 +4322,9 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (!iommu) return -ENODEV; + if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) + return -EOPNOTSUPP; + /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4443,7 +4399,7 @@ static int intel_iommu_map(struct iommu_domain *domain, prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) prot |= DMA_PTE_WRITE; - if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) + if (dmar_domain->set_pte_snp) prot |= DMA_PTE_SNP; max_addr = iova + size; @@ -4566,12 +4522,71 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, return phys; } +static bool domain_support_force_snooping(struct dmar_domain *domain) +{ + struct device_domain_info *info; + bool support = true; + + assert_spin_locked(&device_domain_lock); + list_for_each_entry(info, &domain->devices, link) { + if (!ecap_sc_support(info->iommu->ecap)) { + support = false; + break; + } + } + + return support; +} + +static void domain_set_force_snooping(struct dmar_domain *domain) +{ + struct device_domain_info *info; + + assert_spin_locked(&device_domain_lock); + + /* + * Second level page table supports per-PTE snoop control. The + * iommu_map() interface will handle this by setting SNP bit. 
+ */ + if (!domain_use_first_level(domain)) { + domain->set_pte_snp = true; + return; + } + + list_for_each_entry(info, &domain->devices, link) + intel_pasid_setup_page_snoop_control(info->iommu, info->dev, + PASID_RID2PASID); +} + +static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long flags; + + if (dmar_domain->force_snooping) + return true; + + spin_lock_irqsave(&device_domain_lock, flags); + if (!domain_support_force_snooping(dmar_domain)) { + spin_unlock_irqrestore(&device_domain_lock, flags); + return false; + } + + domain_set_force_snooping(dmar_domain); + dmar_domain->force_snooping = true; + spin_unlock_irqrestore(&device_domain_lock, flags); + + return true; +} + static bool intel_iommu_capable(enum iommu_cap cap) { if (cap == IOMMU_CAP_CACHE_COHERENCY) - return domain_update_iommu_snooping(NULL); + return true; if (cap == IOMMU_CAP_INTR_REMAP) return irq_remapping_enabled == 1; + if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION) + return dmar_platform_optin(); return false; } @@ -4919,6 +4934,7 @@ const struct iommu_ops intel_iommu_ops = { .iotlb_sync = intel_iommu_tlb_sync, .iova_to_phys = intel_iommu_iova_to_phys, .free = intel_iommu_domain_free, + .enforce_cache_coherency = intel_iommu_enforce_cache_coherency, } }; diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index f8d215d85695..cb4c1d0cf25c 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -710,9 +710,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, pasid_set_fault_enable(pte); pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED) - pasid_set_pgsnp(pte); - /* * Since it is a second level only translation setup, we should * set SRE bit as well (addresses are expected to be GPAs). @@ -762,3 +759,45 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return 0; } + +/* + * Set the page snoop control for a pasid entry which has been set up. + */ +void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, + struct device *dev, u32 pasid) +{ + struct pasid_entry *pte; + u16 did; + + spin_lock(&iommu->lock); + pte = intel_pasid_get_entry(dev, pasid); + if (WARN_ON(!pte || !pasid_pte_is_present(pte))) { + spin_unlock(&iommu->lock); + return; + } + + pasid_set_pgsnp(pte); + did = pasid_get_domain_id(pte); + spin_unlock(&iommu->lock); + + if (!ecap_coherent(iommu->ecap)) + clflush_cache_range(pte, sizeof(*pte)); + + /* + * VT-d spec 3.4 table23 states guides for cache invalidation: + * + * - PASID-selective-within-Domain PASID-cache invalidation + * - PASID-selective PASID-based IOTLB invalidation + * - If (pasid is RID_PASID) + * - Global Device-TLB invalidation to affected functions + * Else + * - PASID-based Device-TLB invalidation (with S=1 and + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions + */ + pasid_cache_invalidation_with_pasid(iommu, did, pasid); + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); + + /* Device IOTLB doesn't need to be flushed in caching mode. 
*/ + if (!cap_caching_mode(iommu->cap)) + devtlb_invalidation_with_pasid(iommu, dev, pasid); +} diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index ab4408c824a5..583ea67fc783 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -123,4 +123,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, bool fault_ignore); int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid); void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid); +void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, + struct device *dev, u32 pasid); #endif /* __INTEL_PASID_H */ diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 857d4c2fd1a2..847ad47a2dfd 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -18,7 +18,6 @@ #include <linux/errno.h> #include <linux/iommu.h> #include <linux/idr.h> -#include <linux/notifier.h> #include <linux/err.h> #include <linux/pci.h> #include <linux/bitops.h> @@ -40,14 +39,16 @@ struct iommu_group { struct kobject *devices_kobj; struct list_head devices; struct mutex mutex; - struct blocking_notifier_head notifier; void *iommu_data; void (*iommu_data_release)(void *iommu_data); char *name; int id; struct iommu_domain *default_domain; + struct iommu_domain *blocking_domain; struct iommu_domain *domain; struct list_head entry; + unsigned int owner_cnt; + void *owner; }; struct group_device { @@ -82,8 +83,8 @@ static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev); static int __iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group); -static void __iommu_detach_group(struct iommu_domain *domain, - struct iommu_group *group); +static int __iommu_group_set_domain(struct iommu_group *group, + struct iommu_domain *new_domain); static int iommu_create_device_direct_mappings(struct iommu_group *group, struct device *dev); static struct iommu_group *iommu_group_get_for_dev(struct device *dev); @@ -294,7 +295,11 @@ int iommu_probe_device(struct device *dev) mutex_lock(&group->mutex); iommu_alloc_default_domain(group, dev); - if (group->default_domain) { + /* + * If device joined an existing group which has been claimed, don't + * attach the default domain. + */ + if (group->default_domain && !group->owner) { ret = __iommu_attach_device(group->default_domain, dev); if (ret) { mutex_unlock(&group->mutex); @@ -599,6 +604,8 @@ static void iommu_group_release(struct kobject *kobj) if (group->default_domain) iommu_domain_free(group->default_domain); + if (group->blocking_domain) + iommu_domain_free(group->blocking_domain); kfree(group->name); kfree(group); @@ -633,7 +640,6 @@ struct iommu_group *iommu_group_alloc(void) mutex_init(&group->mutex); INIT_LIST_HEAD(&group->devices); INIT_LIST_HEAD(&group->entry); - BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL); if (ret < 0) { @@ -906,10 +912,6 @@ rename: if (ret) goto err_put_group; - /* Notify any listeners about change to group. */ - blocking_notifier_call_chain(&group->notifier, - IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev); - trace_add_device_to_group(group->id, dev); dev_info(dev, "Adding to iommu group %d\n", group->id); @@ -951,10 +953,6 @@ void iommu_group_remove_device(struct device *dev) dev_info(dev, "Removing from iommu group %d\n", group->id); - /* Pre-notify listeners that a device is being removed. 
*/ - blocking_notifier_call_chain(&group->notifier, - IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev); - mutex_lock(&group->mutex); list_for_each_entry(tmp_device, &group->devices, list) { if (tmp_device->dev == dev) { @@ -1077,36 +1075,6 @@ void iommu_group_put(struct iommu_group *group) EXPORT_SYMBOL_GPL(iommu_group_put); /** - * iommu_group_register_notifier - Register a notifier for group changes - * @group: the group to watch - * @nb: notifier block to signal - * - * This function allows iommu group users to track changes in a group. - * See include/linux/iommu.h for actions sent via this notifier. Caller - * should hold a reference to the group throughout notifier registration. - */ -int iommu_group_register_notifier(struct iommu_group *group, - struct notifier_block *nb) -{ - return blocking_notifier_chain_register(&group->notifier, nb); -} -EXPORT_SYMBOL_GPL(iommu_group_register_notifier); - -/** - * iommu_group_unregister_notifier - Unregister a notifier - * @group: the group to watch - * @nb: notifier block to signal - * - * Unregister a previously registered group notifier block. - */ -int iommu_group_unregister_notifier(struct iommu_group *group, - struct notifier_block *nb) -{ - return blocking_notifier_chain_unregister(&group->notifier, nb); -} -EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier); - -/** * iommu_register_device_fault_handler() - Register a device fault handler * @dev: the device * @handler: the fault handler @@ -1651,14 +1619,8 @@ static int remove_iommu_group(struct device *dev, void *data) static int iommu_bus_notifier(struct notifier_block *nb, unsigned long action, void *data) { - unsigned long group_action = 0; struct device *dev = data; - struct iommu_group *group; - /* - * ADD/DEL call into iommu driver ops if provided, which may - * result in ADD/DEL notifiers to group->notifier - */ if (action == BUS_NOTIFY_ADD_DEVICE) { int ret; @@ -1669,34 +1631,6 @@ static int iommu_bus_notifier(struct notifier_block *nb, return NOTIFY_OK; } - /* - * Remaining BUS_NOTIFYs get filtered and republished to the - * group, if anyone is listening - */ - group = iommu_group_get(dev); - if (!group) - return 0; - - switch (action) { - case BUS_NOTIFY_BIND_DRIVER: - group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER; - break; - case BUS_NOTIFY_BOUND_DRIVER: - group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER; - break; - case BUS_NOTIFY_UNBIND_DRIVER: - group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER; - break; - case BUS_NOTIFY_UNBOUND_DRIVER: - group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER; - break; - } - - if (group_action) - blocking_notifier_call_chain(&group->notifier, - group_action, dev); - - iommu_group_put(group); return 0; } @@ -1913,6 +1847,29 @@ bool iommu_present(struct bus_type *bus) } EXPORT_SYMBOL_GPL(iommu_present); +/** + * device_iommu_capable() - check for a general IOMMU capability + * @dev: device to which the capability would be relevant, if available + * @cap: IOMMU capability + * + * Return: true if an IOMMU is present and supports the given capability + * for the given device, otherwise false. 
+ */ +bool device_iommu_capable(struct device *dev, enum iommu_cap cap) +{ + const struct iommu_ops *ops; + + if (!dev->iommu || !dev->iommu->iommu_dev) + return false; + + ops = dev_iommu_ops(dev); + if (!ops->capable) + return false; + + return ops->capable(cap); +} +EXPORT_SYMBOL_GPL(device_iommu_capable); + bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) { if (!bus->iommu_ops || !bus->iommu_ops->capable) @@ -1983,6 +1940,24 @@ void iommu_domain_free(struct iommu_domain *domain) } EXPORT_SYMBOL_GPL(iommu_domain_free); +/* + * Put the group's domain back to the appropriate core-owned domain - either the + * standard kernel-mode DMA configuration or an all-DMA-blocked domain. + */ +static void __iommu_group_set_core_domain(struct iommu_group *group) +{ + struct iommu_domain *new_domain; + int ret; + + if (group->owner) + new_domain = group->blocking_domain; + else + new_domain = group->default_domain; + + ret = __iommu_group_set_domain(group, new_domain); + WARN(ret, "iommu driver failed to attach the default/blocking domain"); +} + static int __iommu_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -2039,9 +2014,6 @@ static void __iommu_detach_device(struct iommu_domain *domain, if (iommu_is_attach_deferred(dev)) return; - if (unlikely(domain->ops->detach_dev == NULL)) - return; - domain->ops->detach_dev(domain, dev); trace_detach_device_from_domain(dev); } @@ -2055,12 +2027,10 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) return; mutex_lock(&group->mutex); - if (iommu_group_device_count(group) != 1) { - WARN_ON(1); + if (WARN_ON(domain != group->domain) || + WARN_ON(iommu_group_device_count(group) != 1)) goto out_unlock; - } - - __iommu_detach_group(domain, group); + __iommu_group_set_core_domain(group); out_unlock: mutex_unlock(&group->mutex); @@ -2116,7 +2086,8 @@ static int __iommu_attach_group(struct iommu_domain *domain, { int ret; - if (group->default_domain && group->domain != group->default_domain) + if (group->domain && group->domain != group->default_domain && + group->domain != group->blocking_domain) return -EBUSY; ret = __iommu_group_for_each_dev(group, domain, @@ -2148,34 +2119,49 @@ static int iommu_group_do_detach_device(struct device *dev, void *data) return 0; } -static void __iommu_detach_group(struct iommu_domain *domain, - struct iommu_group *group) +static int __iommu_group_set_domain(struct iommu_group *group, + struct iommu_domain *new_domain) { int ret; - if (!group->default_domain) { - __iommu_group_for_each_dev(group, domain, + if (group->domain == new_domain) + return 0; + + /* + * New drivers should support default domains and so the detach_dev() op + * will never be called. Otherwise the NULL domain represents some + * platform specific behavior. + */ + if (!new_domain) { + if (WARN_ON(!group->domain->ops->detach_dev)) + return -EINVAL; + __iommu_group_for_each_dev(group, group->domain, iommu_group_do_detach_device); group->domain = NULL; - return; + return 0; } - if (group->domain == group->default_domain) - return; - - /* Detach by re-attaching to the default domain */ - ret = __iommu_group_for_each_dev(group, group->default_domain, + /* + * Changing the domain is done by calling attach_dev() on the new + * domain. This switch does not have to be atomic and DMA can be + * discarded during the transition. DMA must only be able to access + * either new_domain or group->domain, never something else. 
+ * + * Note that this is called in error unwind paths, attaching to a + * domain that has already been attached cannot fail. + */ + ret = __iommu_group_for_each_dev(group, new_domain, iommu_group_do_attach_device); - if (ret != 0) - WARN_ON(1); - else - group->domain = group->default_domain; + if (ret) + return ret; + group->domain = new_domain; + return 0; } void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) { mutex_lock(&group->mutex); - __iommu_detach_group(domain, group); + __iommu_group_set_core_domain(group); mutex_unlock(&group->mutex); } EXPORT_SYMBOL_GPL(iommu_detach_group); @@ -3102,3 +3088,167 @@ out: return ret; } + +/** + * iommu_device_use_default_domain() - Device driver wants to handle device + * DMA through the kernel DMA API. + * @dev: The device. + * + * The device driver about to bind @dev wants to do DMA through the kernel + * DMA API. Return 0 if it is allowed, otherwise an error. + */ +int iommu_device_use_default_domain(struct device *dev) +{ + struct iommu_group *group = iommu_group_get(dev); + int ret = 0; + + if (!group) + return 0; + + mutex_lock(&group->mutex); + if (group->owner_cnt) { + if (group->domain != group->default_domain || + group->owner) { + ret = -EBUSY; + goto unlock_out; + } + } + + group->owner_cnt++; + +unlock_out: + mutex_unlock(&group->mutex); + iommu_group_put(group); + + return ret; +} + +/** + * iommu_device_unuse_default_domain() - Device driver stops handling device + * DMA through the kernel DMA API. + * @dev: The device. + * + * The device driver doesn't want to do DMA through kernel DMA API anymore. + * It must be called after iommu_device_use_default_domain(). + */ +void iommu_device_unuse_default_domain(struct device *dev) +{ + struct iommu_group *group = iommu_group_get(dev); + + if (!group) + return; + + mutex_lock(&group->mutex); + if (!WARN_ON(!group->owner_cnt)) + group->owner_cnt--; + + mutex_unlock(&group->mutex); + iommu_group_put(group); +} + +static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) +{ + struct group_device *dev = + list_first_entry(&group->devices, struct group_device, list); + + if (group->blocking_domain) + return 0; + + group->blocking_domain = + __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED); + if (!group->blocking_domain) { + /* + * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED + * create an empty domain instead. + */ + group->blocking_domain = __iommu_domain_alloc( + dev->dev->bus, IOMMU_DOMAIN_UNMANAGED); + if (!group->blocking_domain) + return -EINVAL; + } + return 0; +} + +/** + * iommu_group_claim_dma_owner() - Set DMA ownership of a group + * @group: The group. + * @owner: Caller specified pointer. Used for exclusive ownership. + * + * This is to support backward compatibility for vfio which manages + * the dma ownership in iommu_group level. New invocations on this + * interface should be prohibited. 
+ */ +int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) +{ + int ret = 0; + + mutex_lock(&group->mutex); + if (group->owner_cnt) { + ret = -EPERM; + goto unlock_out; + } else { + if (group->domain && group->domain != group->default_domain) { + ret = -EBUSY; + goto unlock_out; + } + + ret = __iommu_group_alloc_blocking_domain(group); + if (ret) + goto unlock_out; + + ret = __iommu_group_set_domain(group, group->blocking_domain); + if (ret) + goto unlock_out; + group->owner = owner; + } + + group->owner_cnt++; +unlock_out: + mutex_unlock(&group->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner); + +/** + * iommu_group_release_dma_owner() - Release DMA ownership of a group + * @group: The group. + * + * Release the DMA ownership claimed by iommu_group_claim_dma_owner(). + */ +void iommu_group_release_dma_owner(struct iommu_group *group) +{ + int ret; + + mutex_lock(&group->mutex); + if (WARN_ON(!group->owner_cnt || !group->owner)) + goto unlock_out; + + group->owner_cnt = 0; + group->owner = NULL; + ret = __iommu_group_set_domain(group, group->default_domain); + WARN(ret, "iommu driver failed to attach the default domain"); + +unlock_out: + mutex_unlock(&group->mutex); +} +EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner); + +/** + * iommu_group_dma_owner_claimed() - Query group dma ownership status + * @group: The group. + * + * This provides status query on a given group. It is racy and only for + * non-binding status reporting. + */ +bool iommu_group_dma_owner_claimed(struct iommu_group *group) +{ + unsigned int user; + + mutex_lock(&group->mutex); + user = group->owner_cnt; + mutex_unlock(&group->mutex); + + return user; +} +EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed); diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 50f57624610f..f09aedfdd462 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -583,7 +583,7 @@ static void print_ctx_regs(void __iomem *base, int ctx) GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); } -static void insert_iommu_master(struct device *dev, +static int insert_iommu_master(struct device *dev, struct msm_iommu_dev **iommu, struct of_phandle_args *spec) { @@ -592,6 +592,10 @@ static void insert_iommu_master(struct device *dev, if (list_empty(&(*iommu)->ctx_list)) { master = kzalloc(sizeof(*master), GFP_ATOMIC); + if (!master) { + dev_err(dev, "Failed to allocate iommu_master\n"); + return -ENOMEM; + } master->of_node = dev->of_node; list_add(&master->list, &(*iommu)->ctx_list); dev_iommu_priv_set(dev, master); @@ -601,30 +605,34 @@ static void insert_iommu_master(struct device *dev, if (master->mids[sid] == spec->args[0]) { dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n", sid); - return; + return 0; } master->mids[master->num_mids++] = spec->args[0]; + return 0; } static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *spec) { - struct msm_iommu_dev *iommu; + struct msm_iommu_dev *iommu = NULL, *iter; unsigned long flags; int ret = 0; spin_lock_irqsave(&msm_iommu_lock, flags); - list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) - if (iommu->dev->of_node == spec->np) + list_for_each_entry(iter, &qcom_iommu_devices, dev_node) { + if (iter->dev->of_node == spec->np) { + iommu = iter; break; + } + } - if (!iommu || iommu->dev->of_node != spec->np) { + if (!iommu) { ret = -ENODEV; goto fail; } - insert_iommu_master(dev, &iommu, spec); + ret = insert_iommu_master(dev, &iommu, spec); fail: spin_unlock_irqrestore(&msm_iommu_lock, flags); diff --git 
a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 6fd75a60abd6..bb9dd92c9898 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -14,12 +14,14 @@ #include <linux/io.h> #include <linux/iommu.h> #include <linux/iopoll.h> +#include <linux/io-pgtable.h> #include <linux/list.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/pci.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> @@ -29,7 +31,7 @@ #include <asm/barrier.h> #include <soc/mediatek/smi.h> -#include "mtk_iommu.h" +#include <dt-bindings/memory/mtk-memory-port.h> #define REG_MMU_PT_BASE_ADDR 0x000 #define MMU_PT_ADDR_MASK GENMASK(31, 7) @@ -51,6 +53,8 @@ #define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19)) #define REG_MMU_DCM_DIS 0x050 +#define F_MMU_DCM BIT(8) + #define REG_MMU_WR_LEN_CTRL 0x054 #define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21)) @@ -103,10 +107,15 @@ #define REG_MMU1_INT_ID 0x154 #define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7) #define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3) +#define F_MMU_INT_ID_COMM_ID_EXT(a) (((a) >> 10) & 0x7) +#define F_MMU_INT_ID_SUB_COMM_ID_EXT(a) (((a) >> 7) & 0x7) #define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) #define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) #define MTK_PROTECT_PA_ALIGN 256 +#define MTK_IOMMU_BANK_SZ 0x1000 + +#define PERICFG_IOMMU_1 0x714 #define HAS_4GB_MODE BIT(0) /* HW will use the EMI clock if there isn't the "bclk". */ @@ -114,25 +123,147 @@ #define HAS_VLD_PA_RNG BIT(2) #define RESET_AXI BIT(3) #define OUT_ORDER_WR_EN BIT(4) -#define HAS_SUB_COMM BIT(5) -#define WR_THROT_EN BIT(6) -#define HAS_LEGACY_IVRP_PADDR BIT(7) -#define IOVA_34_EN BIT(8) +#define HAS_SUB_COMM_2BITS BIT(5) +#define HAS_SUB_COMM_3BITS BIT(6) +#define WR_THROT_EN BIT(7) +#define HAS_LEGACY_IVRP_PADDR BIT(8) +#define IOVA_34_EN BIT(9) +#define SHARE_PGTABLE BIT(10) /* 2 HW share pgtable */ +#define DCM_DISABLE BIT(11) +#define STD_AXI_MODE BIT(12) /* For non MM iommu */ +/* 2 bits: iommu type */ +#define MTK_IOMMU_TYPE_MM (0x0 << 13) +#define MTK_IOMMU_TYPE_INFRA (0x1 << 13) +#define MTK_IOMMU_TYPE_MASK (0x3 << 13) +/* PM and clock always on. e.g. 
infra iommu */ +#define PM_CLK_AO BIT(15) +#define IFA_IOMMU_PCIE_SUPPORT BIT(16) + +#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \ + ((((pdata)->flags) & (mask)) == (_x)) + +#define MTK_IOMMU_HAS_FLAG(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, _x) +#define MTK_IOMMU_IS_TYPE(pdata, _x) MTK_IOMMU_HAS_FLAG_MASK(pdata, _x,\ + MTK_IOMMU_TYPE_MASK) + +#define MTK_INVALID_LARBID MTK_LARB_NR_MAX + +#define MTK_LARB_COM_MAX 8 +#define MTK_LARB_SUBCOM_MAX 8 + +#define MTK_IOMMU_GROUP_MAX 8 +#define MTK_IOMMU_BANK_MAX 5 + +enum mtk_iommu_plat { + M4U_MT2712, + M4U_MT6779, + M4U_MT8167, + M4U_MT8173, + M4U_MT8183, + M4U_MT8186, + M4U_MT8192, + M4U_MT8195, +}; + +struct mtk_iommu_iova_region { + dma_addr_t iova_base; + unsigned long long size; +}; + +struct mtk_iommu_suspend_reg { + u32 misc_ctrl; + u32 dcm_dis; + u32 ctrl_reg; + u32 vld_pa_rng; + u32 wr_len_ctrl; + + u32 int_control[MTK_IOMMU_BANK_MAX]; + u32 int_main_control[MTK_IOMMU_BANK_MAX]; + u32 ivrp_paddr[MTK_IOMMU_BANK_MAX]; +}; + +struct mtk_iommu_plat_data { + enum mtk_iommu_plat m4u_plat; + u32 flags; + u32 inv_sel_reg; + + char *pericfg_comp_str; + struct list_head *hw_list; + unsigned int iova_region_nr; + const struct mtk_iommu_iova_region *iova_region; + + u8 banks_num; + bool banks_enable[MTK_IOMMU_BANK_MAX]; + unsigned int banks_portmsk[MTK_IOMMU_BANK_MAX]; + unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX]; +}; + +struct mtk_iommu_bank_data { + void __iomem *base; + int irq; + u8 id; + struct device *parent_dev; + struct mtk_iommu_data *parent_data; + spinlock_t tlb_lock; /* lock for tlb range flush */ + struct mtk_iommu_domain *m4u_dom; /* Each bank has a domain */ +}; + +struct mtk_iommu_data { + struct device *dev; + struct clk *bclk; + phys_addr_t protect_base; /* protect memory base */ + struct mtk_iommu_suspend_reg reg; + struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX]; + bool enable_4GB; -#define MTK_IOMMU_HAS_FLAG(pdata, _x) \ - ((((pdata)->flags) & (_x)) == (_x)) + struct iommu_device iommu; + const struct mtk_iommu_plat_data *plat_data; + struct device *smicomm_dev; + + struct mtk_iommu_bank_data *bank; + + struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ + struct regmap *pericfg; + + struct mutex mutex; /* Protect m4u_group/m4u_dom above */ + + /* + * In the sharing pgtable case, list data->list to the global list like m4ulist. + * In the non-sharing pgtable case, list data->list to the itself hw_list_head. 
+ */ + struct list_head *hw_list; + struct list_head hw_list_head; + struct list_head list; + struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; +}; struct mtk_iommu_domain { struct io_pgtable_cfg cfg; struct io_pgtable_ops *iop; - struct mtk_iommu_data *data; + struct mtk_iommu_bank_data *bank; struct iommu_domain domain; + + struct mutex mutex; /* Protect "data" in this structure */ }; +static int mtk_iommu_bind(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + + return component_bind_all(dev, &data->larb_imu); +} + +static void mtk_iommu_unbind(struct device *dev) +{ + struct mtk_iommu_data *data = dev_get_drvdata(dev); + + component_unbind_all(dev, &data->larb_imu); +} + static const struct iommu_ops mtk_iommu_ops; -static int mtk_iommu_hw_init(const struct mtk_iommu_data *data); +static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid); #define MTK_IOMMU_TLB_ADDR(iova) ({ \ dma_addr_t _addr = iova; \ @@ -165,42 +296,28 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data); static LIST_HEAD(m4ulist); /* List all the M4U HWs */ -#define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list) - -struct mtk_iommu_iova_region { - dma_addr_t iova_base; - unsigned long long size; -}; +#define for_each_m4u(data, head) list_for_each_entry(data, head, list) static const struct mtk_iommu_iova_region single_domain[] = { {.iova_base = 0, .size = SZ_4G}, }; static const struct mtk_iommu_iova_region mt8192_multi_dom[] = { - { .iova_base = 0x0, .size = SZ_4G}, /* disp: 0 ~ 4G */ + { .iova_base = 0x0, .size = SZ_4G}, /* 0 ~ 4G */ #if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) - { .iova_base = SZ_4G, .size = SZ_4G}, /* vdec: 4G ~ 8G */ - { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* CAM/MDP: 8G ~ 12G */ + { .iova_base = SZ_4G, .size = SZ_4G}, /* 4G ~ 8G */ + { .iova_base = SZ_4G * 2, .size = SZ_4G}, /* 8G ~ 12G */ + { .iova_base = SZ_4G * 3, .size = SZ_4G}, /* 12G ~ 16G */ + { .iova_base = 0x240000000ULL, .size = 0x4000000}, /* CCU0 */ { .iova_base = 0x244000000ULL, .size = 0x4000000}, /* CCU1 */ #endif }; -/* - * There may be 1 or 2 M4U HWs, But we always expect they are in the same domain - * for the performance. - * - * Here always return the mtk_iommu_data of the first probed M4U where the - * iommu domain information is recorded. - */ -static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void) +/* If 2 M4U share a domain(use the same hwlist), Put the corresponding info in first data.*/ +static struct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist) { - struct mtk_iommu_data *data; - - for_each_m4u(data) - return data; - - return NULL; + return list_first_entry(hwlist, struct mtk_iommu_data, list); } static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) @@ -210,46 +327,72 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) { + /* Tlb flush all always is in bank0. 
*/ + struct mtk_iommu_bank_data *bank = &data->bank[0]; + void __iomem *base = bank->base; unsigned long flags; - spin_lock_irqsave(&data->tlb_lock, flags); - writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, - data->base + data->plat_data->inv_sel_reg); - writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); + spin_lock_irqsave(&bank->tlb_lock, flags); + writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, base + data->plat_data->inv_sel_reg); + writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE); wmb(); /* Make sure the tlb flush all done */ - spin_unlock_irqrestore(&data->tlb_lock, flags); + spin_unlock_irqrestore(&bank->tlb_lock, flags); } static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, - size_t granule, - struct mtk_iommu_data *data) + struct mtk_iommu_bank_data *bank) { + struct list_head *head = bank->parent_data->hw_list; + struct mtk_iommu_bank_data *curbank; + struct mtk_iommu_data *data; + bool check_pm_status; unsigned long flags; + void __iomem *base; int ret; u32 tmp; - for_each_m4u(data) { - if (pm_runtime_get_if_in_use(data->dev) <= 0) - continue; + for_each_m4u(data, head) { + /* + * To avoid resume the iommu device frequently when the iommu device + * is not active, it doesn't always call pm_runtime_get here, then tlb + * flush depends on the tlb flush all in the runtime resume. + * + * There are 2 special cases: + * + * Case1: The iommu dev doesn't have power domain but has bclk. This case + * should also avoid the tlb flush while the dev is not active to mute + * the tlb timeout log. like mt8173. + * + * Case2: The power/clock of infra iommu is always on, and it doesn't + * have the device link with the master devices. This case should avoid + * the PM status check. + */ + check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO); - spin_lock_irqsave(&data->tlb_lock, flags); + if (check_pm_status) { + if (pm_runtime_get_if_in_use(data->dev) <= 0) + continue; + } + + curbank = &data->bank[bank->id]; + base = curbank->base; + + spin_lock_irqsave(&curbank->tlb_lock, flags); writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, - data->base + data->plat_data->inv_sel_reg); + base + data->plat_data->inv_sel_reg); - writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), - data->base + REG_MMU_INVLD_START_A); + writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A); writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1), - data->base + REG_MMU_INVLD_END_A); - writel_relaxed(F_MMU_INV_RANGE, - data->base + REG_MMU_INVALIDATE); + base + REG_MMU_INVLD_END_A); + writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE); /* tlb sync */ - ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, + ret = readl_poll_timeout_atomic(base + REG_MMU_CPE_DONE, tmp, tmp != 0, 10, 1000); /* Clear the CPE status */ - writel_relaxed(0, data->base + REG_MMU_CPE_DONE); - spin_unlock_irqrestore(&data->tlb_lock, flags); + writel_relaxed(0, base + REG_MMU_CPE_DONE); + spin_unlock_irqrestore(&curbank->tlb_lock, flags); if (ret) { dev_warn(data->dev, @@ -257,70 +400,103 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, mtk_iommu_tlb_flush_all(data); } - pm_runtime_put(data->dev); + if (check_pm_status) + pm_runtime_put(data->dev); } } static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) { - struct mtk_iommu_data *data = dev_id; - struct mtk_iommu_domain *dom = data->m4u_dom; - unsigned int fault_larb, fault_port, sub_comm = 0; + struct mtk_iommu_bank_data *bank = dev_id; + struct mtk_iommu_data *data = bank->parent_data; + struct mtk_iommu_domain 
*dom = bank->m4u_dom; + unsigned int fault_larb = MTK_INVALID_LARBID, fault_port = 0, sub_comm = 0; u32 int_state, regval, va34_32, pa34_32; + const struct mtk_iommu_plat_data *plat_data = data->plat_data; + void __iomem *base = bank->base; u64 fault_iova, fault_pa; bool layer, write; /* Read error info from registers */ - int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1); + int_state = readl_relaxed(base + REG_MMU_FAULT_ST1); if (int_state & F_REG_MMU0_FAULT_MASK) { - regval = readl_relaxed(data->base + REG_MMU0_INT_ID); - fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA); - fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA); + regval = readl_relaxed(base + REG_MMU0_INT_ID); + fault_iova = readl_relaxed(base + REG_MMU0_FAULT_VA); + fault_pa = readl_relaxed(base + REG_MMU0_INVLD_PA); } else { - regval = readl_relaxed(data->base + REG_MMU1_INT_ID); - fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA); - fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA); + regval = readl_relaxed(base + REG_MMU1_INT_ID); + fault_iova = readl_relaxed(base + REG_MMU1_FAULT_VA); + fault_pa = readl_relaxed(base + REG_MMU1_INVLD_PA); } layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN)) { + if (MTK_IOMMU_HAS_FLAG(plat_data, IOVA_34_EN)) { va34_32 = FIELD_GET(F_MMU_INVAL_VA_34_32_MASK, fault_iova); - pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova); fault_iova = fault_iova & F_MMU_INVAL_VA_31_12_MASK; fault_iova |= (u64)va34_32 << 32; - fault_pa |= (u64)pa34_32 << 32; } - - fault_port = F_MMU_INT_ID_PORT_ID(regval); - if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) { - fault_larb = F_MMU_INT_ID_COMM_ID(regval); - sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); - } else { - fault_larb = F_MMU_INT_ID_LARB_ID(regval); + pa34_32 = FIELD_GET(F_MMU_INVAL_PA_34_32_MASK, fault_iova); + fault_pa |= (u64)pa34_32 << 32; + + if (MTK_IOMMU_IS_TYPE(plat_data, MTK_IOMMU_TYPE_MM)) { + fault_port = F_MMU_INT_ID_PORT_ID(regval); + if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_2BITS)) { + fault_larb = F_MMU_INT_ID_COMM_ID(regval); + sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); + } else if (MTK_IOMMU_HAS_FLAG(plat_data, HAS_SUB_COMM_3BITS)) { + fault_larb = F_MMU_INT_ID_COMM_ID_EXT(regval); + sub_comm = F_MMU_INT_ID_SUB_COMM_ID_EXT(regval); + } else { + fault_larb = F_MMU_INT_ID_LARB_ID(regval); + } + fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; } - fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; - if (report_iommu_fault(&dom->domain, data->dev, fault_iova, + if (report_iommu_fault(&dom->domain, bank->parent_dev, fault_iova, write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { dev_err_ratelimited( - data->dev, - "fault type=0x%x iova=0x%llx pa=0x%llx larb=%d port=%d layer=%d %s\n", - int_state, fault_iova, fault_pa, fault_larb, fault_port, + bank->parent_dev, + "fault type=0x%x iova=0x%llx pa=0x%llx master=0x%x(larb=%d port=%d) layer=%d %s\n", + int_state, fault_iova, fault_pa, regval, fault_larb, fault_port, layer, write ? 
"write" : "read"); } /* Interrupt clear */ - regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0); + regval = readl_relaxed(base + REG_MMU_INT_CONTROL0); regval |= F_INT_CLR_BIT; - writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0); + writel_relaxed(regval, base + REG_MMU_INT_CONTROL0); mtk_iommu_tlb_flush_all(data); return IRQ_HANDLED; } -static int mtk_iommu_get_domain_id(struct device *dev, - const struct mtk_iommu_plat_data *plat_data) +static unsigned int mtk_iommu_get_bank_id(struct device *dev, + const struct mtk_iommu_plat_data *plat_data) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + unsigned int i, portmsk = 0, bankid = 0; + + if (plat_data->banks_num == 1) + return bankid; + + for (i = 0; i < fwspec->num_ids; i++) + portmsk |= BIT(MTK_M4U_TO_PORT(fwspec->ids[i])); + + for (i = 0; i < plat_data->banks_num && i < MTK_IOMMU_BANK_MAX; i++) { + if (!plat_data->banks_enable[i]) + continue; + + if (portmsk & plat_data->banks_portmsk[i]) { + bankid = i; + break; + } + } + return bankid; /* default is 0 */ +} + +static int mtk_iommu_get_iova_region_id(struct device *dev, + const struct mtk_iommu_plat_data *plat_data) { const struct mtk_iommu_iova_region *rgn = plat_data->iova_region; const struct bus_dma_region *dma_rgn = dev->dma_range_map; @@ -349,46 +525,65 @@ static int mtk_iommu_get_domain_id(struct device *dev, return -EINVAL; } -static void mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev, - bool enable, unsigned int domid) +static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev, + bool enable, unsigned int regionid) { struct mtk_smi_larb_iommu *larb_mmu; unsigned int larbid, portid; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); const struct mtk_iommu_iova_region *region; - int i; + u32 peri_mmuen, peri_mmuen_msk; + int i, ret = 0; for (i = 0; i < fwspec->num_ids; ++i) { larbid = MTK_M4U_TO_LARB(fwspec->ids[i]); portid = MTK_M4U_TO_PORT(fwspec->ids[i]); - larb_mmu = &data->larb_imu[larbid]; - - region = data->plat_data->iova_region + domid; - larb_mmu->bank[portid] = upper_32_bits(region->iova_base); - - dev_dbg(dev, "%s iommu for larb(%s) port %d dom %d bank %d.\n", - enable ? "enable" : "disable", dev_name(larb_mmu->dev), - portid, domid, larb_mmu->bank[portid]); - - if (enable) - larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); - else - larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + larb_mmu = &data->larb_imu[larbid]; + + region = data->plat_data->iova_region + regionid; + larb_mmu->bank[portid] = upper_32_bits(region->iova_base); + + dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n", + enable ? "enable" : "disable", dev_name(larb_mmu->dev), + portid, regionid, larb_mmu->bank[portid]); + + if (enable) + larb_mmu->mmu |= MTK_SMI_MMU_EN(portid); + else + larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid); + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { + peri_mmuen_msk = BIT(portid); + /* PCI dev has only one output id, enable the next writing bit for PCIe */ + if (dev_is_pci(dev)) + peri_mmuen_msk |= BIT(portid + 1); + + peri_mmuen = enable ? peri_mmuen_msk : 0; + ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1, + peri_mmuen_msk, peri_mmuen); + if (ret) + dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n", + enable ? 
"enable" : "disable", + dev_name(data->dev), peri_mmuen_msk, ret); + } } + return ret; } static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, struct mtk_iommu_data *data, - unsigned int domid) + unsigned int region_id) { const struct mtk_iommu_iova_region *region; - - /* Use the exist domain as there is only one pgtable here. */ - if (data->m4u_dom) { - dom->iop = data->m4u_dom->iop; - dom->cfg = data->m4u_dom->cfg; - dom->domain.pgsize_bitmap = data->m4u_dom->cfg.pgsize_bitmap; + struct mtk_iommu_domain *m4u_dom; + + /* Always use bank0 in sharing pgtable case */ + m4u_dom = data->bank[0].m4u_dom; + if (m4u_dom) { + dom->iop = m4u_dom->iop; + dom->cfg = m4u_dom->cfg; + dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap; goto update_iova_region; } @@ -417,7 +612,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom, update_iova_region: /* Update the iova region for this domain */ - region = data->plat_data->iova_region + domid; + region = data->plat_data->iova_region + region_id; dom->domain.geometry.aperture_start = region->iova_base; dom->domain.geometry.aperture_end = region->iova_base + region->size - 1; dom->domain.geometry.force_aperture = true; @@ -428,12 +623,13 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) { struct mtk_iommu_domain *dom; - if (type != IOMMU_DOMAIN_DMA) + if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED) return NULL; dom = kzalloc(sizeof(*dom), GFP_KERNEL); if (!dom) return NULL; + mutex_init(&dom->mutex); return &dom->domain; } @@ -446,40 +642,60 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain) static int mtk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_priv_get(dev); + struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata; struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct list_head *hw_list = data->hw_list; struct device *m4udev = data->dev; - int ret, domid; + struct mtk_iommu_bank_data *bank; + unsigned int bankid; + int ret, region_id; - domid = mtk_iommu_get_domain_id(dev, data->plat_data); - if (domid < 0) - return domid; + region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data); + if (region_id < 0) + return region_id; - if (!dom->data) { - if (mtk_iommu_domain_finalise(dom, data, domid)) + bankid = mtk_iommu_get_bank_id(dev, data->plat_data); + mutex_lock(&dom->mutex); + if (!dom->bank) { + /* Data is in the frstdata in sharing pgtable case. 
*/ + frstdata = mtk_iommu_get_frst_data(hw_list); + + ret = mtk_iommu_domain_finalise(dom, frstdata, region_id); + if (ret) { + mutex_unlock(&dom->mutex); return -ENODEV; - dom->data = data; + } + dom->bank = &data->bank[bankid]; } + mutex_unlock(&dom->mutex); - if (!data->m4u_dom) { /* Initialize the M4U HW */ + mutex_lock(&data->mutex); + bank = &data->bank[bankid]; + if (!bank->m4u_dom) { /* Initialize the M4U HW for each a BANK */ ret = pm_runtime_resume_and_get(m4udev); - if (ret < 0) - return ret; + if (ret < 0) { + dev_err(m4udev, "pm get fail(%d) in attach.\n", ret); + goto err_unlock; + } - ret = mtk_iommu_hw_init(data); + ret = mtk_iommu_hw_init(data, bankid); if (ret) { pm_runtime_put(m4udev); - return ret; + goto err_unlock; } - data->m4u_dom = dom; + bank->m4u_dom = dom; writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, - data->base + REG_MMU_PT_BASE_ADDR); + bank->base + REG_MMU_PT_BASE_ADDR); pm_runtime_put(m4udev); } + mutex_unlock(&data->mutex); - mtk_iommu_config(data, dev, true, domid); - return 0; + return mtk_iommu_config(data, dev, true, region_id); + +err_unlock: + mutex_unlock(&data->mutex); + return ret; } static void mtk_iommu_detach_device(struct iommu_domain *domain, @@ -496,7 +712,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, struct mtk_iommu_domain *dom = to_mtk_domain(domain); /* The "4GB mode" M4U physically can not use the lower remap of Dram. */ - if (dom->data->enable_4GB) + if (dom->bank->parent_data->enable_4GB) paddr |= BIT_ULL(32); /* Synchronize with the tlb_lock */ @@ -517,7 +733,7 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - mtk_iommu_tlb_flush_all(dom->data); + mtk_iommu_tlb_flush_all(dom->bank->parent_data); } static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, @@ -526,8 +742,7 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, struct mtk_iommu_domain *dom = to_mtk_domain(domain); size_t length = gather->end - gather->start + 1; - mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize, - dom->data); + mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank); } static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, @@ -535,7 +750,7 @@ static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, { struct mtk_iommu_domain *dom = to_mtk_domain(domain); - mtk_iommu_tlb_flush_range_sync(iova, size, size, dom->data); + mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank); } static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, @@ -546,7 +761,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, pa = dom->iop->iova_to_phys(dom->iop, iova); if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) && - dom->data->enable_4GB && + dom->bank->parent_data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE) pa &= ~BIT_ULL(32); @@ -566,12 +781,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) data = dev_iommu_priv_get(dev); + if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) + return &data->iommu; + /* * Link the consumer device with the smi-larb device(supplier). * The device that connects with each a larb is a independent HW. * All the ports in each a device should be in the same larbs. 
*/ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); + if (larbid >= MTK_LARB_NR_MAX) + return ERR_PTR(-EINVAL); + for (i = 1; i < fwspec->num_ids; i++) { larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]); if (larbid != larbidx) { @@ -581,6 +802,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) } } larbdev = data->larb_imu[larbid].dev; + if (!larbdev) + return ERR_PTR(-EINVAL); + link = device_link_add(dev, larbdev, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); if (!link) @@ -599,34 +823,55 @@ static void mtk_iommu_release_device(struct device *dev) return; data = dev_iommu_priv_get(dev); - larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); - larbdev = data->larb_imu[larbid].dev; - device_link_remove(dev, larbdev); + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); + larbdev = data->larb_imu[larbid].dev; + device_link_remove(dev, larbdev); + } iommu_fwspec_free(dev); } +static int mtk_iommu_get_group_id(struct device *dev, const struct mtk_iommu_plat_data *plat_data) +{ + unsigned int bankid; + + /* + * If the bank function is enabled, each bank is a iommu group/domain. + * Otherwise, each iova region is a iommu group/domain. + */ + bankid = mtk_iommu_get_bank_id(dev, plat_data); + if (bankid) + return bankid; + + return mtk_iommu_get_iova_region_id(dev, plat_data); +} + static struct iommu_group *mtk_iommu_device_group(struct device *dev) { - struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); + struct mtk_iommu_data *c_data = dev_iommu_priv_get(dev), *data; + struct list_head *hw_list = c_data->hw_list; struct iommu_group *group; - int domid; + int groupid; + data = mtk_iommu_get_frst_data(hw_list); if (!data) return ERR_PTR(-ENODEV); - domid = mtk_iommu_get_domain_id(dev, data->plat_data); - if (domid < 0) - return ERR_PTR(domid); + groupid = mtk_iommu_get_group_id(dev, data->plat_data); + if (groupid < 0) + return ERR_PTR(groupid); - group = data->m4u_group[domid]; + mutex_lock(&data->mutex); + group = data->m4u_group[groupid]; if (!group) { group = iommu_group_alloc(); if (!IS_ERR(group)) - data->m4u_group[domid] = group; + data->m4u_group[groupid] = group; } else { iommu_group_ref_get(group); } + mutex_unlock(&data->mutex); return group; } @@ -656,14 +901,14 @@ static void mtk_iommu_get_resv_regions(struct device *dev, struct list_head *head) { struct mtk_iommu_data *data = dev_iommu_priv_get(dev); - unsigned int domid = mtk_iommu_get_domain_id(dev, data->plat_data), i; + unsigned int regionid = mtk_iommu_get_iova_region_id(dev, data->plat_data), i; const struct mtk_iommu_iova_region *resv, *curdom; struct iommu_resv_region *region; int prot = IOMMU_WRITE | IOMMU_READ; - if ((int)domid < 0) + if ((int)regionid < 0) return; - curdom = data->plat_data->iova_region + domid; + curdom = data->plat_data->iova_region + regionid; for (i = 0; i < data->plat_data->iova_region_nr; i++) { resv = data->plat_data->iova_region + i; @@ -704,42 +949,24 @@ static const struct iommu_ops mtk_iommu_ops = { } }; -static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) +static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int bankid) { + const struct mtk_iommu_bank_data *bankx = &data->bank[bankid]; + const struct mtk_iommu_bank_data *bank0 = &data->bank[0]; u32 regval; + /* + * Global control settings are in bank0. May re-init these global registers + * since no sure if there is bank0 consumers. 
+ */ if (data->plat_data->m4u_plat == M4U_MT8173) { regval = F_MMU_PREFETCH_RT_REPLACE_MOD | F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; } else { - regval = readl_relaxed(data->base + REG_MMU_CTRL_REG); + regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG); regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR; } - writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); - - regval = F_L2_MULIT_HIT_EN | - F_TABLE_WALK_FAULT_INT_EN | - F_PREETCH_FIFO_OVERFLOW_INT_EN | - F_MISS_FIFO_OVERFLOW_INT_EN | - F_PREFETCH_FIFO_ERR_INT_EN | - F_MISS_FIFO_ERR_INT_EN; - writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0); - - regval = F_INT_TRANSLATION_FAULT | - F_INT_MAIN_MULTI_HIT_FAULT | - F_INT_INVALID_PA_FAULT | - F_INT_ENTRY_REPLACEMENT_FAULT | - F_INT_TLB_MISS_FAULT | - F_INT_MISS_TRANSACTION_FIFO_FAULT | - F_INT_PRETETCH_TRANSATION_FIFO_FAULT; - writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL); - - if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR)) - regval = (data->protect_base >> 1) | (data->enable_4GB << 31); - else - regval = lower_32_bits(data->protect_base) | - upper_32_bits(data->protect_base); - writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); + writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG); if (data->enable_4GB && MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) { @@ -748,31 +975,61 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. */ regval = F_MMU_VLD_PA_RNG(7, 4); - writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG); + writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG); } - writel_relaxed(0, data->base + REG_MMU_DCM_DIS); + if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE)) + writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS); + else + writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS); + if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) { /* write command throttling mode */ - regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL); + regval = readl_relaxed(bank0->base + REG_MMU_WR_LEN_CTRL); regval &= ~F_MMU_WR_THROT_DIS_MASK; - writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL); + writel_relaxed(regval, bank0->base + REG_MMU_WR_LEN_CTRL); } if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) { /* The register is called STANDARD_AXI_MODE in this case */ regval = 0; } else { - regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL); - regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; + regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL); + if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE)) + regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN)) regval &= ~F_MMU_IN_ORDER_WR_EN_MASK; } - writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL); + writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL); - if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, - dev_name(data->dev), (void *)data)) { - writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); - dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq); + /* Independent settings for each bank */ + regval = F_L2_MULIT_HIT_EN | + F_TABLE_WALK_FAULT_INT_EN | + F_PREETCH_FIFO_OVERFLOW_INT_EN | + F_MISS_FIFO_OVERFLOW_INT_EN | + F_PREFETCH_FIFO_ERR_INT_EN | + F_MISS_FIFO_ERR_INT_EN; + writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0); + + regval = F_INT_TRANSLATION_FAULT | + F_INT_MAIN_MULTI_HIT_FAULT | + F_INT_INVALID_PA_FAULT | + F_INT_ENTRY_REPLACEMENT_FAULT | + F_INT_TLB_MISS_FAULT | + F_INT_MISS_TRANSACTION_FIFO_FAULT | + 
F_INT_PRETETCH_TRANSATION_FIFO_FAULT; + writel_relaxed(regval, bankx->base + REG_MMU_INT_MAIN_CONTROL); + + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_LEGACY_IVRP_PADDR)) + regval = (data->protect_base >> 1) | (data->enable_4GB << 31); + else + regval = lower_32_bits(data->protect_base) | + upper_32_bits(data->protect_base); + writel_relaxed(regval, bankx->base + REG_MMU_IVRP_PADDR); + + if (devm_request_irq(bankx->parent_dev, bankx->irq, mtk_iommu_isr, 0, + dev_name(bankx->parent_dev), (void *)bankx)) { + writel_relaxed(0, bankx->base + REG_MMU_PT_BASE_ADDR); + dev_err(bankx->parent_dev, "Failed @ IRQ-%d Request\n", bankx->irq); return -ENODEV; } @@ -784,21 +1041,91 @@ static const struct component_master_ops mtk_iommu_com_ops = { .unbind = mtk_iommu_unbind, }; +static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **match, + struct mtk_iommu_data *data) +{ + struct device_node *larbnode, *smicomm_node, *smi_subcomm_node; + struct platform_device *plarbdev; + struct device_link *link; + int i, larb_nr, ret; + + larb_nr = of_count_phandle_with_args(dev->of_node, "mediatek,larbs", NULL); + if (larb_nr < 0) + return larb_nr; + + for (i = 0; i < larb_nr; i++) { + u32 id; + + larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); + if (!larbnode) + return -EINVAL; + + if (!of_device_is_available(larbnode)) { + of_node_put(larbnode); + continue; + } + + ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); + if (ret)/* The id is consecutive if there is no this property */ + id = i; + + plarbdev = of_find_device_by_node(larbnode); + if (!plarbdev) { + of_node_put(larbnode); + return -ENODEV; + } + if (!plarbdev->dev.driver) { + of_node_put(larbnode); + return -EPROBE_DEFER; + } + data->larb_imu[id].dev = &plarbdev->dev; + + component_match_add_release(dev, match, component_release_of, + component_compare_of, larbnode); + } + + /* Get smi-(sub)-common dev from the last larb. */ + smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); + if (!smi_subcomm_node) + return -EINVAL; + + /* + * It may have two level smi-common. the node is smi-sub-common if it + * has a new mediatek,smi property. otherwise it is smi-commmon. 
+ */ + smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0); + if (smicomm_node) + of_node_put(smi_subcomm_node); + else + smicomm_node = smi_subcomm_node; + + plarbdev = of_find_device_by_node(smicomm_node); + of_node_put(smicomm_node); + data->smicomm_dev = &plarbdev->dev; + + link = device_link_add(data->smicomm_dev, dev, + DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); + if (!link) { + dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); + return -EINVAL; + } + return 0; +} + static int mtk_iommu_probe(struct platform_device *pdev) { struct mtk_iommu_data *data; struct device *dev = &pdev->dev; - struct device_node *larbnode, *smicomm_node; - struct platform_device *plarbdev; - struct device_link *link; struct resource *res; resource_size_t ioaddr; struct component_match *match = NULL; struct regmap *infracfg; void *protect; - int i, larb_nr, ret; + int ret, banks_num, i = 0; u32 val; char *p; + struct mtk_iommu_bank_data *bank; + void __iomem *base; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -835,15 +1162,36 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN); } + banks_num = data->plat_data->banks_num; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - data->base = devm_ioremap_resource(dev, res); - if (IS_ERR(data->base)) - return PTR_ERR(data->base); + if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) { + dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res); + return -EINVAL; + } + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); ioaddr = res->start; - data->irq = platform_get_irq(pdev, 0); - if (data->irq < 0) - return data->irq; + data->bank = devm_kmalloc(dev, banks_num * sizeof(*data->bank), GFP_KERNEL); + if (!data->bank) + return -ENOMEM; + + do { + if (!data->plat_data->banks_enable[i]) + continue; + bank = &data->bank[i]; + bank->id = i; + bank->base = base + i * MTK_IOMMU_BANK_SZ; + bank->m4u_dom = NULL; + + bank->irq = platform_get_irq(pdev, i); + if (bank->irq < 0) + return bank->irq; + bank->parent_dev = dev; + bank->parent_data = data; + spin_lock_init(&bank->tlb_lock); + } while (++i < banks_num); if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { data->bclk = devm_clk_get(dev, "bclk"); @@ -851,62 +1199,27 @@ static int mtk_iommu_probe(struct platform_device *pdev) return PTR_ERR(data->bclk); } - larb_nr = of_count_phandle_with_args(dev->of_node, - "mediatek,larbs", NULL); - if (larb_nr < 0) - return larb_nr; - - for (i = 0; i < larb_nr; i++) { - u32 id; - - larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); - if (!larbnode) - return -EINVAL; - - if (!of_device_is_available(larbnode)) { - of_node_put(larbnode); - continue; - } - - ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); - if (ret)/* The id is consecutive if there is no this property */ - id = i; + pm_runtime_enable(dev); - plarbdev = of_find_device_by_node(larbnode); - if (!plarbdev) { - of_node_put(larbnode); - return -ENODEV; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + ret = mtk_iommu_mm_dts_parse(dev, &match, data); + if (ret) { + dev_err(dev, "mm dts parse fail(%d).", ret); + goto out_runtime_disable; } - if (!plarbdev->dev.driver) { - of_node_put(larbnode); - return -EPROBE_DEFER; + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && + data->plat_data->pericfg_comp_str) { + infracfg = syscon_regmap_lookup_by_compatible(data->plat_data->pericfg_comp_str); + if 
(IS_ERR(infracfg)) { + ret = PTR_ERR(infracfg); + goto out_runtime_disable; } - data->larb_imu[id].dev = &plarbdev->dev; - - component_match_add_release(dev, &match, component_release_of, - component_compare_of, larbnode); - } - - /* Get smi-common dev from the last larb. */ - smicomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); - if (!smicomm_node) - return -EINVAL; - - plarbdev = of_find_device_by_node(smicomm_node); - of_node_put(smicomm_node); - data->smicomm_dev = &plarbdev->dev; - - pm_runtime_enable(dev); - link = device_link_add(data->smicomm_dev, dev, - DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME); - if (!link) { - dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev)); - ret = -EINVAL; - goto out_runtime_disable; + data->pericfg = infracfg; } platform_set_drvdata(pdev, data); + mutex_init(&data->mutex); ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, "mtk-iommu.%pa", &ioaddr); @@ -917,8 +1230,14 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) goto out_sysfs_remove; - spin_lock_init(&data->tlb_lock); - list_add_tail(&data->list, &m4ulist); + if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { + list_add_tail(&data->list, data->plat_data->hw_list); + data->hw_list = data->plat_data->hw_list; + } else { + INIT_LIST_HEAD(&data->hw_list_head); + list_add_tail(&data->list, &data->hw_list_head); + data->hw_list = &data->hw_list_head; + } if (!iommu_present(&platform_bus_type)) { ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); @@ -926,9 +1245,20 @@ static int mtk_iommu_probe(struct platform_device *pdev) goto out_list_del; } - ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); - if (ret) - goto out_bus_set_null; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); + if (ret) + goto out_bus_set_null; + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && + MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) { +#ifdef CONFIG_PCI + if (!iommu_present(&pci_bus_type)) { + ret = bus_set_iommu(&pci_bus_type, &mtk_iommu_ops); + if (ret) /* PCIe fail don't affect platform_bus. 
*/ + goto out_list_del; + } +#endif + } return ret; out_bus_set_null: @@ -939,7 +1269,8 @@ out_list_del: out_sysfs_remove: iommu_device_sysfs_remove(&data->iommu); out_link_remove: - device_link_remove(data->smicomm_dev, dev); + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) + device_link_remove(data->smicomm_dev, dev); out_runtime_disable: pm_runtime_disable(dev); return ret; @@ -948,18 +1279,30 @@ out_runtime_disable: static int mtk_iommu_remove(struct platform_device *pdev) { struct mtk_iommu_data *data = platform_get_drvdata(pdev); + struct mtk_iommu_bank_data *bank; + int i; iommu_device_sysfs_remove(&data->iommu); iommu_device_unregister(&data->iommu); - if (iommu_present(&platform_bus_type)) - bus_set_iommu(&platform_bus_type, NULL); + list_del(&data->list); - clk_disable_unprepare(data->bclk); - device_link_remove(data->smicomm_dev, &pdev->dev); + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { + device_link_remove(data->smicomm_dev, &pdev->dev); + component_master_del(&pdev->dev, &mtk_iommu_com_ops); + } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) && + MTK_IOMMU_HAS_FLAG(data->plat_data, IFA_IOMMU_PCIE_SUPPORT)) { +#ifdef CONFIG_PCI + bus_set_iommu(&pci_bus_type, NULL); +#endif + } pm_runtime_disable(&pdev->dev); - devm_free_irq(&pdev->dev, data->irq, data); - component_master_del(&pdev->dev, &mtk_iommu_com_ops); + for (i = 0; i < data->plat_data->banks_num; i++) { + bank = &data->bank[i]; + if (!bank->m4u_dom) + continue; + devm_free_irq(&pdev->dev, bank->irq, bank); + } return 0; } @@ -967,16 +1310,23 @@ static int __maybe_unused mtk_iommu_runtime_suspend(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); struct mtk_iommu_suspend_reg *reg = &data->reg; - void __iomem *base = data->base; + void __iomem *base; + int i = 0; + base = data->bank[i].base; reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); - reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); - reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); - reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR); reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG); + do { + if (!data->plat_data->banks_enable[i]) + continue; + base = data->bank[i].base; + reg->int_control[i] = readl_relaxed(base + REG_MMU_INT_CONTROL0); + reg->int_main_control[i] = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL); + reg->ivrp_paddr[i] = readl_relaxed(base + REG_MMU_IVRP_PADDR); + } while (++i < data->plat_data->banks_num); clk_disable_unprepare(data->bclk); return 0; } @@ -985,9 +1335,9 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) { struct mtk_iommu_data *data = dev_get_drvdata(dev); struct mtk_iommu_suspend_reg *reg = &data->reg; - struct mtk_iommu_domain *m4u_dom = data->m4u_dom; - void __iomem *base = data->base; - int ret; + struct mtk_iommu_domain *m4u_dom; + void __iomem *base; + int ret, i = 0; ret = clk_prepare_enable(data->bclk); if (ret) { @@ -999,18 +1349,26 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev) * Uppon first resume, only enable the clk and return, since the values of the * registers are not yet set. 
*/ - if (!m4u_dom) + if (!reg->wr_len_ctrl) return 0; + base = data->bank[i].base; writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); - writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); - writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL); - writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR); writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); - writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR); + do { + m4u_dom = data->bank[i].m4u_dom; + if (!data->plat_data->banks_enable[i] || !m4u_dom) + continue; + base = data->bank[i].base; + writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0); + writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL); + writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR); + writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, + base + REG_MMU_PT_BASE_ADDR); + } while (++i < data->plat_data->banks_num); /* * Users may allocate dma buffer before they call pm_runtime_get, @@ -1029,17 +1387,24 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, - .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG, + .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG | SHARE_PGTABLE | + MTK_IOMMU_TYPE_MM, + .hw_list = &m4ulist, .inv_sel_reg = REG_MMU_INV_SEL_GEN1, .iova_region = single_domain, + .banks_num = 1, + .banks_enable = {true}, .iova_region_nr = ARRAY_SIZE(single_domain), .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, }; static const struct mtk_iommu_plat_data mt6779_data = { .m4u_plat = M4U_MT6779, - .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN, + .flags = HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | WR_THROT_EN | + MTK_IOMMU_TYPE_MM, .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, .iova_region = single_domain, .iova_region_nr = ARRAY_SIZE(single_domain), .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, @@ -1047,8 +1412,10 @@ static const struct mtk_iommu_plat_data mt6779_data = { static const struct mtk_iommu_plat_data mt8167_data = { .m4u_plat = M4U_MT8167, - .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR, + .flags = RESET_AXI | HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .banks_num = 1, + .banks_enable = {true}, .iova_region = single_domain, .iova_region_nr = ARRAY_SIZE(single_domain), .larbid_remap = {{0}, {1}, {2}}, /* Linear mapping. */ @@ -1057,8 +1424,10 @@ static const struct mtk_iommu_plat_data mt8167_data = { static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI | - HAS_LEGACY_IVRP_PADDR, + HAS_LEGACY_IVRP_PADDR | MTK_IOMMU_TYPE_MM, .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .banks_num = 1, + .banks_enable = {true}, .iova_region = single_domain, .iova_region_nr = ARRAY_SIZE(single_domain), .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. 
*/ @@ -1066,31 +1435,100 @@ static const struct mtk_iommu_plat_data mt8173_data = { static const struct mtk_iommu_plat_data mt8183_data = { .m4u_plat = M4U_MT8183, - .flags = RESET_AXI, + .flags = RESET_AXI | MTK_IOMMU_TYPE_MM, .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .banks_num = 1, + .banks_enable = {true}, .iova_region = single_domain, .iova_region_nr = ARRAY_SIZE(single_domain), .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, }; +static const struct mtk_iommu_plat_data mt8186_data_mm = { + .m4u_plat = M4U_MT8186, + .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, + .larbid_remap = {{0}, {1, MTK_INVALID_LARBID, 8}, {4}, {7}, {2}, {9, 11, 19, 20}, + {MTK_INVALID_LARBID, 14, 16}, + {MTK_INVALID_LARBID, 13, MTK_INVALID_LARBID, 17}}, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), +}; + static const struct mtk_iommu_plat_data mt8192_data = { .m4u_plat = M4U_MT8192, - .flags = HAS_BCLK | HAS_SUB_COMM | OUT_ORDER_WR_EN | - WR_THROT_EN | IOVA_34_EN, + .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM, .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, .iova_region = mt8192_multi_dom, .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), .larbid_remap = {{0}, {1}, {4, 5}, {7}, {2}, {9, 11, 19, 20}, {0, 14, 16}, {0, 13, 18, 17}}, }; +static const struct mtk_iommu_plat_data mt8195_data_infra = { + .m4u_plat = M4U_MT8195, + .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO | + MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT, + .pericfg_comp_str = "mediatek,mt8195-pericfg_ao", + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 5, + .banks_enable = {true, false, false, false, true}, + .banks_portmsk = {[0] = GENMASK(19, 16), /* PCIe */ + [4] = GENMASK(31, 20), /* USB */ + }, + .iova_region = single_domain, + .iova_region_nr = ARRAY_SIZE(single_domain), +}; + +static const struct mtk_iommu_plat_data mt8195_data_vdo = { + .m4u_plat = M4U_MT8195, + .flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, + .hw_list = &m4ulist, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), + .larbid_remap = {{2, 0}, {21}, {24}, {7}, {19}, {9, 10, 11}, + {13, 17, 15/* 17b */, 25}, {5}}, +}; + +static const struct mtk_iommu_plat_data mt8195_data_vpp = { + .m4u_plat = M4U_MT8195, + .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN | + WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE | MTK_IOMMU_TYPE_MM, + .hw_list = &m4ulist, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .banks_num = 1, + .banks_enable = {true}, + .iova_region = mt8192_multi_dom, + .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom), + .larbid_remap = {{1}, {3}, + {22, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 23}, + {8}, {20}, {12}, + /* 16: 16a; 29: 16b; 30: CCUtop0; 31: CCUtop1 */ + {14, 16, 29, 26, 30, 31, 18}, + {4, MTK_INVALID_LARBID, MTK_INVALID_LARBID, MTK_INVALID_LARBID, 6}}, +}; + static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, { .compatible = "mediatek,mt8167-m4u", .data = &mt8167_data}, { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, { 
.compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, + { .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */ { .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data}, + { .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra}, + { .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo}, + { .compatible = "mediatek,mt8195-iommu-vpp", .data = &mt8195_data_vpp}, {} }; diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h deleted file mode 100644 index b742432220c5..000000000000 --- a/drivers/iommu/mtk_iommu.h +++ /dev/null @@ -1,101 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2015-2016 MediaTek Inc. - * Author: Honghui Zhang <honghui.zhang@mediatek.com> - */ - -#ifndef _MTK_IOMMU_H_ -#define _MTK_IOMMU_H_ - -#include <linux/clk.h> -#include <linux/component.h> -#include <linux/device.h> -#include <linux/io.h> -#include <linux/io-pgtable.h> -#include <linux/iommu.h> -#include <linux/list.h> -#include <linux/spinlock.h> -#include <linux/dma-mapping.h> -#include <soc/mediatek/smi.h> -#include <dt-bindings/memory/mtk-memory-port.h> - -#define MTK_LARB_COM_MAX 8 -#define MTK_LARB_SUBCOM_MAX 4 - -#define MTK_IOMMU_GROUP_MAX 8 - -struct mtk_iommu_suspend_reg { - union { - u32 standard_axi_mode;/* v1 */ - u32 misc_ctrl;/* v2 */ - }; - u32 dcm_dis; - u32 ctrl_reg; - u32 int_control0; - u32 int_main_control; - u32 ivrp_paddr; - u32 vld_pa_rng; - u32 wr_len_ctrl; -}; - -enum mtk_iommu_plat { - M4U_MT2701, - M4U_MT2712, - M4U_MT6779, - M4U_MT8167, - M4U_MT8173, - M4U_MT8183, - M4U_MT8192, -}; - -struct mtk_iommu_iova_region; - -struct mtk_iommu_plat_data { - enum mtk_iommu_plat m4u_plat; - u32 flags; - u32 inv_sel_reg; - - unsigned int iova_region_nr; - const struct mtk_iommu_iova_region *iova_region; - unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX]; -}; - -struct mtk_iommu_domain; - -struct mtk_iommu_data { - void __iomem *base; - int irq; - struct device *dev; - struct clk *bclk; - phys_addr_t protect_base; /* protect memory base */ - struct mtk_iommu_suspend_reg reg; - struct mtk_iommu_domain *m4u_dom; - struct iommu_group *m4u_group[MTK_IOMMU_GROUP_MAX]; - bool enable_4GB; - spinlock_t tlb_lock; /* lock for tlb range flush */ - - struct iommu_device iommu; - const struct mtk_iommu_plat_data *plat_data; - struct device *smicomm_dev; - - struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ - - struct list_head list; - struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; -}; - -static inline int mtk_iommu_bind(struct device *dev) -{ - struct mtk_iommu_data *data = dev_get_drvdata(dev); - - return component_bind_all(dev, &data->larb_imu); -} - -static inline void mtk_iommu_unbind(struct device *dev) -{ - struct mtk_iommu_data *data = dev_get_drvdata(dev); - - component_unbind_all(dev, &data->larb_imu); -} - -#endif diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index ecff800656e6..e1cb51b9866c 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -7,7 +7,6 @@ * * Based on driver/iommu/mtk_iommu.c */ -#include <linux/memblock.h> #include <linux/bug.h> #include <linux/clk.h> #include <linux/component.h> @@ -28,10 +27,9 @@ #include <linux/spinlock.h> #include <asm/barrier.h> #include <asm/dma-iommu.h> -#include <linux/init.h> +#include <dt-bindings/memory/mtk-memory-port.h> #include <dt-bindings/memory/mt2701-larb-port.h> #include <soc/mediatek/smi.h> -#include "mtk_iommu.h" #define REG_MMU_PT_BASE_ADDR 0x000 @@ 
-80,6 +78,7 @@ /* MTK generation one iommu HW only support 4K size mapping */ #define MT2701_IOMMU_PAGE_SHIFT 12 #define MT2701_IOMMU_PAGE_SIZE (1UL << MT2701_IOMMU_PAGE_SHIFT) +#define MT2701_LARB_NR_MAX 3 /* * MTK m4u support 4GB iova address space, and only support 4K page @@ -87,17 +86,53 @@ */ #define M2701_IOMMU_PGT_SIZE SZ_4M -struct mtk_iommu_domain { +struct mtk_iommu_v1_suspend_reg { + u32 standard_axi_mode; + u32 dcm_dis; + u32 ctrl_reg; + u32 int_control0; +}; + +struct mtk_iommu_v1_data { + void __iomem *base; + int irq; + struct device *dev; + struct clk *bclk; + phys_addr_t protect_base; /* protect memory base */ + struct mtk_iommu_v1_domain *m4u_dom; + + struct iommu_device iommu; + struct dma_iommu_mapping *mapping; + struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; + + struct mtk_iommu_v1_suspend_reg reg; +}; + +struct mtk_iommu_v1_domain { spinlock_t pgtlock; /* lock for page table */ struct iommu_domain domain; u32 *pgt_va; dma_addr_t pgt_pa; - struct mtk_iommu_data *data; + struct mtk_iommu_v1_data *data; }; -static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom) +static int mtk_iommu_v1_bind(struct device *dev) +{ + struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); + + return component_bind_all(dev, &data->larb_imu); +} + +static void mtk_iommu_v1_unbind(struct device *dev) +{ + struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); + + component_unbind_all(dev, &data->larb_imu); +} + +static struct mtk_iommu_v1_domain *to_mtk_domain(struct iommu_domain *dom) { - return container_of(dom, struct mtk_iommu_domain, domain); + return container_of(dom, struct mtk_iommu_v1_domain, domain); } static const int mt2701_m4u_in_larb[] = { @@ -123,7 +158,7 @@ static inline int mt2701_m4u_to_port(int id) return id - mt2701_m4u_in_larb[larb]; } -static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) +static void mtk_iommu_v1_tlb_flush_all(struct mtk_iommu_v1_data *data) { writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL); @@ -131,8 +166,8 @@ static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data) wmb(); /* Make sure the tlb flush all done */ } -static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data, - unsigned long iova, size_t size) +static void mtk_iommu_v1_tlb_flush_range(struct mtk_iommu_v1_data *data, + unsigned long iova, size_t size) { int ret; u32 tmp; @@ -150,16 +185,16 @@ static void mtk_iommu_tlb_flush_range(struct mtk_iommu_data *data, if (ret) { dev_warn(data->dev, "Partial TLB flush timed out, falling back to full flush\n"); - mtk_iommu_tlb_flush_all(data); + mtk_iommu_v1_tlb_flush_all(data); } /* Clear the CPE status */ writel_relaxed(0, data->base + REG_MMU_CPE_DONE); } -static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) +static irqreturn_t mtk_iommu_v1_isr(int irq, void *dev_id) { - struct mtk_iommu_data *data = dev_id; - struct mtk_iommu_domain *dom = data->m4u_dom; + struct mtk_iommu_v1_data *data = dev_id; + struct mtk_iommu_v1_domain *dom = data->m4u_dom; u32 int_state, regval, fault_iova, fault_pa; unsigned int fault_larb, fault_port; @@ -189,13 +224,13 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) regval |= F_INT_CLR_BIT; writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL); - mtk_iommu_tlb_flush_all(data); + mtk_iommu_v1_tlb_flush_all(data); return IRQ_HANDLED; } -static void mtk_iommu_config(struct mtk_iommu_data *data, - struct device *dev, bool enable) +static void mtk_iommu_v1_config(struct mtk_iommu_v1_data *data, + struct device *dev, bool enable) { 
struct mtk_smi_larb_iommu *larb_mmu; unsigned int larbid, portid; @@ -217,9 +252,9 @@ static void mtk_iommu_config(struct mtk_iommu_data *data, } } -static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) +static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data) { - struct mtk_iommu_domain *dom = data->m4u_dom; + struct mtk_iommu_v1_domain *dom = data->m4u_dom; spin_lock_init(&dom->pgtlock); @@ -235,9 +270,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data) return 0; } -static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) +static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type) { - struct mtk_iommu_domain *dom; + struct mtk_iommu_v1_domain *dom; if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; @@ -249,21 +284,20 @@ static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type) return &dom->domain; } -static void mtk_iommu_domain_free(struct iommu_domain *domain) +static void mtk_iommu_v1_domain_free(struct iommu_domain *domain) { - struct mtk_iommu_domain *dom = to_mtk_domain(domain); - struct mtk_iommu_data *data = dom->data; + struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_v1_data *data = dom->data; dma_free_coherent(data->dev, M2701_IOMMU_PGT_SIZE, dom->pgt_va, dom->pgt_pa); kfree(to_mtk_domain(domain)); } -static int mtk_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) +static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_priv_get(dev); - struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev); + struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); struct dma_iommu_mapping *mtk_mapping; int ret; @@ -274,29 +308,28 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, if (!data->m4u_dom) { data->m4u_dom = dom; - ret = mtk_iommu_domain_finalise(data); + ret = mtk_iommu_v1_domain_finalise(data); if (ret) { data->m4u_dom = NULL; return ret; } } - mtk_iommu_config(data, dev, true); + mtk_iommu_v1_config(data, dev, true); return 0; } -static void mtk_iommu_detach_device(struct iommu_domain *domain, - struct device *dev) +static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_priv_get(dev); + struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev); - mtk_iommu_config(data, dev, false); + mtk_iommu_v1_config(data, dev, false); } -static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; unsigned long flags; unsigned int i; @@ -317,16 +350,15 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, spin_unlock_irqrestore(&dom->pgtlock, flags); - mtk_iommu_tlb_flush_range(dom->data, iova, size); + mtk_iommu_v1_tlb_flush_range(dom->data, iova, size); return map_size == size ? 
0 : -EEXIST; } -static size_t mtk_iommu_unmap(struct iommu_domain *domain, - unsigned long iova, size_t size, - struct iommu_iotlb_gather *gather) +static size_t mtk_iommu_v1_unmap(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *gather) { - struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); unsigned long flags; u32 *pgt_base_iova = dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT); unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; @@ -335,15 +367,14 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, memset(pgt_base_iova, 0, page_num * sizeof(u32)); spin_unlock_irqrestore(&dom->pgtlock, flags); - mtk_iommu_tlb_flush_range(dom->data, iova, size); + mtk_iommu_v1_tlb_flush_range(dom->data, iova, size); return size; } -static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, - dma_addr_t iova) +static phys_addr_t mtk_iommu_v1_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - struct mtk_iommu_domain *dom = to_mtk_domain(domain); + struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain); unsigned long flags; phys_addr_t pa; @@ -355,17 +386,16 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, return pa; } -static const struct iommu_ops mtk_iommu_ops; +static const struct iommu_ops mtk_iommu_v1_ops; /* * MTK generation one iommu HW only support one iommu domain, and all the client * sharing the same iova address space. */ -static int mtk_iommu_create_mapping(struct device *dev, - struct of_phandle_args *args) +static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_args *args) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct mtk_iommu_data *data; + struct mtk_iommu_v1_data *data; struct platform_device *m4updev; struct dma_iommu_mapping *mtk_mapping; int ret; @@ -377,11 +407,11 @@ static int mtk_iommu_create_mapping(struct device *dev, } if (!fwspec) { - ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_ops); + ret = iommu_fwspec_init(dev, &args->np->fwnode, &mtk_iommu_v1_ops); if (ret) return ret; fwspec = dev_iommu_fwspec_get(dev); - } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_ops) { + } else if (dev_iommu_fwspec_get(dev)->ops != &mtk_iommu_v1_ops) { return -EINVAL; } @@ -413,16 +443,16 @@ static int mtk_iommu_create_mapping(struct device *dev, return 0; } -static int mtk_iommu_def_domain_type(struct device *dev) +static int mtk_iommu_v1_def_domain_type(struct device *dev) { return IOMMU_DOMAIN_UNMANAGED; } -static struct iommu_device *mtk_iommu_probe_device(struct device *dev) +static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct of_phandle_args iommu_spec; - struct mtk_iommu_data *data; + struct mtk_iommu_v1_data *data; int err, idx = 0, larbid, larbidx; struct device_link *link; struct device *larbdev; @@ -440,7 +470,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) "#iommu-cells", idx, &iommu_spec)) { - err = mtk_iommu_create_mapping(dev, &iommu_spec); + err = mtk_iommu_v1_create_mapping(dev, &iommu_spec); of_node_put(iommu_spec.np); if (err) return ERR_PTR(err); @@ -450,13 +480,16 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) idx++; } - if (!fwspec || fwspec->ops != &mtk_iommu_ops) + if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops) return ERR_PTR(-ENODEV); /* Not a iommu client device */ data = dev_iommu_priv_get(dev); /* 
Link the consumer device with the smi-larb device(supplier) */ larbid = mt2701_m4u_to_larb(fwspec->ids[0]); + if (larbid >= MT2701_LARB_NR_MAX) + return ERR_PTR(-EINVAL); + for (idx = 1; idx < fwspec->num_ids; idx++) { larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]); if (larbid != larbidx) { @@ -467,6 +500,9 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) } larbdev = data->larb_imu[larbid].dev; + if (!larbdev) + return ERR_PTR(-EINVAL); + link = device_link_add(dev, larbdev, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); if (!link) @@ -475,10 +511,10 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev) return &data->iommu; } -static void mtk_iommu_probe_finalize(struct device *dev) +static void mtk_iommu_v1_probe_finalize(struct device *dev) { struct dma_iommu_mapping *mtk_mapping; - struct mtk_iommu_data *data; + struct mtk_iommu_v1_data *data; int err; data = dev_iommu_priv_get(dev); @@ -489,14 +525,14 @@ static void mtk_iommu_probe_finalize(struct device *dev) dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n"); } -static void mtk_iommu_release_device(struct device *dev) +static void mtk_iommu_v1_release_device(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct mtk_iommu_data *data; + struct mtk_iommu_v1_data *data; struct device *larbdev; unsigned int larbid; - if (!fwspec || fwspec->ops != &mtk_iommu_ops) + if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops) return; data = dev_iommu_priv_get(dev); @@ -507,7 +543,7 @@ static void mtk_iommu_release_device(struct device *dev) iommu_fwspec_free(dev); } -static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) +static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data) { u32 regval; int ret; @@ -537,7 +573,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) writel_relaxed(F_MMU_DCM_ON, data->base + REG_MMU_DCM); - if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, + if (devm_request_irq(data->dev, data->irq, mtk_iommu_v1_isr, 0, dev_name(data->dev), (void *)data)) { writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR); clk_disable_unprepare(data->bclk); @@ -548,39 +584,39 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return 0; } -static const struct iommu_ops mtk_iommu_ops = { - .domain_alloc = mtk_iommu_domain_alloc, - .probe_device = mtk_iommu_probe_device, - .probe_finalize = mtk_iommu_probe_finalize, - .release_device = mtk_iommu_release_device, - .def_domain_type = mtk_iommu_def_domain_type, +static const struct iommu_ops mtk_iommu_v1_ops = { + .domain_alloc = mtk_iommu_v1_domain_alloc, + .probe_device = mtk_iommu_v1_probe_device, + .probe_finalize = mtk_iommu_v1_probe_finalize, + .release_device = mtk_iommu_v1_release_device, + .def_domain_type = mtk_iommu_v1_def_domain_type, .device_group = generic_device_group, .pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT, .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { - .attach_dev = mtk_iommu_attach_device, - .detach_dev = mtk_iommu_detach_device, - .map = mtk_iommu_map, - .unmap = mtk_iommu_unmap, - .iova_to_phys = mtk_iommu_iova_to_phys, - .free = mtk_iommu_domain_free, + .attach_dev = mtk_iommu_v1_attach_device, + .detach_dev = mtk_iommu_v1_detach_device, + .map = mtk_iommu_v1_map, + .unmap = mtk_iommu_v1_unmap, + .iova_to_phys = mtk_iommu_v1_iova_to_phys, + .free = mtk_iommu_v1_domain_free, } }; -static const struct of_device_id mtk_iommu_of_ids[] = { +static const struct of_device_id 
mtk_iommu_v1_of_ids[] = { { .compatible = "mediatek,mt2701-m4u", }, {} }; -static const struct component_master_ops mtk_iommu_com_ops = { - .bind = mtk_iommu_bind, - .unbind = mtk_iommu_unbind, +static const struct component_master_ops mtk_iommu_v1_com_ops = { + .bind = mtk_iommu_v1_bind, + .unbind = mtk_iommu_v1_unbind, }; -static int mtk_iommu_probe(struct platform_device *pdev) +static int mtk_iommu_v1_probe(struct platform_device *pdev) { - struct mtk_iommu_data *data; struct device *dev = &pdev->dev; + struct mtk_iommu_v1_data *data; struct resource *res; struct component_match *match = NULL; void *protect; @@ -647,7 +683,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); - ret = mtk_iommu_hw_init(data); + ret = mtk_iommu_v1_hw_init(data); if (ret) return ret; @@ -656,17 +692,17 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; - ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); + ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev); if (ret) goto out_sysfs_remove; if (!iommu_present(&platform_bus_type)) { - ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); + ret = bus_set_iommu(&platform_bus_type, &mtk_iommu_v1_ops); if (ret) goto out_dev_unreg; } - ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); + ret = component_master_add_with_match(dev, &mtk_iommu_v1_com_ops, match); if (ret) goto out_bus_set_null; return ret; @@ -680,9 +716,9 @@ out_sysfs_remove: return ret; } -static int mtk_iommu_remove(struct platform_device *pdev) +static int mtk_iommu_v1_remove(struct platform_device *pdev) { - struct mtk_iommu_data *data = platform_get_drvdata(pdev); + struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev); iommu_device_sysfs_remove(&data->iommu); iommu_device_unregister(&data->iommu); @@ -692,14 +728,14 @@ static int mtk_iommu_remove(struct platform_device *pdev) clk_disable_unprepare(data->bclk); devm_free_irq(&pdev->dev, data->irq, data); - component_master_del(&pdev->dev, &mtk_iommu_com_ops); + component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops); return 0; } -static int __maybe_unused mtk_iommu_suspend(struct device *dev) +static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev) { - struct mtk_iommu_data *data = dev_get_drvdata(dev); - struct mtk_iommu_suspend_reg *reg = &data->reg; + struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); + struct mtk_iommu_v1_suspend_reg *reg = &data->reg; void __iomem *base = data->base; reg->standard_axi_mode = readl_relaxed(base + @@ -710,10 +746,10 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) return 0; } -static int __maybe_unused mtk_iommu_resume(struct device *dev) +static int __maybe_unused mtk_iommu_v1_resume(struct device *dev) { - struct mtk_iommu_data *data = dev_get_drvdata(dev); - struct mtk_iommu_suspend_reg *reg = &data->reg; + struct mtk_iommu_v1_data *data = dev_get_drvdata(dev); + struct mtk_iommu_v1_suspend_reg *reg = &data->reg; void __iomem *base = data->base; writel_relaxed(data->m4u_dom->pgt_pa, base + REG_MMU_PT_BASE_ADDR); @@ -726,20 +762,20 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) return 0; } -static const struct dev_pm_ops mtk_iommu_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume) +static const struct dev_pm_ops mtk_iommu_v1_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_v1_suspend, mtk_iommu_v1_resume) }; -static struct platform_driver mtk_iommu_driver = { - .probe = mtk_iommu_probe, - .remove = 
mtk_iommu_remove, +static struct platform_driver mtk_iommu_v1_driver = { + .probe = mtk_iommu_v1_probe, + .remove = mtk_iommu_v1_remove, .driver = { .name = "mtk-iommu-v1", - .of_match_table = mtk_iommu_of_ids, - .pm = &mtk_iommu_pm_ops, + .of_match_table = mtk_iommu_v1_of_ids, + .pm = &mtk_iommu_v1_pm_ops, } }; -module_platform_driver(mtk_iommu_driver); +module_platform_driver(mtk_iommu_v1_driver); MODULE_DESCRIPTION("IOMMU API for MediaTek M4U v1 implementations"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 3833e86c6e7b..c898bcbbce11 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -99,7 +99,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, if (!domain_device) return -ENOMEM; - if (zdev->dma_table) { + if (zdev->dma_table && !zdev->s390_domain) { cc = zpci_dma_exit_device(zdev); if (cc) { rc = -EIO; @@ -107,6 +107,9 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, } } + if (zdev->s390_domain) + zpci_unregister_ioat(zdev, 0); + zdev->dma_table = s390_domain->dma_table; cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, virt_to_phys(zdev->dma_table)); @@ -136,7 +139,13 @@ static int s390_iommu_attach_device(struct iommu_domain *domain, return 0; out_restore: - zpci_dma_init_device(zdev); + if (!zdev->s390_domain) { + zpci_dma_init_device(zdev); + } else { + zdev->dma_table = zdev->s390_domain->dma_table; + zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma, + virt_to_phys(zdev->dma_table)); + } out_free: kfree(domain_device); @@ -167,7 +176,7 @@ static void s390_iommu_detach_device(struct iommu_domain *domain, } spin_unlock_irqrestore(&s390_domain->list_lock, flags); - if (found) { + if (found && (zdev->s390_domain == s390_domain)) { zdev->s390_domain = NULL; zpci_unregister_ioat(zdev, 0); zpci_dma_init_device(zdev); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 6090e647daee..a49979f41eee 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -869,6 +869,9 @@ source "drivers/leds/blink/Kconfig" comment "Flash and Torch LED drivers" source "drivers/leds/flash/Kconfig" +comment "RGB LED drivers" +source "drivers/leds/rgb/Kconfig" + comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index e58ecb36360f..4fd2f92cd198 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -99,6 +99,9 @@ obj-$(CONFIG_LEDS_USER) += uleds.o # Flash and Torch LED Drivers obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/ +# RGB LED Drivers +obj-$(CONFIG_LEDS_CLASS_MULTICOLOR) += rgb/ + # LED Triggers obj-$(CONFIG_LEDS_TRIGGERS) += trigger/ diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c index ed1f20a58bf6..670f3bf2e906 100644 --- a/drivers/leds/flash/leds-ktd2692.c +++ b/drivers/leds/flash/leds-ktd2692.c @@ -279,17 +279,12 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS); ret = PTR_ERR_OR_ZERO(led->ctrl_gpio); - if (ret) { - dev_err(dev, "cannot get ctrl-gpios %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "cannot get ctrl-gpios\n"); - led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS); - ret = PTR_ERR_OR_ZERO(led->aux_gpio); - if (ret) { - dev_err(dev, "cannot get aux-gpios %d\n", ret); - return ret; - } + led->aux_gpio = devm_gpiod_get_optional(dev, "aux", GPIOD_ASIS); + if (IS_ERR(led->aux_gpio)) + return dev_err_probe(dev, 
PTR_ERR(led->aux_gpio), "cannot get aux-gpios\n"); led->regulator = devm_regulator_get(dev, "vin"); if (IS_ERR(led->regulator)) diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c index 22c092a4394a..fc63fce38c19 100644 --- a/drivers/leds/leds-is31fl32xx.c +++ b/drivers/leds/leds-is31fl32xx.c @@ -460,8 +460,14 @@ static int is31fl32xx_probe(struct i2c_client *client, static int is31fl32xx_remove(struct i2c_client *client) { struct is31fl32xx_priv *priv = i2c_get_clientdata(client); + int ret; - return is31fl32xx_reset_regs(priv); + ret = is31fl32xx_reset_regs(priv); + if (ret) + dev_err(&client->dev, "Failed to reset registers on removal (%pe)\n", + ERR_PTR(ret)); + + return 0; } /* diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c index 50b195ff96ca..e129dcc656b8 100644 --- a/drivers/leds/leds-lp50xx.c +++ b/drivers/leds/leds-lp50xx.c @@ -569,10 +569,8 @@ static int lp50xx_remove(struct i2c_client *client) int ret; ret = lp50xx_enable_disable(led, 0); - if (ret) { + if (ret) dev_err(led->dev, "Failed to disable chip\n"); - return ret; - } if (led->regulator) { ret = regulator_disable(led->regulator); diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 017794bb87ae..f72b5d1be3a6 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -318,13 +318,10 @@ static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, } #endif /* CONFIG_LEDS_PCA9532_GPIO */ -static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs) +static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) { int i = n_devs; - if (!data) - return -EINVAL; - while (--i >= 0) { switch (data->leds[i].type) { case PCA9532_TYPE_NONE: @@ -346,8 +343,6 @@ static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs) if (data->gpio.parent) gpiochip_remove(&data->gpio); #endif - - return 0; } static int pca9532_configure(struct i2c_client *client, @@ -555,7 +550,9 @@ static int pca9532_remove(struct i2c_client *client) { struct pca9532_data *data = i2c_get_clientdata(client); - return pca9532_destroy_devices(data, data->chip_info->num_leds); + pca9532_destroy_devices(data, data->chip_info->num_leds); + + return 0; } module_i2c_driver(pca9532_driver); diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c index 208c98918433..8a8b73b4e358 100644 --- a/drivers/leds/leds-regulator.c +++ b/drivers/leds/leds-regulator.c @@ -8,6 +8,7 @@ */ #include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/leds.h> @@ -123,34 +124,37 @@ static int regulator_led_probe(struct platform_device *pdev) { struct led_regulator_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct led_init_data init_data = {}; struct regulator_led *led; struct regulator *vcc; int ret = 0; - if (pdata == NULL) { - dev_err(&pdev->dev, "no platform data\n"); - return -ENODEV; - } - - vcc = devm_regulator_get_exclusive(&pdev->dev, "vled"); + vcc = devm_regulator_get_exclusive(dev, "vled"); if (IS_ERR(vcc)) { - dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name); + dev_err(dev, "Cannot get vcc\n"); return PTR_ERR(vcc); } - led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); if (led == NULL) return -ENOMEM; + init_data.fwnode = dev->fwnode; + led->cdev.max_brightness = led_regulator_get_max_brightness(vcc); - if (pdata->brightness > 
led->cdev.max_brightness) { - dev_err(&pdev->dev, "Invalid default brightness %d\n", + /* Legacy platform data label assignment */ + if (pdata) { + if (pdata->brightness > led->cdev.max_brightness) { + dev_err(dev, "Invalid default brightness %d\n", pdata->brightness); - return -EINVAL; + return -EINVAL; + } + led->cdev.brightness = pdata->brightness; + init_data.default_label = pdata->name; } led->cdev.brightness_set_blocking = regulator_led_brightness_set; - led->cdev.name = pdata->name; led->cdev.flags |= LED_CORE_SUSPENDRESUME; led->vcc = vcc; @@ -162,16 +166,10 @@ static int regulator_led_probe(struct platform_device *pdev) platform_set_drvdata(pdev, led); - ret = led_classdev_register(&pdev->dev, &led->cdev); + ret = led_classdev_register_ext(dev, &led->cdev, &init_data); if (ret < 0) return ret; - /* to expose the default value to userspace */ - led->cdev.brightness = pdata->brightness; - - /* Set the default led status */ - regulator_led_brightness_set(&led->cdev, led->cdev.brightness); - return 0; } @@ -184,10 +182,17 @@ static int regulator_led_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id regulator_led_of_match[] = { + { .compatible = "regulator-led", }, + {} +}; +MODULE_DEVICE_TABLE(of, regulator_led_of_match); + static struct platform_driver regulator_led_driver = { .driver = { - .name = "leds-regulator", - }, + .name = "leds-regulator", + .of_match_table = regulator_led_of_match, + }, .probe = regulator_led_probe, .remove = regulator_led_remove, }; diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig new file mode 100644 index 000000000000..204cf470beae --- /dev/null +++ b/drivers/leds/rgb/Kconfig @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0 + +if LEDS_CLASS_MULTICOLOR + +config LEDS_PWM_MULTICOLOR + tristate "PWM driven multi-color LED Support" + depends on PWM + help + This option enables support for PWM driven monochrome LEDs that are + grouped into multicolor LEDs. + + To compile this driver as a module, choose M here: the module + will be called leds-pwm-multicolor. + +config LEDS_QCOM_LPG + tristate "LED support for Qualcomm LPG" + depends on OF + depends on PWM + depends on SPMI + help + This option enables support for the Light Pulse Generator found in a + wide variety of Qualcomm PMICs. The LPG consists of a number of PWM + channels and typically a shared pattern lookup table and a current + sink, intended to drive RGB LEDs. Each channel can either be used as + a LED, grouped to represent a RGB LED or exposed as PWM channels. + + If compiled as a module, the module will be named leds-qcom-lpg. 
+ +endif # LEDS_CLASS_MULTICOLOR diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile new file mode 100644 index 000000000000..0675bc0f6e18 --- /dev/null +++ b/drivers/leds/rgb/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o +obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c new file mode 100644 index 000000000000..45e38708ecb1 --- /dev/null +++ b/drivers/leds/rgb/leds-pwm-multicolor.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PWM-based multi-color LED control + * + * Copyright 2022 Sven Schwermer <sven.schwermer@disruptive-technologies.com> + */ + +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/led-class-multicolor.h> +#include <linux/leds.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/pwm.h> + +struct pwm_led { + struct pwm_device *pwm; + struct pwm_state state; +}; + +struct pwm_mc_led { + struct led_classdev_mc mc_cdev; + struct mutex lock; + struct pwm_led leds[]; +}; + +static int led_pwm_mc_set(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev); + struct pwm_mc_led *priv = container_of(mc_cdev, struct pwm_mc_led, mc_cdev); + unsigned long long duty; + int ret = 0; + int i; + + led_mc_calc_color_components(mc_cdev, brightness); + + mutex_lock(&priv->lock); + + for (i = 0; i < mc_cdev->num_colors; i++) { + duty = priv->leds[i].state.period; + duty *= mc_cdev->subled_info[i].brightness; + do_div(duty, cdev->max_brightness); + + priv->leds[i].state.duty_cycle = duty; + priv->leds[i].state.enabled = duty > 0; + ret = pwm_apply_state(priv->leds[i].pwm, + &priv->leds[i].state); + if (ret) + break; + } + + mutex_unlock(&priv->lock); + + return ret; +} + +static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv, + struct fwnode_handle *mcnode) +{ + struct mc_subled *subled = priv->mc_cdev.subled_info; + struct fwnode_handle *fwnode; + struct pwm_led *pwmled; + u32 color; + int ret; + + /* iterate over the nodes inside the multi-led node */ + fwnode_for_each_child_node(mcnode, fwnode) { + pwmled = &priv->leds[priv->mc_cdev.num_colors]; + pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL); + if (IS_ERR(pwmled->pwm)) { + ret = PTR_ERR(pwmled->pwm); + dev_err(dev, "unable to request PWM: %d\n", ret); + goto release_fwnode; + } + pwm_init_state(pwmled->pwm, &pwmled->state); + + ret = fwnode_property_read_u32(fwnode, "color", &color); + if (ret) { + dev_err(dev, "cannot read color: %d\n", ret); + goto release_fwnode; + } + + subled[priv->mc_cdev.num_colors].color_index = color; + priv->mc_cdev.num_colors++; + } + + return 0; + +release_fwnode: + fwnode_handle_put(fwnode); + return ret; +} + +static int led_pwm_mc_probe(struct platform_device *pdev) +{ + struct fwnode_handle *mcnode, *fwnode; + struct led_init_data init_data = {}; + struct led_classdev *cdev; + struct mc_subled *subled; + struct pwm_mc_led *priv; + int count = 0; + int ret = 0; + + mcnode = device_get_named_child_node(&pdev->dev, "multi-led"); + if (!mcnode) + return dev_err_probe(&pdev->dev, -ENODEV, + "expected multi-led node\n"); + + /* count the nodes inside the multi-led node */ + fwnode_for_each_child_node(mcnode, fwnode) + count++; + + priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, 
count), + GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto release_mcnode; + } + mutex_init(&priv->lock); + + subled = devm_kcalloc(&pdev->dev, count, sizeof(*subled), GFP_KERNEL); + if (!subled) { + ret = -ENOMEM; + goto release_mcnode; + } + priv->mc_cdev.subled_info = subled; + + /* init the multicolor's LED class device */ + cdev = &priv->mc_cdev.led_cdev; + fwnode_property_read_u32(mcnode, "max-brightness", + &cdev->max_brightness); + cdev->flags = LED_CORE_SUSPENDRESUME; + cdev->brightness_set_blocking = led_pwm_mc_set; + + ret = iterate_subleds(&pdev->dev, priv, mcnode); + if (ret) + goto release_mcnode; + + init_data.fwnode = mcnode; + ret = devm_led_classdev_multicolor_register_ext(&pdev->dev, + &priv->mc_cdev, + &init_data); + if (ret) { + dev_err(&pdev->dev, + "failed to register multicolor PWM led for %s: %d\n", + cdev->name, ret); + goto release_mcnode; + } + + ret = led_pwm_mc_set(cdev, cdev->brightness); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to set led PWM value for %s: %d", + cdev->name, ret); + + platform_set_drvdata(pdev, priv); + return 0; + +release_mcnode: + fwnode_handle_put(mcnode); + return ret; +} + +static const struct of_device_id of_pwm_leds_mc_match[] = { + { .compatible = "pwm-leds-multicolor", }, + {} +}; +MODULE_DEVICE_TABLE(of, of_pwm_leds_mc_match); + +static struct platform_driver led_pwm_mc_driver = { + .probe = led_pwm_mc_probe, + .driver = { + .name = "leds_pwm_multicolor", + .of_match_table = of_pwm_leds_mc_match, + }, +}; +module_platform_driver(led_pwm_mc_driver); + +MODULE_AUTHOR("Sven Schwermer <sven.schwermer@disruptive-technologies.com>"); +MODULE_DESCRIPTION("multi-color PWM LED driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:leds-pwm-multicolor"); diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c new file mode 100644 index 000000000000..02f51cc61837 --- /dev/null +++ b/drivers/leds/rgb/leds-qcom-lpg.c @@ -0,0 +1,1451 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2017-2022 Linaro Ltd + * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. 
+ */ +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/led-class-multicolor.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define LPG_SUBTYPE_REG 0x05 +#define LPG_SUBTYPE_LPG 0x2 +#define LPG_SUBTYPE_PWM 0xb +#define LPG_SUBTYPE_LPG_LITE 0x11 +#define LPG_PATTERN_CONFIG_REG 0x40 +#define LPG_SIZE_CLK_REG 0x41 +#define PWM_CLK_SELECT_MASK GENMASK(1, 0) +#define LPG_PREDIV_CLK_REG 0x42 +#define PWM_FREQ_PRE_DIV_MASK GENMASK(6, 5) +#define PWM_FREQ_EXP_MASK GENMASK(2, 0) +#define PWM_TYPE_CONFIG_REG 0x43 +#define PWM_VALUE_REG 0x44 +#define PWM_ENABLE_CONTROL_REG 0x46 +#define PWM_SYNC_REG 0x47 +#define LPG_RAMP_DURATION_REG 0x50 +#define LPG_HI_PAUSE_REG 0x52 +#define LPG_LO_PAUSE_REG 0x54 +#define LPG_HI_IDX_REG 0x56 +#define LPG_LO_IDX_REG 0x57 +#define PWM_SEC_ACCESS_REG 0xd0 +#define PWM_DTEST_REG(x) (0xe2 + (x) - 1) + +#define TRI_LED_SRC_SEL 0x45 +#define TRI_LED_EN_CTL 0x46 +#define TRI_LED_ATC_CTL 0x47 + +#define LPG_LUT_REG(x) (0x40 + (x) * 2) +#define RAMP_CONTROL_REG 0xc8 + +#define LPG_RESOLUTION 512 +#define LPG_MAX_M 7 + +struct lpg_channel; +struct lpg_data; + +/** + * struct lpg - LPG device context + * @dev: pointer to LPG device + * @map: regmap for register access + * @lock: used to synchronize LED and pwm callback requests + * @pwm: PWM-chip object, if operating in PWM mode + * @data: reference to version specific data + * @lut_base: base address of the LUT block (optional) + * @lut_size: number of entries in the LUT block + * @lut_bitmap: allocation bitmap for LUT entries + * @triled_base: base address of the TRILED block (optional) + * @triled_src: power-source for the TRILED + * @triled_has_atc_ctl: true if there is TRI_LED_ATC_CTL register + * @triled_has_src_sel: true if there is TRI_LED_SRC_SEL register + * @channels: list of PWM channels + * @num_channels: number of @channels + */ +struct lpg { + struct device *dev; + struct regmap *map; + + struct mutex lock; + + struct pwm_chip pwm; + + const struct lpg_data *data; + + u32 lut_base; + u32 lut_size; + unsigned long *lut_bitmap; + + u32 triled_base; + u32 triled_src; + bool triled_has_atc_ctl; + bool triled_has_src_sel; + + struct lpg_channel *channels; + unsigned int num_channels; +}; + +/** + * struct lpg_channel - per channel data + * @lpg: reference to parent lpg + * @base: base address of the PWM channel + * @triled_mask: mask in TRILED to enable this channel + * @lut_mask: mask in LUT to start pattern generator for this channel + * @subtype: PMIC hardware block subtype + * @in_use: channel is exposed to LED framework + * @color: color of the LED attached to this channel + * @dtest_line: DTEST line for output, or 0 if disabled + * @dtest_value: DTEST line configuration + * @pwm_value: duty (in microseconds) of the generated pulses, overridden by LUT + * @enabled: output enabled? 
+ * @period: period (in nanoseconds) of the generated pulses + * @clk_sel: reference clock frequency selector + * @pre_div_sel: divider selector of the reference clock + * @pre_div_exp: exponential divider of the reference clock + * @ramp_enabled: duty cycle is driven by iterating over lookup table + * @ramp_ping_pong: reverse through pattern, rather than wrapping to start + * @ramp_oneshot: perform only a single pass over the pattern + * @ramp_reverse: iterate over pattern backwards + * @ramp_tick_ms: length (in milliseconds) of one step in the pattern + * @ramp_lo_pause_ms: pause (in milliseconds) before iterating over pattern + * @ramp_hi_pause_ms: pause (in milliseconds) after iterating over pattern + * @pattern_lo_idx: start index of associated pattern + * @pattern_hi_idx: last index of associated pattern + */ +struct lpg_channel { + struct lpg *lpg; + + u32 base; + unsigned int triled_mask; + unsigned int lut_mask; + unsigned int subtype; + + bool in_use; + + int color; + + u32 dtest_line; + u32 dtest_value; + + u16 pwm_value; + bool enabled; + + u64 period; + unsigned int clk_sel; + unsigned int pre_div_sel; + unsigned int pre_div_exp; + + bool ramp_enabled; + bool ramp_ping_pong; + bool ramp_oneshot; + bool ramp_reverse; + unsigned short ramp_tick_ms; + unsigned long ramp_lo_pause_ms; + unsigned long ramp_hi_pause_ms; + + unsigned int pattern_lo_idx; + unsigned int pattern_hi_idx; +}; + +/** + * struct lpg_led - logical LED object + * @lpg: lpg context reference + * @cdev: LED class device + * @mcdev: Multicolor LED class device + * @num_channels: number of @channels + * @channels: list of channels associated with the LED + */ +struct lpg_led { + struct lpg *lpg; + + struct led_classdev cdev; + struct led_classdev_mc mcdev; + + unsigned int num_channels; + struct lpg_channel *channels[]; +}; + +/** + * struct lpg_channel_data - per channel initialization data + * @base: base address for PWM channel registers + * @triled_mask: bitmask for controlling this channel in TRILED + */ +struct lpg_channel_data { + unsigned int base; + u8 triled_mask; +}; + +/** + * struct lpg_data - initialization data + * @lut_base: base address of LUT block + * @lut_size: number of entries in LUT + * @triled_base: base address of TRILED + * @triled_has_atc_ctl: true if there is TRI_LED_ATC_CTL register + * @triled_has_src_sel: true if there is TRI_LED_SRC_SEL register + * @num_channels: number of channels in LPG + * @channels: list of channel initialization data + */ +struct lpg_data { + unsigned int lut_base; + unsigned int lut_size; + unsigned int triled_base; + bool triled_has_atc_ctl; + bool triled_has_src_sel; + int num_channels; + const struct lpg_channel_data *channels; +}; + +static int triled_set(struct lpg *lpg, unsigned int mask, unsigned int enable) +{ + /* Skip if we don't have a triled block */ + if (!lpg->triled_base) + return 0; + + return regmap_update_bits(lpg->map, lpg->triled_base + TRI_LED_EN_CTL, + mask, enable); +} + +static int lpg_lut_store(struct lpg *lpg, struct led_pattern *pattern, + size_t len, unsigned int *lo_idx, unsigned int *hi_idx) +{ + unsigned int idx; + u16 val; + int i; + + idx = bitmap_find_next_zero_area(lpg->lut_bitmap, lpg->lut_size, + 0, len, 0); + if (idx >= lpg->lut_size) + return -ENOMEM; + + for (i = 0; i < len; i++) { + val = pattern[i].brightness; + + regmap_bulk_write(lpg->map, lpg->lut_base + LPG_LUT_REG(idx + i), + &val, sizeof(val)); + } + + bitmap_set(lpg->lut_bitmap, idx, len); + + *lo_idx = idx; + *hi_idx = idx + len - 1; + + return 0; +} + +static 
void lpg_lut_free(struct lpg *lpg, unsigned int lo_idx, unsigned int hi_idx) +{ + int len; + + len = hi_idx - lo_idx + 1; + if (len == 1) + return; + + bitmap_clear(lpg->lut_bitmap, lo_idx, len); +} + +static int lpg_lut_sync(struct lpg *lpg, unsigned int mask) +{ + return regmap_write(lpg->map, lpg->lut_base + RAMP_CONTROL_REG, mask); +} + +static const unsigned int lpg_clk_rates[] = {0, 1024, 32768, 19200000}; +static const unsigned int lpg_pre_divs[] = {1, 3, 5, 6}; + +static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period) +{ + unsigned int clk_sel, best_clk = 0; + unsigned int div, best_div = 0; + unsigned int m, best_m = 0; + unsigned int error; + unsigned int best_err = UINT_MAX; + u64 best_period = 0; + u64 max_period; + + /* + * The PWM period is determined by: + * + * resolution * pre_div * 2^M + * period = -------------------------- + * refclk + * + * With resolution fixed at 2^9 bits, pre_div = {1, 3, 5, 6} and + * M = [0..7]. + * + * This allows for periods between 27uS and 384s, as the PWM framework + * wants a period of equal or lower length than requested, reject + * anything below 27uS. + */ + if (period <= (u64)NSEC_PER_SEC * LPG_RESOLUTION / 19200000) + return -EINVAL; + + /* Limit period to largest possible value, to avoid overflows */ + max_period = (u64)NSEC_PER_SEC * LPG_RESOLUTION * 6 * (1 << LPG_MAX_M) / 1024; + if (period > max_period) + period = max_period; + + /* + * Search for the pre_div, refclk and M by solving the rewritten formula + * for each refclk and pre_div value: + * + * period * refclk + * M = log2 ------------------------------------- + * NSEC_PER_SEC * pre_div * resolution + */ + for (clk_sel = 1; clk_sel < ARRAY_SIZE(lpg_clk_rates); clk_sel++) { + u64 numerator = period * lpg_clk_rates[clk_sel]; + + for (div = 0; div < ARRAY_SIZE(lpg_pre_divs); div++) { + u64 denominator = (u64)NSEC_PER_SEC * lpg_pre_divs[div] * LPG_RESOLUTION; + u64 actual; + u64 ratio; + + if (numerator < denominator) + continue; + + ratio = div64_u64(numerator, denominator); + m = ilog2(ratio); + if (m > LPG_MAX_M) + m = LPG_MAX_M; + + actual = DIV_ROUND_UP_ULL(denominator * (1 << m), lpg_clk_rates[clk_sel]); + + error = period - actual; + if (error < best_err) { + best_err = error; + + best_div = div; + best_m = m; + best_clk = clk_sel; + best_period = actual; + } + } + } + + chan->clk_sel = best_clk; + chan->pre_div_sel = best_div; + chan->pre_div_exp = best_m; + chan->period = best_period; + + return 0; +} + +static void lpg_calc_duty(struct lpg_channel *chan, uint64_t duty) +{ + unsigned int max = LPG_RESOLUTION - 1; + unsigned int val; + + val = div64_u64(duty * lpg_clk_rates[chan->clk_sel], + (u64)NSEC_PER_SEC * lpg_pre_divs[chan->pre_div_sel] * (1 << chan->pre_div_exp)); + + chan->pwm_value = min(val, max); +} + +static void lpg_apply_freq(struct lpg_channel *chan) +{ + unsigned long val; + struct lpg *lpg = chan->lpg; + + if (!chan->enabled) + return; + + val = chan->clk_sel; + + /* Specify 9bit resolution, based on the subtype of the channel */ + switch (chan->subtype) { + case LPG_SUBTYPE_LPG: + val |= GENMASK(5, 4); + break; + case LPG_SUBTYPE_PWM: + val |= BIT(2); + break; + case LPG_SUBTYPE_LPG_LITE: + default: + val |= BIT(4); + break; + } + + regmap_write(lpg->map, chan->base + LPG_SIZE_CLK_REG, val); + + val = FIELD_PREP(PWM_FREQ_PRE_DIV_MASK, chan->pre_div_sel) | + FIELD_PREP(PWM_FREQ_EXP_MASK, chan->pre_div_exp); + regmap_write(lpg->map, chan->base + LPG_PREDIV_CLK_REG, val); +} + +#define LPG_ENABLE_GLITCH_REMOVAL BIT(5) + +static void 
lpg_enable_glitch(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + + regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG, + LPG_ENABLE_GLITCH_REMOVAL, 0); +} + +static void lpg_disable_glitch(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + + regmap_update_bits(lpg->map, chan->base + PWM_TYPE_CONFIG_REG, + LPG_ENABLE_GLITCH_REMOVAL, + LPG_ENABLE_GLITCH_REMOVAL); +} + +static void lpg_apply_pwm_value(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + u16 val = chan->pwm_value; + + if (!chan->enabled) + return; + + regmap_bulk_write(lpg->map, chan->base + PWM_VALUE_REG, &val, sizeof(val)); +} + +#define LPG_PATTERN_CONFIG_LO_TO_HI BIT(4) +#define LPG_PATTERN_CONFIG_REPEAT BIT(3) +#define LPG_PATTERN_CONFIG_TOGGLE BIT(2) +#define LPG_PATTERN_CONFIG_PAUSE_HI BIT(1) +#define LPG_PATTERN_CONFIG_PAUSE_LO BIT(0) + +static void lpg_apply_lut_control(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + unsigned int hi_pause; + unsigned int lo_pause; + unsigned int conf = 0; + unsigned int lo_idx = chan->pattern_lo_idx; + unsigned int hi_idx = chan->pattern_hi_idx; + u16 step = chan->ramp_tick_ms; + + if (!chan->ramp_enabled || chan->pattern_lo_idx == chan->pattern_hi_idx) + return; + + hi_pause = DIV_ROUND_UP(chan->ramp_hi_pause_ms, step); + lo_pause = DIV_ROUND_UP(chan->ramp_lo_pause_ms, step); + + if (!chan->ramp_reverse) + conf |= LPG_PATTERN_CONFIG_LO_TO_HI; + if (!chan->ramp_oneshot) + conf |= LPG_PATTERN_CONFIG_REPEAT; + if (chan->ramp_ping_pong) + conf |= LPG_PATTERN_CONFIG_TOGGLE; + if (chan->ramp_hi_pause_ms) + conf |= LPG_PATTERN_CONFIG_PAUSE_HI; + if (chan->ramp_lo_pause_ms) + conf |= LPG_PATTERN_CONFIG_PAUSE_LO; + + regmap_write(lpg->map, chan->base + LPG_PATTERN_CONFIG_REG, conf); + regmap_write(lpg->map, chan->base + LPG_HI_IDX_REG, hi_idx); + regmap_write(lpg->map, chan->base + LPG_LO_IDX_REG, lo_idx); + + regmap_bulk_write(lpg->map, chan->base + LPG_RAMP_DURATION_REG, &step, sizeof(step)); + regmap_write(lpg->map, chan->base + LPG_HI_PAUSE_REG, hi_pause); + regmap_write(lpg->map, chan->base + LPG_LO_PAUSE_REG, lo_pause); +} + +#define LPG_ENABLE_CONTROL_OUTPUT BIT(7) +#define LPG_ENABLE_CONTROL_BUFFER_TRISTATE BIT(5) +#define LPG_ENABLE_CONTROL_SRC_PWM BIT(2) +#define LPG_ENABLE_CONTROL_RAMP_GEN BIT(1) + +static void lpg_apply_control(struct lpg_channel *chan) +{ + unsigned int ctrl; + struct lpg *lpg = chan->lpg; + + ctrl = LPG_ENABLE_CONTROL_BUFFER_TRISTATE; + + if (chan->enabled) + ctrl |= LPG_ENABLE_CONTROL_OUTPUT; + + if (chan->pattern_lo_idx != chan->pattern_hi_idx) + ctrl |= LPG_ENABLE_CONTROL_RAMP_GEN; + else + ctrl |= LPG_ENABLE_CONTROL_SRC_PWM; + + regmap_write(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, ctrl); + + /* + * Due to LPG hardware bug, in the PWM mode, having enabled PWM, + * We have to write PWM values one more time. 
+ */ + if (chan->enabled) + lpg_apply_pwm_value(chan); +} + +#define LPG_SYNC_PWM BIT(0) + +static void lpg_apply_sync(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + + regmap_write(lpg->map, chan->base + PWM_SYNC_REG, LPG_SYNC_PWM); +} + +static int lpg_parse_dtest(struct lpg *lpg) +{ + struct lpg_channel *chan; + struct device_node *np = lpg->dev->of_node; + int count; + int ret; + int i; + + count = of_property_count_u32_elems(np, "qcom,dtest"); + if (count == -EINVAL) { + return 0; + } else if (count < 0) { + ret = count; + goto err_malformed; + } else if (count != lpg->data->num_channels * 2) { + dev_err(lpg->dev, "qcom,dtest needs to be %d items\n", + lpg->data->num_channels * 2); + return -EINVAL; + } + + for (i = 0; i < lpg->data->num_channels; i++) { + chan = &lpg->channels[i]; + + ret = of_property_read_u32_index(np, "qcom,dtest", i * 2, + &chan->dtest_line); + if (ret) + goto err_malformed; + + ret = of_property_read_u32_index(np, "qcom,dtest", i * 2 + 1, + &chan->dtest_value); + if (ret) + goto err_malformed; + } + + return 0; + +err_malformed: + dev_err(lpg->dev, "malformed qcom,dtest\n"); + return ret; +} + +static void lpg_apply_dtest(struct lpg_channel *chan) +{ + struct lpg *lpg = chan->lpg; + + if (!chan->dtest_line) + return; + + regmap_write(lpg->map, chan->base + PWM_SEC_ACCESS_REG, 0xa5); + regmap_write(lpg->map, chan->base + PWM_DTEST_REG(chan->dtest_line), + chan->dtest_value); +} + +static void lpg_apply(struct lpg_channel *chan) +{ + lpg_disable_glitch(chan); + lpg_apply_freq(chan); + lpg_apply_pwm_value(chan); + lpg_apply_control(chan); + lpg_apply_sync(chan); + lpg_apply_lut_control(chan); + lpg_enable_glitch(chan); +} + +static void lpg_brightness_set(struct lpg_led *led, struct led_classdev *cdev, + struct mc_subled *subleds) +{ + enum led_brightness brightness; + struct lpg_channel *chan; + unsigned int triled_enabled = 0; + unsigned int triled_mask = 0; + unsigned int lut_mask = 0; + unsigned int duty; + struct lpg *lpg = led->lpg; + int i; + + for (i = 0; i < led->num_channels; i++) { + chan = led->channels[i]; + brightness = subleds[i].brightness; + + if (brightness == LED_OFF) { + chan->enabled = false; + chan->ramp_enabled = false; + } else if (chan->pattern_lo_idx != chan->pattern_hi_idx) { + lpg_calc_freq(chan, NSEC_PER_MSEC); + + chan->enabled = true; + chan->ramp_enabled = true; + + lut_mask |= chan->lut_mask; + triled_enabled |= chan->triled_mask; + } else { + lpg_calc_freq(chan, NSEC_PER_MSEC); + + duty = div_u64(brightness * chan->period, cdev->max_brightness); + lpg_calc_duty(chan, duty); + chan->enabled = true; + chan->ramp_enabled = false; + + triled_enabled |= chan->triled_mask; + } + + triled_mask |= chan->triled_mask; + + lpg_apply(chan); + } + + /* Toggle triled lines */ + if (triled_mask) + triled_set(lpg, triled_mask, triled_enabled); + + /* Trigger start of ramp generator(s) */ + if (lut_mask) + lpg_lut_sync(lpg, lut_mask); +} + +static void lpg_brightness_single_set(struct led_classdev *cdev, + enum led_brightness value) +{ + struct lpg_led *led = container_of(cdev, struct lpg_led, cdev); + struct mc_subled info; + + mutex_lock(&led->lpg->lock); + + info.brightness = value; + lpg_brightness_set(led, cdev, &info); + + mutex_unlock(&led->lpg->lock); +} + +static void lpg_brightness_mc_set(struct led_classdev *cdev, + enum led_brightness value) +{ + struct led_classdev_mc *mc = lcdev_to_mccdev(cdev); + struct lpg_led *led = container_of(mc, struct lpg_led, mcdev); + + mutex_lock(&led->lpg->lock); + + 
led_mc_calc_color_components(mc, value); + lpg_brightness_set(led, cdev, mc->subled_info); + + mutex_unlock(&led->lpg->lock); +} + +static int lpg_blink_set(struct lpg_led *led, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct lpg_channel *chan; + unsigned int period; + unsigned int triled_mask = 0; + struct lpg *lpg = led->lpg; + u64 duty; + int i; + + if (!*delay_on && !*delay_off) { + *delay_on = 500; + *delay_off = 500; + } + + duty = *delay_on * NSEC_PER_MSEC; + period = (*delay_on + *delay_off) * NSEC_PER_MSEC; + + for (i = 0; i < led->num_channels; i++) { + chan = led->channels[i]; + + lpg_calc_freq(chan, period); + lpg_calc_duty(chan, duty); + + chan->enabled = true; + chan->ramp_enabled = false; + + triled_mask |= chan->triled_mask; + + lpg_apply(chan); + } + + /* Enable triled lines */ + triled_set(lpg, triled_mask, triled_mask); + + chan = led->channels[0]; + duty = div_u64(chan->pwm_value * chan->period, LPG_RESOLUTION); + *delay_on = div_u64(duty, NSEC_PER_MSEC); + *delay_off = div_u64(chan->period - duty, NSEC_PER_MSEC); + + return 0; +} + +static int lpg_blink_single_set(struct led_classdev *cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct lpg_led *led = container_of(cdev, struct lpg_led, cdev); + int ret; + + mutex_lock(&led->lpg->lock); + + ret = lpg_blink_set(led, delay_on, delay_off); + + mutex_unlock(&led->lpg->lock); + + return ret; +} + +static int lpg_blink_mc_set(struct led_classdev *cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct led_classdev_mc *mc = lcdev_to_mccdev(cdev); + struct lpg_led *led = container_of(mc, struct lpg_led, mcdev); + int ret; + + mutex_lock(&led->lpg->lock); + + ret = lpg_blink_set(led, delay_on, delay_off); + + mutex_unlock(&led->lpg->lock); + + return ret; +} + +static int lpg_pattern_set(struct lpg_led *led, struct led_pattern *led_pattern, + u32 len, int repeat) +{ + struct lpg_channel *chan; + struct lpg *lpg = led->lpg; + struct led_pattern *pattern; + unsigned int brightness_a; + unsigned int brightness_b; + unsigned int actual_len; + unsigned int hi_pause; + unsigned int lo_pause; + unsigned int delta_t; + unsigned int lo_idx; + unsigned int hi_idx; + unsigned int i; + bool ping_pong = true; + int ret = -EINVAL; + + /* Hardware only support oneshot or indefinite loops */ + if (repeat != -1 && repeat != 1) + return -EINVAL; + + /* + * The standardized leds-trigger-pattern format defines that the + * brightness of the LED follows a linear transition from one entry + * in the pattern to the next, over the given delta_t time. It + * describes that the way to perform instant transitions a zero-length + * entry should be added following a pattern entry. + * + * The LPG hardware is only able to perform the latter (no linear + * transitions), so require each entry in the pattern to be followed by + * a zero-length transition. + */ + if (len % 2) + return -EINVAL; + + pattern = kcalloc(len / 2, sizeof(*pattern), GFP_KERNEL); + if (!pattern) + return -ENOMEM; + + for (i = 0; i < len; i += 2) { + if (led_pattern[i].brightness != led_pattern[i + 1].brightness) + goto out_free_pattern; + if (led_pattern[i + 1].delta_t != 0) + goto out_free_pattern; + + pattern[i / 2].brightness = led_pattern[i].brightness; + pattern[i / 2].delta_t = led_pattern[i].delta_t; + } + + len /= 2; + + /* + * Specifying a pattern of length 1 causes the hardware to iterate + * through the entire LUT, so prohibit this. 
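(Another aside, not part of the patch: the pairing rule described in the comment above, where each pattern entry must be followed by a zero-length entry of equal brightness, can be modelled with a small standalone program. struct sample is a simplified stand-in for struct led_pattern, and the input pattern is made up.)

#include <stdio.h>

/* Simplified stand-in for struct led_pattern. */
struct sample {
	int delta_t;     /* ms */
	int brightness;
};

/*
 * Collapse a leds-trigger-pattern into LUT samples: every entry must be
 * followed by a zero-length entry of the same brightness (an "instant"
 * transition), and each such pair becomes one LUT sample.
 * Returns the number of LUT samples, or -1 if the pattern is malformed.
 */
static int collapse_pattern(const struct sample *in, int len, struct sample *out)
{
	int i;

	if (len % 2)
		return -1;

	for (i = 0; i < len; i += 2) {
		if (in[i].brightness != in[i + 1].brightness)
			return -1;
		if (in[i + 1].delta_t != 0)
			return -1;

		out[i / 2].brightness = in[i].brightness;
		out[i / 2].delta_t = in[i].delta_t;
	}

	return len / 2;
}

int main(void)
{
	/* 0 -> 255 -> 0, each level held for 100 ms, instant transitions. */
	const struct sample in[] = {
		{ 100, 0 }, { 0, 0 },
		{ 100, 255 }, { 0, 255 },
		{ 100, 0 }, { 0, 0 },
	};
	struct sample out[3];
	int n = collapse_pattern(in, 6, out);

	printf("LUT samples: %d\n", n);	/* prints 3 */
	return 0;
}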
+ */ + if (len < 2) + goto out_free_pattern; + + /* + * The LPG plays patterns with at a fixed pace, a "low pause" can be + * used to stretch the first delay of the pattern and a "high pause" + * the last one. + * + * In order to save space the pattern can be played in "ping pong" + * mode, in which the pattern is first played forward, then "high + * pause" is applied, then the pattern is played backwards and finally + * the "low pause" is applied. + * + * The middle elements of the pattern are used to determine delta_t and + * the "low pause" and "high pause" multipliers are derrived from this. + * + * The first element in the pattern is used to determine "low pause". + * + * If the specified pattern is a palindrome the ping pong mode is + * enabled. In this scenario the delta_t of the middle entry (i.e. the + * last in the programmed pattern) determines the "high pause". + */ + + /* Detect palindromes and use "ping pong" to reduce LUT usage */ + for (i = 0; i < len / 2; i++) { + brightness_a = pattern[i].brightness; + brightness_b = pattern[len - i - 1].brightness; + + if (brightness_a != brightness_b) { + ping_pong = false; + break; + } + } + + /* The pattern length to be written to the LUT */ + if (ping_pong) + actual_len = (len + 1) / 2; + else + actual_len = len; + + /* + * Validate that all delta_t in the pattern are the same, with the + * exception of the middle element in case of ping_pong. + */ + delta_t = pattern[1].delta_t; + for (i = 2; i < len; i++) { + if (pattern[i].delta_t != delta_t) { + /* + * Allow last entry in the full or shortened pattern to + * specify hi pause. Reject other variations. + */ + if (i != actual_len - 1) + goto out_free_pattern; + } + } + + /* LPG_RAMP_DURATION_REG is a 9bit */ + if (delta_t >= BIT(9)) + goto out_free_pattern; + + /* Find "low pause" and "high pause" in the pattern */ + lo_pause = pattern[0].delta_t; + hi_pause = pattern[actual_len - 1].delta_t; + + mutex_lock(&lpg->lock); + ret = lpg_lut_store(lpg, pattern, actual_len, &lo_idx, &hi_idx); + if (ret < 0) + goto out_unlock; + + for (i = 0; i < led->num_channels; i++) { + chan = led->channels[i]; + + chan->ramp_tick_ms = delta_t; + chan->ramp_ping_pong = ping_pong; + chan->ramp_oneshot = repeat != -1; + + chan->ramp_lo_pause_ms = lo_pause; + chan->ramp_hi_pause_ms = hi_pause; + + chan->pattern_lo_idx = lo_idx; + chan->pattern_hi_idx = hi_idx; + } + +out_unlock: + mutex_unlock(&lpg->lock); +out_free_pattern: + kfree(pattern); + + return ret; +} + +static int lpg_pattern_single_set(struct led_classdev *cdev, + struct led_pattern *pattern, u32 len, + int repeat) +{ + struct lpg_led *led = container_of(cdev, struct lpg_led, cdev); + int ret; + + ret = lpg_pattern_set(led, pattern, len, repeat); + if (ret < 0) + return ret; + + lpg_brightness_single_set(cdev, LED_FULL); + + return 0; +} + +static int lpg_pattern_mc_set(struct led_classdev *cdev, + struct led_pattern *pattern, u32 len, + int repeat) +{ + struct led_classdev_mc *mc = lcdev_to_mccdev(cdev); + struct lpg_led *led = container_of(mc, struct lpg_led, mcdev); + int ret; + + ret = lpg_pattern_set(led, pattern, len, repeat); + if (ret < 0) + return ret; + + led_mc_calc_color_components(mc, LED_FULL); + lpg_brightness_set(led, cdev, mc->subled_info); + + return 0; +} + +static int lpg_pattern_clear(struct lpg_led *led) +{ + struct lpg_channel *chan; + struct lpg *lpg = led->lpg; + int i; + + mutex_lock(&lpg->lock); + + chan = led->channels[0]; + lpg_lut_free(lpg, chan->pattern_lo_idx, chan->pattern_hi_idx); + + for (i = 0; i < 
led->num_channels; i++) { + chan = led->channels[i]; + chan->pattern_lo_idx = 0; + chan->pattern_hi_idx = 0; + } + + mutex_unlock(&lpg->lock); + + return 0; +} + +static int lpg_pattern_single_clear(struct led_classdev *cdev) +{ + struct lpg_led *led = container_of(cdev, struct lpg_led, cdev); + + return lpg_pattern_clear(led); +} + +static int lpg_pattern_mc_clear(struct led_classdev *cdev) +{ + struct led_classdev_mc *mc = lcdev_to_mccdev(cdev); + struct lpg_led *led = container_of(mc, struct lpg_led, mcdev); + + return lpg_pattern_clear(led); +} + +static int lpg_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct lpg *lpg = container_of(chip, struct lpg, pwm); + struct lpg_channel *chan = &lpg->channels[pwm->hwpwm]; + + return chan->in_use ? -EBUSY : 0; +} + +/* + * Limitations: + * - Updating both duty and period is not done atomically, so the output signal + * will momentarily be a mix of the settings. + * - Changed parameters takes effect immediately. + * - A disabled channel outputs a logical 0. + */ +static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct lpg *lpg = container_of(chip, struct lpg, pwm); + struct lpg_channel *chan = &lpg->channels[pwm->hwpwm]; + int ret = 0; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + mutex_lock(&lpg->lock); + + if (state->enabled) { + ret = lpg_calc_freq(chan, state->period); + if (ret < 0) + goto out_unlock; + + lpg_calc_duty(chan, state->duty_cycle); + } + chan->enabled = state->enabled; + + lpg_apply(chan); + + triled_set(lpg, chan->triled_mask, chan->enabled ? chan->triled_mask : 0); + +out_unlock: + mutex_unlock(&lpg->lock); + + return ret; +} + +static void lpg_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct lpg *lpg = container_of(chip, struct lpg, pwm); + struct lpg_channel *chan = &lpg->channels[pwm->hwpwm]; + unsigned int pre_div; + unsigned int refclk; + unsigned int val; + unsigned int m; + u16 pwm_value; + int ret; + + ret = regmap_read(lpg->map, chan->base + LPG_SIZE_CLK_REG, &val); + if (ret) + return; + + refclk = lpg_clk_rates[val & PWM_CLK_SELECT_MASK]; + if (refclk) { + ret = regmap_read(lpg->map, chan->base + LPG_PREDIV_CLK_REG, &val); + if (ret) + return; + + pre_div = lpg_pre_divs[FIELD_GET(PWM_FREQ_PRE_DIV_MASK, val)]; + m = FIELD_GET(PWM_FREQ_EXP_MASK, val); + + ret = regmap_bulk_read(lpg->map, chan->base + PWM_VALUE_REG, &pwm_value, sizeof(pwm_value)); + if (ret) + return; + + state->period = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1 << m), refclk); + state->duty_cycle = DIV_ROUND_UP_ULL((u64)NSEC_PER_SEC * pwm_value * pre_div * (1 << m), refclk); + } else { + state->period = 0; + state->duty_cycle = 0; + } + + ret = regmap_read(lpg->map, chan->base + PWM_ENABLE_CONTROL_REG, &val); + if (ret) + return; + + state->enabled = FIELD_GET(LPG_ENABLE_CONTROL_OUTPUT, val); + state->polarity = PWM_POLARITY_NORMAL; + + if (state->duty_cycle > state->period) + state->duty_cycle = state->period; +} + +static const struct pwm_ops lpg_pwm_ops = { + .request = lpg_pwm_request, + .apply = lpg_pwm_apply, + .get_state = lpg_pwm_get_state, + .owner = THIS_MODULE, +}; + +static int lpg_add_pwm(struct lpg *lpg) +{ + int ret; + + lpg->pwm.base = -1; + lpg->pwm.dev = lpg->dev; + lpg->pwm.npwm = lpg->num_channels; + lpg->pwm.ops = &lpg_pwm_ops; + + ret = pwmchip_add(&lpg->pwm); + if (ret) + dev_err(lpg->dev, "failed to add PWM chip: ret %d\n", ret); + + return ret; +} 
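(A final aside on the arithmetic, not part of the patch: lpg_pwm_get_state() above recovers the period as NSEC_PER_SEC * LPG_RESOLUTION * pre_div * 2^m / refclk. The standalone sketch below checks that calculation assuming the driver's 9-bit resolution (LPG_RESOLUTION = 512) and a hypothetical 19.2 MHz reference clock; the divider values and helper names are illustrative only.)

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC   1000000000ULL
#define LPG_RESOLUTION 512ULL	/* assumed 9-bit PWM resolution */

#define DIV_ROUND_UP_ULL(n, d)	(((n) + (d) - 1) / (d))

/*
 * Model of the reconstruction in lpg_pwm_get_state(): one PWM period is
 * LPG_RESOLUTION * pre_div * 2^m reference clock cycles; the duty cycle
 * scales the same way with the programmed pwm_value.
 */
static uint64_t lpg_period_ns(unsigned int refclk, unsigned int pre_div,
			      unsigned int m)
{
	return DIV_ROUND_UP_ULL(NSEC_PER_SEC * LPG_RESOLUTION * pre_div * (1ULL << m),
				refclk);
}

static uint64_t lpg_duty_ns(unsigned int refclk, unsigned int pre_div,
			    unsigned int m, unsigned int pwm_value)
{
	return DIV_ROUND_UP_ULL(NSEC_PER_SEC * (uint64_t)pwm_value * pre_div * (1ULL << m),
				refclk);
}

int main(void)
{
	/* Hypothetical settings: 19.2 MHz refclk, pre_div 5, exponent 0. */
	unsigned int refclk = 19200000, pre_div = 5, m = 0;

	/* Prints 133334 ns, i.e. a 7.5 kHz PWM signal. */
	printf("period = %llu ns\n",
	       (unsigned long long)lpg_period_ns(refclk, pre_div, m));
	/* pwm_value 256 of 512 gives roughly 50%% duty: 66667 ns. */
	printf("duty at pwm_value 256 = %llu ns\n",
	       (unsigned long long)lpg_duty_ns(refclk, pre_div, m, 256));
	return 0;
}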
+ +static int lpg_parse_channel(struct lpg *lpg, struct device_node *np, + struct lpg_channel **channel) +{ + struct lpg_channel *chan; + u32 color = LED_COLOR_ID_GREEN; + u32 reg; + int ret; + + ret = of_property_read_u32(np, "reg", ®); + if (ret || !reg || reg > lpg->num_channels) { + dev_err(lpg->dev, "invalid \"reg\" of %pOFn\n", np); + return -EINVAL; + } + + chan = &lpg->channels[reg - 1]; + chan->in_use = true; + + ret = of_property_read_u32(np, "color", &color); + if (ret < 0 && ret != -EINVAL) { + dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np); + return ret; + } + + chan->color = color; + + *channel = chan; + + return 0; +} + +static int lpg_add_led(struct lpg *lpg, struct device_node *np) +{ + struct led_init_data init_data = {}; + struct led_classdev *cdev; + struct device_node *child; + struct mc_subled *info; + struct lpg_led *led; + const char *state; + int num_channels; + u32 color = 0; + int ret; + int i; + + ret = of_property_read_u32(np, "color", &color); + if (ret < 0 && ret != -EINVAL) { + dev_err(lpg->dev, "failed to parse \"color\" of %pOF\n", np); + return ret; + } + + if (color == LED_COLOR_ID_RGB) + num_channels = of_get_available_child_count(np); + else + num_channels = 1; + + led = devm_kzalloc(lpg->dev, struct_size(led, channels, num_channels), GFP_KERNEL); + if (!led) + return -ENOMEM; + + led->lpg = lpg; + led->num_channels = num_channels; + + if (color == LED_COLOR_ID_RGB) { + info = devm_kcalloc(lpg->dev, num_channels, sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + i = 0; + for_each_available_child_of_node(np, child) { + ret = lpg_parse_channel(lpg, child, &led->channels[i]); + if (ret < 0) + return ret; + + info[i].color_index = led->channels[i]->color; + info[i].intensity = 0; + i++; + } + + led->mcdev.subled_info = info; + led->mcdev.num_colors = num_channels; + + cdev = &led->mcdev.led_cdev; + cdev->brightness_set = lpg_brightness_mc_set; + cdev->blink_set = lpg_blink_mc_set; + + /* Register pattern accessors only if we have a LUT block */ + if (lpg->lut_base) { + cdev->pattern_set = lpg_pattern_mc_set; + cdev->pattern_clear = lpg_pattern_mc_clear; + } + } else { + ret = lpg_parse_channel(lpg, np, &led->channels[0]); + if (ret < 0) + return ret; + + cdev = &led->cdev; + cdev->brightness_set = lpg_brightness_single_set; + cdev->blink_set = lpg_blink_single_set; + + /* Register pattern accessors only if we have a LUT block */ + if (lpg->lut_base) { + cdev->pattern_set = lpg_pattern_single_set; + cdev->pattern_clear = lpg_pattern_single_clear; + } + } + + cdev->default_trigger = of_get_property(np, "linux,default-trigger", NULL); + cdev->max_brightness = LPG_RESOLUTION - 1; + + if (!of_property_read_string(np, "default-state", &state) && + !strcmp(state, "on")) + cdev->brightness = cdev->max_brightness; + else + cdev->brightness = LED_OFF; + + cdev->brightness_set(cdev, cdev->brightness); + + init_data.fwnode = of_fwnode_handle(np); + + if (color == LED_COLOR_ID_RGB) + ret = devm_led_classdev_multicolor_register_ext(lpg->dev, &led->mcdev, &init_data); + else + ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data); + if (ret) + dev_err(lpg->dev, "unable to register %s\n", cdev->name); + + return ret; +} + +static int lpg_init_channels(struct lpg *lpg) +{ + const struct lpg_data *data = lpg->data; + struct lpg_channel *chan; + int i; + + lpg->num_channels = data->num_channels; + lpg->channels = devm_kcalloc(lpg->dev, data->num_channels, + sizeof(struct lpg_channel), GFP_KERNEL); + if (!lpg->channels) + return 
-ENOMEM; + + for (i = 0; i < data->num_channels; i++) { + chan = &lpg->channels[i]; + + chan->lpg = lpg; + chan->base = data->channels[i].base; + chan->triled_mask = data->channels[i].triled_mask; + chan->lut_mask = BIT(i); + + regmap_read(lpg->map, chan->base + LPG_SUBTYPE_REG, &chan->subtype); + } + + return 0; +} + +static int lpg_init_triled(struct lpg *lpg) +{ + struct device_node *np = lpg->dev->of_node; + int ret; + + /* Skip initialization if we don't have a triled block */ + if (!lpg->data->triled_base) + return 0; + + lpg->triled_base = lpg->data->triled_base; + lpg->triled_has_atc_ctl = lpg->data->triled_has_atc_ctl; + lpg->triled_has_src_sel = lpg->data->triled_has_src_sel; + + if (lpg->triled_has_src_sel) { + ret = of_property_read_u32(np, "qcom,power-source", &lpg->triled_src); + if (ret || lpg->triled_src == 2 || lpg->triled_src > 3) { + dev_err(lpg->dev, "invalid power source\n"); + return -EINVAL; + } + } + + /* Disable automatic trickle charge LED */ + if (lpg->triled_has_atc_ctl) + regmap_write(lpg->map, lpg->triled_base + TRI_LED_ATC_CTL, 0); + + /* Configure power source */ + if (lpg->triled_has_src_sel) + regmap_write(lpg->map, lpg->triled_base + TRI_LED_SRC_SEL, lpg->triled_src); + + /* Default all outputs to off */ + regmap_write(lpg->map, lpg->triled_base + TRI_LED_EN_CTL, 0); + + return 0; +} + +static int lpg_init_lut(struct lpg *lpg) +{ + const struct lpg_data *data = lpg->data; + + if (!data->lut_base) + return 0; + + lpg->lut_base = data->lut_base; + lpg->lut_size = data->lut_size; + + lpg->lut_bitmap = devm_bitmap_zalloc(lpg->dev, lpg->lut_size, GFP_KERNEL); + if (!lpg->lut_bitmap) + return -ENOMEM; + + return 0; +} + +static int lpg_probe(struct platform_device *pdev) +{ + struct device_node *np; + struct lpg *lpg; + int ret; + int i; + + lpg = devm_kzalloc(&pdev->dev, sizeof(*lpg), GFP_KERNEL); + if (!lpg) + return -ENOMEM; + + lpg->data = of_device_get_match_data(&pdev->dev); + if (!lpg->data) + return -EINVAL; + + platform_set_drvdata(pdev, lpg); + + lpg->dev = &pdev->dev; + mutex_init(&lpg->lock); + + lpg->map = dev_get_regmap(pdev->dev.parent, NULL); + if (!lpg->map) + return dev_err_probe(&pdev->dev, -ENXIO, "parent regmap unavailable\n"); + + ret = lpg_init_channels(lpg); + if (ret < 0) + return ret; + + ret = lpg_parse_dtest(lpg); + if (ret < 0) + return ret; + + ret = lpg_init_triled(lpg); + if (ret < 0) + return ret; + + ret = lpg_init_lut(lpg); + if (ret < 0) + return ret; + + for_each_available_child_of_node(pdev->dev.of_node, np) { + ret = lpg_add_led(lpg, np); + if (ret) + return ret; + } + + for (i = 0; i < lpg->num_channels; i++) + lpg_apply_dtest(&lpg->channels[i]); + + return lpg_add_pwm(lpg); +} + +static int lpg_remove(struct platform_device *pdev) +{ + struct lpg *lpg = platform_get_drvdata(pdev); + + pwmchip_remove(&lpg->pwm); + + return 0; +} + +static const struct lpg_data pm8916_pwm_data = { + .num_channels = 1, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xbc00 }, + }, +}; + +static const struct lpg_data pm8941_lpg_data = { + .lut_base = 0xb000, + .lut_size = 64, + + .triled_base = 0xd000, + .triled_has_atc_ctl = true, + .triled_has_src_sel = true, + + .num_channels = 8, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100 }, + { .base = 0xb200 }, + { .base = 0xb300 }, + { .base = 0xb400 }, + { .base = 0xb500, .triled_mask = BIT(5) }, + { .base = 0xb600, .triled_mask = BIT(6) }, + { .base = 0xb700, .triled_mask = BIT(7) }, + { .base = 0xb800 }, + }, +}; + +static const struct lpg_data 
pm8994_lpg_data = { + .lut_base = 0xb000, + .lut_size = 64, + + .num_channels = 6, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100 }, + { .base = 0xb200 }, + { .base = 0xb300 }, + { .base = 0xb400 }, + { .base = 0xb500 }, + { .base = 0xb600 }, + }, +}; + +static const struct lpg_data pmi8994_lpg_data = { + .lut_base = 0xb000, + .lut_size = 24, + + .triled_base = 0xd000, + .triled_has_atc_ctl = true, + .triled_has_src_sel = true, + + .num_channels = 4, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100, .triled_mask = BIT(5) }, + { .base = 0xb200, .triled_mask = BIT(6) }, + { .base = 0xb300, .triled_mask = BIT(7) }, + { .base = 0xb400 }, + }, +}; + +static const struct lpg_data pmi8998_lpg_data = { + .lut_base = 0xb000, + .lut_size = 49, + + .triled_base = 0xd000, + + .num_channels = 6, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100 }, + { .base = 0xb200 }, + { .base = 0xb300, .triled_mask = BIT(5) }, + { .base = 0xb400, .triled_mask = BIT(6) }, + { .base = 0xb500, .triled_mask = BIT(7) }, + { .base = 0xb600 }, + }, +}; + +static const struct lpg_data pm8150b_lpg_data = { + .lut_base = 0xb000, + .lut_size = 24, + + .triled_base = 0xd000, + + .num_channels = 2, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100, .triled_mask = BIT(7) }, + { .base = 0xb200, .triled_mask = BIT(6) }, + }, +}; + +static const struct lpg_data pm8150l_lpg_data = { + .lut_base = 0xb000, + .lut_size = 48, + + .triled_base = 0xd000, + + .num_channels = 5, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xb100, .triled_mask = BIT(7) }, + { .base = 0xb200, .triled_mask = BIT(6) }, + { .base = 0xb300, .triled_mask = BIT(5) }, + { .base = 0xbc00 }, + { .base = 0xbd00 }, + + }, +}; + +static const struct lpg_data pm8350c_pwm_data = { + .triled_base = 0xef00, + + .num_channels = 4, + .channels = (const struct lpg_channel_data[]) { + { .base = 0xe800, .triled_mask = BIT(7) }, + { .base = 0xe900, .triled_mask = BIT(6) }, + { .base = 0xea00, .triled_mask = BIT(5) }, + { .base = 0xeb00 }, + }, +}; + +static const struct of_device_id lpg_of_table[] = { + { .compatible = "qcom,pm8150b-lpg", .data = &pm8150b_lpg_data }, + { .compatible = "qcom,pm8150l-lpg", .data = &pm8150l_lpg_data }, + { .compatible = "qcom,pm8350c-pwm", .data = &pm8350c_pwm_data }, + { .compatible = "qcom,pm8916-pwm", .data = &pm8916_pwm_data }, + { .compatible = "qcom,pm8941-lpg", .data = &pm8941_lpg_data }, + { .compatible = "qcom,pm8994-lpg", .data = &pm8994_lpg_data }, + { .compatible = "qcom,pmi8994-lpg", .data = &pmi8994_lpg_data }, + { .compatible = "qcom,pmi8998-lpg", .data = &pmi8998_lpg_data }, + { .compatible = "qcom,pmc8180c-lpg", .data = &pm8150l_lpg_data }, + {} +}; +MODULE_DEVICE_TABLE(of, lpg_of_table); + +static struct platform_driver lpg_driver = { + .probe = lpg_probe, + .remove = lpg_remove, + .driver = { + .name = "qcom-spmi-lpg", + .of_match_table = lpg_of_table, + }, +}; +module_platform_driver(lpg_driver); + +MODULE_DESCRIPTION("Qualcomm LPG LED driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index 5cdc361da37c..539a2ed4e13d 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -44,6 +44,7 @@ config ADB_IOP config ADB_CUDA bool "Support for Cuda/Egret based Macs and PowerMacs" depends on (ADB || PPC_PMAC) && !PPC_PMAC64 + select RTC_LIB help This provides support for Cuda/Egret based Macintosh and Power Macintosh systems. 
This includes most m68k based Macs, @@ -57,6 +58,7 @@ config ADB_CUDA config ADB_PMU bool "Support for PMU based PowerMacs and PowerBooks" depends on PPC_PMAC || MAC + select RTC_LIB help On PowerBooks, iBooks, and recent iMacs and Power Macintoshes, the PMU is an embedded microprocessor whose primary function is to @@ -67,6 +69,10 @@ config ADB_PMU this device; you should do so if your machine is one of those mentioned above. +config ADB_PMU_EVENT + def_bool y + depends on ADB_PMU && INPUT=y + config ADB_PMU_LED bool "Support for the Power/iBook front LED" depends on PPC_PMAC && ADB_PMU diff --git a/drivers/macintosh/Makefile b/drivers/macintosh/Makefile index 49819b1b6f20..712edcb3e0b0 100644 --- a/drivers/macintosh/Makefile +++ b/drivers/macintosh/Makefile @@ -12,7 +12,8 @@ obj-$(CONFIG_MAC_EMUMOUSEBTN) += mac_hid.o obj-$(CONFIG_INPUT_ADBHID) += adbhid.o obj-$(CONFIG_ANSLCD) += ans-lcd.o -obj-$(CONFIG_ADB_PMU) += via-pmu.o via-pmu-event.o +obj-$(CONFIG_ADB_PMU) += via-pmu.o +obj-$(CONFIG_ADB_PMU_EVENT) += via-pmu-event.o obj-$(CONFIG_ADB_PMU_LED) += via-pmu-led.o obj-$(CONFIG_PMAC_BACKLIGHT) += via-pmu-backlight.o obj-$(CONFIG_ADB_CUDA) += via-cuda.o diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 73b396189039..439fab4eaa85 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -38,10 +38,10 @@ #include <linux/kthread.h> #include <linux/platform_device.h> #include <linux/mutex.h> +#include <linux/of.h> #include <linux/uaccess.h> #ifdef CONFIG_PPC -#include <asm/prom.h> #include <asm/machdep.h> #endif diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index 994ba5cb3678..b2fe7a3dc471 100644 --- a/drivers/macintosh/adbhid.c +++ b/drivers/macintosh/adbhid.c @@ -789,7 +789,8 @@ adbhid_input_register(int id, int default_id, int original_handler_id, switch (default_id) { case ADB_KEYBOARD: - hid->keycode = kmalloc(sizeof(adb_to_linux_keycodes), GFP_KERNEL); + hid->keycode = kmemdup(adb_to_linux_keycodes, + sizeof(adb_to_linux_keycodes), GFP_KERNEL); if (!hid->keycode) { err = -ENOMEM; goto fail; @@ -797,8 +798,6 @@ adbhid_input_register(int id, int default_id, int original_handler_id, sprintf(hid->name, "ADB keyboard"); - memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes)); - switch (original_handler_id) { default: keyboard_type = "<unknown>"; @@ -817,9 +816,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id, case 0xC4: case 0xC7: keyboard_type = "ISO, swapping keys"; input_dev->id.version = ADB_KEYBOARD_ISO; - i = hid->keycode[10]; - hid->keycode[10] = hid->keycode[50]; - hid->keycode[50] = i; + swap(hid->keycode[10], hid->keycode[50]); break; case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A: diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c index 01eeb2336d1a..877e8cb23128 100644 --- a/drivers/macintosh/ams/ams-core.c +++ b/drivers/macintosh/ams/ams-core.c @@ -50,7 +50,7 @@ static ssize_t ams_show_current(struct device *dev, ams_sensors(&x, &y, &z); mutex_unlock(&ams_info.lock); - return snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z); + return sysfs_emit(buf, "%d %d %d\n", x, y, z); } static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL); diff --git a/drivers/macintosh/ams/ams-i2c.c b/drivers/macintosh/ams/ams-i2c.c index 21271b2e9259..d2f0cde6f9c7 100644 --- a/drivers/macintosh/ams/ams-i2c.c +++ b/drivers/macintosh/ams/ams-i2c.c @@ -256,8 +256,6 @@ static void ams_i2c_exit(void) int __init ams_i2c_init(struct device_node *np) { - int result; - /* 
Set implementation stuff */ ams_info.of_node = np; ams_info.exit = ams_i2c_exit; @@ -266,7 +264,5 @@ int __init ams_i2c_init(struct device_node *np) ams_info.clear_irq = ams_i2c_clear_irq; ams_info.bustype = BUS_I2C; - result = i2c_add_driver(&ams_i2c_driver); - - return result; + return i2c_add_driver(&ams_i2c_driver); } diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index b4821c751d04..fa904b24a600 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -11,10 +11,10 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/fs.h> +#include <linux/of.h> #include <linux/uaccess.h> #include <asm/sections.h> -#include <asm/prom.h> #include <asm/io.h> #include "ans-lcd.h" diff --git a/drivers/macintosh/macio-adb.c b/drivers/macintosh/macio-adb.c index dc634c2932fd..9b63bd2551c6 100644 --- a/drivers/macintosh/macio-adb.c +++ b/drivers/macintosh/macio-adb.c @@ -9,8 +9,11 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/pgtable.h> -#include <asm/prom.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/adb.h> + #include <asm/io.h> #include <asm/hydra.h> #include <asm/irq.h> diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index 1943a007e2d5..1ec1e5984563 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -20,13 +20,14 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/of_irq.h> #include <asm/machdep.h> #include <asm/macio.h> #include <asm/pmac_feature.h> -#include <asm/prom.h> #undef DEBUG @@ -472,7 +473,7 @@ static void macio_pci_add_devices(struct macio_chip *chip) root_res = &rdev->resource[0]; /* First scan 1st level */ - for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { + for_each_child_of_node(pnode, np) { if (macio_skip_device(np)) continue; of_node_get(np); @@ -489,7 +490,7 @@ static void macio_pci_add_devices(struct macio_chip *chip) /* Add media bay devices if any */ if (mbdev) { pnode = mbdev->ofdev.dev.of_node; - for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { + for_each_child_of_node(pnode, np) { if (macio_skip_device(np)) continue; of_node_get(np); @@ -502,7 +503,7 @@ static void macio_pci_add_devices(struct macio_chip *chip) /* Add serial ports if any */ if (sdev) { pnode = sdev->ofdev.dev.of_node; - for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) { + for_each_child_of_node(pnode, np) { if (macio_skip_device(np)) continue; of_node_get(np); diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index 27f5eefc508f..2bbe359b26d9 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/stat.h> #include <asm/macio.h> diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c index b17660c022eb..36070c6586d1 100644 --- a/drivers/macintosh/mediabay.c +++ b/drivers/macintosh/mediabay.c @@ -17,7 +17,7 @@ #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/pgtable.h> -#include <asm/prom.h> + #include <asm/io.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 60311e8d6240..c28893e41a8b 100644 --- 
a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -27,7 +27,6 @@ #include <linux/of_irq.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/dbdma.h> diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index a4fbc3fc713d..b495bfa77896 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -41,7 +41,6 @@ #include <asm/byteorder.h> #include <asm/io.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/smu.h> @@ -1087,7 +1086,7 @@ static int smu_open(struct inode *inode, struct file *file) unsigned long flags; pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); - if (pp == 0) + if (!pp) return -ENOMEM; spin_lock_init(&pp->lock); pp->mode = smu_file_commands; @@ -1254,7 +1253,7 @@ static __poll_t smu_fpoll(struct file *file, poll_table *wait) __poll_t mask = 0; unsigned long flags; - if (pp == 0) + if (!pp) return 0; if (pp->mode == smu_file_commands) { @@ -1277,7 +1276,7 @@ static int smu_release(struct inode *inode, struct file *file) unsigned long flags; unsigned int busy; - if (pp == 0) + if (!pp) return 0; file->private_data = NULL; diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c index 7e218437730c..e604cbc91763 100644 --- a/drivers/macintosh/therm_adt746x.c +++ b/drivers/macintosh/therm_adt746x.c @@ -27,7 +27,6 @@ #include <linux/freezer.h> #include <linux/of_platform.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c index f55f6adf5e5f..9226b74fa08f 100644 --- a/drivers/macintosh/therm_windtunnel.c +++ b/drivers/macintosh/therm_windtunnel.c @@ -38,7 +38,6 @@ #include <linux/kthread.h> #include <linux/of_platform.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index 3d0d0b9d471d..5071289063f0 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c @@ -18,8 +18,10 @@ #include <linux/cuda.h> #include <linux/spinlock.h> #include <linux/interrupt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + #ifdef CONFIG_PPC -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #else @@ -237,10 +239,10 @@ int __init find_via_cuda(void) const u32 *reg; int err; - if (vias != 0) + if (vias) return 1; vias = of_find_node_by_name(NULL, "via-cuda"); - if (vias == 0) + if (!vias) return 0; reg = of_get_property(vias, "reg", NULL); @@ -518,7 +520,7 @@ cuda_write(struct adb_request *req) req->reply_len = 0; spin_lock_irqsave(&cuda_lock, flags); - if (current_req != 0) { + if (current_req) { last_req->next = req; last_req = req; } else { diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c index 50ada02ae75d..2194016122d2 100644 --- a/drivers/macintosh/via-pmu-backlight.c +++ b/drivers/macintosh/via-pmu-backlight.c @@ -12,7 +12,6 @@ #include <linux/adb.h> #include <linux/pmu.h> #include <asm/backlight.h> -#include <asm/prom.h> #define MAX_PMU_LEVEL 0xFF diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index ae067ab2373d..a4fb16d7db3c 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -25,7 +25,7 @@ #include <linux/leds.h> #include <linux/adb.h> #include <linux/pmu.h> -#include <asm/prom.h> +#include <linux/of.h> 
static spinlock_t pmu_blink_lock; static struct adb_request pmu_blink_req; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 4b98bc26a94b..49657962d892 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -59,7 +59,6 @@ #include <asm/pmac_feature.h> #include <asm/pmac_pfunc.h> #include <asm/pmac_low_i2c.h> -#include <asm/prom.h> #include <asm/mmu_context.h> #include <asm/cputable.h> #include <asm/time.h> @@ -161,7 +160,7 @@ static unsigned char __iomem *gpio_reg; static int gpio_irq = 0; static int gpio_irq_enabled = -1; static volatile int pmu_suspended; -static spinlock_t pmu_lock; +static DEFINE_SPINLOCK(pmu_lock); static u8 pmu_intr_mask; static int pmu_version; static int drop_interrupts; @@ -305,8 +304,6 @@ int __init find_via_pmu(void) goto fail; } - spin_lock_init(&pmu_lock); - pmu_has_adb = 1; pmu_intr_mask = PMU_INT_PCEJECT | @@ -388,8 +385,6 @@ int __init find_via_pmu(void) pmu_kind = PMU_UNKNOWN; - spin_lock_init(&pmu_lock); - pmu_has_adb = 1; pmu_intr_mask = PMU_INT_PCEJECT | @@ -1459,7 +1454,7 @@ next: pmu_pass_intr(data, len); /* len == 6 is probably a bad check. But how do I * know what PMU versions send what events here? */ - if (len == 6) { + if (IS_ENABLED(CONFIG_ADB_PMU_EVENT) && len == 6) { via_pmu_event(PMU_EVT_POWER, !!(data[1]&8)); via_pmu_event(PMU_EVT_LID, data[1]&1); } diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c index e7dec328c7cf..6ad6441abcbc 100644 --- a/drivers/macintosh/windfarm_ad7417_sensor.c +++ b/drivers/macintosh/windfarm_ad7417_sensor.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/i2c.h> -#include <asm/prom.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c index 07f91ec1f960..5307b1e34261 100644 --- a/drivers/macintosh/windfarm_core.c +++ b/drivers/macintosh/windfarm_core.c @@ -35,8 +35,6 @@ #include <linux/mutex.h> #include <linux/freezer.h> -#include <asm/prom.h> - #include "windfarm.h" #define VERSION "0.2" diff --git a/drivers/macintosh/windfarm_cpufreq_clamp.c b/drivers/macintosh/windfarm_cpufreq_clamp.c index 7b726f00f183..28d18ef22bbb 100644 --- a/drivers/macintosh/windfarm_cpufreq_clamp.c +++ b/drivers/macintosh/windfarm_cpufreq_clamp.c @@ -10,8 +10,6 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> -#include <asm/prom.h> - #include "windfarm.h" #define VERSION "0.3" diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c index 2470e5a725c8..82e7b2005ae7 100644 --- a/drivers/macintosh/windfarm_fcu_controls.c +++ b/drivers/macintosh/windfarm_fcu_controls.c @@ -14,7 +14,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/i2c.h> -#include <asm/prom.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 29f48c2028b6..eb7e7f0bd219 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -15,7 +15,6 @@ #include <linux/wait.h> #include <linux/i2c.h> #include <linux/of_device.h> -#include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c index 9fab0b47cd3d..807efdde86bc 100644 --- a/drivers/macintosh/windfarm_lm87_sensor.c +++ 
b/drivers/macintosh/windfarm_lm87_sensor.c @@ -13,7 +13,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/i2c.h> -#include <asm/prom.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c index 1e7b03d44ad9..55ee417fb878 100644 --- a/drivers/macintosh/windfarm_max6690_sensor.c +++ b/drivers/macintosh/windfarm_max6690_sensor.c @@ -10,7 +10,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> -#include <asm/prom.h> + #include <asm/pmac_low_i2c.h> #include "windfarm.h" diff --git a/drivers/macintosh/windfarm_mpu.h b/drivers/macintosh/windfarm_mpu.h index 157ce6e3f32e..b5ce347d12d4 100644 --- a/drivers/macintosh/windfarm_mpu.h +++ b/drivers/macintosh/windfarm_mpu.h @@ -8,6 +8,8 @@ #ifndef __WINDFARM_MPU_H #define __WINDFARM_MPU_H +#include <linux/of.h> + typedef unsigned short fu16; typedef int fs32; typedef short fs16; diff --git a/drivers/macintosh/windfarm_pm112.c b/drivers/macintosh/windfarm_pm112.c index e8377ce0a95a..d1dec314ae30 100644 --- a/drivers/macintosh/windfarm_pm112.c +++ b/drivers/macintosh/windfarm_pm112.c @@ -12,7 +12,9 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/reboot.h> -#include <asm/prom.h> +#include <linux/of.h> +#include <linux/slab.h> + #include <asm/smu.h> #include "windfarm.h" diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c index ba1ec6fc11d2..36312f163aac 100644 --- a/drivers/macintosh/windfarm_pm121.c +++ b/drivers/macintosh/windfarm_pm121.c @@ -201,7 +201,8 @@ #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <asm/prom.h> +#include <linux/of.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_pm72.c b/drivers/macintosh/windfarm_pm72.c index e81746b87cff..e21f973551cc 100644 --- a/drivers/macintosh/windfarm_pm72.c +++ b/drivers/macintosh/windfarm_pm72.c @@ -11,7 +11,7 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/reboot.h> -#include <asm/prom.h> + #include <asm/smu.h> #include "windfarm.h" diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c index 82c67a4ee5f7..e0f4743f21cc 100644 --- a/drivers/macintosh/windfarm_pm81.c +++ b/drivers/macintosh/windfarm_pm81.c @@ -102,7 +102,8 @@ #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <asm/prom.h> +#include <linux/of.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c index 3f346af9e3f7..c8535855360d 100644 --- a/drivers/macintosh/windfarm_pm91.c +++ b/drivers/macintosh/windfarm_pm91.c @@ -37,7 +37,8 @@ #include <linux/kmod.h> #include <linux/device.h> #include <linux/platform_device.h> -#include <asm/prom.h> +#include <linux/of.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c index 7acd1684c451..e9eb7fdde48c 100644 --- a/drivers/macintosh/windfarm_rm31.c +++ b/drivers/macintosh/windfarm_rm31.c @@ -11,7 +11,7 @@ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/reboot.h> -#include <asm/prom.h> + #include <asm/smu.h> #include "windfarm.h" diff --git a/drivers/macintosh/windfarm_smu_controls.c 
b/drivers/macintosh/windfarm_smu_controls.c index 75966052819a..e9957ad49a2a 100644 --- a/drivers/macintosh/windfarm_smu_controls.c +++ b/drivers/macintosh/windfarm_smu_controls.c @@ -14,7 +14,8 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/completion.h> -#include <asm/prom.h> +#include <linux/of.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c index e46e1153a0b4..5ade627eaa78 100644 --- a/drivers/macintosh/windfarm_smu_sat.c +++ b/drivers/macintosh/windfarm_smu_sat.c @@ -13,7 +13,7 @@ #include <linux/wait.h> #include <linux/i2c.h> #include <linux/mutex.h> -#include <asm/prom.h> + #include <asm/smu.h> #include <asm/pmac_low_i2c.h> diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index c8706cfb83fd..00c6fe25fcba 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ b/drivers/macintosh/windfarm_smu_sensors.c @@ -14,7 +14,8 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/completion.h> -#include <asm/prom.h> +#include <linux/of.h> + #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 0a6abbbe3745..3212ef6aa81b 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -165,11 +165,12 @@ static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) } static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); - return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); + return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); } static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, @@ -180,9 +181,18 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, return dax_zero_page_range(dax_dev, pgoff, nr_pages); } +static size_t linear_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i) +{ + struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff); + + return dax_recovery_write(dax_dev, pgoff, addr, bytes, i); +} + #else #define linear_dax_direct_access NULL #define linear_dax_zero_page_range NULL +#define linear_dax_recovery_write NULL #endif static struct target_type linear_target = { @@ -200,6 +210,7 @@ static struct target_type linear_target = { .iterate_devices = linear_iterate_devices, .direct_access = linear_dax_direct_access, .dax_zero_page_range = linear_dax_zero_page_range, + .dax_recovery_write = linear_dax_recovery_write, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index e194226c89e5..20fd688f72e7 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -888,11 +888,12 @@ static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti, } static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); - return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); + return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); } static int log_writes_dax_zero_page_range(struct dm_target *ti, 
pgoff_t pgoff, @@ -903,9 +904,18 @@ static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT); } +static size_t log_writes_dax_recovery_write(struct dm_target *ti, + pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) +{ + struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff); + + return dax_recovery_write(dax_dev, pgoff, addr, bytes, i); +} + #else #define log_writes_dax_direct_access NULL #define log_writes_dax_zero_page_range NULL +#define log_writes_dax_recovery_write NULL #endif static struct target_type log_writes_target = { @@ -923,6 +933,7 @@ static struct target_type log_writes_target = { .io_hints = log_writes_io_hints, .direct_access = log_writes_dax_direct_access, .dax_zero_page_range = log_writes_dax_zero_page_range, + .dax_recovery_write = log_writes_dax_recovery_write, }; static int __init dm_log_writes_init(void) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index c81d331d1afe..baa085cc67bd 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -315,11 +315,12 @@ static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff) } static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); - return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); + return dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn); } static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, @@ -330,9 +331,18 @@ static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, return dax_zero_page_range(dax_dev, pgoff, nr_pages); } +static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i) +{ + struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff); + + return dax_recovery_write(dax_dev, pgoff, addr, bytes, i); +} + #else #define stripe_dax_direct_access NULL #define stripe_dax_zero_page_range NULL +#define stripe_dax_recovery_write NULL #endif /* @@ -469,6 +479,7 @@ static struct target_type stripe_target = { .io_hints = stripe_io_hints, .direct_access = stripe_dax_direct_access, .dax_zero_page_range = stripe_dax_zero_page_range, + .dax_recovery_write = stripe_dax_recovery_write, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index a37c7b763643..0e833a154b31 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1005,7 +1005,7 @@ bool dm_table_request_based(struct dm_table *t) return __table_type_request_based(dm_table_get_type(t)); } -static int dm_table_supports_poll(struct dm_table *t); +static bool dm_table_supports_poll(struct dm_table *t); static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md) { @@ -1027,7 +1027,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * per_io_data_size = max(per_io_data_size, ti->per_io_data_size); min_pool_size = max(min_pool_size, ti->num_flush_bios); } - poll_supported = !!dm_table_supports_poll(t); + poll_supported = dm_table_supports_poll(t); } t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size, @@ -1547,9 +1547,20 @@ static int count_device(struct dm_target *ti, struct dm_dev *dev, return 0; } -static int dm_table_supports_poll(struct dm_table *t) +static bool 
dm_table_supports_poll(struct dm_table *t) { - return !dm_table_any_dev_attr(t, device_not_poll_capable, NULL); + struct dm_target *ti; + unsigned i = 0; + + while (i < dm_table_get_num_targets(t)) { + ti = dm_table_get_target(t, i++); + + if (!ti->type->iterate_devices || + ti->type->iterate_devices(ti, device_not_poll_capable, NULL)) + return false; + } + + return true; } /* diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c index 64dd0b34fcf4..8cd5184e62f0 100644 --- a/drivers/md/dm-target.c +++ b/drivers/md/dm-target.c @@ -10,6 +10,7 @@ #include <linux/init.h> #include <linux/kmod.h> #include <linux/bio.h> +#include <linux/dax.h> #define DM_MSG_PREFIX "target" @@ -142,7 +143,8 @@ static void io_err_release_clone_rq(struct request *clone, } static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { return -EIO; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 80133aae0db3..d6dbd47492a8 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1312,6 +1312,7 @@ bad: static struct target_type verity_target = { .name = "verity", + .features = DM_TARGET_IMMUTABLE, .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = verity_ctr, diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index 5630b470ba42..d74c5a7a0ab4 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -286,7 +286,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) id = dax_read_lock(); - da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn); + da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, DAX_ACCESS, + &wc->memory_map, &pfn); if (da < 0) { wc->memory_map = NULL; r = da; @@ -308,8 +309,8 @@ static int persistent_memory_claim(struct dm_writecache *wc) i = 0; do { long daa; - daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i, - NULL, &pfn); + daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, + p - i, DAX_ACCESS, NULL, &pfn); if (daa <= 0) { r = daa ? 
daa : -EINVAL; goto err3; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d62f1354ecbf..dfb0a551bd88 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1143,7 +1143,8 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, } static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { struct mapped_device *md = dax_get_private(dax_dev); sector_t sector = pgoff * PAGE_SECTORS; @@ -1161,7 +1162,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, if (len < 1) goto out; nr_pages = min(len, nr_pages); - ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); + ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn); out: dm_put_live_table(md, srcu_idx); @@ -1196,6 +1197,25 @@ static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } +static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i) +{ + struct mapped_device *md = dax_get_private(dax_dev); + sector_t sector = pgoff * PAGE_SECTORS; + struct dm_target *ti; + int srcu_idx; + long ret = 0; + + ti = dm_dax_get_live_target(md, sector, &srcu_idx); + if (!ti || !ti->type->dax_recovery_write) + goto out; + + ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i); +out: + dm_put_live_table(md, srcu_idx); + return ret; +} + /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management @@ -3231,6 +3251,7 @@ static const struct block_device_operations dm_rq_blk_dops = { static const struct dax_operations dm_dax_ops = { .direct_access = dm_dax_direct_access, .zero_page_range = dm_dax_zero_page_range, + .recovery_write = dm_dax_recovery_write, }; /* diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c index 6c2a421b86e3..f305643209f0 100644 --- a/drivers/memory/emif.c +++ b/drivers/memory/emif.c @@ -630,7 +630,7 @@ static irqreturn_t emif_threaded_isr(int irq, void *dev_id) dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n"); /* If we have Power OFF ability, use it, else try restarting */ - if (pm_power_off) { + if (kernel_can_power_off()) { kernel_power_off(); } else { WARN(1, "FIXME: NO pm_power_off!!! trying restart\n"); diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 546feef851ab..596731caf407 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -114,6 +114,9 @@ static const struct mfd_cell cros_ec_platform_cells[] = { { .name = "cros-ec-chardev", }, { .name = "cros-ec-debugfs", }, { .name = "cros-ec-sysfs", }, +}; + +static const struct mfd_cell cros_ec_pchg_cells[] = { { .name = "cros-ec-pchg", }, }; @@ -137,6 +140,7 @@ static int ec_device_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct cros_ec_platform *ec_platform = dev_get_platdata(dev); struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL); + struct ec_response_pchg_count pchg_count; int i; if (!ec) @@ -243,6 +247,21 @@ static int ec_device_probe(struct platform_device *pdev) } /* + * The PCHG device cannot be detected by sending EC_FEATURE_GET_CMD, but + * it can be detected by querying the number of peripheral chargers. 
+ */ + retval = cros_ec_command(ec->ec_dev, 0, EC_CMD_PCHG_COUNT, NULL, 0, + &pchg_count, sizeof(pchg_count)); + if (retval >= 0 && pchg_count.port_count) { + retval = mfd_add_hotplug_devices(ec->dev, + cros_ec_pchg_cells, + ARRAY_SIZE(cros_ec_pchg_cells)); + if (retval) + dev_warn(ec->dev, "failed to add pchg: %d\n", + retval); + } + + /* * The following subdevices cannot be detected by sending the * EC_FEATURE_GET_CMD to the Embedded Controller device. */ diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index e5c8bc998eb4..965820481f1e 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c @@ -46,14 +46,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev) } clk_enable(davinci_vc->clk); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - fifo_base = (dma_addr_t)res->start; - davinci_vc->base = devm_ioremap_resource(&pdev->dev, res); + davinci_vc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(davinci_vc->base)) { ret = PTR_ERR(davinci_vc->base); goto fail; } + fifo_base = (dma_addr_t)res->start; davinci_vc->regmap = devm_regmap_init_mmio(&pdev->dev, davinci_vc->base, diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c index 6909d075d017..a58e42ddcd0c 100644 --- a/drivers/mfd/hi655x-pmic.c +++ b/drivers/mfd/hi655x-pmic.c @@ -9,14 +9,13 @@ * Fei Wang <w.f@huawei.com> */ -#include <linux/gpio.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/mfd/core.h> #include <linux/mfd/hi655x-pmic.h> #include <linux/module.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -94,7 +93,6 @@ static int hi655x_pmic_probe(struct platform_device *pdev) int ret; struct hi655x_pmic *pmic; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; void __iomem *base; pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); @@ -120,21 +118,12 @@ static int hi655x_pmic_probe(struct platform_device *pdev) hi655x_local_irq_clear(pmic->regmap); - pmic->gpio = of_get_named_gpio(np, "pmic-gpios", 0); - if (!gpio_is_valid(pmic->gpio)) { - dev_err(dev, "Failed to get the pmic-gpios\n"); - return -ENODEV; - } - - ret = devm_gpio_request_one(dev, pmic->gpio, GPIOF_IN, - "hi655x_pmic_irq"); - if (ret < 0) { - dev_err(dev, "Failed to request gpio %d ret = %d\n", - pmic->gpio, ret); - return ret; - } + pmic->gpio = devm_gpiod_get_optional(dev, "pmic", GPIOD_IN); + if (IS_ERR(pmic->gpio)) + return dev_err_probe(dev, PTR_ERR(pmic->gpio), + "Failed to request hi655x pmic-gpio"); - ret = regmap_add_irq_chip(pmic->regmap, gpio_to_irq(pmic->gpio), + ret = regmap_add_irq_chip(pmic->regmap, gpiod_to_irq(pmic->gpio), IRQF_TRIGGER_LOW | IRQF_NO_SUSPEND, 0, &hi655x_irq_chip, &pmic->irq_data); if (ret) { @@ -149,7 +138,7 @@ static int hi655x_pmic_probe(struct platform_device *pdev) regmap_irq_get_domain(pmic->irq_data)); if (ret) { dev_err(dev, "Failed to register device %d\n", ret); - regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data); + regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data); return ret; } @@ -160,7 +149,7 @@ static int hi655x_pmic_remove(struct platform_device *pdev) { struct hi655x_pmic *pmic = platform_get_drvdata(pdev); - regmap_del_irq_chip(gpio_to_irq(pmic->gpio), pmic->irq_data); + regmap_del_irq_chip(gpiod_to_irq(pmic->gpio), pmic->irq_data); mfd_remove_devices(&pdev->dev); return 0; } diff --git 
a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 962ee14c62dd..f7950d2197df 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -319,6 +319,8 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x51c5), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x51c6), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x51c7), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x51d8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x51d9), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x51e8), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x51e9), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x51ea), (kernel_ulong_t)&bxt_i2c_info }, diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c index e92eeeb67a98..4cd5ecc72211 100644 --- a/drivers/mfd/ipaq-micro.c +++ b/drivers/mfd/ipaq-micro.c @@ -403,7 +403,7 @@ static int __init micro_probe(struct platform_device *pdev) micro_reset_comm(micro); irq = platform_get_irq(pdev, 0); - if (!irq) + if (irq < 0) return -EINVAL; ret = devm_request_irq(&pdev->dev, irq, micro_serial_isr, IRQF_SHARED, "ipaq-micro", diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c index bddb40054b9e..1a368ad08f58 100644 --- a/drivers/mfd/mt6397-core.c +++ b/drivers/mfd/mt6397-core.c @@ -54,6 +54,13 @@ static const struct resource mt6358_keys_resources[] = { DEFINE_RES_IRQ_NAMED(MT6358_IRQ_HOMEKEY_R, "homekey_r"), }; +static const struct resource mt6359_keys_resources[] = { + DEFINE_RES_IRQ_NAMED(MT6359_IRQ_PWRKEY, "powerkey"), + DEFINE_RES_IRQ_NAMED(MT6359_IRQ_HOMEKEY, "homekey"), + DEFINE_RES_IRQ_NAMED(MT6359_IRQ_PWRKEY_R, "powerkey_r"), + DEFINE_RES_IRQ_NAMED(MT6359_IRQ_HOMEKEY_R, "homekey_r"), +}; + static const struct resource mt6323_keys_resources[] = { DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_PWRKEY, "powerkey"), DEFINE_RES_IRQ_NAMED(MT6323_IRQ_STATUS_FCHRKEY, "homekey"), @@ -122,6 +129,12 @@ static const struct mfd_cell mt6359_devs[] = { .of_compatible = "mediatek,mt6358-rtc", }, { .name = "mt6359-sound", }, + { + .name = "mtk-pmic-keys", + .num_resources = ARRAY_SIZE(mt6359_keys_resources), + .resources = mt6359_keys_resources, + .of_compatible = "mediatek,mt6359-keys" + }, }; static const struct mfd_cell mt6397_devs[] = { diff --git a/drivers/mfd/rt4831.c b/drivers/mfd/rt4831.c index b169781ac675..fb3bd788a3eb 100644 --- a/drivers/mfd/rt4831.c +++ b/drivers/mfd/rt4831.c @@ -90,9 +90,14 @@ static int rt4831_probe(struct i2c_client *client) static int rt4831_remove(struct i2c_client *client) { struct regmap *regmap = dev_get_regmap(&client->dev, NULL); + int ret; /* Disable WLED and DSV outputs */ - return regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK); + ret = regmap_update_bits(regmap, RT4831_REG_ENABLE, RT4831_RESET_MASK, RT4831_RESET_MASK); + if (ret) + dev_warn(&client->dev, "Failed to disable outputs (%pe)\n", ERR_PTR(ret)); + + return 0; } static const struct of_device_id __maybe_unused rt4831_of_match[] = { diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 55d2c31bdfb2..d05a47c5187f 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -240,13 +240,14 @@ static int sprd_pmic_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume); static const struct of_device_id sprd_pmic_match[] = { - { .compatible = "sprd,sc2731", .data = &sc2731_data }, { .compatible = "sprd,sc2730", 
.data = &sc2730_data }, + { .compatible = "sprd,sc2731", .data = &sc2731_data }, {}, }; MODULE_DEVICE_TABLE(of, sprd_pmic_match); static const struct spi_device_id sprd_pmic_spi_ids[] = { + { .name = "sc2730", .driver_data = (unsigned long)&sc2730_data }, { .name = "sc2731", .driver_data = (unsigned long)&sc2731_data }, {}, }; diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 289b556dede2..bd6659cf3bc0 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -1036,15 +1036,11 @@ static void clocks_init(struct device *dev, static int twl_remove(struct i2c_client *client) { unsigned i, num_slaves; - int status; if (twl_class_is_4030()) - status = twl4030_exit_irq(); + twl4030_exit_irq(); else - status = twl6030_exit_irq(); - - if (status < 0) - return status; + twl6030_exit_irq(); num_slaves = twl_get_num_slaves(); for (i = 0; i < num_slaves; i++) { diff --git a/drivers/mfd/twl-core.h b/drivers/mfd/twl-core.h index 6f96c2009a9f..b4bf6a233bd0 100644 --- a/drivers/mfd/twl-core.h +++ b/drivers/mfd/twl-core.h @@ -3,9 +3,9 @@ #define __TWL_CORE_H__ extern int twl6030_init_irq(struct device *dev, int irq_num); -extern int twl6030_exit_irq(void); +extern void twl6030_exit_irq(void); extern int twl4030_init_irq(struct device *dev, int irq_num); -extern int twl4030_exit_irq(void); +extern void twl4030_exit_irq(void); extern int twl4030_init_chip_irq(const char *chip); #endif /* __TWL_CORE_H__ */ diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index ab417438d1fa..4f576f0160a9 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -753,14 +753,11 @@ fail: return status; } -int twl4030_exit_irq(void) +void twl4030_exit_irq(void) { /* FIXME undo twl_init_irq() */ - if (twl4030_irq_base) { + if (twl4030_irq_base) pr_err("twl4030: can't yet clean up IRQs?\n"); - return -ENOSYS; - } - return 0; } int twl4030_init_chip_irq(const char *chip) diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index 97af6c2a6007..3c03681c124c 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c @@ -438,7 +438,7 @@ fail_irq: return status; } -int twl6030_exit_irq(void) +void twl6030_exit_irq(void) { if (twl6030_irq && twl6030_irq->twl_irq) { unregister_pm_notifier(&twl6030_irq->pm_nb); @@ -453,6 +453,5 @@ int twl6030_exit_irq(void) * in this module. 
*/ } - return 0; } diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c index b493de962153..d85c56530863 100644 --- a/drivers/misc/cxl/api.c +++ b/drivers/misc/cxl/api.c @@ -12,6 +12,7 @@ #include <linux/pseudo_fs.h> #include <linux/sched/mm.h> #include <linux/mmu_context.h> +#include <linux/irqdomain.h> #include "cxl.h" diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 5dc0f6093f9d..7a6dd91987fd 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -25,6 +25,8 @@ extern uint cxl_verbose; +struct property; + #define CXL_TIMEOUT 5 /* diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 53b919856426..e5fe0a171472 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c @@ -5,6 +5,7 @@ #include <linux/hugetlb.h> #include <linux/sched/mm.h> +#include <asm/opal-api.h> #include <asm/pnv-pci.h> #include <misc/cxllib.h> diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c index 5b93ff51d82a..eee9decc121e 100644 --- a/drivers/misc/cxl/flash.c +++ b/drivers/misc/cxl/flash.c @@ -4,6 +4,7 @@ #include <linux/semaphore.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include <linux/of.h> #include <asm/rtas.h> #include "cxl.h" diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c index 9d485c9e3fff..3321c014913c 100644 --- a/drivers/misc/cxl/guest.c +++ b/drivers/misc/cxl/guest.c @@ -6,6 +6,8 @@ #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/delay.h> +#include <linux/irqdomain.h> +#include <linux/platform_device.h> #include "cxl.h" #include "hcalls.h" diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c index 4cb829d5d873..5f0e2dcebb34 100644 --- a/drivers/misc/cxl/irq.c +++ b/drivers/misc/cxl/irq.c @@ -4,6 +4,7 @@ */ #include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/wait.h> diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c index 43b312d06e3e..c1fbf6f588f7 100644 --- a/drivers/misc/cxl/main.c +++ b/drivers/misc/cxl/main.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/idr.h> #include <linux/pci.h> +#include <linux/platform_device.h> #include <linux/sched/task.h> #include <asm/cputable.h> diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index 1a7f22836041..50b0c44bb8d7 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -11,6 +11,7 @@ #include <linux/mm.h> #include <linux/uaccess.h> #include <linux/delay.h> +#include <linux/irqdomain.h> #include <asm/synch.h> #include <asm/switch_to.h> #include <misc/cxl-base.h> diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c index ecdcfae025b7..a06920b7e049 100644 --- a/drivers/misc/ocxl/afu_irq.c +++ b/drivers/misc/ocxl/afu_irq.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // Copyright 2017 IBM Corp. 
#include <linux/interrupt.h> +#include <linux/irqdomain.h> #include <asm/pnv-ocxl.h> #include <asm/xive.h> #include "ocxl_internal.h" diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c index d881f5e40ad9..6777c419a8da 100644 --- a/drivers/misc/ocxl/file.c +++ b/drivers/misc/ocxl/file.c @@ -556,7 +556,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu) err_unregister: ocxl_sysfs_unregister_afu(info); // safe to call even if register failed + free_minor(info); device_unregister(&info->dev); + return rc; err_put: ocxl_afu_put(afu); free_minor(info); diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c index 9670d02c927f..4cf4c55a5f00 100644 --- a/drivers/misc/ocxl/link.c +++ b/drivers/misc/ocxl/link.c @@ -6,6 +6,7 @@ #include <linux/mm_types.h> #include <linux/mmu_context.h> #include <linux/mmu_notifier.h> +#include <linux/irqdomain.h> #include <asm/copro.h> #include <asm/pnv-ocxl.h> #include <asm/xive.h> diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index af6c3c329076..d6144978e32d 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -508,7 +508,7 @@ config MMC_OMAP_HS config MMC_WBSD tristate "Winbond W83L51xD SD/MMC Card Interface support" - depends on ISA_DMA_API && !M68K + depends on ISA_DMA_API help This selects the Winbond(R) W83L51xD Secure digital and Multimedia card Interface. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 887ee0f729d1..2935614f6fa9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -87,6 +87,11 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) enable_vfs_hca: num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs); for (vf = 0; vf < num_vfs; vf++) { + /* Notify the VF before its enablement to let it set + * some stuff. + */ + blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier, + MLX5_PF_NOTIFY_ENABLE_VF, dev); err = mlx5_core_enable_hca(dev, vf + 1); if (err) { mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err); @@ -127,6 +132,11 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf) for (vf = num_vfs - 1; vf >= 0; vf--) { if (!sriov->vfs_ctx[vf].enabled) continue; + /* Notify the VF before its disablement to let it clean + * some resources. + */ + blocking_notifier_call_chain(&sriov->vfs_ctx[vf].notifier, + MLX5_PF_NOTIFY_DISABLE_VF, dev); err = mlx5_core_disable_hca(dev, vf + 1); if (err) { mlx5_core_warn(dev, "failed to disable VF %d\n", vf); @@ -257,7 +267,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev) { struct mlx5_core_sriov *sriov = &dev->priv.sriov; struct pci_dev *pdev = dev->pdev; - int total_vfs; + int total_vfs, i; if (!mlx5_core_is_pf(dev)) return 0; @@ -269,6 +279,9 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev) if (!sriov->vfs_ctx) return -ENOMEM; + for (i = 0; i < total_vfs; i++) + BLOCKING_INIT_NOTIFIER_HEAD(&sriov->vfs_ctx[i].notifier); + return 0; } @@ -281,3 +294,53 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev) kfree(sriov->vfs_ctx); } + +/** + * mlx5_sriov_blocking_notifier_unregister - Unregister a VF from + * a notification block chain. + * + * @mdev: The mlx5 core device. + * @vf_id: The VF id. + * @nb: The notifier block to be unregistered. 
+ */ +void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb) +{ + struct mlx5_vf_context *vfs_ctx; + struct mlx5_core_sriov *sriov; + + sriov = &mdev->priv.sriov; + if (WARN_ON(vf_id < 0 || vf_id >= sriov->num_vfs)) + return; + + vfs_ctx = &sriov->vfs_ctx[vf_id]; + blocking_notifier_chain_unregister(&vfs_ctx->notifier, nb); +} +EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_unregister); + +/** + * mlx5_sriov_blocking_notifier_register - Register a VF notification + * block chain. + * + * @mdev: The mlx5 core device. + * @vf_id: The VF id. + * @nb: The notifier block to be called upon the VF events. + * + * Returns 0 on success or an error code. + */ +int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev, + int vf_id, + struct notifier_block *nb) +{ + struct mlx5_vf_context *vfs_ctx; + struct mlx5_core_sriov *sriov; + + sriov = &mdev->priv.sriov; + if (vf_id < 0 || vf_id >= sriov->num_vfs) + return -EINVAL; + + vfs_ctx = &sriov->vfs_ctx[vf_id]; + return blocking_notifier_chain_register(&vfs_ctx->notifier, nb); +} +EXPORT_SYMBOL(mlx5_sriov_blocking_notifier_register); diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c index e5a58520d398..fabbb31f2c35 100644 --- a/drivers/nvdimm/btt_devs.c +++ b/drivers/nvdimm/btt_devs.c @@ -50,14 +50,14 @@ static ssize_t sector_size_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_btt->lbasize, btt_lbasize_supported); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -79,11 +79,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -108,13 +108,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? 
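A hedged sketch of how a consumer on the PF side might use the per-VF notifier chain exported above. Only the mlx5_sriov_blocking_notifier_register()/unregister() entry points and the MLX5_PF_NOTIFY_* events from this patch are used; the handler name is invented, and the include path is an assumption since the header hunk is not shown here.

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>	/* assumed location of the prototypes and events */

static int example_vf_notify(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* 'data' is the PF's struct mlx5_core_dev, as passed by the call chain */
	switch (event) {
	case MLX5_PF_NOTIFY_ENABLE_VF:
		/* set up per-VF state before mlx5_core_enable_hca() runs */
		break;
	case MLX5_PF_NOTIFY_DISABLE_VF:
		/* tear down per-VF state before mlx5_core_disable_hca() runs */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_vf_nb = {
	.notifier_call = example_vf_notify,
};

/* Registration for VF 0 of a given mdev, and the matching teardown:
 *	err = mlx5_sriov_blocking_notifier_register(mdev, 0, &example_vf_nb);
 *	...
 *	mlx5_sriov_blocking_notifier_unregister(mdev, 0, &example_vf_nb);
 */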
"" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -126,14 +126,14 @@ static ssize_t size_show(struct device *dev, struct nd_btt *nd_btt = to_nd_btt(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (dev->driver) rc = sprintf(buf, "%llu\n", nd_btt->size); else { /* no size to convey if the btt instance is disabled */ rc = -ENXIO; } - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -178,6 +178,8 @@ bool is_nd_btt(struct device *dev) } EXPORT_SYMBOL(is_nd_btt); +static struct lock_class_key nvdimm_btt_key; + static struct device *__nd_btt_create(struct nd_region *nd_region, unsigned long lbasize, uuid_t *uuid, struct nd_namespace_common *ndns) @@ -205,6 +207,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region, dev->parent = &nd_region->dev; dev->type = &nd_btt_device_type; device_initialize(&nd_btt->dev); + lockdep_set_class(&nd_btt->dev.mutex, &nvdimm_btt_key); if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) { dev_dbg(&ndns->dev, "failed, already claimed by %s\n", dev_name(ndns->claim)); @@ -225,7 +228,7 @@ struct device *nd_btt_create(struct nd_region *nd_region) { struct device *dev = __nd_btt_create(nd_region, 0, NULL, NULL); - __nd_device_register(dev); + nd_device_register(dev); return dev; } @@ -324,7 +327,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt, if (!nd_btt->uuid) return -ENOMEM; - __nd_device_register(&nd_btt->dev); + nd_device_register(&nd_btt->dev); return 0; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 7b0d1443217a..a4fc17db707c 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -88,10 +88,7 @@ static int nvdimm_bus_probe(struct device *dev) dev->driver->name, dev_name(dev)); nvdimm_bus_probe_start(nvdimm_bus); - debug_nvdimm_lock(dev); rc = nd_drv->probe(dev); - debug_nvdimm_unlock(dev); - if ((rc == 0 || rc == -EOPNOTSUPP) && dev->parent && is_nd_region(dev->parent)) nd_region_advance_seeds(to_nd_region(dev->parent), dev); @@ -111,11 +108,8 @@ static void nvdimm_bus_remove(struct device *dev) struct module *provider = to_bus_provider(dev); struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); - if (nd_drv->remove) { - debug_nvdimm_lock(dev); + if (nd_drv->remove) nd_drv->remove(dev); - debug_nvdimm_unlock(dev); - } dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name, dev_name(dev)); @@ -139,7 +133,7 @@ static void nvdimm_bus_shutdown(struct device *dev) void nd_device_notify(struct device *dev, enum nvdimm_event event) { - nd_device_lock(dev); + device_lock(dev); if (dev->driver) { struct nd_device_driver *nd_drv; @@ -147,7 +141,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event) if (nd_drv->notify) nd_drv->notify(dev, event); } - nd_device_unlock(dev); + device_unlock(dev); } EXPORT_SYMBOL(nd_device_notify); @@ -334,6 +328,8 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm) } EXPORT_SYMBOL_GPL(nvdimm_to_bus); +static struct lock_class_key nvdimm_bus_key; + struct nvdimm_bus *nvdimm_bus_register(struct device *parent, struct nvdimm_bus_descriptor *nd_desc) { @@ -360,6 +356,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent, nvdimm_bus->dev.bus = &nvdimm_bus_type; nvdimm_bus->dev.of_node = nd_desc->of_node; device_initialize(&nvdimm_bus->dev); + lockdep_set_class(&nvdimm_bus->dev.mutex, &nvdimm_bus_key); device_set_pm_not_required(&nvdimm_bus->dev); rc = dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id); if (rc) @@ -511,7 +508,7 @@ static void 
nd_async_device_unregister(void *d, async_cookie_t cookie) put_device(dev); } -void __nd_device_register(struct device *dev) +void nd_device_register(struct device *dev) { if (!dev) return; @@ -537,12 +534,6 @@ void __nd_device_register(struct device *dev) async_schedule_dev_domain(nd_async_device_register, dev, &nd_async_domain); } - -void nd_device_register(struct device *dev) -{ - device_initialize(dev); - __nd_device_register(dev); -} EXPORT_SYMBOL(nd_device_register); void nd_device_unregister(struct device *dev, enum nd_async_mode mode) @@ -572,9 +563,9 @@ void nd_device_unregister(struct device *dev, enum nd_async_mode mode) * or otherwise let the async path handle it if the * unregistration was already queued. */ - nd_device_lock(dev); + device_lock(dev); killed = kill_device(dev); - nd_device_unlock(dev); + device_unlock(dev); if (!killed) return; @@ -724,6 +715,8 @@ static void ndctl_release(struct device *dev) kfree(dev); } +static struct lock_class_key nvdimm_ndctl_key; + int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus) { dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id); @@ -734,6 +727,7 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus) if (!dev) return -ENOMEM; device_initialize(dev); + lockdep_set_class(&dev->mutex, &nvdimm_ndctl_key); device_set_pm_not_required(dev); dev->class = nd_class; dev->parent = &nvdimm_bus->dev; @@ -930,10 +924,10 @@ void wait_nvdimm_bus_probe_idle(struct device *dev) if (nvdimm_bus->probe_active == 0) break; nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); wait_event(nvdimm_bus->wait, nvdimm_bus->probe_active == 0); - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); } while (true); } @@ -1167,7 +1161,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, goto out; } - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf); if (rc) @@ -1189,7 +1183,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, out_unlock: nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); out: kfree(in_env); kfree(out_env); diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 69a03358817f..d91799b71d23 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev); * * Enforce that uuids can only be changed while the device is disabled * (driver detached) - * LOCKING: expects nd_device_lock() is held on entry + * LOCKING: expects device_lock() is held on entry */ int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf, size_t len) @@ -316,15 +316,15 @@ static DEVICE_ATTR_RO(provider); static int flush_namespaces(struct device *dev, void *data) { - nd_device_lock(dev); - nd_device_unlock(dev); + device_lock(dev); + device_unlock(dev); return 0; } static int flush_regions_dimms(struct device *dev, void *data) { - nd_device_lock(dev); - nd_device_unlock(dev); + device_lock(dev); + device_unlock(dev); device_for_each_child(dev, NULL, flush_namespaces); return 0; } @@ -368,9 +368,7 @@ static ssize_t capability_show(struct device *dev, if (!nd_desc->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); - nvdimm_bus_unlock(dev); switch (cap) { case NVDIMM_FWA_CAP_QUIESCE: @@ -395,10 +393,8 @@ static ssize_t activate_show(struct device *dev, if (!nd_desc->fw_ops) return -EOPNOTSUPP; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); state = 
nd_desc->fw_ops->activate_state(nd_desc); - nvdimm_bus_unlock(dev); if (cap < NVDIMM_FWA_CAP_QUIESCE) return -EOPNOTSUPP; @@ -443,7 +439,6 @@ static ssize_t activate_store(struct device *dev, else return -EINVAL; - nvdimm_bus_lock(dev); state = nd_desc->fw_ops->activate_state(nd_desc); switch (state) { @@ -461,7 +456,6 @@ static ssize_t activate_store(struct device *dev, default: rc = -ENXIO; } - nvdimm_bus_unlock(dev); if (rc == 0) rc = len; @@ -484,10 +478,7 @@ static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribut if (!nd_desc->fw_ops) return 0; - nvdimm_bus_lock(dev); cap = nd_desc->fw_ops->capability(nd_desc); - nvdimm_bus_unlock(dev); - if (cap < NVDIMM_FWA_CAP_QUIESCE) return 0; diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 99965077bac4..7f4a9d28b670 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -80,7 +80,7 @@ struct device *nd_dax_create(struct nd_region *nd_region) nd_dax = nd_dax_alloc(nd_region); if (nd_dax) dev = nd_pfn_devinit(&nd_dax->nd_pfn, NULL); - __nd_device_register(dev); + nd_device_register(dev); return dev; } @@ -119,7 +119,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) nd_detach_ndns(dax_dev, &nd_pfn->ndns); put_device(dax_dev); } else - __nd_device_register(dax_dev); + nd_device_register(dax_dev); return rc; } diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index ee507eed42b5..c7c980577491 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -341,9 +341,9 @@ static ssize_t available_slots_show(struct device *dev, { ssize_t rc; - nd_device_lock(dev); + device_lock(dev); rc = __available_slots_show(dev_get_drvdata(dev), buf); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -386,12 +386,12 @@ static ssize_t security_store(struct device *dev, * done while probing is idle and the DIMM is not in active use * in any region. */ - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = nvdimm_security_store(dev, buf, len); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -570,6 +570,8 @@ bool is_nvdimm(struct device *dev) return dev->type == &nvdimm_device_type; } +static struct lock_class_key nvdimm_key; + struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, @@ -613,6 +615,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, /* get security state and extended (master) state */ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + device_initialize(dev); + lockdep_set_class(&dev->mutex, &nvdimm_key); nd_device_register(dev); return nvdimm; diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 62b83b2e26e3..bf4f5c09d9b1 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -264,7 +264,7 @@ static ssize_t alt_name_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __alt_name_store(dev, buf, len); @@ -272,7 +272,7 @@ static ssize_t alt_name_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%zd)\n", rc < 0 ? 
"fail " : "", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -846,7 +846,7 @@ static ssize_t size_store(struct device *dev, if (rc) return rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __size_store(dev, val); @@ -868,7 +868,7 @@ static ssize_t size_store(struct device *dev, dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -1043,7 +1043,7 @@ static ssize_t uuid_store(struct device *dev, } else return -ENXIO; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (to_ndns(dev)->claim) @@ -1059,7 +1059,7 @@ static ssize_t uuid_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -1118,7 +1118,7 @@ static ssize_t sector_size_store(struct device *dev, } else return -ENXIO; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); if (to_ndns(dev)->claim) rc = -EBUSY; @@ -1129,7 +1129,7 @@ static ssize_t sector_size_store(struct device *dev, dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote", buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -1239,9 +1239,9 @@ static ssize_t holder_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : ""); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1278,7 +1278,7 @@ static ssize_t holder_class_store(struct device *dev, struct nd_region *nd_region = to_nd_region(dev->parent); int rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); rc = __holder_class_store(dev, buf); @@ -1286,7 +1286,7 @@ static ssize_t holder_class_store(struct device *dev, rc = nd_namespace_label_update(nd_region, dev); dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc < 0 ? rc : len; } @@ -1297,7 +1297,7 @@ static ssize_t holder_class_show(struct device *dev, struct nd_namespace_common *ndns = to_ndns(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (ndns->claim_class == NVDIMM_CCLASS_NONE) rc = sprintf(buf, "\n"); else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) || @@ -1309,7 +1309,7 @@ static ssize_t holder_class_show(struct device *dev, rc = sprintf(buf, "dax\n"); else rc = sprintf(buf, "<unknown>\n"); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1323,7 +1323,7 @@ static ssize_t mode_show(struct device *dev, char *mode; ssize_t rc; - nd_device_lock(dev); + device_lock(dev); claim = ndns->claim; if (claim && is_nd_btt(claim)) mode = "safe"; @@ -1336,7 +1336,7 @@ static ssize_t mode_show(struct device *dev, else mode = "raw"; rc = sprintf(buf, "%s\n", mode); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -1456,8 +1456,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) * Flush any in-progess probes / removals in the driver * for the raw personality of this namespace. 
*/ - nd_device_lock(&ndns->dev); - nd_device_unlock(&ndns->dev); + device_lock(&ndns->dev); + device_unlock(&ndns->dev); if (ndns->dev.driver) { dev_dbg(&ndns->dev, "is active, can't bind %s\n", dev_name(dev)); @@ -1830,6 +1830,8 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region) return dev; } +static struct lock_class_key nvdimm_namespace_key; + void nd_region_create_ns_seed(struct nd_region *nd_region) { WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); @@ -1845,8 +1847,12 @@ void nd_region_create_ns_seed(struct nd_region *nd_region) */ if (!nd_region->ns_seed) dev_err(&nd_region->dev, "failed to create namespace\n"); - else + else { + device_initialize(nd_region->ns_seed); + lockdep_set_class(&nd_region->ns_seed->mutex, + &nvdimm_namespace_key); nd_device_register(nd_region->ns_seed); + } } void nd_region_create_dax_seed(struct nd_region *nd_region) @@ -2200,6 +2206,8 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err) if (id < 0) break; dev_set_name(dev, "namespace%d.%d", nd_region->id, id); + device_initialize(dev); + lockdep_set_class(&dev->mutex, &nvdimm_namespace_key); nd_device_register(dev); } if (i) diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index 448f9dcb4bb7..cc86ee09d7c0 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -106,7 +106,7 @@ void nd_region_create_dax_seed(struct nd_region *nd_region); int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus); void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus); void nd_synchronize(void); -void __nd_device_register(struct device *dev); +void nd_device_register(struct device *dev); struct nd_label_id; char *nd_label_gen_id(struct nd_label_id *label_id, const uuid_t *uuid, u32 flags); @@ -161,70 +161,4 @@ static inline void devm_nsio_disable(struct device *dev, { } #endif - -#ifdef CONFIG_PROVE_NVDIMM_LOCKING -extern struct class *nd_class; - -enum { - LOCK_BUS, - LOCK_NDCTL, - LOCK_REGION, - LOCK_DIMM = LOCK_REGION, - LOCK_NAMESPACE, - LOCK_CLAIM, -}; - -static inline void debug_nvdimm_lock(struct device *dev) -{ - if (is_nd_region(dev)) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION); - else if (is_nvdimm(dev)) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM); - else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev)) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM); - else if (dev->parent && (is_nd_region(dev->parent))) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE); - else if (is_nvdimm_bus(dev)) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS); - else if (dev->class && dev->class == nd_class) - mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL); - else - dev_WARN(dev, "unknown lock level\n"); -} - -static inline void debug_nvdimm_unlock(struct device *dev) -{ - mutex_unlock(&dev->lockdep_mutex); -} - -static inline void nd_device_lock(struct device *dev) -{ - device_lock(dev); - debug_nvdimm_lock(dev); -} - -static inline void nd_device_unlock(struct device *dev) -{ - debug_nvdimm_unlock(dev); - device_unlock(dev); -} -#else -static inline void nd_device_lock(struct device *dev) -{ - device_lock(dev); -} - -static inline void nd_device_unlock(struct device *dev) -{ - device_unlock(dev); -} - -static inline void debug_nvdimm_lock(struct device *dev) -{ -} - -static inline void debug_nvdimm_unlock(struct device *dev) -{ -} -#endif #endif /* __ND_CORE_H__ */ diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index c31e184bfa45..0e92ab4b3283 100644 --- a/drivers/nvdimm/pfn_devs.c +++ 
b/drivers/nvdimm/pfn_devs.c @@ -55,7 +55,7 @@ static ssize_t mode_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc = 0; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); if (dev->driver) rc = -EBUSY; @@ -77,7 +77,7 @@ static ssize_t mode_store(struct device *dev, dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -123,14 +123,14 @@ static ssize_t align_store(struct device *dev, unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, }; ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); rc = nd_size_select_store(dev, buf, &nd_pfn->align, nd_pfn_supported_alignments(aligns)); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -152,11 +152,11 @@ static ssize_t uuid_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); - nd_device_unlock(dev); + device_unlock(dev); return rc ? rc : len; } @@ -181,13 +181,13 @@ static ssize_t namespace_store(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len); dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf, buf[len - 1] == '\n' ? "" : "\n"); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -199,7 +199,7 @@ static ssize_t resource_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -213,7 +213,7 @@ static ssize_t resource_show(struct device *dev, /* no address to convey if the pfn instance is disabled */ rc = -ENXIO; } - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -225,7 +225,7 @@ static ssize_t size_show(struct device *dev, struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (dev->driver) { struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; u64 offset = __le64_to_cpu(pfn_sb->dataoff); @@ -241,7 +241,7 @@ static ssize_t size_show(struct device *dev, /* no size to convey if the pfn instance is disabled */ rc = -ENXIO; } - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -291,6 +291,8 @@ bool is_nd_pfn(struct device *dev) } EXPORT_SYMBOL(is_nd_pfn); +static struct lock_class_key nvdimm_pfn_key; + struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, struct nd_namespace_common *ndns) { @@ -303,6 +305,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, nd_pfn->align = nd_pfn_default_alignment(); dev = &nd_pfn->dev; device_initialize(&nd_pfn->dev); + lockdep_set_class(&nd_pfn->dev.mutex, &nvdimm_pfn_key); if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) { dev_dbg(&ndns->dev, "failed, already claimed by %s\n", dev_name(ndns->claim)); @@ -346,7 +349,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region) nd_pfn = nd_pfn_alloc(nd_region); dev = nd_pfn_devinit(nd_pfn, NULL); - __nd_device_register(dev); + nd_device_register(dev); return dev; } @@ -643,7 +646,7 @@ int 
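The nvdimm changes above all follow a single pattern: drop the nd_device_lock()/lockdep_mutex wrappers, give each device type its own lock class when the device is initialized, and then use plain device_lock()/device_unlock(). A minimal sketch of that pattern with an illustrative key name:

#include <linux/device.h>
#include <linux/lockdep.h>

static struct lock_class_key example_subsys_key;	/* one key per device type */

static void example_device_setup(struct device *dev)
{
	device_initialize(dev);
	/* lets lockdep tell this type's dev->mutex apart when device locks nest */
	lockdep_set_class(&dev->mutex, &example_subsys_key);
}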
nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns) nd_detach_ndns(pfn_dev, &nd_pfn->ndns); put_device(pfn_dev); } else - __nd_device_register(pfn_dev); + nd_device_register(pfn_dev); return rc; } diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 58d95242a836..629d10fcf53b 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -45,9 +45,25 @@ static struct nd_region *to_region(struct pmem_device *pmem) return to_nd_region(to_dev(pmem)->parent); } -static void hwpoison_clear(struct pmem_device *pmem, - phys_addr_t phys, unsigned int len) +static phys_addr_t to_phys(struct pmem_device *pmem, phys_addr_t offset) { + return pmem->phys_addr + offset; +} + +static sector_t to_sect(struct pmem_device *pmem, phys_addr_t offset) +{ + return (offset - pmem->data_offset) >> SECTOR_SHIFT; +} + +static phys_addr_t to_offset(struct pmem_device *pmem, sector_t sector) +{ + return (sector << SECTOR_SHIFT) + pmem->data_offset; +} + +static void pmem_mkpage_present(struct pmem_device *pmem, phys_addr_t offset, + unsigned int len) +{ + phys_addr_t phys = to_phys(pmem, offset); unsigned long pfn_start, pfn_end, pfn; /* only pmem in the linear map supports HWPoison */ @@ -69,33 +85,40 @@ static void hwpoison_clear(struct pmem_device *pmem, } } -static blk_status_t pmem_clear_poison(struct pmem_device *pmem, - phys_addr_t offset, unsigned int len) +static void pmem_clear_bb(struct pmem_device *pmem, sector_t sector, long blks) { - struct device *dev = to_dev(pmem); - sector_t sector; - long cleared; - blk_status_t rc = BLK_STS_OK; + if (blks == 0) + return; + badblocks_clear(&pmem->bb, sector, blks); + if (pmem->bb_state) + sysfs_notify_dirent(pmem->bb_state); +} - sector = (offset - pmem->data_offset) / 512; +static long __pmem_clear_poison(struct pmem_device *pmem, + phys_addr_t offset, unsigned int len) +{ + phys_addr_t phys = to_phys(pmem, offset); + long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len); - cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len); - if (cleared < len) - rc = BLK_STS_IOERR; - if (cleared > 0 && cleared / 512) { - hwpoison_clear(pmem, pmem->phys_addr + offset, cleared); - cleared /= 512; - dev_dbg(dev, "%#llx clear %ld sector%s\n", - (unsigned long long) sector, cleared, - cleared > 1 ? 
"s" : ""); - badblocks_clear(&pmem->bb, sector, cleared); - if (pmem->bb_state) - sysfs_notify_dirent(pmem->bb_state); + if (cleared > 0) { + pmem_mkpage_present(pmem, offset, cleared); + arch_invalidate_pmem(pmem->virt_addr + offset, len); } + return cleared; +} + +static blk_status_t pmem_clear_poison(struct pmem_device *pmem, + phys_addr_t offset, unsigned int len) +{ + long cleared = __pmem_clear_poison(pmem, offset, len); - arch_invalidate_pmem(pmem->virt_addr + offset, len); + if (cleared < 0) + return BLK_STS_IOERR; - return rc; + pmem_clear_bb(pmem, to_sect(pmem, offset), cleared >> SECTOR_SHIFT); + if (cleared < len) + return BLK_STS_IOERR; + return BLK_STS_OK; } static void write_pmem(void *pmem_addr, struct page *page, @@ -143,7 +166,7 @@ static blk_status_t pmem_do_read(struct pmem_device *pmem, sector_t sector, unsigned int len) { blk_status_t rc; - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) @@ -158,36 +181,20 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem, struct page *page, unsigned int page_off, sector_t sector, unsigned int len) { - blk_status_t rc = BLK_STS_OK; - bool bad_pmem = false; - phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + phys_addr_t pmem_off = to_offset(pmem, sector); void *pmem_addr = pmem->virt_addr + pmem_off; - if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) - bad_pmem = true; + if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) { + blk_status_t rc = pmem_clear_poison(pmem, pmem_off, len); + + if (rc != BLK_STS_OK) + return rc; + } - /* - * Note that we write the data both before and after - * clearing poison. The write before clear poison - * handles situations where the latest written data is - * preserved and the clear poison operation simply marks - * the address range as valid without changing the data. - * In this case application software can assume that an - * interrupted write will either return the new good - * data or an error. - * - * However, if pmem_clear_poison() leaves the data in an - * indeterminate state we need to perform the write - * after clear poison. 
- */ flush_dcache_page(page); write_pmem(pmem_addr, page, page_off, len); - if (unlikely(bad_pmem)) { - rc = pmem_clear_poison(pmem, pmem_off, len); - write_pmem(pmem_addr, page, page_off, len); - } - return rc; + return BLK_STS_OK; } static void pmem_submit_bio(struct bio *bio) @@ -255,24 +262,47 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset; - - if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512, - PFN_PHYS(nr_pages)))) - return -EIO; + sector_t sector = PFN_PHYS(pgoff) >> SECTOR_SHIFT; + unsigned int num = PFN_PHYS(nr_pages) >> SECTOR_SHIFT; + struct badblocks *bb = &pmem->bb; + sector_t first_bad; + int num_bad; if (kaddr) *kaddr = pmem->virt_addr + offset; if (pfn) *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags); + if (bb->count && + badblocks_check(bb, sector, num, &first_bad, &num_bad)) { + long actual_nr; + + if (mode != DAX_RECOVERY_WRITE) + return -EIO; + + /* + * Set the recovery stride is set to kernel page size because + * the underlying driver and firmware clear poison functions + * don't appear to handle large chunk(such as 2MiB) reliably. + */ + actual_nr = PHYS_PFN( + PAGE_ALIGN((first_bad - sector) << SECTOR_SHIFT)); + dev_dbg(pmem->bb.dev, "start sector(%llu), nr_pages(%ld), first_bad(%llu), actual_nr(%ld)\n", + sector, nr_pages, first_bad, actual_nr); + if (actual_nr) + return actual_nr; + return 1; + } + /* - * If badblocks are present, limit known good range to the - * requested range. + * If badblocks are present but not in the range, limit known good range + * to the requested range. */ - if (unlikely(pmem->bb.count)) + if (bb->count) return nr_pages; return PHYS_PFN(pmem->size - pmem->pfn_pad - offset); } @@ -294,16 +324,73 @@ static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, } static long pmem_dax_direct_access(struct dax_device *dax_dev, - pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) + pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, + void **kaddr, pfn_t *pfn) +{ + struct pmem_device *pmem = dax_get_private(dax_dev); + + return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn); +} + +/* + * The recovery write thread started out as a normal pwrite thread and + * when the filesystem was told about potential media error in the + * range, filesystem turns the normal pwrite to a dax_recovery_write. + * + * The recovery write consists of clearing media poison, clearing page + * HWPoison bit, reenable page-wide read-write permission, flush the + * caches and finally write. A competing pread thread will be held + * off during the recovery process since data read back might not be + * valid, and this is achieved by clearing the badblock records after + * the recovery write is complete. Competing recovery write threads + * are already serialized by writer lock held by dax_iomap_rw(). 
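To make the arithmetic of the to_phys()/to_sect()/to_offset() helpers and the __pmem_clear_poison()/pmem_clear_bb() split above concrete, a worked example assuming the usual 512-byte sectors (SECTOR_SHIFT == 9); the numbers are illustrative, not taken from the patch.

/*
 * Clearing 4 KiB of poison at byte offset data_offset + 0x2000 inside the
 * pmem device:
 *
 *   to_phys(pmem, off) = pmem->phys_addr + data_offset + 0x2000  (media address)
 *   to_sect(pmem, off) = (off - data_offset) >> 9 = 0x2000 >> 9 = 16
 *
 *   __pmem_clear_poison() returns the bytes actually cleared, e.g. 4096,
 *   so pmem_clear_bb() is handed 4096 >> SECTOR_SHIFT = 8 sectors starting
 *   at sector 16, and only then are the badblocks records dropped.
 */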
+ */ +static size_t pmem_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i) { struct pmem_device *pmem = dax_get_private(dax_dev); + size_t olen, len, off; + phys_addr_t pmem_off; + struct device *dev = pmem->bb.dev; + long cleared; + + off = offset_in_page(addr); + len = PFN_PHYS(PFN_UP(off + bytes)); + if (!is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) >> SECTOR_SHIFT, len)) + return _copy_from_iter_flushcache(addr, bytes, i); + + /* + * Not page-aligned range cannot be recovered. This should not + * happen unless something else went wrong. + */ + if (off || !PAGE_ALIGNED(bytes)) { + dev_dbg(dev, "Found poison, but addr(%p) or bytes(%#zx) not page aligned\n", + addr, bytes); + return 0; + } + + pmem_off = PFN_PHYS(pgoff) + pmem->data_offset; + cleared = __pmem_clear_poison(pmem, pmem_off, len); + if (cleared > 0 && cleared < len) { + dev_dbg(dev, "poison cleared only %ld out of %zu bytes\n", + cleared, len); + return 0; + } + if (cleared < 0) { + dev_dbg(dev, "poison clear failed: %ld\n", cleared); + return 0; + } + + olen = _copy_from_iter_flushcache(addr, bytes, i); + pmem_clear_bb(pmem, to_sect(pmem, pmem_off), cleared >> SECTOR_SHIFT); - return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn); + return olen; } static const struct dax_operations pmem_dax_ops = { .direct_access = pmem_dax_direct_access, .zero_page_range = pmem_dax_zero_page_range, + .recovery_write = pmem_recovery_write, }; static ssize_t write_cache_show(struct device *dev, @@ -573,7 +660,7 @@ static void nd_pmem_remove(struct device *dev) nvdimm_namespace_detach_btt(to_nd_btt(dev)); else { /* - * Note, this assumes nd_device_lock() context to not + * Note, this assumes device_lock() context to not * race nd_pmem_notify() */ sysfs_put(pmem->bb_state); diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h index 1f51a2361429..392b0b38acb9 100644 --- a/drivers/nvdimm/pmem.h +++ b/drivers/nvdimm/pmem.h @@ -8,6 +8,8 @@ #include <linux/pfn_t.h> #include <linux/fs.h> +enum dax_access_mode; + /* this definition is in it's own header for tools/testing/nvdimm to consume */ struct pmem_device { /* One contiguous memory region per device */ @@ -28,7 +30,8 @@ struct pmem_device { }; long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn); + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn); #ifdef CONFIG_MEMORY_FAILURE static inline bool test_and_clear_pmem_poison(struct page *page) diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c index 188560b1c110..390123d293ea 100644 --- a/drivers/nvdimm/region.c +++ b/drivers/nvdimm/region.c @@ -95,7 +95,7 @@ static void nd_region_remove(struct device *dev) nvdimm_bus_unlock(dev); /* - * Note, this assumes nd_device_lock() context to not race + * Note, this assumes device_lock() context to not race * nd_region_notify() */ sysfs_put(nd_region->bb_state); diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index 0cb274c2b508..d976260eca7a 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -279,7 +279,7 @@ static ssize_t set_cookie_show(struct device *dev, * the v1.1 namespace label cookie definition. To read all this * data we need to wait for probing to settle. 
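For orientation, a rough sketch of the caller-side sequence the new recovery_write op enables. It is hedged: the dax-core entry points used here (dax_direct_access() taking an access mode, dax_recovery_write()) belong to the same series but are not part of the hunks shown, so their exact shape is an assumption.

#include <linux/dax.h>
#include <linux/uio.h>

/* Hypothetical helper: retry a write that hit poisoned pmem via recovery. */
static size_t example_dax_recovery(struct dax_device *dax_dev, pgoff_t pgoff,
				   size_t bytes, struct iov_iter *iter)
{
	void *kaddr;
	long avail;

	/* DAX_RECOVERY_WRITE asks the driver for a mapping despite badblocks */
	avail = dax_direct_access(dax_dev, pgoff, 1, DAX_RECOVERY_WRITE,
				  &kaddr, NULL);
	if (avail <= 0)
		return 0;

	/* ends up in pmem_recovery_write() through dax_ops->recovery_write */
	return dax_recovery_write(dax_dev, pgoff, kaddr, bytes, iter);
}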
*/ - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); if (nd_region->ndr_mappings) { @@ -296,7 +296,7 @@ static ssize_t set_cookie_show(struct device *dev, } } nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); if (rc) return rc; @@ -353,12 +353,12 @@ static ssize_t available_size_show(struct device *dev, * memory nvdimm_bus_lock() is dropped, but that's userspace's * problem to not race itself. */ - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_available_dpa(nd_region); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -370,12 +370,12 @@ static ssize_t max_available_extent_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); unsigned long long available = 0; - nd_device_lock(dev); + device_lock(dev); nvdimm_bus_lock(dev); wait_nvdimm_bus_probe_idle(dev); available = nd_region_allocatable_dpa(nd_region); nvdimm_bus_unlock(dev); - nd_device_unlock(dev); + device_unlock(dev); return sprintf(buf, "%llu\n", available); } @@ -549,12 +549,12 @@ static ssize_t region_badblocks_show(struct device *dev, struct nd_region *nd_region = to_nd_region(dev); ssize_t rc; - nd_device_lock(dev); + device_lock(dev); if (dev->driver) rc = badblocks_show(&nd_region->bb, buf, 0); else rc = -ENXIO; - nd_device_unlock(dev); + device_unlock(dev); return rc; } @@ -949,6 +949,8 @@ static unsigned long default_align(struct nd_region *nd_region) return align; } +static struct lock_class_key nvdimm_region_key; + static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc, const struct device_type *dev_type, const char *caller) @@ -1035,6 +1037,8 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, else nd_region->flush = NULL; + device_initialize(dev); + lockdep_set_class(&dev->mutex, &nvdimm_region_key); nd_device_register(dev); return nd_region; diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 4b80150e4afa..b5aa55c61461 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -379,11 +379,6 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid) || !nvdimm->sec.flags) return -EOPNOTSUPP; - if (dev->driver == NULL) { - dev_dbg(dev, "Unable to overwrite while DIMM active.\n"); - return -EINVAL; - } - rc = check_security_state(nvdimm); if (rc) return rc; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 740407252298..84063eaebb91 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -456,103 +456,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); -/** - * dev_pm_opp_find_level_exact() - search for an exact level - * @dev: device for which we do this operation - * @level: level to search for - * - * Return: Searches for exact match in the opp table and returns pointer to the - * matching opp if found, else returns ERR_PTR in case of error and should - * be handled using IS_ERR. Error return values can be: - * EINVAL: for bad pointer - * ERANGE: no match found for search - * ENODEV: if device not found in list of registered devices - * - * The callers are required to call dev_pm_opp_put() for the returned OPP after - * use. 
- */ -struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned int level) -{ - struct opp_table *opp_table; - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) { - int r = PTR_ERR(opp_table); - - dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); - return ERR_PTR(r); - } - - mutex_lock(&opp_table->lock); - - list_for_each_entry(temp_opp, &opp_table->opp_list, node) { - if (temp_opp->level == level) { - opp = temp_opp; - - /* Increment the reference count of OPP */ - dev_pm_opp_get(opp); - break; - } - } - - mutex_unlock(&opp_table->lock); - dev_pm_opp_put_opp_table(opp_table); - - return opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); - -/** - * dev_pm_opp_find_level_ceil() - search for an rounded up level - * @dev: device for which we do this operation - * @level: level to search for - * - * Return: Searches for rounded up match in the opp table and returns pointer - * to the matching opp if found, else returns ERR_PTR in case of error and - * should be handled using IS_ERR. Error return values can be: - * EINVAL: for bad pointer - * ERANGE: no match found for search - * ENODEV: if device not found in list of registered devices - * - * The callers are required to call dev_pm_opp_put() for the returned OPP after - * use. - */ -struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level) -{ - struct opp_table *opp_table; - struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); - - opp_table = _find_opp_table(dev); - if (IS_ERR(opp_table)) { - int r = PTR_ERR(opp_table); - - dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); - return ERR_PTR(r); - } - - mutex_lock(&opp_table->lock); - - list_for_each_entry(temp_opp, &opp_table->opp_list, node) { - if (temp_opp->available && temp_opp->level >= *level) { - opp = temp_opp; - *level = opp->level; - - /* Increment the reference count of OPP */ - dev_pm_opp_get(opp); - break; - } - } - - mutex_unlock(&opp_table->lock); - dev_pm_opp_put_opp_table(opp_table); - - return opp; -} -EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); - static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, unsigned long *freq) { @@ -729,6 +632,223 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_by_volt); +/** + * dev_pm_opp_find_level_exact() - search for an exact level + * @dev: device for which we do this operation + * @level: level to search for + * + * Return: Searches for exact match in the opp table and returns pointer to the + * matching opp if found, else returns ERR_PTR in case of error and should + * be handled using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. 
+ */ +struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int r = PTR_ERR(opp_table); + + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->level == level) { + opp = temp_opp; + + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_exact); + +/** + * dev_pm_opp_find_level_ceil() - search for an rounded up level + * @dev: device for which we do this operation + * @level: level to search for + * + * Return: Searches for rounded up match in the opp table and returns pointer + * to the matching opp if found, else returns ERR_PTR in case of error and + * should be handled using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) { + int r = PTR_ERR(opp_table); + + dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available && temp_opp->level >= *level) { + opp = temp_opp; + *level = opp->level; + + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_level_ceil); + +/** + * dev_pm_opp_find_bw_ceil() - Search for a rounded ceil bandwidth + * @dev: device for which we do this operation + * @freq: start bandwidth + * @index: which bandwidth to compare, in case of OPPs with several values + * + * Search for the matching floor *available* OPP from a starting bandwidth + * for a device. + * + * Return: matching *opp and refreshes *bw accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. 
+ */ +struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, + unsigned int *bw, int index) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + if (!dev || !bw) { + dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw); + return ERR_PTR(-EINVAL); + } + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return ERR_CAST(opp_table); + + if (index >= opp_table->path_count) + return ERR_PTR(-EINVAL); + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available && temp_opp->bandwidth) { + if (temp_opp->bandwidth[index].peak >= *bw) { + opp = temp_opp; + *bw = opp->bandwidth[index].peak; + + /* Increment the reference count of OPP */ + dev_pm_opp_get(opp); + break; + } + } + } + + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_ceil); + +/** + * dev_pm_opp_find_bw_floor() - Search for a rounded floor bandwidth + * @dev: device for which we do this operation + * @freq: start bandwidth + * @index: which bandwidth to compare, in case of OPPs with several values + * + * Search for the matching floor *available* OPP from a starting bandwidth + * for a device. + * + * Return: matching *opp and refreshes *bw accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, + unsigned int *bw, int index) +{ + struct opp_table *opp_table; + struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); + + if (!dev || !bw) { + dev_err(dev, "%s: Invalid argument bw=%p\n", __func__, bw); + return ERR_PTR(-EINVAL); + } + + opp_table = _find_opp_table(dev); + if (IS_ERR(opp_table)) + return ERR_CAST(opp_table); + + if (index >= opp_table->path_count) + return ERR_PTR(-EINVAL); + + mutex_lock(&opp_table->lock); + + list_for_each_entry(temp_opp, &opp_table->opp_list, node) { + if (temp_opp->available && temp_opp->bandwidth) { + /* go to the next node, before choosing prev */ + if (temp_opp->bandwidth[index].peak > *bw) + break; + opp = temp_opp; + } + } + + /* Increment the reference count of OPP */ + if (!IS_ERR(opp)) + dev_pm_opp_get(opp); + mutex_unlock(&opp_table->lock); + dev_pm_opp_put_opp_table(opp_table); + + if (!IS_ERR(opp)) + *bw = opp->bandwidth[index].peak; + + return opp; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_bw_floor); + static int _set_opp_voltage(struct device *dev, struct regulator *reg, struct dev_pm_opp_supply *supply) { @@ -1486,9 +1606,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put); */ void dev_pm_opp_remove(struct device *dev, unsigned long freq) { - struct dev_pm_opp *opp; + struct dev_pm_opp *opp = NULL, *iter; struct opp_table *opp_table; - bool found = false; opp_table = _find_opp_table(dev); if (IS_ERR(opp_table)) @@ -1496,16 +1615,16 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) mutex_lock(&opp_table->lock); - list_for_each_entry(opp, &opp_table->opp_list, node) { - if (opp->rate == freq) { - found = true; + list_for_each_entry(iter, &opp_table->opp_list, node) { + if (iter->rate == freq) { + opp = iter; break; } } mutex_unlock(&opp_table->lock); - if (found) { + if (opp) { dev_pm_opp_put(opp); /* Drop the reference taken by 
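A short usage sketch for the lookup helpers above; the calling convention is the same for the level and bandwidth variants (pass a start value, have it refreshed to the chosen OPP's value, then drop the reference). Device handling and names are illustrative.

#include <linux/err.h>
#include <linux/pm_opp.h>

static int example_pick_level(struct device *dev, unsigned int min_level)
{
	unsigned int level = min_level;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_level_ceil(dev, &level);	/* 'level' is updated in place */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* ... program 'level' into the power domain / regulator here ... */

	dev_pm_opp_put(opp);	/* drop the reference the lookup took */
	return 0;
}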
dev_pm_opp_add() */ @@ -2019,10 +2138,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, for (i = 0; i < count; i++) { reg = regulator_get_optional(dev, names[i]); if (IS_ERR(reg)) { - ret = PTR_ERR(reg); - if (ret != -EPROBE_DEFER) - dev_err(dev, "%s: no regulator (%s) found: %d\n", - __func__, names[i], ret); + ret = dev_err_probe(dev, PTR_ERR(reg), + "%s: no regulator (%s) found\n", + __func__, names[i]); goto free_regulators; } @@ -2168,11 +2286,8 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) /* Find clk for the device */ opp_table->clk = clk_get(dev, name); if (IS_ERR(opp_table->clk)) { - ret = PTR_ERR(opp_table->clk); - if (ret != -EPROBE_DEFER) { - dev_err(dev, "%s: Couldn't find clock: %d\n", __func__, - ret); - } + ret = dev_err_probe(dev, PTR_ERR(opp_table->clk), + "%s: Couldn't find clock\n", __func__); goto err; } diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c index 3fcc1f97f2d1..1b6e5c55c3ed 100644 --- a/drivers/opp/debugfs.c +++ b/drivers/opp/debugfs.c @@ -195,14 +195,18 @@ void opp_debug_register(struct opp_device *opp_dev, struct opp_table *opp_table) static void opp_migrate_dentry(struct opp_device *opp_dev, struct opp_table *opp_table) { - struct opp_device *new_dev; + struct opp_device *new_dev = NULL, *iter; const struct device *dev; struct dentry *dentry; /* Look for next opp-dev */ - list_for_each_entry(new_dev, &opp_table->dev_list, node) - if (new_dev != opp_dev) + list_for_each_entry(iter, &opp_table->dev_list, node) + if (iter != opp_dev) { + new_dev = iter; break; + } + + BUG_ON(!new_dev); /* new_dev is guaranteed to be valid here */ dev = new_dev->dev; diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 485ea980bde7..30394929d700 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -437,11 +437,11 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table) /* Checking only first OPP is sufficient */ np = of_get_next_available_child(opp_np, NULL); + of_node_put(opp_np); if (!np) { dev_err(dev, "OPP table empty\n"); return -EINVAL; } - of_node_put(opp_np); prop = of_find_property(np, "opp-peak-kBps", NULL); of_node_put(np); diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index d270a204324e..db814f7b93ba 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -92,6 +92,13 @@ static enum pci_protocol_version_t pci_protocol_versions[] = { #define SLOT_NAME_SIZE 11 /* + * Size of requestor for VMbus; the value is based on the observation + * that having more than one request outstanding is 'rare', and so 64 + * should be generous in ensuring that we don't ever run out. 
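The regulator and clock lookups converted above in opp/core.c both collapse the "print unless the error is -EPROBE_DEFER, then return" boilerplate into dev_err_probe(), which also records the deferral reason for later inspection. A minimal sketch of the idiom; the supply name is illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_vdd(struct device *dev, struct regulator **out)
{
	struct regulator *reg = regulator_get_optional(dev, "vdd");

	if (IS_ERR(reg))
		/* silent on -EPROBE_DEFER, logs otherwise, returns the errno */
		return dev_err_probe(dev, PTR_ERR(reg), "no vdd regulator found\n");

	*out = reg;
	return 0;
}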
+ */ +#define HV_PCI_RQSTOR_SIZE 64 + +/* * Message Types */ @@ -604,17 +611,19 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) return cfg->vector; } -static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) -{ - msi_entry->address.as_uint32 = msi_desc->msg.address_lo; - msi_entry->data.as_uint32 = msi_desc->msg.data; -} - static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { - return pci_msi_prepare(domain, dev, nvec, info); + int ret = pci_msi_prepare(domain, dev, nvec, info); + + /* + * By using the interrupt remapper in the hypervisor IOMMU, contiguous + * CPU vectors is not needed for multi-MSI + */ + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + + return ret; } /** @@ -631,6 +640,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; + struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; struct cpumask *dest; cpumask_var_t tmp; @@ -645,6 +655,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + int_desc = data->chip_data; spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); @@ -652,7 +663,8 @@ static void hv_arch_irq_unmask(struct irq_data *data) memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; - hv_set_msi_entry_from_desc(¶ms->int_entry.msi_entry, msi_desc); + params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff; + params->int_entry.msi_entry.data.as_uint32 = int_desc->data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | @@ -969,11 +981,7 @@ static void hv_pci_generic_compl(void *context, struct pci_response *resp, { struct hv_pci_compl *comp_pkt = context; - if (resp_packet_size >= offsetofend(struct pci_response, status)) - comp_pkt->completion_status = resp->status; - else - comp_pkt->completion_status = -1; - + comp_pkt->completion_status = resp->status; complete(&comp_pkt->host_event); } @@ -1513,6 +1521,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt; + if (!int_desc->vector_count) { + kfree(int_desc); + return; + } memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = @@ -1520,7 +1532,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, int_pkt->wslot.slot = hpdev->desc.win_slot.slot; int_pkt->int_desc = *int_desc; vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), - (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); + 0, VM_PKT_DATA_INBAND, 0); kfree(int_desc); } @@ -1590,19 +1602,24 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, struct pci_create_int_response *int_resp = (struct pci_create_int_response *)resp; + if (resp_packet_size < sizeof(*int_resp)) { + comp_pkt->comp_pkt.completion_status = -1; + goto out; + } comp_pkt->comp_pkt.completion_status = resp->status; comp_pkt->int_desc = int_resp->int_desc; +out: complete(&comp_pkt->comp_pkt.host_event); } static u32 hv_compose_msi_req_v1( struct pci_create_interrupt *int_pkt, struct cpumask *affinity, - u32 
slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; /* @@ -1625,14 +1642,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity) static u32 hv_compose_msi_req_v2( struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int cpu; int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1644,7 +1661,7 @@ static u32 hv_compose_msi_req_v2( static u32 hv_compose_msi_req_v3( struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, - u32 slot, u32 vector) + u32 slot, u32 vector, u8 vector_count) { int cpu; @@ -1652,7 +1669,7 @@ static u32 hv_compose_msi_req_v3( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1683,6 +1700,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; + struct msi_desc *msi_desc; + u8 vector, vector_count; struct { struct pci_packet pci_pkt; union { @@ -1691,11 +1710,21 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct pci_create_interrupt3 v3; } int_pkts; } __packed ctxt; - + u64 trans_id; u32 size; int ret; - pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + /* Reuse the previous allocation */ + if (data->chip_data) { + int_desc = data->chip_data; + msg->address_hi = int_desc->address >> 32; + msg->address_lo = int_desc->address & 0xffffffff; + msg->data = int_desc->data; + return; + } + + msi_desc = irq_data_get_msi_desc(data); + pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1704,17 +1733,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (!hpdev) goto return_null_message; - /* Free any previous message that might have already been composed. */ - if (data->chip_data) { - int_desc = data->chip_data; - data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); - } - int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference; + if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) { + /* + * If this is not the first MSI of Multi MSI, we already have + * a mapping. Can exit early. + */ + if (msi_desc->irq != data->irq) { + data->chip_data = int_desc; + int_desc->address = msi_desc->msg.address_lo | + (u64)msi_desc->msg.address_hi << 32; + int_desc->data = msi_desc->msg.data + + (data->irq - msi_desc->irq); + msg->address_hi = msi_desc->msg.address_hi; + msg->address_lo = msi_desc->msg.address_lo; + msg->data = int_desc->data; + put_pcichild(hpdev); + return; + } + /* + * The vector we select here is a dummy value. 
The correct + * value gets sent to the hypervisor in unmask(). This needs + * to be aligned with the count, and also not zero. Multi-msi + * is powers of 2 up to 32, so 32 will always work here. + */ + vector = 32; + vector_count = msi_desc->nvec_used; + } else { + vector = hv_msi_get_int_vector(data); + vector_count = 1; + } + memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; @@ -1725,7 +1777,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; case PCI_PROTOCOL_VERSION_1_2: @@ -1733,14 +1786,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break; default: @@ -1753,10 +1808,10 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) goto free_int_desc; } - ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, - size, (unsigned long)&ctxt.pci_pkt, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts, + size, (unsigned long)&ctxt.pci_pkt, + &trans_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) { dev_err(&hbus->hdev->device, "Sending request for interrupt failed: 0x%x", @@ -1835,6 +1890,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) enable_tasklet: tasklet_enable(&channel->callback_event); + /* + * The completion packet on the stack becomes invalid after 'return'; + * remove the ID from the VMbus requestor if the identifier is still + * mapped to/associated with the packet. (The identifier could have + * been 're-used', i.e., already removed and (re-)mapped.) + * + * Cf. hv_pci_onchannelcallback(). + */ + vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt); free_int_desc: kfree(int_desc); drop_reference: @@ -2082,12 +2146,17 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus) } } if (high_size <= 1 && low_size <= 1) { - /* Set the memory enable bit. */ - _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, - &command); - command |= PCI_COMMAND_MEMORY; - _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, - command); + /* + * No need to set the PCI_COMMAND_MEMORY bit as + * the core PCI driver doesn't require the bit + * to be pre-set. Actually here we intentionally + * keep the bit off so that the PCI BAR probing + * in the core PCI driver doesn't cause Hyper-V + * to unnecessarily unmap/map the virtual BARs + * from/to the physical BARs multiple times. + * This reduces the VM boot time significantly + * if the BAR sizes are huge. + */ break; } } @@ -2223,12 +2292,14 @@ static void q_resource_requirements(void *context, struct pci_response *resp, struct q_res_req_compl *completion = context; struct pci_q_res_req_response *q_res_req = (struct pci_q_res_req_response *)resp; + s32 status; int i; - if (resp->status < 0) { + status = (resp_packet_size < sizeof(*q_res_req)) ? 
-1 : resp->status; + if (status < 0) { dev_err(&completion->hpdev->hbus->hdev->device, "query resource requirements failed: %x\n", - resp->status); + status); } else { for (i = 0; i < PCI_STD_NUM_BARS; i++) { completion->hpdev->probed_bar[i] = @@ -2652,7 +2723,7 @@ static void hv_eject_device_work(struct work_struct *work) ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; vmbus_sendpacket(hbus->hdev->channel, ejct_pkt, - sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, + sizeof(*ejct_pkt), 0, VM_PKT_DATA_INBAND, 0); /* For the get_pcichild() in hv_pci_eject_device() */ @@ -2699,8 +2770,9 @@ static void hv_pci_onchannelcallback(void *context) const int packet_size = 0x100; int ret; struct hv_pcibus_device *hbus = context; + struct vmbus_channel *chan = hbus->hdev->channel; u32 bytes_recvd; - u64 req_id; + u64 req_id, req_addr; struct vmpacket_descriptor *desc; unsigned char *buffer; int bufferlen = packet_size; @@ -2712,14 +2784,15 @@ static void hv_pci_onchannelcallback(void *context) struct pci_dev_inval_block *inval; struct pci_dev_incoming *dev_message; struct hv_pci_dev *hpdev; + unsigned long flags; buffer = kmalloc(bufferlen, GFP_ATOMIC); if (!buffer) return; while (1) { - ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer, - bufferlen, &bytes_recvd, &req_id); + ret = vmbus_recvpacket_raw(chan, buffer, bufferlen, + &bytes_recvd, &req_id); if (ret == -ENOBUFS) { kfree(buffer); @@ -2746,15 +2819,29 @@ static void hv_pci_onchannelcallback(void *context) switch (desc->type) { case VM_PKT_COMP: + lock_requestor(chan, flags); + req_addr = __vmbus_request_addr_match(chan, req_id, + VMBUS_RQST_ADDR_ANY); + if (req_addr == VMBUS_RQST_ERROR) { + unlock_requestor(chan, flags); + dev_err(&hbus->hdev->device, + "Invalid transaction ID %llx\n", + req_id); + break; + } + comp_packet = (struct pci_packet *)req_addr; + response = (struct pci_response *)buffer; /* - * The host is trusted, and thus it's safe to interpret - * this transaction ID as a pointer. + * Call ->completion_func() within the critical section to make + * sure that the packet pointer is still valid during the call: + * here 'valid' means that there's a task still waiting for the + * completion, and that the packet data is still on the waiting + * task's stack. Cf. hv_compose_msi_msg(). 
*/ - comp_packet = (struct pci_packet *)req_id; - response = (struct pci_response *)buffer; comp_packet->completion_func(comp_packet->compl_ctxt, response, bytes_recvd); + unlock_requestor(chan, flags); break; case VM_PKT_DATA_INBAND: @@ -2764,7 +2851,8 @@ static void hv_pci_onchannelcallback(void *context) case PCI_BUS_RELATIONS: bus_rel = (struct pci_bus_relations *)buffer; - if (bytes_recvd < + if (bytes_recvd < sizeof(*bus_rel) || + bytes_recvd < struct_size(bus_rel, func, bus_rel->device_count)) { dev_err(&hbus->hdev->device, @@ -2778,7 +2866,8 @@ static void hv_pci_onchannelcallback(void *context) case PCI_BUS_RELATIONS2: bus_rel2 = (struct pci_bus_relations2 *)buffer; - if (bytes_recvd < + if (bytes_recvd < sizeof(*bus_rel2) || + bytes_recvd < struct_size(bus_rel2, func, bus_rel2->device_count)) { dev_err(&hbus->hdev->device, @@ -2792,6 +2881,11 @@ static void hv_pci_onchannelcallback(void *context) case PCI_EJECT: dev_message = (struct pci_dev_incoming *)buffer; + if (bytes_recvd < sizeof(*dev_message)) { + dev_err(&hbus->hdev->device, + "eject message too small\n"); + break; + } hpdev = get_pcichild_wslot(hbus, dev_message->wslot.slot); if (hpdev) { @@ -2803,6 +2897,11 @@ static void hv_pci_onchannelcallback(void *context) case PCI_INVALIDATE_BLOCK: inval = (struct pci_dev_inval_block *)buffer; + if (bytes_recvd < sizeof(*inval)) { + dev_err(&hbus->hdev->device, + "invalidate message too small\n"); + break; + } hpdev = get_pcichild_wslot(hbus, inval->wslot.slot); if (hpdev) { @@ -3431,6 +3530,10 @@ static int hv_pci_probe(struct hv_device *hdev, goto free_dom; } + hdev->channel->next_request_id_callback = vmbus_next_request_id; + hdev->channel->request_addr_callback = vmbus_request_addr; + hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; + ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) @@ -3561,6 +3664,7 @@ free_bus: static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) { struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); + struct vmbus_channel *chan = hdev->channel; struct { struct pci_packet teardown_packet; u8 buffer[sizeof(struct pci_message)]; @@ -3568,13 +3672,14 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) struct hv_pci_compl comp_pkt; struct hv_pci_dev *hpdev, *tmp; unsigned long flags; + u64 trans_id; int ret; /* * After the host sends the RESCIND_CHANNEL message, it doesn't * access the per-channel ringbuffer any longer. */ - if (hdev->channel->rescind) + if (chan->rescind) return 0; if (!keep_devs) { @@ -3611,16 +3716,26 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs) pkt.teardown_packet.compl_ctxt = &comp_pkt; pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; - ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, - sizeof(struct pci_message), - (unsigned long)&pkt.teardown_packet, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message, + sizeof(struct pci_message), + (unsigned long)&pkt.teardown_packet, + &trans_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) return ret; - if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) + if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) { + /* + * The completion packet on the stack becomes invalid after + * 'return'; remove the ID from the VMbus requestor if the + * identifier is still mapped to/associated with the packet. + * + * Cf. 
hv_pci_onchannelcallback(). + */ + vmbus_request_addr_match(chan, trans_id, + (unsigned long)&pkt.teardown_packet); return -ETIMEDOUT; + } return 0; } @@ -3761,6 +3876,10 @@ static int hv_pci_resume(struct hv_device *hdev) hbus->state = hv_pcibus_init; + hdev->channel->next_request_id_callback = vmbus_next_request_id; + hdev->channel->request_addr_callback = vmbus_request_addr; + hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE; + ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, hv_pci_onchannelcallback, hbus); if (ret) diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 6c1b81304665..196834ed44fe 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -369,7 +369,6 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev, dev_dbg(dev, "Parsing dma-ranges property...\n"); for_each_of_pci_range(&parser, &range) { - struct resource_entry *entry; /* * If we failed translation or got a zero-sized region * then skip this range @@ -393,12 +392,7 @@ static int devm_of_pci_get_host_bridge_resources(struct device *dev, goto failed; } - /* Keep the resource list sorted */ - resource_list_for_each_entry(entry, ib_resources) - if (entry->res->start > res->start) - break; - - pci_add_resource_offset(&entry->node, res, + pci_add_resource_offset(ib_resources, res, res->start - range.pci_addr); } diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 2f3b69adfc9e..49238ddd39ee 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -20,6 +20,7 @@ #include <linux/of_device.h> #include <linux/acpi.h> #include <linux/dma-map-ops.h> +#include <linux/iommu.h> #include "pci.h" #include "pcie/portdrv.h" @@ -1620,6 +1621,7 @@ static int pci_bus_num_vf(struct device *dev) */ static int pci_dma_configure(struct device *dev) { + struct pci_driver *driver = to_pci_driver(dev->driver); struct device *bridge; int ret = 0; @@ -1635,9 +1637,24 @@ static int pci_dma_configure(struct device *dev) } pci_put_host_bridge_device(bridge); + + if (!ret && !driver->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); + } + return ret; } +static void pci_dma_cleanup(struct device *dev) +{ + struct pci_driver *driver = to_pci_driver(dev->driver); + + if (!driver->driver_managed_dma) + iommu_device_unuse_default_domain(dev); +} + struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, @@ -1651,6 +1668,7 @@ struct bus_type pci_bus_type = { .pm = PCI_PM_OPS_PTR, .num_vf = pci_bus_num_vf, .dma_configure = pci_dma_configure, + .dma_cleanup = pci_dma_cleanup, }; EXPORT_SYMBOL(pci_bus_type); diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index e408099fea52..d1f4c1ce7bd1 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -36,6 +36,7 @@ static struct pci_driver stub_driver = { .name = "pci-stub", .id_table = NULL, /* only dynamic id's */ .probe = pci_stub_probe, + .driver_managed_dma = true, }; static int __init pci_stub_init(void) diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 4b8801656ffb..7f8788a970ae 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -202,6 +202,8 @@ static struct pci_driver pcie_portdriver = { .err_handler = &pcie_portdrv_err_handler, + .driver_managed_dma = true, + .driver.pm = PCIE_PORTDRV_PM_OPS, }; diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index ec977f031bc2..bf495bf0f48a 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -151,7 +151,7 @@ 
config TCIC config PCMCIA_ALCHEMY_DEVBOARD tristate "Alchemy Db/Pb1xxx PCMCIA socket services" - depends on MIPS_ALCHEMY && PCMCIA + depends on MIPS_DB1XXX && PCMCIA help Enable this driver of you want PCMCIA support on your Alchemy Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300 diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c index 16f573173471..bb06311d0b5f 100644 --- a/drivers/pcmcia/bcm63xx_pcmcia.c +++ b/drivers/pcmcia/bcm63xx_pcmcia.c @@ -327,10 +327,11 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev) { struct bcm63xx_pcmcia_socket *skt; struct pcmcia_socket *sock; - struct resource *res, *irq_res; + struct resource *res; unsigned int regmem_size = 0, iomem_size = 0; u32 val; int ret; + int irq; skt = kzalloc(sizeof(*skt), GFP_KERNEL); if (!skt) @@ -342,9 +343,9 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev) /* make sure we have all resources we need */ skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + irq = platform_get_irq(pdev, 0); skt->pd = pdev->dev.platform_data; - if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) { + if (!skt->common_res || !skt->attr_res || (irq < 0) || !skt->pd) { ret = -EINVAL; goto err; } @@ -380,7 +381,7 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev) sock->dev.parent = &pdev->dev; sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD; sock->io_offset = (unsigned long)skt->io_base; - sock->pci_irq = irq_res->start; + sock->pci_irq = irq; #ifdef CONFIG_CARDBUS sock->cb_dev = bcm63xx_cb_dev; diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 6b6c578b5f92..ad1141fddb4c 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -394,7 +394,7 @@ static int do_validate_mem(struct pcmcia_socket *s, * do_mem_probe() checks a memory region for use by the PCMCIA subsystem. * To do so, the area is split up into sensible parts, and then passed * into the @validate() function. Only if @validate() and @fallback() fail, - * the area is marked as unavaibale for use by the PCMCIA subsystem. The + * the area is marked as unavailable for use by the PCMCIA subsystem. The * function returns the size of the usable memory area. 
*/ static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num, diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 47e433e09c5c..dad453054776 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -358,6 +358,22 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip, return 0; } +static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc, + struct device_node *np) +{ + struct pinctrl_dev *pctldev = of_pinctrl_get(np); + + of_node_put(np); + + if (!pctldev) + return 0; + + gpiochip_add_pin_range(gc, pinctrl_dev_get_devname(pctldev), 0, 0, + gc->ngpio); + + return 0; +} + static const struct gpio_chip bcm2835_gpio_chip = { .label = MODULE_NAME, .owner = THIS_MODULE, @@ -372,6 +388,7 @@ static const struct gpio_chip bcm2835_gpio_chip = { .base = -1, .ngpio = BCM2835_NUM_GPIOS, .can_sleep = false, + .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback, }; static const struct gpio_chip bcm2711_gpio_chip = { @@ -388,6 +405,7 @@ static const struct gpio_chip bcm2711_gpio_chip = { .base = -1, .ngpio = BCM2711_NUM_GPIOS, .can_sleep = false, + .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback, }; static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c index 6a7fe929a68b..3026a3b3da2d 100644 --- a/drivers/pinctrl/berlin/berlin-bg4ct.c +++ b/drivers/pinctrl/berlin/berlin-bg4ct.c @@ -460,8 +460,7 @@ static int berlin4ct_pinctrl_probe(struct platform_device *pdev) if (!rmconfig) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, res); + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 453dc47f4fa4..d96b1130efd3 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -206,3 +206,10 @@ config PINCTRL_IMX23 config PINCTRL_IMX28 bool select PINCTRL_MXS + +config PINCTRL_IMXRT1170 + bool "IMXRT1170 pinctrl driver" + depends on ARCH_MXC + select PINCTRL_IMX + help + Say Y here to enable the imxrt1170 pinctrl driver diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile index 9f5d1c090338..647dff060477 100644 --- a/drivers/pinctrl/freescale/Makefile +++ b/drivers/pinctrl/freescale/Makefile @@ -32,3 +32,4 @@ obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o obj-$(CONFIG_PINCTRL_IMXRT1050) += pinctrl-imxrt1050.o +obj-$(CONFIG_PINCTRL_IMXRT1170) += pinctrl-imxrt1170.o diff --git a/drivers/pinctrl/freescale/pinctrl-imxrt1170.c b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c new file mode 100644 index 000000000000..5da1545fde91 --- /dev/null +++ b/drivers/pinctrl/freescale/pinctrl-imxrt1170.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 + * Author(s): Jesse Taube <Mr.Bossman075@gmail.com> + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/platform_device.h> + +#include "pinctrl-imx.h" + +enum imxrt1170_pads { + IMXRT1170_PAD_RESERVE0, + IMXRT1170_PAD_RESERVE1, + IMXRT1170_PAD_RESERVE2, + IMXRT1170_PAD_RESERVE3, + IMXRT1170_PAD_EMC_B1_00, + IMXRT1170_PAD_EMC_B1_01, + IMXRT1170_PAD_EMC_B1_02, + 
IMXRT1170_PAD_EMC_B1_03, + IMXRT1170_PAD_EMC_B1_04, + IMXRT1170_PAD_EMC_B1_05, + IMXRT1170_PAD_EMC_B1_06, + IMXRT1170_PAD_EMC_B1_07, + IMXRT1170_PAD_EMC_B1_08, + IMXRT1170_PAD_EMC_B1_09, + IMXRT1170_PAD_EMC_B1_10, + IMXRT1170_PAD_EMC_B1_11, + IMXRT1170_PAD_EMC_B1_12, + IMXRT1170_PAD_EMC_B1_13, + IMXRT1170_PAD_EMC_B1_14, + IMXRT1170_PAD_EMC_B1_15, + IMXRT1170_PAD_EMC_B1_16, + IMXRT1170_PAD_EMC_B1_17, + IMXRT1170_PAD_EMC_B1_18, + IMXRT1170_PAD_EMC_B1_19, + IMXRT1170_PAD_EMC_B1_20, + IMXRT1170_PAD_EMC_B1_21, + IMXRT1170_PAD_EMC_B1_22, + IMXRT1170_PAD_EMC_B1_23, + IMXRT1170_PAD_EMC_B1_24, + IMXRT1170_PAD_EMC_B1_25, + IMXRT1170_PAD_EMC_B1_26, + IMXRT1170_PAD_EMC_B1_27, + IMXRT1170_PAD_EMC_B1_28, + IMXRT1170_PAD_EMC_B1_29, + IMXRT1170_PAD_EMC_B1_30, + IMXRT1170_PAD_EMC_B1_31, + IMXRT1170_PAD_EMC_B1_32, + IMXRT1170_PAD_EMC_B1_33, + IMXRT1170_PAD_EMC_B1_34, + IMXRT1170_PAD_EMC_B1_35, + IMXRT1170_PAD_EMC_B1_36, + IMXRT1170_PAD_EMC_B1_37, + IMXRT1170_PAD_EMC_B1_38, + IMXRT1170_PAD_EMC_B1_39, + IMXRT1170_PAD_EMC_B1_40, + IMXRT1170_PAD_EMC_B1_41, + IMXRT1170_PAD_EMC_B2_00, + IMXRT1170_PAD_EMC_B2_01, + IMXRT1170_PAD_EMC_B2_02, + IMXRT1170_PAD_EMC_B2_03, + IMXRT1170_PAD_EMC_B2_04, + IMXRT1170_PAD_EMC_B2_05, + IMXRT1170_PAD_EMC_B2_06, + IMXRT1170_PAD_EMC_B2_07, + IMXRT1170_PAD_EMC_B2_08, + IMXRT1170_PAD_EMC_B2_09, + IMXRT1170_PAD_EMC_B2_10, + IMXRT1170_PAD_EMC_B2_11, + IMXRT1170_PAD_EMC_B2_12, + IMXRT1170_PAD_EMC_B2_13, + IMXRT1170_PAD_EMC_B2_14, + IMXRT1170_PAD_EMC_B2_15, + IMXRT1170_PAD_EMC_B2_16, + IMXRT1170_PAD_EMC_B2_17, + IMXRT1170_PAD_EMC_B2_18, + IMXRT1170_PAD_EMC_B2_19, + IMXRT1170_PAD_EMC_B2_20, + IMXRT1170_PAD_AD_00, + IMXRT1170_PAD_AD_01, + IMXRT1170_PAD_AD_02, + IMXRT1170_PAD_AD_03, + IMXRT1170_PAD_AD_04, + IMXRT1170_PAD_AD_05, + IMXRT1170_PAD_AD_06, + IMXRT1170_PAD_AD_07, + IMXRT1170_PAD_AD_08, + IMXRT1170_PAD_AD_09, + IMXRT1170_PAD_AD_10, + IMXRT1170_PAD_AD_11, + IMXRT1170_PAD_AD_12, + IMXRT1170_PAD_AD_13, + IMXRT1170_PAD_AD_14, + IMXRT1170_PAD_AD_15, + IMXRT1170_PAD_AD_16, + IMXRT1170_PAD_AD_17, + IMXRT1170_PAD_AD_18, + IMXRT1170_PAD_AD_19, + IMXRT1170_PAD_AD_20, + IMXRT1170_PAD_AD_21, + IMXRT1170_PAD_AD_22, + IMXRT1170_PAD_AD_23, + IMXRT1170_PAD_AD_24, + IMXRT1170_PAD_AD_25, + IMXRT1170_PAD_AD_26, + IMXRT1170_PAD_AD_27, + IMXRT1170_PAD_AD_28, + IMXRT1170_PAD_AD_29, + IMXRT1170_PAD_AD_30, + IMXRT1170_PAD_AD_31, + IMXRT1170_PAD_AD_32, + IMXRT1170_PAD_AD_33, + IMXRT1170_PAD_AD_34, + IMXRT1170_PAD_AD_35, + IMXRT1170_PAD_SD_B1_00, + IMXRT1170_PAD_SD_B1_01, + IMXRT1170_PAD_SD_B1_02, + IMXRT1170_PAD_SD_B1_03, + IMXRT1170_PAD_SD_B1_04, + IMXRT1170_PAD_SD_B1_05, + IMXRT1170_PAD_SD_B2_00, + IMXRT1170_PAD_SD_B2_01, + IMXRT1170_PAD_SD_B2_02, + IMXRT1170_PAD_SD_B2_03, + IMXRT1170_PAD_SD_B2_04, + IMXRT1170_PAD_SD_B2_05, + IMXRT1170_PAD_SD_B2_06, + IMXRT1170_PAD_SD_B2_07, + IMXRT1170_PAD_SD_B2_08, + IMXRT1170_PAD_SD_B2_09, + IMXRT1170_PAD_SD_B2_10, + IMXRT1170_PAD_SD_B2_11, + IMXRT1170_PAD_DISP_B1_00, + IMXRT1170_PAD_DISP_B1_01, + IMXRT1170_PAD_DISP_B1_02, + IMXRT1170_PAD_DISP_B1_03, + IMXRT1170_PAD_DISP_B1_04, + IMXRT1170_PAD_DISP_B1_05, + IMXRT1170_PAD_DISP_B1_06, + IMXRT1170_PAD_DISP_B1_07, + IMXRT1170_PAD_DISP_B1_08, + IMXRT1170_PAD_DISP_B1_09, + IMXRT1170_PAD_DISP_B1_10, + IMXRT1170_PAD_DISP_B1_11, + IMXRT1170_PAD_DISP_B2_00, + IMXRT1170_PAD_DISP_B2_01, + IMXRT1170_PAD_DISP_B2_02, + IMXRT1170_PAD_DISP_B2_03, + IMXRT1170_PAD_DISP_B2_04, + IMXRT1170_PAD_DISP_B2_05, + IMXRT1170_PAD_DISP_B2_06, + IMXRT1170_PAD_DISP_B2_07, + IMXRT1170_PAD_DISP_B2_08, + IMXRT1170_PAD_DISP_B2_09, + 
IMXRT1170_PAD_DISP_B2_10, + IMXRT1170_PAD_DISP_B2_11, + IMXRT1170_PAD_DISP_B2_12, + IMXRT1170_PAD_DISP_B2_13, + IMXRT1170_PAD_DISP_B2_14, + IMXRT1170_PAD_DISP_B2_15, +}; + +/* Pad names for the pinmux subsystem */ +static const struct pinctrl_pin_desc imxrt1170_pinctrl_pads[] = { + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE0), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE1), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE2), + IMX_PINCTRL_PIN(IMXRT1170_PAD_RESERVE3), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_21), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_22), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_23), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_24), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_25), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_26), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_27), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_28), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_29), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_30), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_31), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_32), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_33), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_34), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_35), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_36), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_37), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_38), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_39), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_40), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B1_41), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_EMC_B2_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_07), + 
IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_15), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_16), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_17), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_18), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_19), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_20), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_21), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_22), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_23), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_24), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_25), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_26), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_27), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_28), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_29), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_30), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_31), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_32), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_33), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_34), + IMX_PINCTRL_PIN(IMXRT1170_PAD_AD_35), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_SD_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B1_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_00), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_01), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_02), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_03), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_04), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_05), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_06), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_07), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_08), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_09), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_10), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_11), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_12), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_13), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_14), + IMX_PINCTRL_PIN(IMXRT1170_PAD_DISP_B2_15), +}; + +static const struct imx_pinctrl_soc_info imxrt1170_pinctrl_info = { + .pins = imxrt1170_pinctrl_pads, + .npins = ARRAY_SIZE(imxrt1170_pinctrl_pads), + .gpr_compatible = "fsl,imxrt1170-iomuxc-gpr", +}; + +static const struct of_device_id imxrt1170_pinctrl_of_match[] = { + { .compatible = "fsl,imxrt1170-iomuxc", .data = &imxrt1170_pinctrl_info, }, + { /* sentinel */ } +}; + +static int 
imxrt1170_pinctrl_probe(struct platform_device *pdev) +{ + return imx_pinctrl_probe(pdev, &imxrt1170_pinctrl_info); +} + +static struct platform_driver imxrt1170_pinctrl_driver = { + .driver = { + .name = "imxrt1170-pinctrl", + .of_match_table = of_match_ptr(imxrt1170_pinctrl_of_match), + .suppress_bind_attrs = true, + }, + .probe = imxrt1170_pinctrl_probe, +}; + +static int __init imxrt1170_pinctrl_init(void) +{ + return platform_driver_register(&imxrt1170_pinctrl_driver); +} +arch_initcall(imxrt1170_pinctrl_init); diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index f89c9fcd4e1b..31f8f271628c 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1350,15 +1350,15 @@ static void byt_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *vg = gpiochip_get_data(gc); - unsigned int offset = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); void __iomem *reg; - reg = byt_gpio_reg(vg, offset, BYT_INT_STAT_REG); + reg = byt_gpio_reg(vg, hwirq, BYT_INT_STAT_REG); if (!reg) return; raw_spin_lock(&byt_lock); - writel(BIT(offset % 32), reg); + writel(BIT(hwirq % 32), reg); raw_spin_unlock(&byt_lock); } @@ -1366,20 +1366,24 @@ static void byt_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *vg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); - byt_gpio_clear_triggering(vg, irqd_to_hwirq(d)); + byt_gpio_clear_triggering(vg, hwirq); + gpiochip_disable_irq(gc, hwirq); } static void byt_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *vg = gpiochip_get_data(gc); - unsigned int offset = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; void __iomem *reg; u32 value; - reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + gpiochip_enable_irq(gc, hwirq); + + reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG); if (!reg) return; @@ -1412,12 +1416,13 @@ static void byt_irq_unmask(struct irq_data *d) static int byt_irq_type(struct irq_data *d, unsigned int type) { struct intel_pinctrl *vg = gpiochip_get_data(irq_data_get_irq_chip_data(d)); - u32 offset = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 value; unsigned long flags; - void __iomem *reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG); + void __iomem *reg; - if (!reg || offset >= vg->chip.ngpio) + reg = byt_gpio_reg(vg, hwirq, BYT_CONF0_REG); + if (!reg) return -EINVAL; raw_spin_lock_irqsave(&byt_lock, flags); @@ -1447,6 +1452,16 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) return 0; } +static const struct irq_chip byt_gpio_irq_chip = { + .name = "BYT-GPIO", + .irq_ack = byt_irq_ack, + .irq_mask = byt_irq_mask, + .irq_unmask = byt_irq_unmask, + .irq_set_type = byt_irq_type, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static void byt_gpio_irq_handler(struct irq_desc *desc) { struct irq_data *data = irq_desc_get_irq_data(desc); @@ -1633,15 +1648,8 @@ static int byt_gpio_probe(struct intel_pinctrl *vg) if (irq > 0) { struct gpio_irq_chip *girq; - vg->irqchip.name = "BYT-GPIO", - vg->irqchip.irq_ack = byt_irq_ack, - vg->irqchip.irq_mask = byt_irq_mask, - vg->irqchip.irq_unmask = byt_irq_unmask, - vg->irqchip.irq_set_type = byt_irq_type, - vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED, - girq = &gc->irq; - 
girq->chip = &vg->irqchip; + gpio_irq_chip_set_chip(girq, &byt_gpio_irq_chip); girq->init_hw = byt_gpio_irq_init_hw; girq->init_valid_mask = byt_init_irq_valid_mask; girq->parent_handler = byt_gpio_irq_handler; diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index 2be7e414f803..fb15cd10a32f 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c @@ -1035,4 +1035,5 @@ module_exit(bxt_pinctrl_exit); MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); MODULE_DESCRIPTION("Intel Broxton SoC pinctrl/GPIO driver"); MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:apollolake-pinctrl"); MODULE_ALIAS("platform:broxton-pinctrl"); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 1d5818269076..26b2a425d201 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1242,12 +1242,12 @@ static void chv_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - int pin = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); u32 intr_line; raw_spin_lock(&chv_lock); - intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0); + intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0); intr_line &= CHV_PADCTRL0_INTSEL_MASK; intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; chv_pctrl_writel(pctrl, CHV_INTSTAT, BIT(intr_line)); @@ -1255,17 +1255,15 @@ static void chv_gpio_irq_ack(struct irq_data *d) raw_spin_unlock(&chv_lock); } -static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) +static void chv_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq, bool mask) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - int pin = irqd_to_hwirq(d); u32 value, intr_line; unsigned long flags; raw_spin_lock_irqsave(&chv_lock, flags); - intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0); + intr_line = chv_readl(pctrl, hwirq, CHV_PADCTRL0); intr_line &= CHV_PADCTRL0_INTSEL_MASK; intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; @@ -1281,12 +1279,20 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) static void chv_gpio_irq_mask(struct irq_data *d) { - chv_gpio_irq_mask_unmask(d, true); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + chv_gpio_irq_mask_unmask(gc, hwirq, true); + gpiochip_disable_irq(gc, hwirq); } static void chv_gpio_irq_unmask(struct irq_data *d) { - chv_gpio_irq_mask_unmask(d, false); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, hwirq); + chv_gpio_irq_mask_unmask(gc, hwirq, false); } static unsigned chv_gpio_irq_startup(struct irq_data *d) @@ -1306,17 +1312,17 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) struct intel_pinctrl *pctrl = gpiochip_get_data(gc); struct device *dev = pctrl->dev; struct intel_community_context *cctx = &pctrl->context.communities[0]; - unsigned int pin = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); irq_flow_handler_t handler; unsigned long flags; u32 intsel, value; raw_spin_lock_irqsave(&chv_lock, flags); - intsel = chv_readl(pctrl, pin, CHV_PADCTRL0); + intsel = chv_readl(pctrl, hwirq, CHV_PADCTRL0); intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; - value = chv_readl(pctrl, pin, CHV_PADCTRL1); + value = 
chv_readl(pctrl, hwirq, CHV_PADCTRL1); if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL) handler = handle_level_irq; else @@ -1324,9 +1330,9 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) { irq_set_handler_locked(d, handler); - dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %u\n", - intsel, pin); - cctx->intr_lines[intsel] = pin; + dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %lu\n", + intsel, hwirq); + cctx->intr_lines[intsel] = hwirq; } raw_spin_unlock_irqrestore(&chv_lock, flags); } @@ -1392,14 +1398,14 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *pctrl = gpiochip_get_data(gc); - unsigned int pin = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 value; int ret; raw_spin_lock_irqsave(&chv_lock, flags); - ret = chv_gpio_set_intr_line(pctrl, pin); + ret = chv_gpio_set_intr_line(pctrl, hwirq); if (ret) goto out_unlock; @@ -1416,8 +1422,8 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) * 2. If the pin cfg is not locked in BIOS: * Driver programs the IntWakeCfg bits and save the mapping. */ - if (!chv_pad_locked(pctrl, pin)) { - value = chv_readl(pctrl, pin, CHV_PADCTRL1); + if (!chv_pad_locked(pctrl, hwirq)) { + value = chv_readl(pctrl, hwirq, CHV_PADCTRL1); value &= ~CHV_PADCTRL1_INTWAKECFG_MASK; value &= ~CHV_PADCTRL1_INVRXTX_MASK; @@ -1434,7 +1440,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) value |= CHV_PADCTRL1_INVRXTX_RXDATA; } - chv_writel(pctrl, pin, CHV_PADCTRL1, value); + chv_writel(pctrl, hwirq, CHV_PADCTRL1, value); } if (type & IRQ_TYPE_EDGE_BOTH) @@ -1448,6 +1454,17 @@ out_unlock: return ret; } +static const struct irq_chip chv_gpio_irq_chip = { + .name = "chv-gpio", + .irq_startup = chv_gpio_irq_startup, + .irq_ack = chv_gpio_irq_ack, + .irq_mask = chv_gpio_irq_mask, + .irq_unmask = chv_gpio_irq_unmask, + .irq_set_type = chv_gpio_irq_type, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); @@ -1611,15 +1628,8 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq) chip->base = -1; pctrl->irq = irq; - pctrl->irqchip.name = "chv-gpio"; - pctrl->irqchip.irq_startup = chv_gpio_irq_startup; - pctrl->irqchip.irq_ack = chv_gpio_irq_ack; - pctrl->irqchip.irq_mask = chv_gpio_irq_mask; - pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask; - pctrl->irqchip.irq_set_type = chv_gpio_irq_type; - pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE; - - chip->irq.chip = &pctrl->irqchip; + + gpio_irq_chip_set_chip(&chip->irq, &chv_gpio_irq_chip); chip->irq.init_hw = chv_gpio_irq_init_hw; chip->irq.parent_handler = chv_gpio_irq_handler; chip->irq.num_parents = 1; diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 826d494f3cc6..ffc045f7bf00 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -858,6 +858,9 @@ static const struct pinctrl_desc intel_pinctrl_desc = { * When coming through gpiolib irqchip, the GPIO offset is not * automatically translated to pinctrl pin number. This function can be * used to find out the corresponding pinctrl pin. 
+ * + * Return: a pin number and pointers to the community and pad group, which + * the pin belongs to, or negative error code if translation can't be done. */ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset, const struct intel_community **community, @@ -899,6 +902,8 @@ static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned int offset, * @pin: pin number * * Translate the pin number of pinctrl to GPIO offset + * + * Return: a GPIO offset, or negative error code if translation can't be done. */ static __maybe_unused int intel_pin_to_gpio(struct intel_pinctrl *pctrl, int pin) { @@ -1039,15 +1044,14 @@ static void intel_gpio_irq_ack(struct irq_data *d) } } -static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) +static void intel_gpio_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t hwirq, bool mask) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *pctrl = gpiochip_get_data(gc); const struct intel_community *community; const struct intel_padgroup *padgrp; int pin; - pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), &community, &padgrp); + pin = intel_gpio_to_pin(pctrl, hwirq, &community, &padgrp); if (pin >= 0) { unsigned int gpp, gpp_offset; unsigned long flags; @@ -1077,12 +1081,20 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) static void intel_gpio_irq_mask(struct irq_data *d) { - intel_gpio_irq_mask_unmask(d, true); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + intel_gpio_irq_mask_unmask(gc, hwirq, true); + gpiochip_disable_irq(gc, hwirq); } static void intel_gpio_irq_unmask(struct irq_data *d) { - intel_gpio_irq_mask_unmask(d, false); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, hwirq); + intel_gpio_irq_mask_unmask(gc, hwirq, false); } static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) @@ -1157,6 +1169,17 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) return 0; } +static const struct irq_chip intel_gpio_irq_chip = { + .name = "intel-gpio", + .irq_ack = intel_gpio_irq_ack, + .irq_mask = intel_gpio_irq_mask, + .irq_unmask = intel_gpio_irq_unmask, + .irq_set_type = intel_gpio_irq_type, + .irq_set_wake = intel_gpio_irq_wake, + .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1319,15 +1342,6 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) pctrl->chip.add_pin_ranges = intel_gpio_add_pin_ranges; pctrl->irq = irq; - /* Setup IRQ chip */ - pctrl->irqchip.name = dev_name(pctrl->dev); - pctrl->irqchip.irq_ack = intel_gpio_irq_ack; - pctrl->irqchip.irq_mask = intel_gpio_irq_mask; - pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask; - pctrl->irqchip.irq_set_type = intel_gpio_irq_type; - pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake; - pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND; - /* * On some platforms several GPIO controllers share the same interrupt * line. 
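The Intel pinctrl hunks in this series (baytrail, cherryview, lynxpoint and the core intel driver) all apply the same immutable-irqchip conversion: the per-device, writable struct irq_chip is replaced by a shared const object flagged IRQCHIP_IMMUTABLE with GPIOCHIP_IRQ_RESOURCE_HELPERS, it is attached via gpio_irq_chip_set_chip() instead of assigning girq->chip directly, and the mask/unmask callbacks now call gpiochip_disable_irq()/gpiochip_enable_irq() themselves. A minimal sketch of that pattern, with hypothetical foo_* names standing in for a real driver rather than anything in this patch:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void foo_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	/* hardware-specific masking of 'hwirq' would go here */
	gpiochip_disable_irq(gc, hwirq);
}

static void foo_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	gpiochip_enable_irq(gc, hwirq);
	/* hardware-specific unmasking of 'hwirq' would go here */
}

static const struct irq_chip foo_irq_chip = {
	.name		= "foo-gpio",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static void foo_gpio_add_irqchip(struct gpio_chip *gc)
{
	/* replaces the old "girq->chip = &priv->irqchip" assignment */
	gpio_irq_chip_set_chip(&gc->irq, &foo_irq_chip);
}

With the irq_chip const and shared, gpiolib no longer patches the driver's callbacks at runtime, which is why each driver's mask/unmask hooks must take over the gpiochip_disable_irq()/gpiochip_enable_irq() bookkeeping, exactly as the hunks above and below do.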
@@ -1340,8 +1354,9 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) return ret; } + /* Setup IRQ chip */ girq = &pctrl->chip.irq; - girq->chip = &pctrl->irqchip; + gpio_irq_chip_set_chip(girq, &intel_gpio_irq_chip); /* This will let us handle the IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index c4fef03b663f..710341bb67cc 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -223,7 +223,6 @@ struct intel_pinctrl_context { * @pctldesc: Pin controller description * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller - * @irqchip: IRQ chip in this pin controller * @soc: SoC/PCH specific pin configuration data * @communities: All communities in this pin controller * @ncommunities: Number of communities in this pin controller @@ -236,7 +235,6 @@ struct intel_pinctrl { struct pinctrl_desc pctldesc; struct pinctrl_dev *pctldev; struct gpio_chip chip; - struct irq_chip irqchip; const struct intel_pinctrl_soc_data *soc; struct intel_community *communities; size_t ncommunities; diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c index 561fa322b0b4..4fb39eb30902 100644 --- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c +++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c @@ -663,7 +663,7 @@ static void lp_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *lg = gpiochip_get_data(gc); - u32 hwirq = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_STAT); unsigned long flags; @@ -684,10 +684,12 @@ static void lp_irq_enable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *lg = gpiochip_get_data(gc); - u32 hwirq = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE); unsigned long flags; + gpiochip_enable_irq(gc, hwirq); + raw_spin_lock_irqsave(&lg->lock, flags); iowrite32(ioread32(reg) | BIT(hwirq % 32), reg); raw_spin_unlock_irqrestore(&lg->lock, flags); @@ -697,30 +699,33 @@ static void lp_irq_disable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *lg = gpiochip_get_data(gc); - u32 hwirq = irqd_to_hwirq(d); + irq_hw_number_t hwirq = irqd_to_hwirq(d); void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE); unsigned long flags; raw_spin_lock_irqsave(&lg->lock, flags); iowrite32(ioread32(reg) & ~BIT(hwirq % 32), reg); raw_spin_unlock_irqrestore(&lg->lock, flags); + + gpiochip_disable_irq(gc, hwirq); } static int lp_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct intel_pinctrl *lg = gpiochip_get_data(gc); - u32 hwirq = irqd_to_hwirq(d); - void __iomem *reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1); + irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; + void __iomem *reg; u32 value; - if (hwirq >= lg->chip.ngpio) + reg = lp_gpio_reg(&lg->chip, hwirq, LP_CONFIG1); + if (!reg) return -EINVAL; /* Fail if BIOS reserved pin for ACPI use */ if (lp_gpio_acpi_use(lg, hwirq)) { - dev_err(lg->dev, "pin %u can't be used as IRQ\n", hwirq); + dev_err(lg->dev, "pin %lu can't be used as IRQ\n", hwirq); return -EBUSY; } @@ -755,7 +760,7 @@ static int lp_irq_set_type(struct irq_data *d, 
unsigned int type) return 0; } -static struct irq_chip lp_irqchip = { +static const struct irq_chip lp_irqchip = { .name = "LP-GPIO", .irq_ack = lp_irq_ack, .irq_mask = lp_irq_mask, @@ -763,7 +768,8 @@ static struct irq_chip lp_irqchip = { .irq_enable = lp_irq_enable, .irq_disable = lp_irq_disable, .irq_set_type = lp_irq_set_type, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int lp_gpio_irq_init_hw(struct gpio_chip *chip) @@ -884,7 +890,7 @@ static int lp_gpio_probe(struct platform_device *pdev) struct gpio_irq_chip *girq; girq = &gc->irq; - girq->chip = &lp_irqchip; + gpio_irq_chip_set_chip(girq, &lp_irqchip); girq->init_hw = lp_gpio_irq_init_hw; girq->parent_handler = lp_gpio_irq_handler; girq->num_parents = 1; diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 40accd110c3d..1600a2c18eee 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -106,6 +106,13 @@ config PINCTRL_MT6779 In MTK platform, we support virtual gpio and use it to map specific eint which doesn't have real gpio pin. +config PINCTRL_MT6795 + bool "Mediatek MT6795 pin control" + depends on OF + depends on ARM64 || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK + select PINCTRL_MTK_PARIS + config PINCTRL_MT6797 bool "Mediatek MT6797 pin control" depends on OF @@ -166,6 +173,7 @@ config PINCTRL_MT8195 bool "Mediatek MT8195 pin control" depends on OF depends on ARM64 || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_PARIS config PINCTRL_MT8365 diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile index 29018d6ad0de..c8f226ae36c9 100644 --- a/drivers/pinctrl/mediatek/Makefile +++ b/drivers/pinctrl/mediatek/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o +obj-$(CONFIG_PINCTRL_MT6795) += pinctrl-mt6795.o obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c new file mode 100644 index 000000000000..f90152261a0f --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c @@ -0,0 +1,623 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Collabora Ltd. 
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#include "pinctrl-mtk-mt6795.h" +#include "pinctrl-paris.h" + +#define PIN_FIELD15(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 15, 0) + +#define PIN_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits) \ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 16, 0) + +#define PINS_FIELD16(_s_pin, _e_pin, _s_addr, _x_addrs, _s_bit, _x_bits)\ + PIN_FIELD_CALC(_s_pin, _e_pin, 0, _s_addr, _x_addrs, _s_bit, \ + _x_bits, 16, 1) + +static const struct mtk_pin_field_calc mt6795_pin_dir_range[] = { + PIN_FIELD16(0, 196, 0x0, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_pullen_range[] = { + PIN_FIELD16(0, 196, 0x100, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_pullsel_range[] = { + PIN_FIELD16(0, 196, 0x200, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_do_range[] = { + PIN_FIELD16(0, 196, 0x400, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_di_range[] = { + PIN_FIELD16(0, 196, 0x500, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_mode_range[] = { + PIN_FIELD15(0, 196, 0x600, 0x10, 0, 3), +}; + +static const struct mtk_pin_field_calc mt6795_pin_ies_range[] = { + PINS_FIELD16(0, 4, 0x900, 0x10, 1, 1), + PINS_FIELD16(5, 9, 0x900, 0x10, 2, 1), + PINS_FIELD16(10, 15, 0x900, 0x10, 10, 1), + PINS_FIELD16(16, 16, 0x900, 0x10, 2, 1), + PINS_FIELD16(17, 19, 0x910, 0x10, 3, 1), + PINS_FIELD16(20, 22, 0x910, 0x10, 4, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 14, 1), + PINS_FIELD16(27, 27, 0xcc0, 0x10, 14, 1), + PINS_FIELD16(28, 28, 0xcd0, 0x10, 14, 1), + PINS_FIELD16(29, 32, 0x900, 0x10, 3, 1), + PINS_FIELD16(33, 33, 0x900, 0x10, 4, 1), + PINS_FIELD16(34, 36, 0x900, 0x10, 5, 1), + PINS_FIELD16(37, 38, 0x900, 0x10, 6, 1), + PINS_FIELD16(39, 39, 0x900, 0x10, 7, 1), + PINS_FIELD16(40, 40, 0x900, 0x10, 8, 1), + PINS_FIELD16(41, 42, 0x900, 0x10, 9, 1), + PINS_FIELD16(43, 46, 0x900, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0x920, 0x10, 3, 1), + PINS_FIELD16(62, 66, 0x920, 0x10, 4, 1), + PINS_FIELD16(67, 67, 0x920, 0x10, 3, 1), + PINS_FIELD16(68, 72, 0x920, 0x10, 5, 1), + PINS_FIELD16(73, 77, 0x920, 0x10, 6, 1), + PINS_FIELD16(78, 91, 0x920, 0x10, 7, 1), + PINS_FIELD16(92, 92, 0x900, 0x10, 13, 1), + PINS_FIELD16(93, 95, 0x900, 0x10, 14, 1), + PINS_FIELD16(96, 99, 0x900, 0x10, 15, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 14, 1), + PINS_FIELD16(104, 104, 0xc80, 0x10, 14, 1), + PINS_FIELD16(105, 105, 0xc90, 0x10, 14, 1), + PINS_FIELD16(106, 107, 0x910, 0x10, 0, 1), + PINS_FIELD16(108, 112, 0x910, 0x10, 1, 1), + PINS_FIELD16(113, 116, 0x910, 0x10, 2, 1), + PINS_FIELD16(117, 118, 0x910, 0x10, 5, 1), + PINS_FIELD16(119, 124, 0x910, 0x10, 6, 1), + PINS_FIELD16(125, 126, 0x910, 0x10, 7, 1), + PINS_FIELD16(129, 129, 0x910, 0x10, 8, 1), + PINS_FIELD16(130, 131, 0x910, 0x10, 9, 1), + PINS_FIELD16(132, 135, 0x910, 0x10, 8, 1), + PINS_FIELD16(136, 137, 0x910, 0x10, 7, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 14, 1), + PINS_FIELD16(162, 162, 0xc10, 0x10, 14, 1), + PINS_FIELD16(163, 163, 0xc00, 0x10, 14, 1), + PINS_FIELD16(164, 164, 0xd10, 0x10, 14, 1), + PINS_FIELD16(165, 165, 0xd00, 0x10, 14, 1), + PINS_FIELD16(166, 169, 0x910, 0x10, 14, 1), + PINS_FIELD16(176, 179, 0x910, 0x10, 15, 1), + PINS_FIELD16(180, 180, 0x920, 0x10, 0, 1), + PINS_FIELD16(181, 184, 0x920, 0x10, 1, 1), + PINS_FIELD16(185, 191, 0x920, 0x10, 2, 
1), + PINS_FIELD16(192, 192, 0x920, 0x10, 8, 1), + PINS_FIELD16(193, 194, 0x920, 0x10, 9, 1), + PINS_FIELD16(195, 196, 0x920, 0x10, 8, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_smt_range[] = { + PINS_FIELD16(0, 4, 0x930, 0x10, 1, 1), + PINS_FIELD16(5, 9, 0x930, 0x10, 2, 1), + PINS_FIELD16(10, 15, 0x930, 0x10, 10, 1), + PINS_FIELD16(16, 16, 0x930, 0x10, 2, 1), + PINS_FIELD16(17, 19, 0x940, 0x10, 3, 1), + PINS_FIELD16(20, 22, 0x940, 0x10, 4, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 13, 1), + PINS_FIELD16(27, 27, 0xcc0, 0x10, 13, 1), + PINS_FIELD16(28, 28, 0xcd0, 0x10, 13, 1), + PINS_FIELD16(29, 32, 0x930, 0x10, 3, 1), + PINS_FIELD16(33, 33, 0x930, 0x10, 4, 1), + PINS_FIELD16(34, 36, 0x930, 0x10, 5, 1), + PINS_FIELD16(37, 38, 0x930, 0x10, 6, 1), + PINS_FIELD16(39, 39, 0x930, 0x10, 7, 1), + PINS_FIELD16(40, 40, 0x930, 0x10, 8, 1), + PINS_FIELD16(41, 42, 0x930, 0x10, 9, 1), + PINS_FIELD16(43, 46, 0x930, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0x950, 0x10, 3, 1), + PINS_FIELD16(62, 66, 0x950, 0x10, 4, 1), + PINS_FIELD16(67, 67, 0x950, 0x10, 3, 1), + PINS_FIELD16(68, 72, 0x950, 0x10, 5, 1), + PINS_FIELD16(73, 77, 0x950, 0x10, 6, 1), + PINS_FIELD16(78, 91, 0x950, 0x10, 7, 1), + PINS_FIELD16(92, 92, 0x930, 0x10, 13, 1), + PINS_FIELD16(93, 95, 0x930, 0x10, 14, 1), + PINS_FIELD16(96, 99, 0x930, 0x10, 15, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 13, 1), + PINS_FIELD16(104, 104, 0xc80, 0x10, 13, 1), + PINS_FIELD16(105, 105, 0xc90, 0x10, 13, 1), + PINS_FIELD16(106, 107, 0x940, 0x10, 0, 1), + PINS_FIELD16(108, 112, 0x940, 0x10, 1, 1), + PINS_FIELD16(113, 116, 0x940, 0x10, 2, 1), + PINS_FIELD16(117, 118, 0x940, 0x10, 5, 1), + PINS_FIELD16(119, 124, 0x940, 0x10, 6, 1), + PINS_FIELD16(125, 126, 0x940, 0x10, 7, 1), + PINS_FIELD16(129, 129, 0x940, 0x10, 8, 1), + PINS_FIELD16(130, 131, 0x940, 0x10, 9, 1), + PINS_FIELD16(132, 135, 0x940, 0x10, 8, 1), + PINS_FIELD16(136, 137, 0x940, 0x10, 7, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 13, 1), + PINS_FIELD16(162, 162, 0xc10, 0x10, 13, 1), + PINS_FIELD16(163, 163, 0xc00, 0x10, 13, 1), + PINS_FIELD16(164, 164, 0xd10, 0x10, 13, 1), + PINS_FIELD16(165, 165, 0xd00, 0x10, 13, 1), + PINS_FIELD16(166, 169, 0x940, 0x10, 14, 1), + PINS_FIELD16(176, 179, 0x940, 0x10, 15, 1), + PINS_FIELD16(180, 180, 0x950, 0x10, 0, 1), + PINS_FIELD16(181, 184, 0x950, 0x10, 1, 1), + PINS_FIELD16(185, 191, 0x950, 0x10, 2, 1), + PINS_FIELD16(192, 192, 0x950, 0x10, 8, 1), + PINS_FIELD16(193, 194, 0x950, 0x10, 9, 1), + PINS_FIELD16(195, 196, 0x950, 0x10, 8, 1), +}; + + +static const struct mtk_pin_field_calc mt6795_pin_pupd_range[] = { + /* KROW */ + PIN_FIELD16(119, 119, 0xe00, 0x10, 2, 1), /* KROW0 */ + PIN_FIELD16(120, 120, 0xe00, 0x10, 6, 1), /* KROW1 */ + PIN_FIELD16(121, 121, 0xe00, 0x10, 10, 1), /* KROW2 */ + PIN_FIELD16(122, 122, 0xe10, 0x10, 2, 1), /* KCOL0 */ + PIN_FIELD16(123, 123, 0xe10, 0x10, 6, 1), /* KCOL1 */ + PIN_FIELD16(124, 124, 0xe10, 0x10, 10, 1), /* KCOL2 */ + + /* DPI */ + PIN_FIELD16(138, 138, 0xd50, 0x10, 2, 1), /* CK */ + PIN_FIELD16(139, 139, 0xd60, 0x10, 1, 1), /* DE */ + PIN_FIELD16(140, 140, 0xd70, 0x10, 1, 1), /* data0 */ + PIN_FIELD16(141, 141, 0xd70, 0x10, 3, 1), /* data1 */ + PIN_FIELD16(142, 142, 0xd70, 0x10, 5, 1), /* data2 */ + PIN_FIELD16(143, 143, 0xd70, 0x10, 7, 1), /* data3 */ + PIN_FIELD16(144, 144, 0xd50, 0x10, 5, 1), /* data4 */ + PIN_FIELD16(145, 145, 0xd50, 0x10, 7, 1), /* data5 */ + PIN_FIELD16(146, 146, 0xd60, 0x10, 7, 1), /* data6 */ + PIN_FIELD16(147, 147, 0xed0, 0x10, 6, 1), /* data7 */ + PIN_FIELD16(148, 148, 0xed0, 0x10, 8, 1), 
/* data8 */ + PIN_FIELD16(149, 149, 0xed0, 0x10, 10, 1), /* data9 */ + PIN_FIELD16(150, 150, 0xed0, 0x10, 12, 1), /* data10 */ + PIN_FIELD16(151, 151, 0xed0, 0x10, 14, 1), /* data11 */ + PIN_FIELD16(152, 152, 0xd60, 0x10, 3, 1), /* hsync */ + PIN_FIELD16(153, 153, 0xd60, 0x10, 5, 1), /* vsync */ + + /* MSDC0 */ + PIN_FIELD16(154, 154, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(155, 155, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(156, 156, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(157, 157, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(158, 158, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(159, 159, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(160, 160, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(161, 161, 0xc20, 0x10, 2, 1), /* DATA 0-7 */ + PIN_FIELD16(162, 162, 0xc10, 0x10, 2, 1), /* CMD */ + PIN_FIELD16(163, 163, 0xc00, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(164, 164, 0xd10, 0x10, 2, 1), /* DS */ + PIN_FIELD16(165, 165, 0xd00, 0x10, 2, 1), /* RST */ + + /* MSDC1 */ + PIN_FIELD16(170, 170, 0xc50, 0x10, 2, 1), /* CMD */ + PIN_FIELD16(171, 171, 0xd20, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(172, 172, 0xd20, 0x10, 6, 1), /* DAT1 */ + PIN_FIELD16(173, 173, 0xd20, 0x10, 10, 1), /* DAT2 */ + PIN_FIELD16(174, 174, 0xd20, 0x10, 14, 1), /* DAT3 */ + PIN_FIELD16(175, 175, 0xc40, 0x10, 2, 1), /* CLK */ + + /* MSDC2 */ + PIN_FIELD16(100, 100, 0xd30, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(101, 101, 0xd30, 0x10, 6, 1), /* DAT1 */ + PIN_FIELD16(102, 102, 0xd30, 0x10, 10, 1), /* DAT2 */ + PIN_FIELD16(103, 103, 0xd30, 0x10, 14, 1), /* DAT3 */ + PIN_FIELD16(104, 104, 0xc80, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(105, 105, 0xc90, 0x10, 2, 1), /* CMD */ + + /* MSDC3 */ + PIN_FIELD16(23, 23, 0xd40, 0x10, 2, 1), /* DAT0 */ + PIN_FIELD16(24, 24, 0xd40, 0x10, 6, 5), /* DAT1 */ + PIN_FIELD16(25, 25, 0xd40, 0x10, 10, 9), /* DAT2 */ + PIN_FIELD16(26, 26, 0xd40, 0x10, 14, 13), /* DAT3 */ + PIN_FIELD16(27, 27, 0xcc0, 0x10, 2, 1), /* CLK */ + PIN_FIELD16(28, 28, 0xcd0, 0x10, 2, 1) /* CMD */ +}; + +static const struct mtk_pin_field_calc mt6795_pin_r0_range[] = { + PIN_FIELD16(23, 23, 0xd40, 0x10, 0, 1), + PIN_FIELD16(24, 24, 0xd40, 0x10, 4, 1), + PIN_FIELD16(25, 25, 0xd40, 0x10, 8, 1), + PIN_FIELD16(26, 26, 0xd40, 0x10, 12, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 0, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 0, 1), + PIN_FIELD16(100, 100, 0xd30, 0x10, 0, 1), + PIN_FIELD16(101, 101, 0xd30, 0x10, 4, 1), + PIN_FIELD16(102, 102, 0xd30, 0x10, 8, 1), + PIN_FIELD16(103, 103, 0xd30, 0x10, 12, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 0, 1), + PIN_FIELD16(105, 105, 0xc90, 0x10, 0, 1), + PIN_FIELD16(119, 119, 0xe00, 0x10, 0, 1), + PIN_FIELD16(120, 120, 0xe00, 0x10, 4, 1), + PIN_FIELD16(121, 121, 0xe00, 0x10, 8, 1), + PIN_FIELD16(122, 122, 0xe10, 0x10, 0, 1), + PIN_FIELD16(123, 123, 0xe10, 0x10, 4, 1), + PIN_FIELD16(124, 124, 0xe10, 0x10, 8, 1), + PIN_FIELD16(138, 138, 0xd50, 0x10, 0, 1), + PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1), + PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1), + PIN_FIELD16(141, 141, 0xd70, 0x10, 1, 1), + PIN_FIELD16(142, 142, 0xd70, 0x10, 3, 1), + PIN_FIELD16(143, 143, 0xd70, 0x10, 5, 1), + PIN_FIELD16(144, 144, 0xd50, 0x10, 3, 1), + PIN_FIELD16(145, 145, 0xd50, 0x10, 5, 1), + PIN_FIELD16(146, 146, 0xd60, 0x10, 5, 1), + PIN_FIELD16(147, 147, 0xed0, 0x10, 4, 1), + PIN_FIELD16(148, 148, 0xed0, 0x10, 6, 1), + PIN_FIELD16(149, 149, 0xed0, 0x10, 8, 1), + PIN_FIELD16(150, 150, 0xed0, 0x10, 10, 1), + PIN_FIELD16(151, 151, 0xed0, 0x10, 12, 1), + PIN_FIELD16(152, 152, 0xd60, 0x10, 1, 1), + 
PIN_FIELD16(153, 153, 0xd60, 0x10, 3, 1), + PIN_FIELD16(154, 155, 0xc20, 0x10, 0, 1), + PIN_FIELD16(155, 156, 0xc20, 0x10, 0, 1), + PIN_FIELD16(156, 157, 0xc20, 0x10, 0, 1), + PIN_FIELD16(157, 158, 0xc20, 0x10, 0, 1), + PIN_FIELD16(158, 159, 0xc20, 0x10, 0, 1), + PIN_FIELD16(159, 160, 0xc20, 0x10, 0, 1), + PIN_FIELD16(160, 161, 0xc20, 0x10, 0, 1), + PIN_FIELD16(161, 161, 0xc20, 0x10, 0, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 0, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 0, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 0, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 0, 1), + PIN_FIELD16(170, 170, 0xc50, 0x10, 0, 1), + PIN_FIELD16(171, 171, 0xd20, 0x10, 0, 1), + PIN_FIELD16(172, 172, 0xd20, 0x10, 4, 1), + PIN_FIELD16(173, 173, 0xd20, 0x10, 8, 1), + PIN_FIELD16(174, 174, 0xd20, 0x10, 12, 1), + PIN_FIELD16(175, 175, 0xc40, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_r1_range[] = { + PIN_FIELD16(23, 23, 0xd40, 0x10, 1, 1), + PIN_FIELD16(24, 24, 0xd40, 0x10, 5, 1), + PIN_FIELD16(25, 25, 0xd40, 0x10, 9, 1), + PIN_FIELD16(26, 26, 0xd40, 0x10, 13, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 1, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 1, 1), + PIN_FIELD16(100, 100, 0xd30, 0x10, 1, 1), + PIN_FIELD16(101, 101, 0xd30, 0x10, 5, 1), + PIN_FIELD16(102, 102, 0xd30, 0x10, 9, 1), + PIN_FIELD16(103, 103, 0xd30, 0x10, 13, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 1, 1), + PIN_FIELD16(105, 105, 0xc90, 0x10, 1, 1), + PIN_FIELD16(119, 119, 0xe00, 0x10, 1, 1), + PIN_FIELD16(120, 120, 0xe00, 0x10, 5, 1), + PIN_FIELD16(121, 121, 0xe00, 0x10, 9, 1), + PIN_FIELD16(122, 122, 0xe10, 0x10, 1, 1), + PIN_FIELD16(123, 123, 0xe10, 0x10, 5, 1), + PIN_FIELD16(124, 124, 0xe10, 0x10, 9, 1), + PIN_FIELD16(138, 138, 0xd50, 0x10, 1, 1), + PIN_FIELD16(139, 139, 0xd60, 0x10, 0, 1), + PIN_FIELD16(140, 140, 0xd70, 0x10, 0, 1), + PIN_FIELD16(141, 141, 0xd70, 0x10, 2, 1), + PIN_FIELD16(142, 142, 0xd70, 0x10, 4, 1), + PIN_FIELD16(143, 143, 0xd70, 0x10, 6, 1), + PIN_FIELD16(144, 144, 0xd50, 0x10, 4, 1), + PIN_FIELD16(145, 145, 0xd50, 0x10, 6, 1), + PIN_FIELD16(146, 146, 0xd60, 0x10, 6, 1), + PIN_FIELD16(147, 147, 0xed0, 0x10, 5, 1), + PIN_FIELD16(148, 148, 0xed0, 0x10, 7, 1), + PIN_FIELD16(149, 149, 0xed0, 0x10, 9, 1), + PIN_FIELD16(150, 150, 0xed0, 0x10, 11, 1), + PIN_FIELD16(151, 151, 0xed0, 0x10, 13, 1), + PIN_FIELD16(152, 152, 0xd60, 0x10, 2, 1), + PIN_FIELD16(153, 153, 0xd60, 0x10, 4, 1), + PIN_FIELD16(154, 155, 0xc20, 0x10, 1, 1), + PIN_FIELD16(155, 156, 0xc20, 0x10, 1, 1), + PIN_FIELD16(156, 157, 0xc20, 0x10, 1, 1), + PIN_FIELD16(157, 158, 0xc20, 0x10, 1, 1), + PIN_FIELD16(158, 159, 0xc20, 0x10, 1, 1), + PIN_FIELD16(159, 160, 0xc20, 0x10, 1, 1), + PIN_FIELD16(160, 161, 0xc20, 0x10, 1, 1), + PIN_FIELD16(161, 161, 0xc20, 0x10, 1, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 1, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 1, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 1, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 1, 1), + PIN_FIELD16(170, 170, 0xc50, 0x10, 1, 1), + PIN_FIELD16(171, 171, 0xd20, 0x10, 1, 1), + PIN_FIELD16(172, 172, 0xd20, 0x10, 5, 1), + PIN_FIELD16(173, 173, 0xd20, 0x10, 9, 1), + PIN_FIELD16(174, 174, 0xd20, 0x10, 13, 1), + PIN_FIELD16(175, 175, 0xc40, 0x10, 1, 1), +}; + +static const struct mtk_pin_field_calc mt6795_pin_drv_range[] = { + PINS_FIELD16(0, 4, 0xb30, 0x10, 13, 2), + PINS_FIELD16(5, 9, 0xb30, 0x10, 1, 2), + PINS_FIELD16(10, 15, 0xb30, 0x10, 5, 2), + PIN_FIELD16(16, 16, 0xb30, 0x10, 1, 2), + PINS_FIELD16(17, 19, 0xb70, 0x10, 5, 2), + PINS_FIELD16(20, 22, 0xb70, 0x10, 9, 2), + PINS_FIELD16(23, 26, 0xce0, 0x10, 8, 
2), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 8, 2), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 8, 2), + PINS_FIELD16(29, 32, 0xb80, 0x10, 13, 2), + PIN_FIELD16(33, 33, 0xb10, 0x10, 13, 2), + PINS_FIELD16(34, 36, 0xb10, 0x10, 9, 2), + PINS_FIELD16(37, 38, 0xb10, 0x10, 5, 2), + PIN_FIELD16(39, 39, 0xb20, 0x10, 1, 2), + PIN_FIELD16(40, 40, 0xb20, 0x10, 5, 2), + PINS_FIELD16(41, 42, 0xb20, 0x10, 9, 2), + PINS_FIELD16(47, 61, 0xb00, 0x10, 9, 2), + PINS_FIELD16(62, 66, 0xb70, 0x10, 1, 2), + PINS_FIELD16(67, 67, 0xb00, 0x10, 9, 2), + PINS_FIELD16(68, 72, 0xb60, 0x10, 13, 2), + PINS_FIELD16(73, 77, 0xb40, 0x10, 13, 2), + PIN_FIELD16(78, 78, 0xb00, 0x10, 12, 3), + PINS_FIELD16(79, 91, 0xb00, 0x10, 13, 2), + PIN_FIELD16(92, 92, 0xb60, 0x10, 5, 2), + PINS_FIELD16(93, 95, 0xb60, 0x10, 1, 2), + PINS_FIELD16(96, 99, 0xb80, 0x10, 9, 2), + PINS_FIELD16(100, 103, 0xca0, 0x10, 8, 2), + PIN_FIELD16(104, 104, 0xc80, 0x10, 8, 2), + PIN_FIELD16(105, 105, 0xc90, 0x10, 8, 2), + PINS_FIELD16(106, 107, 0xb50, 0x10, 9, 2), + PINS_FIELD16(108, 112, 0xb50, 0x10, 1, 2), + PINS_FIELD16(113, 116, 0xb80, 0x10, 5, 2), + PINS_FIELD16(117, 118, 0xb90, 0x10, 1, 2), + PINS_FIELD16(119, 124, 0xb50, 0x10, 5, 2), + PIN_FIELD16(127, 127, 0xb70, 0x10, 5, 2), + PIN_FIELD16(128, 128, 0xb70, 0x10, 9, 2), + PIN_FIELD16(129, 129, 0xb40, 0x10, 9, 2), + PINS_FIELD16(130, 131, 0xb40, 0x10, 13, 2), + PINS_FIELD16(132, 135, 0xb40, 0x10, 9, 2), + PIN_FIELD16(138, 138, 0xb50, 0x10, 8, 2), + PIN_FIELD16(139, 139, 0xb60, 0x10, 8, 2), + PINS_FIELD16(140, 151, 0xb70, 0x10, 8, 2), + PINS_FIELD16(152, 153, 0xb60, 0x10, 8, 2), + PINS_FIELD16(153, 153, 0xb60, 0x10, 8, 2), + PINS_FIELD16(154, 161, 0xc20, 0x10, 8, 2), + PIN_FIELD16(162, 162, 0xc10, 0x10, 8, 2), + PIN_FIELD16(163, 163, 0xc00, 0x10, 8, 2), + PIN_FIELD16(164, 164, 0xd10, 0x10, 8, 2), + PIN_FIELD16(165, 165, 0xd00, 0x10, 8, 2), + PINS_FIELD16(166, 169, 0xb80, 0x10, 1, 2), + PINS_FIELD16(170, 173, 0xc60, 0x10, 8, 2), + PIN_FIELD16(174, 174, 0xc40, 0x10, 8, 2), + PIN_FIELD16(175, 175, 0xc50, 0x10, 8, 2), + PINS_FIELD16(176, 179, 0xb70, 0x10, 13, 2), + PIN_FIELD16(180, 180, 0xb00, 0x10, 5, 2), + PINS_FIELD16(181, 184, 0xb00, 0x10, 1, 2), + PINS_FIELD16(185, 191, 0xb60, 0x10, 9, 2), + PIN_FIELD16(192, 192, 0xb40, 0x10, 1, 2), + PINS_FIELD16(193, 194, 0xb40, 0x10, 5, 2), + PINS_FIELD16(195, 196, 0xb40, 0x10, 1, 2), +}; + +static const struct mtk_pin_field_calc mt6795_pin_sr_range[] = { + PINS_FIELD16(0, 4, 0xb30, 0x10, 15, 1), + PINS_FIELD16(5, 9, 0xb30, 0x10, 3, 1), + PINS_FIELD16(10, 15, 0xb30, 0x10, 7, 1), + PIN_FIELD16(16, 16, 0xb30, 0x10, 5, 1), + PINS_FIELD16(23, 26, 0xce0, 0x10, 12, 1), + PIN_FIELD16(27, 27, 0xcc0, 0x10, 12, 1), + PIN_FIELD16(28, 28, 0xcd0, 0x10, 12, 1), + PINS_FIELD16(29, 32, 0xb80, 0x10, 15, 1), + PIN_FIELD16(33, 33, 0xb10, 0x10, 15, 1), + PINS_FIELD16(34, 36, 0xb10, 0x10, 11, 1), + PINS_FIELD16(37, 38, 0xb10, 0x10, 7, 1), + PIN_FIELD16(39, 39, 0xb20, 0x10, 3, 1), + PIN_FIELD16(40, 40, 0xb20, 0x10, 7, 1), + PINS_FIELD16(41, 42, 0xb20, 0x10, 11, 1), + PINS_FIELD16(47, 61, 0xb00, 0x10, 11, 1), + PINS_FIELD16(62, 66, 0xb70, 0x10, 3, 1), + PINS_FIELD16(67, 67, 0xb00, 0x10, 11, 1), + PINS_FIELD16(68, 72, 0xb60, 0x10, 15, 1), + PINS_FIELD16(73, 77, 0xb40, 0x10, 15, 1), + PIN_FIELD16(78, 78, 0xb00, 0x10, 15, 3), + PINS_FIELD16(79, 91, 0xb00, 0x10, 15, 1), + PIN_FIELD16(92, 92, 0xb60, 0x10, 7, 1), + PINS_FIELD16(93, 95, 0xb60, 0x10, 3, 1), + PINS_FIELD16(96, 99, 0xb80, 0x10, 11, 1), + PINS_FIELD16(100, 103, 0xca0, 0x10, 12, 1), + PIN_FIELD16(104, 104, 0xc80, 0x10, 12, 1), + PIN_FIELD16(105, 
105, 0xc90, 0x10, 12, 1), + PINS_FIELD16(106, 107, 0xb50, 0x10, 11, 1), + PINS_FIELD16(108, 112, 0xb50, 0x10, 3, 1), + PINS_FIELD16(113, 116, 0xb80, 0x10, 7, 1), + PINS_FIELD16(117, 118, 0xb90, 0x10, 3, 1), + PINS_FIELD16(119, 124, 0xb50, 0x10, 7, 1), + PIN_FIELD16(127, 127, 0xb70, 0x10, 7, 1), + PIN_FIELD16(128, 128, 0xb70, 0x10, 11, 1), + PIN_FIELD16(129, 129, 0xb40, 0x10, 11, 1), + PINS_FIELD16(130, 131, 0xb40, 0x10, 15, 1), + PINS_FIELD16(132, 135, 0xb40, 0x10, 11, 1), + PIN_FIELD16(138, 138, 0xb50, 0x10, 12, 1), + PIN_FIELD16(139, 139, 0xb60, 0x10, 12, 1), + PINS_FIELD16(140, 151, 0xb70, 0x10, 12, 1), + PINS_FIELD16(152, 153, 0xb60, 0x10, 12, 1), + PINS_FIELD16(153, 153, 0xb60, 0x10, 12, 1), + PINS_FIELD16(154, 161, 0xc20, 0x10, 12, 1), + PIN_FIELD16(162, 162, 0xc10, 0x10, 12, 1), + PIN_FIELD16(163, 163, 0xc00, 0x10, 12, 1), + PIN_FIELD16(164, 164, 0xd10, 0x10, 12, 1), + PIN_FIELD16(165, 165, 0xd00, 0x10, 12, 1), + PINS_FIELD16(166, 169, 0xb80, 0x10, 3, 1), + PINS_FIELD16(170, 173, 0xc60, 0x10, 12, 1), + PIN_FIELD16(174, 174, 0xc40, 0x10, 12, 1), + PIN_FIELD16(175, 175, 0xc50, 0x10, 12, 1), + PINS_FIELD16(176, 179, 0xb70, 0x10, 15, 1), + PIN_FIELD16(180, 180, 0xb00, 0x10, 7, 1), + PINS_FIELD16(181, 184, 0xb00, 0x10, 3, 1), + PINS_FIELD16(185, 191, 0xb60, 0x10, 11, 1), + PIN_FIELD16(192, 192, 0xb40, 0x10, 3, 1), + PINS_FIELD16(193, 194, 0xb40, 0x10, 7, 1), + PINS_FIELD16(195, 196, 0xb40, 0x10, 3, 1), +}; + +static const struct mtk_pin_reg_calc mt6795_reg_cals[PINCTRL_PIN_REG_MAX] = { + [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6795_pin_mode_range), + [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6795_pin_dir_range), + [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6795_pin_di_range), + [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6795_pin_do_range), + [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt6795_pin_sr_range), + [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6795_pin_smt_range), + [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6795_pin_drv_range), + [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6795_pin_pupd_range), + [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6795_pin_r0_range), + [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6795_pin_r1_range), + [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6795_pin_ies_range), + [PINCTRL_PIN_REG_PULLEN] = MTK_RANGE(mt6795_pin_pullen_range), + [PINCTRL_PIN_REG_PULLSEL] = MTK_RANGE(mt6795_pin_pullsel_range), +}; + +static const struct mtk_eint_hw mt6795_eint_hw = { + .port_mask = 7, + .ports = 7, + .ap_num = 224, + .db_cnt = 32, +}; + +static const unsigned int mt6795_pull_type[] = { + MTK_PULL_PULLSEL_TYPE,/*0*/ MTK_PULL_PULLSEL_TYPE,/*1*/ + MTK_PULL_PULLSEL_TYPE,/*2*/ MTK_PULL_PULLSEL_TYPE,/*3*/ + MTK_PULL_PULLSEL_TYPE,/*4*/ MTK_PULL_PULLSEL_TYPE,/*5*/ + MTK_PULL_PULLSEL_TYPE,/*6*/ MTK_PULL_PULLSEL_TYPE,/*7*/ + MTK_PULL_PULLSEL_TYPE,/*8*/ MTK_PULL_PULLSEL_TYPE,/*9*/ + MTK_PULL_PULLSEL_TYPE,/*10*/ MTK_PULL_PULLSEL_TYPE,/*11*/ + MTK_PULL_PULLSEL_TYPE,/*12*/ MTK_PULL_PULLSEL_TYPE,/*13*/ + MTK_PULL_PULLSEL_TYPE,/*14*/ MTK_PULL_PULLSEL_TYPE,/*15*/ + MTK_PULL_PULLSEL_TYPE,/*16*/ MTK_PULL_PULLSEL_TYPE,/*17*/ + MTK_PULL_PULLSEL_TYPE,/*18*/ MTK_PULL_PULLSEL_TYPE,/*19*/ + MTK_PULL_PULLSEL_TYPE,/*20*/ MTK_PULL_PULLSEL_TYPE,/*21*/ + MTK_PULL_PULLSEL_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/ + MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/ + MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/ + MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PULLSEL_TYPE,/*29*/ + MTK_PULL_PULLSEL_TYPE,/*30*/ MTK_PULL_PULLSEL_TYPE,/*31*/ + MTK_PULL_PULLSEL_TYPE,/*32*/ MTK_PULL_PULLSEL_TYPE,/*33*/ + MTK_PULL_PULLSEL_TYPE,/*34*/ MTK_PULL_PULLSEL_TYPE,/*35*/ + 
MTK_PULL_PULLSEL_TYPE,/*36*/ MTK_PULL_PULLSEL_TYPE,/*37*/ + MTK_PULL_PULLSEL_TYPE,/*38*/ MTK_PULL_PULLSEL_TYPE,/*39*/ + MTK_PULL_PULLSEL_TYPE,/*40*/ MTK_PULL_PULLSEL_TYPE,/*41*/ + MTK_PULL_PULLSEL_TYPE,/*42*/ MTK_PULL_PULLSEL_TYPE,/*43*/ + MTK_PULL_PULLSEL_TYPE,/*44*/ MTK_PULL_PULLSEL_TYPE,/*45*/ + MTK_PULL_PULLSEL_TYPE,/*46*/ MTK_PULL_PULLSEL_TYPE,/*47*/ + MTK_PULL_PULLSEL_TYPE,/*48*/ MTK_PULL_PULLSEL_TYPE,/*49*/ + MTK_PULL_PULLSEL_TYPE,/*50*/ MTK_PULL_PULLSEL_TYPE,/*51*/ + MTK_PULL_PULLSEL_TYPE,/*52*/ MTK_PULL_PULLSEL_TYPE,/*53*/ + MTK_PULL_PULLSEL_TYPE,/*54*/ MTK_PULL_PULLSEL_TYPE,/*55*/ + MTK_PULL_PULLSEL_TYPE,/*56*/ MTK_PULL_PULLSEL_TYPE,/*57*/ + MTK_PULL_PULLSEL_TYPE,/*58*/ MTK_PULL_PULLSEL_TYPE,/*59*/ + MTK_PULL_PULLSEL_TYPE,/*60*/ MTK_PULL_PULLSEL_TYPE,/*61*/ + MTK_PULL_PULLSEL_TYPE,/*62*/ MTK_PULL_PULLSEL_TYPE,/*63*/ + MTK_PULL_PULLSEL_TYPE,/*64*/ MTK_PULL_PULLSEL_TYPE,/*65*/ + MTK_PULL_PULLSEL_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/ + MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PUPD_R1R0_TYPE,/*69*/ + MTK_PULL_PUPD_R1R0_TYPE,/*70*/ MTK_PULL_PUPD_R1R0_TYPE,/*71*/ + MTK_PULL_PUPD_R1R0_TYPE,/*72*/ MTK_PULL_PUPD_R1R0_TYPE,/*73*/ + MTK_PULL_PUPD_R1R0_TYPE,/*74*/ MTK_PULL_PUPD_R1R0_TYPE,/*75*/ + MTK_PULL_PUPD_R1R0_TYPE,/*76*/ MTK_PULL_PUPD_R1R0_TYPE,/*77*/ + MTK_PULL_PUPD_R1R0_TYPE,/*78*/ MTK_PULL_PUPD_R1R0_TYPE,/*79*/ + MTK_PULL_PUPD_R1R0_TYPE,/*80*/ MTK_PULL_PUPD_R1R0_TYPE,/*81*/ + MTK_PULL_PUPD_R1R0_TYPE,/*82*/ MTK_PULL_PULLSEL_TYPE,/*83*/ + MTK_PULL_PUPD_R1R0_TYPE,/*84*/ MTK_PULL_PUPD_R1R0_TYPE,/*85*/ + MTK_PULL_PUPD_R1R0_TYPE,/*86*/ MTK_PULL_PUPD_R1R0_TYPE,/*87*/ + MTK_PULL_PUPD_R1R0_TYPE,/*88*/ MTK_PULL_PUPD_R1R0_TYPE,/*89*/ + MTK_PULL_PULLSEL_TYPE,/*90*/ MTK_PULL_PULLSEL_TYPE,/*91*/ + MTK_PULL_PULLSEL_TYPE,/*92*/ MTK_PULL_PULLSEL_TYPE,/*93*/ + MTK_PULL_PULLSEL_TYPE,/*94*/ MTK_PULL_PULLSEL_TYPE,/*95*/ + MTK_PULL_PULLSEL_TYPE,/*96*/ MTK_PULL_PULLSEL_TYPE,/*97*/ + MTK_PULL_PULLSEL_TYPE,/*98*/ MTK_PULL_PULLSEL_TYPE,/*99*/ + MTK_PULL_PUPD_R1R0_TYPE,/*100*/ MTK_PULL_PUPD_R1R0_TYPE,/*101*/ + MTK_PULL_PUPD_R1R0_TYPE,/*102*/ MTK_PULL_PUPD_R1R0_TYPE,/*103*/ + MTK_PULL_PUPD_R1R0_TYPE,/*104*/ MTK_PULL_PUPD_R1R0_TYPE,/*105*/ + MTK_PULL_PULLSEL_TYPE,/*106*/ MTK_PULL_PULLSEL_TYPE,/*107*/ + MTK_PULL_PULLSEL_TYPE,/*108*/ MTK_PULL_PULLSEL_TYPE,/*109*/ + MTK_PULL_PULLSEL_TYPE,/*110*/ MTK_PULL_PULLSEL_TYPE,/*111*/ + MTK_PULL_PULLSEL_TYPE,/*112*/ MTK_PULL_PULLSEL_TYPE,/*113*/ + MTK_PULL_PULLSEL_TYPE,/*114*/ MTK_PULL_PULLSEL_TYPE,/*115*/ + MTK_PULL_PULLSEL_TYPE,/*116*/ MTK_PULL_PULLSEL_TYPE,/*117*/ + MTK_PULL_PULLSEL_TYPE,/*118*/ MTK_PULL_PUPD_R1R0_TYPE,/*119*/ + MTK_PULL_PUPD_R1R0_TYPE,/*120*/ MTK_PULL_PUPD_R1R0_TYPE,/*121*/ + MTK_PULL_PUPD_R1R0_TYPE,/*122*/ MTK_PULL_PUPD_R1R0_TYPE,/*123*/ + MTK_PULL_PUPD_R1R0_TYPE,/*124*/ MTK_PULL_PULLSEL_TYPE,/*125*/ + MTK_PULL_PULLSEL_TYPE,/*126*/ MTK_PULL_PULLSEL_TYPE,/*127*/ + MTK_PULL_PULLSEL_TYPE,/*128*/ MTK_PULL_PULLSEL_TYPE,/*129*/ + MTK_PULL_PULLSEL_TYPE,/*130*/ MTK_PULL_PULLSEL_TYPE,/*131*/ + MTK_PULL_PULLSEL_TYPE,/*132*/ MTK_PULL_PULLSEL_TYPE,/*133*/ + MTK_PULL_PULLSEL_TYPE,/*134*/ MTK_PULL_PULLSEL_TYPE,/*135*/ + MTK_PULL_PULLSEL_TYPE,/*136*/ MTK_PULL_PULLSEL_TYPE,/*137*/ + MTK_PULL_PUPD_R1R0_TYPE,/*138*/ MTK_PULL_PUPD_R1R0_TYPE,/*139*/ + MTK_PULL_PUPD_R1R0_TYPE,/*140*/ MTK_PULL_PUPD_R1R0_TYPE,/*141*/ + MTK_PULL_PUPD_R1R0_TYPE,/*142*/ MTK_PULL_PUPD_R1R0_TYPE,/*143*/ + MTK_PULL_PUPD_R1R0_TYPE,/*144*/ MTK_PULL_PUPD_R1R0_TYPE,/*145*/ + MTK_PULL_PUPD_R1R0_TYPE,/*146*/ MTK_PULL_PUPD_R1R0_TYPE,/*147*/ + MTK_PULL_PUPD_R1R0_TYPE,/*148*/ 
MTK_PULL_PUPD_R1R0_TYPE,/*149*/ + MTK_PULL_PUPD_R1R0_TYPE,/*150*/ MTK_PULL_PUPD_R1R0_TYPE,/*151*/ + MTK_PULL_PUPD_R1R0_TYPE,/*152*/ MTK_PULL_PUPD_R1R0_TYPE,/*153*/ + MTK_PULL_PUPD_R1R0_TYPE,/*154*/ MTK_PULL_PUPD_R1R0_TYPE,/*155*/ + MTK_PULL_PUPD_R1R0_TYPE,/*156*/ MTK_PULL_PUPD_R1R0_TYPE,/*157*/ + MTK_PULL_PUPD_R1R0_TYPE,/*158*/ MTK_PULL_PUPD_R1R0_TYPE,/*159*/ + MTK_PULL_PUPD_R1R0_TYPE,/*160*/ MTK_PULL_PUPD_R1R0_TYPE,/*161*/ + MTK_PULL_PUPD_R1R0_TYPE,/*162*/ MTK_PULL_PUPD_R1R0_TYPE,/*163*/ + MTK_PULL_PUPD_R1R0_TYPE,/*164*/ MTK_PULL_PUPD_R1R0_TYPE,/*165*/ + MTK_PULL_PULLSEL_TYPE,/*166*/ MTK_PULL_PULLSEL_TYPE,/*167*/ + MTK_PULL_PULLSEL_TYPE,/*168*/ MTK_PULL_PULLSEL_TYPE,/*169*/ + MTK_PULL_PUPD_R1R0_TYPE,/*170*/ MTK_PULL_PUPD_R1R0_TYPE,/*171*/ + MTK_PULL_PUPD_R1R0_TYPE,/*172*/ MTK_PULL_PUPD_R1R0_TYPE,/*173*/ + MTK_PULL_PUPD_R1R0_TYPE,/*174*/ MTK_PULL_PUPD_R1R0_TYPE,/*175*/ + MTK_PULL_PULLSEL_TYPE,/*176*/ MTK_PULL_PULLSEL_TYPE,/*177*/ + MTK_PULL_PULLSEL_TYPE,/*178*/ MTK_PULL_PULLSEL_TYPE,/*179*/ + MTK_PULL_PULLSEL_TYPE,/*180*/ MTK_PULL_PULLSEL_TYPE,/*181*/ + MTK_PULL_PULLSEL_TYPE,/*182*/ MTK_PULL_PULLSEL_TYPE,/*183*/ + MTK_PULL_PULLSEL_TYPE,/*184*/ MTK_PULL_PULLSEL_TYPE,/*185*/ + MTK_PULL_PULLSEL_TYPE,/*186*/ MTK_PULL_PULLSEL_TYPE,/*187*/ + MTK_PULL_PULLSEL_TYPE,/*188*/ MTK_PULL_PULLSEL_TYPE,/*189*/ + MTK_PULL_PULLSEL_TYPE,/*190*/ MTK_PULL_PULLSEL_TYPE,/*191*/ + MTK_PULL_PULLSEL_TYPE,/*192*/ MTK_PULL_PULLSEL_TYPE,/*193*/ + MTK_PULL_PULLSEL_TYPE,/*194*/ MTK_PULL_PULLSEL_TYPE,/*195*/ + MTK_PULL_PULLSEL_TYPE,/*196*/ +}; + +static const struct mtk_pin_soc mt6795_data = { + .reg_cal = mt6795_reg_cals, + .pins = mtk_pins_mt6795, + .npins = ARRAY_SIZE(mtk_pins_mt6795), + .ngrps = ARRAY_SIZE(mtk_pins_mt6795), + .nfuncs = 8, + .eint_hw = &mt6795_eint_hw, + .gpio_m = 0, + .base_names = mtk_default_register_base_names, + .nbase_names = ARRAY_SIZE(mtk_default_register_base_names), + .pull_type = mt6795_pull_type, + .bias_disable_set = mtk_pinconf_bias_disable_set_rev1, + .bias_disable_get = mtk_pinconf_bias_disable_get_rev1, + .bias_set = mtk_pinconf_bias_set_rev1, + .bias_get = mtk_pinconf_bias_get_rev1, + .bias_set_combo = mtk_pinconf_bias_set_combo, + .bias_get_combo = mtk_pinconf_bias_get_combo, + .drive_set = mtk_pinconf_drive_set_rev1, + .drive_get = mtk_pinconf_drive_get_rev1, + .adv_pull_get = mtk_pinconf_adv_pull_get, + .adv_pull_set = mtk_pinconf_adv_pull_set, +}; + +static const struct of_device_id mt6795_pctrl_match[] = { + { .compatible = "mediatek,mt6795-pinctrl", .data = &mt6795_data }, + { } +}; + +static struct platform_driver mt6795_pinctrl_driver = { + .driver = { + .name = "mt6795-pinctrl", + .of_match_table = mt6795_pctrl_match, + .pm = &mtk_paris_pinctrl_pm_ops, + }, + .probe = mtk_paris_pinctrl_probe, +}; + +static int __init mtk_pinctrl_init(void) +{ + return platform_driver_register(&mt6795_pinctrl_driver); +} +arch_initcall(mtk_pinctrl_init); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h new file mode 100644 index 000000000000..f639bd859116 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6795.h @@ -0,0 +1,1698 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Collabora Ltd. 
+ * Author: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __PINCTRL_MTK_MT6795_H +#define __PINCTRL_MTK_MT6795_H + +#include "pinctrl-paris.h" + +static const struct mtk_pin_desc mtk_pins_mt6795[] = { + MTK_PIN( + 0, "GPIO0", + MTK_EINT_FUNCTION(0, 0), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO0"), + MTK_FUNCTION(1, "IRDA_PDN"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(4, "TDD_TMS"), + MTK_FUNCTION(5, "UTXD0") + ), + MTK_PIN( + 1, "GPIO1", + MTK_EINT_FUNCTION(0, 1), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO1"), + MTK_FUNCTION(1, "IRDA_RXD"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "SDA4"), + MTK_FUNCTION(4, "TDD_TCK"), + MTK_FUNCTION(5, "URXD0") + ), + MTK_PIN( + 2, "GPIO2", + MTK_EINT_FUNCTION(0, 2), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO2"), + MTK_FUNCTION(1, "IRDA_TXD"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "SCL4"), + MTK_FUNCTION(4, "TDD_TDI"), + MTK_FUNCTION(5, "UTXD3") + ), + MTK_PIN( + 3, "GPIO3", + MTK_EINT_FUNCTION(0, 3), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO3"), + MTK_FUNCTION(1, "DSI1_TE"), + MTK_FUNCTION(2, "I2S1_DO_1"), + MTK_FUNCTION(3, "SDA3"), + MTK_FUNCTION(4, "TDD_TDO"), + MTK_FUNCTION(5, "URXD3") + ), + MTK_PIN( + 4, "GPIO4", + MTK_EINT_FUNCTION(0, 4), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO4"), + MTK_FUNCTION(1, "DISP_PWM1"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "SCL3"), + MTK_FUNCTION(4, "TDD_TRSTN") + ), + MTK_PIN( + 5, "GPIO5", + MTK_EINT_FUNCTION(0, 5), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO5"), + MTK_FUNCTION(1, "PCM1_CLK"), + MTK_FUNCTION(2, "I2S2_WS"), + MTK_FUNCTION(3, "SPI_CK_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TMS") + ), + MTK_PIN( + 6, "GPIO6", + MTK_EINT_FUNCTION(0, 6), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO6"), + MTK_FUNCTION(1, "PCM1_SYNC"), + MTK_FUNCTION(2, "I2S2_BCK"), + MTK_FUNCTION(3, "SPI_MI_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TCK") + ), + MTK_PIN( + 7, "GPIO7", + MTK_EINT_FUNCTION(0, 7), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO7"), + MTK_FUNCTION(1, "PCM1_DI"), + MTK_FUNCTION(2, "I2S2_DI_1"), + MTK_FUNCTION(3, "SPI_MO_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDI") + ), + MTK_PIN( + 8, "GPIO8", + MTK_EINT_FUNCTION(0, 8), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO8"), + MTK_FUNCTION(1, "PCM1_DO"), + MTK_FUNCTION(2, "I2S2_DI_2"), + MTK_FUNCTION(3, "SPI_CS_3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDO") + ), + MTK_PIN( + 9, "GPIO9", + MTK_EINT_FUNCTION(0, 9), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO9"), + MTK_FUNCTION(1, "USB_DRVVBUS"), + MTK_FUNCTION(2, "I2S2_MCK"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TRST") + ), + MTK_PIN( + 10, "GPIO10", + MTK_EINT_FUNCTION(0, 10), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO10"), + MTK_FUNCTION(2, "I2S0_WS") + ), + MTK_PIN( + 11, "GPIO11", + MTK_EINT_FUNCTION(0, 11), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO11"), + MTK_FUNCTION(2, "I2S0_BCK") + ), + MTK_PIN( + 12, "GPIO12", + MTK_EINT_FUNCTION(0, 12), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO12"), + MTK_FUNCTION(2, "I2S0_MCK") + ), + MTK_PIN( + 13, "GPIO13", + MTK_EINT_FUNCTION(0, 13), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO13"), + MTK_FUNCTION(2, "I2S0_DO") + ), + MTK_PIN( + 14, "GPIO14", + MTK_EINT_FUNCTION(0, 14), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO14"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "DISP_PWM1"), + MTK_FUNCTION(4, "PWM4"), + MTK_FUNCTION(5, "IRDA_RXD"), + MTK_FUNCTION(6, "I2S1_BCK") + ), + MTK_PIN( + 15, "GPIO15", + MTK_EINT_FUNCTION(0, 15), + 
DRV_GRP2, + MTK_FUNCTION(0, "GPIO15"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "PWM5"), + MTK_FUNCTION(5, "IRDA_TXD"), + MTK_FUNCTION(6, "I2S1_MCK") + ), + MTK_PIN( + 16, "GPIO16", + MTK_EINT_FUNCTION(0, 16), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO16"), + MTK_FUNCTION(1, "IDDIG"), + MTK_FUNCTION(2, "FLASH"), + MTK_FUNCTION(3, "EXT_FRAME_SYNC"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 17, "GPIO17", + MTK_EINT_FUNCTION(0, 17), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO17"), + MTK_FUNCTION(1, "SIM1_SCLK"), + MTK_FUNCTION(2, "SIM2_SCLK") + ), + MTK_PIN( + 18, "GPIO18", + MTK_EINT_FUNCTION(0, 18), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO18"), + MTK_FUNCTION(1, "SIM1_SRST"), + MTK_FUNCTION(2, "SIM2_SRST") + ), + MTK_PIN( + 19, "GPIO19", + MTK_EINT_FUNCTION(0, 19), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO19"), + MTK_FUNCTION(1, "SIM1_SDAT"), + MTK_FUNCTION(2, "SIM2_SDAT") + ), + MTK_PIN( + 20, "GPIO20", + MTK_EINT_FUNCTION(0, 20), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO20"), + MTK_FUNCTION(1, "SIM2_SCLK"), + MTK_FUNCTION(2, "SIM1_SCLK") + ), + MTK_PIN( + 21, "GPIO21", + MTK_EINT_FUNCTION(0, 21), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO21"), + MTK_FUNCTION(1, "SIM2_SRST"), + MTK_FUNCTION(2, "SIM1_SRST") + ), + MTK_PIN( + 22, "GPIO22", + MTK_EINT_FUNCTION(0, 22), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO22"), + MTK_FUNCTION(1, "SIM2_SDAT"), + MTK_FUNCTION(2, "SIM1_SDAT") + ), + MTK_PIN( + 23, "GPIO23", + MTK_EINT_FUNCTION(0, 23), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO23"), + MTK_FUNCTION(1, "MSDC3_DAT0") + ), + MTK_PIN( + 24, "GPIO24", + MTK_EINT_FUNCTION(0, 24), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO24"), + MTK_FUNCTION(1, "MSDC3_DAT1") + ), + MTK_PIN( + 25, "GPIO25", + MTK_EINT_FUNCTION(0, 25), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO25"), + MTK_FUNCTION(1, "MSDC3_DAT2") + ), + MTK_PIN( + 26, "GPIO26", + MTK_EINT_FUNCTION(0, 26), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO26"), + MTK_FUNCTION(1, "MSDC3_DAT3") + ), + MTK_PIN( + 27, "GPIO27", + MTK_EINT_FUNCTION(0, 27), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO27"), + MTK_FUNCTION(1, "MSDC3_CLK") + ), + MTK_PIN( + 28, "GPIO28", + MTK_EINT_FUNCTION(0, 28), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO28"), + MTK_FUNCTION(1, "MSDC3_CMD") + ), + MTK_PIN( + 29, "GPIO29", + MTK_EINT_FUNCTION(0, 29), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO29"), + MTK_FUNCTION(1, "PTA_RXD"), + MTK_FUNCTION(2, "UCTS2") + ), + MTK_PIN( + 30, "GPIO30", + MTK_EINT_FUNCTION(0, 30), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO30"), + MTK_FUNCTION(1, "PTA_TXD"), + MTK_FUNCTION(2, "URTS2") + ), + MTK_PIN( + 31, "GPIO31", + MTK_EINT_FUNCTION(0, 31), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO31"), + MTK_FUNCTION(1, "URXD2"), + MTK_FUNCTION(2, "UTXD2") + ), + MTK_PIN( + 32, "GPIO32", + MTK_EINT_FUNCTION(0, 32), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO32"), + MTK_FUNCTION(1, "UTXD2"), + MTK_FUNCTION(2, "URXD2") + ), + MTK_PIN( + 33, "GPIO33", + MTK_EINT_FUNCTION(0, 33), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO33"), + MTK_FUNCTION(1, "MRG_CLK"), + MTK_FUNCTION(2, "PCM0_CLK") + ), + MTK_PIN( + 34, "GPIO34", + MTK_EINT_FUNCTION(0, 34), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO34"), + MTK_FUNCTION(1, "MRG_DI"), + MTK_FUNCTION(2, "PCM0_DI") + ), + MTK_PIN( + 35, "GPIO35", + MTK_EINT_FUNCTION(0, 35), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO35"), + MTK_FUNCTION(1, "MRG_DO"), + MTK_FUNCTION(2, "PCM0_DO") + ), + MTK_PIN( + 36, "GPIO36", + MTK_EINT_FUNCTION(0, 36), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO36"), + MTK_FUNCTION(1, "MRG_SYNC"), + MTK_FUNCTION(2, "PCM0_SYNC") + ), + MTK_PIN( + 37, "GPIO37", + MTK_EINT_FUNCTION(0, 37), + DRV_GRP2, + MTK_FUNCTION(0, 
"GPIO37"), + MTK_FUNCTION(1, "GPS_SYNC") + ), + MTK_PIN( + 38, "GPIO38", + MTK_EINT_FUNCTION(0, 38), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO38"), + MTK_FUNCTION(1, "DAIRSTB") + ), + MTK_PIN( + 39, "GPIO39", + MTK_EINT_FUNCTION(0, 39), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO39"), + MTK_FUNCTION(1, "CM2MCLK") + ), + MTK_PIN( + 40, "GPIO40", + MTK_EINT_FUNCTION(0, 40), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO40"), + MTK_FUNCTION(1, "CM3MCLK"), + MTK_FUNCTION(2, "IRDA_PDN"), + MTK_FUNCTION(3, "PWM6"), + MTK_FUNCTION(4, "I2S1_WS") + ), + MTK_PIN( + 41, "GPIO41", + MTK_EINT_FUNCTION(0, 41), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO41"), + MTK_FUNCTION(1, "CMPCLK"), + MTK_FUNCTION(2, "CMCSK"), + MTK_FUNCTION(3, "FLASH") + ), + MTK_PIN( + 42, "GPIO42", + MTK_EINT_FUNCTION(0, 42), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO42"), + MTK_FUNCTION(1, "CMMCLK") + ), + MTK_PIN( + 43, "GPIO43", + MTK_EINT_FUNCTION(0, 43), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO43"), + MTK_FUNCTION(1, "SDA2") + ), + MTK_PIN( + 44, "GPIO44", + MTK_EINT_FUNCTION(0, 44), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO44"), + MTK_FUNCTION(1, "SCL2") + ), + MTK_PIN( + 45, "GPIO45", + MTK_EINT_FUNCTION(0, 45), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO45"), + MTK_FUNCTION(1, "SDA0") + ), + MTK_PIN( + 46, "GPIO46", + MTK_EINT_FUNCTION(0, 46), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO46"), + MTK_FUNCTION(1, "SCL0") + ), + MTK_PIN( + 47, "GPIO47", + MTK_EINT_FUNCTION(0, 47), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO47"), + MTK_FUNCTION(1, "BPI_BUS0") + ), + MTK_PIN( + 48, "GPIO48", + MTK_EINT_FUNCTION(0, 48), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO48"), + MTK_FUNCTION(1, "BPI_BUS1") + ), + MTK_PIN( + 49, "GPIO49", + MTK_EINT_FUNCTION(0, 49), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO49"), + MTK_FUNCTION(1, "BPI_BUS2") + ), + MTK_PIN( + 50, "GPIO50", + MTK_EINT_FUNCTION(0, 50), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO50"), + MTK_FUNCTION(1, "BPI_BUS3") + ), + MTK_PIN( + 51, "GPIO51", + MTK_EINT_FUNCTION(0, 51), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO51"), + MTK_FUNCTION(1, "BPI_BUS4") + ), + MTK_PIN( + 52, "GPIO52", + MTK_EINT_FUNCTION(0, 52), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO52"), + MTK_FUNCTION(1, "BPI_BUS5") + ), + MTK_PIN( + 53, "GPIO53", + MTK_EINT_FUNCTION(0, 53), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO53"), + MTK_FUNCTION(1, "BPI_BUS6") + ), + MTK_PIN( + 54, "GPIO54", + MTK_EINT_FUNCTION(0, 54), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO54"), + MTK_FUNCTION(1, "BPI_BUS7") + ), + MTK_PIN( + 55, "GPIO55", + MTK_EINT_FUNCTION(0, 55), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO55"), + MTK_FUNCTION(1, "BPI_BUS8") + ), + MTK_PIN( + 56, "GPIO56", + MTK_EINT_FUNCTION(0, 56), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO56"), + MTK_FUNCTION(1, "BPI_BUS9") + ), + MTK_PIN( + 57, "GPIO57", + MTK_EINT_FUNCTION(0, 57), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO57"), + MTK_FUNCTION(1, "BPI_BUS10") + ), + MTK_PIN( + 58, "GPIO58", + MTK_EINT_FUNCTION(0, 58), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO58"), + MTK_FUNCTION(1, "BPI_BUS11") + ), + MTK_PIN( + 59, "GPIO59", + MTK_EINT_FUNCTION(0, 59), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO59"), + MTK_FUNCTION(1, "BPI_BUS12") + ), + MTK_PIN( + 60, "GPIO60", + MTK_EINT_FUNCTION(0, 60), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO60"), + MTK_FUNCTION(1, "BPI_BUS13") + ), + MTK_PIN( + 61, "GPIO61", + MTK_EINT_FUNCTION(0, 61), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO61"), + MTK_FUNCTION(1, "BPI_BUS14") + ), + MTK_PIN( + 62, "GPIO62", + MTK_EINT_FUNCTION(0, 62), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO62"), + MTK_FUNCTION(1, "RFIC1_BSI_CK") + ), + MTK_PIN( + 63, "GPIO63", + MTK_EINT_FUNCTION(0, 63), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO63"), + 
MTK_FUNCTION(1, "RFIC1_BSI_D0") + ), + MTK_PIN( + 64, "GPIO64", + MTK_EINT_FUNCTION(0, 64), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO64"), + MTK_FUNCTION(1, "RFIC1_BSI_D1") + ), + MTK_PIN( + 65, "GPIO65", + MTK_EINT_FUNCTION(0, 65), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO65"), + MTK_FUNCTION(1, "RFIC1_BSI_D2") + ), + MTK_PIN( + 66, "GPIO66", + MTK_EINT_FUNCTION(0, 66), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO66"), + MTK_FUNCTION(1, "RFIC1_BSI_CS") + ), + MTK_PIN( + 67, "GPIO67", + MTK_EINT_FUNCTION(0, 67), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO67"), + MTK_FUNCTION(1, "TD_TXBPI") + ), + MTK_PIN( + 68, "GPIO68", + MTK_EINT_FUNCTION(0, 68), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO68"), + MTK_FUNCTION(1, "RFIC0_BSI_CK") + ), + MTK_PIN( + 69, "GPIO69", + MTK_EINT_FUNCTION(0, 69), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO69"), + MTK_FUNCTION(1, "RFIC0_BSI_D0") + ), + MTK_PIN( + 70, "GPIO70", + MTK_EINT_FUNCTION(0, 70), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO70"), + MTK_FUNCTION(1, "RFIC0_BSI_D1") + ), + MTK_PIN( + 71, "GPIO71", + MTK_EINT_FUNCTION(0, 71), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO71"), + MTK_FUNCTION(1, "RFIC0_BSI_D2") + ), + MTK_PIN( + 72, "GPIO72", + MTK_EINT_FUNCTION(0, 72), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO72"), + MTK_FUNCTION(1, "RFIC0_BSI_CS") + ), + MTK_PIN( + 73, "GPIO73", + MTK_EINT_FUNCTION(0, 73), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO73"), + MTK_FUNCTION(1, "MISC_BSI_DO") + ), + MTK_PIN( + 74, "GPIO74", + MTK_EINT_FUNCTION(0, 74), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO74"), + MTK_FUNCTION(1, "MISC_BSI_CK") + ), + MTK_PIN( + 75, "GPIO75", + MTK_EINT_FUNCTION(0, 75), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO75"), + MTK_FUNCTION(1, "MISC_BSI_CS0B"), + MTK_FUNCTION(2, "MIPI1_SCLK") + ), + MTK_PIN( + 76, "GPIO76", + MTK_EINT_FUNCTION(0, 76), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO76"), + MTK_FUNCTION(1, "MISC_BSI_CS1B") + ), + MTK_PIN( + 77, "GPIO77", + MTK_EINT_FUNCTION(0, 77), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO77"), + MTK_FUNCTION(1, "MISC_BSI_DI"), + MTK_FUNCTION(2, "MIPI1_SDATA") + ), + MTK_PIN( + 78, "GPIO78", + MTK_EINT_FUNCTION(0, 78), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO78"), + MTK_FUNCTION(1, "LTE_TXBPI") + ), + MTK_PIN( + 79, "GPIO79", + MTK_EINT_FUNCTION(0, 79), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO79"), + MTK_FUNCTION(1, "BPI_BUS15") + ), + MTK_PIN( + 80, "GPIO80", + MTK_EINT_FUNCTION(0, 80), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO80"), + MTK_FUNCTION(1, "BPI_BUS16") + ), + MTK_PIN( + 81, "GPIO81", + MTK_EINT_FUNCTION(0, 81), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO81"), + MTK_FUNCTION(1, "BPI_BUS17") + ), + MTK_PIN( + 82, "GPIO82", + MTK_EINT_FUNCTION(0, 82), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO82"), + MTK_FUNCTION(1, "BPI_BUS18") + ), + MTK_PIN( + 83, "GPIO83", + MTK_EINT_FUNCTION(0, 83), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO83"), + MTK_FUNCTION(1, "BPI_BUS19") + ), + MTK_PIN( + 84, "GPIO84", + MTK_EINT_FUNCTION(0, 84), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO84"), + MTK_FUNCTION(1, "BPI_BUS20") + ), + MTK_PIN( + 85, "GPIO85", + MTK_EINT_FUNCTION(0, 85), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO85"), + MTK_FUNCTION(1, "BPI_BUS21") + ), + MTK_PIN( + 86, "GPIO86", + MTK_EINT_FUNCTION(0, 86), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO86"), + MTK_FUNCTION(1, "BPI_BUS22") + ), + MTK_PIN( + 87, "GPIO87", + MTK_EINT_FUNCTION(0, 87), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO87"), + MTK_FUNCTION(1, "BPI_BUS23") + ), + MTK_PIN( + 88, "GPIO88", + MTK_EINT_FUNCTION(0, 88), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO88"), + MTK_FUNCTION(1, "BPI_BUS24") + ), + MTK_PIN( + 89, "GPIO89", + MTK_EINT_FUNCTION(0, 89), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO89"), + MTK_FUNCTION(1, 
"BPI_BUS25") + ), + MTK_PIN( + 90, "GPIO90", + MTK_EINT_FUNCTION(0, 90), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO90"), + MTK_FUNCTION(1, "BPI_BUS26") + ), + MTK_PIN( + 91, "GPIO91", + MTK_EINT_FUNCTION(0, 91), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO91"), + MTK_FUNCTION(1, "BPI_BUS27") + ), + MTK_PIN( + 92, "GPIO92", + MTK_EINT_FUNCTION(0, 92), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO92"), + MTK_FUNCTION(1, "PCM1_CLK"), + MTK_FUNCTION(2, "I2S0_BCK"), + MTK_FUNCTION(3, "NLD6") + ), + MTK_PIN( + 93, "GPIO93", + MTK_EINT_FUNCTION(0, 93), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO93"), + MTK_FUNCTION(1, "PCM1_SYNC"), + MTK_FUNCTION(2, "I2S0_WS"), + MTK_FUNCTION(3, "NLD7") + ), + MTK_PIN( + 94, "GPIO94", + MTK_EINT_FUNCTION(0, 94), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO94"), + MTK_FUNCTION(1, "PCM1_DI"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "NREB") + ), + MTK_PIN( + 95, "GPIO95", + MTK_EINT_FUNCTION(0, 95), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO95"), + MTK_FUNCTION(1, "PCM1_DO"), + MTK_FUNCTION(2, "I2S0_DO"), + MTK_FUNCTION(3, "NRNB0") + ), + MTK_PIN( + 96, "GPIO96", + MTK_EINT_FUNCTION(0, 96), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO96"), + MTK_FUNCTION(1, "URXD1"), + MTK_FUNCTION(2, "UTXD1"), + MTK_FUNCTION(3, "NWEB") + ), + MTK_PIN( + 97, "GPIO97", + MTK_EINT_FUNCTION(0, 97), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO97"), + MTK_FUNCTION(1, "UTXD1"), + MTK_FUNCTION(2, "URXD1"), + MTK_FUNCTION(3, "NCEB0") + ), + MTK_PIN( + 98, "GPIO98", + MTK_EINT_FUNCTION(0, 98), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO98"), + MTK_FUNCTION(1, "URTS1"), + MTK_FUNCTION(2, "UCTS1"), + MTK_FUNCTION(3, "NALE") + ), + MTK_PIN( + 99, "GPIO99", + MTK_EINT_FUNCTION(0, 99), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO99"), + MTK_FUNCTION(1, "UCTS1"), + MTK_FUNCTION(2, "URTS1"), + MTK_FUNCTION(3, "NCLE") + ), + MTK_PIN( + 100, "GPIO100", + MTK_EINT_FUNCTION(0, 100), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO100"), + MTK_FUNCTION(1, "MSDC2_DAT0"), + MTK_FUNCTION(2, "URXD1"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "SDA4") + ), + MTK_PIN( + 101, "GPIO101", + MTK_EINT_FUNCTION(0, 101), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO101"), + MTK_FUNCTION(1, "MSDC2_DAT1"), + MTK_FUNCTION(2, "UTXD1"), + MTK_FUNCTION(4, "SCL4") + ), + MTK_PIN( + 102, "GPIO102", + MTK_EINT_FUNCTION(0, 102), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO102"), + MTK_FUNCTION(1, "MSDC2_DAT2"), + MTK_FUNCTION(2, "URTS1"), + MTK_FUNCTION(3, "UTXD0"), + MTK_FUNCTION(5, "PWM0"), + MTK_FUNCTION(6, "SPI_CK_1") + ), + MTK_PIN( + 103, "GPIO103", + MTK_EINT_FUNCTION(0, 103), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO103"), + MTK_FUNCTION(1, "MSDC2_DAT3"), + MTK_FUNCTION(2, "UCTS1"), + MTK_FUNCTION(3, "URXD0"), + MTK_FUNCTION(5, "PWM1"), + MTK_FUNCTION(6, "SPI_MI_1") + ), + MTK_PIN( + 104, "GPIO104", + MTK_EINT_FUNCTION(0, 104), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO104"), + MTK_FUNCTION(1, "MSDC2_CLK"), + MTK_FUNCTION(2, "NLD4"), + MTK_FUNCTION(3, "UTXD3"), + MTK_FUNCTION(4, "SDA3"), + MTK_FUNCTION(5, "PWM2"), + MTK_FUNCTION(6, "SPI_MO_1") + ), + MTK_PIN( + 105, "GPIO105", + MTK_EINT_FUNCTION(0, 105), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO105"), + MTK_FUNCTION(1, "MSDC2_CMD"), + MTK_FUNCTION(2, "NLD5"), + MTK_FUNCTION(3, "URXD3"), + MTK_FUNCTION(4, "SCL3"), + MTK_FUNCTION(5, "PWM3"), + MTK_FUNCTION(6, "SPI_CS_1") + ), + MTK_PIN( + 106, "GPIO106", + MTK_EINT_FUNCTION(0, 106), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO106"), + MTK_FUNCTION(1, "LCM_RST") + ), + MTK_PIN( + 107, "GPIO107", + MTK_EINT_FUNCTION(0, 107), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO107"), + MTK_FUNCTION(1, "DSI_TE") + ), + MTK_PIN( + 108, "GPIO108", + 
MTK_EINT_FUNCTION(0, 108), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO108"), + MTK_FUNCTION(1, "JTMS"), + MTK_FUNCTION(2, "MFG_JTAG_TMS"), + MTK_FUNCTION(3, "TDD_TMS"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TMS"), + MTK_FUNCTION(6, "DFD_TMS") + ), + MTK_PIN( + 109, "GPIO109", + MTK_EINT_FUNCTION(0, 109), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO109"), + MTK_FUNCTION(1, "JTCK"), + MTK_FUNCTION(2, "MFG_JTAG_TCK"), + MTK_FUNCTION(3, "TDD_TCK"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TCK"), + MTK_FUNCTION(6, "DFD_TCK") + ), + MTK_PIN( + 110, "GPIO110", + MTK_EINT_FUNCTION(0, 110), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO110"), + MTK_FUNCTION(1, "JTDI"), + MTK_FUNCTION(2, "MFG_JTAG_TDI"), + MTK_FUNCTION(3, "TDD_TDI"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDI"), + MTK_FUNCTION(6, "DFD_TDI") + ), + MTK_PIN( + 111, "GPIO111", + MTK_EINT_FUNCTION(0, 111), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO111"), + MTK_FUNCTION(1, "JTDO"), + MTK_FUNCTION(2, "MFG_JTAG_TDO"), + MTK_FUNCTION(3, "TDD_TDO"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TDO"), + MTK_FUNCTION(6, "DFD_TDO") + ), + MTK_PIN( + 112, "GPIO112", + MTK_EINT_FUNCTION(0, 112), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO112"), + MTK_FUNCTION(1, "JTRST_B"), + MTK_FUNCTION(2, "MFG_JTAG_TRSTN"), + MTK_FUNCTION(3, "TDD_TRSTN"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), + MTK_FUNCTION(5, "AP_MD32_JTAG_TRST"), + MTK_FUNCTION(6, "DFD_NTRST") + ), + MTK_PIN( + 113, "GPIO113", + MTK_EINT_FUNCTION(0, 113), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO113"), + MTK_FUNCTION(1, "URXD0"), + MTK_FUNCTION(2, "UTXD0"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_WS") + ), + MTK_PIN( + 114, "GPIO114", + MTK_EINT_FUNCTION(0, 114), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO114"), + MTK_FUNCTION(1, "UTXD0"), + MTK_FUNCTION(2, "URXD0"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_BCK") + ), + MTK_PIN( + 115, "GPIO115", + MTK_EINT_FUNCTION(0, 115), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO115"), + MTK_FUNCTION(1, "URTS0"), + MTK_FUNCTION(2, "UCTS0"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_MCK") + ), + MTK_PIN( + 116, "GPIO116", + MTK_EINT_FUNCTION(0, 116), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO116"), + MTK_FUNCTION(1, "UCTS0"), + MTK_FUNCTION(2, "URTS0"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD"), + MTK_FUNCTION(6, "I2S2_DI_1") + ), + MTK_PIN( + 117, "GPIO117", + MTK_EINT_FUNCTION(0, 117), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO117"), + MTK_FUNCTION(1, "URXD3"), + MTK_FUNCTION(2, "UTXD3"), + MTK_FUNCTION(3, "MD_URXD"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "TDD_TXD") + ), + MTK_PIN( + 118, "GPIO118", + MTK_EINT_FUNCTION(0, 118), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO118"), + MTK_FUNCTION(1, "UTXD3"), + MTK_FUNCTION(2, "URXD3"), + MTK_FUNCTION(3, "MD_UTXD"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "TDD_TXD") + ), + MTK_PIN( + 119, "GPIO119", + MTK_EINT_FUNCTION(0, 119), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO119"), + MTK_FUNCTION(1, "KROW0") + ), + MTK_PIN( + 120, "GPIO120", + MTK_EINT_FUNCTION(0, 120), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO120"), + MTK_FUNCTION(1, "KROW1"), + MTK_FUNCTION(3, "PWM6") + ), + MTK_PIN( + 121, "GPIO121", + MTK_EINT_FUNCTION(0, 121), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO121"), + MTK_FUNCTION(1, "KROW2"), 
+ MTK_FUNCTION(2, "IRDA_PDN"), + MTK_FUNCTION(3, "I2S1_DO_1"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "SPI_CK_2"), + MTK_FUNCTION(6, "PWM4") + ), + MTK_PIN( + 122, "GPIO122", + MTK_EINT_FUNCTION(0, 122), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO122"), + MTK_FUNCTION(1, "KCOL0") + ), + MTK_PIN( + 123, "GPIO123", + MTK_EINT_FUNCTION(0, 123), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO123"), + MTK_FUNCTION(1, "KCOL1"), + MTK_FUNCTION(2, "IRDA_RXD"), + MTK_FUNCTION(3, "I2S2_DI_2"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 124, "GPIO124", + MTK_EINT_FUNCTION(0, 124), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO124"), + MTK_FUNCTION(1, "KCOL2"), + MTK_FUNCTION(2, "IRDA_TXD"), + MTK_FUNCTION(3, "I2S1_DO_2"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "SPI_MI_2"), + MTK_FUNCTION(6, "PWM3") + ), + MTK_PIN( + 125, "GPIO125", + MTK_EINT_FUNCTION(0, 125), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO125"), + MTK_FUNCTION(1, "SDA1") + ), + MTK_PIN( + 126, "GPIO126", + MTK_EINT_FUNCTION(0, 126), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO126"), + MTK_FUNCTION(1, "SCL1") + ), + MTK_PIN( + 127, "GPIO127", + MTK_EINT_FUNCTION(1, 127), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO127"), + MTK_FUNCTION(1, "MD_EINT1"), + MTK_FUNCTION(2, "DISP_PWM1"), + MTK_FUNCTION(3, "SPI_MO_2") + ), + MTK_PIN( + 128, "GPIO128", + MTK_EINT_FUNCTION(1, 128), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO128"), + MTK_FUNCTION(1, "MD_EINT2"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "SPI_CS_2") + ), + MTK_PIN( + 129, "GPIO129", + MTK_EINT_FUNCTION(0, 129), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO129"), + MTK_FUNCTION(1, "I2S3_WS"), + MTK_FUNCTION(2, "I2S2_WS"), + MTK_FUNCTION(3, "PWM0") + ), + MTK_PIN( + 130, "GPIO130", + MTK_EINT_FUNCTION(0, 130), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO130"), + MTK_FUNCTION(1, "I2S3_BCK"), + MTK_FUNCTION(2, "I2S2_BCK"), + MTK_FUNCTION(3, "PWM1") + ), + MTK_PIN( + 131, "GPIO131", + MTK_EINT_FUNCTION(0, 131), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO131"), + MTK_FUNCTION(1, "I2S3_MCK"), + MTK_FUNCTION(2, "I2S2_MCK"), + MTK_FUNCTION(3, "PWM2") + ), + MTK_PIN( + 132, "GPIO132", + MTK_EINT_FUNCTION(0, 132), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO132"), + MTK_FUNCTION(1, "I2S3_DO_1"), + MTK_FUNCTION(2, "I2S2_DI_1"), + MTK_FUNCTION(3, "PWM3") + ), + MTK_PIN( + 133, "GPIO133", + MTK_EINT_FUNCTION(0, 133), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO133"), + MTK_FUNCTION(1, "I2S3_DO_2"), + MTK_FUNCTION(2, "I2S2_DI_2"), + MTK_FUNCTION(3, "PWM4") + ), + MTK_PIN( + 134, "GPIO134", + MTK_EINT_FUNCTION(0, 134), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO134"), + MTK_FUNCTION(1, "I2S3_DO_3"), + MTK_FUNCTION(2, "DISP_PWM1"), + MTK_FUNCTION(3, "I2S1_DO_1"), + MTK_FUNCTION(4, "PWM5") + ), + MTK_PIN( + 135, "GPIO135", + MTK_EINT_FUNCTION(0, 135), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO135"), + MTK_FUNCTION(1, "I2S3_DO_4"), + MTK_FUNCTION(2, "DSI1_TE"), + MTK_FUNCTION(3, "I2S1_DO_2"), + MTK_FUNCTION(4, "PWM6") + ), + MTK_PIN( + 136, "GPIO136", + MTK_EINT_FUNCTION(0, 136), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO136"), + MTK_FUNCTION(1, "SDA3") + ), + MTK_PIN( + 137, "GPIO137", + MTK_EINT_FUNCTION(0, 137), + DRV_FIXED, + MTK_FUNCTION(0, "GPIO137"), + MTK_FUNCTION(1, "SCL3") + ), + MTK_PIN( + 138, "GPIO138", + MTK_EINT_FUNCTION(0, 138), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO138"), + MTK_FUNCTION(1, "DPI_CK"), + MTK_FUNCTION(2, "NLD6"), + MTK_FUNCTION(3, "UTXD0"), + MTK_FUNCTION(4, "USB_DRVVBUS"), + MTK_FUNCTION(5, "IRDA_PDN") + ), + MTK_PIN( + 139, "GPIO139", + MTK_EINT_FUNCTION(0, 139), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO139"), + MTK_FUNCTION(1, "DPI_DE"), + MTK_FUNCTION(2, "NLD7"), + 
MTK_FUNCTION(3, "URXD0"), + MTK_FUNCTION(4, "MD_UTXD"), + MTK_FUNCTION(5, "IRDA_RXD") + ), + MTK_PIN( + 140, "GPIO140", + MTK_EINT_FUNCTION(0, 140), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO140"), + MTK_FUNCTION(1, "DPI_D0"), + MTK_FUNCTION(2, "NREB"), + MTK_FUNCTION(3, "UCTS0"), + MTK_FUNCTION(4, "MD_URXD"), + MTK_FUNCTION(5, "IRDA_TXD") + ), + MTK_PIN( + 141, "GPIO141", + MTK_EINT_FUNCTION(0, 141), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO141"), + MTK_FUNCTION(1, "DPI_D1"), + MTK_FUNCTION(2, "NRNB0"), + MTK_FUNCTION(3, "URTS0"), + MTK_FUNCTION(4, "LTE_UTXD"), + MTK_FUNCTION(5, "I2S2_WS") + ), + MTK_PIN( + 142, "GPIO142", + MTK_EINT_FUNCTION(0, 142), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO142"), + MTK_FUNCTION(1, "DPI_D2"), + MTK_FUNCTION(2, "NWEB"), + MTK_FUNCTION(3, "UTXD1"), + MTK_FUNCTION(4, "LTE_URXD"), + MTK_FUNCTION(5, "I2S2_BCK") + ), + MTK_PIN( + 143, "GPIO143", + MTK_EINT_FUNCTION(0, 143), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO143"), + MTK_FUNCTION(1, "DPI_D3"), + MTK_FUNCTION(2, "NCEB0"), + MTK_FUNCTION(3, "URXD1"), + MTK_FUNCTION(4, "TDD_TXD"), + MTK_FUNCTION(5, "I2S2_MCK") + ), + MTK_PIN( + 144, "GPIO144", + MTK_EINT_FUNCTION(0, 144), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO144"), + MTK_FUNCTION(1, "DPI_D4"), + MTK_FUNCTION(2, "NALE"), + MTK_FUNCTION(3, "UCTS1"), + MTK_FUNCTION(4, "TDD_TMS"), + MTK_FUNCTION(5, "I2S2_DI_1") + ), + MTK_PIN( + 145, "GPIO145", + MTK_EINT_FUNCTION(0, 145), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO145"), + MTK_FUNCTION(1, "DPI_D5"), + MTK_FUNCTION(2, "NCLE"), + MTK_FUNCTION(3, "URTS1"), + MTK_FUNCTION(4, "TDD_TCK"), + MTK_FUNCTION(5, "I2S2_DI_2") + ), + MTK_PIN( + 146, "GPIO146", + MTK_EINT_FUNCTION(0, 146), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO146"), + MTK_FUNCTION(1, "DPI_D6"), + MTK_FUNCTION(2, "NLD8"), + MTK_FUNCTION(3, "UTXD2"), + MTK_FUNCTION(4, "TDD_TDI") + ), + MTK_PIN( + 147, "GPIO147", + MTK_EINT_FUNCTION(0, 147), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO147"), + MTK_FUNCTION(1, "DPI_D7"), + MTK_FUNCTION(2, "NLD9"), + MTK_FUNCTION(3, "URXD2"), + MTK_FUNCTION(4, "TDD_TDO"), + MTK_FUNCTION(5, "I2S1_WS") + ), + MTK_PIN( + 148, "GPIO148", + MTK_EINT_FUNCTION(0, 148), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO148"), + MTK_FUNCTION(1, "DPI_D8"), + MTK_FUNCTION(2, "NLD10"), + MTK_FUNCTION(3, "UCTS2"), + MTK_FUNCTION(4, "TDD_TRSTN"), + MTK_FUNCTION(5, "I2S1_BCK") + ), + MTK_PIN( + 149, "GPIO149", + MTK_EINT_FUNCTION(0, 149), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO149"), + MTK_FUNCTION(1, "DPI_D9"), + MTK_FUNCTION(2, "NLD11"), + MTK_FUNCTION(3, "URTS2"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TMS"), + MTK_FUNCTION(5, "I2S1_MCK") + ), + MTK_PIN( + 150, "GPIO150", + MTK_EINT_FUNCTION(0, 150), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO150"), + MTK_FUNCTION(1, "DPI_D10"), + MTK_FUNCTION(2, "NLD12"), + MTK_FUNCTION(3, "UTXD3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TCK"), + MTK_FUNCTION(5, "I2S1_DO_1") + ), + MTK_PIN( + 151, "GPIO151", + MTK_EINT_FUNCTION(0, 151), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO151"), + MTK_FUNCTION(1, "DPI_D11"), + MTK_FUNCTION(2, "NLD13"), + MTK_FUNCTION(3, "URXD3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDI"), + MTK_FUNCTION(5, "I2S1_DO_2") + ), + MTK_PIN( + 152, "GPIO152", + MTK_EINT_FUNCTION(0, 152), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO152"), + MTK_FUNCTION(1, "DPI_HSYNC"), + MTK_FUNCTION(2, "NLD14"), + MTK_FUNCTION(3, "UCTS3"), + MTK_FUNCTION(4, "LTE_MD32_JTAG_TDO"), + MTK_FUNCTION(5, "DSI1_TE") + ), + MTK_PIN( + 153, "GPIO153", + MTK_EINT_FUNCTION(0, 153), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO153"), + MTK_FUNCTION(1, "DPI_VSYNC"), + MTK_FUNCTION(2, "NLD15"), + MTK_FUNCTION(3, "URTS3"), + 
MTK_FUNCTION(4, "LTE_MD32_JTAG_TRST"), + MTK_FUNCTION(5, "DISP_PWM1") + ), + MTK_PIN( + 154, "GPIO154", + MTK_EINT_FUNCTION(0, 154), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO154"), + MTK_FUNCTION(1, "MSDC0_DAT0"), + MTK_FUNCTION(2, "NLD8") + ), + MTK_PIN( + 155, "GPIO155", + MTK_EINT_FUNCTION(0, 155), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO155"), + MTK_FUNCTION(1, "MSDC0_DAT1"), + MTK_FUNCTION(2, "NLD9") + ), + MTK_PIN( + 156, "GPIO156", + MTK_EINT_FUNCTION(0, 156), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO156"), + MTK_FUNCTION(1, "MSDC0_DAT2"), + MTK_FUNCTION(2, "NLD10") + ), + MTK_PIN( + 157, "GPIO157", + MTK_EINT_FUNCTION(0, 157), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO157"), + MTK_FUNCTION(1, "MSDC0_DAT3"), + MTK_FUNCTION(2, "NLD11") + ), + MTK_PIN( + 158, "GPIO158", + MTK_EINT_FUNCTION(0, 158), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO158"), + MTK_FUNCTION(1, "MSDC0_DAT4"), + MTK_FUNCTION(2, "NLD12") + ), + MTK_PIN( + 159, "GPIO159", + MTK_EINT_FUNCTION(0, 159), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO159"), + MTK_FUNCTION(1, "MSDC0_DAT5"), + MTK_FUNCTION(2, "NLD13") + ), + MTK_PIN( + 160, "GPIO160", + MTK_EINT_FUNCTION(0, 160), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO160"), + MTK_FUNCTION(1, "MSDC0_DAT6"), + MTK_FUNCTION(2, "NLD14") + ), + MTK_PIN( + 161, "GPIO161", + MTK_EINT_FUNCTION(0, 161), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO161"), + MTK_FUNCTION(1, "MSDC0_DAT7"), + MTK_FUNCTION(2, "NLD15") + ), + MTK_PIN( + 162, "GPIO162", + MTK_EINT_FUNCTION(0, 162), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO162"), + MTK_FUNCTION(1, "MSDC0_CMD") + ), + MTK_PIN( + 163, "GPIO163", + MTK_EINT_FUNCTION(0, 163), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO163"), + MTK_FUNCTION(1, "MSDC0_CLK") + ), + MTK_PIN( + 164, "GPIO164", + MTK_EINT_FUNCTION(0, 164), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO164"), + MTK_FUNCTION(1, "MSDC0_DSL") + ), + MTK_PIN( + 165, "GPIO165", + MTK_EINT_FUNCTION(0, 165), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO165"), + MTK_FUNCTION(1, "MSDC0_RSTB") + ), + MTK_PIN( + 166, "GPIO166", + MTK_EINT_FUNCTION(0, 166), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO166"), + MTK_FUNCTION(1, "SPI_CK_0"), + MTK_FUNCTION(3, "PWM0") + ), + MTK_PIN( + 167, "GPIO167", + MTK_EINT_FUNCTION(0, 167), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO167"), + MTK_FUNCTION(1, "SPI_MI_0"), + MTK_FUNCTION(3, "PWM1"), + MTK_FUNCTION(4, "SPI_MO_0") + ), + MTK_PIN( + 168, "GPIO168", + MTK_EINT_FUNCTION(2, 168), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO168"), + MTK_FUNCTION(1, "SPI_MO_0"), + MTK_FUNCTION(2, "MD_EINT3"), + MTK_FUNCTION(3, "PWM2"), + MTK_FUNCTION(4, "SPI_MI_0") + ), + MTK_PIN( + 169, "GPIO169", + MTK_EINT_FUNCTION(2, 169), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO169"), + MTK_FUNCTION(1, "SPI_CS_0"), + MTK_FUNCTION(2, "MD_EINT4"), + MTK_FUNCTION(3, "PWM3") + ), + MTK_PIN( + 170, "GPIO170", + MTK_EINT_FUNCTION(0, 170), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO170"), + MTK_FUNCTION(1, "MSDC1_CMD") + ), + MTK_PIN( + 171, "GPIO171", + MTK_EINT_FUNCTION(0, 171), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO171"), + MTK_FUNCTION(1, "MSDC1_DAT0") + ), + MTK_PIN( + 172, "GPIO172", + MTK_EINT_FUNCTION(0, 172), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO172"), + MTK_FUNCTION(1, "MSDC1_DAT1") + ), + MTK_PIN( + 173, "GPIO173", + MTK_EINT_FUNCTION(0, 173), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO173"), + MTK_FUNCTION(1, "MSDC1_DAT2") + ), + MTK_PIN( + 174, "GPIO174", + MTK_EINT_FUNCTION(0, 174), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO174"), + MTK_FUNCTION(1, "MSDC1_DAT3") + ), + MTK_PIN( + 175, "GPIO175", + MTK_EINT_FUNCTION(0, 175), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO175"), + MTK_FUNCTION(1, "MSDC1_CLK") + ), + MTK_PIN( + 176, 
"GPIO176", + MTK_EINT_FUNCTION(0, 176), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO176"), + MTK_FUNCTION(1, "PWRAP_SPIMI"), + MTK_FUNCTION(2, "PWRAP_SPIMO") + ), + MTK_PIN( + 177, "GPIO177", + MTK_EINT_FUNCTION(0, 177), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO177"), + MTK_FUNCTION(1, "PWRAP_SPIMO"), + MTK_FUNCTION(2, "PWRAP_SPIMI") + ), + MTK_PIN( + 178, "GPIO178", + MTK_EINT_FUNCTION(0, 178), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO178"), + MTK_FUNCTION(1, "PWRAP_SPICK") + ), + MTK_PIN( + 179, "GPIO179", + MTK_EINT_FUNCTION(0, 179), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO179"), + MTK_FUNCTION(1, "PWRAP_SPICS") + ), + MTK_PIN( + 180, "GPIO180", + MTK_EINT_FUNCTION(0, 180), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO180"), + MTK_FUNCTION(1, "AUD_CLK_MOSI"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(3, "I2S2_WS"), + MTK_FUNCTION(4, "I2S0_WS") + ), + MTK_PIN( + 181, "GPIO181", + MTK_EINT_FUNCTION(0, 181), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO181"), + MTK_FUNCTION(1, "AUD_DAT_MISO_1"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "I2S0_BCK") + ), + MTK_PIN( + 182, "GPIO182", + MTK_EINT_FUNCTION(0, 182), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO182"), + MTK_FUNCTION(1, "AUD_DAT_MOSI_1"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(4, "I2S0_MCK") + ), + MTK_PIN( + 183, "GPIO183", + MTK_EINT_FUNCTION(0, 183), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO183"), + MTK_FUNCTION(1, "AUD_DAT_MISO_2"), + MTK_FUNCTION(2, "I2S1_DO_1"), + MTK_FUNCTION(3, "I2S2_DI_1"), + MTK_FUNCTION(4, "I2S0_DO") + ), + MTK_PIN( + 184, "GPIO184", + MTK_EINT_FUNCTION(0, 184), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO184"), + MTK_FUNCTION(1, "AUD_DAT_MOSI_2"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "I2S2_DI_2"), + MTK_FUNCTION(4, "I2S0_DI") + ), + MTK_PIN( + 185, "GPIO185", + MTK_EINT_FUNCTION(0, 185), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO185"), + MTK_FUNCTION(1, "RTC32K_CK") + ), + MTK_PIN( + 186, "GPIO186", + MTK_EINT_FUNCTION(0, 186), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO186"), + MTK_FUNCTION(1, "DISP_PWM0"), + MTK_FUNCTION(2, "DISP_PWM1") + ), + MTK_PIN( + 187, "GPIO187", + MTK_EINT_FUNCTION(0, 187), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO187"), + MTK_FUNCTION(1, "SRCLKENAI") + ), + MTK_PIN( + 188, "GPIO188", + MTK_EINT_FUNCTION(0, 188), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO188"), + MTK_FUNCTION(1, "SRCLKENAI2") + ), + MTK_PIN( + 189, "GPIO189", + MTK_EINT_FUNCTION(0, 189), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO189"), + MTK_FUNCTION(1, "SRCLKENA0") + ), + MTK_PIN( + 190, "GPIO190", + MTK_EINT_FUNCTION(0, 190), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO190"), + MTK_FUNCTION(1, "SRCLKENA1") + ), + MTK_PIN( + 191, "GPIO191", + MTK_EINT_FUNCTION(0, 191), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO191"), + MTK_FUNCTION(1, "WATCHDOG_AO") + ), + MTK_PIN( + 192, "GPIO192", + MTK_EINT_FUNCTION(0, 192), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO192"), + MTK_FUNCTION(1, "I2S0_WS"), + MTK_FUNCTION(2, "I2S1_WS"), + MTK_FUNCTION(3, "I2S2_WS"), + MTK_FUNCTION(4, "NCEB1") + ), + MTK_PIN( + 193, "GPIO193", + MTK_EINT_FUNCTION(0, 193), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO193"), + MTK_FUNCTION(1, "I2S0_BCK"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "NRNB1") + ), + MTK_PIN( + 194, "GPIO194", + MTK_EINT_FUNCTION(0, 194), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO194"), + MTK_FUNCTION(1, "I2S0_MCK"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S2_MCK") + ), + MTK_PIN( + 195, "GPIO195", + MTK_EINT_FUNCTION(0, 195), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO195"), + MTK_FUNCTION(1, "I2S0_DO"), + MTK_FUNCTION(2, 
"I2S1_DO_1"), + MTK_FUNCTION(3, "I2S2_DI_1") + ), + MTK_PIN( + 196, "GPIO196", + MTK_EINT_FUNCTION(0, 196), + DRV_GRP2, + MTK_FUNCTION(0, "GPIO196"), + MTK_FUNCTION(1, "I2S0_DI"), + MTK_FUNCTION(2, "I2S1_DO_2"), + MTK_FUNCTION(3, "I2S2_DI_2") + ), +}; + +#endif /* __PINCTRL_MTK_MT6795_H */ diff --git a/drivers/pinctrl/meson/pinctrl-meson-s4.c b/drivers/pinctrl/meson/pinctrl-meson-s4.c index 3c7358f53302..cea77864b880 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-s4.c +++ b/drivers/pinctrl/meson/pinctrl-meson-s4.c @@ -575,6 +575,7 @@ static struct meson_pmx_group meson_s4_periphs_groups[] = { GROUP(tdm_d2_c, 4), GROUP(tdm_d3_c, 4), GROUP(tdm_fs1_c, 4), + GROUP(tdm_sclk1_c, 4), GROUP(mclk_1_c, 4), GROUP(tdm_d4_c, 4), GROUP(tdm_d5_c, 4), @@ -936,7 +937,7 @@ static const char * const iso7816_groups[] = { }; static const char * const tdm_groups[] = { - "tdm_d2_c", "tdm_d3_c", "tdm_fs1_c", "tdm_d4_c", "tdm_d5_c", + "tdm_d2_c", "tdm_d3_c", "tdm_fs1_c", "tdm_d4_c", "tdm_d5_c", "tdm_sclk1_c", "tdm_fs1_d", "tdm_d4_d", "tdm_d3_d", "tdm_d2_d", "tdm_sclk1_d", "tdm_sclk1_h", "tdm_fs1_h", "tdm_d2_h", "tdm_d3_h", "tdm_d4_h", "tdm_d1", "tdm_d0", "tdm_fs0", "tdm_sclk0", "tdm_fs2", "tdm_sclk2", diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig index 0d12894d3ee1..aa5883f09d7b 100644 --- a/drivers/pinctrl/mvebu/Kconfig +++ b/drivers/pinctrl/mvebu/Kconfig @@ -45,6 +45,10 @@ config PINCTRL_ORION bool select PINCTRL_MVEBU +config PINCTRL_AC5 + bool + select PINCTRL_MVEBU + config PINCTRL_ARMADA_37XX bool select GENERIC_PINCONF diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile index cd082dca4482..23458ab17c53 100644 --- a/drivers/pinctrl/mvebu/Makefile +++ b/drivers/pinctrl/mvebu/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_PINCTRL_ARMADA_CP110) += pinctrl-armada-cp110.o obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o obj-$(CONFIG_PINCTRL_ARMADA_37XX) += pinctrl-armada-37xx.o obj-$(CONFIG_PINCTRL_ORION) += pinctrl-orion.o +obj-$(CONFIG_PINCTRL_AC5) += pinctrl-ac5.o diff --git a/drivers/pinctrl/mvebu/pinctrl-ac5.c b/drivers/pinctrl/mvebu/pinctrl-ac5.c new file mode 100644 index 000000000000..292633e61129 --- /dev/null +++ b/drivers/pinctrl/mvebu/pinctrl-ac5.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Marvell ac5 pinctrl driver based on mvebu pinctrl core + * + * Copyright (C) 2021 Marvell + * + * Noam Liron <lnoam@marvell.com> + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-mvebu.h" + +static struct mvebu_mpp_mode ac5_mpp_modes[] = { + MPP_MODE(0, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d0"), + MPP_FUNCTION(2, "nand", "io4")), + MPP_MODE(1, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d1"), + MPP_FUNCTION(2, "nand", "io3")), + MPP_MODE(2, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d2"), + MPP_FUNCTION(2, "nand", "io2")), + MPP_MODE(3, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d3"), + MPP_FUNCTION(2, "nand", "io7")), + MPP_MODE(4, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d4"), + MPP_FUNCTION(2, "nand", "io6"), + MPP_FUNCTION(3, "uart3", "txd"), + MPP_FUNCTION(4, "uart2", "txd")), + MPP_MODE(5, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d5"), + MPP_FUNCTION(2, "nand", "io5"), + MPP_FUNCTION(3, "uart3", "rxd"), + MPP_FUNCTION(4, "uart2", "rxd")), + 
MPP_MODE(6, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d6"), + MPP_FUNCTION(2, "nand", "io0"), + MPP_FUNCTION(3, "i2c1", "sck")), + MPP_MODE(7, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "d7"), + MPP_FUNCTION(2, "nand", "io1"), + MPP_FUNCTION(3, "i2c1", "sda")), + MPP_MODE(8, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "clk"), + MPP_FUNCTION(2, "nand", "wen")), + MPP_MODE(9, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "cmd"), + MPP_FUNCTION(2, "nand", "ale")), + MPP_MODE(10, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "ds"), + MPP_FUNCTION(2, "nand", "cle")), + MPP_MODE(11, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "sdio", "rst"), + MPP_FUNCTION(2, "nand", "cen")), + MPP_MODE(12, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "clk")), + MPP_MODE(13, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "csn")), + MPP_MODE(14, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "mosi")), + MPP_MODE(15, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "miso")), + MPP_MODE(16, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "wpn"), + MPP_FUNCTION(2, "nand", "ren"), + MPP_FUNCTION(3, "uart1", "txd")), + MPP_MODE(17, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "spi0", "hold"), + MPP_FUNCTION(2, "nand", "rb"), + MPP_FUNCTION(3, "uart1", "rxd")), + MPP_MODE(18, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "tsen_int", NULL), + MPP_FUNCTION(2, "uart2", "rxd"), + MPP_FUNCTION(3, "wd_int", NULL)), + MPP_MODE(19, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "dev_init_done", NULL), + MPP_FUNCTION(2, "uart2", "txd")), + MPP_MODE(20, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(2, "i2c1", "sck"), + MPP_FUNCTION(3, "spi1", "clk"), + MPP_FUNCTION(4, "uart3", "txd")), + MPP_MODE(21, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(2, "i2c1", "sda"), + MPP_FUNCTION(3, "spi1", "csn"), + MPP_FUNCTION(4, "uart3", "rxd")), + MPP_MODE(22, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(3, "spi1", "mosi")), + MPP_MODE(23, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(3, "spi1", "miso")), + MPP_MODE(24, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "wd_int", NULL), + MPP_FUNCTION(2, "uart2", "txd"), + MPP_FUNCTION(3, "uartsd", "txd")), + MPP_MODE(25, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "int_out", NULL), + MPP_FUNCTION(2, "uart2", "rxd"), + MPP_FUNCTION(3, "uartsd", "rxd")), + MPP_MODE(26, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "i2c0", "sck"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "uart3", "txd")), + MPP_MODE(27, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "i2c0", "sda"), + MPP_FUNCTION(2, "ptp", "pulse"), + MPP_FUNCTION(3, "uart3", "rxd")), + MPP_MODE(28, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "uart3", "txd")), + MPP_MODE(29, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "uart3", "rxd")), + MPP_MODE(30, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "ge", "mdio")), + MPP_MODE(31, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "xg", "mdio"), + MPP_FUNCTION(2, "ge", "mdio"), + MPP_FUNCTION(3, "ge", "mdio")), + MPP_MODE(32, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "uart0", "txd")), + MPP_MODE(33, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "uart0", "rxd"), + MPP_FUNCTION(2, 
"ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse")), + MPP_MODE(34, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ge", "mdio"), + MPP_FUNCTION(2, "uart3", "rxd")), + MPP_MODE(35, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ge", "mdio"), + MPP_FUNCTION(2, "uart3", "txd"), + MPP_FUNCTION(3, "pcie", "rstoutn")), + MPP_MODE(36, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "clk0_tp"), + MPP_FUNCTION(2, "ptp", "clk1_tp")), + MPP_MODE(37, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "pulse_tp"), + MPP_FUNCTION(2, "wd_int", NULL)), + MPP_MODE(38, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "synce", "clk_out0")), + MPP_MODE(39, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "synce", "clk_out1")), + MPP_MODE(40, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "pclk_out0"), + MPP_FUNCTION(2, "ptp", "pclk_out1")), + MPP_MODE(41, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "ref_clk"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse"), + MPP_FUNCTION(4, "uart2", "txd"), + MPP_FUNCTION(5, "i2c1", "sck")), + MPP_MODE(42, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "ptp", "clk0"), + MPP_FUNCTION(2, "ptp", "clk1"), + MPP_FUNCTION(3, "ptp", "pulse"), + MPP_FUNCTION(4, "uart2", "rxd"), + MPP_FUNCTION(5, "i2c1", "sda")), + MPP_MODE(43, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "clk")), + MPP_MODE(44, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "stb")), + MPP_MODE(45, + MPP_FUNCTION(0, "gpio", NULL), + MPP_FUNCTION(1, "led", "data")), +}; + +static struct mvebu_pinctrl_soc_info ac5_pinctrl_info; + +static const struct of_device_id ac5_pinctrl_of_match[] = { + { + .compatible = "marvell,ac5-pinctrl", + }, + { }, +}; + +static const struct mvebu_mpp_ctrl ac5_mpp_controls[] = { + MPP_FUNC_CTRL(0, 45, NULL, mvebu_mmio_mpp_ctrl), }; + +static struct pinctrl_gpio_range ac5_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 46), }; + +static int ac5_pinctrl_probe(struct platform_device *pdev) +{ + struct mvebu_pinctrl_soc_info *soc = &ac5_pinctrl_info; + + soc->variant = 0; /* no variants for ac5 */ + soc->controls = ac5_mpp_controls; + soc->ncontrols = ARRAY_SIZE(ac5_mpp_controls); + soc->gpioranges = ac5_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(ac5_mpp_gpio_ranges); + soc->modes = ac5_mpp_modes; + soc->nmodes = ac5_mpp_controls[0].npins; + + pdev->dev.platform_data = soc; + + return mvebu_pinctrl_simple_mmio_probe(pdev); +} + +static struct platform_driver ac5_pinctrl_driver = { + .driver = { + .name = "ac5-pinctrl", + .of_match_table = of_match_ptr(ac5_pinctrl_of_match), + }, + .probe = ac5_pinctrl_probe, +}; +builtin_platform_driver(ac5_pinctrl_driver); diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index ef4118e49f16..a140b6bfbfaa 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -764,7 +764,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, for (i = 0; i < nr_irq_parent; i++) { int irq = irq_of_parse_and_map(np, i); - if (irq < 0) + if (!irq) continue; girq->parents[i] = irq; } diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c index 0b9b6cbfd10c..ac3d4d91266d 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c @@ -440,6 +440,10 @@ static const unsigned mc2_a_1_pins[] = { DB8500_PIN_A5, DB8500_PIN_B4, DB8500_PIN_C8, 
DB8500_PIN_A12, DB8500_PIN_C10, DB8500_PIN_B10, DB8500_PIN_B9, DB8500_PIN_A9, DB8500_PIN_C7, DB8500_PIN_A7, DB8500_PIN_C5 }; +/* MC2 without the feedback clock */ +static const unsigned mc2_a_2_pins[] = { DB8500_PIN_A5, DB8500_PIN_B4, + DB8500_PIN_A12, DB8500_PIN_C10, DB8500_PIN_B10, DB8500_PIN_B9, + DB8500_PIN_A9, DB8500_PIN_C7, DB8500_PIN_A7, DB8500_PIN_C5 }; static const unsigned ssp1_a_1_pins[] = { DB8500_PIN_C9, DB8500_PIN_B11, DB8500_PIN_C12, DB8500_PIN_C11 }; static const unsigned ssp0_a_1_pins[] = { DB8500_PIN_D12, DB8500_PIN_B13, @@ -699,6 +703,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = { DB8500_PIN_GROUP(kp_a_1, NMK_GPIO_ALT_A), DB8500_PIN_GROUP(kpskaskb_a_1, NMK_GPIO_ALT_A), DB8500_PIN_GROUP(mc2_a_1, NMK_GPIO_ALT_A), + DB8500_PIN_GROUP(mc2_a_2, NMK_GPIO_ALT_A), DB8500_PIN_GROUP(ssp1_a_1, NMK_GPIO_ALT_A), DB8500_PIN_GROUP(ssp0_a_1, NMK_GPIO_ALT_A), DB8500_PIN_GROUP(i2c0_a_1, NMK_GPIO_ALT_A), @@ -856,7 +861,7 @@ DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1", "lcd_d8_d11_a_1", "lcd_d12_d15_a_1", "lcd_d12_d23_a_1", "lcd_b_1", "lcd_d16_d23_b_1"); DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_a_2", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1"); -DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1"); +DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2_a_2", "mc2rstn_c_1"); DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1"); DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); DB8500_FUNC_GROUPS(i2c0, "i2c0_a_1"); diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 4757bf964d3c..640e50d94f27 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -1113,6 +1113,7 @@ static int nmk_gpio_probe(struct platform_device *dev) spin_lock_init(&nmk_chip->lock); chip = &nmk_chip->chip; + chip->parent = &dev->dev; chip->request = gpiochip_generic_request; chip->free = gpiochip_generic_free; chip->get_direction = nmk_gpio_get_dir; @@ -1154,7 +1155,6 @@ static int nmk_gpio_probe(struct platform_device *dev) clk_enable(nmk_chip->clk); nmk_chip->lowemi = readl_relaxed(nmk_chip->addr + NMK_GPIO_LOWEMI); clk_disable(nmk_chip->clk); - chip->of_node = np; ret = gpiochip_add_data(chip, nmk_chip); if (ret) diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c index 4828aa25e5c9..64d8a568b3db 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c @@ -1898,9 +1898,9 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl) } ret = irq_of_parse_and_map(np, 0); - if (ret < 0) { + if (!ret) { dev_err(dev, "No IRQ for GPIO bank %u\n", id); - return ret; + return -EINVAL; } pctrl->gpio_bank[id].irq = ret; pctrl->gpio_bank[id].irq_chip = npcmgpio_irqchip; diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c index 5e610849dfc3..2490384ef1b8 100644 --- a/drivers/pinctrl/pinctrl-apple-gpio.c +++ b/drivers/pinctrl/pinctrl-apple-gpio.c @@ -71,6 +71,7 @@ struct regmap_config regmap_config = { .max_register = 512 * sizeof(u32), .num_reg_defaults_raw = 512, .use_relaxed_mmio = true, + .use_raw_spinlock = true, }; /* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. 
*/ @@ -509,6 +510,7 @@ static const struct of_device_id apple_gpio_pinctrl_of_match[] = { { .compatible = "apple,pinctrl", }, { } }; +MODULE_DEVICE_TABLE(of, apple_gpio_pinctrl_of_match); static struct platform_driver apple_gpio_pinctrl_driver = { .driver = { diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c index 3f0143087cc7..99cf24eb67ae 100644 --- a/drivers/pinctrl/pinctrl-equilibrium.c +++ b/drivers/pinctrl/pinctrl-equilibrium.c @@ -11,6 +11,7 @@ #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> +#include <linux/property.h> #include "core.h" #include "pinconf.h" @@ -167,11 +168,9 @@ static int gpiochip_setup(struct device *dev, struct eqbr_gpio_ctrl *gctrl) gc = &gctrl->chip; gc->label = gctrl->name; -#if defined(CONFIG_OF_GPIO) - gc->of_node = gctrl->node; -#endif + gc->fwnode = gctrl->fwnode; - if (!of_property_read_bool(gctrl->node, "interrupt-controller")) { + if (!fwnode_property_read_bool(gctrl->fwnode, "interrupt-controller")) { dev_dbg(dev, "gc %s: doesn't act as interrupt controller!\n", gctrl->name); return 0; @@ -209,7 +208,7 @@ static int gpiolib_reg(struct eqbr_pinctrl_drv_data *drvdata) for (i = 0; i < drvdata->nr_gpio_ctrls; i++) { gctrl = drvdata->gpio_ctrls + i; - np = gctrl->node; + np = to_of_node(gctrl->fwnode); gctrl->name = devm_kasprintf(dev, GFP_KERNEL, "gpiochip%d", i); if (!gctrl->name) @@ -895,7 +894,7 @@ static int pinbank_probe(struct eqbr_pinctrl_drv_data *drvdata) pinbank_init(np_gpio, drvdata, banks + i, i); - gctrls[i].node = np_gpio; + gctrls[i].fwnode = of_fwnode_handle(np_gpio); gctrls[i].bank = banks + i; i++; } diff --git a/drivers/pinctrl/pinctrl-equilibrium.h b/drivers/pinctrl/pinctrl-equilibrium.h index 83cb7dafc657..0c635a5b79f0 100644 --- a/drivers/pinctrl/pinctrl-equilibrium.h +++ b/drivers/pinctrl/pinctrl-equilibrium.h @@ -95,22 +95,24 @@ struct eqbr_pin_bank { u32 aval_pinmap; }; +struct fwnode_handle; + /** * struct eqbr_gpio_ctrl: represent a gpio controller. - * @node: device node of gpio controller. + * @chip: gpio chip. + * @fwnode: firmware node of gpio controller. * @bank: pointer to corresponding pin bank. * @membase: base address of the gpio controller. - * @chip: gpio chip. * @ic: irq chip. * @name: gpio chip name. * @virq: irq number of the gpio chip to parent's irq domain. * @lock: spin lock to protect gpio register write. 
*/ struct eqbr_gpio_ctrl { - struct device_node *node; + struct gpio_chip chip; + struct fwnode_handle *fwnode; struct eqbr_pin_bank *bank; void __iomem *membase; - struct gpio_chip chip; struct irq_chip ic; const char *name; unsigned int virq; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index fa6becca1788..1ca11616db74 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -139,6 +139,30 @@ struct ingenic_gpio_chip { unsigned int irq, reg_base; }; +static const unsigned long enabled_socs = + IS_ENABLED(CONFIG_MACH_JZ4730) << ID_JZ4730 | + IS_ENABLED(CONFIG_MACH_JZ4740) << ID_JZ4740 | + IS_ENABLED(CONFIG_MACH_JZ4725B) << ID_JZ4725B | + IS_ENABLED(CONFIG_MACH_JZ4750) << ID_JZ4750 | + IS_ENABLED(CONFIG_MACH_JZ4755) << ID_JZ4755 | + IS_ENABLED(CONFIG_MACH_JZ4760) << ID_JZ4760 | + IS_ENABLED(CONFIG_MACH_JZ4770) << ID_JZ4770 | + IS_ENABLED(CONFIG_MACH_JZ4775) << ID_JZ4775 | + IS_ENABLED(CONFIG_MACH_JZ4780) << ID_JZ4780 | + IS_ENABLED(CONFIG_MACH_X1000) << ID_X1000 | + IS_ENABLED(CONFIG_MACH_X1500) << ID_X1500 | + IS_ENABLED(CONFIG_MACH_X1830) << ID_X1830 | + IS_ENABLED(CONFIG_MACH_X2000) << ID_X2000 | + IS_ENABLED(CONFIG_MACH_X2100) << ID_X2100; + +static bool +is_soc_or_above(const struct ingenic_pinctrl *jzpc, enum jz_version version) +{ + return (enabled_socs >> version) && + (!(enabled_socs & GENMASK(version - 1, 0)) + || jzpc->info->version >= version); +} + static const u32 jz4730_pull_ups[4] = { 0x3fa3320f, 0xf200ffff, 0xffffffff, 0xffffffff, }; @@ -3242,7 +3266,7 @@ static u32 ingenic_gpio_read_reg(struct ingenic_gpio_chip *jzgc, u8 reg) static void ingenic_gpio_set_bit(struct ingenic_gpio_chip *jzgc, u8 reg, u8 offset, bool set) { - if (jzgc->jzpc->info->version == ID_JZ4730) { + if (!is_soc_or_above(jzgc->jzpc, ID_JZ4740)) { regmap_update_bits(jzgc->jzpc->map, jzgc->reg_base + reg, BIT(offset), set ? 
BIT(offset) : 0); return; @@ -3300,9 +3324,9 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc, static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value) { - if (jzgc->jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value); - else if (jzgc->jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_DATA, offset, !!value); @@ -3337,10 +3361,10 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc, break; } - if (jzgc->jzpc->info->version >= ID_JZ4770) { + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) { reg1 = JZ4770_GPIO_PAT1; reg2 = JZ4770_GPIO_PAT0; - } else if (jzgc->jzpc->info->version >= ID_JZ4740) { + } else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) { reg1 = JZ4740_GPIO_TRIG; reg2 = JZ4740_GPIO_DIR; } else { @@ -3350,12 +3374,12 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc, return; } - if (jzgc->jzpc->info->version >= ID_X2000) { + if (is_soc_or_above(jzgc->jzpc, ID_X2000)) { ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1); ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2); ingenic_gpio_shadow_set_bit_load(jzgc); ingenic_gpio_set_bit(jzgc, X2000_GPIO_EDG, offset, val3); - } else if (jzgc->jzpc->info->version >= ID_X1000) { + } else if (is_soc_or_above(jzgc->jzpc, ID_X1000)) { ingenic_gpio_shadow_set_bit(jzgc, reg2, offset, val1); ingenic_gpio_shadow_set_bit(jzgc, reg1, offset, val2); ingenic_gpio_shadow_set_bit_load(jzgc); @@ -3371,7 +3395,7 @@ static void ingenic_gpio_irq_mask(struct irq_data *irqd) struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); int irq = irqd->hwirq; - if (jzgc->jzpc->info->version >= ID_JZ4740) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, true); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, true); @@ -3383,7 +3407,7 @@ static void ingenic_gpio_irq_unmask(struct irq_data *irqd) struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); int irq = irqd->hwirq; - if (jzgc->jzpc->info->version >= ID_JZ4740) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, GPIO_MSK, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIMR, irq, false); @@ -3395,9 +3419,9 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd) struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); int irq = irqd->hwirq; - if (jzgc->jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true); - else if (jzgc->jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, true); @@ -3413,9 +3437,9 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd) ingenic_gpio_irq_mask(irqd); - if (jzgc->jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false); - else if (jzgc->jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPIER, irq, false); @@ -3429,7 +3453,7 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) bool high; if ((irqd_get_trigger_type(irqd) == IRQ_TYPE_EDGE_BOTH) && - 
(jzgc->jzpc->info->version < ID_X2000)) { + !is_soc_or_above(jzgc->jzpc, ID_X2000)) { /* * Switch to an interrupt for the opposite edge to the one that * triggered the interrupt being ACKed. @@ -3441,9 +3465,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH); } - if (jzgc->jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false); - else if (jzgc->jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true); else ingenic_gpio_set_bit(jzgc, JZ4730_GPIO_GPFR, irq, false); @@ -3468,7 +3492,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) irq_set_handler_locked(irqd, handle_bad_irq); } - if ((type == IRQ_TYPE_EDGE_BOTH) && (jzgc->jzpc->info->version < ID_X2000)) { + if ((type == IRQ_TYPE_EDGE_BOTH) && !is_soc_or_above(jzgc->jzpc, ID_X2000)) { /* * The hardware does not support interrupts on both edges. The * best we can do is to set up a single-edge interrupt and then @@ -3500,9 +3524,9 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc) chained_irq_enter(irq_chip, desc); - if (jzgc->jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzgc->jzpc, ID_JZ4770)) flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG); - else if (jzgc->jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzgc->jzpc, ID_JZ4740)) flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG); else flag = ingenic_gpio_read_reg(jzgc, JZ4730_GPIO_GPFR); @@ -3547,14 +3571,14 @@ static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc, unsigned int offt = pin / PINS_PER_GPIO_CHIP; if (set) { - if (jzpc->info->version >= ID_JZ4740) + if (is_soc_or_above(jzpc, ID_JZ4740)) regmap_write(jzpc->map, offt * jzpc->info->reg_offset + REG_SET(reg), BIT(idx)); else regmap_set_bits(jzpc->map, offt * jzpc->info->reg_offset + reg, BIT(idx)); } else { - if (jzpc->info->version >= ID_JZ4740) + if (is_soc_or_above(jzpc, ID_JZ4740)) regmap_write(jzpc->map, offt * jzpc->info->reg_offset + REG_CLEAR(reg), BIT(idx)); else @@ -3613,12 +3637,12 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) struct ingenic_pinctrl *jzpc = jzgc->jzpc; unsigned int pin = gc->base + offset; - if (jzpc->info->version >= ID_JZ4770) { + if (is_soc_or_above(jzpc, ID_JZ4770)) { if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) || ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; - } else if (jzpc->info->version == ID_JZ4730) { + } else if (!is_soc_or_above(jzpc, ID_JZ4740)) { if (!ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPDIR)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; @@ -3669,18 +3693,18 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc, dev_dbg(jzpc->dev, "set pin P%c%u to function %u\n", 'A' + offt, idx, func); - if (jzpc->info->version >= ID_X1000) { + if (is_soc_or_above(jzpc, ID_X1000)) { ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false); ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2); ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->info->version >= ID_JZ4770) { + } else if (is_soc_or_above(jzpc, ID_JZ4770)) { ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); 
ingenic_config_pin(jzpc, pin, GPIO_MSK, false); ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2); ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1); - } else if (jzpc->info->version >= ID_JZ4740) { + } else if (is_soc_or_above(jzpc, ID_JZ4740)) { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1); @@ -3738,16 +3762,16 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, dev_dbg(pctldev->dev, "set pin P%c%u to %sput\n", 'A' + offt, idx, input ? "in" : "out"); - if (jzpc->info->version >= ID_X1000) { + if (is_soc_or_above(jzpc, ID_X1000)) { ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true); ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->info->version >= ID_JZ4770) { + } else if (is_soc_or_above(jzpc, ID_JZ4770)) { ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_config_pin(jzpc, pin, GPIO_MSK, true); ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); - } else if (jzpc->info->version >= ID_JZ4740) { + } else if (is_soc_or_above(jzpc, ID_JZ4740)) { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false); @@ -3779,7 +3803,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, unsigned int bias, reg; bool pull, pullup, pulldown; - if (jzpc->info->version >= ID_X2000) { + if (is_soc_or_above(jzpc, ID_X2000)) { pullup = ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) && !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPD) && (jzpc->info->pull_ups[offt] & BIT(idx)); @@ -3787,7 +3811,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, !ingenic_get_pin_config(jzpc, pin, X2000_GPIO_PEPU) && (jzpc->info->pull_downs[offt] & BIT(idx)); - } else if (jzpc->info->version >= ID_X1830) { + } else if (is_soc_or_above(jzpc, ID_X1830)) { unsigned int half = PINS_PER_GPIO_CHIP / 2; unsigned int idxh = (pin % half) * 2; @@ -3804,9 +3828,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, pulldown = (bias == GPIO_PULL_DOWN) && (jzpc->info->pull_downs[offt] & BIT(idx)); } else { - if (jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzpc, ID_JZ4770)) pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN); - else if (jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzpc, ID_JZ4740)) pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS); else pull = ingenic_get_pin_config(jzpc, pin, JZ4730_GPIO_GPPUR); @@ -3835,9 +3859,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (jzpc->info->version >= ID_X2000) + if (is_soc_or_above(jzpc, ID_X2000)) reg = X2000_GPIO_SMT; - else if (jzpc->info->version >= ID_X1830) + else if (is_soc_or_above(jzpc, ID_X1830)) reg = X1830_GPIO_SMT; else return -EINVAL; @@ -3846,9 +3870,9 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_SLEW_RATE: - if (jzpc->info->version >= ID_X2000) + if (is_soc_or_above(jzpc, ID_X2000)) reg = X2000_GPIO_SR; - else if (jzpc->info->version >= ID_X1830) + else if (is_soc_or_above(jzpc, ID_X1830)) reg = X1830_GPIO_SR; else return -EINVAL; @@ -3867,7 +3891,7 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, static void ingenic_set_bias(struct 
ingenic_pinctrl *jzpc, unsigned int pin, unsigned int bias) { - if (jzpc->info->version >= ID_X2000) { + if (is_soc_or_above(jzpc, ID_X2000)) { switch (bias) { case GPIO_PULL_UP: ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false); @@ -3885,7 +3909,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, ingenic_config_pin(jzpc, pin, X2000_GPIO_PEPD, false); } - } else if (jzpc->info->version >= ID_X1830) { + } else if (is_soc_or_above(jzpc, ID_X1830)) { unsigned int idx = pin % PINS_PER_GPIO_CHIP; unsigned int half = PINS_PER_GPIO_CHIP / 2; unsigned int idxh = (pin % half) * 2; @@ -3903,9 +3927,9 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, REG_SET(X1830_GPIO_PEH), bias << idxh); } - } else if (jzpc->info->version >= ID_JZ4770) { + } else if (is_soc_or_above(jzpc, ID_JZ4770)) { ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias); - } else if (jzpc->info->version >= ID_JZ4740) { + } else if (is_soc_or_above(jzpc, ID_JZ4740)) { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias); } else { ingenic_config_pin(jzpc, pin, JZ4730_GPIO_GPPUR, bias); @@ -3915,7 +3939,7 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc, unsigned int pin, bool enable) { - if (jzpc->info->version >= ID_X2000) + if (is_soc_or_above(jzpc, ID_X2000)) ingenic_config_pin(jzpc, pin, X2000_GPIO_SMT, enable); else ingenic_config_pin(jzpc, pin, X1830_GPIO_SMT, enable); @@ -3924,9 +3948,9 @@ static void ingenic_set_schmitt_trigger(struct ingenic_pinctrl *jzpc, static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc, unsigned int pin, bool high) { - if (jzpc->info->version >= ID_JZ4770) + if (is_soc_or_above(jzpc, ID_JZ4770)) ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high); - else if (jzpc->info->version >= ID_JZ4740) + else if (is_soc_or_above(jzpc, ID_JZ4740)) ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high); else ingenic_config_pin(jzpc, pin, JZ4730_GPIO_DATA, high); @@ -3935,7 +3959,7 @@ static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc, static void ingenic_set_slew_rate(struct ingenic_pinctrl *jzpc, unsigned int pin, unsigned int slew) { - if (jzpc->info->version >= ID_X2000) + if (is_soc_or_above(jzpc, ID_X2000)) ingenic_config_pin(jzpc, pin, X2000_GPIO_SR, slew); else ingenic_config_pin(jzpc, pin, X1830_GPIO_SR, slew); @@ -3991,7 +4015,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (jzpc->info->version < ID_X1830) + if (!is_soc_or_above(jzpc, ID_X1830)) return -EINVAL; ingenic_set_schmitt_trigger(jzpc, pin, arg); @@ -4006,7 +4030,7 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break; case PIN_CONFIG_SLEW_RATE: - if (jzpc->info->version < ID_X1830) + if (!is_soc_or_above(jzpc, ID_X1830)) return -EINVAL; ingenic_set_slew_rate(jzpc, pin, arg); diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c index 1ee94574f0af..ab723ab4ec1d 100644 --- a/drivers/pinctrl/pinctrl-max77620.c +++ b/drivers/pinctrl/pinctrl-max77620.c @@ -668,5 +668,4 @@ module_platform_driver(max77620_pinctrl_driver); MODULE_DESCRIPTION("MAX77620/MAX20024 pin control driver"); MODULE_AUTHOR("Chaitanya Bandi<bandik@nvidia.com>"); MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>"); -MODULE_ALIAS("platform:max77620-pinctrl"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index 
80a8939ad0c0..6f55bf7d5e05 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -688,11 +688,17 @@ static void microchip_sgpio_irq_setreg(struct irq_data *data, static void microchip_sgpio_irq_mask(struct irq_data *data) { + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, true); + gpiochip_disable_irq(chip, data->hwirq); } static void microchip_sgpio_irq_unmask(struct irq_data *data) { + struct gpio_chip *chip = irq_data_get_irq_chip_data(data); + + gpiochip_enable_irq(chip, data->hwirq); microchip_sgpio_irq_setreg(data, REG_INT_ENABLE, false); } @@ -746,6 +752,8 @@ static const struct irq_chip microchip_sgpio_irqchip = { .irq_ack = microchip_sgpio_irq_ack, .irq_unmask = microchip_sgpio_irq_unmask, .irq_set_type = microchip_sgpio_irq_set_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void sgpio_irq_handler(struct irq_desc *desc) @@ -840,7 +848,7 @@ static int microchip_sgpio_register_bank(struct device *dev, gc = &bank->gpio; gc->label = pctl_desc->name; gc->parent = dev; - gc->of_node = to_of_node(fwnode); + gc->fwnode = fwnode; gc->owner = THIS_MODULE; gc->get_direction = microchip_sgpio_get_direction; gc->direction_input = microchip_sgpio_direction_input; @@ -861,11 +869,7 @@ static int microchip_sgpio_register_bank(struct device *dev, if (irq) { struct gpio_irq_chip *girq = &gc->irq; - girq->chip = devm_kmemdup(dev, &microchip_sgpio_irqchip, - sizeof(microchip_sgpio_irqchip), - GFP_KERNEL); - if (!girq->chip) - return -ENOMEM; + gpio_irq_chip_set_chip(girq, &microchip_sgpio_irqchip); girq->parent_handler = sgpio_irq_handler; girq->num_parents = 1; girq->parents = devm_kcalloc(dev, 1, diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 6a956ee94494..5f4a8c5c6650 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -19,6 +19,7 @@ #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> #include <linux/regmap.h> +#include <linux/reset.h> #include <linux/slab.h> #include "core.h" @@ -60,6 +61,7 @@ enum { FUNC_CAN0_a, FUNC_CAN0_b, FUNC_CAN1, + FUNC_CLKMON, FUNC_NONE, FUNC_FC0_a, FUNC_FC0_b, @@ -138,6 +140,8 @@ enum { FUNC_PTPSYNC_6, FUNC_PTPSYNC_7, FUNC_PWM, + FUNC_PWM_a, + FUNC_PWM_b, FUNC_QSPI1, FUNC_QSPI2, FUNC_R, @@ -184,6 +188,7 @@ static const char *const ocelot_function_names[] = { [FUNC_CAN0_a] = "can0_a", [FUNC_CAN0_b] = "can0_b", [FUNC_CAN1] = "can1", + [FUNC_CLKMON] = "clkmon", [FUNC_NONE] = "none", [FUNC_FC0_a] = "fc0_a", [FUNC_FC0_b] = "fc0_b", @@ -262,6 +267,8 @@ static const char *const ocelot_function_names[] = { [FUNC_PTPSYNC_6] = "ptpsync_6", [FUNC_PTPSYNC_7] = "ptpsync_7", [FUNC_PWM] = "pwm", + [FUNC_PWM_a] = "pwm_a", + [FUNC_PWM_b] = "pwm_b", [FUNC_QSPI1] = "qspi1", [FUNC_QSPI2] = "qspi2", [FUNC_R] = "reserved", @@ -977,11 +984,11 @@ LAN966X_P(23, GPIO, NONE, NONE, NONE, OB_TRG_a, NONE, NON LAN966X_P(24, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_IN_c, TACHO_a, R); LAN966X_P(25, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_OUT_c, SFP_SD, R); LAN966X_P(26, GPIO, FC0_b, IB_TRG_a, USB_S_c, OB_TRG_a, CAN0_a, SFP_SD, R); -LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, NONE, R); +LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, PWM_a, R); LAN966X_P(28, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, IRQ_OUT_c, SFP_SD, R); LAN966X_P(29, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, NONE, NONE, R); -LAN966X_P(30, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b,
NONE, R); -LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R); +LAN966X_P(30, GPIO, FC3_c, CAN1, CLKMON, OB_TRG, RECO_b, NONE, R); +LAN966X_P(31, GPIO, FC3_c, CAN1, CLKMON, OB_TRG, RECO_b, NONE, R); LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R); LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R); LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R); @@ -1001,7 +1008,7 @@ LAN966X_P(47, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD5, IRQ_IN LAN966X_P(48, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD6, IRQ_IN_a, R); LAN966X_P(49, GPIO, FC_SHRD7, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, IRQ_IN_a, R); LAN966X_P(50, GPIO, FC_SHRD16, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, NONE, R); -LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R); +LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, PWM_b, IRQ_IN_b, R); LAN966X_P(52, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TACHO_b, IRQ_IN_b, R); LAN966X_P(53, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R); LAN966X_P(54, GPIO, FC_SHRD8, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R); @@ -1908,6 +1915,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ocelot_pinctrl *info; + struct reset_control *reset; struct regmap *pincfg; void __iomem *base; int ret; @@ -1923,6 +1931,12 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) info->desc = (struct pinctrl_desc *)device_get_match_data(dev); + reset = devm_reset_control_get_optional_shared(dev, "switch"); + if (IS_ERR(reset)) + return dev_err_probe(dev, PTR_ERR(reset), + "Failed to get reset\n"); + reset_control_reset(reset); + base = devm_ioremap_resource(dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); if (IS_ERR(base)) diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 2cb79e649fcf..32e41395fc76 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -103,6 +103,25 @@ }, \ } +#define PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(id, pins, label, iom0, iom1, \ + iom2, iom3, pull0, pull1, \ + pull2, pull3) \ + { \ + .bank_num = id, \ + .nr_pins = pins, \ + .name = label, \ + .iomux = { \ + { .type = iom0, .offset = -1 }, \ + { .type = iom1, .offset = -1 }, \ + { .type = iom2, .offset = -1 }, \ + { .type = iom3, .offset = -1 }, \ + }, \ + .pull_type[0] = pull0, \ + .pull_type[1] = pull1, \ + .pull_type[2] = pull2, \ + .pull_type[3] = pull3, \ + } + #define PIN_BANK_DRV_FLAGS_PULL_FLAGS(id, pins, label, drv0, drv1, \ drv2, drv3, pull0, pull1, \ pull2, pull3) \ @@ -197,6 +216,9 @@ #define RK_MUXROUTE_PMU(ID, PIN, FUNC, REG, VAL) \ PIN_BANK_MUX_ROUTE_FLAGS(ID, PIN, FUNC, REG, VAL, ROCKCHIP_ROUTE_PMU) +#define RK3588_PIN_BANK_FLAGS(ID, PIN, LABEL, M, P) \ + PIN_BANK_IOMUX_FLAGS_PULL_FLAGS(ID, PIN, LABEL, M, M, M, M, P, P, P, P) + static struct regmap_config rockchip_regmap_config = { .reg_bits = 32, .val_bits = 32, @@ -837,6 +859,7 @@ static bool rockchip_get_mux_route(struct rockchip_pin_bank *bank, int pin, static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin) { struct rockchip_pinctrl *info = bank->drvdata; + struct rockchip_pin_ctrl *ctrl = info->ctrl; int iomux_num = (pin / 8); struct regmap *regmap; unsigned int val; @@ -878,6 +901,27 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin) if (bank->recalced_mask & BIT(pin)) rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask); + if (ctrl->type ==
RK3588) { + if (bank->bank_num == 0) { + if ((pin >= RK_PB4) && (pin <= RK_PD7)) { + u32 reg0 = 0; + + reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */ + ret = regmap_read(regmap, reg0, &val); + if (ret) + return ret; + + if (!(val & BIT(8))) + return ((val >> bit) & mask); + + reg = reg + 0x8000; /* BUS_IOC_BASE */ + regmap = info->regmap_base; + } + } else if (bank->bank_num > 0) { + reg += 0x8000; /* BUS_IOC_BASE */ + } + } + ret = regmap_read(regmap, reg, &val); if (ret) return ret; @@ -926,6 +970,7 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank, static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) { struct rockchip_pinctrl *info = bank->drvdata; + struct rockchip_pin_ctrl *ctrl = info->ctrl; struct device *dev = info->dev; int iomux_num = (pin / 8); struct regmap *regmap; @@ -966,6 +1011,46 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) if (bank->recalced_mask & BIT(pin)) rockchip_get_recalced_mux(bank, pin, &reg, &bit, &mask); + if (ctrl->type == RK3588) { + if (bank->bank_num == 0) { + if ((pin >= RK_PB4) && (pin <= RK_PD7)) { + if (mux < 8) { + reg += 0x4000 - 0xC; /* PMU2_IOC_BASE */ + data = (mask << (bit + 16)); + rmask = data | (data >> 16); + data |= (mux & mask) << bit; + ret = regmap_update_bits(regmap, reg, rmask, data); + } else { + u32 reg0 = 0; + + reg0 = reg + 0x4000 - 0xC; /* PMU2_IOC_BASE */ + data = (mask << (bit + 16)); + rmask = data | (data >> 16); + data |= 8 << bit; + ret = regmap_update_bits(regmap, reg0, rmask, data); + + reg0 = reg + 0x8000; /* BUS_IOC_BASE */ + data = (mask << (bit + 16)); + rmask = data | (data >> 16); + data |= mux << bit; + regmap = info->regmap_base; + ret |= regmap_update_bits(regmap, reg0, rmask, data); + } + } else { + data = (mask << (bit + 16)); + rmask = data | (data >> 16); + data |= (mux & mask) << bit; + ret = regmap_update_bits(regmap, reg, rmask, data); + } + return ret; + } else if (bank->bank_num > 0) { + reg += 0x8000; /* BUS_IOC_BASE */ + } + } + + if (mux > mask) + return -EINVAL; + if (bank->route_mask & BIT(pin)) { if (rockchip_get_mux_route(bank, pin, mux, &route_location, &route_reg, &route_val)) { @@ -1001,9 +1086,9 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux) #define PX30_PULL_PINS_PER_REG 8 #define PX30_PULL_BANK_STRIDE 16 -static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1023,6 +1108,8 @@ static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *reg += ((pin_num / PX30_PULL_PINS_PER_REG) * 4); *bit = (pin_num % PX30_PULL_PINS_PER_REG); *bit *= PX30_PULL_BITS_PER_PIN; + + return 0; } #define PX30_DRV_PMU_OFFSET 0x20 @@ -1031,9 +1118,9 @@ static void px30_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, #define PX30_DRV_PINS_PER_REG 8 #define PX30_DRV_BANK_STRIDE 16 -static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1053,6 +1140,8 @@ static void px30_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *reg += ((pin_num / PX30_DRV_PINS_PER_REG) * 4); *bit = (pin_num %
PX30_DRV_PINS_PER_REG); *bit *= PX30_DRV_BITS_PER_PIN; + + return 0; } #define PX30_SCHMITT_PMU_OFFSET 0x38 @@ -1092,9 +1181,9 @@ static int px30_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, #define RV1108_PULL_BITS_PER_PIN 2 #define RV1108_PULL_BANK_STRIDE 16 -static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1113,6 +1202,8 @@ static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *reg += ((pin_num / RV1108_PULL_PINS_PER_REG) * 4); *bit = (pin_num % RV1108_PULL_PINS_PER_REG); *bit *= RV1108_PULL_BITS_PER_PIN; + + return 0; } #define RV1108_DRV_PMU_OFFSET 0x20 @@ -1121,9 +1212,9 @@ static void rv1108_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, #define RV1108_DRV_PINS_PER_REG 8 #define RV1108_DRV_BANK_STRIDE 16 -static void rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1143,6 +1234,8 @@ static void rv1108_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *reg += ((pin_num / RV1108_DRV_PINS_PER_REG) * 4); *bit = pin_num % RV1108_DRV_PINS_PER_REG; *bit *= RV1108_DRV_BITS_PER_PIN; + + return 0; } #define RV1108_SCHMITT_PMU_OFFSET 0x30 @@ -1199,9 +1292,9 @@ static int rk3308_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, #define RK2928_PULL_PINS_PER_REG 16 #define RK2928_PULL_BANK_STRIDE 8 -static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1211,13 +1304,15 @@ static void rk2928_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *reg += (pin_num / RK2928_PULL_PINS_PER_REG) * 4; *bit = pin_num % RK2928_PULL_PINS_PER_REG; + + return 0; }; #define RK3128_PULL_OFFSET 0x118 -static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1227,6 +1322,8 @@ static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *reg += ((pin_num / RK2928_PULL_PINS_PER_REG) * 4); *bit = pin_num % RK2928_PULL_PINS_PER_REG; + + return 0; } #define RK3188_PULL_OFFSET 0x164 @@ -1235,9 +1332,9 @@ static void rk3128_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, #define RK3188_PULL_BANK_STRIDE 16 #define RK3188_PULL_PMU_OFFSET 0x64 -static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1267,12 +1364,14 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = 7 - (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= 
RK3188_PULL_BITS_PER_PIN; } + + return 0; } #define RK3288_PULL_OFFSET 0x140 -static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1296,6 +1395,8 @@ static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= RK3188_PULL_BITS_PER_PIN; } + + return 0; } #define RK3288_DRV_PMU_OFFSET 0x70 @@ -1304,9 +1405,9 @@ static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, #define RK3288_DRV_PINS_PER_REG 8 #define RK3288_DRV_BANK_STRIDE 16 -static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1330,13 +1431,15 @@ static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3288_DRV_PINS_PER_REG); *bit *= RK3288_DRV_BITS_PER_PIN; } + + return 0; } #define RK3228_PULL_OFFSET 0x100 -static void rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1347,13 +1450,15 @@ static void rk3228_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= RK3188_PULL_BITS_PER_PIN; + + return 0; } #define RK3228_DRV_GRF_OFFSET 0x200 -static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1364,13 +1469,15 @@ static void rk3228_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3288_DRV_PINS_PER_REG); *bit *= RK3288_DRV_BITS_PER_PIN; + + return 0; } #define RK3308_PULL_OFFSET 0xa0 -static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1381,13 +1488,15 @@ static void rk3308_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= RK3188_PULL_BITS_PER_PIN; + + return 0; } #define RK3308_DRV_GRF_OFFSET 0x100 -static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1398,14 +1507,16 @@ static void rk3308_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3288_DRV_PINS_PER_REG); *bit *= RK3288_DRV_BITS_PER_PIN; + + return 0; } #define RK3368_PULL_GRF_OFFSET 0x100 #define RK3368_PULL_PMU_OFFSET 0x10 -static void 
rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1429,14 +1540,16 @@ static void rk3368_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= RK3188_PULL_BITS_PER_PIN; } + + return 0; } #define RK3368_DRV_PMU_OFFSET 0x20 #define RK3368_DRV_GRF_OFFSET 0x200 -static void rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1460,15 +1573,17 @@ static void rk3368_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3288_DRV_PINS_PER_REG); *bit *= RK3288_DRV_BITS_PER_PIN; } + + return 0; } #define RK3399_PULL_GRF_OFFSET 0xe040 #define RK3399_PULL_PMU_OFFSET 0x40 #define RK3399_DRV_3BITS_PER_PIN 3 -static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1494,11 +1609,13 @@ static void rk3399_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3188_PULL_PINS_PER_REG); *bit *= RK3188_PULL_BITS_PER_PIN; } + + return 0; } -static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; int drv_num = (pin_num / 8); @@ -1515,6 +1632,8 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % 8) * 3; else *bit = (pin_num % 8) * 2; + + return 0; } #define RK3568_PULL_PMU_OFFSET 0x20 @@ -1523,9 +1642,9 @@ static void rk3399_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, #define RK3568_PULL_PINS_PER_REG 8 #define RK3568_PULL_BANK_STRIDE 0x10 -static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1546,6 +1665,8 @@ static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % RK3568_PULL_PINS_PER_REG); *bit *= RK3568_PULL_BITS_PER_PIN; } + + return 0; } #define RK3568_DRV_PMU_OFFSET 0x70 @@ -1554,9 +1675,9 @@ static void rk3568_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, #define RK3568_DRV_PINS_PER_REG 2 #define RK3568_DRV_BANK_STRIDE 0x40 -static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, - int pin_num, struct regmap **regmap, - int *reg, u8 *bit) +static int rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) { struct rockchip_pinctrl *info = bank->drvdata; @@ -1577,6 +1698,189 @@ static void rk3568_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, *bit = (pin_num % 
RK3568_DRV_PINS_PER_REG); *bit *= RK3568_DRV_BITS_PER_PIN; } + + return 0; +} + +#define RK3588_PMU1_IOC_REG (0x0000) +#define RK3588_PMU2_IOC_REG (0x4000) +#define RK3588_BUS_IOC_REG (0x8000) +#define RK3588_VCCIO1_4_IOC_REG (0x9000) +#define RK3588_VCCIO3_5_IOC_REG (0xA000) +#define RK3588_VCCIO2_IOC_REG (0xB000) +#define RK3588_VCCIO6_IOC_REG (0xC000) +#define RK3588_EMMC_IOC_REG (0xD000) + +static const u32 rk3588_ds_regs[][2] = { + {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0010}, + {RK_GPIO0_A4, RK3588_PMU1_IOC_REG + 0x0014}, + {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0018}, + {RK_GPIO0_B4, RK3588_PMU2_IOC_REG + 0x0014}, + {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x0018}, + {RK_GPIO0_C4, RK3588_PMU2_IOC_REG + 0x001C}, + {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0020}, + {RK_GPIO0_D4, RK3588_PMU2_IOC_REG + 0x0024}, + {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0020}, + {RK_GPIO1_A4, RK3588_VCCIO1_4_IOC_REG + 0x0024}, + {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0028}, + {RK_GPIO1_B4, RK3588_VCCIO1_4_IOC_REG + 0x002C}, + {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0030}, + {RK_GPIO1_C4, RK3588_VCCIO1_4_IOC_REG + 0x0034}, + {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x0038}, + {RK_GPIO1_D4, RK3588_VCCIO1_4_IOC_REG + 0x003C}, + {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0040}, + {RK_GPIO2_A4, RK3588_VCCIO3_5_IOC_REG + 0x0044}, + {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0048}, + {RK_GPIO2_B4, RK3588_VCCIO3_5_IOC_REG + 0x004C}, + {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0050}, + {RK_GPIO2_C4, RK3588_VCCIO3_5_IOC_REG + 0x0054}, + {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x0058}, + {RK_GPIO2_D4, RK3588_EMMC_IOC_REG + 0x005C}, + {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0060}, + {RK_GPIO3_A4, RK3588_VCCIO3_5_IOC_REG + 0x0064}, + {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0068}, + {RK_GPIO3_B4, RK3588_VCCIO3_5_IOC_REG + 0x006C}, + {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0070}, + {RK_GPIO3_C4, RK3588_VCCIO3_5_IOC_REG + 0x0074}, + {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x0078}, + {RK_GPIO3_D4, RK3588_VCCIO3_5_IOC_REG + 0x007C}, + {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0080}, + {RK_GPIO4_A4, RK3588_VCCIO6_IOC_REG + 0x0084}, + {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0088}, + {RK_GPIO4_B4, RK3588_VCCIO6_IOC_REG + 0x008C}, + {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 0x0090}, + {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0090}, + {RK_GPIO4_C4, RK3588_VCCIO3_5_IOC_REG + 0x0094}, + {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x0098}, + {RK_GPIO4_D4, RK3588_VCCIO2_IOC_REG + 0x009C}, +}; + +static const u32 rk3588_p_regs[][2] = { + {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0020}, + {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0024}, + {RK_GPIO0_B5, RK3588_PMU2_IOC_REG + 0x0028}, + {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x002C}, + {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0030}, + {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0110}, + {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0114}, + {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0118}, + {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x011C}, + {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0120}, + {RK_GPIO2_A6, RK3588_VCCIO3_5_IOC_REG + 0x0120}, + {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0124}, + {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0128}, + {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x012C}, + {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0130}, + {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0134}, + {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0138}, + {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x013C}, + {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0140}, + {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0144}, + {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 
0x0148}, + {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0148}, + {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x014C}, +}; + +static const u32 rk3588_smt_regs[][2] = { + {RK_GPIO0_A0, RK3588_PMU1_IOC_REG + 0x0030}, + {RK_GPIO0_B0, RK3588_PMU1_IOC_REG + 0x0034}, + {RK_GPIO0_B5, RK3588_PMU2_IOC_REG + 0x0040}, + {RK_GPIO0_C0, RK3588_PMU2_IOC_REG + 0x0044}, + {RK_GPIO0_D0, RK3588_PMU2_IOC_REG + 0x0048}, + {RK_GPIO1_A0, RK3588_VCCIO1_4_IOC_REG + 0x0210}, + {RK_GPIO1_B0, RK3588_VCCIO1_4_IOC_REG + 0x0214}, + {RK_GPIO1_C0, RK3588_VCCIO1_4_IOC_REG + 0x0218}, + {RK_GPIO1_D0, RK3588_VCCIO1_4_IOC_REG + 0x021C}, + {RK_GPIO2_A0, RK3588_EMMC_IOC_REG + 0x0220}, + {RK_GPIO2_A6, RK3588_VCCIO3_5_IOC_REG + 0x0220}, + {RK_GPIO2_B0, RK3588_VCCIO3_5_IOC_REG + 0x0224}, + {RK_GPIO2_C0, RK3588_VCCIO3_5_IOC_REG + 0x0228}, + {RK_GPIO2_D0, RK3588_EMMC_IOC_REG + 0x022C}, + {RK_GPIO3_A0, RK3588_VCCIO3_5_IOC_REG + 0x0230}, + {RK_GPIO3_B0, RK3588_VCCIO3_5_IOC_REG + 0x0234}, + {RK_GPIO3_C0, RK3588_VCCIO3_5_IOC_REG + 0x0238}, + {RK_GPIO3_D0, RK3588_VCCIO3_5_IOC_REG + 0x023C}, + {RK_GPIO4_A0, RK3588_VCCIO6_IOC_REG + 0x0240}, + {RK_GPIO4_B0, RK3588_VCCIO6_IOC_REG + 0x0244}, + {RK_GPIO4_C0, RK3588_VCCIO6_IOC_REG + 0x0248}, + {RK_GPIO4_C2, RK3588_VCCIO3_5_IOC_REG + 0x0248}, + {RK_GPIO4_D0, RK3588_VCCIO2_IOC_REG + 0x024C}, +}; + +#define RK3588_PULL_BITS_PER_PIN 2 +#define RK3588_PULL_PINS_PER_REG 8 + +static int rk3588_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + u8 bank_num = bank->bank_num; + u32 pin = bank_num * 32 + pin_num; + int i; + + for (i = ARRAY_SIZE(rk3588_p_regs) - 1; i >= 0; i--) { + if (pin >= rk3588_p_regs[i][0]) { + *reg = rk3588_p_regs[i][1]; + *regmap = info->regmap_base; + *bit = pin_num % RK3588_PULL_PINS_PER_REG; + *bit *= RK3588_PULL_BITS_PER_PIN; + return 0; + } + } + + return -EINVAL; +} + +#define RK3588_DRV_BITS_PER_PIN 4 +#define RK3588_DRV_PINS_PER_REG 4 + +static int rk3588_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + u8 bank_num = bank->bank_num; + u32 pin = bank_num * 32 + pin_num; + int i; + + for (i = ARRAY_SIZE(rk3588_ds_regs) - 1; i >= 0; i--) { + if (pin >= rk3588_ds_regs[i][0]) { + *reg = rk3588_ds_regs[i][1]; + *regmap = info->regmap_base; + *bit = pin_num % RK3588_DRV_PINS_PER_REG; + *bit *= RK3588_DRV_BITS_PER_PIN; + return 0; + } + } + + return -EINVAL; +} + +#define RK3588_SMT_BITS_PER_PIN 1 +#define RK3588_SMT_PINS_PER_REG 8 + +static int rk3588_calc_schmitt_reg_and_bit(struct rockchip_pin_bank *bank, + int pin_num, + struct regmap **regmap, + int *reg, u8 *bit) +{ + struct rockchip_pinctrl *info = bank->drvdata; + u8 bank_num = bank->bank_num; + u32 pin = bank_num * 32 + pin_num; + int i; + + for (i = ARRAY_SIZE(rk3588_smt_regs) - 1; i >= 0; i--) { + if (pin >= rk3588_smt_regs[i][0]) { + *reg = rk3588_smt_regs[i][1]; + *regmap = info->regmap_base; + *bit = pin_num % RK3588_SMT_PINS_PER_REG; + *bit *= RK3588_SMT_BITS_PER_PIN; + return 0; + } + } + + return -EINVAL; } static int rockchip_perpin_drv_list[DRV_TYPE_MAX][8] = { @@ -1599,7 +1903,9 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank, u8 bit; int drv_type = bank->drv[pin_num / 8].drv_type; - ctrl->drv_calc_reg(bank, pin_num, ®map, ®, &bit); + ret = ctrl->drv_calc_reg(bank, pin_num, ®map, ®, &bit); + if (ret) + return ret; switch (drv_type) { case DRV_TYPE_IO_1V8_3V0_AUTO: @@ 
-1679,8 +1985,14 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank, dev_dbg(dev, "setting drive of GPIO%d-%d to %d\n", bank->bank_num, pin_num, strength); - ctrl->drv_calc_reg(bank, pin_num, ®map, ®, &bit); - if (ctrl->type == RK3568) { + ret = ctrl->drv_calc_reg(bank, pin_num, ®map, ®, &bit); + if (ret) + return ret; + if (ctrl->type == RK3588) { + rmask_bits = RK3588_DRV_BITS_PER_PIN; + ret = strength; + goto config; + } else if (ctrl->type == RK3568) { rmask_bits = RK3568_DRV_BITS_PER_PIN; ret = (1 << (strength + 1)) - 1; goto config; @@ -1792,7 +2104,9 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) if (ctrl->type == RK3066B) return PIN_CONFIG_BIAS_DISABLE; - ctrl->pull_calc_reg(bank, pin_num, ®map, ®, &bit); + ret = ctrl->pull_calc_reg(bank, pin_num, ®map, ®, &bit); + if (ret) + return ret; ret = regmap_read(regmap, reg, &data); if (ret) @@ -1811,6 +2125,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num) case RK3308: case RK3368: case RK3399: + case RK3588: pull_type = bank->pull_type[pin_num / 8]; data >>= bit; data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1; @@ -1839,7 +2154,9 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, if (ctrl->type == RK3066B) return pull ? -EINVAL : 0; - ctrl->pull_calc_reg(bank, pin_num, ®map, ®, &bit); + ret = ctrl->pull_calc_reg(bank, pin_num, ®map, ®, &bit); + if (ret) + return ret; switch (ctrl->type) { case RK2928: @@ -1857,6 +2174,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank, case RK3368: case RK3399: case RK3568: + case RK3588: pull_type = bank->pull_type[pin_num / 8]; ret = -EINVAL; for (i = 0; i < ARRAY_SIZE(rockchip_pull_list[pull_type]); @@ -2104,25 +2422,27 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl, case RK3368: case RK3399: case RK3568: + case RK3588: return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT); } return false; } -static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank, - unsigned int pin, u32 arg) +static int rockchip_pinconf_defer_pin(struct rockchip_pin_bank *bank, + unsigned int pin, u32 param, u32 arg) { - struct rockchip_pin_output_deferred *cfg; + struct rockchip_pin_deferred *cfg; cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) return -ENOMEM; cfg->pin = pin; + cfg->param = param; cfg->arg = arg; - list_add_tail(&cfg->head, &bank->deferred_output); + list_add_tail(&cfg->head, &bank->deferred_pins); return 0; } @@ -2143,6 +2463,25 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, param = pinconf_to_config_param(configs[i]); arg = pinconf_to_config_argument(configs[i]); + if (param == PIN_CONFIG_OUTPUT || param == PIN_CONFIG_INPUT_ENABLE) { + /* + * Check for gpio driver not being probed yet. + * The lock makes sure that either gpio-probe has completed + * or the gpio driver hasn't probed yet. + */ + mutex_lock(&bank->deferred_lock); + if (!gpio || !gpio->direction_output) { + rc = rockchip_pinconf_defer_pin(bank, pin - bank->pin_base, param, + arg); + mutex_unlock(&bank->deferred_lock); + if (rc) + return rc; + + break; + } + mutex_unlock(&bank->deferred_lock); + } + switch (param) { case PIN_CONFIG_BIAS_DISABLE: rc = rockchip_set_pull(bank, pin - bank->pin_base, @@ -2171,27 +2510,21 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, if (rc != RK_FUNC_GPIO) return -EINVAL; - /* - * Check for gpio driver not being probed yet. 
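/*
 * Illustrative sketch (not part of the patch): the deferred-pin pattern that
 * rockchip_pinconf_defer_pin() in this hunk implements. When the GPIO side
 * has not probed yet, the requested config is queued as a {pin, param, arg}
 * record under a lock and replayed (or freed) later. The list handling,
 * struct and function names below are simplified stand-ins for the kernel's
 * list_head/mutex code, written as a standalone example.
 */
#include <stdlib.h>
#include <stdio.h>

struct deferred_pin_cfg {
	struct deferred_pin_cfg *next;
	unsigned int pin;
	unsigned int param;	/* e.g. output or input-enable */
	unsigned int arg;
};

struct example_bank {
	struct deferred_pin_cfg *deferred;	/* head of the queue */
};

static int defer_pin_cfg(struct example_bank *bank, unsigned int pin,
			 unsigned int param, unsigned int arg)
{
	struct deferred_pin_cfg *cfg = calloc(1, sizeof(*cfg));

	if (!cfg)
		return -1;

	cfg->pin = pin;
	cfg->param = param;
	cfg->arg = arg;
	cfg->next = bank->deferred;	/* the kernel appends to a list_head instead */
	bank->deferred = cfg;
	return 0;
}

static void drain_deferred(struct example_bank *bank)
{
	while (bank->deferred) {
		struct deferred_pin_cfg *cfg = bank->deferred;

		printf("replaying pin %u param %u arg %u\n",
		       cfg->pin, cfg->param, cfg->arg);
		bank->deferred = cfg->next;
		free(cfg);
	}
}

int main(void)
{
	struct example_bank bank = { 0 };

	defer_pin_cfg(&bank, 4, 1, 1);	/* made-up pin/param/arg values */
	drain_deferred(&bank);
	return 0;
}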
- * The lock makes sure that either gpio-probe has completed - * or the gpio driver hasn't probed yet. - */ - mutex_lock(&bank->deferred_lock); - if (!gpio || !gpio->direction_output) { - rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg); - mutex_unlock(&bank->deferred_lock); - if (rc) - return rc; - - break; - } - mutex_unlock(&bank->deferred_lock); - rc = gpio->direction_output(gpio, pin - bank->pin_base, arg); if (rc) return rc; break; + case PIN_CONFIG_INPUT_ENABLE: + rc = rockchip_set_mux(bank, pin - bank->pin_base, + RK_FUNC_GPIO); + if (rc != RK_FUNC_GPIO) + return -EINVAL; + + rc = gpio->direction_input(gpio, pin - bank->pin_base); + if (rc) + return rc; + break; case PIN_CONFIG_DRIVE_STRENGTH: /* rk3288 is the first with per-pin drive-strength */ if (!info->ctrl->drv_calc_reg) @@ -2500,7 +2833,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev, pdesc++; } - INIT_LIST_HEAD(&pin_bank->deferred_output); + INIT_LIST_HEAD(&pin_bank->deferred_pins); mutex_init(&pin_bank->deferred_lock); } @@ -2763,7 +3096,7 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev) { struct rockchip_pinctrl *info = platform_get_drvdata(pdev); struct rockchip_pin_bank *bank; - struct rockchip_pin_output_deferred *cfg; + struct rockchip_pin_deferred *cfg; int i; of_platform_depopulate(&pdev->dev); @@ -2772,9 +3105,9 @@ static int rockchip_pinctrl_remove(struct platform_device *pdev) bank = &info->ctrl->pin_banks[i]; mutex_lock(&bank->deferred_lock); - while (!list_empty(&bank->deferred_output)) { - cfg = list_first_entry(&bank->deferred_output, - struct rockchip_pin_output_deferred, head); + while (!list_empty(&bank->deferred_pins)) { + cfg = list_first_entry(&bank->deferred_pins, + struct rockchip_pin_deferred, head); list_del(&cfg->head); kfree(cfg); } @@ -3207,6 +3540,29 @@ static struct rockchip_pin_ctrl rk3568_pin_ctrl = { .schmitt_calc_reg = rk3568_calc_schmitt_reg_and_bit, }; +static struct rockchip_pin_bank rk3588_pin_banks[] = { + RK3588_PIN_BANK_FLAGS(0, 32, "gpio0", + IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), + RK3588_PIN_BANK_FLAGS(1, 32, "gpio1", + IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), + RK3588_PIN_BANK_FLAGS(2, 32, "gpio2", + IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), + RK3588_PIN_BANK_FLAGS(3, 32, "gpio3", + IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), + RK3588_PIN_BANK_FLAGS(4, 32, "gpio4", + IOMUX_WIDTH_4BIT, PULL_TYPE_IO_1V8_ONLY), +}; + +static struct rockchip_pin_ctrl rk3588_pin_ctrl = { + .pin_banks = rk3588_pin_banks, + .nr_banks = ARRAY_SIZE(rk3588_pin_banks), + .label = "RK3588-GPIO", + .type = RK3588, + .pull_calc_reg = rk3588_calc_pull_reg_and_bit, + .drv_calc_reg = rk3588_calc_drv_reg_and_bit, + .schmitt_calc_reg = rk3588_calc_schmitt_reg_and_bit, +}; + static const struct of_device_id rockchip_pinctrl_dt_match[] = { { .compatible = "rockchip,px30-pinctrl", .data = &px30_pin_ctrl }, @@ -3238,6 +3594,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = { .data = &rk3399_pin_ctrl }, { .compatible = "rockchip,rk3568-pinctrl", .data = &rk3568_pin_ctrl }, + { .compatible = "rockchip,rk3588-pinctrl", + .data = &rk3588_pin_ctrl }, {}, }; diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h index 91f10279d084..ec46f8815ac9 100644 --- a/drivers/pinctrl/pinctrl-rockchip.h +++ b/drivers/pinctrl/pinctrl-rockchip.h @@ -18,6 +18,171 @@ #ifndef _PINCTRL_ROCKCHIP_H #define _PINCTRL_ROCKCHIP_H +#define RK_GPIO0_A0 0 +#define RK_GPIO0_A1 1 +#define RK_GPIO0_A2 2 +#define RK_GPIO0_A3 3 +#define 
RK_GPIO0_A4 4 +#define RK_GPIO0_A5 5 +#define RK_GPIO0_A6 6 +#define RK_GPIO0_A7 7 +#define RK_GPIO0_B0 8 +#define RK_GPIO0_B1 9 +#define RK_GPIO0_B2 10 +#define RK_GPIO0_B3 11 +#define RK_GPIO0_B4 12 +#define RK_GPIO0_B5 13 +#define RK_GPIO0_B6 14 +#define RK_GPIO0_B7 15 +#define RK_GPIO0_C0 16 +#define RK_GPIO0_C1 17 +#define RK_GPIO0_C2 18 +#define RK_GPIO0_C3 19 +#define RK_GPIO0_C4 20 +#define RK_GPIO0_C5 21 +#define RK_GPIO0_C6 22 +#define RK_GPIO0_C7 23 +#define RK_GPIO0_D0 24 +#define RK_GPIO0_D1 25 +#define RK_GPIO0_D2 26 +#define RK_GPIO0_D3 27 +#define RK_GPIO0_D4 28 +#define RK_GPIO0_D5 29 +#define RK_GPIO0_D6 30 +#define RK_GPIO0_D7 31 + +#define RK_GPIO1_A0 32 +#define RK_GPIO1_A1 33 +#define RK_GPIO1_A2 34 +#define RK_GPIO1_A3 35 +#define RK_GPIO1_A4 36 +#define RK_GPIO1_A5 37 +#define RK_GPIO1_A6 38 +#define RK_GPIO1_A7 39 +#define RK_GPIO1_B0 40 +#define RK_GPIO1_B1 41 +#define RK_GPIO1_B2 42 +#define RK_GPIO1_B3 43 +#define RK_GPIO1_B4 44 +#define RK_GPIO1_B5 45 +#define RK_GPIO1_B6 46 +#define RK_GPIO1_B7 47 +#define RK_GPIO1_C0 48 +#define RK_GPIO1_C1 49 +#define RK_GPIO1_C2 50 +#define RK_GPIO1_C3 51 +#define RK_GPIO1_C4 52 +#define RK_GPIO1_C5 53 +#define RK_GPIO1_C6 54 +#define RK_GPIO1_C7 55 +#define RK_GPIO1_D0 56 +#define RK_GPIO1_D1 57 +#define RK_GPIO1_D2 58 +#define RK_GPIO1_D3 59 +#define RK_GPIO1_D4 60 +#define RK_GPIO1_D5 61 +#define RK_GPIO1_D6 62 +#define RK_GPIO1_D7 63 + +#define RK_GPIO2_A0 64 +#define RK_GPIO2_A1 65 +#define RK_GPIO2_A2 66 +#define RK_GPIO2_A3 67 +#define RK_GPIO2_A4 68 +#define RK_GPIO2_A5 69 +#define RK_GPIO2_A6 70 +#define RK_GPIO2_A7 71 +#define RK_GPIO2_B0 72 +#define RK_GPIO2_B1 73 +#define RK_GPIO2_B2 74 +#define RK_GPIO2_B3 75 +#define RK_GPIO2_B4 76 +#define RK_GPIO2_B5 77 +#define RK_GPIO2_B6 78 +#define RK_GPIO2_B7 79 +#define RK_GPIO2_C0 80 +#define RK_GPIO2_C1 81 +#define RK_GPIO2_C2 82 +#define RK_GPIO2_C3 83 +#define RK_GPIO2_C4 84 +#define RK_GPIO2_C5 85 +#define RK_GPIO2_C6 86 +#define RK_GPIO2_C7 87 +#define RK_GPIO2_D0 88 +#define RK_GPIO2_D1 89 +#define RK_GPIO2_D2 90 +#define RK_GPIO2_D3 91 +#define RK_GPIO2_D4 92 +#define RK_GPIO2_D5 93 +#define RK_GPIO2_D6 94 +#define RK_GPIO2_D7 95 + +#define RK_GPIO3_A0 96 +#define RK_GPIO3_A1 97 +#define RK_GPIO3_A2 98 +#define RK_GPIO3_A3 99 +#define RK_GPIO3_A4 100 +#define RK_GPIO3_A5 101 +#define RK_GPIO3_A6 102 +#define RK_GPIO3_A7 103 +#define RK_GPIO3_B0 104 +#define RK_GPIO3_B1 105 +#define RK_GPIO3_B2 106 +#define RK_GPIO3_B3 107 +#define RK_GPIO3_B4 108 +#define RK_GPIO3_B5 109 +#define RK_GPIO3_B6 110 +#define RK_GPIO3_B7 111 +#define RK_GPIO3_C0 112 +#define RK_GPIO3_C1 113 +#define RK_GPIO3_C2 114 +#define RK_GPIO3_C3 115 +#define RK_GPIO3_C4 116 +#define RK_GPIO3_C5 117 +#define RK_GPIO3_C6 118 +#define RK_GPIO3_C7 119 +#define RK_GPIO3_D0 120 +#define RK_GPIO3_D1 121 +#define RK_GPIO3_D2 122 +#define RK_GPIO3_D3 123 +#define RK_GPIO3_D4 124 +#define RK_GPIO3_D5 125 +#define RK_GPIO3_D6 126 +#define RK_GPIO3_D7 127 + +#define RK_GPIO4_A0 128 +#define RK_GPIO4_A1 129 +#define RK_GPIO4_A2 130 +#define RK_GPIO4_A3 131 +#define RK_GPIO4_A4 132 +#define RK_GPIO4_A5 133 +#define RK_GPIO4_A6 134 +#define RK_GPIO4_A7 135 +#define RK_GPIO4_B0 136 +#define RK_GPIO4_B1 137 +#define RK_GPIO4_B2 138 +#define RK_GPIO4_B3 139 +#define RK_GPIO4_B4 140 +#define RK_GPIO4_B5 141 +#define RK_GPIO4_B6 142 +#define RK_GPIO4_B7 143 +#define RK_GPIO4_C0 144 +#define RK_GPIO4_C1 145 +#define RK_GPIO4_C2 146 +#define RK_GPIO4_C3 147 +#define RK_GPIO4_C4 148 +#define RK_GPIO4_C5 149 
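/*
 * Illustrative sketch (not part of the patch): how the RK3588 helpers earlier
 * in this diff resolve a pin to its IOC register. The RK_GPIOx_yN macros
 * encode bank * 32 + port * 8 + index, and rk3588_calc_pull_reg_and_bit()
 * walks its {first_pin, reg_offset} table backwards, picking the entry with
 * the highest starting pin that is still <= the requested pin. The table and
 * helper names here are made up for the example, and the regmap selection
 * done by the kernel code is omitted.
 */
#include <stdio.h>

struct pin_reg_range {
	unsigned int first_pin;	/* first global pin covered by this register */
	unsigned int reg;	/* register offset inside the IOC block */
};

/* Hypothetical excerpt in the same shape as rk3588_p_regs[][2]. */
static const struct pin_reg_range example_p_regs[] = {
	{ 0,  0x0020 },		/* from RK_GPIO0_A0 */
	{ 8,  0x0024 },		/* from RK_GPIO0_B0 */
	{ 13, 0x0028 },		/* from RK_GPIO0_B5 */
	{ 16, 0x002C },		/* from RK_GPIO0_C0 */
};

static int example_lookup(unsigned int bank, unsigned int pin_in_bank,
			  unsigned int *reg, unsigned int *bit)
{
	unsigned int pin = bank * 32 + pin_in_bank;
	int i;

	for (i = (int)(sizeof(example_p_regs) / sizeof(example_p_regs[0])) - 1;
	     i >= 0; i--) {
		if (pin >= example_p_regs[i].first_pin) {
			*reg = example_p_regs[i].reg;
			*bit = (pin_in_bank % 8) * 2;	/* 8 pins per reg, 2 bits each */
			return 0;
		}
	}

	return -1;	/* pin not covered by the table */
}

int main(void)
{
	unsigned int reg, bit;

	if (!example_lookup(0, 14, &reg, &bit))	/* RK_GPIO0_B6 */
		printf("reg 0x%04x, bit %u\n", reg, bit);	/* reg 0x0028, bit 12 */
	return 0;
}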
+#define RK_GPIO4_C6 150 +#define RK_GPIO4_C7 151 +#define RK_GPIO4_D0 152 +#define RK_GPIO4_D1 153 +#define RK_GPIO4_D2 154 +#define RK_GPIO4_D3 155 +#define RK_GPIO4_D4 156 +#define RK_GPIO4_D5 157 +#define RK_GPIO4_D6 158 +#define RK_GPIO4_D7 159 + enum rockchip_pinctrl_type { PX30, RV1108, @@ -30,6 +195,7 @@ enum rockchip_pinctrl_type { RK3368, RK3399, RK3568, + RK3588, }; /** @@ -171,7 +337,7 @@ struct rockchip_pin_bank { u32 toggle_edge_mode; u32 recalced_mask; u32 route_mask; - struct list_head deferred_output; + struct list_head deferred_pins; struct mutex deferred_lock; }; @@ -230,10 +396,10 @@ struct rockchip_pin_ctrl { struct rockchip_mux_route_data *iomux_routes; u32 niomux_routes; - void (*pull_calc_reg)(struct rockchip_pin_bank *bank, + int (*pull_calc_reg)(struct rockchip_pin_bank *bank, int pin_num, struct regmap **regmap, int *reg, u8 *bit); - void (*drv_calc_reg)(struct rockchip_pin_bank *bank, + int (*drv_calc_reg)(struct rockchip_pin_bank *bank, int pin_num, struct regmap **regmap, int *reg, u8 *bit); int (*schmitt_calc_reg)(struct rockchip_pin_bank *bank, @@ -247,9 +413,12 @@ struct rockchip_pin_config { unsigned int nconfigs; }; -struct rockchip_pin_output_deferred { +enum pin_config_param; + +struct rockchip_pin_deferred { struct list_head head; unsigned int pin; + enum pin_config_param param; u32 arg; }; diff --git a/drivers/pinctrl/pinctrl-starfive.c b/drivers/pinctrl/pinctrl-starfive.c index c586cfd09fa8..2a86c1035cc8 100644 --- a/drivers/pinctrl/pinctrl-starfive.c +++ b/drivers/pinctrl/pinctrl-starfive.c @@ -1074,6 +1074,8 @@ static void starfive_irq_mask(struct irq_data *d) value = readl_relaxed(ie) & ~mask; writel_relaxed(value, ie); raw_spin_unlock_irqrestore(&sfp->lock, flags); + + gpiochip_disable_irq(&sfp->gc, d->hwirq); } static void starfive_irq_mask_ack(struct irq_data *d) @@ -1102,6 +1104,8 @@ static void starfive_irq_unmask(struct irq_data *d) unsigned long flags; u32 value; + gpiochip_enable_irq(&sfp->gc, d->hwirq); + raw_spin_lock_irqsave(&sfp->lock, flags); value = readl_relaxed(ie) | mask; writel_relaxed(value, ie); @@ -1163,14 +1167,15 @@ static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger) return 0; } -static struct irq_chip starfive_irq_chip = { +static const struct irq_chip starfive_irq_chip = { .name = "StarFive GPIO", .irq_ack = starfive_irq_ack, .irq_mask = starfive_irq_mask, .irq_mask_ack = starfive_irq_mask_ack, .irq_unmask = starfive_irq_unmask, .irq_set_type = starfive_irq_set_type, - .flags = IRQCHIP_SET_TYPE_MASKED, + .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void starfive_gpio_irq_handler(struct irq_desc *desc) @@ -1308,7 +1313,7 @@ static int starfive_probe(struct platform_device *pdev) sfp->gc.base = -1; sfp->gc.ngpio = NR_GPIOS; - sfp->gc.irq.chip = &starfive_irq_chip; + gpio_irq_chip_set_chip(&sfp->gc.irq, &starfive_irq_chip); sfp->gc.irq.parent_handler = starfive_gpio_irq_handler; sfp->gc.irq.num_parents = 1; sfp->gc.irq.parents = devm_kcalloc(dev, sfp->gc.irq.num_parents, diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c index 79d44bca039e..9328b17485cf 100644 --- a/drivers/pinctrl/pinctrl-thunderbay.c +++ b/drivers/pinctrl/pinctrl-thunderbay.c @@ -1229,7 +1229,6 @@ static int thunderbay_pinctrl_probe(struct platform_device *pdev) const struct of_device_id *of_id; struct device *dev = &pdev->dev; struct thunderbay_pinctrl *tpc; - struct resource *iomem; int ret; of_id = of_match_node(thunderbay_pinctrl_match, 
pdev->dev.of_node); @@ -1243,11 +1242,7 @@ static int thunderbay_pinctrl_probe(struct platform_device *pdev) tpc->dev = dev; tpc->soc = of_id->data; - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iomem) - return -ENXIO; - - tpc->base0 = devm_ioremap_resource(dev, iomem); + tpc->base0 = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(tpc->base0)) return PTR_ERR(tpc->base0); diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index c51ef54a9f61..3daeb9772391 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -239,6 +239,15 @@ config PINCTRL_SC7280 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SC7280 platform. +config PINCTRL_SC7280_LPASS_LPI + tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver" + depends on GPIOLIB + depends on PINCTRL_LPASS_LPI + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI + (Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform. + config PINCTRL_SC8180X tristate "Qualcomm Technologies Inc SC8180x pin controller driver" depends on (OF || ACPI) @@ -338,6 +347,15 @@ config PINCTRL_SM8250 Qualcomm Technologies Inc TLMM block found on the Qualcomm Technologies Inc SM8250 platform. +config PINCTRL_SM8250_LPASS_LPI + tristate "Qualcomm Technologies Inc SM8250 LPASS LPI pin controller driver" + depends on GPIOLIB + depends on PINCTRL_LPASS_LPI + help + This is the pinctrl, pinmux, pinconf and gpiolib driver for the + Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI + (Low Power Island) found on the Qualcomm Technologies Inc SM8250 platform. + config PINCTRL_SM8350 tristate "Qualcomm Technologies Inc SM8350 pin controller driver" depends on PINCTRL_MSM @@ -360,6 +378,7 @@ config PINCTRL_LPASS_LPI select PINMUX select PINCONF select GENERIC_PINCONF + select GENERIC_PINCTRL_GROUPS depends on GPIOLIB help This is the pinctrl, pinmux, pinconf and gpiolib driver for the diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 5efbfd9f6248..4f0ee7597f81 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o +obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o obj-$(CONFIG_PINCTRL_SC8180X) += pinctrl-sc8180x.o obj-$(CONFIG_PINCTRL_SC8280XP) += pinctrl-sc8280xp.o obj-$(CONFIG_PINCTRL_SDM660) += pinctrl-sdm660.o @@ -39,6 +40,7 @@ obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o +obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 2f19ab4db720..74810ec4df44 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -4,93 +4,15 @@ * Copyright (c) 2020 Linaro Ltd. 
*/ -#include <linux/bitops.h> -#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/gpio/driver.h> -#include <linux/io.h> #include <linux/module.h> #include <linux/of_device.h> -#include <linux/of.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinconf.h> #include <linux/pinctrl/pinmux.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/types.h> -#include "../core.h" #include "../pinctrl-utils.h" - -#define LPI_SLEW_RATE_CTL_REG 0xa000 -#define LPI_TLMM_REG_OFFSET 0x1000 -#define LPI_SLEW_RATE_MAX 0x03 -#define LPI_SLEW_BITS_SIZE 0x02 -#define LPI_SLEW_RATE_MASK GENMASK(1, 0) -#define LPI_GPIO_CFG_REG 0x00 -#define LPI_GPIO_PULL_MASK GENMASK(1, 0) -#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2) -#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6) -#define LPI_GPIO_OE_MASK BIT(9) -#define LPI_GPIO_VALUE_REG 0x04 -#define LPI_GPIO_VALUE_IN_MASK BIT(0) -#define LPI_GPIO_VALUE_OUT_MASK BIT(1) - -#define LPI_GPIO_BIAS_DISABLE 0x0 -#define LPI_GPIO_PULL_DOWN 0x1 -#define LPI_GPIO_KEEPER 0x2 -#define LPI_GPIO_PULL_UP 0x3 -#define LPI_GPIO_DS_TO_VAL(v) (v / 2 - 1) -#define NO_SLEW -1 - -#define LPI_FUNCTION(fname) \ - [LPI_MUX_##fname] = { \ - .name = #fname, \ - .groups = fname##_groups, \ - .ngroups = ARRAY_SIZE(fname##_groups), \ - } - -#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \ - { \ - .name = "gpio" #id, \ - .pins = gpio##id##_pins, \ - .pin = id, \ - .slew_offset = soff, \ - .npins = ARRAY_SIZE(gpio##id##_pins), \ - .funcs = (int[]){ \ - LPI_MUX_gpio, \ - LPI_MUX_##f1, \ - LPI_MUX_##f2, \ - LPI_MUX_##f3, \ - LPI_MUX_##f4, \ - }, \ - .nfuncs = 5, \ - } - -struct lpi_pingroup { - const char *name; - const unsigned int *pins; - unsigned int npins; - unsigned int pin; - /* Bit offset in slew register for SoundWire pins only */ - int slew_offset; - unsigned int *funcs; - unsigned int nfuncs; -}; - -struct lpi_function { - const char *name; - const char * const *groups; - unsigned int ngroups; -}; - -struct lpi_pinctrl_variant_data { - const struct pinctrl_pin_desc *pins; - int npins; - const struct lpi_pingroup *groups; - int ngroups; - const struct lpi_function *functions; - int nfunctions; -}; +#include "pinctrl-lpass-lpi.h" #define MAX_LPI_NUM_CLKS 2 @@ -106,136 +28,6 @@ struct lpi_pinctrl { const struct lpi_pinctrl_variant_data *data; }; -/* sm8250 variant specific data */ -static const struct pinctrl_pin_desc sm8250_lpi_pins[] = { - PINCTRL_PIN(0, "gpio0"), - PINCTRL_PIN(1, "gpio1"), - PINCTRL_PIN(2, "gpio2"), - PINCTRL_PIN(3, "gpio3"), - PINCTRL_PIN(4, "gpio4"), - PINCTRL_PIN(5, "gpio5"), - PINCTRL_PIN(6, "gpio6"), - PINCTRL_PIN(7, "gpio7"), - PINCTRL_PIN(8, "gpio8"), - PINCTRL_PIN(9, "gpio9"), - PINCTRL_PIN(10, "gpio10"), - PINCTRL_PIN(11, "gpio11"), - PINCTRL_PIN(12, "gpio12"), - PINCTRL_PIN(13, "gpio13"), -}; - -enum sm8250_lpi_functions { - LPI_MUX_dmic1_clk, - LPI_MUX_dmic1_data, - LPI_MUX_dmic2_clk, - LPI_MUX_dmic2_data, - LPI_MUX_dmic3_clk, - LPI_MUX_dmic3_data, - LPI_MUX_i2s1_clk, - LPI_MUX_i2s1_data, - LPI_MUX_i2s1_ws, - LPI_MUX_i2s2_clk, - LPI_MUX_i2s2_data, - LPI_MUX_i2s2_ws, - LPI_MUX_qua_mi2s_data, - LPI_MUX_qua_mi2s_sclk, - LPI_MUX_qua_mi2s_ws, - LPI_MUX_swr_rx_clk, - LPI_MUX_swr_rx_data, - LPI_MUX_swr_tx_clk, - LPI_MUX_swr_tx_data, - LPI_MUX_wsa_swr_clk, - LPI_MUX_wsa_swr_data, - LPI_MUX_gpio, - LPI_MUX__, -}; - -static const unsigned int gpio0_pins[] = { 0 }; -static const unsigned int gpio1_pins[] = { 1 }; -static const unsigned int gpio2_pins[] = { 2 }; -static const unsigned int gpio3_pins[] = { 3 }; 
-static const unsigned int gpio4_pins[] = { 4 }; -static const unsigned int gpio5_pins[] = { 5 }; -static const unsigned int gpio6_pins[] = { 6 }; -static const unsigned int gpio7_pins[] = { 7 }; -static const unsigned int gpio8_pins[] = { 8 }; -static const unsigned int gpio9_pins[] = { 9 }; -static const unsigned int gpio10_pins[] = { 10 }; -static const unsigned int gpio11_pins[] = { 11 }; -static const unsigned int gpio12_pins[] = { 12 }; -static const unsigned int gpio13_pins[] = { 13 }; -static const char * const swr_tx_clk_groups[] = { "gpio0" }; -static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" }; -static const char * const swr_rx_clk_groups[] = { "gpio3" }; -static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; -static const char * const dmic1_clk_groups[] = { "gpio6" }; -static const char * const dmic1_data_groups[] = { "gpio7" }; -static const char * const dmic2_clk_groups[] = { "gpio8" }; -static const char * const dmic2_data_groups[] = { "gpio9" }; -static const char * const i2s2_clk_groups[] = { "gpio10" }; -static const char * const i2s2_ws_groups[] = { "gpio11" }; -static const char * const dmic3_clk_groups[] = { "gpio12" }; -static const char * const dmic3_data_groups[] = { "gpio13" }; -static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; -static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; -static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; -static const char * const i2s1_clk_groups[] = { "gpio6" }; -static const char * const i2s1_ws_groups[] = { "gpio7" }; -static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; -static const char * const wsa_swr_clk_groups[] = { "gpio10" }; -static const char * const wsa_swr_data_groups[] = { "gpio11" }; -static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" }; - -static const struct lpi_pingroup sm8250_groups[] = { - LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), - LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _), - LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), - LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _), - LPI_PINGROUP(6, NO_SLEW, dmic1_clk, i2s1_clk, _, _), - LPI_PINGROUP(7, NO_SLEW, dmic1_data, i2s1_ws, _, _), - LPI_PINGROUP(8, NO_SLEW, dmic2_clk, i2s1_data, _, _), - LPI_PINGROUP(9, NO_SLEW, dmic2_data, i2s1_data, _, _), - LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), - LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), - LPI_PINGROUP(12, NO_SLEW, dmic3_clk, i2s2_data, _, _), - LPI_PINGROUP(13, NO_SLEW, dmic3_data, i2s2_data, _, _), -}; - -static const struct lpi_function sm8250_functions[] = { - LPI_FUNCTION(dmic1_clk), - LPI_FUNCTION(dmic1_data), - LPI_FUNCTION(dmic2_clk), - LPI_FUNCTION(dmic2_data), - LPI_FUNCTION(dmic3_clk), - LPI_FUNCTION(dmic3_data), - LPI_FUNCTION(i2s1_clk), - LPI_FUNCTION(i2s1_data), - LPI_FUNCTION(i2s1_ws), - LPI_FUNCTION(i2s2_clk), - LPI_FUNCTION(i2s2_data), - LPI_FUNCTION(i2s2_ws), - LPI_FUNCTION(qua_mi2s_data), - LPI_FUNCTION(qua_mi2s_sclk), - LPI_FUNCTION(qua_mi2s_ws), - LPI_FUNCTION(swr_rx_clk), - LPI_FUNCTION(swr_rx_data), - LPI_FUNCTION(swr_tx_clk), - LPI_FUNCTION(swr_tx_data), - LPI_FUNCTION(wsa_swr_clk), - LPI_FUNCTION(wsa_swr_data), -}; - -static struct lpi_pinctrl_variant_data sm8250_lpi_data = { - .pins = sm8250_lpi_pins, - .npins = ARRAY_SIZE(sm8250_lpi_pins), - .groups = sm8250_groups, - .ngroups = ARRAY_SIZE(sm8250_groups), - 
.functions = sm8250_functions, - .nfunctions = ARRAY_SIZE(sm8250_functions), -}; - static int lpi_gpio_read(struct lpi_pinctrl *state, unsigned int pin, unsigned int addr) { @@ -250,38 +42,10 @@ static int lpi_gpio_write(struct lpi_pinctrl *state, unsigned int pin, return 0; } -static int lpi_gpio_get_groups_count(struct pinctrl_dev *pctldev) -{ - struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - - return pctrl->data->ngroups; -} - -static const char *lpi_gpio_get_group_name(struct pinctrl_dev *pctldev, - unsigned int group) -{ - struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - - return pctrl->data->groups[group].name; -} - -static int lpi_gpio_get_group_pins(struct pinctrl_dev *pctldev, - unsigned int group, - const unsigned int **pins, - unsigned int *num_pins) -{ - struct lpi_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - - *pins = pctrl->data->groups[group].pins; - *num_pins = pctrl->data->groups[group].npins; - - return 0; -} - static const struct pinctrl_ops lpi_gpio_pinctrl_ops = { - .get_groups_count = lpi_gpio_get_groups_count, - .get_group_name = lpi_gpio_get_group_name, - .get_group_pins = lpi_gpio_get_group_pins, + .get_groups_count = pinctrl_generic_get_group_count, + .get_group_name = pinctrl_generic_get_group_name, + .get_group_pins = pinctrl_generic_get_group_pins, .dt_node_to_map = pinconf_generic_dt_node_to_map_group, .dt_free_map = pinctrl_utils_free_map, }; @@ -435,7 +199,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group, } slew_offset = g->slew_offset; - if (slew_offset == NO_SLEW) + if (slew_offset == LPI_NO_SLEW) break; mutex_lock(&pctrl->slew_access_lock); @@ -582,7 +346,29 @@ static const struct gpio_chip lpi_gpio_template = { .dbg_show = lpi_gpio_dbg_show, }; -static int lpi_pinctrl_probe(struct platform_device *pdev) +static int lpi_build_pin_desc_groups(struct lpi_pinctrl *pctrl) +{ + int i, ret; + + for (i = 0; i < pctrl->data->npins; i++) { + const struct pinctrl_pin_desc *pin_info = pctrl->desc.pins + i; + + ret = pinctrl_generic_add_group(pctrl->ctrl, pin_info->name, + (int *)&pin_info->number, 1, NULL); + if (ret < 0) + goto err_pinctrl; + } + + return 0; + +err_pinctrl: + for (; i > 0; i--) + pinctrl_generic_remove_group(pctrl->ctrl, i - 1); + + return ret; +} + +int lpi_pinctrl_probe(struct platform_device *pdev) { const struct lpi_pinctrl_variant_data *data; struct device *dev = &pdev->dev; @@ -615,9 +401,13 @@ static int lpi_pinctrl_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(pctrl->slew_base), "Slew resource not provided\n"); - ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks); + if (data->is_clk_optional) + ret = devm_clk_bulk_get_optional(dev, MAX_LPI_NUM_CLKS, pctrl->clks); + else + ret = devm_clk_bulk_get(dev, MAX_LPI_NUM_CLKS, pctrl->clks); + if (ret) - return dev_err_probe(dev, ret, "Can't get clocks\n"); + return ret; ret = clk_bulk_prepare_enable(MAX_LPI_NUM_CLKS, pctrl->clks); if (ret) @@ -647,6 +437,10 @@ static int lpi_pinctrl_probe(struct platform_device *pdev) goto err_pinctrl; } + ret = lpi_build_pin_desc_groups(pctrl); + if (ret) + goto err_pinctrl; + ret = devm_gpiochip_add_data(dev, &pctrl->chip, pctrl); if (ret) { dev_err(pctrl->dev, "can't add gpio chip\n"); @@ -661,35 +455,22 @@ err_pinctrl: return ret; } +EXPORT_SYMBOL_GPL(lpi_pinctrl_probe); -static int lpi_pinctrl_remove(struct platform_device *pdev) +int lpi_pinctrl_remove(struct platform_device *pdev) { struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev); + int i; 
mutex_destroy(&pctrl->slew_access_lock); clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks); + for (i = 0; i < pctrl->data->npins; i++) + pinctrl_generic_remove_group(pctrl->ctrl, i); + return 0; } +EXPORT_SYMBOL_GPL(lpi_pinctrl_remove); -static const struct of_device_id lpi_pinctrl_of_match[] = { - { - .compatible = "qcom,sm8250-lpass-lpi-pinctrl", - .data = &sm8250_lpi_data, - }, - { } -}; -MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); - -static struct platform_driver lpi_pinctrl_driver = { - .driver = { - .name = "qcom-lpass-lpi-pinctrl", - .of_match_table = lpi_pinctrl_of_match, - }, - .probe = lpi_pinctrl_probe, - .remove = lpi_pinctrl_remove, -}; - -module_platform_driver(lpi_pinctrl_driver); MODULE_DESCRIPTION("QTI LPI GPIO pin control driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h new file mode 100644 index 000000000000..759d5d8da562 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2020 Linaro Ltd. + */ +#ifndef __PINCTRL_LPASS_LPI_H__ +#define __PINCTRL_LPASS_LPI_H__ + +#include <linux/bitops.h> +#include <linux/bitfield.h> +#include "../core.h" + +#define LPI_SLEW_RATE_CTL_REG 0xa000 +#define LPI_TLMM_REG_OFFSET 0x1000 +#define LPI_SLEW_RATE_MAX 0x03 +#define LPI_SLEW_BITS_SIZE 0x02 +#define LPI_SLEW_RATE_MASK GENMASK(1, 0) +#define LPI_GPIO_CFG_REG 0x00 +#define LPI_GPIO_PULL_MASK GENMASK(1, 0) +#define LPI_GPIO_FUNCTION_MASK GENMASK(5, 2) +#define LPI_GPIO_OUT_STRENGTH_MASK GENMASK(8, 6) +#define LPI_GPIO_OE_MASK BIT(9) +#define LPI_GPIO_VALUE_REG 0x04 +#define LPI_GPIO_VALUE_IN_MASK BIT(0) +#define LPI_GPIO_VALUE_OUT_MASK BIT(1) + +#define LPI_GPIO_BIAS_DISABLE 0x0 +#define LPI_GPIO_PULL_DOWN 0x1 +#define LPI_GPIO_KEEPER 0x2 +#define LPI_GPIO_PULL_UP 0x3 +#define LPI_GPIO_DS_TO_VAL(v) (v / 2 - 1) +#define LPI_NO_SLEW -1 + +#define LPI_FUNCTION(fname) \ + [LPI_MUX_##fname] = { \ + .name = #fname, \ + .groups = fname##_groups, \ + .ngroups = ARRAY_SIZE(fname##_groups), \ + } + +#define LPI_PINGROUP(id, soff, f1, f2, f3, f4) \ + { \ + .group.name = "gpio" #id, \ + .group.pins = gpio##id##_pins, \ + .pin = id, \ + .slew_offset = soff, \ + .group.num_pins = ARRAY_SIZE(gpio##id##_pins), \ + .funcs = (int[]){ \ + LPI_MUX_gpio, \ + LPI_MUX_##f1, \ + LPI_MUX_##f2, \ + LPI_MUX_##f3, \ + LPI_MUX_##f4, \ + }, \ + .nfuncs = 5, \ + } + +struct lpi_pingroup { + struct group_desc group; + unsigned int pin; + /* Bit offset in slew register for SoundWire pins only */ + int slew_offset; + unsigned int *funcs; + unsigned int nfuncs; +}; + +struct lpi_function { + const char *name; + const char * const *groups; + unsigned int ngroups; +}; + +struct lpi_pinctrl_variant_data { + const struct pinctrl_pin_desc *pins; + int npins; + const struct lpi_pingroup *groups; + int ngroups; + const struct lpi_function *functions; + int nfunctions; + bool is_clk_optional; +}; + +int lpi_pinctrl_probe(struct platform_device *pdev); +int lpi_pinctrl_remove(struct platform_device *pdev); + +#endif /*__PINCTRL_LPASS_LPI_H__*/ diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c new file mode 100644 index 000000000000..2add9a4520c2 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020-2021, The Linux 
Foundation. All rights reserved. + * ALSA SoC platform-machine driver for QTi LPASS + */ + +#include <linux/gpio/driver.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "pinctrl-lpass-lpi.h" + +enum lpass_lpi_functions { + LPI_MUX_dmic1_clk, + LPI_MUX_dmic1_data, + LPI_MUX_dmic2_clk, + LPI_MUX_dmic2_data, + LPI_MUX_dmic3_clk, + LPI_MUX_dmic3_data, + LPI_MUX_i2s1_clk, + LPI_MUX_i2s1_data, + LPI_MUX_i2s1_ws, + LPI_MUX_i2s2_clk, + LPI_MUX_i2s2_data, + LPI_MUX_i2s2_ws, + LPI_MUX_qua_mi2s_data, + LPI_MUX_qua_mi2s_sclk, + LPI_MUX_qua_mi2s_ws, + LPI_MUX_swr_rx_clk, + LPI_MUX_swr_rx_data, + LPI_MUX_swr_tx_clk, + LPI_MUX_swr_tx_data, + LPI_MUX_wsa_swr_clk, + LPI_MUX_wsa_swr_data, + LPI_MUX_gpio, + LPI_MUX__, +}; + +static int gpio0_pins[] = { 0 }; +static int gpio1_pins[] = { 1 }; +static int gpio2_pins[] = { 2 }; +static int gpio3_pins[] = { 3 }; +static int gpio4_pins[] = { 4 }; +static int gpio5_pins[] = { 5 }; +static int gpio6_pins[] = { 6 }; +static int gpio7_pins[] = { 7 }; +static int gpio8_pins[] = { 8 }; +static int gpio9_pins[] = { 9 }; +static int gpio10_pins[] = { 10 }; +static int gpio11_pins[] = { 11 }; +static int gpio12_pins[] = { 12 }; +static int gpio13_pins[] = { 13 }; +static int gpio14_pins[] = { 14 }; + +static const struct pinctrl_pin_desc sc7280_lpi_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), + PINCTRL_PIN(14, "gpio14"), +}; + +static const char * const swr_tx_clk_groups[] = { "gpio0" }; +static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" }; +static const char * const swr_rx_clk_groups[] = { "gpio3" }; +static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; +static const char * const dmic1_clk_groups[] = { "gpio6" }; +static const char * const dmic1_data_groups[] = { "gpio7" }; +static const char * const dmic2_clk_groups[] = { "gpio8" }; +static const char * const dmic2_data_groups[] = { "gpio9" }; +static const char * const i2s2_clk_groups[] = { "gpio10" }; +static const char * const i2s2_ws_groups[] = { "gpio11" }; +static const char * const dmic3_clk_groups[] = { "gpio12" }; +static const char * const dmic3_data_groups[] = { "gpio13" }; +static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; +static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; +static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; +static const char * const i2s1_clk_groups[] = { "gpio6" }; +static const char * const i2s1_ws_groups[] = { "gpio7" }; +static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; +static const char * const wsa_swr_clk_groups[] = { "gpio10" }; +static const char * const wsa_swr_data_groups[] = { "gpio11" }; +static const char * const i2s2_data_groups[] = { "gpio12", "gpio13" }; + +static const struct lpi_pingroup sc7280_groups[] = { + LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), + LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _), + LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), + LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(5, 12, swr_rx_data, _, _, _), + LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _), 
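/*
 * Illustrative sketch (not part of the patch): what the second LPI_PINGROUP()
 * argument (the slew offset) is used for. In lpi_config_set(), entries marked
 * LPI_NO_SLEW are skipped, while the others get a 2-bit slew value written at
 * that bit offset of LPI_SLEW_RATE_CTL_REG under slew_access_lock. The EX_*
 * names and the helper below are made up; the kernel does the same bit math
 * on the ioremapped register rather than on a cached value.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_SLEW_RATE_MASK	0x3	/* mirrors LPI_SLEW_RATE_MASK (2 bits) */
#define EX_NO_SLEW		-1	/* mirrors LPI_NO_SLEW */

/* Return the register value with the 2-bit slew field for one pin updated,
 * or unchanged for pins that have no slew field. */
static uint32_t ex_set_slew(uint32_t regval, int slew_offset, uint32_t rate)
{
	if (slew_offset == EX_NO_SLEW)
		return regval;

	regval &= ~((uint32_t)EX_SLEW_RATE_MASK << slew_offset);
	regval |= (rate & EX_SLEW_RATE_MASK) << slew_offset;
	return regval;
}

int main(void)
{
	/* gpio5 has slew_offset 12 in the sc7280 table above. */
	printf("0x%08x\n", ex_set_slew(0, 12, 2));	/* -> 0x00002000 */
	return 0;
}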
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _), + LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _), + LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _), + LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), + LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), + LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _), + LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _), + LPI_PINGROUP(14, 6, swr_tx_data, _, _, _), +}; + +static const struct lpi_function sc7280_functions[] = { + LPI_FUNCTION(dmic1_clk), + LPI_FUNCTION(dmic1_data), + LPI_FUNCTION(dmic2_clk), + LPI_FUNCTION(dmic2_data), + LPI_FUNCTION(dmic3_clk), + LPI_FUNCTION(dmic3_data), + LPI_FUNCTION(i2s1_clk), + LPI_FUNCTION(i2s1_data), + LPI_FUNCTION(i2s1_ws), + LPI_FUNCTION(i2s2_clk), + LPI_FUNCTION(i2s2_data), + LPI_FUNCTION(i2s2_ws), + LPI_FUNCTION(qua_mi2s_data), + LPI_FUNCTION(qua_mi2s_sclk), + LPI_FUNCTION(qua_mi2s_ws), + LPI_FUNCTION(swr_rx_clk), + LPI_FUNCTION(swr_rx_data), + LPI_FUNCTION(swr_tx_clk), + LPI_FUNCTION(swr_tx_data), + LPI_FUNCTION(wsa_swr_clk), + LPI_FUNCTION(wsa_swr_data), +}; + +static const struct lpi_pinctrl_variant_data sc7280_lpi_data = { + .pins = sc7280_lpi_pins, + .npins = ARRAY_SIZE(sc7280_lpi_pins), + .groups = sc7280_groups, + .ngroups = ARRAY_SIZE(sc7280_groups), + .functions = sc7280_functions, + .nfunctions = ARRAY_SIZE(sc7280_functions), + .is_clk_optional = true, +}; + +static const struct of_device_id lpi_pinctrl_of_match[] = { + { + .compatible = "qcom,sc7280-lpass-lpi-pinctrl", + .data = &sc7280_lpi_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); + +static struct platform_driver lpi_pinctrl_driver = { + .driver = { + .name = "qcom-sc7280-lpass-lpi-pinctrl", + .of_match_table = lpi_pinctrl_of_match, + }, + .probe = lpi_pinctrl_probe, + .remove = lpi_pinctrl_remove, +}; + +module_platform_driver(lpi_pinctrl_driver); +MODULE_DESCRIPTION("QTI SC7280 LPI GPIO pin control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c index 7359bae68c69..1cc622694553 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8150.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c @@ -1500,6 +1500,25 @@ static const struct msm_pingroup sm8150_groups[] = { [178] = SDC_QDSD_PINGROUP(sdc2_data, 0xB2000, 9, 0), }; +static const struct msm_gpio_wakeirq_map sm8150_pdc_map[] = { + { 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, + { 12, 104 }, { 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, + { 30, 39 }, { 36, 43 }, { 37, 44 }, { 38, 30 }, { 39, 118 }, + { 39, 125 }, { 41, 47 }, { 42, 48 }, { 46, 50 }, { 47, 49 }, + { 48, 51 }, { 49, 53 }, { 50, 52 }, { 51, 116 }, { 51, 123 }, + { 53, 54 }, { 54, 55 }, { 55, 56 }, { 56, 57 }, { 58, 58 }, + { 60, 60 }, { 61, 61 }, { 68, 62 }, { 70, 63 }, { 76, 71 }, + { 77, 66 }, { 81, 64 }, { 83, 65 }, { 86, 67 }, { 87, 84 }, + { 88, 117 }, { 88, 124 }, { 90, 69 }, { 91, 70 }, { 93, 75 }, + { 95, 72 }, { 96, 73 }, { 97, 74 }, { 101, 40 }, { 103, 77 }, + { 104, 78 }, { 108, 79 }, { 112, 80 }, { 113, 81 }, { 114, 82 }, + { 117, 85 }, { 118, 101 }, { 119, 87 }, { 120, 88 }, { 121, 89 }, + { 122, 90 }, { 123, 91 }, { 124, 92 }, { 125, 93 }, { 129, 94 }, + { 132, 105 }, { 133, 83 }, { 134, 36 }, { 136, 97 }, { 142, 103 }, + { 144, 115 }, { 144, 122 }, { 147, 102 }, { 150, 107 }, + { 152, 108 }, { 153, 109 } +}; + static const struct msm_pinctrl_soc_data sm8150_pinctrl = { .pins = sm8150_pins, .npins = ARRAY_SIZE(sm8150_pins), @@ -1510,6 +1529,9 @@ static const struct 
msm_pinctrl_soc_data sm8150_pinctrl = { .ngpios = 176, .tiles = sm8150_tiles, .ntiles = ARRAY_SIZE(sm8150_tiles), + .wakeirq_map = sm8150_pdc_map, + .nwakeirq_map = ARRAY_SIZE(sm8150_pdc_map), + .wakeirq_dual_edge_errata = true, }; static int sm8150_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c new file mode 100644 index 000000000000..ddbc6317f2a7 --- /dev/null +++ b/drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2020 Linaro Ltd. + */ + +#include <linux/gpio/driver.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include "pinctrl-lpass-lpi.h" + +enum lpass_lpi_functions { + LPI_MUX_dmic1_clk, + LPI_MUX_dmic1_data, + LPI_MUX_dmic2_clk, + LPI_MUX_dmic2_data, + LPI_MUX_dmic3_clk, + LPI_MUX_dmic3_data, + LPI_MUX_i2s1_clk, + LPI_MUX_i2s1_data, + LPI_MUX_i2s1_ws, + LPI_MUX_i2s2_clk, + LPI_MUX_i2s2_data, + LPI_MUX_i2s2_ws, + LPI_MUX_qua_mi2s_data, + LPI_MUX_qua_mi2s_sclk, + LPI_MUX_qua_mi2s_ws, + LPI_MUX_swr_rx_clk, + LPI_MUX_swr_rx_data, + LPI_MUX_swr_tx_clk, + LPI_MUX_swr_tx_data, + LPI_MUX_wsa_swr_clk, + LPI_MUX_wsa_swr_data, + LPI_MUX_gpio, + LPI_MUX__, +}; + +static int gpio0_pins[] = { 0 }; +static int gpio1_pins[] = { 1 }; +static int gpio2_pins[] = { 2 }; +static int gpio3_pins[] = { 3 }; +static int gpio4_pins[] = { 4 }; +static int gpio5_pins[] = { 5 }; +static int gpio6_pins[] = { 6 }; +static int gpio7_pins[] = { 7 }; +static int gpio8_pins[] = { 8 }; +static int gpio9_pins[] = { 9 }; +static int gpio10_pins[] = { 10 }; +static int gpio11_pins[] = { 11 }; +static int gpio12_pins[] = { 12 }; +static int gpio13_pins[] = { 13 }; + +static const struct pinctrl_pin_desc sm8250_lpi_pins[] = { + PINCTRL_PIN(0, "gpio0"), + PINCTRL_PIN(1, "gpio1"), + PINCTRL_PIN(2, "gpio2"), + PINCTRL_PIN(3, "gpio3"), + PINCTRL_PIN(4, "gpio4"), + PINCTRL_PIN(5, "gpio5"), + PINCTRL_PIN(6, "gpio6"), + PINCTRL_PIN(7, "gpio7"), + PINCTRL_PIN(8, "gpio8"), + PINCTRL_PIN(9, "gpio9"), + PINCTRL_PIN(10, "gpio10"), + PINCTRL_PIN(11, "gpio11"), + PINCTRL_PIN(12, "gpio12"), + PINCTRL_PIN(13, "gpio13"), +}; + +static const char * const swr_tx_clk_groups[] = { "gpio0" }; +static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5" }; +static const char * const swr_rx_clk_groups[] = { "gpio3" }; +static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; +static const char * const dmic1_clk_groups[] = { "gpio6" }; +static const char * const dmic1_data_groups[] = { "gpio7" }; +static const char * const dmic2_clk_groups[] = { "gpio8" }; +static const char * const dmic2_data_groups[] = { "gpio9" }; +static const char * const i2s2_clk_groups[] = { "gpio10" }; +static const char * const i2s2_ws_groups[] = { "gpio11" }; +static const char * const dmic3_clk_groups[] = { "gpio12" }; +static const char * const dmic3_data_groups[] = { "gpio13" }; +static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; +static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; +static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; +static const char * const i2s1_clk_groups[] = { "gpio6" }; +static const char * const i2s1_ws_groups[] = { "gpio7" }; +static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; +static const char * const wsa_swr_clk_groups[] = { "gpio10" }; +static const char * 
const wsa_swr_data_groups[] = { "gpio11" }; +static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" }; + +static const struct lpi_pingroup sm8250_groups[] = { + LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), + LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _), + LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), + LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), + LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _), + LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _), + LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _), + LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _), + LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _), + LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), + LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), + LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _), + LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _), +}; + +static const struct lpi_function sm8250_functions[] = { + LPI_FUNCTION(dmic1_clk), + LPI_FUNCTION(dmic1_data), + LPI_FUNCTION(dmic2_clk), + LPI_FUNCTION(dmic2_data), + LPI_FUNCTION(dmic3_clk), + LPI_FUNCTION(dmic3_data), + LPI_FUNCTION(i2s1_clk), + LPI_FUNCTION(i2s1_data), + LPI_FUNCTION(i2s1_ws), + LPI_FUNCTION(i2s2_clk), + LPI_FUNCTION(i2s2_data), + LPI_FUNCTION(i2s2_ws), + LPI_FUNCTION(qua_mi2s_data), + LPI_FUNCTION(qua_mi2s_sclk), + LPI_FUNCTION(qua_mi2s_ws), + LPI_FUNCTION(swr_rx_clk), + LPI_FUNCTION(swr_rx_data), + LPI_FUNCTION(swr_tx_clk), + LPI_FUNCTION(swr_tx_data), + LPI_FUNCTION(wsa_swr_clk), + LPI_FUNCTION(wsa_swr_data), +}; + +static const struct lpi_pinctrl_variant_data sm8250_lpi_data = { + .pins = sm8250_lpi_pins, + .npins = ARRAY_SIZE(sm8250_lpi_pins), + .groups = sm8250_groups, + .ngroups = ARRAY_SIZE(sm8250_groups), + .functions = sm8250_functions, + .nfunctions = ARRAY_SIZE(sm8250_functions), +}; + +static const struct of_device_id lpi_pinctrl_of_match[] = { + { + .compatible = "qcom,sm8250-lpass-lpi-pinctrl", + .data = &sm8250_lpi_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); + +static struct platform_driver lpi_pinctrl_driver = { + .driver = { + .name = "qcom-sm8250-lpass-lpi-pinctrl", + .of_match_table = lpi_pinctrl_of_match, + }, + .probe = lpi_pinctrl_probe, + .remove = lpi_pinctrl_remove, +}; + +module_platform_driver(lpi_pinctrl_driver); +MODULE_DESCRIPTION("QTI SM8250 LPI GPIO pin control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 4fbf8d3938ef..fd5fff9adff0 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -1146,6 +1146,7 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm660-gpio", .data = (void *) 13 }, /* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */ { .compatible = "qcom,pm660l-gpio", .data = (void *) 12 }, + { .compatible = "qcom,pm6125-gpio", .data = (void *) 9 }, { .compatible = "qcom,pm6150-gpio", .data = (void *) 10 }, { .compatible = "qcom,pm6150l-gpio", .data = (void *) 12 }, { .compatible = "qcom,pm6350-gpio", .data = (void *) 9 }, @@ -1183,6 +1184,7 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pms405-gpio", .data = (void *) 12 }, /* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */ { .compatible = "qcom,pmx55-gpio", .data = (void *) 11 }, + { .compatible = "qcom,pmx65-gpio", .data = (void *) 16 }, { }, }; diff --git 
a/drivers/pinctrl/ralink/Kconfig b/drivers/pinctrl/ralink/Kconfig index a76ee3deb8c3..1e4c5e43d69b 100644 --- a/drivers/pinctrl/ralink/Kconfig +++ b/drivers/pinctrl/ralink/Kconfig @@ -3,37 +3,33 @@ menu "Ralink pinctrl drivers" depends on RALINK config PINCTRL_RALINK - bool "Ralink pin control support" - default y if RALINK - -config PINCTRL_RT2880 - bool "RT2880 pinctrl driver for RALINK/Mediatek SOCs" + bool "Ralink pinctrl driver" select PINMUX select GENERIC_PINCONF config PINCTRL_MT7620 - bool "mt7620 pinctrl driver for RALINK/Mediatek SOCs" + bool "MT7620 pinctrl subdriver" depends on RALINK && SOC_MT7620 - select PINCTRL_RT2880 + select PINCTRL_RALINK config PINCTRL_MT7621 - bool "mt7621 pinctrl driver for RALINK/Mediatek SOCs" + bool "MT7621 pinctrl subdriver" depends on RALINK && SOC_MT7621 - select PINCTRL_RT2880 + select PINCTRL_RALINK -config PINCTRL_RT288X - bool "RT288X pinctrl driver for RALINK/Mediatek SOCs" +config PINCTRL_RT2880 + bool "RT2880 pinctrl subdriver" depends on RALINK && SOC_RT288X - select PINCTRL_RT2880 + select PINCTRL_RALINK config PINCTRL_RT305X - bool "RT305X pinctrl driver for RALINK/Mediatek SOCs" + bool "RT305X pinctrl subdriver" depends on RALINK && SOC_RT305X - select PINCTRL_RT2880 + select PINCTRL_RALINK config PINCTRL_RT3883 - bool "RT3883 pinctrl driver for RALINK/Mediatek SOCs" + bool "RT3883 pinctrl subdriver" depends on RALINK && SOC_RT3883 - select PINCTRL_RT2880 + select PINCTRL_RALINK endmenu diff --git a/drivers/pinctrl/ralink/Makefile b/drivers/pinctrl/ralink/Makefile index a15610206ced..0ebbe552526d 100644 --- a/drivers/pinctrl/ralink/Makefile +++ b/drivers/pinctrl/ralink/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o +obj-$(CONFIG_PINCTRL_RALINK) += pinctrl-ralink.o obj-$(CONFIG_PINCTRL_MT7620) += pinctrl-mt7620.o obj-$(CONFIG_PINCTRL_MT7621) += pinctrl-mt7621.o -obj-$(CONFIG_PINCTRL_RT288X) += pinctrl-rt288x.o +obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o obj-$(CONFIG_PINCTRL_RT305X) += pinctrl-rt305x.o obj-$(CONFIG_PINCTRL_RT3883) += pinctrl-rt3883.o diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c index 6853b5b8b0fe..22ff16eff02f 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7620.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c @@ -5,7 +5,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h" #define MT7620_GPIO_MODE_UART0_SHIFT 2 #define MT7620_GPIO_MODE_UART0_MASK 0x7 @@ -54,20 +54,20 @@ #define MT7620_GPIO_MODE_EPHY 15 #define MT7620_GPIO_MODE_PA 20 -static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func mdio_grp[] = { +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2), FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2), }; -static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; -static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; -static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; -static struct rt2880_pmx_func rgmii2_grp[] = 
{ FUNC("rgmii2", 0, 60, 12) }; -static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; -static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; -static struct rt2880_pmx_func uartf_grp[] = { +static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 24, 12) }; +static struct ralink_pmx_func refclk_func[] = { FUNC("spi refclk", 0, 37, 3) }; +static struct ralink_pmx_func ephy_func[] = { FUNC("ephy", 0, 40, 5) }; +static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 60, 12) }; +static struct ralink_pmx_func wled_func[] = { FUNC("wled", 0, 72, 1) }; +static struct ralink_pmx_func pa_func[] = { FUNC("pa", 0, 18, 4) }; +static struct ralink_pmx_func uartf_func[] = { FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8), @@ -76,316 +76,316 @@ static struct rt2880_pmx_func uartf_grp[] = { FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func wdt_grp[] = { +static struct ralink_pmx_func wdt_func[] = { FUNC("wdt rst", 0, 17, 1), FUNC("wdt refclk", 0, 17, 1), }; -static struct rt2880_pmx_func pcie_rst_grp[] = { +static struct ralink_pmx_func pcie_rst_func[] = { FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) }; -static struct rt2880_pmx_func nd_sd_grp[] = { +static struct ralink_pmx_func nd_sd_func[] = { FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) }; -static struct rt2880_pmx_group mt7620a_pinmux_data[] = { - GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), - GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, +static struct ralink_pmx_group mt7620a_pinmux_data[] = { + GRP("i2c", i2c_func, 1, MT7620_GPIO_MODE_I2C), + GRP("uartf", uartf_func, MT7620_GPIO_MODE_UART0_MASK, MT7620_GPIO_MODE_UART0_SHIFT), - GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI), - GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1), - GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK, + GRP("spi", spi_func, 1, MT7620_GPIO_MODE_SPI), + GRP("uartlite", uartlite_func, 1, MT7620_GPIO_MODE_UART1), + GRP_G("wdt", wdt_func, MT7620_GPIO_MODE_WDT_MASK, MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT), - GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK, + GRP_G("mdio", mdio_func, MT7620_GPIO_MODE_MDIO_MASK, MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT), - GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1), - GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK), - GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK, + GRP("rgmii1", rgmii1_func, 1, MT7620_GPIO_MODE_RGMII1), + GRP("spi refclk", refclk_func, 1, MT7620_GPIO_MODE_SPI_REF_CLK), + GRP_G("pcie", pcie_rst_func, MT7620_GPIO_MODE_PCIE_MASK, MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT), - GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK, + GRP_G("nd_sd", nd_sd_func, MT7620_GPIO_MODE_ND_SD_MASK, MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT), - GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2), - GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED), - GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY), - GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA), + GRP("rgmii2", rgmii2_func, 1, MT7620_GPIO_MODE_RGMII2), + GRP("wled", wled_func, 1, MT7620_GPIO_MODE_WLED), + GRP("ephy", ephy_func, 1, MT7620_GPIO_MODE_EPHY), + GRP("pa", pa_func, 1, MT7620_GPIO_MODE_PA), { 0 } }; -static struct rt2880_pmx_func pwm1_grp_mt7628[] = 
{ +static struct ralink_pmx_func pwm1_func_mt76x8[] = { FUNC("sdxc d6", 3, 19, 1), FUNC("utif", 2, 19, 1), FUNC("gpio", 1, 19, 1), FUNC("pwm1", 0, 19, 1), }; -static struct rt2880_pmx_func pwm0_grp_mt7628[] = { +static struct ralink_pmx_func pwm0_func_mt76x8[] = { FUNC("sdxc d7", 3, 18, 1), FUNC("utif", 2, 18, 1), FUNC("gpio", 1, 18, 1), FUNC("pwm0", 0, 18, 1), }; -static struct rt2880_pmx_func uart2_grp_mt7628[] = { +static struct ralink_pmx_func uart2_func_mt76x8[] = { FUNC("sdxc d5 d4", 3, 20, 2), FUNC("pwm", 2, 20, 2), FUNC("gpio", 1, 20, 2), FUNC("uart2", 0, 20, 2), }; -static struct rt2880_pmx_func uart1_grp_mt7628[] = { +static struct ralink_pmx_func uart1_func_mt76x8[] = { FUNC("sw_r", 3, 45, 2), FUNC("pwm", 2, 45, 2), FUNC("gpio", 1, 45, 2), FUNC("uart1", 0, 45, 2), }; -static struct rt2880_pmx_func i2c_grp_mt7628[] = { +static struct ralink_pmx_func i2c_func_mt76x8[] = { FUNC("-", 3, 4, 2), FUNC("debug", 2, 4, 2), FUNC("gpio", 1, 4, 2), FUNC("i2c", 0, 4, 2), }; -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; -static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; -static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; +static struct ralink_pmx_func refclk_func_mt76x8[] = { FUNC("refclk", 0, 37, 1) }; +static struct ralink_pmx_func perst_func_mt76x8[] = { FUNC("perst", 0, 36, 1) }; +static struct ralink_pmx_func wdt_func_mt76x8[] = { FUNC("wdt", 0, 38, 1) }; +static struct ralink_pmx_func spi_func_mt76x8[] = { FUNC("spi", 0, 7, 4) }; -static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { +static struct ralink_pmx_func sd_mode_func_mt76x8[] = { FUNC("jtag", 3, 22, 8), FUNC("utif", 2, 22, 8), FUNC("gpio", 1, 22, 8), FUNC("sdxc", 0, 22, 8), }; -static struct rt2880_pmx_func uart0_grp_mt7628[] = { +static struct ralink_pmx_func uart0_func_mt76x8[] = { FUNC("-", 3, 12, 2), FUNC("-", 2, 12, 2), FUNC("gpio", 1, 12, 2), FUNC("uart0", 0, 12, 2), }; -static struct rt2880_pmx_func i2s_grp_mt7628[] = { +static struct ralink_pmx_func i2s_func_mt76x8[] = { FUNC("antenna", 3, 0, 4), FUNC("pcm", 2, 0, 4), FUNC("gpio", 1, 0, 4), FUNC("i2s", 0, 0, 4), }; -static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { +static struct ralink_pmx_func spi_cs1_func_mt76x8[] = { FUNC("-", 3, 6, 1), FUNC("refclk", 2, 6, 1), FUNC("gpio", 1, 6, 1), FUNC("spi cs1", 0, 6, 1), }; -static struct rt2880_pmx_func spis_grp_mt7628[] = { +static struct ralink_pmx_func spis_func_mt76x8[] = { FUNC("pwm_uart2", 3, 14, 4), FUNC("utif", 2, 14, 4), FUNC("gpio", 1, 14, 4), FUNC("spis", 0, 14, 4), }; -static struct rt2880_pmx_func gpio_grp_mt7628[] = { +static struct ralink_pmx_func gpio_func_mt76x8[] = { FUNC("pcie", 3, 11, 1), FUNC("refclk", 2, 11, 1), FUNC("gpio", 1, 11, 1), FUNC("gpio", 0, 11, 1), }; -static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p4led_kn_func_mt76x8[] = { FUNC("jtag", 3, 30, 1), FUNC("utif", 2, 30, 1), FUNC("gpio", 1, 30, 1), FUNC("p4led_kn", 0, 30, 1), }; -static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p3led_kn_func_mt76x8[] = { FUNC("jtag", 3, 31, 1), FUNC("utif", 2, 31, 1), FUNC("gpio", 1, 31, 1), FUNC("p3led_kn", 0, 31, 1), }; -static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p2led_kn_func_mt76x8[] = { FUNC("jtag", 3, 32, 1), FUNC("utif", 2, 32, 1), FUNC("gpio", 1, 32, 1), FUNC("p2led_kn", 0, 32, 1), }; -static struct rt2880_pmx_func 
p1led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p1led_kn_func_mt76x8[] = { FUNC("jtag", 3, 33, 1), FUNC("utif", 2, 33, 1), FUNC("gpio", 1, 33, 1), FUNC("p1led_kn", 0, 33, 1), }; -static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p0led_kn_func_mt76x8[] = { FUNC("jtag", 3, 34, 1), FUNC("rsvd", 2, 34, 1), FUNC("gpio", 1, 34, 1), FUNC("p0led_kn", 0, 34, 1), }; -static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { +static struct ralink_pmx_func wled_kn_func_mt76x8[] = { FUNC("rsvd", 3, 35, 1), FUNC("rsvd", 2, 35, 1), FUNC("gpio", 1, 35, 1), FUNC("wled_kn", 0, 35, 1), }; -static struct rt2880_pmx_func p4led_an_grp_mt7628[] = { +static struct ralink_pmx_func p4led_an_func_mt76x8[] = { FUNC("jtag", 3, 39, 1), FUNC("utif", 2, 39, 1), FUNC("gpio", 1, 39, 1), FUNC("p4led_an", 0, 39, 1), }; -static struct rt2880_pmx_func p3led_an_grp_mt7628[] = { +static struct ralink_pmx_func p3led_an_func_mt76x8[] = { FUNC("jtag", 3, 40, 1), FUNC("utif", 2, 40, 1), FUNC("gpio", 1, 40, 1), FUNC("p3led_an", 0, 40, 1), }; -static struct rt2880_pmx_func p2led_an_grp_mt7628[] = { +static struct ralink_pmx_func p2led_an_func_mt76x8[] = { FUNC("jtag", 3, 41, 1), FUNC("utif", 2, 41, 1), FUNC("gpio", 1, 41, 1), FUNC("p2led_an", 0, 41, 1), }; -static struct rt2880_pmx_func p1led_an_grp_mt7628[] = { +static struct ralink_pmx_func p1led_an_func_mt76x8[] = { FUNC("jtag", 3, 42, 1), FUNC("utif", 2, 42, 1), FUNC("gpio", 1, 42, 1), FUNC("p1led_an", 0, 42, 1), }; -static struct rt2880_pmx_func p0led_an_grp_mt7628[] = { +static struct ralink_pmx_func p0led_an_func_mt76x8[] = { FUNC("jtag", 3, 43, 1), FUNC("rsvd", 2, 43, 1), FUNC("gpio", 1, 43, 1), FUNC("p0led_an", 0, 43, 1), }; -static struct rt2880_pmx_func wled_an_grp_mt7628[] = { +static struct ralink_pmx_func wled_an_func_mt76x8[] = { FUNC("rsvd", 3, 44, 1), FUNC("rsvd", 2, 44, 1), FUNC("gpio", 1, 44, 1), FUNC("wled_an", 0, 44, 1), }; -#define MT7628_GPIO_MODE_MASK 0x3 - -#define MT7628_GPIO_MODE_P4LED_KN 58 -#define MT7628_GPIO_MODE_P3LED_KN 56 -#define MT7628_GPIO_MODE_P2LED_KN 54 -#define MT7628_GPIO_MODE_P1LED_KN 52 -#define MT7628_GPIO_MODE_P0LED_KN 50 -#define MT7628_GPIO_MODE_WLED_KN 48 -#define MT7628_GPIO_MODE_P4LED_AN 42 -#define MT7628_GPIO_MODE_P3LED_AN 40 -#define MT7628_GPIO_MODE_P2LED_AN 38 -#define MT7628_GPIO_MODE_P1LED_AN 36 -#define MT7628_GPIO_MODE_P0LED_AN 34 -#define MT7628_GPIO_MODE_WLED_AN 32 -#define MT7628_GPIO_MODE_PWM1 30 -#define MT7628_GPIO_MODE_PWM0 28 -#define MT7628_GPIO_MODE_UART2 26 -#define MT7628_GPIO_MODE_UART1 24 -#define MT7628_GPIO_MODE_I2C 20 -#define MT7628_GPIO_MODE_REFCLK 18 -#define MT7628_GPIO_MODE_PERST 16 -#define MT7628_GPIO_MODE_WDT 14 -#define MT7628_GPIO_MODE_SPI 12 -#define MT7628_GPIO_MODE_SDMODE 10 -#define MT7628_GPIO_MODE_UART0 8 -#define MT7628_GPIO_MODE_I2S 6 -#define MT7628_GPIO_MODE_CS1 4 -#define MT7628_GPIO_MODE_SPIS 2 -#define MT7628_GPIO_MODE_GPIO 0 - -static struct rt2880_pmx_group mt7628an_pinmux_data[] = { - GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_PWM1), - GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_PWM0), - GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART2), - GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART1), - GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_I2C), - GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK), - GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST), - GRP("wdt", 
wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT), - GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI), - GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_SDMODE), - GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART0), - GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_I2S), - GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_CS1), - GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_SPIS), - GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_GPIO), - GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_WLED_AN), - GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P0LED_AN), - GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P1LED_AN), - GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P2LED_AN), - GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P3LED_AN), - GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P4LED_AN), - GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_WLED_KN), - GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P0LED_KN), - GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P1LED_KN), - GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P2LED_KN), - GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P3LED_KN), - GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P4LED_KN), +#define MT76X8_GPIO_MODE_MASK 0x3 + +#define MT76X8_GPIO_MODE_P4LED_KN 58 +#define MT76X8_GPIO_MODE_P3LED_KN 56 +#define MT76X8_GPIO_MODE_P2LED_KN 54 +#define MT76X8_GPIO_MODE_P1LED_KN 52 +#define MT76X8_GPIO_MODE_P0LED_KN 50 +#define MT76X8_GPIO_MODE_WLED_KN 48 +#define MT76X8_GPIO_MODE_P4LED_AN 42 +#define MT76X8_GPIO_MODE_P3LED_AN 40 +#define MT76X8_GPIO_MODE_P2LED_AN 38 +#define MT76X8_GPIO_MODE_P1LED_AN 36 +#define MT76X8_GPIO_MODE_P0LED_AN 34 +#define MT76X8_GPIO_MODE_WLED_AN 32 +#define MT76X8_GPIO_MODE_PWM1 30 +#define MT76X8_GPIO_MODE_PWM0 28 +#define MT76X8_GPIO_MODE_UART2 26 +#define MT76X8_GPIO_MODE_UART1 24 +#define MT76X8_GPIO_MODE_I2C 20 +#define MT76X8_GPIO_MODE_REFCLK 18 +#define MT76X8_GPIO_MODE_PERST 16 +#define MT76X8_GPIO_MODE_WDT 14 +#define MT76X8_GPIO_MODE_SPI 12 +#define MT76X8_GPIO_MODE_SDMODE 10 +#define MT76X8_GPIO_MODE_UART0 8 +#define MT76X8_GPIO_MODE_I2S 6 +#define MT76X8_GPIO_MODE_CS1 4 +#define MT76X8_GPIO_MODE_SPIS 2 +#define MT76X8_GPIO_MODE_GPIO 0 + +static struct ralink_pmx_group mt76x8_pinmux_data[] = { + GRP_G("pwm1", pwm1_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_PWM1), + GRP_G("pwm0", pwm0_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_PWM0), + GRP_G("uart2", uart2_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART2), + GRP_G("uart1", uart1_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART1), + GRP_G("i2c", i2c_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_I2C), + GRP("refclk", refclk_func_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK), + GRP("perst", perst_func_mt76x8, 1, MT76X8_GPIO_MODE_PERST), + GRP("wdt", wdt_func_mt76x8, 1, MT76X8_GPIO_MODE_WDT), + GRP("spi", spi_func_mt76x8, 1, MT76X8_GPIO_MODE_SPI), + GRP_G("sdmode", sd_mode_func_mt76x8, 
MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_SDMODE), + GRP_G("uart0", uart0_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART0), + GRP_G("i2s", i2s_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_I2S), + GRP_G("spi cs1", spi_cs1_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_CS1), + GRP_G("spis", spis_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_SPIS), + GRP_G("gpio", gpio_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_GPIO), + GRP_G("wled_an", wled_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_WLED_AN), + GRP_G("p0led_an", p0led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P0LED_AN), + GRP_G("p1led_an", p1led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P1LED_AN), + GRP_G("p2led_an", p2led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P2LED_AN), + GRP_G("p3led_an", p3led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P3LED_AN), + GRP_G("p4led_an", p4led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P4LED_AN), + GRP_G("wled_kn", wled_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_WLED_KN), + GRP_G("p0led_kn", p0led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P0LED_KN), + GRP_G("p1led_kn", p1led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P1LED_KN), + GRP_G("p2led_kn", p2led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P2LED_KN), + GRP_G("p3led_kn", p3led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P3LED_KN), + GRP_G("p4led_kn", p4led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P4LED_KN), { 0 } }; -static int mt7620_pinmux_probe(struct platform_device *pdev) +static int mt7620_pinctrl_probe(struct platform_device *pdev) { if (is_mt76x8()) - return rt2880_pinmux_init(pdev, mt7628an_pinmux_data); + return ralink_pinctrl_init(pdev, mt76x8_pinmux_data); else - return rt2880_pinmux_init(pdev, mt7620a_pinmux_data); + return ralink_pinctrl_init(pdev, mt7620a_pinmux_data); } -static const struct of_device_id mt7620_pinmux_match[] = { - { .compatible = "ralink,rt2880-pinmux" }, +static const struct of_device_id mt7620_pinctrl_match[] = { + { .compatible = "ralink,mt7620-pinctrl" }, {} }; -MODULE_DEVICE_TABLE(of, mt7620_pinmux_match); +MODULE_DEVICE_TABLE(of, mt7620_pinctrl_match); -static struct platform_driver mt7620_pinmux_driver = { - .probe = mt7620_pinmux_probe, +static struct platform_driver mt7620_pinctrl_driver = { + .probe = mt7620_pinctrl_probe, .driver = { - .name = "rt2880-pinmux", - .of_match_table = mt7620_pinmux_match, + .name = "mt7620-pinctrl", + .of_match_table = mt7620_pinctrl_match, }, }; -static int __init mt7620_pinmux_init(void) +static int __init mt7620_pinctrl_init(void) { - return platform_driver_register(&mt7620_pinmux_driver); + return platform_driver_register(&mt7620_pinctrl_driver); } -core_initcall_sync(mt7620_pinmux_init); +core_initcall_sync(mt7620_pinctrl_init); diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c index 7d96144c474e..b47968f40e0c 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7621.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c @@ -3,7 +3,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h" #define MT7621_GPIO_MODE_UART1 1 #define MT7621_GPIO_MODE_I2C 2 @@ -34,83 +34,83 @@ #define MT7621_GPIO_MODE_SDHCI_SHIFT 18 #define MT7621_GPIO_MODE_SDHCI_GPIO 1 -static struct rt2880_pmx_func 
uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; -static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; -static struct rt2880_pmx_func uart3_grp[] = { +static struct ralink_pmx_func uart1_func[] = { FUNC("uart1", 0, 1, 2) }; +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 3, 2) }; +static struct ralink_pmx_func uart3_func[] = { FUNC("uart3", 0, 5, 4), FUNC("i2s", 2, 5, 4), FUNC("spdif3", 3, 5, 4), }; -static struct rt2880_pmx_func uart2_grp[] = { +static struct ralink_pmx_func uart2_func[] = { FUNC("uart2", 0, 9, 4), FUNC("pcm", 2, 9, 4), FUNC("spdif2", 3, 9, 4), }; -static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) }; -static struct rt2880_pmx_func wdt_grp[] = { +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 13, 5) }; +static struct ralink_pmx_func wdt_func[] = { FUNC("wdt rst", 0, 18, 1), FUNC("wdt refclk", 2, 18, 1), }; -static struct rt2880_pmx_func pcie_rst_grp[] = { +static struct ralink_pmx_func pcie_rst_func[] = { FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1), FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1) }; -static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) }; -static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) }; -static struct rt2880_pmx_func spi_grp[] = { +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 20, 2) }; +static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 22, 12) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 34, 7), FUNC("nand1", 2, 34, 7), }; -static struct rt2880_pmx_func sdhci_grp[] = { +static struct ralink_pmx_func sdhci_func[] = { FUNC("sdhci", 0, 41, 8), FUNC("nand2", 2, 41, 8), }; -static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) }; +static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 49, 12) }; -static struct rt2880_pmx_group mt7621_pinmux_data[] = { - GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1), - GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C), - GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK, +static struct ralink_pmx_group mt7621_pinmux_data[] = { + GRP("uart1", uart1_func, 1, MT7621_GPIO_MODE_UART1), + GRP("i2c", i2c_func, 1, MT7621_GPIO_MODE_I2C), + GRP_G("uart3", uart3_func, MT7621_GPIO_MODE_UART3_MASK, MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT), - GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK, + GRP_G("uart2", uart2_func, MT7621_GPIO_MODE_UART2_MASK, MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT), - GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG), - GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK, + GRP("jtag", jtag_func, 1, MT7621_GPIO_MODE_JTAG), + GRP_G("wdt", wdt_func, MT7621_GPIO_MODE_WDT_MASK, MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT), - GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK, + GRP_G("pcie", pcie_rst_func, MT7621_GPIO_MODE_PCIE_MASK, MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT), - GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK, + GRP_G("mdio", mdio_func, MT7621_GPIO_MODE_MDIO_MASK, MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT), - GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2), - GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK, + GRP("rgmii2", rgmii2_func, 1, MT7621_GPIO_MODE_RGMII2), + GRP_G("spi", spi_func, MT7621_GPIO_MODE_SPI_MASK, MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT), - GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK, + GRP_G("sdhci", sdhci_func, MT7621_GPIO_MODE_SDHCI_MASK, MT7621_GPIO_MODE_SDHCI_GPIO, 
MT7621_GPIO_MODE_SDHCI_SHIFT), - GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1), + GRP("rgmii1", rgmii1_func, 1, MT7621_GPIO_MODE_RGMII1), { 0 } }; -static int mt7621_pinmux_probe(struct platform_device *pdev) +static int mt7621_pinctrl_probe(struct platform_device *pdev) { - return rt2880_pinmux_init(pdev, mt7621_pinmux_data); + return ralink_pinctrl_init(pdev, mt7621_pinmux_data); } -static const struct of_device_id mt7621_pinmux_match[] = { - { .compatible = "ralink,rt2880-pinmux" }, +static const struct of_device_id mt7621_pinctrl_match[] = { + { .compatible = "ralink,mt7621-pinctrl" }, {} }; -MODULE_DEVICE_TABLE(of, mt7621_pinmux_match); +MODULE_DEVICE_TABLE(of, mt7621_pinctrl_match); -static struct platform_driver mt7621_pinmux_driver = { - .probe = mt7621_pinmux_probe, +static struct platform_driver mt7621_pinctrl_driver = { + .probe = mt7621_pinctrl_probe, .driver = { - .name = "rt2880-pinmux", - .of_match_table = mt7621_pinmux_match, + .name = "mt7621-pinctrl", + .of_match_table = mt7621_pinctrl_match, }, }; -static int __init mt7621_pinmux_init(void) +static int __init mt7621_pinctrl_init(void) { - return platform_driver_register(&mt7621_pinmux_driver); + return platform_driver_register(&mt7621_pinctrl_driver); } -core_initcall_sync(mt7621_pinmux_init); +core_initcall_sync(mt7621_pinctrl_init); diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c new file mode 100644 index 000000000000..63429a287434 --- /dev/null +++ b/drivers/pinctrl/ralink/pinctrl-ralink.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/machine.h> + +#include <asm/mach-ralink/ralink_regs.h> +#include <asm/mach-ralink/mt7620.h> + +#include "pinctrl-ralink.h" +#include "../core.h" +#include "../pinctrl-utils.h" + +#define SYSC_REG_GPIO_MODE 0x60 +#define SYSC_REG_GPIO_MODE2 0x64 + +struct ralink_priv { + struct device *dev; + + struct pinctrl_pin_desc *pads; + struct pinctrl_desc *desc; + + struct ralink_pmx_func **func; + int func_count; + + struct ralink_pmx_group *groups; + const char **group_names; + int group_count; + + u8 *gpio; + int max_pins; +}; + +static int ralink_get_group_count(struct pinctrl_dev *pctrldev) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->group_count; +} + +static const char *ralink_get_group_name(struct pinctrl_dev *pctrldev, + unsigned int group) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return (group >= p->group_count) ? 
NULL : p->group_names[group]; +} + +static int ralink_get_group_pins(struct pinctrl_dev *pctrldev, + unsigned int group, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (group >= p->group_count) + return -EINVAL; + + *pins = p->groups[group].func[0].pins; + *num_pins = p->groups[group].func[0].pin_count; + + return 0; +} + +static const struct pinctrl_ops ralink_pctrl_ops = { + .get_groups_count = ralink_get_group_count, + .get_group_name = ralink_get_group_name, + .get_group_pins = ralink_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static int ralink_pmx_func_count(struct pinctrl_dev *pctrldev) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->func_count; +} + +static const char *ralink_pmx_func_name(struct pinctrl_dev *pctrldev, + unsigned int func) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->func[func]->name; +} + +static int ralink_pmx_group_get_groups(struct pinctrl_dev *pctrldev, + unsigned int func, + const char * const **groups, + unsigned int * const num_groups) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (p->func[func]->group_count == 1) + *groups = &p->group_names[p->func[func]->groups[0]]; + else + *groups = p->group_names; + + *num_groups = p->func[func]->group_count; + + return 0; +} + +static int ralink_pmx_group_enable(struct pinctrl_dev *pctrldev, + unsigned int func, unsigned int group) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + u32 mode = 0; + u32 reg = SYSC_REG_GPIO_MODE; + int i; + int shift; + + /* dont allow double use */ + if (p->groups[group].enabled) { + dev_err(p->dev, "%s is already enabled\n", + p->groups[group].name); + return 0; + } + + p->groups[group].enabled = 1; + p->func[func]->enabled = 1; + + shift = p->groups[group].shift; + if (shift >= 32) { + shift -= 32; + reg = SYSC_REG_GPIO_MODE2; + } + mode = rt_sysc_r32(reg); + mode &= ~(p->groups[group].mask << shift); + + /* mark the pins as gpio */ + for (i = 0; i < p->groups[group].func[0].pin_count; i++) + p->gpio[p->groups[group].func[0].pins[i]] = 1; + + /* function 0 is gpio and needs special handling */ + if (func == 0) { + mode |= p->groups[group].gpio << shift; + } else { + for (i = 0; i < p->func[func]->pin_count; i++) + p->gpio[p->func[func]->pins[i]] = 0; + mode |= p->func[func]->value << shift; + } + rt_sysc_w32(mode, reg); + + return 0; +} + +static int ralink_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (!p->gpio[pin]) { + dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); + return -EINVAL; + } + + return 0; +} + +static const struct pinmux_ops ralink_pmx_group_ops = { + .get_functions_count = ralink_pmx_func_count, + .get_function_name = ralink_pmx_func_name, + .get_function_groups = ralink_pmx_group_get_groups, + .set_mux = ralink_pmx_group_enable, + .gpio_request_enable = ralink_pmx_group_gpio_request_enable, +}; + +static struct pinctrl_desc ralink_pctrl_desc = { + .owner = THIS_MODULE, + .name = "ralink-pinctrl", + .pctlops = &ralink_pctrl_ops, + .pmxops = &ralink_pmx_group_ops, +}; + +static struct ralink_pmx_func gpio_func = { + .name = "gpio", +}; + +static int ralink_pinctrl_index(struct ralink_priv *p) +{ + struct ralink_pmx_group *mux = p->groups; + int i, j, c = 0; + + /* 
count the mux functions */ + while (mux->name) { + p->group_count++; + mux++; + } + + /* allocate the group names array needed by the gpio function */ + p->group_names = devm_kcalloc(p->dev, p->group_count, + sizeof(char *), GFP_KERNEL); + if (!p->group_names) + return -ENOMEM; + + for (i = 0; i < p->group_count; i++) { + p->group_names[i] = p->groups[i].name; + p->func_count += p->groups[i].func_count; + } + + /* we have a dummy function[0] for gpio */ + p->func_count++; + + /* allocate our function and group mapping index buffers */ + p->func = devm_kcalloc(p->dev, p->func_count, + sizeof(*p->func), GFP_KERNEL); + gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int), + GFP_KERNEL); + if (!p->func || !gpio_func.groups) + return -ENOMEM; + + /* add a backpointer to the function so it knows its group */ + gpio_func.group_count = p->group_count; + for (i = 0; i < gpio_func.group_count; i++) + gpio_func.groups[i] = i; + + p->func[c] = &gpio_func; + c++; + + /* add remaining functions */ + for (i = 0; i < p->group_count; i++) { + for (j = 0; j < p->groups[i].func_count; j++) { + p->func[c] = &p->groups[i].func[j]; + p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int), + GFP_KERNEL); + if (!p->func[c]->groups) + return -ENOMEM; + p->func[c]->groups[0] = i; + p->func[c]->group_count = 1; + c++; + } + } + return 0; +} + +static int ralink_pinctrl_pins(struct ralink_priv *p) +{ + int i, j; + + /* + * loop over the functions and initialize the pins array. + * also work out the highest pin used. + */ + for (i = 0; i < p->func_count; i++) { + int pin; + + if (!p->func[i]->pin_count) + continue; + + p->func[i]->pins = devm_kcalloc(p->dev, + p->func[i]->pin_count, + sizeof(int), + GFP_KERNEL); + for (j = 0; j < p->func[i]->pin_count; j++) + p->func[i]->pins[j] = p->func[i]->pin_first + j; + + pin = p->func[i]->pin_first + p->func[i]->pin_count; + if (pin > p->max_pins) + p->max_pins = pin; + } + + /* the buffer that tells us which pins are gpio */ + p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL); + /* the pads needed to tell pinctrl about our pins */ + p->pads = devm_kcalloc(p->dev, p->max_pins, + sizeof(struct pinctrl_pin_desc), GFP_KERNEL); + if (!p->pads || !p->gpio) + return -ENOMEM; + + memset(p->gpio, 1, sizeof(u8) * p->max_pins); + for (i = 0; i < p->func_count; i++) { + if (!p->func[i]->pin_count) + continue; + + for (j = 0; j < p->func[i]->pin_count; j++) + p->gpio[p->func[i]->pins[j]] = 0; + } + + /* pin 0 is always a gpio */ + p->gpio[0] = 1; + + /* set the pads */ + for (i = 0; i < p->max_pins; i++) { + /* strlen("ioXY") + 1 = 5 */ + char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); + + if (!name) + return -ENOMEM; + snprintf(name, 5, "io%d", i); + p->pads[i].number = i; + p->pads[i].name = name; + } + p->desc->pins = p->pads; + p->desc->npins = p->max_pins; + + return 0; +} + +int ralink_pinctrl_init(struct platform_device *pdev, + struct ralink_pmx_group *data) +{ + struct ralink_priv *p; + struct pinctrl_dev *dev; + int err; + + if (!data) + return -ENOTSUPP; + + /* setup the private data */ + p = devm_kzalloc(&pdev->dev, sizeof(struct ralink_priv), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->dev = &pdev->dev; + p->desc = &ralink_pctrl_desc; + p->groups = data; + platform_set_drvdata(pdev, p); + + /* init the device */ + err = ralink_pinctrl_index(p); + if (err) { + dev_err(&pdev->dev, "failed to load index\n"); + return err; + } + + err = ralink_pinctrl_pins(p); + if (err) { + dev_err(&pdev->dev, "failed to load pins\n"); + return err; + } + dev 
= pinctrl_register(p->desc, &pdev->dev, p); + + return PTR_ERR_OR_ZERO(dev); +} diff --git a/drivers/pinctrl/ralink/pinmux.h b/drivers/pinctrl/ralink/pinctrl-ralink.h index 0046abe3bcc7..e6037be1e153 100644 --- a/drivers/pinctrl/ralink/pinmux.h +++ b/drivers/pinctrl/ralink/pinctrl-ralink.h @@ -3,8 +3,8 @@ * Copyright (C) 2012 John Crispin <john@phrozen.org> */ -#ifndef _RT288X_PINMUX_H__ -#define _RT288X_PINMUX_H__ +#ifndef _PINCTRL_RALINK_H__ +#define _PINCTRL_RALINK_H__ #define FUNC(name, value, pin_first, pin_count) \ { name, value, pin_first, pin_count } @@ -19,9 +19,9 @@ .func = _func, .gpio = _gpio, \ .func_count = ARRAY_SIZE(_func) } -struct rt2880_pmx_group; +struct ralink_pmx_group; -struct rt2880_pmx_func { +struct ralink_pmx_func { const char *name; const char value; @@ -35,7 +35,7 @@ struct rt2880_pmx_func { int enabled; }; -struct rt2880_pmx_group { +struct ralink_pmx_group { const char *name; int enabled; @@ -43,11 +43,11 @@ struct rt2880_pmx_group { const char mask; const char gpio; - struct rt2880_pmx_func *func; + struct ralink_pmx_func *func; int func_count; }; -int rt2880_pinmux_init(struct platform_device *pdev, - struct rt2880_pmx_group *data); +int ralink_pinctrl_init(struct platform_device *pdev, + struct ralink_pmx_group *data); #endif diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c index 96fc06d1b8b9..811e12df1133 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt2880.c +++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c @@ -1,349 +1,60 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2013 John Crispin <blogic@openwrt.org> - */ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/bitops.h> #include <linux/module.h> -#include <linux/device.h> -#include <linux/io.h> #include <linux/platform_device.h> -#include <linux/slab.h> #include <linux/of.h> -#include <linux/pinctrl/pinctrl.h> -#include <linux/pinctrl/pinconf.h> -#include <linux/pinctrl/pinconf-generic.h> -#include <linux/pinctrl/pinmux.h> -#include <linux/pinctrl/consumer.h> -#include <linux/pinctrl/machine.h> - -#include <asm/mach-ralink/ralink_regs.h> -#include <asm/mach-ralink/mt7620.h> - -#include "pinmux.h" -#include "../core.h" -#include "../pinctrl-utils.h" - -#define SYSC_REG_GPIO_MODE 0x60 -#define SYSC_REG_GPIO_MODE2 0x64 - -struct rt2880_priv { - struct device *dev; - - struct pinctrl_pin_desc *pads; - struct pinctrl_desc *desc; - - struct rt2880_pmx_func **func; - int func_count; - - struct rt2880_pmx_group *groups; - const char **group_names; - int group_count; - - u8 *gpio; - int max_pins; +#include "pinctrl-ralink.h" + +#define RT2880_GPIO_MODE_I2C BIT(0) +#define RT2880_GPIO_MODE_UART0 BIT(1) +#define RT2880_GPIO_MODE_SPI BIT(2) +#define RT2880_GPIO_MODE_UART1 BIT(3) +#define RT2880_GPIO_MODE_JTAG BIT(4) +#define RT2880_GPIO_MODE_MDIO BIT(5) +#define RT2880_GPIO_MODE_SDRAM BIT(6) +#define RT2880_GPIO_MODE_PCI BIT(7) + +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct ralink_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) }; + +static struct ralink_pmx_group rt2880_pinmux_data_act[] = { + GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C), + 
GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI), + GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0), + GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG), + GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO), + GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM), + GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI), + { 0 } }; -static int rt2880_get_group_count(struct pinctrl_dev *pctrldev) +static int rt2880_pinctrl_probe(struct platform_device *pdev) { - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->group_count; + return ralink_pinctrl_init(pdev, rt2880_pinmux_data_act); } -static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev, - unsigned int group) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return (group >= p->group_count) ? NULL : p->group_names[group]; -} - -static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, - unsigned int group, - const unsigned int **pins, - unsigned int *num_pins) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (group >= p->group_count) - return -EINVAL; - - *pins = p->groups[group].func[0].pins; - *num_pins = p->groups[group].func[0].pin_count; - - return 0; -} - -static const struct pinctrl_ops rt2880_pctrl_ops = { - .get_groups_count = rt2880_get_group_count, - .get_group_name = rt2880_get_group_name, - .get_group_pins = rt2880_get_group_pins, - .dt_node_to_map = pinconf_generic_dt_node_to_map_all, - .dt_free_map = pinconf_generic_dt_free_map, +static const struct of_device_id rt2880_pinctrl_match[] = { + { .compatible = "ralink,rt2880-pinctrl" }, + {} }; - -static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->func_count; -} - -static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev, - unsigned int func) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->func[func]->name; -} - -static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev, - unsigned int func, - const char * const **groups, - unsigned int * const num_groups) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (p->func[func]->group_count == 1) - *groups = &p->group_names[p->func[func]->groups[0]]; - else - *groups = p->group_names; - - *num_groups = p->func[func]->group_count; - - return 0; -} - -static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev, - unsigned int func, unsigned int group) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - u32 mode = 0; - u32 reg = SYSC_REG_GPIO_MODE; - int i; - int shift; - - /* dont allow double use */ - if (p->groups[group].enabled) { - dev_err(p->dev, "%s is already enabled\n", - p->groups[group].name); - return 0; - } - - p->groups[group].enabled = 1; - p->func[func]->enabled = 1; - - shift = p->groups[group].shift; - if (shift >= 32) { - shift -= 32; - reg = SYSC_REG_GPIO_MODE2; - } - mode = rt_sysc_r32(reg); - mode &= ~(p->groups[group].mask << shift); - - /* mark the pins as gpio */ - for (i = 0; i < p->groups[group].func[0].pin_count; i++) - p->gpio[p->groups[group].func[0].pins[i]] = 1; - - /* function 0 is gpio and needs special handling */ - if (func == 0) { - mode |= p->groups[group].gpio << shift; - } else { - for (i = 0; i < p->func[func]->pin_count; i++) - p->gpio[p->func[func]->pins[i]] = 0; - mode |= p->func[func]->value << shift; - } - rt_sysc_w32(mode, reg); - - return 0; -} - -static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, - struct 
pinctrl_gpio_range *range, - unsigned int pin) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (!p->gpio[pin]) { - dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); - return -EINVAL; - } - - return 0; -} - -static const struct pinmux_ops rt2880_pmx_group_ops = { - .get_functions_count = rt2880_pmx_func_count, - .get_function_name = rt2880_pmx_func_name, - .get_function_groups = rt2880_pmx_group_get_groups, - .set_mux = rt2880_pmx_group_enable, - .gpio_request_enable = rt2880_pmx_group_gpio_request_enable, +MODULE_DEVICE_TABLE(of, rt2880_pinctrl_match); + +static struct platform_driver rt2880_pinctrl_driver = { + .probe = rt2880_pinctrl_probe, + .driver = { + .name = "rt2880-pinctrl", + .of_match_table = rt2880_pinctrl_match, + }, }; -static struct pinctrl_desc rt2880_pctrl_desc = { - .owner = THIS_MODULE, - .name = "rt2880-pinmux", - .pctlops = &rt2880_pctrl_ops, - .pmxops = &rt2880_pmx_group_ops, -}; - -static struct rt2880_pmx_func gpio_func = { - .name = "gpio", -}; - -static int rt2880_pinmux_index(struct rt2880_priv *p) -{ - struct rt2880_pmx_group *mux = p->groups; - int i, j, c = 0; - - /* count the mux functions */ - while (mux->name) { - p->group_count++; - mux++; - } - - /* allocate the group names array needed by the gpio function */ - p->group_names = devm_kcalloc(p->dev, p->group_count, - sizeof(char *), GFP_KERNEL); - if (!p->group_names) - return -ENOMEM; - - for (i = 0; i < p->group_count; i++) { - p->group_names[i] = p->groups[i].name; - p->func_count += p->groups[i].func_count; - } - - /* we have a dummy function[0] for gpio */ - p->func_count++; - - /* allocate our function and group mapping index buffers */ - p->func = devm_kcalloc(p->dev, p->func_count, - sizeof(*p->func), GFP_KERNEL); - gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int), - GFP_KERNEL); - if (!p->func || !gpio_func.groups) - return -ENOMEM; - - /* add a backpointer to the function so it knows its group */ - gpio_func.group_count = p->group_count; - for (i = 0; i < gpio_func.group_count; i++) - gpio_func.groups[i] = i; - - p->func[c] = &gpio_func; - c++; - - /* add remaining functions */ - for (i = 0; i < p->group_count; i++) { - for (j = 0; j < p->groups[i].func_count; j++) { - p->func[c] = &p->groups[i].func[j]; - p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int), - GFP_KERNEL); - if (!p->func[c]->groups) - return -ENOMEM; - p->func[c]->groups[0] = i; - p->func[c]->group_count = 1; - c++; - } - } - return 0; -} - -static int rt2880_pinmux_pins(struct rt2880_priv *p) +static int __init rt2880_pinctrl_init(void) { - int i, j; - - /* - * loop over the functions and initialize the pins array. - * also work out the highest pin used. 
- */ - for (i = 0; i < p->func_count; i++) { - int pin; - - if (!p->func[i]->pin_count) - continue; - - p->func[i]->pins = devm_kcalloc(p->dev, - p->func[i]->pin_count, - sizeof(int), - GFP_KERNEL); - for (j = 0; j < p->func[i]->pin_count; j++) - p->func[i]->pins[j] = p->func[i]->pin_first + j; - - pin = p->func[i]->pin_first + p->func[i]->pin_count; - if (pin > p->max_pins) - p->max_pins = pin; - } - - /* the buffer that tells us which pins are gpio */ - p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL); - /* the pads needed to tell pinctrl about our pins */ - p->pads = devm_kcalloc(p->dev, p->max_pins, - sizeof(struct pinctrl_pin_desc), GFP_KERNEL); - if (!p->pads || !p->gpio) - return -ENOMEM; - - memset(p->gpio, 1, sizeof(u8) * p->max_pins); - for (i = 0; i < p->func_count; i++) { - if (!p->func[i]->pin_count) - continue; - - for (j = 0; j < p->func[i]->pin_count; j++) - p->gpio[p->func[i]->pins[j]] = 0; - } - - /* pin 0 is always a gpio */ - p->gpio[0] = 1; - - /* set the pads */ - for (i = 0; i < p->max_pins; i++) { - /* strlen("ioXY") + 1 = 5 */ - char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); - - if (!name) - return -ENOMEM; - snprintf(name, 5, "io%d", i); - p->pads[i].number = i; - p->pads[i].name = name; - } - p->desc->pins = p->pads; - p->desc->npins = p->max_pins; - - return 0; -} - -int rt2880_pinmux_init(struct platform_device *pdev, - struct rt2880_pmx_group *data) -{ - struct rt2880_priv *p; - struct pinctrl_dev *dev; - int err; - - if (!data) - return -ENOTSUPP; - - /* setup the private data */ - p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL); - if (!p) - return -ENOMEM; - - p->dev = &pdev->dev; - p->desc = &rt2880_pctrl_desc; - p->groups = data; - platform_set_drvdata(pdev, p); - - /* init the device */ - err = rt2880_pinmux_index(p); - if (err) { - dev_err(&pdev->dev, "failed to load index\n"); - return err; - } - - err = rt2880_pinmux_pins(p); - if (err) { - dev_err(&pdev->dev, "failed to load pins\n"); - return err; - } - dev = pinctrl_register(p->desc, &pdev->dev, p); - - return PTR_ERR_OR_ZERO(dev); + return platform_driver_register(&rt2880_pinctrl_driver); } +core_initcall_sync(rt2880_pinctrl_init); diff --git a/drivers/pinctrl/ralink/pinctrl-rt288x.c b/drivers/pinctrl/ralink/pinctrl-rt288x.c deleted file mode 100644 index 0744aebbace5..000000000000 --- a/drivers/pinctrl/ralink/pinctrl-rt288x.c +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only - -#include <linux/bitops.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/of.h> -#include "pinmux.h" - -#define RT2880_GPIO_MODE_I2C BIT(0) -#define RT2880_GPIO_MODE_UART0 BIT(1) -#define RT2880_GPIO_MODE_SPI BIT(2) -#define RT2880_GPIO_MODE_UART1 BIT(3) -#define RT2880_GPIO_MODE_JTAG BIT(4) -#define RT2880_GPIO_MODE_MDIO BIT(5) -#define RT2880_GPIO_MODE_SDRAM BIT(6) -#define RT2880_GPIO_MODE_PCI BIT(7) - -static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; -static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) }; - -static struct rt2880_pmx_group rt2880_pinmux_data_act[] = { - GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C), - 
GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI), - GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0), - GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG), - GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO), - GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM), - GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI), - { 0 } -}; - -static int rt288x_pinmux_probe(struct platform_device *pdev) -{ - return rt2880_pinmux_init(pdev, rt2880_pinmux_data_act); -} - -static const struct of_device_id rt288x_pinmux_match[] = { - { .compatible = "ralink,rt2880-pinmux" }, - {} -}; -MODULE_DEVICE_TABLE(of, rt288x_pinmux_match); - -static struct platform_driver rt288x_pinmux_driver = { - .probe = rt288x_pinmux_probe, - .driver = { - .name = "rt2880-pinmux", - .of_match_table = rt288x_pinmux_match, - }, -}; - -static int __init rt288x_pinmux_init(void) -{ - return platform_driver_register(&rt288x_pinmux_driver); -} -core_initcall_sync(rt288x_pinmux_init); diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c index 5d8fa156c003..5b204b7ca1f3 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt305x.c +++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c @@ -5,7 +5,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h" #define RT305X_GPIO_MODE_UART0_SHIFT 2 #define RT305X_GPIO_MODE_UART0_MASK 0x7 @@ -31,9 +31,9 @@ #define RT3352_GPIO_MODE_LNA 18 #define RT3352_GPIO_MODE_PA 20 -static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartf_func[] = { +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartf_func[] = { FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8), @@ -42,28 +42,28 @@ static struct rt2880_pmx_func uartf_func[] = { FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; -static struct rt2880_pmx_func rt5350_cs1_func[] = { +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; +static struct ralink_pmx_func rt5350_cs1_func[] = { FUNC("spi_cs1", 0, 27, 1), FUNC("wdg_cs1", 1, 27, 1), }; -static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; -static struct rt2880_pmx_func rt3352_rgmii_func[] = { +static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct ralink_pmx_func rt3352_rgmii_func[] = { FUNC("rgmii", 0, 24, 12) }; -static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; -static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; -static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; -static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) 
}; -static struct rt2880_pmx_func rt3352_cs1_func[] = { +static struct ralink_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; +static struct ralink_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; +static struct ralink_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; +static struct ralink_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; +static struct ralink_pmx_func rt3352_cs1_func[] = { FUNC("spi_cs1", 0, 45, 1), FUNC("wdg_cs1", 1, 45, 1), }; -static struct rt2880_pmx_group rt3050_pinmux_data[] = { +static struct ralink_pmx_group rt3050_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -76,7 +76,7 @@ static struct rt2880_pmx_group rt3050_pinmux_data[] = { { 0 } }; -static struct rt2880_pmx_group rt3352_pinmux_data[] = { +static struct ralink_pmx_group rt3352_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -92,7 +92,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = { { 0 } }; -static struct rt2880_pmx_group rt5350_pinmux_data[] = { +static struct ralink_pmx_group rt5350_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -104,34 +104,34 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = { { 0 } }; -static int rt305x_pinmux_probe(struct platform_device *pdev) +static int rt305x_pinctrl_probe(struct platform_device *pdev) { if (soc_is_rt5350()) - return rt2880_pinmux_init(pdev, rt5350_pinmux_data); + return ralink_pinctrl_init(pdev, rt5350_pinmux_data); else if (soc_is_rt305x() || soc_is_rt3350()) - return rt2880_pinmux_init(pdev, rt3050_pinmux_data); + return ralink_pinctrl_init(pdev, rt3050_pinmux_data); else if (soc_is_rt3352()) - return rt2880_pinmux_init(pdev, rt3352_pinmux_data); + return ralink_pinctrl_init(pdev, rt3352_pinmux_data); else return -EINVAL; } -static const struct of_device_id rt305x_pinmux_match[] = { - { .compatible = "ralink,rt2880-pinmux" }, +static const struct of_device_id rt305x_pinctrl_match[] = { + { .compatible = "ralink,rt305x-pinctrl" }, {} }; -MODULE_DEVICE_TABLE(of, rt305x_pinmux_match); +MODULE_DEVICE_TABLE(of, rt305x_pinctrl_match); -static struct platform_driver rt305x_pinmux_driver = { - .probe = rt305x_pinmux_probe, +static struct platform_driver rt305x_pinctrl_driver = { + .probe = rt305x_pinctrl_probe, .driver = { - .name = "rt2880-pinmux", - .of_match_table = rt305x_pinmux_match, + .name = "rt305x-pinctrl", + .of_match_table = rt305x_pinctrl_match, }, }; -static int __init rt305x_pinmux_init(void) +static int __init rt305x_pinctrl_init(void) { - return platform_driver_register(&rt305x_pinmux_driver); + return platform_driver_register(&rt305x_pinctrl_driver); } -core_initcall_sync(rt305x_pinmux_init); +core_initcall_sync(rt305x_pinctrl_init); diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c index 3e0e1b4caa64..44a66c3d2d2a 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt3883.c +++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c @@ -3,7 +3,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h" #define RT3883_GPIO_MODE_UART0_SHIFT 2 #define RT3883_GPIO_MODE_UART0_MASK 0x7 @@ -39,9 +39,9 @@ #define RT3883_GPIO_MODE_LNA_G_GPIO 0x3 #define 
RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK) -static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartf_func[] = { +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartf_func[] = { FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), @@ -50,21 +50,21 @@ static struct rt2880_pmx_func uartf_func[] = { FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; -static struct rt2880_pmx_func pci_func[] = { +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; +static struct ralink_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; +static struct ralink_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), FUNC("pci-host1", 2, 40, 32), FUNC("pci-fnc", 3, 40, 32) }; -static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; +static struct ralink_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; +static struct ralink_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; -static struct rt2880_pmx_group rt3883_pinmux_data[] = { +static struct ralink_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, @@ -81,27 +81,27 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = { { 0 } }; -static int rt3883_pinmux_probe(struct platform_device *pdev) +static int rt3883_pinctrl_probe(struct platform_device *pdev) { - return rt2880_pinmux_init(pdev, rt3883_pinmux_data); + return ralink_pinctrl_init(pdev, rt3883_pinmux_data); } -static const struct of_device_id rt3883_pinmux_match[] = { - { .compatible = "ralink,rt2880-pinmux" }, +static const struct of_device_id rt3883_pinctrl_match[] = { + { .compatible = "ralink,rt3883-pinctrl" }, {} }; -MODULE_DEVICE_TABLE(of, rt3883_pinmux_match); +MODULE_DEVICE_TABLE(of, rt3883_pinctrl_match); -static struct platform_driver rt3883_pinmux_driver = { - .probe = rt3883_pinmux_probe, +static struct platform_driver rt3883_pinctrl_driver = { + .probe = rt3883_pinctrl_probe, .driver = { - .name = "rt2880-pinmux", - .of_match_table = rt3883_pinmux_match, + .name = "rt3883-pinctrl", + .of_match_table = rt3883_pinctrl_match, }, }; -static int __init rt3883_pinmux_init(void) +static int __init rt3883_pinctrl_init(void) { - return platform_driver_register(&rt3883_pinmux_driver); + return platform_driver_register(&rt3883_pinctrl_driver); } -core_initcall_sync(rt3883_pinmux_init); +core_initcall_sync(rt3883_pinctrl_init); 
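For orientation, the ralink changes above all follow one pattern: the shared core moves from pinctrl-rt2880.c and pinmux.h to pinctrl-ralink.c and pinctrl-ralink.h, the rt2880_pmx_func/rt2880_pmx_group types and rt2880_pinmux_init() become ralink_pmx_func/ralink_pmx_group and ralink_pinctrl_init(), and each SoC driver stops binding to the catch-all "ralink,rt2880-pinmux" compatible in favour of a SoC-specific one. The following is a minimal sketch of what a SoC driver looks like after this rework; the SoC name "mysoc", its compatible string and the mode-register shift values are invented purely for illustration and do not correspond to real hardware.

// SPDX-License-Identifier: GPL-2.0-only
// Hypothetical example only: mirrors the structure of the renamed drivers above.
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "pinctrl-ralink.h"

/* bit positions of the (made-up) mux fields in the SoC GPIO mode register */
#define MYSOC_GPIO_MODE_I2C	1
#define MYSOC_GPIO_MODE_SPI	2

/* FUNC(name, mux value, first pin, pin count) describes one selectable function */
static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };

/* the { 0 } sentinel terminates the table walked by ralink_pinctrl_init() */
static struct ralink_pmx_group mysoc_pinmux_data[] = {
	GRP("i2c", i2c_func, 1, MYSOC_GPIO_MODE_I2C),
	GRP("spi", spi_func, 1, MYSOC_GPIO_MODE_SPI),
	{ 0 }
};

static int mysoc_pinctrl_probe(struct platform_device *pdev)
{
	/* the shared core builds and registers the pinctrl device from the table */
	return ralink_pinctrl_init(pdev, mysoc_pinmux_data);
}

static const struct of_device_id mysoc_pinctrl_match[] = {
	{ .compatible = "ralink,mysoc-pinctrl" },	/* hypothetical compatible */
	{}
};
MODULE_DEVICE_TABLE(of, mysoc_pinctrl_match);

static struct platform_driver mysoc_pinctrl_driver = {
	.probe = mysoc_pinctrl_probe,
	.driver = {
		.name = "mysoc-pinctrl",
		.of_match_table = mysoc_pinctrl_match,
	},
};

static int __init mysoc_pinctrl_init(void)
{
	return platform_driver_register(&mysoc_pinctrl_driver);
}
core_initcall_sync(mysoc_pinctrl_init);

As in the drivers above, registration happens at core_initcall_sync() time rather than through module_platform_driver(), so the pin controller is available well before ordinary device initcalls run.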
diff --git a/drivers/pinctrl/renesas/Kconfig b/drivers/pinctrl/renesas/Kconfig index 6b38720c56e3..961007ce7b3a 100644 --- a/drivers/pinctrl/renesas/Kconfig +++ b/drivers/pinctrl/renesas/Kconfig @@ -38,8 +38,7 @@ config PINCTRL_RENESAS select PINCTRL_PFC_R8A77995 if ARCH_R8A77995 select PINCTRL_PFC_R8A779A0 if ARCH_R8A779A0 select PINCTRL_PFC_R8A779F0 if ARCH_R8A779F0 - select PINCTRL_RZG2L if ARCH_R9A07G044 - select PINCTRL_RZG2L if ARCH_R9A07G054 + select PINCTRL_RZG2L if ARCH_RZG2L select PINCTRL_PFC_SH7203 if CPU_SUBTYPE_SH7203 select PINCTRL_PFC_SH7264 if CPU_SUBTYPE_SH7264 select PINCTRL_PFC_SH7269 if CPU_SUBTYPE_SH7269 @@ -184,14 +183,14 @@ config PINCTRL_RZA2 This selects GPIO and pinctrl driver for Renesas RZ/A2 platforms. config PINCTRL_RZG2L - bool "pin control support for RZ/{G2L,V2L}" if COMPILE_TEST + bool "pin control support for RZ/{G2L,G2UL,V2L}" if COMPILE_TEST depends on OF select GPIOLIB select GENERIC_PINCTRL_GROUPS select GENERIC_PINMUX_FUNCTIONS select GENERIC_PINCONF help - This selects GPIO and pinctrl driver for Renesas RZ/{G2L,V2L} + This selects GPIO and pinctrl driver for Renesas RZ/{G2L,G2UL,V2L} platforms. config PINCTRL_PFC_R8A77470 diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c index d0d4714731c1..8c14b2021bf0 100644 --- a/drivers/pinctrl/renesas/core.c +++ b/drivers/pinctrl/renesas/core.c @@ -13,10 +13,11 @@ #include <linux/bitops.h> #include <linux/err.h> #include <linux/errno.h> +#include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> -#include <linux/init.h> +#include <linux/math.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/machine.h> @@ -71,12 +72,11 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc, /* Fill them. 
*/ for (i = 0; i < num_windows; i++) { - res = platform_get_resource(pdev, IORESOURCE_MEM, i); - windows->phys = res->start; - windows->size = resource_size(res); - windows->virt = devm_ioremap_resource(pfc->dev, res); + windows->virt = devm_platform_get_and_ioremap_resource(pdev, i, &res); if (IS_ERR(windows->virt)) return -ENOMEM; + windows->phys = res->start; + windows->size = resource_size(res); windows++; } for (i = 0; i < num_irqs; i++) @@ -214,7 +214,7 @@ static void sh_pfc_config_reg_helper(struct sh_pfc *pfc, *maskp = (1 << crp->var_field_width[in_pos]) - 1; *posp = crp->reg_width; for (k = 0; k <= in_pos; k++) - *posp -= crp->var_field_width[k]; + *posp -= abs(crp->var_field_width[k]); } } @@ -262,14 +262,17 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id, if (!r_width) break; - for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) { + for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width, m++) { u32 ncomb; u32 n; - if (f_width) + if (f_width) { curr_width = f_width; - else - curr_width = config_reg->var_field_width[m]; + } else { + curr_width = abs(config_reg->var_field_width[m]); + if (config_reg->var_field_width[m] < 0) + continue; + } ncomb = 1 << curr_width; for (n = 0; n < ncomb; n++) { @@ -281,7 +284,6 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id, } } pos += ncomb; - m++; } k++; } @@ -875,7 +877,8 @@ static const struct sh_pfc_pin __init *sh_pfc_find_pin( static void __init sh_pfc_check_cfg_reg(const char *drvname, const struct pinmux_cfg_reg *cfg_reg) { - unsigned int i, n, rw, fw; + unsigned int i, n, rw, r; + int fw; sh_pfc_check_reg(drvname, cfg_reg->reg, GENMASK(cfg_reg->reg_width - 1, 0)); @@ -883,16 +886,29 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname, if (cfg_reg->field_width) { fw = cfg_reg->field_width; n = (cfg_reg->reg_width / fw) << fw; + for (i = 0, r = 0; i < n; i += 1 << fw) { + if (is0s(&cfg_reg->enum_ids[i], 1 << fw)) + r++; + } + + if ((r << fw) * sizeof(u16) > cfg_reg->reg_width / fw) + sh_pfc_warn("reg 0x%x can be described with variable-width reserved fields\n", + cfg_reg->reg); + /* Skip field checks (done at build time) */ goto check_enum_ids; } for (i = 0, n = 0, rw = 0; (fw = cfg_reg->var_field_width[i]); i++) { - if (fw > 3 && is0s(&cfg_reg->enum_ids[n], 1 << fw)) - sh_pfc_warn("reg 0x%x: reserved field [%u:%u] can be split to reduce table size\n", - cfg_reg->reg, rw, rw + fw - 1); - n += 1 << fw; - rw += fw; + if (fw < 0) { + rw += -fw; + } else { + if (is0s(&cfg_reg->enum_ids[n], 1 << fw)) + sh_pfc_warn("reg 0x%x: field [%u:%u] can be described as reserved\n", + cfg_reg->reg, rw, rw + fw - 1); + n += 1 << fw; + rw += fw; + } } if (rw != cfg_reg->reg_width) @@ -1007,7 +1023,18 @@ static void __init sh_pfc_compare_groups(const char *drvname, static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info) { const struct pinmux_drive_reg *drive_regs = info->drive_regs; +#define drive_nfields ARRAY_SIZE(drive_regs->fields) +#define drive_ofs(i) drive_regs[(i) / drive_nfields] +#define drive_reg(i) drive_ofs(i).reg +#define drive_bit(i) ((i) % drive_nfields) +#define drive_field(i) drive_ofs(i).fields[drive_bit(i)] const struct pinmux_bias_reg *bias_regs = info->bias_regs; +#define bias_npins ARRAY_SIZE(bias_regs->pins) +#define bias_ofs(i) bias_regs[(i) / bias_npins] +#define bias_puen(i) bias_ofs(i).puen +#define bias_pud(i) bias_ofs(i).pud +#define bias_bit(i) ((i) % bias_npins) +#define bias_pin(i) bias_ofs(i).pins[bias_bit(i)] const char *drvname = info->name; 
unsigned int *refcnts; unsigned int i, j, k; @@ -1076,17 +1103,17 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info) if (!drive_regs) { sh_pfc_err_once(drive, "SH_PFC_PIN_CFG_DRIVE_STRENGTH flag set but drive_regs missing\n"); } else { - for (j = 0; drive_regs[j / 8].reg; j++) { - if (!drive_regs[j / 8].fields[j % 8].pin && - !drive_regs[j / 8].fields[j % 8].offset && - !drive_regs[j / 8].fields[j % 8].size) + for (j = 0; drive_reg(j); j++) { + if (!drive_field(j).pin && + !drive_field(j).offset && + !drive_field(j).size) continue; - if (drive_regs[j / 8].fields[j % 8].pin == pin->pin) + if (drive_field(j).pin == pin->pin) break; } - if (!drive_regs[j / 8].reg) + if (!drive_reg(j)) sh_pfc_err("pin %s: SH_PFC_PIN_CFG_DRIVE_STRENGTH flag set but not in drive_regs\n", pin->name); } @@ -1164,20 +1191,17 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info) for (i = 0; drive_regs && drive_regs[i].reg; i++) sh_pfc_check_drive_reg(info, &drive_regs[i]); - for (i = 0; drive_regs && drive_regs[i / 8].reg; i++) { - if (!drive_regs[i / 8].fields[i % 8].pin && - !drive_regs[i / 8].fields[i % 8].offset && - !drive_regs[i / 8].fields[i % 8].size) + for (i = 0; drive_regs && drive_reg(i); i++) { + if (!drive_field(i).pin && !drive_field(i).offset && + !drive_field(i).size) continue; for (j = 0; j < i; j++) { - if (drive_regs[i / 8].fields[i % 8].pin == - drive_regs[j / 8].fields[j % 8].pin && - drive_regs[j / 8].fields[j % 8].offset && - drive_regs[j / 8].fields[j % 8].size) { - sh_pfc_err("drive_reg 0x%x:%u/0x%x:%u: pin conflict\n", - drive_regs[i / 8].reg, i % 8, - drive_regs[j / 8].reg, j % 8); + if (drive_field(i).pin == drive_field(j).pin && + drive_field(j).offset && drive_field(j).size) { + sh_pfc_err("drive_reg 0x%x:%zu/0x%x:%zu: pin conflict\n", + drive_reg(i), drive_bit(i), + drive_reg(j), drive_bit(j)); } } } @@ -1186,26 +1210,23 @@ static void __init sh_pfc_check_info(const struct sh_pfc_soc_info *info) for (i = 0; bias_regs && (bias_regs[i].puen || bias_regs[i].pud); i++) sh_pfc_check_bias_reg(info, &bias_regs[i]); - for (i = 0; bias_regs && - (bias_regs[i / 32].puen || bias_regs[i / 32].pud); i++) { - if (bias_regs[i / 32].pins[i % 32] == SH_PFC_PIN_NONE) + for (i = 0; bias_regs && (bias_puen(i) || bias_pud(i)); i++) { + if (bias_pin(i) == SH_PFC_PIN_NONE) continue; for (j = 0; j < i; j++) { - if (bias_regs[i / 32].pins[i % 32] != - bias_regs[j / 32].pins[j % 32]) + if (bias_pin(i) != bias_pin(j)) continue; - if (bias_regs[i / 32].puen && bias_regs[j / 32].puen) - sh_pfc_err("bias_reg 0x%x:%u/0x%x:%u: pin conflict\n", - bias_regs[i / 32].puen, i % 32, - bias_regs[j / 32].puen, j % 32); - if (bias_regs[i / 32].pud && bias_regs[j / 32].pud) - sh_pfc_err("bias_reg 0x%x:%u/0x%x:%u: pin conflict\n", - bias_regs[i / 32].pud, i % 32, - bias_regs[j / 32].pud, j % 32); + if (bias_puen(i) && bias_puen(j)) + sh_pfc_err("bias_reg 0x%x:%zu/0x%x:%zu: pin conflict\n", + bias_puen(i), bias_bit(i), + bias_puen(j), bias_bit(j)); + if (bias_pud(i) && bias_pud(j)) + sh_pfc_err("bias_reg 0x%x:%zu/0x%x:%zu: pin conflict\n", + bias_pud(i), bias_bit(i), + bias_pud(j), bias_bit(j)); } - } /* Check ioctrl registers */ diff --git a/drivers/pinctrl/renesas/gpio.c b/drivers/pinctrl/renesas/gpio.c index ad06f5355d1e..ea3d38b4af8d 100644 --- a/drivers/pinctrl/renesas/gpio.c +++ b/drivers/pinctrl/renesas/gpio.c @@ -8,7 +8,6 @@ #include <linux/device.h> #include <linux/gpio/driver.h> -#include <linux/init.h> #include <linux/module.h> #include <linux/pinctrl/consumer.h> 
#include <linux/slab.h> diff --git a/drivers/pinctrl/renesas/pfc-emev2.c b/drivers/pinctrl/renesas/pfc-emev2.c index 2326d348447d..1d8b540110f2 100644 --- a/drivers/pinctrl/renesas/pfc-emev2.c +++ b/drivers/pinctrl/renesas/pfc-emev2.c @@ -4,7 +4,6 @@ * * Copyright (C) 2015 Niklas Söderlund */ -#include <linux/init.h> #include <linux/kernel.h> #include "sh_pfc.h" @@ -1570,61 +1569,39 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_LCD3", 0xe0140284, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, - 2, 2), + GROUP(-20, 2, 2, -6, 2), GROUP( - /* 31 - 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + /* 31 - 12 RESERVED */ /* 11 - 10 */ FN_SEL_LCD3_11_10_00, FN_SEL_LCD3_11_10_01, FN_SEL_LCD3_11_10_10, 0, /* 9 - 8 */ FN_SEL_LCD3_9_8_00, 0, FN_SEL_LCD3_9_8_10, 0, - /* 7 - 2 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 7 - 2 RESERVED */ /* 1 - 0 */ FN_SEL_LCD3_1_0_00, FN_SEL_LCD3_1_0_01, 0, 0, )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_UART", 0xe0140288, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2), + GROUP(-30, 2), GROUP( - /* 31 - 2 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 31 - 2 RESERVED */ /* 1 - 0 */ FN_SEL_UART_1_0_00, FN_SEL_UART_1_0_01, 0, 0, )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_IIC", 0xe014028c, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2), + GROUP(-30, 2), GROUP( - /* 31 - 2 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 31 - 2 RESERVED */ /* 1 - 0 */ FN_SEL_IIC_1_0_00, FN_SEL_IIC_1_0_01, 0, 0, )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_AB", 0xe0140294, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2), + GROUP(-18, 2, 2, 2, 2, 2, 2, 2), GROUP( - /* 31 - 14 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + /* 31 - 14 RESERVED */ /* 13 - 12 */ FN_SEL_AB_13_12_00, 0, FN_SEL_AB_13_12_10, 0, /* 11 - 10 */ @@ -1644,14 +1621,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_USI", 0xe0140298, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 2, 2, 2), + GROUP(-22, 2, 2, 2, 2, 2), GROUP( - /* 31 - 10 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 31 - 10 RESERVED */ /* 9 - 8 */ FN_SEL_USI_9_8_00, FN_SEL_USI_9_8_01, 0, 0, /* 7 - 6 */ @@ -1665,15 +1637,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("CHG_PINSEL_HSI", 0xe01402a8, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 2), + GROUP(-30, 2), GROUP( - /* 31 - 2 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* 31 - 2 RESERVED */ /* 1 - 0 */ FN_SEL_HSI_1_0_00, FN_SEL_HSI_1_0_01, 0, 0, )) diff --git 
a/drivers/pinctrl/renesas/pfc-r8a73a4.c b/drivers/pinctrl/renesas/pfc-r8a73a4.c index ba3a1857f80a..dbfc46fe2f27 100644 --- a/drivers/pinctrl/renesas/pfc-r8a73a4.c +++ b/drivers/pinctrl/renesas/pfc-r8a73a4.c @@ -2270,15 +2270,17 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MSEL1CR_00_0, MSEL1CR_00_1, )) }, - { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP( + { PINMUX_CFG_REG_VAR("MSEL3CR", 0xe6058020, 32, + GROUP(1, -2, 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, -2, 1, 1, 1, 1, -2, 1, -2, 1, + -1, 1, 1), + GROUP( MSEL3CR_31_0, MSEL3CR_31_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_28_0, MSEL3CR_28_1, MSEL3CR_27_0, MSEL3CR_27_1, MSEL3CR_26_0, MSEL3CR_26_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_23_0, MSEL3CR_23_1, MSEL3CR_22_0, MSEL3CR_22_1, MSEL3CR_21_0, MSEL3CR_21_1, @@ -2288,19 +2290,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MSEL3CR_17_0, MSEL3CR_17_1, MSEL3CR_16_0, MSEL3CR_16_1, MSEL3CR_15_0, MSEL3CR_15_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_12_0, MSEL3CR_12_1, MSEL3CR_11_0, MSEL3CR_11_1, MSEL3CR_10_0, MSEL3CR_10_1, MSEL3CR_09_0, MSEL3CR_09_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_06_0, MSEL3CR_06_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_03_0, MSEL3CR_03_1, - 0, 0, + /* RESERVED [1] */ MSEL3CR_01_0, MSEL3CR_01_1, MSEL3CR_00_0, MSEL3CR_00_1, )) @@ -2375,37 +2374,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, )) }, - { PINMUX_CFG_REG("MSEL8CR", 0xe6058034, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("MSEL8CR", 0xe6058034, 32, + GROUP(-15, 1, -14, 1, 1), + GROUP( + /* RESERVED [15] */ MSEL8CR_16_0, MSEL8CR_16_1, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [14] */ MSEL8CR_01_0, MSEL8CR_01_1, MSEL8CR_00_0, MSEL8CR_00_1, )) diff --git a/drivers/pinctrl/renesas/pfc-r8a7740.c b/drivers/pinctrl/renesas/pfc-r8a7740.c index e8b9fb74a802..6dcd39918daf 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7740.c +++ b/drivers/pinctrl/renesas/pfc-r8a7740.c @@ -3250,89 +3250,93 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PORTCR(210, 0xe60530d2), /* PORT210CR */ PORTCR(211, 0xe60530d3), /* PORT211CR */ - { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1, GROUP( + { PINMUX_CFG_REG_VAR("MSEL1CR", 0xe605800c, 32, + GROUP(1, 1, 1, 1, 1, 1, -9, 1, 1, 1, 1, 1, + -2, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1), + GROUP( MSEL1CR_31_0, MSEL1CR_31_1, MSEL1CR_30_0, MSEL1CR_30_1, MSEL1CR_29_0, MSEL1CR_29_1, MSEL1CR_28_0, MSEL1CR_28_1, MSEL1CR_27_0, MSEL1CR_27_1, MSEL1CR_26_0, MSEL1CR_26_1, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED [9] */ MSEL1CR_16_0, MSEL1CR_16_1, MSEL1CR_15_0, MSEL1CR_15_1, MSEL1CR_14_0, MSEL1CR_14_1, MSEL1CR_13_0, MSEL1CR_13_1, MSEL1CR_12_0, MSEL1CR_12_1, - 0, 0, 0, 0, + /* RESERVED [2] */ MSEL1CR_9_0, MSEL1CR_9_1, - 0, 0, + /* RESERVED [1] */ MSEL1CR_7_0, MSEL1CR_7_1, MSEL1CR_6_0, MSEL1CR_6_1, MSEL1CR_5_0, MSEL1CR_5_1, MSEL1CR_4_0, MSEL1CR_4_1, MSEL1CR_3_0, MSEL1CR_3_1, MSEL1CR_2_0, MSEL1CR_2_1, - 0, 0, + /* RESERVED [1] */ MSEL1CR_0_0, MSEL1CR_0_1, )) }, - { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("MSEL3CR", 0xE6058020, 32, + GROUP(-16, 1, -8, 1, -6), + GROUP( + /* RESERVED [16] */ MSEL3CR_15_0, MSEL3CR_15_1, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 
0, 0, 0, 0, 0, 0, 0, + /* RESERVED [8] */ MSEL3CR_6_0, MSEL3CR_6_1, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + /* RESERVED [6] */ )) }, - { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("MSEL4CR", 0xE6058024, 32, + GROUP(-12, 1, 1, -2, 1, -4, 1, -3, 1, -1, 1, -2, + 1, -1), + GROUP( + /* RESERVED [12] */ MSEL4CR_19_0, MSEL4CR_19_1, MSEL4CR_18_0, MSEL4CR_18_1, - 0, 0, 0, 0, + /* RESERVED [2] */ MSEL4CR_15_0, MSEL4CR_15_1, - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED [4] */ MSEL4CR_10_0, MSEL4CR_10_1, - 0, 0, 0, 0, 0, 0, + /* RESERVED [3] */ MSEL4CR_6_0, MSEL4CR_6_1, - 0, 0, + /* RESERVED [1] */ MSEL4CR_4_0, MSEL4CR_4_1, - 0, 0, 0, 0, + /* RESERVED [2] */ MSEL4CR_1_0, MSEL4CR_1_1, - 0, 0, + /* RESERVED [1] */ )) }, - { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1, GROUP( + { PINMUX_CFG_REG_VAR("MSEL5CR", 0xE6058028, 32, + GROUP(1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, + -1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, + -1, 1, 1, 1, 1, 1, 1, 1, -1, 1), + GROUP( MSEL5CR_31_0, MSEL5CR_31_1, MSEL5CR_30_0, MSEL5CR_30_1, MSEL5CR_29_0, MSEL5CR_29_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_27_0, MSEL5CR_27_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_25_0, MSEL5CR_25_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_23_0, MSEL5CR_23_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_21_0, MSEL5CR_21_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_19_0, MSEL5CR_19_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_17_0, MSEL5CR_17_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_15_0, MSEL5CR_15_1, MSEL5CR_14_0, MSEL5CR_14_1, MSEL5CR_13_0, MSEL5CR_13_1, MSEL5CR_12_0, MSEL5CR_12_1, MSEL5CR_11_0, MSEL5CR_11_1, MSEL5CR_10_0, MSEL5CR_10_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_8_0, MSEL5CR_8_1, MSEL5CR_7_0, MSEL5CR_7_1, MSEL5CR_6_0, MSEL5CR_6_1, @@ -3340,7 +3344,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MSEL5CR_4_0, MSEL5CR_4_1, MSEL5CR_3_0, MSEL5CR_3_1, MSEL5CR_2_0, MSEL5CR_2_1, - 0, 0, + /* RESERVED [1] */ MSEL5CR_0_0, MSEL5CR_0_1, )) }, diff --git a/drivers/pinctrl/renesas/pfc-r8a77470.c b/drivers/pinctrl/renesas/pfc-r8a77470.c index ee6e8fabab24..b5725c3ed2b6 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77470.c +++ b/drivers/pinctrl/renesas/pfc-r8a77470.c @@ -2485,16 +2485,11 @@ static const struct sh_pfc_function pinmux_functions[] = { }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { - { PINMUX_CFG_REG("GPSR0", 0xE6060004, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xE6060004, 32, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_23 RESERVED */ GP_0_22_FN, FN_MMC0_D7, GP_0_21_FN, FN_MMC0_D6, GP_0_20_FN, FN_IP1_7_4, @@ -2519,16 +2514,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_0_1_FN, FN_USB0_OVC, GP_0_0_FN, FN_USB0_PWEN, )) }, - { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR1", 0xE6060008, 32, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP1_31_23 RESERVED */ GP_1_22_FN, FN_IP4_3_0, GP_1_21_FN, FN_IP3_31_28, GP_1_20_FN, FN_IP3_27_24, @@ -2587,22 +2577,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, FN_IP4_11_8, GP_2_0_FN, FN_IP4_7_4, )) }, - { PINMUX_CFG_REG("GPSR3", 0xE6060010, 32, 1, GROUP( - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xE6060010, 32, + GROUP(-2, 1, 1, -10, 1, 1, 1, 1, 1, 1, 1, 1, + 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_30 RESERVED */ GP_3_29_FN, FN_IP10_19_16, GP_3_28_FN, FN_IP10_15_12, GP_3_27_FN, FN_IP10_11_8, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* GP3_26_17 RESERVED */ GP_3_16_FN, FN_IP10_7_4, GP_3_15_FN, FN_IP10_3_0, GP_3_14_FN, FN_IP9_31_28, @@ -2689,9 +2672,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, FN_IP14_3_0, GP_5_0_FN, FN_IP13_31_28, )) }, - { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR0", 0xE6060040, 32, 4, GROUP( /* IP0_31_28 [4] */ FN_SD0_WP, FN_IRQ7, FN_CAN0_TX_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2717,9 +2698,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SD0_CLK, 0, 0, FN_SSI_SCK1_C, FN_RX3_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR1", 0xE6060044, 32, 4, GROUP( /* IP1_31_28 [4] */ FN_D5, FN_HRX2, FN_SCL1_B, FN_PWM2_C, FN_TCLK2_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2745,9 +2724,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_MMC0_D4, FN_SD1_CD, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR2", 0xE6060048, 32, 4, GROUP( /* IP2_31_28 [4] */ FN_D13, FN_MSIOF2_SYNC_A, 0, FN_RX4_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2773,9 +2750,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_D6, FN_HTX2, FN_SDA1_B, FN_PWM4_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR3", 0xE606004C, 32, 4, GROUP( /* IP3_31_28 [4] */ FN_QSPI0_SSL, FN_WE1_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2802,9 +2777,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, FN_AVB_AVTP_CAPTURE_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR4", 0xE6060050, 32, 4, GROUP( /* IP4_31_28 [4] */ FN_DU0_DR6, 0, FN_RX2_C, 0, 0, 0, FN_A6, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2830,9 +2803,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_EX_WAIT0, FN_CAN_CLK_B, FN_SCIF_CLK_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR5", 0xE6060054, 32, 4, GROUP( /* IP5_31_28 [4] */ FN_DU0_DG6, 0, FN_HRX1_C, 0, 0, 0, FN_A14, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2858,9 +2829,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_DR7, 0, FN_TX2_C, 0, FN_PWM2_B, 0, FN_A7, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR6", 0xE6060058, 32, 4, GROUP( /* IP6_31_28 [4] */ FN_DU0_DB6, 0, 0, 0, 0, 0, FN_A22, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2886,9 +2855,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_DG7, 0, FN_HTX1_C, 0, FN_PWM6_B, 0, FN_A15, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR7", 0xE606005C, 32, 4, GROUP( /* IP7_31_28 [4] */ FN_DU0_DISP, 0, 0, 0, FN_CAN1_RX_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2914,9 +2881,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 
FN_DU0_DB7, 0, 0, 0, 0, 0, FN_A23, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060060, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR8", 0xE6060060, 32, 4, GROUP( /* IP8_31_28 [4] */ FN_VI1_DATA5, 0, 0, 0, FN_AVB_RXD4, FN_ETH_LINK, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2942,9 +2907,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_CDE, 0, 0, 0, FN_CAN1_TX_C, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060064, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR9", 0xE6060064, 32, 4, GROUP( /* IP9_31_28 [4] */ FN_VI1_DATA9, 0, 0, FN_SDA2_B, FN_AVB_TXD0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2970,9 +2933,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI1_DATA6, 0, 0, 0, FN_AVB_RXD5, FN_ETH_TXD1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060068, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR10", 0xE6060068, 32, 4, GROUP( /* IP10_31_28 [4] */ FN_SCL1_A, FN_RX4_A, FN_PWM5_D, FN_DU1_DR0, 0, 0, FN_SSI_SCK6_B, FN_VI0_G0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2999,9 +2960,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI1_DATA10, 0, 0, FN_CAN0_RX_B, FN_AVB_TXD1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR11", 0xE606006C, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR11", 0xE606006C, 32, 4, GROUP( /* IP11_31_28 [4] */ FN_HRX1_A, FN_SCL4_A, FN_PWM6_A, FN_DU1_DG0, FN_RX0_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3031,9 +2990,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SDA1_A, FN_TX4_A, 0, FN_DU1_DR1, 0, 0, FN_SSI_WS6_B, FN_VI0_G1, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060070, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR12", 0xE6060070, 32, 4, GROUP( /* IP12_31_28 [4] */ FN_SD2_DAT2, FN_RX2_A, 0, FN_DU1_DB0, FN_SSI_SDATA2_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3059,9 +3016,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_HTX1_A, FN_SDA4_A, 0, FN_DU1_DG1, FN_TX0_A, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060074, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR13", 0xE6060074, 32, 4, GROUP( /* IP13_31_28 [4] */ FN_SSI_SCK5_A, 0, 0, FN_DU1_DOTCLKOUT1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3088,9 +3043,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SD2_DAT3, FN_TX2_A, 0, FN_DU1_DB1, FN_SSI_WS9_B, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060078, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR14", 0xE6060078, 32, 4, GROUP( /* IP14_31_28 [4] */ FN_SSI_SDATA7_A, 0, 0, FN_IRQ8, FN_AUDIO_CLKA_D, FN_CAN_CLK_D, FN_VI0_G5, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3116,9 +3069,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SSI_WS5_A, 0, FN_SCL3_C, FN_DU1_DOTCLKIN, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR15", 0xE606007C, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { PINMUX_CFG_REG("IPSR15", 0xE606007C, 32, 4, GROUP( /* IP15_31_28 [4] */ FN_SSI_WS4_A, 0, FN_AVB_PHY_INT, 0, 0, 0, FN_VI0_R5, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3144,9 +3095,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SSI_SCK0129_A, FN_MSIOF1_RXD_A, FN_RX5_D, 0, 0, 0, FN_VI0_G6, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060080, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), - GROUP( + { 
PINMUX_CFG_REG("IPSR16", 0xE6060080, 32, 4, GROUP( /* IP16_31_28 [4] */ FN_SSI_SDATA2_A, FN_HRTS1_N_B, 0, 0, 0, 0, FN_VI0_DATA4_VI0_B4, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3174,10 +3123,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR17", 0xE6060084, 32, - GROUP(4, 4, 4, 4, 4, 4, 4, 4), + GROUP(-4, 4, 4, 4, 4, 4, 4, 4), GROUP( - /* IP17_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP17_31_28 [4] RESERVED */ /* IP17_27_24 [4] */ FN_AUDIO_CLKOUT_A, FN_SDA4_B, 0, 0, 0, 0, FN_VI0_VSYNC_N, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -3201,25 +3149,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI0_DATA5_VI0_B5, 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xE60600C0, 32, - GROUP(1, 1, 1, 1, 1, 2, 1, 1, 2, 2, 2, 1, - 3, 3, 1, 2, 3, 3, 1), + GROUP(-5, 2, -2, 2, 2, 2, -1, + 3, 3, -1, 2, 3, 3, 1), GROUP( - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, + /* RESERVED [5] */ /* SEL_ADGA [2] */ FN_SEL_ADGA_0, FN_SEL_ADGA_1, FN_SEL_ADGA_2, FN_SEL_ADGA_3, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, + /* RESERVED [2] */ /* SEL_CANCLK [2] */ FN_SEL_CANCLK_0, FN_SEL_CANCLK_1, FN_SEL_CANCLK_2, FN_SEL_CANCLK_3, @@ -3228,7 +3164,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_CAN0 [2] */ FN_SEL_CAN0_0, FN_SEL_CAN0_1, FN_SEL_CAN0_2, FN_SEL_CAN0_3, /* RESERVED [1] */ - 0, 0, /* SEL_I2C04 [3] */ FN_SEL_I2C04_0, FN_SEL_I2C04_1, FN_SEL_I2C04_2, FN_SEL_I2C04_3, FN_SEL_I2C04_4, 0, 0, 0, @@ -3236,7 +3171,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_I2C03_0, FN_SEL_I2C03_1, FN_SEL_I2C03_2, FN_SEL_I2C03_3, FN_SEL_I2C03_4, 0, 0, 0, /* RESERVED [1] */ - 0, 0, /* SEL_I2C02 [2] */ FN_SEL_I2C02_0, FN_SEL_I2C02_1, FN_SEL_I2C02_2, FN_SEL_I2C02_3, /* SEL_I2C01 [3] */ @@ -3249,8 +3183,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_AVB_0, FN_SEL_AVB_1, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xE60600C4, 32, - GROUP(1, 3, 3, 2, 2, 1, 2, 2, 2, 1, 1, 1, - 1, 1, 2, 1, 1, 2, 2, 1), + GROUP(1, 3, 3, 2, 2, 1, 2, 2, 2, -1, 1, -1, + 1, 1, -2, 1, 1, -2, 2, 1), GROUP( /* SEL_SCIFCLK [1] */ FN_SEL_SCIFCLK_0, FN_SEL_SCIFCLK_1, @@ -3273,52 +3207,28 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_MSIOF2 [2] */ FN_SEL_MSIOF2_0, FN_SEL_MSIOF2_1, FN_SEL_MSIOF2_2, 0, /* RESERVED [1] */ - 0, 0, /* SEL_MSIOF1 [1] */ FN_SEL_MSIOF1_0, FN_SEL_MSIOF1_1, /* RESERVED [1] */ - 0, 0, /* SEL_MSIOF0 [1] */ FN_SEL_MSIOF0_0, FN_SEL_MSIOF0_1, /* SEL_RCN [1] */ FN_SEL_RCN_0, FN_SEL_RCN_1, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_TMU2 [1] */ FN_SEL_TMU2_0, FN_SEL_TMU2_1, /* SEL_TMU1 [1] */ FN_SEL_TMU1_0, FN_SEL_TMU1_1, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_HSCIF1 [2] */ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, FN_SEL_HSCIF1_2, 0, /* SEL_HSCIF0 [1] */ FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE60600C8, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2), + GROUP(-10, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), GROUP( - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, + /* RESERVED [10] */ /* SEL_ADGB [2] */ FN_SEL_ADGB_0, FN_SEL_ADGB_1, FN_SEL_ADGB_2, 0, /* SEL_ADGC [2] */ 
diff --git a/drivers/pinctrl/renesas/pfc-r8a7778.c b/drivers/pinctrl/renesas/pfc-r8a7778.c index a24672ca3c01..35bdb9af8160 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7778.c +++ b/drivers/pinctrl/renesas/pfc-r8a7778.c @@ -2240,11 +2240,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + GROUP(-1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 3, 3, 2), GROUP( - /* IP0_31 [1] */ - 0, 0, + /* IP0_31 [1] RESERVED */ /* IP0_30 [1] */ FN_A19, 0, /* IP0_29 [1] */ @@ -2296,13 +2295,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32, - GROUP(1, 1, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3, + GROUP(-2, 2, 3, 1, 3, 3, 1, 2, 4, 3, 3, 3, 1, 1), GROUP( - /* IP1_31 [1] */ - 0, 0, - /* IP1_30 [1] */ - 0, 0, + /* IP1_31_30 [2] RESERVED */ /* IP1_29_28 [2] */ FN_EX_CS1, FN_MMC_D4, 0, 0, /* IP1_27_25 [3] */ @@ -2437,11 +2433,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR4", 0xfffc0030, 32, - GROUP(1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1, + GROUP(-1, 2, 2, 2, 4, 4, 2, 2, 2, 2, 1, 1, 3, 3, 1), GROUP( - /* IP4_31 [1] */ - 0, 0, + /* IP4_31 [1] RESERVED */ /* IP4_30_29 [2] */ FN_VI0_R4_B, FN_DU0_DB4, FN_LCDOUT20, 0, /* IP4_28_27 [2] */ @@ -2481,12 +2476,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32, - GROUP(1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1, + GROUP(-1, 2, 3, 3, 2, 3, 3, 2, 1, 2, 2, 1, 1, 2, 2, 2), GROUP( - /* IP5_31 [1] */ - 0, 0, + /* IP5_31 [1] RESERVED */ /* IP5_30_29 [2] */ FN_SSI_SDATA7, FN_HSPI_TX0_B, FN_RX2_A, FN_CAN0_RX_B, /* IP5_28_26 [3] */ @@ -2619,12 +2613,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32, - GROUP(1, 1, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3), + GROUP(-2, 3, 3, 2, 3, 3, 2, 3, 2, 3, 3, 3), GROUP( - /* IP8_31 [1] */ - 0, 0, - /* IP8_30 [1] */ - 0, 0, + /* IP8_31_30 [2] RESERVED */ /* IP8_29_27 [3] */ FN_VI0_G3, FN_SD2_CMD_B, FN_VI1_DATA5, FN_DU1_DR5, 0, FN_HRX1_B, 0, 0, @@ -2660,12 +2651,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32, - GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP9_31 [1] */ - 0, 0, - /* IP9_30 [1] */ - 0, 0, + /* IP9_31_30 [2] RESERVED */ /* IP9_29_27 [3] */ FN_VI1_DATA11_A, FN_DU1_EXHSYNC_DU1_HSYNC, FN_ETH_RXD1, FN_FMIN_C, @@ -2703,24 +2691,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR10", 0xfffc0048, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 4, - 3, 3, 3), + GROUP(-7, 3, 3, 3, 3, 4, 3, 3, 3), GROUP( - /* IP10_31 [1] */ - 0, 0, - /* IP10_30 [1] */ - 0, 0, - /* IP10_29 [1] */ - 0, 0, - /* IP10_28 [1] */ - 0, 0, - /* IP10_27 [1] */ - 0, 0, - /* IP10_26 [1] */ - 0, 0, - /* IP10_25 [1] */ - 0, 0, + /* IP10_31_25 [7] RESERVED */ /* IP10_24_22 [3] */ FN_SD2_WP_A, FN_VI1_DATA15, FN_EX_WAIT2_B, FN_DACK0_B, FN_HSPI_TX2_B, FN_CAN_CLK_C, 0, 0, @@ -2754,12 +2728,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xfffc0050, 32, - GROUP(1, 1, 2, 2, 3, 2, 2, 1, 1, 1, 1, 2, - 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1), + GROUP(-1, 1, 2, 2, 3, 2, 2, -1, 1, 1, 1, 2, + -1, 1, 1, 1, 2, 1, -1, 1, 1, 1, 1, 1), GROUP( - /* SEL 31 [1] */ - 0, 0, + /* SEL 31 [1] RESERVED */ /* SEL_30 (SCIF5) [1] */ FN_SEL_SCIF5_A, 
FN_SEL_SCIF5_B, /* SEL_29_28 (SCIF4) [2] */ @@ -2779,8 +2752,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_20_19 (SCIF0) [2] */ FN_SEL_SCIF0_A, FN_SEL_SCIF0_B, FN_SEL_SCIF0_C, FN_SEL_SCIF0_D, - /* SEL_18 [1] */ - 0, 0, + /* SEL_18 [1] RESERVED */ /* SEL_17 (SSI2) [1] */ FN_SEL_SSI2_A, FN_SEL_SSI2_B, /* SEL_16 (SSI1) [1] */ @@ -2790,8 +2762,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_14_13 (VI0) [2] */ FN_SEL_VI0_A, FN_SEL_VI0_B, FN_SEL_VI0_C, FN_SEL_VI0_D, - /* SEL_12 [1] */ - 0, 0, + /* SEL_12 [1] RESERVED */ /* SEL_11 (SD2) [1] */ FN_SEL_SD2_A, FN_SEL_SD2_B, /* SEL_10 (SD1) [1] */ @@ -2803,8 +2774,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_IRQ2_C, 0, /* SEL_6 (IRQ1) [1] */ FN_SEL_IRQ1_A, FN_SEL_IRQ1_B, - /* SEL_5 [1] */ - 0, 0, + /* SEL_5 [1] RESERVED */ /* SEL_4 (DREQ2) [1] */ FN_SEL_DREQ2_A, FN_SEL_DREQ2_B, /* SEL_3 (DREQ1) [1] */ @@ -2818,18 +2788,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xfffc0054, 32, - GROUP(1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 2, 1), + GROUP(-4, 1, 1, 2, 1, 1, -7, + 2, 2, 2, 1, 1, 1, 1, 2, 2, 1), GROUP( - /* SEL_31 [1] */ - 0, 0, - /* SEL_30 [1] */ - 0, 0, - /* SEL_29 [1] */ - 0, 0, - /* SEL_28 [1] */ - 0, 0, + /* SEL_31_28 [4] RESERVED */ /* SEL_27 (CAN1) [1] */ FN_SEL_CAN1_A, FN_SEL_CAN1_B, /* SEL_26 (CAN0) [1] */ @@ -2841,20 +2804,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_HSCIF1_A, FN_SEL_HSCIF1_B, /* SEL_22 (HSCIF0) [1] */ FN_SEL_HSCIF0_A, FN_SEL_HSCIF0_B, - /* SEL_21 [1] */ - 0, 0, - /* SEL_20 [1] */ - 0, 0, - /* SEL_19 [1] */ - 0, 0, - /* SEL_18 [1] */ - 0, 0, - /* SEL_17 [1] */ - 0, 0, - /* SEL_16 [1] */ - 0, 0, - /* SEL_15 [1] */ - 0, 0, + /* SEL_21_15 [7] RESERVED */ /* SEL_14_13 (REMOCON) [2] */ FN_SEL_REMOCON_A, FN_SEL_REMOCON_B, FN_SEL_REMOCON_C, 0, diff --git a/drivers/pinctrl/renesas/pfc-r8a7779.c b/drivers/pinctrl/renesas/pfc-r8a7779.c index 296b5fb0f349..fcc8ea48881f 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7779.c +++ b/drivers/pinctrl/renesas/pfc-r8a7779.c @@ -3300,13 +3300,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, FN_A2, GP_5_0_FN, FN_A1 )) }, - { PINMUX_CFG_REG("GPSR6", 0xfffc001c, 32, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR6", 0xfffc001c, 32, + GROUP(-23, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP6_31_9 RESERVED */ GP_6_8_FN, FN_IP3_20, GP_6_7_FN, FN_IP3_19, GP_6_6_FN, FN_IP3_18, @@ -3319,10 +3316,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("IPSR0", 0xfffc0020, 32, - GROUP(1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3), + GROUP(-1, 3, 2, 1, 2, 4, 3, 2, 2, 2, 2, 2, 3, 3), GROUP( - /* IP0_31 [1] */ - 0, 0, + /* IP0_31 [1] RESERVED */ /* IP0_30_28 [3] */ FN_RD_WR, FN_FWE, FN_ATAG0, FN_VI1_R7, FN_HRTS1, FN_RX4_C, 0, 0, @@ -3358,10 +3354,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SCIF_CLK, FN_TCLK0_C, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR1", 0xfffc0024, 32, - GROUP(3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2), + GROUP(-3, 4, 2, 2, 2, 4, 4, 4, 3, 2, 2), GROUP( - /* IP1_31_29 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP1_31_29 [3] RESERVED */ /* IP1_28_25 [4] */ FN_HTX0, FN_TX1, FN_SDATA, FN_CTS0_C, FN_SUB_TCK, FN_CC5_STATE2, FN_CC5_STATE10, FN_CC5_STATE18, @@ -3397,10 +3392,9 @@ static const struct 
pinmux_cfg_reg pinmux_config_regs[] = { FN_EX_CS0, FN_RX3_C_IRDA_RX_C, FN_MMC0_D6, FN_FD6 )) }, { PINMUX_CFG_REG_VAR("IPSR2", 0xfffc0028, 32, - GROUP(1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4), + GROUP(-1, 3, 1, 1, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4), GROUP( - /* IP2_31 [1] */ - 0, 0, + /* IP2_31 [1] RESERVED */ /* IP2_30_28 [3] */ FN_DU0_DG0, FN_LCDOUT8, FN_DREQ1, FN_SCL2, FN_AUDATA2, 0, 0, 0, @@ -3545,11 +3539,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_DISP, FN_QPOLA, FN_CAN_CLK_C, FN_SCK2_C )) }, { PINMUX_CFG_REG_VAR("IPSR5", 0xfffc0034, 32, - GROUP(1, 2, 1, 4, 3, 4, 2, 2, 2, 2, 1, 1, + GROUP(-1, 2, 1, 4, 3, 4, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 3), GROUP( - /* IP5_31 [1] */ - 0, 0, + /* IP5_31 [1] RESERVED */ /* IP5_30_29 [2] */ FN_AUDIO_CLKB, FN_USB_OVC2, FN_CAN_DEBUGOUT0, FN_MOUT0, /* IP5_28 [1] */ @@ -3592,15 +3585,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_RX5, FN_RTS0_D_TANS_D, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR6", 0xfffc0038, 32, - GROUP(1, 2, 2, 2, 2, 3, 2, 3, 3, 3, 1, 2, + GROUP(-1, 2, -2, 2, 2, 3, 2, 3, 3, 3, 1, 2, 2, 2, 2), GROUP( - /* IP6_31 [1] */ - 0, 0, + /* IP6_31 [1] RESERVED */ /* IP6_30_29 [2] */ FN_SSI_SCK6, FN_ADICHS0, FN_CAN0_TX, FN_IERX_B, - /* IP_28_27 [2] */ - 0, 0, 0, 0, + /* IP_28_27 [2] RESERVED */ /* IP6_26_25 [2] */ FN_SSI_SDATA5, FN_ADIDATA, FN_CAN_DEBUGOUT12, FN_RX3_IRDA_RX, /* IP6_24_23 [2] */ @@ -3631,11 +3622,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SSI_SCK0129, FN_CAN_DEBUGOUT1, FN_MOUT1, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xfffc003c, 32, - GROUP(1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, + GROUP(-1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 2, 2), GROUP( - /* IP7_31 [1] */ - 0, 0, + /* IP7_31 [1] RESERVED */ /* IP7_30_29 [2] */ FN_SD0_WP, FN_DACK2, FN_CTS1_B, 0, /* IP7_28_27 [2] */ @@ -3669,10 +3659,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SSI_WS6, FN_ADICHS1, FN_CAN0_RX, FN_IETX_B )) }, { PINMUX_CFG_REG_VAR("IPSR8", 0xfffc0040, 32, - GROUP(1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4), + GROUP(-1, 3, 3, 2, 2, 1, 1, 1, 2, 4, 4, 4, 4), GROUP( - /* IP8_31 [1] */ - 0, 0, + /* IP8_31 [1] RESERVED */ /* IP8_30_28 [3] */ FN_VI0_VSYNC, FN_VI0_DATA1_B_VI0_B1_B, FN_RTS1_C_TANS_C, FN_RX4_D, FN_PWMFSW0_C, 0, 0, 0, @@ -3713,11 +3702,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR9", 0xfffc0044, 32, - GROUP(2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 1, + GROUP(-2, 2, 2, 2, 2, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2), GROUP( - /* IP9_31_30 [2] */ - 0, 0, 0, 0, + /* IP9_31_30 [2] RESERVED */ /* IP9_29_28 [2] */ FN_VI0_G7, FN_ETH_RXD1, FN_SD2_DAT3_B, FN_ARM_TRACEDATA_9, /* IP9_27_26 [2] */ @@ -3790,10 +3778,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ARM_TRACEDATA_10, FN_DREQ0_C, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR11", 0xfffc004c, 32, - GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP11_31_30 [2] */ - 0, 0, 0, 0, + /* IP11_31_30 [2] RESERVED */ /* IP11_29_27 [3] */ FN_VI1_G1, FN_VI3_DATA1, FN_SSI_SCK1, FN_TS_SDEN1, FN_DACK2_B, FN_RX2, FN_HRTS0_B, 0, @@ -3826,19 +3813,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ADICLK_B, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR12", 0xfffc0050, 32, - GROUP(4, 4, 4, 2, 3, 3, 3, 3, 3, 3), + GROUP(-14, 3, 3, 3, 3, 3, 3), GROUP( - /* IP12_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP12_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP12_23_20 [4] */ - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP12_19_18 [2] */ - 0, 0, 0, 0, + /* IP12_31_18 [14] RESERVED */ /* IP12_17_15 [3] */ FN_VI1_G7, FN_VI3_DATA7, FN_GPS_MAG, FN_FCE, FN_SCK4_B, 0, 0, 0, @@ -3904,7 +3881,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_EXBUS0_0, FN_SEL_EXBUS0_1, FN_SEL_EXBUS0_2, 0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xfffc0094, 32, - GROUP(2, 2, 2, 2, 1, 1, 1, 3, 1, 2, 2, 2, + GROUP(2, 2, 2, 2, 1, 1, 1, 3, 1, -6, 2, 1, 1, 2, 1, 2, 2), GROUP( /* SEL_TMU1 [2] */ @@ -3926,12 +3903,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_PWMFSW_3, FN_SEL_PWMFSW_4, 0, 0, 0, /* SEL_ADI [1] */ FN_SEL_ADI_0, FN_SEL_ADI_1, - /* [2] */ - 0, 0, 0, 0, - /* [2] */ - 0, 0, 0, 0, - /* [2] */ - 0, 0, 0, 0, + /* [6] RESERVED */ /* SEL_GPS [2] */ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3, /* SEL_SIM [1] */ diff --git a/drivers/pinctrl/renesas/pfc-r8a7790.c b/drivers/pinctrl/renesas/pfc-r8a7790.c index 9db9e61d96bc..ee21d650991b 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7790.c +++ b/drivers/pinctrl/renesas/pfc-r8a7790.c @@ -5122,10 +5122,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_0_FN, FN_IP14_21_19 )) }, { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32, - GROUP(1, 4, 4, 3, 4, 4, 3, 3, 3, 3), + GROUP(-1, 4, 4, 3, 4, 4, 3, 3, 3, 3), GROUP( - /* IP0_31 [1] */ - 0, 0, + /* IP0_31 [1] RESERVED */ /* IP0_30_27 [4] */ FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0, 0, FN_VI0_G0, FN_VI0_G0_B, FN_VI2_DATA0_VI2_B0, @@ -5159,10 +5158,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32, - GROUP(2, 2, 2, 4, 4, 3, 3, 4, 4, 4), + GROUP(-2, 2, 2, 4, 4, 3, 3, 4, 4, 4), GROUP( - /* IP1_31_30 [2] */ - 0, 0, 0, 0, + /* IP1_31_30 [2] RESERVED */ /* IP1_29_28 [2] */ FN_A1, FN_PWM4, 0, 0, /* IP1_27_26 [2] */ @@ -5197,10 +5195,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32, - GROUP(3, 3, 4, 4, 3, 3, 3, 3, 3, 3), + GROUP(-3, 3, 4, 4, 3, 3, 3, 3, 3, 3), GROUP( - /* IP2_31_29 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP2_31_29 [3] RESERVED */ /* IP2_28_26 [3] */ FN_A10, FN_SSI_SDATA5_B, FN_MSIOF2_SYNC, FN_VI0_R6, FN_VI0_R6_B, FN_VI2_DATA2_VI2_B2_B, 0, 0, @@ -5261,10 +5258,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32, - GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP4_31_30 [2] */ - 0, 0, 0, 0, + /* IP4_31_30 [2] RESERVED */ /* IP4_29_27 [3] */ FN_EX_CS2_N, FN_GPS_SIGN, FN_HRTS1_N_B, FN_VI3_CLKENB, FN_VI1_G0, FN_VI1_G0_B, FN_VI2_R2, 0, @@ -5295,10 +5291,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { )) }, { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060034, 32, - GROUP(2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3), + GROUP(-2, 3, 3, 3, 3, 3, 2, 3, 4, 3, 3), GROUP( - /* IP5_31_30 [2] */ - 0, 0, 0, 0, + /* IP5_31_30 [2] RESERVED */ /* IP5_29_27 [3] */ FN_DREQ0_N, FN_VI1_HSYNC_N, FN_VI1_HSYNC_N_B, FN_VI2_R7, FN_SSI_SCK78_C, FN_SSI_WS78_B, 0, 0, @@ -5368,10 +5363,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI1_VSYNC_N_B, FN_SSI_WS78_C, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32, - GROUP(1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3), + GROUP(-1, 2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 3, 3), GROUP( - /* IP7_31 [1] */ - 0, 0, + /* IP7_31 [1] RESERVED */ /* IP7_30_29 [2] */ FN_VI0_DATA0_VI0_B0, FN_ATACS10_N, FN_AVB_RXD2, 0, /* 
IP7_28_27 [2] */ @@ -5404,11 +5398,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SIM0_D_C, FN_HCTS0_N_F, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32, - GROUP(1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, + GROUP(-1, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), GROUP( - /* IP8_31 [1] */ - 0, 0, + /* IP8_31 [1] RESERVED */ /* IP8_30_29 [2] */ FN_SD0_CMD, FN_SCIFB1_SCK_B, FN_VI1_DATA1_VI1_B1_B, 0, /* IP8_28 [1] */ @@ -5482,10 +5475,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SD0_DAT0, FN_SCIFB1_RXD_B, FN_VI1_DATA2_VI1_B2_B, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR10", 0xE6060048, 32, - GROUP(2, 4, 3, 4, 4, 4, 4, 3, 4), + GROUP(-2, 4, 3, 4, 4, 4, 4, 3, 4), GROUP( - /* IP10_31_30 [2] */ - 0, 0, 0, 0, + /* IP10_31_30 [2] RESERVED */ /* IP10_29_26 [4] */ FN_SD2_CD, FN_MMC0_D4, FN_TS_SDAT0_B, FN_USB2_EXTP, FN_GLO_I0, FN_VI0_DATA6_VI0_B6_B, FN_HCTS0_N_D, FN_TS_SDAT1_B, @@ -5558,10 +5550,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_TS_SCK1_B, FN_GLO_I1_B, FN_VI3_DATA7_B, 0, 0, 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32, - GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2), + GROUP(-1, 3, 3, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2), GROUP( - /* IP12_31 [1] */ - 0, 0, + /* IP12_31 [1] RESERVED */ /* IP12_30_28 [3] */ FN_SSI_WS5, FN_SCIFB1_RXD, FN_IECLK_B, FN_DU2_EXVSYNC_DU2_VSYNC, FN_QSTB_QHE, @@ -5598,10 +5589,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SSI_WS0129, FN_CAN0_TX_B, FN_MOUT1, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32, - GROUP(1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3), + GROUP(-1, 2, 3, 3, 4, 3, 3, 3, 3, 4, 3), GROUP( - /* IP13_31 [1] */ - 0, 0, + /* IP13_31 [1] RESERVED */ /* IP13_30_29 [2] */ FN_AUDIO_CLKA, FN_SCIFB2_RTS_N, FN_CAN_DEBUGOUT14, 0, /* IP13_28_26 [3] */ @@ -5635,10 +5625,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_LCDOUT2, FN_CAN_DEBUGOUT5, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR14", 0xE6060058, 32, - GROUP(1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3), + GROUP(-1, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3), GROUP( - /* IP14_30 [1] */ - 0, 0, + /* IP14_30 [1] RESERVED */ /* IP14_30_28 [3] */ FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N, FN_MSIOF3_TXD, FN_DU1_DOTCLKOUT, FN_QSTVB_QVE, @@ -5674,10 +5663,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_REMOCON, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32, - GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3), + GROUP(-2, 2, 2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3), GROUP( - /* IP15_31_30 [2] */ - 0, 0, 0, 0, + /* IP15_31_30 [2] RESERVED */ /* IP15_29_28 [2] */ FN_MSIOF0_TXD, FN_ADICHS1, FN_DU2_DG6, FN_LCDOUT14, /* IP15_27_26 [2] */ @@ -5710,26 +5698,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_LCDOUT15, FN_SCIF_CLK_B, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32, - GROUP(4, 4, 4, 4, 4, 4, 1, 1, 3, 3), + GROUP(-24, 1, 1, 3, 3), GROUP( - /* IP16_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_19_16 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_15_12 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_11_8 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP16_31_8 [24] RESERVED */ /* IP16_7 [1] */ FN_USB1_OVC, FN_TCLK1_B, /* IP16_6 [1] */ @@ -5743,7 +5714,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("MOD_SEL", 
0xE6060090, 32, GROUP(3, 2, 2, 3, 2, 1, 1, 1, 2, 1, 2, 1, - 1, 1, 1, 2, 1, 1, 2, 1, 1), + 1, 1, 1, 2, -1, 1, 2, 1, 1), GROUP( /* SEL_SCIF1 [3] */ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3, @@ -5782,7 +5753,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_TSIF1 [2] */ FN_SEL_TSIF1_0, FN_SEL_TSIF1_1, FN_SEL_TSIF1_2, 0, /* RESERVED [1] */ - 0, 0, /* SEL_LBS [1] */ FN_SEL_LBS_0, FN_SEL_LBS_1, /* SEL_TSIF0 [2] */ @@ -5793,11 +5763,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_SOF0_0, FN_SEL_SOF0_1, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32, - GROUP(3, 1, 1, 1, 2, 1, 2, 1, 2, 1, 1, 1, - 3, 3, 2, 3, 2, 2), + GROUP(-3, 1, 1, 1, 2, 1, 2, 1, -2, 1, 1, 1, + 3, 3, 2, -3, 2, 2), GROUP( /* RESERVED [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, /* SEL_TMU1 [1] */ FN_SEL_TMU1_0, FN_SEL_TMU1_1, /* SEL_HSCIF1 [1] */ @@ -5813,7 +5782,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_CAN1 [1] */ FN_SEL_CAN1_0, FN_SEL_CAN1_1, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_SCIF2 [1] */ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1, /* SEL_ADI [1] */ @@ -5829,36 +5797,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_GPS [2] */ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, 0, /* RESERVED [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, /* SEL_SIM [2] */ FN_SEL_SIM_0, FN_SEL_SIM_1, FN_SEL_SIM_2, 0, /* SEL_SSI8 [2] */ FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2, 0, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32, - GROUP(1, 1, 2, 4, 4, 2, 2, 4, 2, 3, 2, 3, 2), + GROUP(1, 1, -12, 2, -6, 3, 2, 3, 2), GROUP( /* SEL_IICDVFS [1] */ FN_SEL_IICDVFS_0, FN_SEL_IICDVFS_1, /* SEL_IIC0 [1] */ FN_SEL_IIC0_0, FN_SEL_IIC0_1, - /* RESERVED [2] */ - 0, 0, 0, 0, - /* RESERVED [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, + /* RESERVED [12] */ /* SEL_IEB [2] */ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0, - /* RESERVED [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, + /* RESERVED [6] */ /* SEL_IIC2 [3] */ FN_SEL_IIC2_0, FN_SEL_IIC2_1, FN_SEL_IIC2_2, FN_SEL_IIC2_3, FN_SEL_IIC2_4, 0, 0, 0, diff --git a/drivers/pinctrl/renesas/pfc-r8a7791.c b/drivers/pinctrl/renesas/pfc-r8a7791.c index 076a8b7d71de..d57458504117 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7791.c +++ b/drivers/pinctrl/renesas/pfc-r8a7791.c @@ -5686,11 +5686,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_7_0_FN, FN_IP15_17_15 )) }, { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32, - GROUP(1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1, + GROUP(-1, 2, 2, 2, 2, 2, 2, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP0_31 [1] */ - 0, 0, + /* IP0_31 [1] RESERVED */ /* IP0_30_29 [2] */ FN_A6, FN_MSIOF1_SCK, 0, 0, @@ -5783,10 +5782,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060028, 32, - GROUP(2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3), + GROUP(-2, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2, 3), GROUP( - /* IP2_31_30 [2] */ - 0, 0, 0, 0, + /* IP2_31_30 [2] RESERVED */ /* IP2_29_27 [3] */ FN_EX_CS3_N, FN_ATADIR0_N, FN_MSIOF2_TXD, FN_ATAG0_N, 0, FN_EX_WAIT1, @@ -5820,10 +5818,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SCIFB1_TXD_C, 0, FN_SCIFB1_SCK_B, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR3", 0xE606002C, 32, - GROUP(1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3), + GROUP(-1, 3, 3, 3, 2, 2, 2, 2, 2, 3, 3, 3, 3), GROUP( - /* IP3_31 [1] */ - 0, 0, + /* 
IP3_31 [1] RESERVED */ /* IP3_30_28 [3] */ FN_SSI_WS0129, FN_HTX0_C, FN_HTX2_C, FN_SCIFB0_TXD_C, FN_SCIFB2_TXD_C, @@ -5859,11 +5856,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060030, 32, - GROUP(1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2, + GROUP(-1, 3, 2, 2, 2, 1, 1, 1, 3, 3, 3, 2, 3, 3, 2), GROUP( - /* IP4_31 [1] */ - 0, 0, + /* IP4_31 [1] RESERVED */ /* IP4_30_28 [3] */ FN_SSI_SCK5, FN_MSIOF1_SCK_C, FN_TS_SDATA0, FN_GLO_I0, FN_MSIOF2_SYNC_D, FN_VI1_R2_B, @@ -5943,10 +5939,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32, - GROUP(2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3), + GROUP(-2, 3, 3, 3, 2, 3, 2, 2, 2, 2, 2, 3, 3), GROUP( - /* IP6_31_30 [2] */ - 0, 0, 0, 0, + /* IP6_31_30 [2] RESERVED */ /* IP6_29_27 [3] */ FN_IRQ8, FN_HRTS1_N_C, FN_MSIOF1_RXD_B, FN_GPS_SIGN_C, FN_GPS_SIGN_D, @@ -5984,10 +5979,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32, - GROUP(2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3), + GROUP(-2, 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3), GROUP( - /* IP7_31_30 [2] */ - 0, 0, 0, 0, + /* IP7_31_30 [2] RESERVED */ /* IP7_29_27 [3] */ FN_DU1_DG2, FN_LCDOUT10, FN_VI1_DATA4_B, FN_SCIF1_SCK_B, FN_SCIFA1_SCK, FN_SSI_SCK78_B, @@ -6026,10 +6020,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32, - GROUP(1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-1, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP8_31 [1] */ - 0, 0, + /* IP8_31 [1] RESERVED */ /* IP8_30_28 [3] */ FN_DU1_DB5, FN_LCDOUT21, FN_TX3, FN_SCIFA3_TXD, FN_CAN1_TX, 0, 0, 0, @@ -6201,10 +6194,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_I2C1_SDA_D, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32, - GROUP(2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2), + GROUP(-2, 3, 3, 2, 2, 2, 2, 3, 3, 3, 3, 2, 2), GROUP( - /* IP12_31_30 [2] */ - 0, 0, 0, 0, + /* IP12_31_30 [2] RESERVED */ /* IP12_29_27 [3] */ FN_STP_ISCLK_0, FN_AVB_TX_EN, FN_SCIFB2_RXD_D, FN_ADICS_SAMP_B, FN_MSIOF0_SCK_C, @@ -6243,11 +6235,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ETH_RX_ER, FN_AVB_CRS, FN_I2C3_SCL, FN_IIC0_SCL, )) }, { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32, - GROUP(1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1, + GROUP(-1, 3, 1, 1, 1, 2, 1, 3, 3, 1, 1, 1, 1, 1, 1, 3, 2, 2, 3), GROUP( - /* IP13_31 [1] */ - 0, 0, + /* IP13_31 [1] RESERVED */ /* IP13_30_28 [3] */ FN_SD1_CD, FN_PWM0, FN_TPU_TO0, FN_I2C1_SCL_C, 0, 0, 0, 0, @@ -6340,10 +6331,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SD1_WP, FN_PWM1_B, FN_I2C1_SDA_C, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR15", 0xE606005C, 32, - GROUP(2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2), + GROUP(-2, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2), GROUP( - /* IP15_31_30 [2] */ - 0, 0, 0, 0, + /* IP15_31_30 [2] RESERVED */ /* IP15_29_27 [3] */ FN_HTX0, FN_SCIFB0_TXD, 0, FN_GLO_SCLK_C, FN_CAN0_TX_B, FN_VI1_DATA5_C, @@ -6382,23 +6372,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SIM0_RST, FN_IETX, FN_CAN1_TX_D, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32, - GROUP(4, 4, 4, 4, 4, 2, 2, 2, 3, 3), + GROUP(-20, 2, 2, 2, 3, 3), GROUP( - /* IP16_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_19_16 [4] */ - 0, 0, 0, 
0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* IP16_15_12 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED [20] */ /* IP16_11_10 [2] */ FN_HRTS1_N, FN_SCIFB1_RTS_N, FN_MLB_DAT, FN_CAN1_RX_B, /* IP16_9_8 [2] */ @@ -6415,11 +6391,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32, - GROUP(1, 2, 2, 2, 3, 2, 1, 1, 1, 1, 3, 2, - 2, 2, 1, 2, 2, 2), + GROUP(-1, 2, 2, 2, 3, 2, 1, 1, 1, 1, 3, -2, + 2, -2, 1, 2, 2, 2), GROUP( /* RESERVED [1] */ - 0, 0, /* SEL_SCIF1 [2] */ FN_SEL_SCIF1_0, FN_SEL_SCIF1_1, FN_SEL_SCIF1_2, FN_SEL_SCIF1_3, /* SEL_SCIFB [2] */ @@ -6446,11 +6421,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_HSCIF1_3, FN_SEL_HSCIF1_4, 0, 0, 0, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_VI1 [2] */ FN_SEL_VI1_0, FN_SEL_VI1_1, FN_SEL_VI1_2, 0, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_TMU [1] */ FN_SEL_TMU1_0, FN_SEL_TMU1_1, /* SEL_LBS [2] */ @@ -6461,15 +6434,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_SOF0_0, FN_SEL_SOF0_1, FN_SEL_SOF0_2, 0, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32, - GROUP(3, 1, 1, 3, 2, 1, 1, 2, 2, 1, 3, 2, - 1, 2, 2, 2, 1, 1, 1), + GROUP(3, -1, 1, 3, 2, -1, 1, 2, -2, 1, 3, 2, + -1, 2, 2, 2, 1, -1, 1), GROUP( /* SEL_SCIF0 [3] */ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3, FN_SEL_SCIF0_4, 0, 0, 0, /* RESERVED [1] */ - 0, 0, /* SEL_SCIF [1] */ FN_SEL_SCIF_0, FN_SEL_SCIF_1, /* SEL_CAN0 [3] */ @@ -6479,13 +6451,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_CAN1 [2] */ FN_SEL_CAN1_0, FN_SEL_CAN1_1, FN_SEL_CAN1_2, FN_SEL_CAN1_3, /* RESERVED [1] */ - 0, 0, /* SEL_SCIFA2 [1] */ FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1, /* SEL_SCIF4 [2] */ FN_SEL_SCIF4_0, FN_SEL_SCIF4_1, FN_SEL_SCIF4_2, 0, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_ADG [1] */ FN_SEL_ADG_0, FN_SEL_ADG_1, /* SEL_FM [3] */ @@ -6495,7 +6465,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_SCIFA5 [2] */ FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2, 0, /* RESERVED [1] */ - 0, 0, /* SEL_GPS [2] */ FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, FN_SEL_GPS_3, /* SEL_SCIFA4 [2] */ @@ -6505,13 +6474,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_SIM [1] */ FN_SEL_SIM_0, FN_SEL_SIM_1, /* RESERVED [1] */ - 0, 0, /* SEL_SSI8 [1] */ FN_SEL_SSI8_0, FN_SEL_SSI8_1, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32, - GROUP(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 2, 2, - 3, 2, 2, 2, 1), + GROUP(2, 2, 2, 2, 2, 2, 2, 2, 1, 1, -2, 2, + 3, 2, -5), GROUP( /* SEL_HSCIF2 [2] */ FN_SEL_HSCIF2_0, FN_SEL_HSCIF2_1, @@ -6536,7 +6504,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_SCIF5 [1] */ FN_SEL_SCIF5_0, FN_SEL_SCIF5_1, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_I2C2 [2] */ FN_SEL_I2C2_0, FN_SEL_I2C2_1, FN_SEL_I2C2_2, FN_SEL_I2C2_3, /* SEL_I2C1 [3] */ @@ -6545,16 +6512,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, /* SEL_I2C0 [2] */ FN_SEL_I2C0_0, FN_SEL_I2C0_1, FN_SEL_I2C0_2, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, - /* RESERVED [1] */ - 0, 0, )) + /* RESERVED [5] */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL4", 0xE606009C, 32, - GROUP(3, 2, 2, 1, 1, 1, 1, 3, 2, 2, 3, 1, - 1, 1, 2, 2, 2, 2), + GROUP(3, 2, 2, -1, 1, 1, 1, 3, -4, 3, -1, + 1, 1, 2, -6), GROUP( /* SEL_SOF1 [3] */ FN_SEL_SOF1_0, FN_SEL_SOF1_1, FN_SEL_SOF1_2, FN_SEL_SOF1_3, @@ -6565,7 +6527,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_DIS 
[2] */ FN_SEL_DIS_0, FN_SEL_DIS_1, FN_SEL_DIS_2, 0, /* RESERVED [1] */ - 0, 0, /* SEL_RAD [1] */ FN_SEL_RAD_0, FN_SEL_RAD_1, /* SEL_RCN [1] */ @@ -6577,27 +6538,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_SCIF2_3, FN_SEL_SCIF2_4, 0, 0, 0, /* RESERVED [2] */ - 0, 0, 0, 0, /* RESERVED [2] */ - 0, 0, 0, 0, /* SEL_SOF2 [3] */ FN_SEL_SOF2_0, FN_SEL_SOF2_1, FN_SEL_SOF2_2, FN_SEL_SOF2_3, FN_SEL_SOF2_4, 0, 0, 0, /* RESERVED [1] */ - 0, 0, /* SEL_SSI1 [1] */ FN_SEL_SSI1_0, FN_SEL_SSI1_1, /* SEL_SSI0 [1] */ FN_SEL_SSI0_0, FN_SEL_SSI0_1, /* SEL_SSP [2] */ FN_SEL_SSP_0, FN_SEL_SSP_1, FN_SEL_SSP_2, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, - /* RESERVED [2] */ - 0, 0, 0, 0, )) + /* RESERVED [6] */ )) }, { }, }; diff --git a/drivers/pinctrl/renesas/pfc-r8a7792.c b/drivers/pinctrl/renesas/pfc-r8a7792.c index 3e101f630148..808a85d62415 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7792.c +++ b/drivers/pinctrl/renesas/pfc-r8a7792.c @@ -1999,16 +1999,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_0_1_FN, FN_IP0_1, GP_0_0_FN, FN_IP0_0 )) }, - { PINMUX_CFG_REG("GPSR1", 0xE6060008, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR1", 0xE6060008, 32, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP1_31_23 RESERVED */ GP_1_22_FN, FN_DU1_CDE, GP_1_21_FN, FN_DU1_DISP, GP_1_20_FN, FN_DU1_EXODDF_DU1_ODDF_DISP_CDE, @@ -2101,22 +2096,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, FN_A17, GP_3_0_FN, FN_A16 )) }, - { PINMUX_CFG_REG("GPSR4", 0xE6060014, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xE6060014, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_17 RESERVED */ GP_4_16_FN, FN_VI0_FIELD, GP_4_15_FN, FN_VI0_D11_G3_Y3, GP_4_14_FN, FN_VI0_D10_G2_Y2, @@ -2135,22 +2119,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, FN_VI0_CLKENB, GP_4_0_FN, FN_VI0_CLK )) }, - { PINMUX_CFG_REG("GPSR5", 0xE6060018, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xE6060018, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP5_31_17 RESERVED */ GP_5_16_FN, FN_VI1_FIELD, GP_5_15_FN, FN_VI1_D11_G3_Y3, GP_5_14_FN, FN_VI1_D10_G2_Y2, @@ -2169,22 +2142,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, FN_VI1_CLKENB, GP_5_0_FN, FN_VI1_CLK )) }, - { PINMUX_CFG_REG("GPSR6", 0xE606001C, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR6", 0xE606001C, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP6_31_17 RESERVED */ GP_6_16_FN, FN_IP2_16, GP_6_15_FN, FN_IP2_15, GP_6_14_FN, FN_IP2_14, @@ -2203,22 +2165,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, FN_IP2_1, GP_6_0_FN, FN_IP2_0 )) }, - { PINMUX_CFG_REG("GPSR7", 0xE6060020, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xE6060020, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( 
+ /* GP7_31_17 RESERVED */ GP_7_16_FN, FN_VI3_FIELD, GP_7_15_FN, FN_IP3_14, GP_7_14_FN, FN_VI3_D10_Y2, @@ -2237,22 +2188,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_7_1_FN, FN_IP3_1, GP_7_0_FN, FN_IP3_0 )) }, - { PINMUX_CFG_REG("GPSR8", 0xE6060024, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR8", 0xE6060024, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP8_31_17 RESERVED */ GP_8_16_FN, FN_IP4_24, GP_8_15_FN, FN_IP4_23, GP_8_14_FN, FN_IP4_22, @@ -2271,22 +2211,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_8_1_FN, FN_IP4_0, GP_8_0_FN, FN_VI4_CLK )) }, - { PINMUX_CFG_REG("GPSR9", 0xE6060028, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR9", 0xE6060028, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP9_31_17 RESERVED */ GP_9_16_FN, FN_VI5_FIELD, GP_9_15_FN, FN_VI5_D11_Y3, GP_9_14_FN, FN_VI5_D10_Y2, @@ -2374,15 +2303,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_11_0_FN, FN_IP7_1_0 )) }, { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060040, 32, - GROUP(4, 4, + GROUP(-8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP0_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP0_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP0_31_24 [8] RESERVED */ /* IP0_23 [1] */ FN_DU0_DB7_C5, 0, /* IP0_22 [1] */ @@ -2433,17 +2359,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_DR0_DATA0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060044, 32, - GROUP(4, 4, - 1, 1, 1, 1, 1, 1, 1, 1, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP1_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP1_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP1_23 [1] */ - 0, 0, + /* IP1_31_23 [9] RESERVED */ /* IP1_22 [1] */ FN_A25, FN_SSL, /* IP1_21 [1] */ @@ -2492,19 +2412,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_EXHSYNC_DU0_HSYNC, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR2", 0xE6060048, 32, - GROUP(4, 4, - 4, 3, 1, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP2_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP2_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP2_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP2_19_17 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP2_31_17 [15] RESERVED */ /* IP2_16 [1] */ FN_VI2_FIELD, FN_AVB_TXD2, /* IP2_15 [1] */ @@ -2541,21 +2453,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI2_CLK, FN_AVB_RX_CLK )) }, { PINMUX_CFG_REG_VAR("IPSR3", 0xE606004C, 32, - GROUP(4, 4, - 4, 4, - 1, 1, 1, 1, 1, 1, 1, 1, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP3_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP3_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP3_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP3_19_16 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP3_15 [1] */ - 0, 0, + /* IP3_31_15 [17] RESERVED */ /* IP3_14 [1] */ FN_VI3_D11_Y3, FN_AVB_AVTP_MATCH, /* IP3_13 [1] */ @@ -2588,14 +2489,10 @@ static const struct 
pinmux_cfg_reg pinmux_config_regs[] = { FN_VI3_CLK, FN_AVB_TX_CLK )) }, { PINMUX_CFG_REG_VAR("IPSR4", 0xE6060050, 32, - GROUP(4, 3, 1, - 1, 1, 1, 2, 2, 2, + GROUP(-7, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1), GROUP( - /* IP4_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP4_27_25 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP4_31_25 [7] RESERVED */ /* IP4_24 [1] */ FN_VI4_FIELD, FN_VI3_D15_Y7, /* IP4_23 [1] */ @@ -2630,21 +2527,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI4_CLKENB, FN_VI0_D12_G4_Y4 )) }, { PINMUX_CFG_REG_VAR("IPSR5", 0xE6060054, 32, - GROUP(4, 4, - 4, 4, - 4, 1, 1, 1, 1, + GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP5_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP5_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP5_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP5_19_16 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP5_15_12 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP5_31_12 [20] RESERVED */ /* IP5_11 [1] */ FN_VI5_D8_Y0, FN_VI1_D23_R7, /* IP5_10 [1] */ @@ -2671,19 +2557,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI5_CLKENB, FN_VI1_D12_G4_Y4_B )) }, { PINMUX_CFG_REG_VAR("IPSR6", 0xE6060058, 32, - GROUP(4, 4, - 4, 1, 2, 1, - 2, 2, 2, 2, + GROUP(-13, 2, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* IP6_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP6_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP6_23_20 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP6_19 [1] */ - 0, 0, + /* IP6_31_19 [13] RESERVED */ /* IP6_18_17 [2] */ FN_DREQ1_N, FN_RX3, 0, 0, /* IP6_16 [1] */ @@ -2714,17 +2591,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_MSIOF0_SCK, FN_HSCK0 )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xE606005C, 32, - GROUP(4, 4, - 3, 1, 1, 1, 1, 1, + GROUP(-11, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2, 2), GROUP( - /* IP7_31_28 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP7_27_24 [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP7_23_21 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP7_31_21 [11] RESERVED */ /* IP7_20 [1] */ FN_AUDIO_CLKB, 0, /* IP7_19 [1] */ diff --git a/drivers/pinctrl/renesas/pfc-r8a7794.c b/drivers/pinctrl/renesas/pfc-r8a7794.c index d1b0e6517382..668643553a70 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7794.c +++ b/drivers/pinctrl/renesas/pfc-r8a7794.c @@ -4867,7 +4867,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("IPSR0", 0xE6060020, 32, GROUP(2, 2, 2, 1, 1, 2, 2, 2, 1, 1, 1, 1, - 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1), + 1, 1, 1, 1, 2, -7, 1), GROUP( /* IP0_31_30 [2] */ FN_D5, FN_SCIF4_RXD_B, FN_I2C0_SCL_D, 0, @@ -4903,25 +4903,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_MMC_CLK, FN_SD2_CLK, /* IP0_9_8 [2] */ FN_SD1_WP, FN_IRQ7, FN_CAN0_TX, 0, - /* IP0_7 [1] */ - 0, 0, - /* IP0_6 [1] */ - 0, 0, - /* IP0_5 [1] */ - 0, 0, - /* IP0_4 [1] */ - 0, 0, - /* IP0_3 [1] */ - 0, 0, - /* IP0_2 [1] */ - 0, 0, - /* IP0_1 [1] */ - 0, 0, + /* IP0_7_1 [7] RESERVED */ /* IP0_0 [1] */ FN_SD1_CD, FN_CAN0_RX, )) }, { PINMUX_CFG_REG_VAR("IPSR1", 0xE6060024, 32, - GROUP(2, 2, 1, 1, 1, 1, 2, 2, 2, 3, 2, 2, + GROUP(2, 2, 1, 1, -1, 1, 2, 2, 2, 3, 2, 2, 3, 2, 2, 2, 2), GROUP( /* IP1_31_30 [2] */ @@ -4932,8 +4919,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_A4, FN_SCIFB0_TXD, /* IP1_26 [1] */ FN_A3, 
FN_SCIFB0_SCK, - /* IP1_25 [1] */ - 0, 0, + /* IP1_25 [1] RESERVED */ /* IP1_24 [1] */ FN_A1, FN_SCIFB1_TXD, /* IP1_23_22 [2] */ @@ -5160,12 +5146,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_EXVSYNC_DU0_VSYNC, FN_QSTB_QHE, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xE606003C, 32, - GROUP(1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(1, -1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( /* IP7_31 [1] */ FN_DREQ0_N, FN_SCIFB1_RXD, - /* IP7_30 [1] */ - 0, 0, + /* IP7_30 [1] RESERVED */ /* IP7_29_27 [3] */ FN_ETH_TXD0, FN_VI0_R2, FN_SCIF3_RXD_B, FN_I2C4_SCL_E, FN_AVB_GTX_CLK, FN_SSI_WS6_B, 0, 0, @@ -5234,10 +5219,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32, - GROUP(1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3), + GROUP(-1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3), GROUP( - /* IP9_31 [1] */ - 0, 0, + /* IP9_31 [1] RESERVED */ /* IP9_30_28 [3] */ FN_SCIF1_SCK, FN_PWM3, FN_TCLK2, FN_DU1_DG5, FN_SSI_SDATA1_B, 0, 0, 0, @@ -5307,10 +5291,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32, - GROUP(2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3), + GROUP(-2, 3, 3, 3, 3, 2, 2, 3, 3, 2, 3, 3), GROUP( - /* IP11_31_30 [2] */ - 0, 0, 0, 0, + /* IP11_31_30 [2] RESERVED */ /* IP11_29_27 [3] */ FN_SSI_SDATA0, FN_MSIOF1_SCK_B, FN_PWM0_B, FN_ADICLK_B, 0, 0, 0, 0, @@ -5343,10 +5326,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR12", 0xE6060050, 32, - GROUP(2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3), + GROUP(-2, 3, 3, 3, 3, 3, 2, 2, 2, 3, 3, 3), GROUP( - /* IP12_31_30 [2] */ - 0, 0, 0, 0, + /* IP12_31_30 [2] RESERVED */ /* IP12_29_27 [3] */ FN_SSI_SCK2, FN_HSCIF1_HTX_B, FN_VI1_DATA2, 0, FN_ATAG0_N, FN_ETH_RXD1_B, 0, 0, @@ -5379,18 +5361,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, FN_DREQ1_N_B, 0, 0, )) }, { PINMUX_CFG_REG_VAR("IPSR13", 0xE6060054, 32, - GROUP(1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-5, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP13_31 [1] */ - 0, 0, - /* IP13_30 [1] */ - 0, 0, - /* IP13_29 [1] */ - 0, 0, - /* IP13_28 [1] */ - 0, 0, - /* IP13_27 [1] */ - 0, 0, + /* IP13_31_27 [5] RESERVED */ /* IP13_26_24 [3] */ FN_AUDIO_CLKOUT, FN_I2C4_SDA_B, FN_SCIFA5_TXD_D, FN_VI1_VSYNC_N, FN_TS_SPSYNC_C, 0, FN_FMIN_E, 0, @@ -5420,23 +5393,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, FN_ATACS00_N, FN_ETH_LINK_B, 0, )) }, { PINMUX_CFG_REG_VAR("MOD_SEL", 0xE6060090, 32, - GROUP(2, 1, 2, 3, 4, 1, 1, 3, 3, 3, 3, 3, 2, 1), + GROUP(2, -1, 2, 3, -4, 1, -1, + 3, 3, 3, 3, 3, 2, -1), GROUP( /* SEL_ADG [2] */ FN_SEL_ADG_0, FN_SEL_ADG_1, FN_SEL_ADG_2, FN_SEL_ADG_3, /* RESERVED [1] */ - 0, 0, /* SEL_CAN [2] */ FN_SEL_CAN_0, FN_SEL_CAN_1, FN_SEL_CAN_2, FN_SEL_CAN_3, /* SEL_DARC [3] */ FN_SEL_DARC_0, FN_SEL_DARC_1, FN_SEL_DARC_2, FN_SEL_DARC_3, FN_SEL_DARC_4, 0, 0, 0, /* RESERVED [4] */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* SEL_ETH [1] */ FN_SEL_ETH_0, FN_SEL_ETH_1, /* RESERVED [1] */ - 0, 0, /* SEL_IC200 [3] */ FN_SEL_I2C00_0, FN_SEL_I2C00_1, FN_SEL_I2C00_2, FN_SEL_I2C00_3, FN_SEL_I2C00_4, 0, 0, 0, @@ -5454,12 +5425,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_I2C04_4, 0, 0, 0, /* SEL_I2C05 [2] */ FN_SEL_I2C05_0, FN_SEL_I2C05_1, FN_SEL_I2C05_2, FN_SEL_I2C05_3, - /* RESERVED [1] */ - 0, 0, )) + /* RESERVED [1] */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xE6060094, 32, GROUP(2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 
1, 1, - 2, 2, 1, 1, 2, 2, 2, 1, 1, 2), + 2, 2, -1, 1, 2, 2, 2, 1, 1, -2), GROUP( /* SEL_IEB [2] */ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0, @@ -5493,7 +5463,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_SCIFA5_0, FN_SEL_SCIFA5_1, FN_SEL_SCIFA5_2, FN_SEL_SCIFA5_3, /* RESERVED [1] */ - 0, 0, /* SEL_TMU [1] */ FN_SEL_TMU_0, FN_SEL_TMU_1, /* SEL_TSIF0 [2] */ @@ -5506,12 +5475,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, /* SEL_HSCIF1 [1] */ FN_SEL_HSCIF1_0, FN_SEL_HSCIF1_1, - /* RESERVED [2] */ - 0, 0, 0, 0, )) + /* RESERVED [2] */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL3", 0xE6060098, 32, GROUP(2, 2, 2, 1, 3, 2, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + 1, 1, -12), GROUP( /* SEL_SCIF0 [2] */ FN_SEL_SCIF0_0, FN_SEL_SCIF0_1, FN_SEL_SCIF0_2, FN_SEL_SCIF0_3, @@ -5542,30 +5510,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_SSI8_0, FN_SEL_SSI8_1, /* SEL_SSI9 [1] */ FN_SEL_SSI9_0, FN_SEL_SSI9_1, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, - /* RESERVED [1] */ - 0, 0, )) + /* RESERVED [12] */ )) }, { }, }; diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c index 63c9f6d6468b..4c543ec3a863 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77950.c +++ b/drivers/pinctrl/renesas/pfc-r8a77950.c @@ -4701,23 +4701,11 @@ static const struct sh_pfc_function pinmux_functions[] = { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_16 RESERVED */ GP_0_15_FN, GPSR0_15, GP_0_14_FN, GPSR0_14, GP_0_13_FN, GPSR0_13, @@ -4769,24 +4757,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP2_31_15 RESERVED */ GP_2_14_FN, GPSR2_14, GP_2_13_FN, GPSR2_13, GP_2_12_FN, GPSR2_12, @@ -4803,23 +4778,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_16 RESERVED */ GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, GP_3_13_FN, GPSR3_13, @@ -4837,21 +4800,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 
0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_18 RESERVED */ GP_4_17_FN, GPSR4_17, GP_4_16_FN, GPSR4_16, GP_4_15_FN, GPSR4_15, @@ -4939,35 +4892,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, GPSR6_1, GP_6_0_FN, GPSR6_0, )) }, - { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32, + GROUP(-28, 1, 1, 1, 1), + GROUP( + /* GP7_31_4 RESERVED */ GP_7_3_FN, GPSR7_3, GP_7_2_FN, GPSR7_2, GP_7_1_FN, GPSR7_1, @@ -5148,13 +5076,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP16_7_4 IP16_3_0 )) }, - { PINMUX_CFG_REG("IPSR17", 0xe6060244, 32, 4, GROUP( - /* IP17_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP17_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP17_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP17_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP17_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP17_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("IPSR17", 0xe6060244, 32, + GROUP(-24, 4, 4), + GROUP( + /* IP17_31_8 RESERVED */ IP17_7_4 IP17_3_0 )) }, @@ -5164,10 +5089,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1, - 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 1), + GROUP(-1, 2, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, -1), GROUP( - 0, 0, /* RESERVED 31 */ + /* RESERVED 31 */ MOD_SEL0_30_29 MOD_SEL0_28_27 MOD_SEL0_26_25_24 @@ -5189,11 +5114,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_5_4 MOD_SEL0_3 MOD_SEL0_2_1 - 0, 0, /* RESERVED 0 */ )) + /* RESERVED 0 */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1, - 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1), + 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1), GROUP( MOD_SEL1_31_30 MOD_SEL1_29_28_27 @@ -5210,7 +5135,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_11 MOD_SEL1_10 MOD_SEL1_9 - 0, 0, 0, 0, /* RESERVED 8, 7 */ + /* RESERVED 8, 7 */ MOD_SEL1_6 MOD_SEL1_5 MOD_SEL1_4 @@ -5220,35 +5145,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32, - GROUP(1, 1, 1, 1, 4, 4, 4, 4, 4, 4, 1, 2, 1), + GROUP(1, 1, 1, -28, 1), GROUP( MOD_SEL2_31 MOD_SEL2_30 MOD_SEL2_29 - /* RESERVED 28 */ - 0, 0, - /* RESERVED 27, 26, 25, 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 11, 10, 9, 8 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 7, 6, 5, 4 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 3 */ - 0, 0, - /* RESERVED 2, 1 */ - 0, 0, 0, 0, + /* RESERVED 28-1 */ MOD_SEL2_0 )) }, { }, diff --git a/drivers/pinctrl/renesas/pfc-r8a77951.c b/drivers/pinctrl/renesas/pfc-r8a77951.c index 
9d6eef4e9d18..d4d271dff055 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77951.c +++ b/drivers/pinctrl/renesas/pfc-r8a77951.c @@ -5139,23 +5139,11 @@ static const struct { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_16 RESERVED */ GP_0_15_FN, GPSR0_15, GP_0_14_FN, GPSR0_14, GP_0_13_FN, GPSR0_13, @@ -5207,24 +5195,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP2_31_15 RESERVED */ GP_2_14_FN, GPSR2_14, GP_2_13_FN, GPSR2_13, GP_2_12_FN, GPSR2_12, @@ -5241,23 +5216,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_16 RESERVED */ GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, GP_3_13_FN, GPSR3_13, @@ -5275,21 +5238,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_18 RESERVED */ GP_4_17_FN, GPSR4_17, GP_4_16_FN, GPSR4_16, GP_4_15_FN, GPSR4_15, @@ -5377,35 +5330,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, GPSR6_1, GP_6_0_FN, GPSR6_0, )) }, - { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32, + GROUP(-28, 1, 1, 1, 1), + GROUP( + /* GP7_31_4 RESERVED */ GP_7_3_FN, GPSR7_3, GP_7_2_FN, GPSR7_2, GP_7_1_FN, GPSR7_1, @@ -5486,12 +5414,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP6_7_4 IP6_3_0 )) }, - { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP( + { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32, + GROUP(4, 4, 4, 4, -4, 4, 4, 4), + GROUP( IP7_31_28 IP7_27_24 IP7_23_20 IP7_19_16 - /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP7_15_12 RESERVED */ IP7_11_8 IP7_7_4 IP7_3_0 )) @@ -5596,13 +5526,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP17_7_4 IP17_3_0 )) }, - { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP( - /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_23_20 */ 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32, + GROUP(-24, 4, 4), + GROUP( + /* IP18_31_8 RESERVED */ IP18_7_4 IP18_3_0 )) }, @@ -5612,8 +5539,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2, - 1, 1, 1, 2, 2, 1, 2, 3), + GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2, + 1, 1, 1, 2, 2, 1, 2, -3), GROUP( MOD_SEL0_31_30_29 MOD_SEL0_28_27 @@ -5625,7 +5552,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_19 MOD_SEL0_18_17 MOD_SEL0_16 - 0, 0, /* RESERVED 15 */ + /* RESERVED 15 */ MOD_SEL0_14_13 MOD_SEL0_12 MOD_SEL0_11 @@ -5634,12 +5561,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_7_6 MOD_SEL0_5 MOD_SEL0_4_3 - /* RESERVED 2, 1, 0 */ - 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED 2, 1, 0 */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1, - 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1), + 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1), GROUP( MOD_SEL1_31_30 MOD_SEL1_29_28_27 @@ -5656,7 +5582,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_11 MOD_SEL1_10 MOD_SEL1_9 - 0, 0, 0, 0, /* RESERVED 8, 7 */ + /* RESERVED 8, 7 */ MOD_SEL1_6 MOD_SEL1_5 MOD_SEL1_4 @@ -5666,8 +5592,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32, - GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, - 1, 4, 4, 4, 3, 1), + GROUP(1, 1, 1, 2, 1, 3, -1, 1, 1, 1, 1, 1, + -16, 1), GROUP( MOD_SEL2_31 MOD_SEL2_30 @@ -5676,25 +5602,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL2_26 MOD_SEL2_25_24_23 /* RESERVED 22 */ - 0, 0, MOD_SEL2_21 MOD_SEL2_20 MOD_SEL2_19 MOD_SEL2_18 MOD_SEL2_17 - /* RESERVED 16 */ - 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 11, 10, 9, 8 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 7, 6, 5, 4 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 3, 2, 1 */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 16-1 */ MOD_SEL2_0 )) }, { }, diff --git a/drivers/pinctrl/renesas/pfc-r8a7796.c b/drivers/pinctrl/renesas/pfc-r8a7796.c index 75ea36829a70..a0096ef5e68d 100644 --- a/drivers/pinctrl/renesas/pfc-r8a7796.c +++ b/drivers/pinctrl/renesas/pfc-r8a7796.c @@ -5094,23 +5094,11 @@ static const struct { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_16 RESERVED */ GP_0_15_FN, GPSR0_15, GP_0_14_FN, GPSR0_14, GP_0_13_FN, GPSR0_13, @@ -5162,24 +5150,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32, + 
GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP2_31_15 RESERVED */ GP_2_14_FN, GPSR2_14, GP_2_13_FN, GPSR2_13, GP_2_12_FN, GPSR2_12, @@ -5196,23 +5171,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_16 RESERVED */ GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, GP_3_13_FN, GPSR3_13, @@ -5230,21 +5193,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_18 RESERVED */ GP_4_17_FN, GPSR4_17, GP_4_16_FN, GPSR4_16, GP_4_15_FN, GPSR4_15, @@ -5332,35 +5285,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, GPSR6_1, GP_6_0_FN, GPSR6_0, )) }, - { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32, + GROUP(-28, 1, 1, 1, 1), + GROUP( + /* GP7_31_4 RESERVED */ GP_7_3_FN, GPSR7_3, GP_7_2_FN, GPSR7_2, GP_7_1_FN, GPSR7_1, @@ -5441,12 +5369,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP6_7_4 IP6_3_0 )) }, - { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP( + { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32, + GROUP(4, 4, 4, 4, -4, 4, 4, 4), + GROUP( IP7_31_28 IP7_27_24 IP7_23_20 IP7_19_16 - /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP7_15_12 RESERVED */ IP7_11_8 IP7_7_4 IP7_3_0 )) @@ -5551,13 +5481,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP17_7_4 IP17_3_0 )) }, - { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP( - /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32, + GROUP(-24, 4, 4), + GROUP( + /* IP18_31_8 RESERVED */ IP18_7_4 IP18_3_0 )) }, @@ -5567,8 +5494,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2, - 1, 1, 1, 2, 2, 1, 2, 3), + GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2, + 1, 1, 1, 2, 2, 1, 2, -3), GROUP( MOD_SEL0_31_30_29 MOD_SEL0_28_27 @@ -5580,7 +5507,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_19 MOD_SEL0_18_17 MOD_SEL0_16 - 0, 0, /* RESERVED 15 */ + /* RESERVED 15 */ MOD_SEL0_14_13 MOD_SEL0_12 MOD_SEL0_11 @@ -5589,12 +5516,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_7_6 
MOD_SEL0_5 MOD_SEL0_4_3 - /* RESERVED 2, 1, 0 */ - 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED 2, 1, 0 */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1, - 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1), + 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1), GROUP( MOD_SEL1_31_30 MOD_SEL1_29_28_27 @@ -5611,7 +5537,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_11 MOD_SEL1_10 MOD_SEL1_9 - 0, 0, 0, 0, /* RESERVED 8, 7 */ + /* RESERVED 8, 7 */ MOD_SEL1_6 MOD_SEL1_5 MOD_SEL1_4 @@ -5622,7 +5548,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32, GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, - 1, 4, 4, 4, 3, 1), + -16, 1), GROUP( MOD_SEL2_31 MOD_SEL2_30 @@ -5636,19 +5562,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL2_19 MOD_SEL2_18 MOD_SEL2_17 - /* RESERVED 16 */ - 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 11, 10, 9, 8 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 7, 6, 5, 4 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 3, 2, 1 */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 16-1 */ MOD_SEL2_0 )) }, { }, diff --git a/drivers/pinctrl/renesas/pfc-r8a77965.c b/drivers/pinctrl/renesas/pfc-r8a77965.c index 6bb7f7543c37..acd0bdf13018 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77965.c +++ b/drivers/pinctrl/renesas/pfc-r8a77965.c @@ -5335,23 +5335,11 @@ static const struct { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_16 RESERVED */ GP_0_15_FN, GPSR0_15, GP_0_14_FN, GPSR0_14, GP_0_13_FN, GPSR0_13, @@ -5403,24 +5391,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP2_31_15 RESERVED */ GP_2_14_FN, GPSR2_14, GP_2_13_FN, GPSR2_13, GP_2_12_FN, GPSR2_12, @@ -5437,23 +5412,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_16 RESERVED */ GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, GP_3_13_FN, GPSR3_13, @@ -5471,21 +5434,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_18 RESERVED */ GP_4_17_FN, GPSR4_17, 
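A note on the conversion pattern repeated throughout these files: the fixed-width PINMUX_CFG_REG(..., 32, 1, GROUP(...)) descriptors must pad every reserved bit with a dummy "0, 0" enum-ID pair, while PINMUX_CFG_REG_VAR() takes an explicit per-field width list in which a negative width marks a run of reserved bits that contributes no enum IDs at all (the absolute widths still have to add up to the register width). A minimal before/after sketch of that idea, using a made-up "GPSRX" register with only its two lowest bits wired up (address and pin IDs are illustrative, not taken from any of the files above):

	/* Old style: 32 one-bit fields; every reserved bit needs "0, 0" padding. */
	{ PINMUX_CFG_REG("GPSRX", 0xe60600f0, 32, 1, GROUP(
		0, 0,			/* bit 31 reserved */
		/* ... 28 more "0, 0" pairs for bits 30..2 ... */
		GP_X_1_FN, GPSRX_1,	/* bit 1 */
		GP_X_0_FN, GPSRX_0,	/* bit 0 */
	)) },

	/* New style: a single -30 field covers bits 31..2, then two real 1-bit fields. */
	{ PINMUX_CFG_REG_VAR("GPSRX", 0xe60600f0, 32,
			     GROUP(-30, 1, 1),
			     GROUP(
		/* GPX_31_2 RESERVED */
		GP_X_1_FN, GPSRX_1,	/* bit 1 */
		GP_X_0_FN, GPSRX_0,	/* bit 0 */
	)) },

This is why, for example, GROUP(-15, 1, 1, ..., 1) with seventeen 1-bit fields describes a 32-bit GPSR whose bits 31..17 are reserved.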
GP_4_16_FN, GPSR4_16, GP_4_15_FN, GPSR4_15, @@ -5573,35 +5526,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, GPSR6_1, GP_6_0_FN, GPSR6_0, )) }, - { PINMUX_CFG_REG("GPSR7", 0xe606011c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xe606011c, 32, + GROUP(-28, 1, 1, 1, 1), + GROUP( + /* GP7_31_4 RESERVED */ GP_7_3_FN, GPSR7_3, GP_7_2_FN, GPSR7_2, GP_7_1_FN, GPSR7_1, @@ -5682,12 +5610,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP6_7_4 IP6_3_0 )) }, - { PINMUX_CFG_REG("IPSR7", 0xe606021c, 32, 4, GROUP( + { PINMUX_CFG_REG_VAR("IPSR7", 0xe606021c, 32, + GROUP(4, 4, 4, 4, -4, 4, 4, 4), + GROUP( IP7_31_28 IP7_27_24 IP7_23_20 IP7_19_16 - /* IP7_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* IP7_15_12 RESERVED */ IP7_11_8 IP7_7_4 IP7_3_0 )) @@ -5792,13 +5722,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP17_7_4 IP17_3_0 )) }, - { PINMUX_CFG_REG("IPSR18", 0xe6060248, 32, 4, GROUP( - /* IP18_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP18_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("IPSR18", 0xe6060248, 32, + GROUP(-24, 4, 4), + GROUP( + /* IP18_31_8 RESERVED */ IP18_7_4 IP18_3_0 )) }, @@ -5808,8 +5735,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, 1, 2, - 1, 1, 1, 2, 2, 1, 2, 3), + GROUP(3, 2, 3, 1, 1, 1, 1, 1, 2, 1, -1, 2, + 1, 1, 1, 2, 2, 1, 2, -3), GROUP( MOD_SEL0_31_30_29 MOD_SEL0_28_27 @@ -5821,7 +5748,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_19 MOD_SEL0_18_17 MOD_SEL0_16 - 0, 0, /* RESERVED 15 */ + /* RESERVED 15 */ MOD_SEL0_14_13 MOD_SEL0_12 MOD_SEL0_11 @@ -5830,12 +5757,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_7_6 MOD_SEL0_5 MOD_SEL0_4_3 - /* RESERVED 2, 1, 0 */ - 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED 2, 1, 0 */ )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, GROUP(2, 3, 1, 2, 3, 1, 1, 2, 1, 2, 1, 1, - 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1), + 1, 1, 1, -2, 1, 1, 1, 1, 1, 1, 1), GROUP( MOD_SEL1_31_30 MOD_SEL1_29_28_27 @@ -5852,7 +5778,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_11 MOD_SEL1_10 MOD_SEL1_9 - 0, 0, 0, 0, /* RESERVED 8, 7 */ + /* RESERVED 8, 7 */ MOD_SEL1_6 MOD_SEL1_5 MOD_SEL1_4 @@ -5863,7 +5789,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6060508, 32, GROUP(1, 1, 1, 2, 1, 3, 1, 1, 1, 1, 1, 1, - 1, 4, 4, 4, 3, 1), + -16, 1), GROUP( MOD_SEL2_31 MOD_SEL2_30 @@ -5877,19 +5803,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL2_19 MOD_SEL2_18 MOD_SEL2_17 - /* RESERVED 16 */ - 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 11, 10, 9, 8 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 7, 6, 5, 4 */ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 3, 
2, 1 */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 16-1 */ MOD_SEL2_0 )) }, { }, diff --git a/drivers/pinctrl/renesas/pfc-r8a77970.c b/drivers/pinctrl/renesas/pfc-r8a77970.c index 94f90c13989e..4a7803eaafaa 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77970.c +++ b/drivers/pinctrl/renesas/pfc-r8a77970.c @@ -231,7 +231,6 @@ #define IP8_19_16 FM(CANFD_CLK_A) FM(CLK_EXTFXR) FM(PWM4_B) FM(SPEEDIN_B) FM(SCIF_CLK_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP8_23_20 FM(DIGRF_CLKIN) FM(DIGRF_CLKEN_IN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP8_27_24 FM(DIGRF_CLKOUT) FM(DIGRF_CLKEN_OUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP8_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define PINMUX_GPSR \ \ @@ -290,8 +289,7 @@ FM(IP8_11_8) IP8_11_8 \ FM(IP8_15_12) IP8_15_12 \ FM(IP8_19_16) IP8_19_16 \ FM(IP8_23_20) IP8_23_20 \ -FM(IP8_27_24) IP8_27_24 \ -FM(IP8_31_28) IP8_31_28 +FM(IP8_27_24) IP8_27_24 /* MOD_SEL0 */ /* 0 */ /* 1 */ #define MOD_SEL0_11 FM(SEL_I2C3_0) FM(SEL_I2C3_1) @@ -2085,17 +2083,11 @@ static const struct sh_pfc_function pinmux_functions[] = { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_22 RESERVED */ GP_0_21_FN, GPSR0_21, GP_0_20_FN, GPSR0_20, GP_0_19_FN, GPSR0_19, @@ -2153,22 +2145,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6060108, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6060108, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP2_31_17 RESERVED */ GP_2_16_FN, GPSR2_16, GP_2_15_FN, GPSR2_15, GP_2_14_FN, GPSR2_14, @@ -2187,22 +2168,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_17 RESERVED */ GP_3_16_FN, GPSR3_16, GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, @@ -2221,33 +2191,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-26, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_6 RESERVED */ GP_4_5_FN, GPSR4_5, GP_4_4_FN, GPSR4_4, GP_4_3_FN, GPSR4_3, @@ -2255,24 +2202,11 @@ static const struct 
pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, GPSR4_1, GP_4_0_FN, GPSR4_0, )) }, - { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP5_31_15 RESERVED */ GP_5_14_FN, GPSR5_14, GP_5_13_FN, GPSR5_13, GP_5_12_FN, GPSR5_12, @@ -2374,8 +2308,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP7_7_4 IP7_3_0 )) }, - { PINMUX_CFG_REG("IPSR8", 0xe6060220, 32, 4, GROUP( - IP8_31_28 + { PINMUX_CFG_REG_VAR("IPSR8", 0xe6060220, 32, + GROUP(-4, 4, 4, 4, 4, 4, 4, 4), + GROUP( + /* IP8_31_28 RESERVED */ IP8_27_24 IP8_23_20 IP8_19_16 @@ -2390,19 +2326,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1), + GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* RESERVED 31, 30, 29, 28 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 27, 26, 25, 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 31-12 */ MOD_SEL0_11 MOD_SEL0_10 MOD_SEL0_9 diff --git a/drivers/pinctrl/renesas/pfc-r8a77980.c b/drivers/pinctrl/renesas/pfc-r8a77980.c index c229a5d8fa57..ac03309c5c0c 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77980.c +++ b/drivers/pinctrl/renesas/pfc-r8a77980.c @@ -278,9 +278,6 @@ #define IP10_11_8 FM(FSO_CFE_0_N) F_(0, 0) F_(0, 0) FM(VI0_DATA22) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP10_15_12 FM(FSO_CFE_1_N) F_(0, 0) F_(0, 0) FM(VI0_DATA23) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP10_19_16 FM(FSO_TOE_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP10_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP10_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP10_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define PINMUX_GPSR \ \ @@ -340,9 +337,9 @@ FM(IP8_7_4) IP8_7_4 FM(IP9_7_4) IP9_7_4 FM(IP10_7_4) IP10_7_4 \ FM(IP8_11_8) IP8_11_8 FM(IP9_11_8) IP9_11_8 FM(IP10_11_8) IP10_11_8 \ FM(IP8_15_12) IP8_15_12 FM(IP9_15_12) IP9_15_12 FM(IP10_15_12) IP10_15_12 \ FM(IP8_19_16) IP8_19_16 FM(IP9_19_16) IP9_19_16 FM(IP10_19_16) IP10_19_16 \ -FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 FM(IP10_23_20) IP10_23_20 \ -FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 FM(IP10_27_24) IP10_27_24 \ -FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 FM(IP10_31_28) IP10_31_28 +FM(IP8_23_20) IP8_23_20 FM(IP9_23_20) IP9_23_20 \ +FM(IP8_27_24) IP8_27_24 FM(IP9_27_24) IP9_27_24 \ +FM(IP8_31_28) IP8_31_28 FM(IP9_31_28) IP9_31_28 /* MOD_SEL0 */ /* 0 */ 
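A related cleanup in the pfc-r8a77970.c and pfc-r8a77980.c hunks above (repeated for pfc-r8a779a0.c further down): IPSR nibbles with no functions at all, i.e. defines whose sixteen candidate values are all F_(0, 0) such as IP8_31_28 or IP10_31_28, are deleted together with their FM()/PINMUX_GPSR expansion, and the register descriptor simply skips those bits with a negative width, e.g. GROUP(-4, 4, ...) for IPSR8 and GROUP(-12, 4, ...) for IPSR10. A schematic of what one such dropped define looked like and what takes its place ("IPn" is a placeholder; the concrete conversions are in the hunks above):

	/* Before: sixteen "no function" candidates just to fill the nibble. */
	#define IPn_31_28	F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) \
				F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) \
				F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) \
				F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)

	/* After: no define at all; the descriptor marks bits 31..28 reserved, */
	/* e.g. GROUP(-4, 4, 4, 4, 4, 4, 4, 4) with an "IPn_31_28 RESERVED" comment. */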
/* 1 */ #define MOD_SEL0_11 FM(SEL_CANFD0_0) FM(SEL_CANFD0_1) @@ -2507,17 +2504,11 @@ static const struct sh_pfc_function pinmux_functions[] = { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_22 RESERVED */ GP_0_21_FN, GPSR0_21, GP_0_20_FN, GPSR0_20, GP_0_19_FN, GPSR0_19, @@ -2609,22 +2600,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_17 RESERVED */ GP_3_16_FN, GPSR3_16, GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, @@ -2643,14 +2623,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 32, + GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1), + GROUP( + /* GP4_31_25 RESERVED */ GP_4_24_FN, GPSR4_24, GP_4_23_FN, GPSR4_23, GP_4_22_FN, GPSR4_22, @@ -2677,24 +2655,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, GPSR4_1, GP_4_0_FN, GPSR4_0, )) }, - { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32, + GROUP(-17, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1), + GROUP( + /* GP5_31_15 RESERVED */ GP_5_14_FN, GPSR5_14, GP_5_13_FN, GPSR5_13, GP_5_12_FN, GPSR5_12, @@ -2816,10 +2781,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP9_7_4 IP9_3_0 )) }, - { PINMUX_CFG_REG("IPSR10", 0xe6060228, 32, 4, GROUP( - IP10_31_28 - IP10_27_24 - IP10_23_20 + { PINMUX_CFG_REG_VAR("IPSR10", 0xe6060228, 32, + GROUP(-12, 4, 4, 4, 4, 4), + GROUP( + /* IP10_31_20 RESERVED */ IP10_19_16 IP10_15_12 IP10_11_8 @@ -2832,19 +2797,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(4, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1), + GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1), GROUP( - /* RESERVED 31, 30, 29, 28 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 27, 26, 25, 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 31-12 */ MOD_SEL0_11 MOD_SEL0_10 MOD_SEL0_9 @@ -2853,7 +2808,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_6 MOD_SEL0_5 MOD_SEL0_4 - 0, 0, + /* RESERVED 3 */ MOD_SEL0_2 MOD_SEL0_1 MOD_SEL0_0 )) diff --git a/drivers/pinctrl/renesas/pfc-r8a77990.c b/drivers/pinctrl/renesas/pfc-r8a77990.c index 
6c4ba9e16058..b0936962fad7 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77990.c +++ b/drivers/pinctrl/renesas/pfc-r8a77990.c @@ -22,12 +22,12 @@ PORT_GP_CFG_18(0, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_23(1, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_26(2, fn, sfx, CFG_FLAGS), \ - PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_CFG_12(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_1(3, 12, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_1(3, 13, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_1(3, 14, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_1(3, 15, fn, sfx, CFG_FLAGS), \ - PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \ + PORT_GP_CFG_11(4, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE | SH_PFC_PIN_CFG_DRIVE_STRENGTH), \ PORT_GP_CFG_20(5, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_9(6, fn, sfx, CFG_FLAGS), \ PORT_GP_CFG_1(6, 9, fn, sfx, SH_PFC_PIN_CFG_PULL_UP), \ @@ -2827,16 +2827,6 @@ static const unsigned int qspi0_ctrl_pins[] = { static const unsigned int qspi0_ctrl_mux[] = { QSPI0_SPCLK_MARK, QSPI0_SSL_MARK, }; -static const unsigned int qspi0_data_pins[] = { - /* QSPI0_MOSI_IO0, QSPI0_MISO_IO1 */ - RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2), - /* QSPI0_IO2, QSPI0_IO3 */ - RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4), -}; -static const unsigned int qspi0_data_mux[] = { - QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, - QSPI0_IO2_MARK, QSPI0_IO3_MARK, -}; /* - QSPI1 ------------------------------------------------------------------ */ static const unsigned int qspi1_ctrl_pins[] = { /* QSPI1_SPCLK, QSPI1_SSL */ @@ -2845,16 +2835,51 @@ static const unsigned int qspi1_ctrl_pins[] = { static const unsigned int qspi1_ctrl_mux[] = { QSPI1_SPCLK_MARK, QSPI1_SSL_MARK, }; -static const unsigned int qspi1_data_pins[] = { - /* QSPI1_MOSI_IO0, QSPI1_MISO_IO1 */ + +/* - RPC -------------------------------------------------------------------- */ +static const unsigned int rpc_clk_pins[] = { + /* Octal-SPI flash: C/SCLK */ + /* HyperFlash: CK, CK# */ + RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 6), +}; +static const unsigned int rpc_clk_mux[] = { + QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK, +}; +static const unsigned int rpc_ctrl_pins[] = { + /* Octal-SPI flash: S#/CS, DQS */ + /* HyperFlash: CS#, RDS */ + RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 11), +}; +static const unsigned int rpc_ctrl_mux[] = { + QSPI0_SSL_MARK, QSPI1_SSL_MARK, +}; +static const unsigned int rpc_data_pins[] = { + /* DQ[0:7] */ + RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2), + RCAR_GP_PIN(2, 3), RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8), - /* QSPI1_IO2, QSPI1_IO3 */ RCAR_GP_PIN(2, 9), RCAR_GP_PIN(2, 10), }; -static const unsigned int qspi1_data_mux[] = { +static const unsigned int rpc_data_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, + QSPI0_IO2_MARK, QSPI0_IO3_MARK, QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, QSPI1_IO2_MARK, QSPI1_IO3_MARK, }; +static const unsigned int rpc_reset_pins[] = { + /* RPC_RESET# */ + RCAR_GP_PIN(2, 13), +}; +static const unsigned int rpc_reset_mux[] = { + RPC_RESET_N_MARK, +}; +static const unsigned int rpc_int_pins[] = { + /* RPC_INT# */ + RCAR_GP_PIN(2, 12), +}; +static const unsigned int rpc_int_mux[] = { + RPC_INT_N_MARK, +}; /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int scif0_data_a_pins[] = { @@ -3758,7 +3783,7 @@ static const unsigned int vin5_clk_b_mux[] = { }; static const struct { - struct sh_pfc_pin_group common[255]; + struct sh_pfc_pin_group common[261]; #ifdef CONFIG_PINCTRL_PFC_R8A77990 struct 
sh_pfc_pin_group automotive[22]; #endif @@ -3907,11 +3932,17 @@ static const struct { SH_PFC_PIN_GROUP(pwm6_a), SH_PFC_PIN_GROUP(pwm6_b), SH_PFC_PIN_GROUP(qspi0_ctrl), - BUS_DATA_PIN_GROUP(qspi0_data, 2), - BUS_DATA_PIN_GROUP(qspi0_data, 4), + SH_PFC_PIN_GROUP_SUBSET(qspi0_data2, rpc_data, 0, 2), + SH_PFC_PIN_GROUP_SUBSET(qspi0_data4, rpc_data, 0, 4), SH_PFC_PIN_GROUP(qspi1_ctrl), - BUS_DATA_PIN_GROUP(qspi1_data, 2), - BUS_DATA_PIN_GROUP(qspi1_data, 4), + SH_PFC_PIN_GROUP_SUBSET(qspi1_data2, rpc_data, 4, 2), + SH_PFC_PIN_GROUP_SUBSET(qspi1_data4, rpc_data, 4, 4), + BUS_DATA_PIN_GROUP(rpc_clk, 1), + BUS_DATA_PIN_GROUP(rpc_clk, 2), + SH_PFC_PIN_GROUP(rpc_ctrl), + SH_PFC_PIN_GROUP(rpc_data), + SH_PFC_PIN_GROUP(rpc_reset), + SH_PFC_PIN_GROUP(rpc_int), SH_PFC_PIN_GROUP(scif0_data_a), SH_PFC_PIN_GROUP(scif0_clk_a), SH_PFC_PIN_GROUP(scif0_ctrl_a), @@ -4336,6 +4367,15 @@ static const char * const qspi1_groups[] = { "qspi1_data4", }; +static const char * const rpc_groups[] = { + "rpc_clk1", + "rpc_clk2", + "rpc_ctrl", + "rpc_data", + "rpc_reset", + "rpc_int", +}; + static const char * const scif0_groups[] = { "scif0_data_a", "scif0_clk_a", @@ -4492,7 +4532,7 @@ static const char * const vin5_groups[] = { }; static const struct { - struct sh_pfc_function common[49]; + struct sh_pfc_function common[50]; #ifdef CONFIG_PINCTRL_PFC_R8A77990 struct sh_pfc_function automotive[5]; #endif @@ -4531,6 +4571,7 @@ static const struct { SH_PFC_FUNCTION(pwm6), SH_PFC_FUNCTION(qspi0), SH_PFC_FUNCTION(qspi1), + SH_PFC_FUNCTION(rpc), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif2), @@ -4562,21 +4603,11 @@ static const struct { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_18 RESERVED */ GP_0_17_FN, GPSR0_17, GP_0_16_FN, GPSR0_16, GP_0_15_FN, GPSR0_15, @@ -4596,16 +4627,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_0_1_FN, GPSR0_1, GP_0_0_FN, GPSR0_0, )) }, - { PINMUX_CFG_REG("GPSR1", 0xe6060104, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR1", 0xe6060104, 32, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP1_31_23 RESERVED */ GP_1_22_FN, GPSR1_22, GP_1_21_FN, GPSR1_21, GP_1_20_FN, GPSR1_20, @@ -4664,23 +4690,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-16, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_16 RESERVED */ GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, GP_3_13_FN, GPSR3_13, @@ -4698,28 +4712,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_3_1_FN, GPSR3_1, GP_3_0_FN, GPSR3_0, )) }, - { PINMUX_CFG_REG("GPSR4", 0xe6060110, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR4", 0xe6060110, 
32, + GROUP(-21, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP4_31_11 RESERVED */ GP_4_10_FN, GPSR4_10, GP_4_9_FN, GPSR4_9, GP_4_8_FN, GPSR4_8, @@ -4732,19 +4728,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, GPSR4_1, GP_4_0_FN, GPSR4_0, )) }, - { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32, + GROUP(-12, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP5_31_20 RESERVED */ GP_5_19_FN, GPSR5_19, GP_5_18_FN, GPSR5_18, GP_5_17_FN, GPSR5_17, @@ -4766,21 +4754,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, GPSR5_1, GP_5_0_FN, GPSR5_0, )) }, - { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR6", 0xe6060118, 32, + GROUP(-14, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP6_31_18 RESERVED */ GP_6_17_FN, GPSR6_17, GP_6_16_FN, GPSR6_16, GP_6_15_FN, GPSR6_15, @@ -4971,11 +4949,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1, 1, + GROUP(-1, 2, 1, 2, 1, 1, 1, 1, 2, 3, 1, 1, 1, 2, 2, 1, 1, 1, 2, 1, 1, 1, 2), GROUP( /* RESERVED 31 */ - 0, 0, MOD_SEL0_30_29 MOD_SEL0_28 MOD_SEL0_27_26 @@ -5000,15 +4977,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_1_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 3, 3, 1, 1, 1, - 1, 2, 2, 2, 1, 1, 2, 1, 4), + GROUP(1, 1, 1, 1, -1, 1, 1, 3, 3, 1, 1, 1, + 1, 2, 2, 2, 1, 1, 2, 1, -4), GROUP( MOD_SEL1_31 MOD_SEL1_30 MOD_SEL1_29 MOD_SEL1_28 /* RESERVED 27 */ - 0, 0, MOD_SEL1_26 MOD_SEL1_25 MOD_SEL1_24_23_22 @@ -5024,12 +5000,44 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_7 MOD_SEL1_6_5 MOD_SEL1_4 - /* RESERVED 3, 2, 1, 0 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED 3, 2, 1, 0 */ )) }, { }, }; +static const struct pinmux_drive_reg pinmux_drive_regs[] = { + { PINMUX_DRIVE_REG("DRVCTRL8", 0xe6060320) { + { RCAR_GP_PIN(3, 0), 18, 2 }, /* SD0_CLK */ + { RCAR_GP_PIN(3, 1), 15, 2 }, /* SD0_CMD */ + { RCAR_GP_PIN(3, 2), 12, 2 }, /* SD0_DAT0 */ + { RCAR_GP_PIN(3, 3), 9, 2 }, /* SD0_DAT1 */ + { RCAR_GP_PIN(3, 4), 6, 2 }, /* SD0_DAT2 */ + { RCAR_GP_PIN(3, 5), 3, 2 }, /* SD0_DAT3 */ + { RCAR_GP_PIN(3, 6), 0, 2 }, /* SD1_CLK */ + } }, + { PINMUX_DRIVE_REG("DRVCTRL9", 0xe6060324) { + { RCAR_GP_PIN(3, 7), 29, 2 }, /* SD1_CMD */ + { RCAR_GP_PIN(3, 8), 26, 2 }, /* SD1_DAT0 */ + { RCAR_GP_PIN(3, 9), 23, 2 }, /* SD1_DAT1 */ + { RCAR_GP_PIN(3, 10), 20, 2 }, /* SD1_DAT2 */ + { RCAR_GP_PIN(3, 11), 17, 2 }, /* SD1_DAT3 */ + { RCAR_GP_PIN(4, 0), 14, 2 }, /* SD3_CLK */ + { RCAR_GP_PIN(4, 1), 11, 2 }, /* SD3_CMD */ + { RCAR_GP_PIN(4, 2), 8, 2 }, /* SD3_DAT0 */ + { RCAR_GP_PIN(4, 3), 5, 2 }, /* SD3_DAT1 */ + { RCAR_GP_PIN(4, 4), 2, 2 }, /* SD3_DAT2 */ + } }, + { PINMUX_DRIVE_REG("DRVCTRL10", 0xe6060328) { + { RCAR_GP_PIN(4, 5), 29, 2 }, /* SD3_DAT3 */ + { RCAR_GP_PIN(4, 6), 26, 2 }, /* SD3_DAT4 */ + { RCAR_GP_PIN(4, 7), 23, 2 }, /* SD3_DAT5 */ + { RCAR_GP_PIN(4, 8), 20, 2 }, /* SD3_DAT6 */ + { RCAR_GP_PIN(4, 9), 17, 2 }, /* SD3_DAT7 */ + { RCAR_GP_PIN(4, 10), 14, 2 }, /* SD3_DS */ + } }, + { }, +}; + enum ioctrl_regs { POCCTRL0, 
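On the pinmux_drive_regs[] table added just above: each PINMUX_DRIVE_REG() entry describes, per pin, where its drive-strength field sits inside the 32-bit DRVCTRL register, and the triplets read as { pin, bit offset of the field's LSB, field width in bits }. That reading is inferred from the table layout itself (offsets stepping down 18, 15, 12, ... with 2-bit fields), so treat it as an annotation rather than a quote from the driver documentation:

	/* { pin, offset, size }: SD0_CLK's 2-bit drive-strength field occupies */
	/* bits [19:18] of DRVCTRL8 (0xe6060320); SD1_CLK's occupies bits [1:0]. */
	{ RCAR_GP_PIN(3, 0), 18, 2 },	/* SD0_CLK */
	{ RCAR_GP_PIN(3, 6),  0, 2 },	/* SD1_CLK */

Together with the SH_PFC_PIN_CFG_DRIVE_STRENGTH flag added to the GP3/GP4 SD pins near the top of this file and the .drive_regs assignments in the r8a774c0/r8a77990 SoC info structures just below, this is what exposes drive-strength control for the SD pin groups.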
TDSELCTRL, @@ -5286,6 +5294,7 @@ const struct sh_pfc_soc_info r8a774c0_pinmux_info = { .nr_functions = ARRAY_SIZE(pinmux_functions.common), .cfg_regs = pinmux_config_regs, + .drive_regs = pinmux_drive_regs, .bias_regs = pinmux_bias_regs, .ioctrl_regs = pinmux_ioctrl_regs, @@ -5312,6 +5321,7 @@ const struct sh_pfc_soc_info r8a77990_pinmux_info = { ARRAY_SIZE(pinmux_functions.automotive), .cfg_regs = pinmux_config_regs, + .drive_regs = pinmux_drive_regs, .bias_regs = pinmux_bias_regs, .ioctrl_regs = pinmux_ioctrl_regs, diff --git a/drivers/pinctrl/renesas/pfc-r8a77995.c b/drivers/pinctrl/renesas/pfc-r8a77995.c index 445c903a121a..d949ae59c757 100644 --- a/drivers/pinctrl/renesas/pfc-r8a77995.c +++ b/drivers/pinctrl/renesas/pfc-r8a77995.c @@ -1682,6 +1682,68 @@ static const unsigned int pwm3_c_mux[] = { PWM3_C_MARK, }; +/* - QSPI0 ------------------------------------------------------------------ */ +static const unsigned int qspi0_ctrl_pins[] = { + /* QSPI0_SPCLK, QSPI0_SSL */ + RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 5), +}; +static const unsigned int qspi0_ctrl_mux[] = { + QSPI0_SPCLK_MARK, QSPI0_SSL_MARK, +}; +/* - QSPI1 ------------------------------------------------------------------ */ +static const unsigned int qspi1_ctrl_pins[] = { + /* QSPI1_SPCLK, QSPI1_SSL */ + RCAR_GP_PIN(6, 6), RCAR_GP_PIN(6, 11), +}; +static const unsigned int qspi1_ctrl_mux[] = { + QSPI1_SPCLK_MARK, QSPI1_SSL_MARK, +}; + +/* - RPC -------------------------------------------------------------------- */ +static const unsigned int rpc_clk_pins[] = { + /* Octal-SPI flash: C/SCLK */ + /* HyperFlash: CK, CK# */ + RCAR_GP_PIN(6, 0), RCAR_GP_PIN(6, 6), +}; +static const unsigned int rpc_clk_mux[] = { + QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK, +}; +static const unsigned int rpc_ctrl_pins[] = { + /* Octal-SPI flash: S#/CS, DQS */ + /* HyperFlash: CS#, RDS */ + RCAR_GP_PIN(6, 5), RCAR_GP_PIN(6, 11), +}; +static const unsigned int rpc_ctrl_mux[] = { + QSPI0_SSL_MARK, QSPI1_SSL_MARK, +}; +static const unsigned int rpc_data_pins[] = { + /* DQ[0:7] */ + RCAR_GP_PIN(6, 1), RCAR_GP_PIN(6, 2), + RCAR_GP_PIN(6, 3), RCAR_GP_PIN(6, 4), + RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 8), + RCAR_GP_PIN(6, 9), RCAR_GP_PIN(6, 10), +}; +static const unsigned int rpc_data_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, + QSPI0_IO2_MARK, QSPI0_IO3_MARK, + QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, + QSPI1_IO2_MARK, QSPI1_IO3_MARK, +}; +static const unsigned int rpc_reset_pins[] = { + /* RPC_RESET# */ + RCAR_GP_PIN(6, 12), +}; +static const unsigned int rpc_reset_mux[] = { + RPC_RESET_N_MARK, +}; +static const unsigned int rpc_int_pins[] = { + /* RPC_INT# */ + RCAR_GP_PIN(6, 13), +}; +static const unsigned int rpc_int_mux[] = { + RPC_INT_N_MARK, +}; + /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int scif0_data_a_pins[] = { /* RX, TX */ @@ -2085,6 +2147,18 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(pwm3_a), SH_PFC_PIN_GROUP(pwm3_b), SH_PFC_PIN_GROUP(pwm3_c), + SH_PFC_PIN_GROUP(qspi0_ctrl), + SH_PFC_PIN_GROUP_SUBSET(qspi0_data2, rpc_data, 0, 2), + SH_PFC_PIN_GROUP_SUBSET(qspi0_data4, rpc_data, 0, 4), + SH_PFC_PIN_GROUP(qspi1_ctrl), + SH_PFC_PIN_GROUP_SUBSET(qspi1_data2, rpc_data, 4, 2), + SH_PFC_PIN_GROUP_SUBSET(qspi1_data4, rpc_data, 4, 4), + BUS_DATA_PIN_GROUP(rpc_clk, 1), + BUS_DATA_PIN_GROUP(rpc_clk, 2), + SH_PFC_PIN_GROUP(rpc_ctrl), + SH_PFC_PIN_GROUP(rpc_data), + SH_PFC_PIN_GROUP(rpc_reset), + SH_PFC_PIN_GROUP(rpc_int), SH_PFC_PIN_GROUP(scif0_data_a), 
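One more detail in the RPC additions here (and in the matching pfc-r8a77990.c hunk earlier): the RPC-IF data bus shares its physical pins with the two QSPI channels, so the QSPI data groups no longer carry their own pin/mux arrays and are declared with SH_PFC_PIN_GROUP_SUBSET(group, source, offset, count) as windows into rpc_data. The offsets used above imply the following aliasing (index ranges into rpc_data_pins[]/rpc_data_mux[]; written out here only as a reading aid):

	/*
	 * rpc_data lists DQ0..DQ7 once; the QSPI data groups reuse slices of it:
	 *   qspi0_data2 = rpc_data[0..1]  -> QSPI0_MOSI_IO0, QSPI0_MISO_IO1
	 *   qspi0_data4 = rpc_data[0..3]  -> ... plus QSPI0_IO2, QSPI0_IO3
	 *   qspi1_data2 = rpc_data[4..5]  -> QSPI1_MOSI_IO0, QSPI1_MISO_IO1
	 *   qspi1_data4 = rpc_data[4..7]  -> ... plus QSPI1_IO2, QSPI1_IO3
	 */

Similarly, BUS_DATA_PIN_GROUP(rpc_clk, 1) and BUS_DATA_PIN_GROUP(rpc_clk, 2) appear to generate the "rpc_clk1"/"rpc_clk2" groups listed in rpc_groups[] from the single rpc_clk pin array, covering the one-pin (Octal-SPI C/SCLK) and two-pin (HyperFlash CK/CK#) clock layouts noted in the comments.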
SH_PFC_PIN_GROUP(scif0_clk_a), SH_PFC_PIN_GROUP(scif0_data_b), @@ -2277,6 +2351,27 @@ static const char * const pwm3_groups[] = { "pwm3_c", }; +static const char * const qspi0_groups[] = { + "qspi0_ctrl", + "qspi0_data2", + "qspi0_data4", +}; + +static const char * const qspi1_groups[] = { + "qspi1_ctrl", + "qspi1_data2", + "qspi1_data4", +}; + +static const char * const rpc_groups[] = { + "rpc_clk1", + "rpc_clk2", + "rpc_ctrl", + "rpc_data", + "rpc_reset", + "rpc_int", +}; + static const char * const scif0_groups[] = { "scif0_data_a", "scif0_clk_a", @@ -2373,6 +2468,9 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(pwm1), SH_PFC_FUNCTION(pwm2), SH_PFC_FUNCTION(pwm3), + SH_PFC_FUNCTION(qspi0), + SH_PFC_FUNCTION(qspi1), + SH_PFC_FUNCTION(rpc), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif2), @@ -2388,30 +2486,10 @@ static const struct sh_pfc_function pinmux_functions[] = { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6060100, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6060100, 32, + GROUP(-23, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_9 RESERVED */ GP_0_8_FN, GPSR0_8, GP_0_7_FN, GPSR0_7, GP_0_6_FN, GPSR0_6, @@ -2490,29 +2568,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe606010c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe606010c, 32, + GROUP(-22, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_10 RESERVED */ GP_3_9_FN, GPSR3_9, GP_3_8_FN, GPSR3_8, GP_3_7_FN, GPSR3_7, @@ -2558,18 +2617,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, GPSR4_1, GP_4_0_FN, GPSR4_0, )) }, - { PINMUX_CFG_REG("GPSR5", 0xe6060114, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060114, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP5_31_21 RESERVED */ GP_5_20_FN, GPSR5_20, GP_5_19_FN, GPSR5_19, GP_5_18_FN, GPSR5_18, @@ -2592,25 +2644,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, GPSR5_1, GP_5_0_FN, GPSR5_0, )) }, - { PINMUX_CFG_REG("GPSR6", 0xe6060118, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR6", 0xe6060118, 32, + GROUP(-18, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1), + GROUP( + /* GP6_31_14 RESERVED */ GP_6_13_FN, GPSR6_13, GP_6_12_FN, GPSR6_12, GP_6_11_FN, GPSR6_11, @@ -2761,13 +2799,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP12_7_4 IP12_3_0 )) }, - { PINMUX_CFG_REG("IPSR13", 0xe6060234, 32, 4, GROUP( - /* IP13_31_28 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP13_27_24 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP13_23_20 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP13_19_16 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* IP13_15_12 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* 
IP13_11_8 */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("IPSR13", 0xe6060234, 32, + GROUP(-24, 4, 4), + GROUP( + /* IP13_31_8 RESERVED */ IP13_7_4 IP13_3_0 )) }, @@ -2777,11 +2812,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, - 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1), + GROUP(-1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, -1, + 1, 1, 1, 1, 1, 1, -4, 1, 1, 1, 1, 1, 1), GROUP( /* RESERVED 31 */ - 0, 0, MOD_SEL0_30 MOD_SEL0_29 MOD_SEL0_28 @@ -2793,7 +2827,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_20_19 MOD_SEL0_18_17 /* RESERVED 16 */ - 0, 0, MOD_SEL0_15 MOD_SEL0_14 MOD_SEL0_13 @@ -2801,7 +2834,6 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_11 MOD_SEL0_10 /* RESERVED 9, 8, 7, 6 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MOD_SEL0_5 MOD_SEL0_4 MOD_SEL0_3 @@ -2810,7 +2842,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL0_0 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6060504, 32, - GROUP(1, 1, 1, 1, 1, 1, 2, 4, 4, 4, 4, 4, 4), + GROUP(1, 1, 1, 1, 1, 1, -26), GROUP( MOD_SEL1_31 MOD_SEL1_30 @@ -2818,20 +2850,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL1_28 MOD_SEL1_27 MOD_SEL1_26 - /* RESERVED 25, 24 */ - 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 11, 10, 9, 8 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 7, 6, 5, 4 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 3, 2, 1, 0 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED 25-0 */ )) }, { }, }; diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c index 4a668a04b7ca..760c83a8740b 100644 --- a/drivers/pinctrl/renesas/pfc-r8a779a0.c +++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c @@ -389,7 +389,6 @@ #define IP3SR1_19_16 FM(GP1_28) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP3SR1_23_20 FM(GP1_29) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP3SR1_27_24 FM(GP1_30) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(D2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP3SR1_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP0SR2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ #define IP0SR2_3_0 FM(IPC_CLKIN) FM(IPC_CLKEN_IN) F_(0, 0) F_(0, 0) FM(DU_DOTCLKIN) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -420,11 +419,8 @@ #define IP2SR2_31_28 FM(TCLK1_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(EX_WAIT0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP0SR3 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ -#define IP0SR3_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR3_7_4 
FM(CANFD0_TX) FM(FXR_TXDA_B) FM(TX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR3_11_8 FM(CANFD0_RX) FM(RXDA_EXTFXR_B) FM(RX1_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR3_15_12 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP0SR3_19_16 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR3_23_20 FM(CANFD2_TX) FM(TPU0TO2) FM(PWM0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR3_27_24 FM(CANFD2_RX) FM(TPU0TO3) FM(PWM1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP0SR3_31_28 FM(CANFD3_TX) F_(0, 0) FM(PWM2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -435,8 +431,6 @@ #define IP1SR3_15_12 FM(CANFD5_TX) F_(0, 0) F_(0, 0) FM(FXR_TXENA_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR3_19_16 FM(CANFD5_RX) F_(0, 0) F_(0, 0) FM(FXR_TXENB_N) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR3_23_20 FM(CANFD6_TX) F_(0, 0) F_(0, 0) FM(STPWT_EXTFXR) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR3_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP1SR3_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP0SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ #define IP0SR4_3_0 FM(AVB0_RX_CTL) FM(AVB0_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -457,14 +451,10 @@ #define IP1SR4_27_24 FM(AVB0_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR4_31_28 FM(AVB0_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR4 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ -#define IP2SR4_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR4_7_4 FM(AVB0_LINK) FM(AVB0_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR4_11_8 FM(AVB0_AVTP_MATCH) FM(AVB0_MII_RX_ER) FM(CC5_OSCOUT) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR4_15_12 FM(AVB0_AVTP_CAPTURE) FM(AVB0_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR4_19_16 FM(AVB0_AVTP_PPS) FM(AVB0_MII_COL) F_(0, 0) F_(0, 0) F_(0, 
0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR4_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR4_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR4_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP0SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ #define IP0SR5_3_0 FM(AVB1_RX_CTL) FM(AVB1_MII_RX_DV) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -485,14 +475,10 @@ #define IP1SR5_27_24 FM(AVB1_MDC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP1SR5_31_28 FM(AVB1_MAGIC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP2SR5 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 - F */ -#define IP2SR5_3_0 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR5_7_4 FM(AVB1_LINK) FM(AVB1_MII_TX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR5_11_8 FM(AVB1_AVTP_MATCH) FM(AVB1_MII_RX_ER) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR5_15_12 FM(AVB1_AVTP_CAPTURE) FM(AVB1_MII_CRS) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR5_19_16 FM(AVB1_AVTP_PPS) FM(AVB1_MII_COL) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR5_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR5_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR5_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define PINMUX_GPSR \ \ @@ -537,7 +523,7 @@ FM(IP0SR1_15_12) IP0SR1_15_12 FM(IP1SR1_15_12) IP1SR1_15_12 FM(IP2SR1_15_12) IP2 FM(IP0SR1_19_16) IP0SR1_19_16 FM(IP1SR1_19_16) IP1SR1_19_16 FM(IP2SR1_19_16) IP2SR1_19_16 FM(IP3SR1_19_16) IP3SR1_19_16 \ FM(IP0SR1_23_20) IP0SR1_23_20 FM(IP1SR1_23_20) IP1SR1_23_20 FM(IP2SR1_23_20) IP2SR1_23_20 FM(IP3SR1_23_20) IP3SR1_23_20 \ FM(IP0SR1_27_24) IP0SR1_27_24 FM(IP1SR1_27_24) IP1SR1_27_24 FM(IP2SR1_27_24) IP2SR1_27_24 FM(IP3SR1_27_24) IP3SR1_27_24 \ -FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 FM(IP3SR1_31_28) IP3SR1_31_28 \ +FM(IP0SR1_31_28) IP0SR1_31_28 FM(IP1SR1_31_28) IP1SR1_31_28 FM(IP2SR1_31_28) IP2SR1_31_28 \ \ FM(IP0SR2_3_0) IP0SR2_3_0 FM(IP1SR2_3_0) IP1SR2_3_0 FM(IP2SR2_3_0) IP2SR2_3_0 \ FM(IP0SR2_7_4) IP0SR2_7_4 FM(IP1SR2_7_4) IP1SR2_7_4 FM(IP2SR2_7_4) IP2SR2_7_4 \ @@ 
-548,32 +534,32 @@ FM(IP0SR2_23_20) IP0SR2_23_20 FM(IP1SR2_23_20) IP1SR2_23_20 FM(IP2SR2_23_20) IP2 FM(IP0SR2_27_24) IP0SR2_27_24 FM(IP1SR2_27_24) IP1SR2_27_24 FM(IP2SR2_27_24) IP2SR2_27_24 \ FM(IP0SR2_31_28) IP0SR2_31_28 FM(IP1SR2_31_28) IP1SR2_31_28 FM(IP2SR2_31_28) IP2SR2_31_28 \ \ -FM(IP0SR3_3_0) IP0SR3_3_0 FM(IP1SR3_3_0) IP1SR3_3_0 \ + FM(IP1SR3_3_0) IP1SR3_3_0 \ FM(IP0SR3_7_4) IP0SR3_7_4 FM(IP1SR3_7_4) IP1SR3_7_4 \ FM(IP0SR3_11_8) IP0SR3_11_8 FM(IP1SR3_11_8) IP1SR3_11_8 \ -FM(IP0SR3_15_12) IP0SR3_15_12 FM(IP1SR3_15_12) IP1SR3_15_12 \ -FM(IP0SR3_19_16) IP0SR3_19_16 FM(IP1SR3_19_16) IP1SR3_19_16 \ + FM(IP1SR3_15_12) IP1SR3_15_12 \ + FM(IP1SR3_19_16) IP1SR3_19_16 \ FM(IP0SR3_23_20) IP0SR3_23_20 FM(IP1SR3_23_20) IP1SR3_23_20 \ -FM(IP0SR3_27_24) IP0SR3_27_24 FM(IP1SR3_27_24) IP1SR3_27_24 \ -FM(IP0SR3_31_28) IP0SR3_31_28 FM(IP1SR3_31_28) IP1SR3_31_28 \ +FM(IP0SR3_27_24) IP0SR3_27_24 \ +FM(IP0SR3_31_28) IP0SR3_31_28 \ \ -FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 FM(IP2SR4_3_0) IP2SR4_3_0 \ +FM(IP0SR4_3_0) IP0SR4_3_0 FM(IP1SR4_3_0) IP1SR4_3_0 \ FM(IP0SR4_7_4) IP0SR4_7_4 FM(IP1SR4_7_4) IP1SR4_7_4 FM(IP2SR4_7_4) IP2SR4_7_4 \ FM(IP0SR4_11_8) IP0SR4_11_8 FM(IP1SR4_11_8) IP1SR4_11_8 FM(IP2SR4_11_8) IP2SR4_11_8 \ FM(IP0SR4_15_12) IP0SR4_15_12 FM(IP1SR4_15_12) IP1SR4_15_12 FM(IP2SR4_15_12) IP2SR4_15_12 \ FM(IP0SR4_19_16) IP0SR4_19_16 FM(IP1SR4_19_16) IP1SR4_19_16 FM(IP2SR4_19_16) IP2SR4_19_16 \ -FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 FM(IP2SR4_23_20) IP2SR4_23_20 \ -FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 FM(IP2SR4_27_24) IP2SR4_27_24 \ -FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 FM(IP2SR4_31_28) IP2SR4_31_28 \ +FM(IP0SR4_23_20) IP0SR4_23_20 FM(IP1SR4_23_20) IP1SR4_23_20 \ +FM(IP0SR4_27_24) IP0SR4_27_24 FM(IP1SR4_27_24) IP1SR4_27_24 \ +FM(IP0SR4_31_28) IP0SR4_31_28 FM(IP1SR4_31_28) IP1SR4_31_28 \ \ -FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 FM(IP2SR5_3_0) IP2SR5_3_0 \ +FM(IP0SR5_3_0) IP0SR5_3_0 FM(IP1SR5_3_0) IP1SR5_3_0 \ FM(IP0SR5_7_4) IP0SR5_7_4 FM(IP1SR5_7_4) IP1SR5_7_4 FM(IP2SR5_7_4) IP2SR5_7_4 \ FM(IP0SR5_11_8) IP0SR5_11_8 FM(IP1SR5_11_8) IP1SR5_11_8 FM(IP2SR5_11_8) IP2SR5_11_8 \ FM(IP0SR5_15_12) IP0SR5_15_12 FM(IP1SR5_15_12) IP1SR5_15_12 FM(IP2SR5_15_12) IP2SR5_15_12 \ FM(IP0SR5_19_16) IP0SR5_19_16 FM(IP1SR5_19_16) IP1SR5_19_16 FM(IP2SR5_19_16) IP2SR5_19_16 \ -FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 FM(IP2SR5_23_20) IP2SR5_23_20 \ -FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 FM(IP2SR5_27_24) IP2SR5_27_24 \ -FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 FM(IP2SR5_31_28) IP2SR5_31_28 +FM(IP0SR5_23_20) IP0SR5_23_20 FM(IP1SR5_23_20) IP1SR5_23_20 \ +FM(IP0SR5_27_24) IP0SR5_27_24 FM(IP1SR5_27_24) IP1SR5_27_24 \ +FM(IP0SR5_31_28) IP0SR5_31_28 FM(IP1SR5_31_28) IP1SR5_31_28 /* MOD_SEL2 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ #define MOD_SEL2_15_14 FM(SEL_I2C6_0) F_(0, 0) F_(0, 0) FM(SEL_I2C6_3) @@ -629,7 +615,36 @@ enum { }; static const u16 pinmux_data[] = { +/* Using GP_2_[2-15] requires disabling I2C in MOD_SEL2 */ +#define GP_2_2_FN GP_2_2_FN, FN_SEL_I2C0_0 +#define GP_2_3_FN GP_2_3_FN, FN_SEL_I2C0_0 +#define GP_2_4_FN GP_2_4_FN, FN_SEL_I2C1_0 +#define GP_2_5_FN GP_2_5_FN, FN_SEL_I2C1_0 +#define GP_2_6_FN GP_2_6_FN, FN_SEL_I2C2_0 +#define GP_2_7_FN GP_2_7_FN, FN_SEL_I2C2_0 +#define GP_2_8_FN GP_2_8_FN, FN_SEL_I2C3_0 +#define GP_2_9_FN GP_2_9_FN, FN_SEL_I2C3_0 +#define GP_2_10_FN GP_2_10_FN, FN_SEL_I2C4_0 +#define GP_2_11_FN GP_2_11_FN, FN_SEL_I2C4_0 +#define GP_2_12_FN 
GP_2_12_FN, FN_SEL_I2C5_0 +#define GP_2_13_FN GP_2_13_FN, FN_SEL_I2C5_0 +#define GP_2_14_FN GP_2_14_FN, FN_SEL_I2C6_0 +#define GP_2_15_FN GP_2_15_FN, FN_SEL_I2C6_0 PINMUX_DATA_GP_ALL(), +#undef GP_2_2_FN +#undef GP_2_3_FN +#undef GP_2_4_FN +#undef GP_2_5_FN +#undef GP_2_6_FN +#undef GP_2_7_FN +#undef GP_2_8_FN +#undef GP_2_9_FN +#undef GP_2_10_FN +#undef GP_2_11_FN +#undef GP_2_12_FN +#undef GP_2_13_FN +#undef GP_2_14_FN +#undef GP_2_15_FN PINMUX_SINGLE(MMC_D7), PINMUX_SINGLE(MMC_D6), @@ -3223,14 +3238,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6050840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6050840, 32, + GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP2_31_25 RESERVED */ GP_2_24_FN, GPSR2_24, GP_2_23_FN, GPSR2_23, GP_2_22_FN, GPSR2_22, @@ -3257,22 +3269,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe6058840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe6058840, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_17 RESERVED */ GP_3_16_FN, GPSR3_16, GP_3_15_FN, GPSR3_15, GP_3_14_FN, GPSR3_14, @@ -3325,18 +3326,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, GPSR4_1, GP_4_0_FN, GPSR4_0, )) }, - { PINMUX_CFG_REG("GPSR5", 0xe6060840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR5", 0xe6060840, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP5_31_21 RESERVED */ GP_5_20_FN, GPSR5_20, GP_5_19_FN, GPSR5_19, GP_5_18_FN, GPSR5_18, @@ -3359,18 +3353,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_5_1_FN, GPSR5_1, GP_5_0_FN, GPSR5_0, )) }, - { PINMUX_CFG_REG("GPSR6", 0xe6068040, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR6", 0xe6068040, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP6_31_21 RESERVED */ GP_6_20_FN, GPSR6_20, GP_6_19_FN, GPSR6_19, GP_6_18_FN, GPSR6_18, @@ -3393,18 +3380,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_1_FN, GPSR6_1, GP_6_0_FN, GPSR6_0, )) }, - { PINMUX_CFG_REG("GPSR7", 0xe6068840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR7", 0xe6068840, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP7_31_21 RESERVED */ GP_7_20_FN, GPSR7_20, GP_7_19_FN, GPSR7_19, GP_7_18_FN, GPSR7_18, @@ -3427,18 +3407,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_7_1_FN, GPSR7_1, GP_7_0_FN, GPSR7_0, )) }, - { PINMUX_CFG_REG("GPSR8", 0xe6069040, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR8", 0xe6069040, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP8_31_21 RESERVED */ GP_8_20_FN, GPSR8_20, GP_8_19_FN, GPSR8_19, GP_8_18_FN, GPSR8_18, @@ -3461,18 +3434,11 @@ static const struct pinmux_cfg_reg 
pinmux_config_regs[] = { GP_8_1_FN, GPSR8_1, GP_8_0_FN, GPSR8_0, )) }, - { PINMUX_CFG_REG("GPSR9", 0xe6069840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR9", 0xe6069840, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP9_31_21 RESERVED */ GP_9_20_FN, GPSR9_20, GP_9_19_FN, GPSR9_19, GP_9_18_FN, GPSR9_18, @@ -3530,8 +3496,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP2SR1_7_4 IP2SR1_3_0)) }, - { PINMUX_CFG_REG("IP3SR1", 0xe605006c, 32, 4, GROUP( - IP3SR1_31_28 + { PINMUX_CFG_REG_VAR("IP3SR1", 0xe605006c, 32, + GROUP(-4, 4, 4, 4, 4, 4, 4, 4), + GROUP( + /* IP3SR1_31_28 RESERVED */ IP3SR1_27_24 IP3SR1_23_20 IP3SR1_19_16 @@ -3570,19 +3538,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP2SR2_7_4 IP2SR2_3_0)) }, - { PINMUX_CFG_REG("IP0SR3", 0xe6058860, 32, 4, GROUP( + { PINMUX_CFG_REG_VAR("IP0SR3", 0xe6058860, 32, + GROUP(4, 4, 4, -8, 4, 4, -4), + GROUP( IP0SR3_31_28 IP0SR3_27_24 IP0SR3_23_20 - IP0SR3_19_16 - IP0SR3_15_12 + /* IP0SR3_19_12 RESERVED */ IP0SR3_11_8 IP0SR3_7_4 - IP0SR3_3_0)) + /* IP0SR3_3_0 RESERVED */ )) }, - { PINMUX_CFG_REG("IP1SR3", 0xe6058864, 32, 4, GROUP( - IP1SR3_31_28 - IP1SR3_27_24 + { PINMUX_CFG_REG_VAR("IP1SR3", 0xe6058864, 32, + GROUP(-8, 4, 4, 4, 4, 4, 4), + GROUP( + /* IP1SR3_31_24 RESERVED */ IP1SR3_23_20 IP1SR3_19_16 IP1SR3_15_12 @@ -3610,15 +3580,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP1SR4_7_4 IP1SR4_3_0)) }, - { PINMUX_CFG_REG("IP2SR4", 0xe6060068, 32, 4, GROUP( - IP2SR4_31_28 - IP2SR4_27_24 - IP2SR4_23_20 + { PINMUX_CFG_REG_VAR("IP2SR4", 0xe6060068, 32, + GROUP(-12, 4, 4, 4, 4, -4), + GROUP( + /* IP2SR4_31_20 RESERVED */ IP2SR4_19_16 IP2SR4_15_12 IP2SR4_11_8 IP2SR4_7_4 - IP2SR4_3_0)) + /* IP2SR4_3_0 RESERVED */ )) }, { PINMUX_CFG_REG("IP0SR5", 0xe6060860, 32, 4, GROUP( IP0SR5_31_28 @@ -3640,15 +3610,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP1SR5_7_4 IP1SR5_3_0)) }, - { PINMUX_CFG_REG("IP2SR5", 0xe6060868, 32, 4, GROUP( - IP2SR5_31_28 - IP2SR5_27_24 - IP2SR5_23_20 + { PINMUX_CFG_REG_VAR("IP2SR5", 0xe6060868, 32, + GROUP(-12, 4, 4, 4, 4, -4), + GROUP( + /* IP2SR5_31_20 RESERVED */ IP2SR5_19_16 IP2SR5_15_12 IP2SR5_11_8 IP2SR5_7_4 - IP2SR5_3_0)) + /* IP2SR5_3_0 RESERVED */ )) }, #undef F_ #undef FM @@ -3656,16 +3626,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xe6050900, 32, - GROUP(4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 1, 1), + GROUP(-16, 2, 2, 2, 2, 2, 2, 2, -2), GROUP( - /* RESERVED 31, 30, 29, 28 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 27, 26, 25, 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 31-16 */ MOD_SEL2_15_14 MOD_SEL2_13_12 MOD_SEL2_11_10 @@ -3673,8 +3636,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MOD_SEL2_7_6 MOD_SEL2_5_4 MOD_SEL2_3_2 - 0, 0, - 0, 0, )) + /* RESERVED 1-0 */ )) }, { }, }; diff --git a/drivers/pinctrl/renesas/pfc-r8a779f0.c b/drivers/pinctrl/renesas/pfc-r8a779f0.c index 91860608242c..aaca4ee2af55 100644 --- a/drivers/pinctrl/renesas/pfc-r8a779f0.c +++ b/drivers/pinctrl/renesas/pfc-r8a779f0.c @@ -144,9 +144,6 @@ #define IP2SR0_11_8 FM(IRQ1) F_(0, 0) F_(0, 0) FM(MSIOF1_SS2) F_(0, 0) 
FM(TSN0_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR0_15_12 FM(IRQ2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TSN1_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) #define IP2SR0_19_16 FM(IRQ3) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) FM(TSN2_PHY_INT_A) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR0_23_20 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR0_27_24 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) -#define IP2SR0_31_28 F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) /* IP0SR1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 - F */ #define IP0SR1_3_0 FM(GP1_00) FM(TCLK1) FM(HSCK2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) @@ -192,9 +189,9 @@ FM(IP0SR0_7_4) IP0SR0_7_4 FM(IP1SR0_7_4) IP1SR0_7_4 FM(IP2SR0_7_4) IP2SR0_7_4 FM(IP0SR0_11_8) IP0SR0_11_8 FM(IP1SR0_11_8) IP1SR0_11_8 FM(IP2SR0_11_8) IP2SR0_11_8 \ FM(IP0SR0_15_12) IP0SR0_15_12 FM(IP1SR0_15_12) IP1SR0_15_12 FM(IP2SR0_15_12) IP2SR0_15_12 \ FM(IP0SR0_19_16) IP0SR0_19_16 FM(IP1SR0_19_16) IP1SR0_19_16 FM(IP2SR0_19_16) IP2SR0_19_16 \ -FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 FM(IP2SR0_23_20) IP2SR0_23_20 \ -FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 FM(IP2SR0_27_24) IP2SR0_27_24 \ -FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 FM(IP2SR0_31_28) IP2SR0_31_28 \ +FM(IP0SR0_23_20) IP0SR0_23_20 FM(IP1SR0_23_20) IP1SR0_23_20 \ +FM(IP0SR0_27_24) IP0SR0_27_24 FM(IP1SR0_27_24) IP1SR0_27_24 \ +FM(IP0SR0_31_28) IP0SR0_31_28 FM(IP1SR0_31_28) IP1SR0_31_28 \ \ FM(IP0SR1_3_0) IP0SR1_3_0 \ FM(IP0SR1_7_4) IP0SR1_7_4 \ @@ -257,7 +254,28 @@ enum { }; static const u16 pinmux_data[] = { +/* Using GP_1_[0-9] requires disabling I2C in MOD_SEL1 */ +#define GP_1_0_FN GP_1_0_FN, FN_SEL_I2C0_0 +#define GP_1_1_FN GP_1_1_FN, FN_SEL_I2C0_0 +#define GP_1_2_FN GP_1_2_FN, FN_SEL_I2C1_0 +#define GP_1_3_FN GP_1_3_FN, FN_SEL_I2C1_0 +#define GP_1_4_FN GP_1_4_FN, FN_SEL_I2C2_0 +#define GP_1_5_FN GP_1_5_FN, FN_SEL_I2C2_0 +#define GP_1_6_FN GP_1_6_FN, FN_SEL_I2C3_0 +#define GP_1_7_FN GP_1_7_FN, FN_SEL_I2C3_0 +#define GP_1_8_FN GP_1_8_FN, FN_SEL_I2C4_0 +#define GP_1_9_FN GP_1_9_FN, FN_SEL_I2C4_0 PINMUX_DATA_GP_ALL(), +#undef GP_1_0_FN +#undef GP_1_1_FN +#undef GP_1_2_FN +#undef GP_1_3_FN +#undef GP_1_4_FN +#undef GP_1_5_FN +#undef GP_1_6_FN +#undef GP_1_7_FN +#undef GP_1_8_FN +#undef GP_1_9_FN PINMUX_SINGLE(SD_WP), PINMUX_SINGLE(SD_CD), @@ -1599,18 +1617,11 @@ static const struct sh_pfc_function pinmux_functions[] = { static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) FN_##y #define FM(x) FN_##x - { PINMUX_CFG_REG("GPSR0", 0xe6050040, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR0", 0xe6050040, 32, + GROUP(-11, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP0_31_21 RESERVED */ GP_0_20_FN, GPSR0_20, GP_0_19_FN, GPSR0_19, GP_0_18_FN, GPSR0_18, @@ -1633,14 +1644,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 
GP_0_1_FN, GPSR0_1, GP_0_0_FN, GPSR0_0, )) }, - { PINMUX_CFG_REG("GPSR1", 0xe6050840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR1", 0xe6050840, 32, + GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP1_31_25 RESERVED */ GP_1_24_FN, GPSR1_24, GP_1_23_FN, GPSR1_23, GP_1_22_FN, GPSR1_22, @@ -1667,22 +1675,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_1_1_FN, GPSR1_1, GP_1_0_FN, GPSR1_0, )) }, - { PINMUX_CFG_REG("GPSR2", 0xe6051040, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR2", 0xe6051040, 32, + GROUP(-15, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1), + GROUP( + /* GP2_31_17 RESERVED */ GP_2_16_FN, GPSR2_16, GP_2_15_FN, GPSR2_15, GP_2_14_FN, GPSR2_14, @@ -1701,20 +1698,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_2_1_FN, GPSR2_1, GP_2_0_FN, GPSR2_0, )) }, - { PINMUX_CFG_REG("GPSR3", 0xe6051840, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("GPSR3", 0xe6051840, 32, + GROUP(-13, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP3_31_19 RESERVED */ GP_3_18_FN, GPSR3_18, GP_3_17_FN, GPSR3_17, GP_3_16_FN, GPSR3_16, @@ -1760,10 +1748,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { IP1SR0_7_4 IP1SR0_3_0)) }, - { PINMUX_CFG_REG("IP2SR0", 0xe6050068, 32, 4, GROUP( - IP2SR0_31_28 - IP2SR0_27_24 - IP2SR0_23_20 + { PINMUX_CFG_REG_VAR("IP2SR0", 0xe6050068, 32, + GROUP(-12, 4, 4, 4, 4, 4), + GROUP( + /* IP2SR0_31_20 RESERVED */ IP2SR0_19_16 IP2SR0_15_12 IP2SR0_11_8 @@ -1786,18 +1774,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { #define F_(x, y) x, #define FM(x) FN_##x, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xe6050900, 32, - GROUP(4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2), + GROUP(-20, 2, 2, 2, 2, 2, 2), GROUP( - /* RESERVED 31, 30, 29, 28 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 27, 26, 25, 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 23, 22, 21, 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 19, 18, 17, 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - /* RESERVED 15, 14, 13, 12 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + /* RESERVED 31-12 */ MOD_SEL1_11_10 MOD_SEL1_9_8 MOD_SEL1_7_6 diff --git a/drivers/pinctrl/renesas/pfc-sh7203.c b/drivers/pinctrl/renesas/pfc-sh7203.c index 3986802b448a..19735746b1bb 100644 --- a/drivers/pinctrl/renesas/pfc-sh7203.c +++ b/drivers/pinctrl/renesas/pfc-sh7203.c @@ -1072,31 +1072,20 @@ static const struct pinmux_func pinmux_func_gpios[] = { }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { - { PINMUX_CFG_REG("PBIORL", 0xfffe3886, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PBIORL", 0xfffe3886, 16, + GROUP(-4, 1, 1, 1, 1, -8), + GROUP( + /* RESERVED [4] */ PB11_IN, PB11_OUT, PB10_IN, PB10_OUT, PB9_IN, PB9_OUT, PB8_IN, PB8_OUT, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0 )) + /* RESERVED [8] */ )) }, - { PINMUX_CFG_REG("PBCRL4", 0xfffe3890, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PBCRL4", 0xfffe3890, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ 
PB12MD_00, PB12MD_01, PB12MD_10, PB12MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, @@ -1139,13 +1128,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PB0MD_00, PB0MD_01, PB0MD_10, PB0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("IFCR", 0xfffe38a2, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("IFCR", 0xfffe38a2, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PB12IRQ_00, PB12IRQ_01, PB12IRQ_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, @@ -1167,9 +1153,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PC1_IN, PC1_OUT, PC0_IN, PC0_OUT )) }, - { PINMUX_CFG_REG("PCCRL4", 0xfffe3910, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PCCRL4", 0xfffe3910, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PC14MD_0, PC14MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -1417,8 +1404,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PF1_IN, PF1_OUT, PF0_IN, PF0_OUT )) }, - { PINMUX_CFG_REG("PFCRH4", 0xfffe3a88, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PFCRH4", 0xfffe3a88, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PF30MD_0, PF30MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, diff --git a/drivers/pinctrl/renesas/pfc-sh7264.c b/drivers/pinctrl/renesas/pfc-sh7264.c index 7476b982101d..30096925a70c 100644 --- a/drivers/pinctrl/renesas/pfc-sh7264.c +++ b/drivers/pinctrl/renesas/pfc-sh7264.c @@ -1464,19 +1464,20 @@ static const struct pinmux_func pinmux_func_gpios[] = { }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { - { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PAIOR0", 0xfffe3812, 16, + GROUP(-12, 1, 1, 1, 1), + GROUP( + /* RESERVED [12] */ PA3_IN, PA3_OUT, PA2_IN, PA2_OUT, PA1_IN, PA1_OUT, PA0_IN, PA0_OUT )) }, - { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PBCR5", 0xfffe3824, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PB22MD_00, PB22MD_01, PB22MD_10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB21MD_0, PB21MD_1, 0, 0, 0, 0, 0, 0, @@ -1525,21 +1526,22 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, PB4MD_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP( + { PINMUX_CFG_REG_VAR("PBCR0", 0xfffe382e, 16, + GROUP(4, 4, 4, -4), + GROUP( 0, PB3MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB2MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PB1MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED [4] */ )) }, - { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PBIOR1", 0xfffe3830, 16, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [9] */ PB22_IN, PB22_OUT, PB21_IN, PB21_OUT, PB20_IN, PB20_OUT, @@ -1568,9 +1570,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0 )) }, - { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PCCR2", 0xfffe384a, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PC10MD_0, PC10MD_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, PC9MD_0, PC9MD_1, 0, 0, 0, 0, 0, 0, @@ -1599,8 +1602,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PCIOR0", 0xfffe3852, 16, + GROUP(-5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [5] */ PC10_IN, PC10_OUT, PC9_IN, PC9_OUT, PC8_IN, PC8_OUT, @@ -1675,11 +1680,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PD0_IN, PD0_OUT )) }, - { PINMUX_CFG_REG("PECR1", 0xfffe388c, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PECR1", 0xfffe388c, 16, + GROUP(-8, 4, 4), + GROUP( + /* RESERVED [8] */ PE5MD_00, PE5MD_01, 0, PE5MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PE4MD_00, PE4MD_01, 0, PE4MD_11, 0, 0, 0, 0, @@ -1698,10 +1702,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PEIOR0", 0xfffe3892, 16, + GROUP(-10, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [10] */ PE5_IN, PE5_OUT, PE4_IN, PE4_OUT, PE3_IN, PE3_OUT, @@ -1710,10 +1714,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PE0_IN, PE0_OUT )) }, - { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PFCR3", 0xfffe38a8, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PF12MD_000, PF12MD_001, 0, PF12MD_011, PF12MD_100, PF12MD_101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) @@ -1780,25 +1784,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PF0_IN, PF0_OUT )) }, - { PINMUX_CFG_REG("PGCR7", 0xfffe38c0, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGCR7", 0xfffe38c0, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PG0MD_000, PG0MD_001, PG0MD_010, PG0MD_011, PG0MD_100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PGCR6", 0xfffe38c2, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGCR6", 0xfffe38c2, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PG24MD_00, PG24MD_01, PG24MD_10, PG24MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, @@ -1869,19 +1867,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PG4MD_00, PG4MD_01, PG4MD_10, PG4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PGCR0", 0xfffe38ce, 16, 4, GROUP( + { PINMUX_CFG_REG_VAR("PGCR0", 0xfffe38ce, 16, + GROUP(4, 4, 4, -4), + GROUP( PG3MD_00, PG3MD_01, PG3MD_10, PG3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PG2MD_00, PG2MD_01, PG2MD_10, PG2MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PG1MD_00, PG1MD_01, PG1MD_10, PG1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED [4] */ )) }, - { PINMUX_CFG_REG("PGIOR1", 0xfffe38d0, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGIOR1", 0xfffe38d0, 16, + GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [7] */ PG24_IN, PG24_OUT, PG23_IN, 
PG23_OUT, PG22_IN, PG22_OUT, diff --git a/drivers/pinctrl/renesas/pfc-sh7269.c b/drivers/pinctrl/renesas/pfc-sh7269.c index 733a2c114ca2..f59f558d75ae 100644 --- a/drivers/pinctrl/renesas/pfc-sh7269.c +++ b/drivers/pinctrl/renesas/pfc-sh7269.c @@ -1966,15 +1966,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { * mode registers and modes are described in assending order [0..15] */ - { PINMUX_CFG_REG("PAIOR0", 0xfffe3812, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, PA1_IN, PA1_OUT, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, PA0_IN, PA0_OUT )) + { PINMUX_CFG_REG_VAR("PAIOR0", 0xfffe3812, 16, + GROUP(-7, 1, -7, 1), + GROUP( + /* RESERVED [7] */ + PA1_IN, PA1_OUT, + /* RESERVED [7] */ + PA0_IN, PA0_OUT )) }, - { PINMUX_CFG_REG("PBCR5", 0xfffe3824, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PBCR5", 0xfffe3824, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PB22MD_000, PB22MD_001, PB22MD_010, PB22MD_011, PB22MD_100, PB22MD_101, PB22MD_110, PB22MD_111, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2045,7 +2048,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PB4MD_00, PB4MD_01, PB4MD_10, PB4MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PBCR0", 0xfffe382e, 16, 4, GROUP( + { PINMUX_CFG_REG_VAR("PBCR0", 0xfffe382e, 16, + GROUP(4, 4, 4, -4), + GROUP( PB3MD_00, PB3MD_01, PB3MD_10, PB3MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2055,13 +2060,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PB1MD_00, PB1MD_01, PB1MD_10, PB1MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) + /* RESERVED [4] */ )) }, - { PINMUX_CFG_REG("PBIOR1", 0xfffe3830, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PBIOR1", 0xfffe3830, 16, + GROUP(-9, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [9] */ PB22_IN, PB22_OUT, PB21_IN, PB21_OUT, PB20_IN, PB20_OUT, @@ -2089,13 +2094,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0 )) }, - { PINMUX_CFG_REG("PCCR2", 0xfffe384a, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PCCR2", 0xfffe384a, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PC8MD_000, PC8MD_001, PC8MD_010, PC8MD_011, PC8MD_100, PC8MD_101, PC8MD_110, PC8MD_111, 0, 0, 0, 0, 0, 0, 0, 0 )) @@ -2130,8 +2132,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PCIOR0", 0xfffe3852, 16, + GROUP(-7, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [7] */ PC8_IN, PC8_OUT, PC7_IN, PC7_OUT, PC6_IN, PC6_OUT, @@ -2244,9 +2248,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PE0MD_00, PE0MD_01, PE0MD_10, PE0MD_11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PEIOR0", 0xfffe3892, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PEIOR0", 0xfffe3892, 16, + GROUP(-8, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [8] */ PE7_IN, PE7_OUT, PE6_IN, PE6_OUT, PE5_IN, PE5_OUT, @@ -2291,20 +2296,18 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PF16MD_100, PF16MD_101, PF16MD_110, PF16MD_111, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PFCR4", 0xfffe38a6, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PFCR4", 0xfffe38a6, 16, + GROUP(-12, 4), + GROUP( + /* RESERVED [12] */ PF15MD_000, PF15MD_001, PF15MD_010, PF15MD_011, PF15MD_100, PF15MD_101, PF15MD_110, PF15MD_111, 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PFCR3", 0xfffe38a8, 16, 4, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - + { PINMUX_CFG_REG_VAR("PFCR3", 0xfffe38a8, 16, + GROUP(-4, 4, 4, 4), + GROUP( + /* RESERVED [4] */ PF14MD_000, PF14MD_001, PF14MD_010, PF14MD_011, PF14MD_100, PF14MD_101, PF14MD_110, PF14MD_111, 0, 0, 0, 0, 0, 0, 0, 0, @@ -2369,9 +2372,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, 0, 0, 0, 0, 0, 0 )) }, - { PINMUX_CFG_REG("PFIOR1", 0xfffe38b0, 16, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PFIOR1", 0xfffe38b0, 16, + GROUP(-8, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [8] */ PF23_IN, PF23_OUT, PF22_IN, PF22_OUT, PF21_IN, PF21_OUT, diff --git a/drivers/pinctrl/renesas/pfc-sh73a0.c b/drivers/pinctrl/renesas/pfc-sh73a0.c index 5d8a0179fd60..4f54dfd5a967 100644 --- a/drivers/pinctrl/renesas/pfc-sh73a0.c +++ b/drivers/pinctrl/renesas/pfc-sh73a0.c @@ -3798,24 +3798,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PORTCR(308, 0xe6052134), /* PORT308CR */ PORTCR(309, 0xe6052135), /* PORT309CR */ - { PINMUX_CFG_REG("MSEL2CR", 0xe605801c, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("MSEL2CR", 0xe605801c, 32, + GROUP(-12, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* RESERVED [12] */ MSEL2CR_MSEL19_0, MSEL2CR_MSEL19_1, MSEL2CR_MSEL18_0, MSEL2CR_MSEL18_1, MSEL2CR_MSEL17_0, MSEL2CR_MSEL17_1, MSEL2CR_MSEL16_0, MSEL2CR_MSEL16_1, - 0, 0, + /* RESERVED [1] */ MSEL2CR_MSEL14_0, MSEL2CR_MSEL14_1, MSEL2CR_MSEL13_0, MSEL2CR_MSEL13_1, MSEL2CR_MSEL12_0, MSEL2CR_MSEL12_1, @@ -3833,60 +3825,43 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MSEL2CR_MSEL0_0, MSEL2CR_MSEL0_1, )) }, - { PINMUX_CFG_REG("MSEL3CR", 0xe6058020, 32, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("MSEL3CR", 0xe6058020, 32, + GROUP(-3, 1, -12, 1, -3, 1, -1, 1, -2, 1, -3, 1, + -2), + GROUP( + /* RESERVED [3] */ MSEL3CR_MSEL28_0, MSEL3CR_MSEL28_1, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [12] */ MSEL3CR_MSEL15_0, MSEL3CR_MSEL15_1, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ MSEL3CR_MSEL11_0, MSEL3CR_MSEL11_1, - 0, 0, + /* RESERVED [1] */ MSEL3CR_MSEL9_0, MSEL3CR_MSEL9_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL3CR_MSEL6_0, MSEL3CR_MSEL6_1, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ MSEL3CR_MSEL2_0, MSEL3CR_MSEL2_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("MSEL4CR", 0xe6058024, 32, 1, GROUP( - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("MSEL4CR", 0xe6058024, 32, + GROUP(-2, 1, -1, 1, 1, -3, 1, 1, 1, 1, -3, 1, + -1, 1, 1, 1, 1, 1, 1, 1, -2, 1, -2, 1, + -1), + GROUP( + /* RESERVED [2] */ MSEL4CR_MSEL29_0, MSEL4CR_MSEL29_1, - 0, 0, + /* RESERVED [1] */ MSEL4CR_MSEL27_0, MSEL4CR_MSEL27_1, MSEL4CR_MSEL26_0, MSEL4CR_MSEL26_1, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ MSEL4CR_MSEL22_0, MSEL4CR_MSEL22_1, MSEL4CR_MSEL21_0, MSEL4CR_MSEL21_1, MSEL4CR_MSEL20_0, MSEL4CR_MSEL20_1, MSEL4CR_MSEL19_0, MSEL4CR_MSEL19_1, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ MSEL4CR_MSEL15_0, 
MSEL4CR_MSEL15_1, - 0, 0, + /* RESERVED [1] */ MSEL4CR_MSEL13_0, MSEL4CR_MSEL13_1, MSEL4CR_MSEL12_0, MSEL4CR_MSEL12_1, MSEL4CR_MSEL11_0, MSEL4CR_MSEL11_1, @@ -3894,13 +3869,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { MSEL4CR_MSEL9_0, MSEL4CR_MSEL9_1, MSEL4CR_MSEL8_0, MSEL4CR_MSEL8_1, MSEL4CR_MSEL7_0, MSEL4CR_MSEL7_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL4CR_MSEL4_0, MSEL4CR_MSEL4_1, - 0, 0, - 0, 0, + /* RESERVED [2] */ MSEL4CR_MSEL1_0, MSEL4CR_MSEL1_1, - 0, 0, + /* RESERVED [1] */ )) }, { }, diff --git a/drivers/pinctrl/renesas/pfc-sh7720.c b/drivers/pinctrl/renesas/pfc-sh7720.c index 7071ef52449d..6eedcc5bbb4d 100644 --- a/drivers/pinctrl/renesas/pfc-sh7720.c +++ b/drivers/pinctrl/renesas/pfc-sh7720.c @@ -1014,25 +1014,24 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN, PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN )) }, - { PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PKCR", 0xa4050112, 16, + GROUP(-8, 2, 2, 2, 2), + GROUP( + /* RESERVED [8] */ PTK3_FN, PTK3_OUT, 0, PTK3_IN, PTK2_FN, PTK2_OUT, 0, PTK2_IN, PTK1_FN, PTK1_OUT, 0, PTK1_IN, PTK0_FN, PTK0_OUT, 0, PTK0_IN )) }, - { PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PLCR", 0xa4050114, 16, + GROUP(2, 2, 2, 2, 2, -6), + GROUP( PTL7_FN, PTL7_OUT, 0, PTL7_IN, PTL6_FN, PTL6_OUT, 0, PTL6_IN, PTL5_FN, PTL5_OUT, 0, PTL5_IN, PTL4_FN, PTL4_OUT, 0, PTL4_IN, PTL3_FN, PTL3_OUT, 0, PTL3_IN, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 )) + /* RESERVED [6] */ )) }, { PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2, GROUP( PTM7_FN, PTM7_OUT, 0, PTM7_IN, @@ -1044,10 +1043,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTM1_FN, PTM1_OUT, 0, PTM1_IN, PTM0_FN, PTM0_OUT, 0, PTM0_IN )) }, - { PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PPCR", 0xa4050118, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ PTP4_FN, PTP4_OUT, 0, PTP4_IN, PTP3_FN, PTP3_OUT, 0, PTP3_IN, PTP2_FN, PTP2_OUT, 0, PTP2_IN, @@ -1064,40 +1063,40 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTR1_FN, PTR1_OUT, 0, PTR1_IN, PTR0_FN, PTR0_OUT, 0, PTR0_IN )) }, - { PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PSCR", 0xa405011c, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ PTS4_FN, PTS4_OUT, 0, PTS4_IN, PTS3_FN, PTS3_OUT, 0, PTS3_IN, PTS2_FN, PTS2_OUT, 0, PTS2_IN, PTS1_FN, PTS1_OUT, 0, PTS1_IN, PTS0_FN, PTS0_OUT, 0, PTS0_IN )) }, - { PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PTCR", 0xa405011e, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ PTT4_FN, PTT4_OUT, 0, PTT4_IN, PTT3_FN, PTT3_OUT, 0, PTT3_IN, PTT2_FN, PTT2_OUT, 0, PTT2_IN, PTT1_FN, PTT1_OUT, 0, PTT1_IN, PTT0_FN, PTT0_OUT, 0, PTT0_IN )) }, - { PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PUCR", 0xa4050120, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ PTU4_FN, PTU4_OUT, 0, PTU4_IN, PTU3_FN, PTU3_OUT, 0, PTU3_IN, PTU2_FN, PTU2_OUT, 0, PTU2_IN, PTU1_FN, PTU1_OUT, 0, PTU1_IN, PTU0_FN, PTU0_OUT, 0, PTU0_IN )) }, - { PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PVCR", 0xa4050122, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ 
PTV4_FN, PTV4_OUT, 0, PTV4_IN, PTV3_FN, PTV3_OUT, 0, PTV3_IN, PTV2_FN, PTV2_OUT, 0, PTV2_IN, diff --git a/drivers/pinctrl/renesas/pfc-sh7722.c b/drivers/pinctrl/renesas/pfc-sh7722.c index 13d9967dce59..4b82ac2c5e91 100644 --- a/drivers/pinctrl/renesas/pfc-sh7722.c +++ b/drivers/pinctrl/renesas/pfc-sh7722.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7722.h> @@ -1256,14 +1255,16 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { HPD49, PTB1_OUT, 0, PTB1_IN, HPD48, PTB0_OUT, 0, PTB0_IN )) }, - { PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PCCR", 0xa4050104, 16, + GROUP(2, -2, 2, 2, 2, 2, -2, 2), + GROUP( 0, 0, 0, PTC7_IN, - 0, 0, 0, 0, + /* RESERVED [2] */ IOIS16, 0, 0, PTC5_IN, HPDQM7, PTC4_OUT, 0, PTC4_IN, HPDQM6, PTC3_OUT, 0, PTC3_IN, HPDQM5, PTC2_OUT, 0, PTC2_IN, - 0, 0, 0, 0, + /* RESERVED [2] */ HPDQM4, PTC0_OUT, 0, PTC0_IN )) }, { PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2, GROUP( @@ -1276,13 +1277,14 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { SDHICMD, PTD1_OUT, 0, PTD1_IN, SDHICLK, PTD0_OUT, 0, 0 )) }, - { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PECR", 0xa4050108, 16, + GROUP(2, 2, 2, 2, -4, 2, 2), + GROUP( A25, PTE7_OUT, 0, PTE7_IN, A24, PTE6_OUT, 0, PTE6_IN, A23, PTE5_OUT, 0, PTE5_IN, A22, PTE4_OUT, 0, PTE4_IN, - 0, 0, 0, 0, - 0, 0, 0, 0, + /* RESERVED [4] */ IRQ5, PTE1_OUT, 0, PTE1_IN, IRQ4_BS, PTE0_OUT, 0, PTE0_IN )) }, @@ -1296,10 +1298,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { SIORXD_SIUBISLD, 0, 0, PTF1_IN, SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 )) }, - { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ AUDSYNC, PTG4_OUT, 0, 0, AUDATA3, PTG3_OUT, 0, 0, AUDATA2, PTG2_OUT, 0, 0, @@ -1316,13 +1318,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { LCDD17_DV_HSYNC, PTH1_OUT, 0, PTH1_IN, LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN )) }, - { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PJCR", 0xa4050110, 16, + GROUP(2, 2, 2, -6, 2, 2), + GROUP( STATUS0, PTJ7_OUT, 0, 0, 0, PTJ6_OUT, 0, 0, PDSTATUS, PTJ5_OUT, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + /* RESERVED [6] */ IRQ1, PTJ1_OUT, 0, PTJ1_IN, IRQ0, PTJ0_OUT, 0, PTJ0_IN )) }, @@ -1376,50 +1378,50 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTQ1, PTQ1_OUT, 0, 0, PTQ0, PTQ0_OUT, 0, PTQ0_IN )) }, - { PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PRCR", 0xa405011c, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ LCDRD, PTR4_OUT, 0, 0, CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0, WAIT, 0, 0, PTR2_IN, LCDDCK_LCDWR, PTR1_OUT, 0, 0, LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 )) }, - { PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PSCR", 0xa405011e, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ SCIF0_CTS_SIUAISPD, 0, 0, PTS4_IN, SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0, SCIF0_SCK_TPUTO, PTS2_OUT, 0, PTS2_IN, SCIF0_RXD, 0, 0, PTS1_IN, SCIF0_TXD, PTS0_OUT, 0, 0 )) }, - { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PTCR", 0xa4050140, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ FOE_VIO_VD2, PTT4_OUT, 0, PTT4_IN, FWE, PTT3_OUT, 0, 
PTT3_IN, FSC, PTT2_OUT, 0, PTT2_IN, DREQ0, 0, 0, PTT1_IN, FCDE, PTT0_OUT, 0, 0 )) }, - { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PUCR", 0xa4050142, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ NAF2_VIO_D10, PTU4_OUT, 0, PTU4_IN, NAF1_VIO_D9, PTU3_OUT, 0, PTU3_IN, NAF0_VIO_D8, PTU2_OUT, 0, PTU2_IN, FRB_VIO_CLK2, 0, 0, PTU1_IN, FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN )) }, - { PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PVCR", 0xa4050144, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ NAF7_VIO_D15, PTV4_OUT, 0, PTV4_IN, NAF6_VIO_D14, PTV3_OUT, 0, PTV3_IN, NAF5_VIO_D13, PTV2_OUT, 0, PTV2_IN, @@ -1446,9 +1448,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { LCDD19_DV_CLKI, PTX1_OUT, 0, PTX1_IN, LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN )) }, - { PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PYCR", 0xa405014a, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ KEYOUT5_IN5, PTY5_OUT, 0, PTY5_IN, KEYOUT4_IN6, PTY4_OUT, 0, PTY4_IN, KEYOUT3, PTY3_OUT, 0, PTY3_IN, @@ -1456,33 +1459,27 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { KEYOUT1, PTY1_OUT, 0, 0, KEYOUT0, PTY0_OUT, 0, PTY0_IN )) }, - { PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PZCR", 0xa405014c, 16, + GROUP(-4, 2, 2, 2, 2, 2, -2), + GROUP( + /* RESERVED [4] */ KEYIN4_IRQ7, 0, 0, PTZ5_IN, KEYIN3, 0, 0, PTZ4_IN, KEYIN2, 0, 0, PTZ3_IN, KEYIN1, 0, 0, PTZ2_IN, KEYIN0_IRQ6, 0, 0, PTZ1_IN, - 0, 0, 0, 0 )) + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSELA", 0xa405014e, 16, + GROUP(1, 1, -4, 1, -4, 1, -4), + GROUP( PSA15_KEYIN0, PSA15_IRQ6, PSA14_KEYIN4, PSA14_IRQ7, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [4] */ PSA9_IRQ4, PSA9_BS, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [4] */ PSA4_IRQ2, PSA4_SDHID2, - 0, 0, - 0, 0, - 0, 0, - 0, 0 )) + /* RESERVED [4] */ )) }, { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 1, GROUP( PSB15_SIOTXD, PSB15_SIUBOSLD, @@ -1502,22 +1499,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PSB1_SIUMCKA, PSB1_SIOF1_MCK, PSB0_SIUAOSLD, PSB0_SIOF1_TXD )) }, - { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSELC", 0xa4050152, 16, + GROUP(1, 1, 1, 1, 1, -10, 1), + GROUP( PSC15_SIUAISLD, PSC15_SIOF1_RXD, PSC14_SIUAOBT, PSC14_SIOF1_SCK, PSC13_SIUAOLR, PSC13_SIOF1_SYNC, PSC12_SIUAIBT, PSC12_SIOF1_SS1, PSC11_SIUAILR, PSC11_SIOF1_SS2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [10] */ PSC0_NAF, PSC0_VIO )) }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 1, GROUP( @@ -1538,61 +1528,45 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { 0, 0, PSD0_LCDD19_LCDD0, PSD0_DV )) }, - { PINMUX_CFG_REG("PSELE", 0xa4050156, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSELE", 0xa4050156, 16, + GROUP(1, 1, 1, 1, 1, -7, 1, 1, 1, 1), + GROUP( PSE15_SIOF0_MCK_IRQ3, PSE15_SIM_D, PSE14_SIOF0_TXD_IRDA_OUT, PSE14_SIM_CLK, PSE13_SIOF0_RXD_IRDA_IN, PSE13_TS_SDAT, PSE12_LCDVSYN2, PSE12_DACK, PSE11_SIUMCKA_SIOF1_MCK, PSE11_SIUFCKA, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [7] */ PSE3_FLCTL, PSE3_VIO, PSE2_NAF2, PSE2_VIO_D10, PSE1_NAF1, PSE1_VIO_D9, PSE0_NAF0, PSE0_VIO_D8 )) }, - { PINMUX_CFG_REG("HIZCRA", 0xa4050158, 16, 1, GROUP( - 0, 0, + { 
PINMUX_CFG_REG_VAR("HIZCRA", 0xa4050158, 16, + GROUP(-1, 1, -3, 1, 1, 1, 1, 1, -6), + GROUP( + /* RESERVED [1] */ HIZA14_KEYSC, HIZA14_HIZ, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ HIZA10_NAF, HIZA10_HIZ, HIZA9_VIO, HIZA9_HIZ, HIZA8_LCDC, HIZA8_HIZ, HIZA7_LCDC, HIZA7_HIZ, HIZA6_LCDC, HIZA6_HIZ, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0 )) + /* RESERVED [6] */ )) }, - { PINMUX_CFG_REG("HIZCRB", 0xa405015a, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("HIZCRB", 0xa405015a, 16, + GROUP(-11, 1, -2, 1, 1), + GROUP( + /* RESERVED [11] */ HIZB4_SIUA, HIZB4_HIZ, - 0, 0, - 0, 0, + /* RESERVED [2] */ HIZB1_VIO, HIZB1_HIZ, HIZB0_VIO, HIZB0_HIZ )) }, - { PINMUX_CFG_REG("HIZCRC", 0xa405015c, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("HIZCRC", 0xa405015c, 16, + GROUP(1, 1, 1, 1, 1, 1, 1, 1, -8), + GROUP( HIZC15_IRQ7, HIZC15_HIZ, HIZC14_IRQ6, HIZC14_HIZ, HIZC13_IRQ5, HIZC13_HIZ, @@ -1601,32 +1575,15 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { HIZC10_IRQ2, HIZC10_HIZ, HIZC9_IRQ1, HIZC9_HIZ, HIZC8_IRQ0, HIZC8_HIZ, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0 )) + /* RESERVED [8] */ )) }, - { PINMUX_CFG_REG("MSELCRB", 0xa4050182, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("MSELCRB", 0xa4050182, 16, + GROUP(-6, 1, 1, -8), + GROUP( + /* RESERVED [6] */ MSELB9_VIO, MSELB9_VIO2, MSELB8_RGB, MSELB8_SYS, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0 )) + /* RESERVED [8] */ )) }, {} }; diff --git a/drivers/pinctrl/renesas/pfc-sh7723.c b/drivers/pinctrl/renesas/pfc-sh7723.c index 6f08f527c010..95344281966e 100644 --- a/drivers/pinctrl/renesas/pfc-sh7723.c +++ b/drivers/pinctrl/renesas/pfc-sh7723.c @@ -5,7 +5,6 @@ * Copyright (C) 2008 Magnus Damm */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7723.h> @@ -1547,9 +1546,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTD1_FN, PTD1_OUT, 0, PTD1_IN, PTD0_FN, PTD0_OUT, 0, PTD0_IN )) }, - { PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PECR", 0xa4050108, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PTE5_FN, PTE5_OUT, 0, PTE5_IN, PTE4_FN, PTE4_OUT, 0, PTE4_IN, PTE3_FN, PTE3_OUT, 0, PTE3_IN, @@ -1567,9 +1567,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTF1_FN, PTF1_OUT, 0, PTF1_IN, PTF0_FN, PTF0_OUT, 0, PTF0_IN )) }, - { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PTG5_FN, PTG5_OUT, 0, 0, PTG4_FN, PTG4_OUT, 0, 0, PTG3_FN, PTG3_OUT, 0, 0, @@ -1587,11 +1588,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTH1_FN, PTH1_OUT, 0, PTH1_IN, PTH0_FN, PTH0_OUT, 0, PTH0_IN )) }, - { PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PJCR", 0xa4050110, 16, + GROUP(2, -2, 2, -2, 2, 2, 2, 2), + GROUP( PTJ7_FN, PTJ7_OUT, 0, 0, - 0, 0, 0, 0, + /* RESERVED [2] */ PTJ5_FN, PTJ5_OUT, 0, 0, - 0, 0, 0, 0, + /* RESERVED [2] */ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN, PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN, PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN, @@ -1637,11 +1640,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTN1_FN, PTN1_OUT, 0, PTN1_IN, PTN0_FN, PTN0_OUT, 0, PTN0_IN )) }, - { PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PQCR", 
0xa405011a, 16, + GROUP(-8, 2, 2, 2, 2), + GROUP( + /* RESERVED [8] */ PTQ3_FN, 0, 0, PTQ3_IN, PTQ2_FN, 0, 0, PTQ2_IN, PTQ1_FN, 0, 0, PTQ1_IN, @@ -1667,9 +1669,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTS1_FN, PTS1_OUT, 0, PTS1_IN, PTS0_FN, PTS0_OUT, 0, PTS0_IN )) }, - { PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PTCR", 0xa4050140, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PTT5_FN, PTT5_OUT, 0, PTT5_IN, PTT4_FN, PTT4_OUT, 0, PTT4_IN, PTT3_FN, PTT3_OUT, 0, PTT3_IN, @@ -1677,9 +1680,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTT1_FN, PTT1_OUT, 0, PTT1_IN, PTT0_FN, PTT0_OUT, 0, PTT0_IN )) }, - { PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PUCR", 0xa4050142, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PTU5_FN, PTU5_OUT, 0, PTU5_IN, PTU4_FN, PTU4_OUT, 0, PTU4_IN, PTU3_FN, PTU3_OUT, 0, PTU3_IN, @@ -1737,35 +1741,38 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN, PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN )) }, - { PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PSELA", 0xa405014e, 16, + GROUP(2, 2, 2, -4, 2, 2, -2), + GROUP( PSA15_PSA14_FN1, PSA15_PSA14_FN2, 0, 0, PSA13_PSA12_FN1, PSA13_PSA12_FN2, 0, 0, PSA11_PSA10_FN1, PSA11_PSA10_FN2, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + /* RESERVED [4] */ PSA5_PSA4_FN1, PSA5_PSA4_FN2, PSA5_PSA4_FN3, 0, PSA3_PSA2_FN1, PSA3_PSA2_FN2, 0, 0, - 0, 0, 0, 0 )) + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("PSELB", 0xa4050150, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PSELB", 0xa4050150, 16, + GROUP(2, 2, -2, 2, 2, 2, 2, -2), + GROUP( PSB15_PSB14_FN1, PSB15_PSB14_FN2, 0, 0, PSB13_PSB12_LCDC_RGB, PSB13_PSB12_LCDC_SYS, 0, 0, - 0, 0, 0, 0, + /* RESERVED [2] */ PSB9_PSB8_FN1, PSB9_PSB8_FN2, PSB9_PSB8_FN3, 0, PSB7_PSB6_FN1, PSB7_PSB6_FN2, 0, 0, PSB5_PSB4_FN1, PSB5_PSB4_FN2, 0, 0, PSB3_PSB2_FN1, PSB3_PSB2_FN2, 0, 0, - 0, 0, 0, 0 )) + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("PSELC", 0xa4050152, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PSELC", 0xa4050152, 16, + GROUP(2, 2, 2, 2, 2, -6), + GROUP( PSC15_PSC14_FN1, PSC15_PSC14_FN2, 0, 0, PSC13_PSC12_FN1, PSC13_PSC12_FN2, 0, 0, PSC11_PSC10_FN1, PSC11_PSC10_FN2, PSC11_PSC10_FN3, 0, PSC9_PSC8_FN1, PSC9_PSC8_FN2, 0, 0, PSC7_PSC6_FN1, PSC7_PSC6_FN2, PSC7_PSC6_FN3, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0 )) + /* RESERVED [3] */ )) }, { PINMUX_CFG_REG("PSELD", 0xa4050154, 16, 2, GROUP( PSD15_PSD14_FN1, PSD15_PSD14_FN2, 0, 0, diff --git a/drivers/pinctrl/renesas/pfc-sh7724.c b/drivers/pinctrl/renesas/pfc-sh7724.c index 7a18afecda2c..26517ad26a0f 100644 --- a/drivers/pinctrl/renesas/pfc-sh7724.c +++ b/drivers/pinctrl/renesas/pfc-sh7724.c @@ -10,7 +10,6 @@ * Copyright (C) 2008 Magnus Damm */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7724.h> @@ -1799,9 +1798,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PTF1_FN, PTF1_OUT, 0, PTF1_IN, PTF0_FN, PTF0_OUT, 0, PTF0_IN )) }, - { PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PGCR", 0xa405010c, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PTG5_FN, PTG5_OUT, 0, 0, PTG4_FN, PTG4_OUT, 0, 0, PTG3_FN, PTG3_OUT, 0, 0, diff --git a/drivers/pinctrl/renesas/pfc-sh7734.c b/drivers/pinctrl/renesas/pfc-sh7734.c index dbc36079c381..106a500ad13d 100644 --- a/drivers/pinctrl/renesas/pfc-sh7734.c +++ 
b/drivers/pinctrl/renesas/pfc-sh7734.c @@ -5,7 +5,6 @@ * Copyright (C) 2012 Renesas Solutions Corp. * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7734.h> @@ -1806,16 +1805,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_4_1_FN, FN_IP9_21_20, GP_4_0_FN, FN_IP9_19_18 )) }, - { PINMUX_CFG_REG("GPSR5", 0xFFFC0018, 32, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 28 */ - 0, 0, 0, 0, 0, 0, 0, 0, /* 27 - 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 20 */ - 0, 0, 0, 0, 0, 0, 0, 0, /* 19 - 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */ + { PINMUX_CFG_REG_VAR("GPSR5", 0xFFFC0018, 32, + GROUP(-20, 1, 1, -6, 1, 1, 1, 1), + GROUP( + /* GP5_31_12 RESERVED */ GP_5_11_FN, FN_IP10_29_28, GP_5_10_FN, FN_IP10_27_26, - 0, 0, 0, 0, 0, 0, 0, 0, /* 9 - 6 */ - 0, 0, 0, 0, /* 5, 4 */ + /* GP5_9_4 RESERVED */ GP_5_3_FN, FN_IRQ3_B, GP_5_2_FN, FN_IRQ2_B, GP_5_1_FN, FN_IP11_3, @@ -1896,10 +1892,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_A16, FN_ST0_PWM, FN_LCD_DON_A, FN_TIOC4A_C )) }, { PINMUX_CFG_REG_VAR("IPSR2", 0xFFFC0024, 32, - GROUP(1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3), + GROUP(-1, 3, 3, 2, 3, 3, 3, 3, 3, 3, 2, 3), GROUP( - /* IP2_31 [1] */ - 0, 0, + /* IP2_31 [1] RESERVED */ /* IP2_30_28 [3] */ FN_D14, FN_TX2_B, 0, FN_FSE_A, FN_ET0_TX_CLK_B, 0, 0, 0, @@ -1933,10 +1928,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_FD4_A, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR3", 0xFFFC0028, 32, - GROUP(2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2), + GROUP(-2, 3, 3, 3, 1, 2, 3, 3, 3, 3, 3, 1, 2), GROUP( - /* IP3_31_30 [2] */ - 0, 0, 0, 0, + /* IP3_31_30 [2] RESERVED */ /* IP3_29_27 [3] */ FN_DRACK0, FN_SD1_DAT2_A, FN_ATAG, FN_TCLK1_A, FN_ET0_ETXD7, 0, 0, 0, @@ -2007,19 +2001,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ET0_ERXD7, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR5", 0xFFFC0030, 32, - GROUP(1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, - 3, 3, 3), + GROUP(-5, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP5_31 [1] */ - 0, 0, - /* IP5_30 [1] */ - 0, 0, - /* IP5_29 [1] */ - 0, 0, - /* IP5_28 [1] */ - 0, 0, - /* IP5_27 [1] */ - 0, 0, + /* IP5_31_27 [5] RESERVED */ /* IP5_26_25 [2] */ FN_REF50CK, FN_CTS1_E, FN_HCTS0_D, 0, /* IP5_24_23 [2] */ @@ -2049,25 +2033,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_ET0_RX_CLK_B, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR6", 0xFFFC0034, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 2, 2, - 2, 2, 2, 2, 3, 3), + GROUP(-8, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3), GROUP( - /* IP5_31 [1] */ - 0, 0, - /* IP6_30 [1] */ - 0, 0, - /* IP6_29 [1] */ - 0, 0, - /* IP6_28 [1] */ - 0, 0, - /* IP6_27 [1] */ - 0, 0, - /* IP6_26 [1] */ - 0, 0, - /* IP6_25 [1] */ - 0, 0, - /* IP6_24 [1] */ - 0, 0, + /* IP5_31_24 [8] RESERVED */ /* IP6_23_21 [3] */ FN_DU0_DG1, FN_CTS1_C, FN_HRTS0_D, FN_TIOC1B_A, FN_HIFD09, 0, 0, 0, @@ -2094,10 +2062,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_TCLKA_A, FN_HIFD00, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR7", 0xFFFC0038, 32, - GROUP(1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3), + GROUP(-1, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3), GROUP( - /* IP7_31 [1] */ - 0, 0, + /* IP7_31 [1] RESERVED */ /* IP7_30_29 [2] */ FN_DU0_DB4, 0, FN_HIFINT, 0, /* IP7_28_27 [2] */ @@ -2131,11 +2098,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_HIFD10, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR8", 0xFFFC003C, 32, - GROUP(2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, + GROUP(-2, 2, 2, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), 
GROUP( - /* IP9_31_30 [2] */ - 0, 0, 0, 0, + /* IP9_31_30 [2] RESERVED */ /* IP8_29_28 [2] */ FN_IRQ3_A, FN_RTS0_A, FN_HRTS0_B, FN_ET0_ERXD3_A, /* IP8_27_26 [2] */ @@ -2169,11 +2135,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_DU0_DB5, 0, FN_HIFDREQ, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR9", 0xFFFC0040, 32, - GROUP(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + GROUP(-2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2), GROUP( - /* IP9_31_30 [2] */ - 0, 0, 0, 0, + /* IP9_31_30 [2] RESERVED */ /* IP9_29_28 [2] */ FN_SSI_SDATA1_A, FN_VI1_3_B, FN_LCD_DATA14_B, 0, /* IP9_27_26 [2] */ @@ -2206,10 +2171,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_VI1_CLK_A, 0, FN_FD0_B, FN_LCD_DATA0_B )) }, { PINMUX_CFG_REG_VAR("IPSR10", 0xFFFC0044, 32, - GROUP(2, 2, 2, 1, 2, 1, 3, 3, 1, 3, 3, 3, 3, 3), + GROUP(-2, 2, 2, 1, 2, 1, 3, 3, 1, 3, 3, 3, 3, 3), GROUP( - /* IP9_31_30 [2] */ - 0, 0, 0, 0, + /* IP9_31_30 [2] RESERVED */ /* IP10_29_28 [2] */ FN_CAN1_TX_A, FN_TX5_C, FN_MLB_DAT, 0, /* IP10_27_26 [2] */ @@ -2245,11 +2209,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_LCD_DATA15_B, 0, 0, 0 )) }, { PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32, - GROUP(3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, + GROUP(-3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1), GROUP( - /* IP11_31_29 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* IP11_31_29 [3] RESERVED */ /* IP11_28 [1] */ FN_PRESETOUT, FN_ST_CLKOUT, /* IP11_27_26 [2] */ @@ -2287,11 +2250,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SCL1, FN_SCIF_CLK_C )) }, { PINMUX_CFG_REG_VAR("MOD_SEL1", 0xFFFC004C, 32, - GROUP(3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, + GROUP(-3, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), GROUP( - /* SEL1_31_29 [3] */ - 0, 0, 0, 0, 0, 0, 0, 0, + /* SEL1_31_29 [3] RESERVED */ /* SEL1_28 [1] */ FN_SEL_IEBUS_0, FN_SEL_IEBUS_1, /* SEL1_27 [1] */ @@ -2344,25 +2306,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_SEL_INTC_0, FN_SEL_INTC_1 )) }, { PINMUX_CFG_REG_VAR("MOD_SEL2", 0xFFFC0050, 32, - GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, - 2, 1, 2, 2, 3, 2, 3, 2, 2), + GROUP(-8, 1, 1, 1, 2, 2, 1, 2, 2, 3, 2, 3, 2, 2), GROUP( - /* SEL2_31 [1] */ - 0, 0, - /* SEL2_30 [1] */ - 0, 0, - /* SEL2_29 [1] */ - 0, 0, - /* SEL2_28 [1] */ - 0, 0, - /* SEL2_27 [1] */ - 0, 0, - /* SEL2_26 [1] */ - 0, 0, - /* SEL2_25 [1] */ - 0, 0, - /* SEL2_24 [1] */ - 0, 0, + /* SEL2_31_24 [8] RESERVED */ /* SEL2_23 [1] */ FN_SEL_MTU2_CLK_0, FN_SEL_MTU2_CLK_1, /* SEL2_22 [1] */ @@ -2403,10 +2349,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { }, { PINMUX_CFG_REG("INOUTSEL4", 0xFFC44004, 32, 1, GROUP(GP_INOUTSEL(4))) }, - { PINMUX_CFG_REG("INOUTSEL5", 0xffc45004, 32, 1, GROUP( - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 31 - 24 */ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 23 - 16 */ - 0, 0, 0, 0, 0, 0, 0, 0, /* 15 - 12 */ + { PINMUX_CFG_REG_VAR("INOUTSEL5", 0xffc45004, 32, + GROUP(-20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), + GROUP( + /* GP5_31_12 RESERVED */ GP_5_11_IN, GP_5_11_OUT, GP_5_10_IN, GP_5_10_OUT, GP_5_9_IN, GP_5_9_OUT, diff --git a/drivers/pinctrl/renesas/pfc-sh7757.c b/drivers/pinctrl/renesas/pfc-sh7757.c index 064e987b09cb..0d7857d7efef 100644 --- a/drivers/pinctrl/renesas/pfc-sh7757.c +++ b/drivers/pinctrl/renesas/pfc-sh7757.c @@ -10,7 +10,6 @@ * Copyright (C) 2008 Magnus Damm */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7757.h> @@ -1964,43 +1963,35 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = 
{ 0, 0, 0, 0, )) }, - { PINMUX_CFG_REG("PSEL1", 0xffec0072, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PSEL1", 0xffec0072, 16, + GROUP(-5, 1, 1, 1, -5, 1, -2), + GROUP( + /* RESERVED [5] */ PS1_10_FN1, PS1_10_FN2, PS1_9_FN1, PS1_9_FN2, PS1_8_FN1, PS1_8_FN2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [5] */ PS1_2_FN1, PS1_2_FN2, - 0, 0, - 0, 0, )) + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("PSEL2", 0xffec0074, 16, 1, GROUP( - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PSEL2", 0xffec0074, 16, + GROUP(-2, 1, 1, -4, 1, 1, 1, 1, -1, 1, -2), + GROUP( + /* RESERVED [2] */ PS2_13_FN1, PS2_13_FN2, PS2_12_FN1, PS2_12_FN2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [4] */ PS2_7_FN1, PS2_7_FN2, PS2_6_FN1, PS2_6_FN2, PS2_5_FN1, PS2_5_FN2, PS2_4_FN1, PS2_4_FN2, - 0, 0, + /* RESERVED [1] */ PS2_2_FN1, PS2_2_FN2, - 0, 0, - 0, 0, )) + /* RESERVED [2] */ )) }, - { PINMUX_CFG_REG("PSEL3", 0xffec0076, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSEL3", 0xffec0076, 16, + GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, -4, 1, 1, -1), + GROUP( PS3_15_FN1, PS3_15_FN2, PS3_14_FN1, PS3_14_FN2, PS3_13_FN1, PS3_13_FN2, @@ -2010,38 +2001,35 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PS3_9_FN1, PS3_9_FN2, PS3_8_FN1, PS3_8_FN2, PS3_7_FN1, PS3_7_FN2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [4] */ PS3_2_FN1, PS3_2_FN2, PS3_1_FN1, PS3_1_FN2, - 0, 0, )) + /* RESERVED [1] */ )) }, - { PINMUX_CFG_REG("PSEL4", 0xffec0078, 16, 1, GROUP( - 0, 0, + { PINMUX_CFG_REG_VAR("PSEL4", 0xffec0078, 16, + GROUP(-1, 1, 1, 1, -1, 1, 1, 1, -3, 1, 1, 1, + 1, 1), + GROUP( + /* RESERVED [1] */ PS4_14_FN1, PS4_14_FN2, PS4_13_FN1, PS4_13_FN2, PS4_12_FN1, PS4_12_FN2, - 0, 0, + /* RESERVED [1] */ PS4_10_FN1, PS4_10_FN2, PS4_9_FN1, PS4_9_FN2, PS4_8_FN1, PS4_8_FN2, - 0, 0, - 0, 0, - 0, 0, + /* RESERVED [3] */ PS4_4_FN1, PS4_4_FN2, PS4_3_FN1, PS4_3_FN2, PS4_2_FN1, PS4_2_FN2, PS4_1_FN1, PS4_1_FN2, PS4_0_FN1, PS4_0_FN2, )) }, - { PINMUX_CFG_REG("PSEL5", 0xffec007a, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("PSEL5", 0xffec007a, 16, + GROUP(-4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -2), + GROUP( + /* RESERVED [4] */ PS5_11_FN1, PS5_11_FN2, PS5_10_FN1, PS5_10_FN2, PS5_9_FN1, PS5_9_FN2, @@ -2052,8 +2040,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PS5_4_FN1, PS5_4_FN2, PS5_3_FN1, PS5_3_FN2, PS5_2_FN1, PS5_2_FN2, - 0, 0, - 0, 0, )) + /* RESERVED [2] */ )) }, { PINMUX_CFG_REG("PSEL6", 0xffec007c, 16, 1, GROUP( PS6_15_FN1, PS6_15_FN2, @@ -2073,7 +2060,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PS6_1_FN1, PS6_1_FN2, PS6_0_FN1, PS6_0_FN2, )) }, - { PINMUX_CFG_REG("PSEL7", 0xffec0082, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSEL7", 0xffec0082, 16, + GROUP(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -5), + GROUP( PS7_15_FN1, PS7_15_FN2, PS7_14_FN1, PS7_14_FN2, PS7_13_FN1, PS7_13_FN2, @@ -2085,13 +2074,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PS7_7_FN1, PS7_7_FN2, PS7_6_FN1, PS7_6_FN2, PS7_5_FN1, PS7_5_FN2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, )) + /* RESERVED [5] */ )) }, - { PINMUX_CFG_REG("PSEL8", 0xffec0084, 16, 1, GROUP( + { PINMUX_CFG_REG_VAR("PSEL8", 0xffec0084, 16, + GROUP(1, 1, 1, 1, 1, 1, 1, 1, -8), + GROUP( PS8_15_FN1, PS8_15_FN2, PS8_14_FN1, PS8_14_FN2, PS8_13_FN1, PS8_13_FN2, @@ -2100,14 +2087,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PS8_10_FN1, PS8_10_FN2, PS8_9_FN1, PS8_9_FN2, PS8_8_FN1, PS8_8_FN2, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, )) + /* RESERVED [8] */ )) }, {} }; diff 
--git a/drivers/pinctrl/renesas/pfc-sh7785.c b/drivers/pinctrl/renesas/pfc-sh7785.c index c4c1e288c53e..126b663bb6eb 100644 --- a/drivers/pinctrl/renesas/pfc-sh7785.c +++ b/drivers/pinctrl/renesas/pfc-sh7785.c @@ -5,7 +5,6 @@ * Copyright (C) 2008 Magnus Damm */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7785.h> @@ -1025,9 +1024,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PD1_FN, PD1_OUT, PD1_IN, 0, PD0_FN, PD0_OUT, PD0_IN, 0 )) }, - { PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PECR", 0xffe70008, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PE5_FN, PE5_OUT, PE5_IN, 0, PE4_FN, PE4_OUT, PE4_IN, 0, PE3_FN, PE3_OUT, PE3_IN, 0, @@ -1095,13 +1095,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PL1_FN, PL1_OUT, PL1_IN, 0, PL0_FN, PL0_OUT, PL0_IN, 0 )) }, - { PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PMCR", 0xffe70016, 16, + GROUP(-12, 2, 2), + GROUP( + /* RESERVED [12] */ PM1_FN, PM1_OUT, PM1_IN, 0, PM0_FN, PM0_OUT, PM0_IN, 0 )) }, @@ -1115,9 +1112,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PN1_FN, PN1_OUT, PN1_IN, 0, PN0_FN, PN0_OUT, PN0_IN, 0 )) }, - { PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PPCR", 0xffe7001a, 16, + GROUP(-4, 2, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [4] */ PP5_FN, PP5_OUT, PP5_IN, 0, PP4_FN, PP4_OUT, PP4_IN, 0, PP3_FN, PP3_OUT, PP3_IN, 0, @@ -1125,21 +1123,20 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PP1_FN, PP1_OUT, PP1_IN, 0, PP0_FN, PP0_OUT, PP0_IN, 0 )) }, - { PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PQCR", 0xffe7001c, 16, + GROUP(-6, 2, 2, 2, 2, 2), + GROUP( + /* RESERVED [6] */ PQ4_FN, PQ4_OUT, PQ4_IN, 0, PQ3_FN, PQ3_OUT, PQ3_IN, 0, PQ2_FN, PQ2_OUT, PQ2_IN, 0, PQ1_FN, PQ1_OUT, PQ1_IN, 0, PQ0_FN, PQ0_OUT, PQ0_IN, 0 )) }, - { PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2, GROUP( - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, + { PINMUX_CFG_REG_VAR("PRCR", 0xffe7001e, 16, + GROUP(-8, 2, 2, 2, 2), + GROUP( + /* RESERVED [8] */ PR3_FN, PR3_OUT, PR3_IN, 0, PR2_FN, PR2_OUT, PR2_IN, 0, PR1_FN, PR1_OUT, PR1_IN, 0, @@ -1163,20 +1160,10 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { P1MSEL1_0, P1MSEL1_1, P1MSEL0_0, P1MSEL0_1 )) }, - { PINMUX_CFG_REG("P2MSELR", 0xffe70082, 16, 1, GROUP( - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, - 0, 0, + { PINMUX_CFG_REG_VAR("P2MSELR", 0xffe70082, 16, + GROUP(-13, 1, 1, 1), + GROUP( + /* RESERVED [13] */ P2MSEL2_0, P2MSEL2_1, P2MSEL1_0, P2MSEL1_1, P2MSEL0_0, P2MSEL0_1 )) diff --git a/drivers/pinctrl/renesas/pfc-sh7786.c b/drivers/pinctrl/renesas/pfc-sh7786.c index b8a098cd7721..f09f4a769010 100644 --- a/drivers/pinctrl/renesas/pfc-sh7786.c +++ b/drivers/pinctrl/renesas/pfc-sh7786.c @@ -10,7 +10,6 @@ * Copyright (C) 2008 Magnus Damm */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/sh7786.h> @@ -667,15 +666,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PD1_FN, PD1_OUT, PD1_IN, 0, PD0_FN, PD0_OUT, PD0_IN, 0 )) }, - { PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PECR", 0xffcc0008, 16, + GROUP(2, 2, -12), + GROUP( PE7_FN, PE7_OUT, PE7_IN, 0, PE6_FN, PE6_OUT, PE6_IN, 0, - 0, 0, 0, 0, - 0, 
0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, )) + /* RESERVED [12] */ )) }, { PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2, GROUP( PF7_FN, PF7_OUT, PF7_IN, 0, @@ -687,15 +683,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { PF1_FN, PF1_OUT, PF1_IN, 0, PF0_FN, PF0_OUT, PF0_IN, 0 )) }, - { PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2, GROUP( + { PINMUX_CFG_REG_VAR("PGCR", 0xffcc000c, 16, + GROUP(2, 2, 2, -10), + GROUP( PG7_FN, PG7_OUT, PG7_IN, 0, PG6_FN, PG6_OUT, PG6_IN, 0, PG5_FN, PG5_OUT, PG5_IN, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, )) + /* RESERVED [10] */ )) }, { PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2, GROUP( PH7_FN, PH7_OUT, PH7_IN, 0, diff --git a/drivers/pinctrl/renesas/pfc-shx3.c b/drivers/pinctrl/renesas/pfc-shx3.c index 22e812850964..96a65d83774f 100644 --- a/drivers/pinctrl/renesas/pfc-shx3.c +++ b/drivers/pinctrl/renesas/pfc-shx3.c @@ -4,7 +4,6 @@ * * Copyright (C) 2010 Paul Mundt */ -#include <linux/init.h> #include <linux/kernel.h> #include <cpu/shx3.h> diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index cb805502fb0f..a48cac55152c 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -996,83 +996,112 @@ static const u32 rzg2l_gpio_configs[] = { RZG2L_GPIO_PORT_PACK(5, 0x40, RZG2L_MPXED_PIN_FUNCS), }; -static struct rzg2l_dedicated_configs rzg2l_dedicated_pins[] = { - { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, - (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) }, - { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0, - (PIN_CFG_SR | PIN_CFG_IOLH_A | PIN_CFG_IEN)) }, - { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0, - (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) }, - { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) }, - { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) }, - { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, - { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) }, - { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, - { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, - { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1, - (PIN_CFG_IOLH_B | 
PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, - { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, - { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, - { "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1, - (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, - { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) }, - { "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) }, - { "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) }, - { "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) }, - { "RIIC1_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 3, PIN_CFG_IEN) }, +static const u32 r9a07g043_gpio_configs[] = { + RZG2L_GPIO_PORT_PACK(4, 0x10, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(5, 0x11, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)), + RZG2L_GPIO_PORT_PACK(4, 0x12, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)), + RZG2L_GPIO_PORT_PACK(4, 0x13, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)), + RZG2L_GPIO_PORT_PACK(6, 0x14, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)), + RZG2L_GPIO_PORT_PACK(5, 0x15, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(5, 0x16, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(5, 0x17, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)), + RZG2L_GPIO_PORT_PACK(5, 0x18, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)), + RZG2L_GPIO_PORT_PACK(4, 0x19, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)), + RZG2L_GPIO_PORT_PACK(5, 0x1a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)), + RZG2L_GPIO_PORT_PACK(4, 0x1b, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(2, 0x1c, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(5, 0x1d, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(3, 0x1e, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(4, 0x1f, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(2, 0x20, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(4, 0x21, RZG2L_MPXED_PIN_FUNCS), + RZG2L_GPIO_PORT_PACK(6, 0x22, RZG2L_MPXED_PIN_FUNCS), +}; + +static struct { + struct rzg2l_dedicated_configs 
common[35]; + struct rzg2l_dedicated_configs rzg2l_pins[7]; +} rzg2l_dedicated_pins = { + .common = { + { "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0, + (PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) }, + { "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0, + (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) }, + { "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0, + (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) }, + { "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) }, + { "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) }, + { "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) }, + { "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) }, + { "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, + { "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, + { "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, + { "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, + { "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) }, + { "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) }, + { "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) }, + { "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) }, + { "RIIC1_SDA", 
RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) }, + { "RIIC1_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 3, PIN_CFG_IEN) }, + }, + .rzg2l_pins = { + { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + { "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5, + (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) }, + } }; static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) @@ -1250,16 +1279,29 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) return 0; } +static struct rzg2l_pinctrl_data r9a07g043_data = { + .port_pins = rzg2l_gpio_names, + .port_pin_configs = r9a07g043_gpio_configs, + .dedicated_pins = rzg2l_dedicated_pins.common, + .n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT, + .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common), +}; + static struct rzg2l_pinctrl_data r9a07g044_data = { .port_pins = rzg2l_gpio_names, .port_pin_configs = rzg2l_gpio_configs, - .dedicated_pins = rzg2l_dedicated_pins, + .dedicated_pins = rzg2l_dedicated_pins.common, .n_port_pins = ARRAY_SIZE(rzg2l_gpio_names), - .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins), + .n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) + + ARRAY_SIZE(rzg2l_dedicated_pins.rzg2l_pins), }; static const struct of_device_id rzg2l_pinctrl_of_table[] = { { + .compatible = "renesas,r9a07g043-pinctrl", + .data = &r9a07g043_data, + }, + { .compatible = "renesas,r9a07g044-pinctrl", .data = &r9a07g044_data, }, diff --git a/drivers/pinctrl/renesas/pinctrl-rzn1.c b/drivers/pinctrl/renesas/pinctrl-rzn1.c index ef5fb25b6016..849d091205d4 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzn1.c +++ b/drivers/pinctrl/renesas/pinctrl-rzn1.c @@ -865,17 +865,15 @@ static int rzn1_pinctrl_probe(struct platform_device *pdev) ipctl->mdio_func[0] = -1; ipctl->mdio_func[1] = -1; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ipctl->lev1_protect_phys = (u32)res->start + 0x400; - ipctl->lev1 = devm_ioremap_resource(&pdev->dev, res); + ipctl->lev1 = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(ipctl->lev1)) return PTR_ERR(ipctl->lev1); + ipctl->lev1_protect_phys = (u32)res->start + 0x400; - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - ipctl->lev2_protect_phys = (u32)res->start + 0x400; - ipctl->lev2 = devm_ioremap_resource(&pdev->dev, res); + ipctl->lev2 = devm_platform_get_and_ioremap_resource(pdev, 1, &res); if (IS_ERR(ipctl->lev2)) return PTR_ERR(ipctl->lev2); + ipctl->lev2_protect_phys = (u32)res->start + 0x400; ipctl->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(ipctl->clk)) diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c index 4c37aebc75b8..b438d24c13b5 100644 --- a/drivers/pinctrl/renesas/pinctrl.c +++ b/drivers/pinctrl/renesas/pinctrl.c @@ -9,7 +9,6 @@ #include <linux/device.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> diff --git a/drivers/pinctrl/renesas/sh_pfc.h b/drivers/pinctrl/renesas/sh_pfc.h index 
6b5836ea47de..12bc279f5733 100644 --- a/drivers/pinctrl/renesas/sh_pfc.h +++ b/drivers/pinctrl/renesas/sh_pfc.h @@ -112,7 +112,7 @@ struct pinmux_cfg_reg { #define SET_NR_ENUM_IDS(n) #endif const u16 *enum_ids; - const u8 *var_field_width; + const s8 *var_field_width; }; #define GROUP(...) __VA_ARGS__ @@ -132,9 +132,8 @@ struct pinmux_cfg_reg { .reg = r, .reg_width = r_width, \ .field_width = f_width + BUILD_BUG_ON_ZERO(r_width % f_width) + \ BUILD_BUG_ON_ZERO(sizeof((const u16 []) { ids }) / sizeof(u16) != \ - (r_width / f_width) * (1 << f_width)), \ - .enum_ids = (const u16 [(r_width / f_width) * (1 << f_width)]) \ - { ids } + (r_width / f_width) << f_width), \ + .enum_ids = (const u16 [(r_width / f_width) << f_width]) { ids } /* * Describe a config register consisting of several fields of different widths @@ -143,14 +142,15 @@ struct pinmux_cfg_reg { * - r_width: Width of the register (in bits) * - f_widths: List of widths of the register fields (in bits), from left * to right (i.e. MSB to LSB), wrapped using the GROUP() macro. - * - ids: For each register field (from left to right, i.e. MSB to LSB), - * 2^f_widths[i] enum IDs must be specified, one for each possible - * combination of the register field bit values, all wrapped using - * the GROUP() macro. + * Reserved fields are indicated by negating the field width. + * - ids: For each non-reserved register field (from left to right, i.e. MSB + * to LSB), 2^f_widths[i] enum IDs must be specified, one for each + * possible combination of the register field bit values, all wrapped + * using the GROUP() macro. */ #define PINMUX_CFG_REG_VAR(name, r, r_width, f_widths, ids) \ .reg = r, .reg_width = r_width, \ - .var_field_width = (const u8 []) { f_widths, 0 }, \ + .var_field_width = (const s8 []) { f_widths, 0 }, \ SET_NR_ENUM_IDS(sizeof((const u16 []) { ids }) / sizeof(u16)) \ .enum_ids = (const u16 []) { ids } @@ -162,7 +162,7 @@ struct pinmux_drive_reg_field { struct pinmux_drive_reg { u32 reg; - const struct pinmux_drive_reg_field fields[8]; + const struct pinmux_drive_reg_field fields[10]; }; #define PINMUX_DRIVE_REG(name, r) \ @@ -739,14 +739,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info; * PORTnCR helper macro for SH-Mobile/R-Mobile */ #define PORTCR(nr, reg) { \ - PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, GROUP(2, 2, 1, 3), \ + PINMUX_CFG_REG_VAR("PORT" nr "CR", reg, 8, GROUP(-2, 2, -1, 3), \ GROUP( \ /* PULMD[1:0], handled by .set_bias() */ \ - 0, 0, 0, 0, \ /* IE and OE */ \ 0, PORT##nr##_OUT, PORT##nr##_IN, 0, \ /* SEC, not supported */ \ - 0, 0, \ /* PTMD[2:0] */ \ PORT##nr##_FN0, PORT##nr##_FN1, \ PORT##nr##_FN2, PORT##nr##_FN3, \ diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 7aecd0efde07..57a33fb0f2d7 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -44,6 +44,7 @@ #define STM32_GPIO_LCKR 0x1c #define STM32_GPIO_AFRL 0x20 #define STM32_GPIO_AFRH 0x24 +#define STM32_GPIO_SECCFGR 0x30 /* custom bitfield to backup pin status */ #define STM32_GPIO_BKP_MODE_SHIFT 0 @@ -95,6 +96,7 @@ struct stm32_gpio_bank { u32 bank_ioport_nr; u32 pin_backup[STM32_GPIO_PINS_PER_BANK]; u8 irq_type[STM32_GPIO_PINS_PER_BANK]; + bool secure_control; }; struct stm32_pinctrl { @@ -198,11 +200,7 @@ static inline void __stm32_gpio_set(struct stm32_gpio_bank *bank, if (!value) offset += STM32_GPIO_PINS_PER_BANK; - clk_enable(bank->clk); - writel_relaxed(BIT(offset), bank->base + STM32_GPIO_BSRR); - - clk_disable(bank->clk); } static int 
stm32_gpio_request(struct gpio_chip *chip, unsigned offset) @@ -226,25 +224,11 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset) pinctrl_gpio_free(chip->base + offset); } -static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset) -{ - struct stm32_gpio_bank *bank = gpiochip_get_data(chip); - - return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset)); -} - static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset) { struct stm32_gpio_bank *bank = gpiochip_get_data(chip); - int ret; - - clk_enable(bank->clk); - - ret = stm32_gpio_get_noclk(chip, offset); - clk_disable(bank->clk); - - return ret; + return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset)); } static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value) @@ -302,6 +286,33 @@ static int stm32_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) return ret; } +static int stm32_gpio_init_valid_mask(struct gpio_chip *chip, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct stm32_gpio_bank *bank = gpiochip_get_data(chip); + struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); + unsigned int i; + u32 sec; + + /* All gpio are valid per default */ + bitmap_fill(valid_mask, ngpios); + + if (bank->secure_control) { + /* Tag secured pins as invalid */ + sec = readl_relaxed(bank->base + STM32_GPIO_SECCFGR); + + for (i = 0; i < ngpios; i++) { + if (sec & BIT(i)) { + clear_bit(i, valid_mask); + dev_dbg(pctl->dev, "No access to gpio %d - %d\n", bank->bank_nr, i); + } + } + } + + return 0; +} + static const struct gpio_chip stm32_gpio_template = { .request = stm32_gpio_request, .free = stm32_gpio_free, @@ -312,6 +323,7 @@ static const struct gpio_chip stm32_gpio_template = { .to_irq = stm32_gpio_to_irq, .get_direction = stm32_gpio_get_direction, .set_config = gpiochip_generic_config, + .init_valid_mask = stm32_gpio_init_valid_mask, }; static void stm32_gpio_irq_trigger(struct irq_data *d) @@ -324,7 +336,7 @@ static void stm32_gpio_irq_trigger(struct irq_data *d) return; /* If level interrupt type then retrig */ - level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq); + level = stm32_gpio_get(&bank->gpio_chip, d->hwirq); if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) || (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH)) irq_chip_retrigger_hierarchy(d); @@ -366,7 +378,6 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data) { struct stm32_gpio_bank *bank = irq_data->domain->host_data; struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); - unsigned long flags; int ret; ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq); @@ -380,10 +391,6 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data) return ret; } - flags = irqd_get_trigger_type(irq_data); - if (flags & IRQ_TYPE_LEVEL_MASK) - clk_enable(bank->clk); - return 0; } @@ -391,9 +398,6 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data) { struct stm32_gpio_bank *bank = irq_data->domain->host_data; - if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK) - clk_disable(bank->clk); - gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq); } @@ -534,7 +538,7 @@ stm32_pctrl_find_group_by_pin(struct stm32_pinctrl *pctl, u32 pin) static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl, u32 pin_num, u32 fnum) { - int i; + int i, k; for (i = 0; i < pctl->npins; i++) { const struct stm32_desc_pin *pin = pctl->pins + i; @@ -543,7 
+547,7 @@ static bool stm32_pctrl_is_function_valid(struct stm32_pinctrl *pctl, if (pin->pin.number != pin_num) continue; - while (func && func->name) { + for (k = 0; k < STM32_CONFIG_NUM; k++) { if (func->num == fnum) return true; func++; @@ -770,7 +774,6 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank, unsigned long flags; int err = 0; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); if (pctl->hwlock) { @@ -799,7 +802,6 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return err; } @@ -812,7 +814,6 @@ void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, int alt_offset = STM32_GPIO_AFRL + (pin / 8) * 4; unsigned long flags; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); val = readl_relaxed(bank->base + alt_offset); @@ -824,7 +825,6 @@ void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, *mode = val >> (pin * 2); spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); } static int stm32_pmx_set_mux(struct pinctrl_dev *pctldev, @@ -868,12 +868,32 @@ static int stm32_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, return stm32_pmx_set_mode(bank, pin, !input, 0); } +static int stm32_pmx_request(struct pinctrl_dev *pctldev, unsigned int gpio) +{ + struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct pinctrl_gpio_range *range; + + range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, gpio); + if (!range) { + dev_err(pctl->dev, "No gpio range defined.\n"); + return -EINVAL; + } + + if (!gpiochip_line_is_valid(range->gc, stm32_gpio_pin(gpio))) { + dev_warn(pctl->dev, "Can't access gpio %d\n", gpio); + return -EACCES; + } + + return 0; +} + static const struct pinmux_ops stm32_pmx_ops = { .get_functions_count = stm32_pmx_get_funcs_cnt, .get_function_name = stm32_pmx_get_func_name, .get_function_groups = stm32_pmx_get_func_groups, .set_mux = stm32_pmx_set_mux, .gpio_set_direction = stm32_pmx_gpio_set_direction, + .request = stm32_pmx_request, .strict = true, }; @@ -887,7 +907,6 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank, u32 val; int err = 0; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); if (pctl->hwlock) { @@ -911,7 +930,6 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return err; } @@ -922,14 +940,12 @@ static u32 stm32_pconf_get_driving(struct stm32_gpio_bank *bank, unsigned long flags; u32 val; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); val = readl_relaxed(bank->base + STM32_GPIO_TYPER); val &= BIT(offset); spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return (val >> offset); } @@ -942,7 +958,6 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank, u32 val; int err = 0; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); if (pctl->hwlock) { @@ -966,7 +981,6 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return err; } @@ -977,14 +991,12 @@ static u32 stm32_pconf_get_speed(struct stm32_gpio_bank *bank, unsigned long flags; u32 val; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR); val &= GENMASK(offset * 2 + 1, offset * 2); spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return (val >> (offset 
* 2)); } @@ -997,7 +1009,6 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank, u32 val; int err = 0; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); if (pctl->hwlock) { @@ -1021,7 +1032,6 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank, unlock: spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return err; } @@ -1032,14 +1042,12 @@ static u32 stm32_pconf_get_bias(struct stm32_gpio_bank *bank, unsigned long flags; u32 val; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); val = readl_relaxed(bank->base + STM32_GPIO_PUPDR); val &= GENMASK(offset * 2 + 1, offset * 2); spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return (val >> (offset * 2)); } @@ -1050,7 +1058,6 @@ static bool stm32_pconf_get(struct stm32_gpio_bank *bank, unsigned long flags; u32 val; - clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); if (dir) @@ -1061,7 +1068,6 @@ static bool stm32_pconf_get(struct stm32_gpio_bank *bank, BIT(offset)); spin_unlock_irqrestore(&bank->lock, flags); - clk_disable(bank->clk); return val; } @@ -1084,6 +1090,11 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, bank = gpiochip_get_data(range->gc); offset = stm32_gpio_pin(pin); + if (!gpiochip_line_is_valid(range->gc, offset)) { + dev_warn(pctl->dev, "Can't access gpio %d\n", pin); + return -EACCES; + } + switch (param) { case PIN_CONFIG_DRIVE_PUSH_PULL: ret = stm32_pconf_set_driving(bank, offset, 0); @@ -1163,10 +1174,27 @@ static int stm32_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin, return 0; } +static struct stm32_desc_pin * +stm32_pconf_get_pin_desc_by_pin_number(struct stm32_pinctrl *pctl, + unsigned int pin_number) +{ + struct stm32_desc_pin *pins = pctl->pins; + int i; + + for (i = 0; i < pctl->npins; i++) { + if (pins->pin.number == pin_number) + return pins; + pins++; + } + return NULL; +} + static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) { + struct stm32_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + const struct stm32_desc_pin *pin_desc; struct pinctrl_gpio_range *range; struct stm32_gpio_bank *bank; int offset; @@ -1186,6 +1214,11 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, bank = gpiochip_get_data(range->gc); offset = stm32_gpio_pin(pin); + if (!gpiochip_line_is_valid(range->gc, offset)) { + seq_puts(s, "NO ACCESS"); + return; + } + stm32_pmx_get_mode(bank, offset, &mode, &alt); bias = stm32_pconf_get_bias(bank, offset); @@ -1216,7 +1249,12 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, case 2: drive = stm32_pconf_get_driving(bank, offset); speed = stm32_pconf_get_speed(bank, offset); - seq_printf(s, "%d - %s - %s - %s %s", alt, + pin_desc = stm32_pconf_get_pin_desc_by_pin_number(pctl, pin); + if (!pin_desc) + return; + + seq_printf(s, "%d (%s) - %s - %s - %s %s", alt, + pin_desc->functions[alt + 1].name, drive ? 
"open drain" : "push pull", biasing[bias], speeds[speed], "speed"); @@ -1256,9 +1294,9 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode if (IS_ERR(bank->base)) return PTR_ERR(bank->base); - err = clk_prepare(bank->clk); + err = clk_prepare_enable(bank->clk); if (err) { - dev_err(dev, "failed to prepare clk (%d)\n", err); + dev_err(dev, "failed to prepare_enable clk (%d)\n", err); return err; } @@ -1297,6 +1335,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode bank->gpio_chip.parent = dev; bank->bank_nr = bank_nr; bank->bank_ioport_nr = bank_ioport_nr; + bank->secure_control = pctl->match_data->secure_control; spin_lock_init(&bank->lock); /* create irq hierarchical domain */ @@ -1306,21 +1345,28 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode STM32_GPIO_IRQ_LINE, bank->fwnode, &stm32_gpio_domain_ops, bank); - if (!bank->domain) - return -ENODEV; + if (!bank->domain) { + err = -ENODEV; + goto err_clk; + } err = gpiochip_add_data(&bank->gpio_chip, bank); if (err) { dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr); - return err; + goto err_clk; } dev_info(dev, "%s bank added\n", bank->gpio_chip.label); return 0; + +err_clk: + clk_disable_unprepare(bank->clk); + return err; } -static struct irq_domain *stm32_pctrl_get_irq_domain(struct device_node *np) +static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct device_node *parent; struct irq_domain *domain; @@ -1424,7 +1470,8 @@ static int stm32_pctrl_create_pins_tab(struct stm32_pinctrl *pctl, if (pctl->pkg && !(pctl->pkg & p->pkg)) continue; pins->pin = p->pin; - pins->functions = p->functions; + memcpy((struct stm32_desc_pin *)pins->functions, p->functions, + STM32_CONFIG_NUM * sizeof(struct stm32_desc_function)); pins++; nb_pins_available++; } @@ -1436,23 +1483,19 @@ static int stm32_pctrl_create_pins_tab(struct stm32_pinctrl *pctl, int stm32_pctl_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; + const struct stm32_pinctrl_match_data *match_data; struct fwnode_handle *child; - const struct of_device_id *match; struct device *dev = &pdev->dev; struct stm32_pinctrl *pctl; struct pinctrl_pin_desc *pins; int i, ret, hwlock_id; unsigned int banks; - if (!np) + match_data = device_get_match_data(dev); + if (!match_data) return -EINVAL; - match = of_match_device(dev->driver->of_match_table, dev); - if (!match || !match->data) - return -EINVAL; - - if (!of_find_property(np, "pins-are-numbered", NULL)) { + if (!device_property_present(dev, "pins-are-numbered")) { dev_err(dev, "only support pins-are-numbered format\n"); return -EINVAL; } @@ -1464,7 +1507,7 @@ int stm32_pctl_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pctl); /* check for IRQ controller (may require deferred probe) */ - pctl->domain = stm32_pctrl_get_irq_domain(np); + pctl->domain = stm32_pctrl_get_irq_domain(pdev); if (IS_ERR(pctl->domain)) return PTR_ERR(pctl->domain); @@ -1480,10 +1523,10 @@ int stm32_pctl_probe(struct platform_device *pdev) spin_lock_init(&pctl->irqmux_lock); pctl->dev = dev; - pctl->match_data = match->data; + pctl->match_data = match_data; /* get optional package information */ - if (!of_property_read_u32(np, "st,package", &pctl->pkg)) + if (!device_property_read_u32(dev, "st,package", &pctl->pkg)) dev_dbg(pctl->dev, "package detected: %x\n", pctl->pkg); pctl->pins = devm_kcalloc(pctl->dev, pctl->match_data->npins, @@ 
-1568,6 +1611,10 @@ int stm32_pctl_probe(struct platform_device *pdev) ret = stm32_gpiolib_register_bank(pctl, child); if (ret) { fwnode_handle_put(child); + + for (i = 0; i < pctl->nbanks; i++) + clk_disable_unprepare(pctl->banks[i].clk); + return ret; } @@ -1593,6 +1640,9 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs( if (!range) return 0; + if (!gpiochip_line_is_valid(range->gc, offset)) + return 0; + pin_is_irq = gpiochip_line_is_irq(range->gc, offset); if (!desc || (!pin_is_irq && !desc->gpio_owner)) @@ -1639,12 +1689,26 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs( return 0; } +int __maybe_unused stm32_pinctrl_suspend(struct device *dev) +{ + struct stm32_pinctrl *pctl = dev_get_drvdata(dev); + int i; + + for (i = 0; i < pctl->nbanks; i++) + clk_disable(pctl->banks[i].clk); + + return 0; +} + int __maybe_unused stm32_pinctrl_resume(struct device *dev) { struct stm32_pinctrl *pctl = dev_get_drvdata(dev); struct stm32_pinctrl_group *g = pctl->groups; int i; + for (i = 0; i < pctl->nbanks; i++) + clk_enable(pctl->banks[i].clk); + for (i = 0; i < pctl->ngroups; i++, g++) stm32_pinctrl_restore_gpio_regs(pctl, g->pin); diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.h b/drivers/pinctrl/stm32/pinctrl-stm32.h index b0882d120765..e0c31c4c8bca 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.h +++ b/drivers/pinctrl/stm32/pinctrl-stm32.h @@ -17,6 +17,7 @@ #define STM32_PIN_GPIO 0 #define STM32_PIN_AF(x) ((x) + 1) #define STM32_PIN_ANALOG (STM32_PIN_AF(15) + 1) +#define STM32_CONFIG_NUM (STM32_PIN_ANALOG + 1) /* package information */ #define STM32MP_PKG_AA BIT(0) @@ -31,26 +32,26 @@ struct stm32_desc_function { struct stm32_desc_pin { struct pinctrl_pin_desc pin; - const struct stm32_desc_function *functions; + const struct stm32_desc_function functions[STM32_CONFIG_NUM]; const unsigned int pkg; }; #define STM32_PIN(_pin, ...) \ { \ .pin = _pin, \ - .functions = (struct stm32_desc_function[]){ \ - __VA_ARGS__, { } }, \ + .functions = { \ + __VA_ARGS__}, \ } #define STM32_PIN_PKG(_pin, _pkg, ...) 
\ { \ .pin = _pin, \ .pkg = _pkg, \ - .functions = (struct stm32_desc_function[]){ \ - __VA_ARGS__, { } }, \ + .functions = { \ + __VA_ARGS__}, \ } #define STM32_FUNCTION(_num, _name) \ - { \ + [_num] = { \ .num = _num, \ .name = _name, \ } @@ -58,6 +59,7 @@ struct stm32_desc_pin { struct stm32_pinctrl_match_data { const struct stm32_desc_pin *pins; const unsigned int npins; + bool secure_control; }; struct stm32_gpio_bank; @@ -65,6 +67,7 @@ struct stm32_gpio_bank; int stm32_pctl_probe(struct platform_device *pdev); void stm32_pmx_get_mode(struct stm32_gpio_bank *bank, int pin, u32 *mode, u32 *alt); +int stm32_pinctrl_suspend(struct device *dev); int stm32_pinctrl_resume(struct device *dev); #endif /* __PINCTRL_STM32_H */ diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp135.c b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c index 4ab03520c407..fde1df191c24 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32mp135.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32mp135.c @@ -1649,6 +1649,7 @@ static const struct stm32_desc_pin stm32mp135_pins[] = { static struct stm32_pinctrl_match_data stm32mp135_match_data = { .pins = stm32mp135_pins, .npins = ARRAY_SIZE(stm32mp135_pins), + .secure_control = true, }; static const struct of_device_id stm32mp135_pctrl_match[] = { @@ -1660,7 +1661,7 @@ static const struct of_device_id stm32mp135_pctrl_match[] = { }; static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume) }; static struct platform_driver stm32mp135_pinctrl_driver = { diff --git a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c index 2ccb99d64df8..91b2fc8ddbdb 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32mp157.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32mp157.c @@ -2343,7 +2343,7 @@ static const struct of_device_id stm32mp157_pctrl_match[] = { }; static const struct dev_pm_ops stm32_pinctrl_dev_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, stm32_pinctrl_resume) + SET_LATE_SYSTEM_SLEEP_PM_OPS(stm32_pinctrl_suspend, stm32_pinctrl_resume) }; static struct platform_driver stm32mp157_pinctrl_driver = { diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c index 5c1dfcb46749..f6c5d5e6dbb6 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra194.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c @@ -1110,24 +1110,15 @@ static const unsigned int sdmmc4_dat0_pins[] = { static const unsigned int sdmmc1_comp_pins[] = { TEGRA_PIN_SDMMC1_COMP, }; -static const unsigned int sdmmc1_hv_trim_pins[] = { - TEGRA_PIN_SDMMC1_HV_TRIM, -}; static const unsigned int sdmmc3_comp_pins[] = { TEGRA_PIN_SDMMC3_COMP, }; -static const unsigned int sdmmc3_hv_trim_pins[] = { - TEGRA_PIN_SDMMC3_HV_TRIM, -}; static const unsigned int eqos_comp_pins[] = { TEGRA_PIN_EQOS_COMP, }; static const unsigned int qspi_comp_pins[] = { TEGRA_PIN_QSPI_COMP, }; -static const unsigned int sys_reset_n_pins[] = { - TEGRA_PIN_SYS_RESET_N, -}; static const unsigned int shutdown_n_pins[] = { TEGRA_PIN_SHUTDOWN_N, }; diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c index 386389ffec41..d8c5f9195f85 100644 --- a/drivers/platform/mips/cpu_hwmon.c +++ b/drivers/platform/mips/cpu_hwmon.c @@ -55,55 +55,6 @@ out: static int nr_packages; static struct device *cpu_hwmon_dev; -static SENSOR_DEVICE_ATTR(name, 0444, NULL, NULL, 0); - -static struct attribute *cpu_hwmon_attributes[] = { - &sensor_dev_attr_name.dev_attr.attr, - 
NULL -}; - -/* Hwmon device attribute group */ -static struct attribute_group cpu_hwmon_attribute_group = { - .attrs = cpu_hwmon_attributes, -}; - -static ssize_t get_cpu_temp(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t cpu_temp_label(struct device *dev, - struct device_attribute *attr, char *buf); - -static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1); -static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1); -static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2); -static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2); -static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3); -static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3); -static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4); -static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4); - -static const struct attribute *hwmon_cputemp[4][3] = { - { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - NULL - }, - { - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL - }, - { - &sensor_dev_attr_temp3_input.dev_attr.attr, - &sensor_dev_attr_temp3_label.dev_attr.attr, - NULL - }, - { - &sensor_dev_attr_temp4_input.dev_attr.attr, - &sensor_dev_attr_temp4_label.dev_attr.attr, - NULL - } -}; - static ssize_t cpu_temp_label(struct device *dev, struct device_attribute *attr, char *buf) { @@ -121,24 +72,47 @@ static ssize_t get_cpu_temp(struct device *dev, return sprintf(buf, "%d\n", value); } -static int create_sysfs_cputemp_files(struct kobject *kobj) -{ - int i, ret = 0; - - for (i = 0; i < nr_packages; i++) - ret = sysfs_create_files(kobj, hwmon_cputemp[i]); +static SENSOR_DEVICE_ATTR(temp1_input, 0444, get_cpu_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_label, 0444, cpu_temp_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_input, 0444, get_cpu_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_label, 0444, cpu_temp_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_input, 0444, get_cpu_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_label, 0444, cpu_temp_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_input, 0444, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_label, 0444, cpu_temp_label, NULL, 4); - return ret; -} +static struct attribute *cpu_hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + NULL +}; -static void remove_sysfs_cputemp_files(struct kobject *kobj) +static umode_t cpu_hwmon_is_visible(struct kobject *kobj, + struct attribute *attr, int i) { - int i; + int id = i / 2; - for (i = 0; i < nr_packages; i++) - sysfs_remove_files(kobj, hwmon_cputemp[i]); + if (id < nr_packages) + return attr->mode; + return 0; } +static struct attribute_group cpu_hwmon_group = { + .attrs = cpu_hwmon_attributes, + .is_visible = cpu_hwmon_is_visible, +}; + +static const struct attribute_group *cpu_hwmon_groups[] = { + &cpu_hwmon_group, + NULL +}; + #define CPU_THERMAL_THRESHOLD 90000 static struct delayed_work thermal_work; @@ -159,50 +133,31 @@ static void do_thermal_timer(struct work_struct *work) static int __init loongson_hwmon_init(void) { - int ret; - 
pr_info("Loongson Hwmon Enter...\n"); if (cpu_has_csr()) csr_temp_enable = csr_readl(LOONGSON_CSR_FEATURES) & LOONGSON_CSRF_TEMP; - cpu_hwmon_dev = hwmon_device_register_with_info(NULL, "cpu_hwmon", NULL, NULL, NULL); - if (IS_ERR(cpu_hwmon_dev)) { - ret = PTR_ERR(cpu_hwmon_dev); - pr_err("hwmon_device_register fail!\n"); - goto fail_hwmon_device_register; - } - nr_packages = loongson_sysconf.nr_cpus / loongson_sysconf.cores_per_package; - ret = create_sysfs_cputemp_files(&cpu_hwmon_dev->kobj); - if (ret) { - pr_err("fail to create cpu temperature interface!\n"); - goto fail_create_sysfs_cputemp_files; + cpu_hwmon_dev = hwmon_device_register_with_groups(NULL, "cpu_hwmon", + NULL, cpu_hwmon_groups); + if (IS_ERR(cpu_hwmon_dev)) { + pr_err("hwmon_device_register fail!\n"); + return PTR_ERR(cpu_hwmon_dev); } INIT_DEFERRABLE_WORK(&thermal_work, do_thermal_timer); schedule_delayed_work(&thermal_work, msecs_to_jiffies(20000)); - return ret; - -fail_create_sysfs_cputemp_files: - sysfs_remove_group(&cpu_hwmon_dev->kobj, - &cpu_hwmon_attribute_group); - hwmon_device_unregister(cpu_hwmon_dev); - -fail_hwmon_device_register: - return ret; + return 0; } static void __exit loongson_hwmon_exit(void) { cancel_delayed_work_sync(&thermal_work); - remove_sysfs_cputemp_files(&cpu_hwmon_dev->kobj); - sysfs_remove_group(&cpu_hwmon_dev->kobj, - &cpu_hwmon_attribute_group); hwmon_device_unregister(cpu_hwmon_dev); } diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 97ac588a9e9c..ec8a404d71b4 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -3037,13 +3037,6 @@ static int ab8500_fg_bind(struct device *dev, struct device *master, { struct ab8500_fg *di = dev_get_drvdata(dev); - /* Create a work queue for running the FG algorithm */ - di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM); - if (di->fg_wq == NULL) { - dev_err(dev, "failed to create work queue\n"); - return -ENOMEM; - } - di->bat_cap.max_mah_design = di->bm->bi->charge_full_design_uah; di->bat_cap.max_mah = di->bat_cap.max_mah_design; di->vbat_nom_uv = di->bm->bi->voltage_max_design_uv; @@ -3067,8 +3060,7 @@ static void ab8500_fg_unbind(struct device *dev, struct device *master, if (ret) dev_err(dev, "failed to disable coulomb counter\n"); - destroy_workqueue(di->fg_wq); - flush_scheduled_work(); + flush_workqueue(di->fg_wq); } static const struct component_ops ab8500_fg_component_ops = { @@ -3117,6 +3109,13 @@ static int ab8500_fg_probe(struct platform_device *pdev) ab8500_fg_charge_state_to(di, AB8500_FG_CHARGE_INIT); ab8500_fg_discharge_state_to(di, AB8500_FG_DISCHARGE_INIT); + /* Create a work queue for running the FG algorithm */ + di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM); + if (di->fg_wq == NULL) { + dev_err(dev, "failed to create work queue\n"); + return -ENOMEM; + } + /* Init work for running the fg algorithm instantly */ INIT_WORK(&di->fg_work, ab8500_fg_instant_work); @@ -3227,6 +3226,8 @@ static int ab8500_fg_remove(struct platform_device *pdev) { struct ab8500_fg *di = platform_get_drvdata(pdev); + destroy_workqueue(di->fg_wq); + flush_scheduled_work(); component_del(&pdev->dev, &ab8500_fg_component_ops); list_del(&di->node); ab8500_fg_sysfs_exit(di); diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index e9f285dae489..8e6f8a655079 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -90,6 +90,8 @@ #define 
AXP288_REG_UPDATE_INTERVAL (60 * HZ) #define AXP288_FG_INTR_NUM 6 +#define AXP288_QUIRK_NO_BATTERY BIT(0) + static bool no_current_sense_res; module_param(no_current_sense_res, bool, 0444); MODULE_PARM_DESC(no_current_sense_res, "No (or broken) current sense resistor"); @@ -524,7 +526,7 @@ static struct power_supply_desc fuel_gauge_desc = { * detection reports one despite it not being there. * Please keep this listed sorted alphabetically. */ -static const struct dmi_system_id axp288_no_battery_list[] = { +static const struct dmi_system_id axp288_quirks[] = { { /* ACEPC T8 Cherry Trail Z8350 mini PC */ .matches = { @@ -534,6 +536,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = { /* also match on somewhat unique bios-version */ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* ACEPC T11 Cherry Trail Z8350 mini PC */ @@ -544,6 +547,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = { /* also match on somewhat unique bios-version */ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* Intel Cherry Trail Compute Stick, Windows version */ @@ -551,6 +555,7 @@ static const struct dmi_system_id axp288_no_battery_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* Intel Cherry Trail Compute Stick, version without an OS */ @@ -558,34 +563,54 @@ static const struct dmi_system_id axp288_no_battery_list[] = { DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* Meegopad T02 */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "MEEGOPAD T02"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* Mele PCG03 Mini PC */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Mini PC"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "Mini PC"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { /* Minix Neo Z83-4 mini PC */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MINIX"), DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"), - } + }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, { - /* Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks */ + /* + * One Mix 1, this uses the "T3 MRD" boardname used by + * generic mini PCs, but it is a mini laptop so it does + * actually have a battery! + */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"), + DMI_MATCH(DMI_BIOS_DATE, "06/14/2018"), + }, + .driver_data = NULL, + }, + { + /* + * Various Ace PC/Meegopad/MinisForum/Wintel Mini-PCs/HDMI-sticks + * This entry must be last because it is generic, this allows + * adding more specifuc quirks overriding this generic entry. 
+ */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"), DMI_MATCH(DMI_CHASSIS_TYPE, "3"), DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), - DMI_MATCH(DMI_BIOS_VERSION, "5.11"), }, + .driver_data = (void *)AXP288_QUIRK_NO_BATTERY, }, {} }; @@ -665,7 +690,9 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev) [BAT_D_CURR] = "axp288-chrg-d-curr", [BAT_VOLT] = "axp288-batt-volt", }; + const struct dmi_system_id *dmi_id; struct device *dev = &pdev->dev; + unsigned long quirks = 0; int i, pirq, ret; /* @@ -675,7 +702,11 @@ static int axp288_fuel_gauge_probe(struct platform_device *pdev) if (!acpi_quirk_skip_acpi_ac_and_battery()) return -ENODEV; - if (dmi_check_system(axp288_no_battery_list)) + dmi_id = dmi_first_match(axp288_quirks); + if (dmi_id) + quirks = (unsigned long)dmi_id->driver_data; + + if (quirks & AXP288_QUIRK_NO_BATTERY) return -ENODEV; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index aa1a589eb9f2..27f5c7648617 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -455,11 +455,9 @@ static ssize_t bq24190_sysfs_show(struct device *dev, if (!info) return -EINVAL; - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v); if (ret) @@ -490,11 +488,9 @@ static ssize_t bq24190_sysfs_store(struct device *dev, if (ret < 0) return ret; - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v); if (ret) @@ -512,10 +508,9 @@ static int bq24190_set_otg_vbus(struct bq24190_dev_info *bdi, bool enable) union power_supply_propval val = { .intval = bdi->charge_type }; int ret; - ret = pm_runtime_get_sync(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); if (ret < 0) { dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret); - pm_runtime_put_noidle(bdi->dev); return ret; } @@ -551,10 +546,9 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev) int ret; u8 val; - ret = pm_runtime_get_sync(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); if (ret < 0) { dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret); - pm_runtime_put_noidle(bdi->dev); return ret; } @@ -1128,11 +1122,9 @@ static int bq24190_charger_get_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: @@ -1204,11 +1196,9 @@ static int bq24190_charger_set_property(struct power_supply *psy, dev_dbg(bdi->dev, "prop: %d\n", psp); - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } switch (psp) { case POWER_SUPPLY_PROP_ONLINE: @@ -1477,11 +1467,9 @@ static int bq24190_battery_get_property(struct power_supply *psy, dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n"); dev_dbg(bdi->dev, "prop: %d\n", psp); - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = 
pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } switch (psp) { case POWER_SUPPLY_PROP_STATUS: @@ -1525,11 +1513,9 @@ static int bq24190_battery_set_property(struct power_supply *psy, dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n"); dev_dbg(bdi->dev, "prop: %d\n", psp); - ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) { - pm_runtime_put_noidle(bdi->dev); + ret = pm_runtime_resume_and_get(bdi->dev); + if (ret < 0) return ret; - } switch (psp) { case POWER_SUPPLY_PROP_ONLINE: @@ -1683,10 +1669,9 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data) int error; bdi->irq_event = true; - error = pm_runtime_get_sync(bdi->dev); + error = pm_runtime_resume_and_get(bdi->dev); if (error < 0) { dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); - pm_runtime_put_noidle(bdi->dev); return IRQ_NONE; } bq24190_check_status(bdi); @@ -1921,11 +1906,9 @@ static int bq24190_remove(struct i2c_client *client) struct bq24190_dev_info *bdi = i2c_get_clientdata(client); int error; - error = pm_runtime_get_sync(bdi->dev); - if (error < 0) { + error = pm_runtime_resume_and_get(bdi->dev); + if (error < 0) dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); - pm_runtime_put_noidle(bdi->dev); - } bq24190_register_reset(bdi); if (bdi->battery) @@ -1982,11 +1965,9 @@ static __maybe_unused int bq24190_pm_suspend(struct device *dev) struct bq24190_dev_info *bdi = i2c_get_clientdata(client); int error; - error = pm_runtime_get_sync(bdi->dev); - if (error < 0) { + error = pm_runtime_resume_and_get(bdi->dev); + if (error < 0) dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); - pm_runtime_put_noidle(bdi->dev); - } bq24190_register_reset(bdi); @@ -2007,11 +1988,9 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev) bdi->f_reg = 0; bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */ - error = pm_runtime_get_sync(bdi->dev); - if (error < 0) { + error = pm_runtime_resume_and_get(bdi->dev); + if (error < 0) dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error); - pm_runtime_put_noidle(bdi->dev); - } bq24190_register_reset(bdi); bq24190_set_config(bdi); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 72e727cd31e8..35e6a394c0df 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1572,14 +1572,6 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg) */ static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di) { - int flags; - - if (di->opts & BQ27XXX_O_ZERO) { - flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, true); - if (flags >= 0 && (flags & BQ27000_FLAG_CI)) - return -ENODATA; - } - return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC); } @@ -1742,6 +1734,18 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF); } +/* + * Returns true if reported battery capacity is inaccurate + */ +static bool bq27xxx_battery_capacity_inaccurate(struct bq27xxx_device_info *di, + u16 flags) +{ + if (di->opts & BQ27XXX_O_HAS_CI) + return (flags & BQ27000_FLAG_CI); + else + return false; +} + static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) { /* Unlikely but important to return first */ @@ -1751,6 +1755,8 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) return POWER_SUPPLY_HEALTH_COLD; if (unlikely(bq27xxx_battery_dead(di, di->cache.flags))) return 
POWER_SUPPLY_HEALTH_DEAD; + if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags))) + return POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED; return POWER_SUPPLY_HEALTH_GOOD; } @@ -1758,7 +1764,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) void bq27xxx_battery_update(struct bq27xxx_device_info *di) { struct bq27xxx_reg_cache cache = {0, }; - bool has_ci_flag = di->opts & BQ27XXX_O_HAS_CI; bool has_singe_flag = di->opts & BQ27XXX_O_ZERO; cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag); @@ -1766,30 +1771,19 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di) cache.flags = -1; /* read error */ if (cache.flags >= 0) { cache.temperature = bq27xxx_battery_read_temperature(di); - if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) { - dev_info_once(di->dev, "battery is not calibrated! ignoring capacity values\n"); - cache.capacity = -ENODATA; - cache.energy = -ENODATA; - cache.time_to_empty = -ENODATA; - cache.time_to_empty_avg = -ENODATA; - cache.time_to_full = -ENODATA; - cache.charge_full = -ENODATA; - cache.health = -ENODATA; - } else { - if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) - cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); - if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) - cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); - if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) - cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); - - cache.charge_full = bq27xxx_battery_read_fcc(di); - cache.capacity = bq27xxx_battery_read_soc(di); - if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) - cache.energy = bq27xxx_battery_read_energy(di); - di->cache.flags = cache.flags; - cache.health = bq27xxx_battery_read_health(di); - } + if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) + cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); + if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) + cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); + if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) + cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); + + cache.charge_full = bq27xxx_battery_read_fcc(di); + cache.capacity = bq27xxx_battery_read_soc(di); + if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) + cache.energy = bq27xxx_battery_read_energy(di); + di->cache.flags = cache.flags; + cache.health = bq27xxx_battery_read_health(di); if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) cache.cycle_count = bq27xxx_battery_read_cyct(di); diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index d925cb137e12..fad5890c899e 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -616,7 +616,7 @@ int power_supply_get_battery_info(struct power_supply *psy, goto out_put_node; } - info = devm_kmalloc(&psy->dev, sizeof(*info), GFP_KERNEL); + info = devm_kzalloc(&psy->dev, sizeof(*info), GFP_KERNEL); if (!info) { err = -ENOMEM; goto out_put_node; diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 21e3b05a5153..904de8d61828 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -572,6 +572,17 @@ config PWM_SUN4I To compile this driver as a module, choose M here: the module will be called pwm-sun4i. +config PWM_SUNPLUS + tristate "Sunplus PWM support" + depends on ARCH_SUNPLUS || COMPILE_TEST + depends on HAS_IOMEM && OF + help + Generic PWM framework driver for the PWM controller on + Sunplus SoCs. 
+ + To compile this driver as a module, choose M here: the module + will be called pwm-sunplus. + config PWM_TEGRA tristate "NVIDIA Tegra PWM support" depends on ARCH_TEGRA || COMPILE_TEST @@ -640,4 +651,18 @@ config PWM_VT8500 To compile this driver as a module, choose M here: the module will be called pwm-vt8500. +config PWM_XILINX + tristate "Xilinx AXI Timer PWM support" + depends on OF_ADDRESS + depends on COMMON_CLK + select REGMAP_MMIO + help + PWM driver for Xilinx LogiCORE IP AXI timers. This timer is + typically a soft core which may be present in Xilinx FPGAs. + This device may also be present in Microblaze soft processors. + If you don't have this IP in your design, choose N. + + To compile this driver as a module, choose M here: the module + will be called pwm-xilinx. + endif diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 708840b7fba8..5c08bdb817b4 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_PWM_STM32) += pwm-stm32.o obj-$(CONFIG_PWM_STM32_LP) += pwm-stm32-lp.o obj-$(CONFIG_PWM_STMPE) += pwm-stmpe.o obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o +obj-$(CONFIG_PWM_SUNPLUS) += pwm-sunplus.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o @@ -60,3 +61,4 @@ obj-$(CONFIG_PWM_TWL) += pwm-twl.o obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o obj-$(CONFIG_PWM_VISCONTI) += pwm-visconti.o obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o +obj-$(CONFIG_PWM_XILINX) += pwm-xilinx.o diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 36f7ea381838..3977a0f9d132 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -61,7 +61,7 @@ struct atmel_tcb_pwm_chip { struct atmel_tcb_channel bkup; }; -const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, }; +static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, }; static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip) { @@ -72,7 +72,8 @@ static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { - struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); + struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); + struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm]; tcbpwm->polarity = polarity; @@ -97,7 +98,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, return ret; } - pwm_set_chip_data(pwm, tcbpwm); tcbpwm->polarity = PWM_POLARITY_NORMAL; tcbpwm->duty = 0; tcbpwm->period = 0; @@ -139,7 +139,7 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip, static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); - struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); + struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm]; clk_disable_unprepare(tcbpwmc->clk); tcbpwmc->pwms[pwm->hwpwm] = NULL; @@ -149,7 +149,7 @@ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); - struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); + struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm]; unsigned cmr; enum pwm_polarity polarity = tcbpwm->polarity; @@ -206,7 +206,7 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 
{ struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); - struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); + struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm]; u32 cmr; enum pwm_polarity polarity = tcbpwm->polarity; @@ -291,7 +291,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip); - struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm); + struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm]; struct atmel_tcb_pwm_device *atcbpwm = NULL; int i = 0; int slowclk = 0; diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index d7ad88685830..b0d91142da8d 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -23,29 +23,6 @@ static inline struct clps711x_chip *to_clps711x_chip(struct pwm_chip *chip) return container_of(chip, struct clps711x_chip, chip); } -static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v) -{ - /* PWM0 - bits 4..7, PWM1 - bits 8..11 */ - u32 shift = (n + 1) * 4; - unsigned long flags; - u32 tmp; - - spin_lock_irqsave(&priv->lock, flags); - - tmp = readl(priv->pmpcon); - tmp &= ~(0xf << shift); - tmp |= v << shift; - writel(tmp, priv->pmpcon); - - spin_unlock_irqrestore(&priv->lock, flags); -} - -static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v) -{ - /* Duty cycle 0..15 max */ - return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period); -} - static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct clps711x_chip *priv = to_clps711x_chip(chip); @@ -60,44 +37,41 @@ static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) return 0; } -static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) { struct clps711x_chip *priv = to_clps711x_chip(chip); - unsigned int duty; + /* PWM0 - bits 4..7, PWM1 - bits 8..11 */ + u32 shift = (pwm->hwpwm + 1) * 4; + unsigned long flags; + u32 pmpcon, val; - if (period_ns != pwm->args.period) + if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; - duty = clps711x_get_duty(pwm, duty_ns); - clps711x_pwm_update_val(priv, pwm->hwpwm, duty); - - return 0; -} + if (state->period != pwm->args.period) + return -EINVAL; -static int clps711x_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct clps711x_chip *priv = to_clps711x_chip(chip); - unsigned int duty; + if (state->enabled) + val = mul_u64_u64_div_u64(state->duty_cycle, 0xf, state->period); + else + val = 0; - duty = clps711x_get_duty(pwm, pwm_get_duty_cycle(pwm)); - clps711x_pwm_update_val(priv, pwm->hwpwm, duty); + spin_lock_irqsave(&priv->lock, flags); - return 0; -} + pmpcon = readl(priv->pmpcon); + pmpcon &= ~(0xf << shift); + pmpcon |= val << shift; + writel(pmpcon, priv->pmpcon); -static void clps711x_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct clps711x_chip *priv = to_clps711x_chip(chip); + spin_unlock_irqrestore(&priv->lock, flags); - clps711x_pwm_update_val(priv, pwm->hwpwm, 0); + return 0; } static const struct pwm_ops clps711x_pwm_ops = { .request = clps711x_pwm_request, - .config = clps711x_pwm_config, - .enable = clps711x_pwm_enable, - .disable = clps711x_pwm_disable, + .apply = clps711x_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-cros-ec.c 
b/drivers/pwm/pwm-cros-ec.c index 5e29d9c682c3..7f10f56c3eb6 100644 --- a/drivers/pwm/pwm-cros-ec.c +++ b/drivers/pwm/pwm-cros-ec.c @@ -12,17 +12,21 @@ #include <linux/pwm.h> #include <linux/slab.h> +#include <dt-bindings/mfd/cros_ec.h> + /** * struct cros_ec_pwm_device - Driver data for EC PWM * * @dev: Device node * @ec: Pointer to EC device * @chip: PWM controller chip + * @use_pwm_type: Use PWM types instead of generic channels */ struct cros_ec_pwm_device { struct device *dev; struct cros_ec_device *ec; struct pwm_chip chip; + bool use_pwm_type; }; /** @@ -58,14 +62,31 @@ static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) kfree(channel); } -static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty) +static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type) { + switch (dt_index) { + case CROS_EC_PWM_DT_KB_LIGHT: + *pwm_type = EC_PWM_TYPE_KB_LIGHT; + return 0; + case CROS_EC_PWM_DT_DISPLAY_LIGHT: + *pwm_type = EC_PWM_TYPE_DISPLAY_LIGHT; + return 0; + default: + return -EINVAL; + } +} + +static int cros_ec_pwm_set_duty(struct cros_ec_pwm_device *ec_pwm, u8 index, + u16 duty) +{ + struct cros_ec_device *ec = ec_pwm->ec; struct { struct cros_ec_command msg; struct ec_params_pwm_set_duty params; } __packed buf; struct ec_params_pwm_set_duty *params = &buf.params; struct cros_ec_command *msg = &buf.msg; + int ret; memset(&buf, 0, sizeof(buf)); @@ -75,14 +96,25 @@ static int cros_ec_pwm_set_duty(struct cros_ec_device *ec, u8 index, u16 duty) msg->outsize = sizeof(*params); params->duty = duty; - params->pwm_type = EC_PWM_TYPE_GENERIC; - params->index = index; + + if (ec_pwm->use_pwm_type) { + ret = cros_ec_dt_type_to_pwm_type(index, ¶ms->pwm_type); + if (ret) { + dev_err(ec->dev, "Invalid PWM type index: %d\n", index); + return ret; + } + params->index = 0; + } else { + params->pwm_type = EC_PWM_TYPE_GENERIC; + params->index = index; + } return cros_ec_cmd_xfer_status(ec, msg); } -static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index) +static int cros_ec_pwm_get_duty(struct cros_ec_pwm_device *ec_pwm, u8 index) { + struct cros_ec_device *ec = ec_pwm->ec; struct { struct cros_ec_command msg; union { @@ -102,8 +134,17 @@ static int cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index) msg->insize = sizeof(*resp); msg->outsize = sizeof(*params); - params->pwm_type = EC_PWM_TYPE_GENERIC; - params->index = index; + if (ec_pwm->use_pwm_type) { + ret = cros_ec_dt_type_to_pwm_type(index, ¶ms->pwm_type); + if (ret) { + dev_err(ec->dev, "Invalid PWM type index: %d\n", index); + return ret; + } + params->index = 0; + } else { + params->pwm_type = EC_PWM_TYPE_GENERIC; + params->index = index; + } ret = cros_ec_cmd_xfer_status(ec, msg); if (ret < 0) @@ -133,7 +174,7 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, */ duty_cycle = state->enabled ? 
state->duty_cycle : 0; - ret = cros_ec_pwm_set_duty(ec_pwm->ec, pwm->hwpwm, duty_cycle); + ret = cros_ec_pwm_set_duty(ec_pwm, pwm->hwpwm, duty_cycle); if (ret < 0) return ret; @@ -149,7 +190,7 @@ static void cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct cros_ec_pwm *channel = pwm_get_chip_data(pwm); int ret; - ret = cros_ec_pwm_get_duty(ec_pwm->ec, pwm->hwpwm); + ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm); if (ret < 0) { dev_err(chip->dev, "error getting initial duty: %d\n", ret); return; @@ -204,13 +245,13 @@ static const struct pwm_ops cros_ec_pwm_ops = { * of PWMs it supports directly, so we have to read the pwm duty cycle for * subsequent channels until we get an error. */ -static int cros_ec_num_pwms(struct cros_ec_device *ec) +static int cros_ec_num_pwms(struct cros_ec_pwm_device *ec_pwm) { int i, ret; /* The index field is only 8 bits */ for (i = 0; i <= U8_MAX; i++) { - ret = cros_ec_pwm_get_duty(ec, i); + ret = cros_ec_pwm_get_duty(ec_pwm, i); /* * We look for SUCCESS, INVALID_COMMAND, or INVALID_PARAM * responses; everything else is treated as an error. @@ -236,6 +277,7 @@ static int cros_ec_pwm_probe(struct platform_device *pdev) { struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent); struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; struct cros_ec_pwm_device *ec_pwm; struct pwm_chip *chip; int ret; @@ -251,17 +293,26 @@ static int cros_ec_pwm_probe(struct platform_device *pdev) chip = &ec_pwm->chip; ec_pwm->ec = ec; + if (of_device_is_compatible(np, "google,cros-ec-pwm-type")) + ec_pwm->use_pwm_type = true; + /* PWM chip */ chip->dev = dev; chip->ops = &cros_ec_pwm_ops; chip->of_xlate = cros_ec_pwm_xlate; chip->of_pwm_n_cells = 1; - ret = cros_ec_num_pwms(ec); - if (ret < 0) { - dev_err(dev, "Couldn't find PWMs: %d\n", ret); - return ret; + + if (ec_pwm->use_pwm_type) { + chip->npwm = CROS_EC_PWM_DT_COUNT; + } else { + ret = cros_ec_num_pwms(ec_pwm); + if (ret < 0) { + dev_err(dev, "Couldn't find PWMs: %d\n", ret); + return ret; + } + chip->npwm = ret; } - chip->npwm = ret; + dev_dbg(dev, "Probed %u PWMs\n", chip->npwm); ret = pwmchip_add(chip); @@ -288,6 +339,7 @@ static int cros_ec_pwm_remove(struct platform_device *dev) #ifdef CONFIG_OF static const struct of_device_id cros_ec_pwm_of_match[] = { { .compatible = "google,cros-ec-pwm" }, + { .compatible = "google,cros-ec-pwm-type" }, {}, }; MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match); diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c index ea17d446a627..215ef9069114 100644 --- a/drivers/pwm/pwm-lp3943.c +++ b/drivers/pwm/pwm-lp3943.c @@ -93,7 +93,7 @@ static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) } static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) + u64 duty_ns, u64 period_ns) { struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip); struct lp3943 *lp3943 = lp3943_pwm->lp3943; @@ -118,14 +118,20 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, reg_duty = LP3943_REG_PWM1; } - period_ns = clamp(period_ns, LP3943_MIN_PERIOD, LP3943_MAX_PERIOD); - val = (u8)(period_ns / LP3943_MIN_PERIOD - 1); + /* + * Note that after this clamping, period_ns fits into an int. This is + * helpful because we can resort to integer division below instead of + * the (more expensive) 64 bit division. 
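+ * The clamp also guarantees period_ns >= LP3943_MIN_PERIOD, so the
+ * prescale value computed below is never negative, and duty_ns is
+ * capped to period_ns just below, keeping the duty value within
+ * LP3943_MAX_DUTY.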
+ */ + period_ns = clamp(period_ns, (u64)LP3943_MIN_PERIOD, (u64)LP3943_MAX_PERIOD); + val = (u8)((int)period_ns / LP3943_MIN_PERIOD - 1); err = lp3943_write_byte(lp3943, reg_prescale, val); if (err) return err; - val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns); + duty_ns = min(duty_ns, period_ns); + val = (u8)((int)duty_ns * LP3943_MAX_DUTY / (int)period_ns); return lp3943_write_byte(lp3943, reg_duty, val); } @@ -182,12 +188,34 @@ static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) lp3943_pwm_set_mode(lp3943_pwm, pwm_map, LP3943_GPIO_OUT_HIGH); } +static int lp3943_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + lp3943_pwm_disable(chip, pwm); + return 0; + } + + err = lp3943_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = lp3943_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops lp3943_pwm_ops = { .request = lp3943_pwm_request, .free = lp3943_pwm_free, - .config = lp3943_pwm_config, - .enable = lp3943_pwm_enable, - .disable = lp3943_pwm_disable, + .apply = lp3943_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c index b909096dba2f..272e0b5d01b8 100644 --- a/drivers/pwm/pwm-lpc18xx-sct.c +++ b/drivers/pwm/pwm-lpc18xx-sct.c @@ -226,14 +226,7 @@ static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } -static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip, - struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - return 0; -} - -static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip); struct lpc18xx_pwm_data *lpc18xx_data = &lpc18xx_pwm->channeldata[pwm->hwpwm]; @@ -249,7 +242,7 @@ static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event), LPC18XX_PWM_EVSTATEMSK_ALL); - if (pwm_get_polarity(pwm) == PWM_POLARITY_NORMAL) { + if (polarity == PWM_POLARITY_NORMAL) { set_event = lpc18xx_pwm->period_event; clear_event = lpc18xx_data->duty_event; res_action = LPC18XX_PWM_RES_SET; @@ -308,11 +301,35 @@ static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map); } +static int lpc18xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + bool enabled = pwm->state.enabled; + + if (state->polarity != pwm->state.polarity && pwm->state.enabled) { + lpc18xx_pwm_disable(chip, pwm); + enabled = false; + } + + if (!state->enabled) { + if (enabled) + lpc18xx_pwm_disable(chip, pwm); + + return 0; + } + + err = lpc18xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!enabled) + err = lpc18xx_pwm_enable(chip, pwm, state->polarity); + + return err; +} static const struct pwm_ops lpc18xx_pwm_ops = { - .config = lpc18xx_pwm_config, - .set_polarity = lpc18xx_pwm_set_polarity, - .enable = lpc18xx_pwm_enable, - .disable = lpc18xx_pwm_disable, + .apply = lpc18xx_pwm_apply, .request = lpc18xx_pwm_request, .free = lpc18xx_pwm_free, .owner = THIS_MODULE, diff --git a/drivers/pwm/pwm-lpc32xx.c 
b/drivers/pwm/pwm-lpc32xx.c index ddeab5687cb8..86a0ea0f6955 100644 --- a/drivers/pwm/pwm-lpc32xx.c +++ b/drivers/pwm/pwm-lpc32xx.c @@ -88,10 +88,33 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) clk_disable_unprepare(lpc32xx->clk); } +static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + lpc32xx_pwm_disable(chip, pwm); + + return 0; + } + + err = lpc32xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = lpc32xx_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops lpc32xx_pwm_ops = { - .config = lpc32xx_pwm_config, - .enable = lpc32xx_pwm_enable, - .disable = lpc32xx_pwm_disable, + .apply = lpc32xx_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 568b13a48717..d28c0874c7f2 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -198,10 +198,33 @@ static void pwm_mediatek_disable(struct pwm_chip *chip, struct pwm_device *pwm) pwm_mediatek_clk_disable(chip, pwm); } +static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + pwm_mediatek_disable(chip, pwm); + + return 0; + } + + err = pwm_mediatek_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = pwm_mediatek_enable(chip, pwm); + + return err; +} + static const struct pwm_ops pwm_mediatek_ops = { - .config = pwm_mediatek_config, - .enable = pwm_mediatek_enable, - .disable = pwm_mediatek_disable, + .apply = pwm_mediatek_apply, .owner = THIS_MODULE, }; @@ -264,6 +287,12 @@ static const struct pwm_mediatek_of_data mt2712_pwm_data = { .has_ck_26m_sel = false, }; +static const struct pwm_mediatek_of_data mt6795_pwm_data = { + .num_pwms = 7, + .pwm45_fixup = false, + .has_ck_26m_sel = false, +}; + static const struct pwm_mediatek_of_data mt7622_pwm_data = { .num_pwms = 6, .pwm45_fixup = false, @@ -302,6 +331,7 @@ static const struct pwm_mediatek_of_data mt8516_pwm_data = { static const struct of_device_id pwm_mediatek_of_match[] = { { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data }, + { .compatible = "mediatek,mt6795-pwm", .data = &mt6795_pwm_data }, { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data }, { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data }, { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data }, diff --git a/drivers/pwm/pwm-raspberrypi-poe.c b/drivers/pwm/pwm-raspberrypi-poe.c index e52e29fc8231..6ff73029f367 100644 --- a/drivers/pwm/pwm-raspberrypi-poe.c +++ b/drivers/pwm/pwm-raspberrypi-poe.c @@ -66,7 +66,7 @@ static int raspberrypi_pwm_get_property(struct rpi_firmware *firmware, u32 reg, u32 *val) { struct raspberrypi_pwm_prop msg = { - .reg = reg + .reg = cpu_to_le32(reg), }; int ret; diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c index 4381df90a527..d7311614c846 100644 --- a/drivers/pwm/pwm-renesas-tpu.c +++ b/drivers/pwm/pwm-renesas-tpu.c @@ -89,71 +89,71 @@ struct tpu_device { #define to_tpu_device(c) container_of(c, struct tpu_device, chip) -static void tpu_pwm_write(struct tpu_pwm_device *pwm, int 
reg_nr, u16 value) +static void tpu_pwm_write(struct tpu_pwm_device *tpd, int reg_nr, u16 value) { - void __iomem *base = pwm->tpu->base + TPU_CHANNEL_OFFSET - + pwm->channel * TPU_CHANNEL_SIZE; + void __iomem *base = tpd->tpu->base + TPU_CHANNEL_OFFSET + + tpd->channel * TPU_CHANNEL_SIZE; iowrite16(value, base + reg_nr); } -static void tpu_pwm_set_pin(struct tpu_pwm_device *pwm, +static void tpu_pwm_set_pin(struct tpu_pwm_device *tpd, enum tpu_pin_state state) { static const char * const states[] = { "inactive", "PWM", "active" }; - dev_dbg(&pwm->tpu->pdev->dev, "%u: configuring pin as %s\n", - pwm->channel, states[state]); + dev_dbg(&tpd->tpu->pdev->dev, "%u: configuring pin as %s\n", + tpd->channel, states[state]); switch (state) { case TPU_PIN_INACTIVE: - tpu_pwm_write(pwm, TPU_TIORn, - pwm->polarity == PWM_POLARITY_INVERSED ? + tpu_pwm_write(tpd, TPU_TIORn, + tpd->polarity == PWM_POLARITY_INVERSED ? TPU_TIOR_IOA_1 : TPU_TIOR_IOA_0); break; case TPU_PIN_PWM: - tpu_pwm_write(pwm, TPU_TIORn, - pwm->polarity == PWM_POLARITY_INVERSED ? + tpu_pwm_write(tpd, TPU_TIORn, + tpd->polarity == PWM_POLARITY_INVERSED ? TPU_TIOR_IOA_0_SET : TPU_TIOR_IOA_1_CLR); break; case TPU_PIN_ACTIVE: - tpu_pwm_write(pwm, TPU_TIORn, - pwm->polarity == PWM_POLARITY_INVERSED ? + tpu_pwm_write(tpd, TPU_TIORn, + tpd->polarity == PWM_POLARITY_INVERSED ? TPU_TIOR_IOA_0 : TPU_TIOR_IOA_1); break; } } -static void tpu_pwm_start_stop(struct tpu_pwm_device *pwm, int start) +static void tpu_pwm_start_stop(struct tpu_pwm_device *tpd, int start) { unsigned long flags; u16 value; - spin_lock_irqsave(&pwm->tpu->lock, flags); - value = ioread16(pwm->tpu->base + TPU_TSTR); + spin_lock_irqsave(&tpd->tpu->lock, flags); + value = ioread16(tpd->tpu->base + TPU_TSTR); if (start) - value |= 1 << pwm->channel; + value |= 1 << tpd->channel; else - value &= ~(1 << pwm->channel); + value &= ~(1 << tpd->channel); - iowrite16(value, pwm->tpu->base + TPU_TSTR); - spin_unlock_irqrestore(&pwm->tpu->lock, flags); + iowrite16(value, tpd->tpu->base + TPU_TSTR); + spin_unlock_irqrestore(&tpd->tpu->lock, flags); } -static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm) +static int tpu_pwm_timer_start(struct tpu_pwm_device *tpd) { int ret; - if (!pwm->timer_on) { + if (!tpd->timer_on) { /* Wake up device and enable clock. */ - pm_runtime_get_sync(&pwm->tpu->pdev->dev); - ret = clk_prepare_enable(pwm->tpu->clk); + pm_runtime_get_sync(&tpd->tpu->pdev->dev); + ret = clk_prepare_enable(tpd->tpu->clk); if (ret) { - dev_err(&pwm->tpu->pdev->dev, "cannot enable clock\n"); + dev_err(&tpd->tpu->pdev->dev, "cannot enable clock\n"); return ret; } - pwm->timer_on = true; + tpd->timer_on = true; } /* @@ -161,8 +161,8 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm) * completely. First drive the pin to the inactive state to avoid * glitches. 
*/ - tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE); - tpu_pwm_start_stop(pwm, false); + tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE); + tpu_pwm_start_stop(tpd, false); /* * - Clear TCNT on TGRB match @@ -172,142 +172,168 @@ static int tpu_pwm_timer_start(struct tpu_pwm_device *pwm) * - Output 1 until TGRA, output 0 until TGRB (active high polarity * - PWM mode */ - tpu_pwm_write(pwm, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING | - pwm->prescaler); - tpu_pwm_write(pwm, TPU_TMDRn, TPU_TMDR_MD_PWM); - tpu_pwm_set_pin(pwm, TPU_PIN_PWM); - tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty); - tpu_pwm_write(pwm, TPU_TGRBn, pwm->period); + tpu_pwm_write(tpd, TPU_TCRn, TPU_TCR_CCLR_TGRB | TPU_TCR_CKEG_RISING | + tpd->prescaler); + tpu_pwm_write(tpd, TPU_TMDRn, TPU_TMDR_MD_PWM); + tpu_pwm_set_pin(tpd, TPU_PIN_PWM); + tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty); + tpu_pwm_write(tpd, TPU_TGRBn, tpd->period); - dev_dbg(&pwm->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n", - pwm->channel, pwm->duty, pwm->period); + dev_dbg(&tpd->tpu->pdev->dev, "%u: TGRA 0x%04x TGRB 0x%04x\n", + tpd->channel, tpd->duty, tpd->period); /* Start the channel. */ - tpu_pwm_start_stop(pwm, true); + tpu_pwm_start_stop(tpd, true); return 0; } -static void tpu_pwm_timer_stop(struct tpu_pwm_device *pwm) +static void tpu_pwm_timer_stop(struct tpu_pwm_device *tpd) { - if (!pwm->timer_on) + if (!tpd->timer_on) return; /* Disable channel. */ - tpu_pwm_start_stop(pwm, false); + tpu_pwm_start_stop(tpd, false); /* Stop clock and mark device as idle. */ - clk_disable_unprepare(pwm->tpu->clk); - pm_runtime_put(&pwm->tpu->pdev->dev); + clk_disable_unprepare(tpd->tpu->clk); + pm_runtime_put(&tpd->tpu->pdev->dev); - pwm->timer_on = false; + tpd->timer_on = false; } /* ----------------------------------------------------------------------------- * PWM API */ -static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *_pwm) +static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct tpu_device *tpu = to_tpu_device(chip); - struct tpu_pwm_device *pwm; + struct tpu_pwm_device *tpd; - if (_pwm->hwpwm >= TPU_CHANNEL_MAX) + if (pwm->hwpwm >= TPU_CHANNEL_MAX) return -EINVAL; - pwm = kzalloc(sizeof(*pwm), GFP_KERNEL); - if (pwm == NULL) + tpd = kzalloc(sizeof(*tpd), GFP_KERNEL); + if (tpd == NULL) return -ENOMEM; - pwm->tpu = tpu; - pwm->channel = _pwm->hwpwm; - pwm->polarity = PWM_POLARITY_NORMAL; - pwm->prescaler = 0; - pwm->period = 0; - pwm->duty = 0; + tpd->tpu = tpu; + tpd->channel = pwm->hwpwm; + tpd->polarity = PWM_POLARITY_NORMAL; + tpd->prescaler = 0; + tpd->period = 0; + tpd->duty = 0; - pwm->timer_on = false; + tpd->timer_on = false; - pwm_set_chip_data(_pwm, pwm); + pwm_set_chip_data(pwm, tpd); return 0; } -static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *_pwm) +static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { - struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm); + struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm); - tpu_pwm_timer_stop(pwm); - kfree(pwm); + tpu_pwm_timer_stop(tpd); + kfree(tpd); } -static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm, - int duty_ns, int period_ns) +static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + u64 duty_ns, u64 period_ns, bool enabled) { - static const unsigned int prescalers[] = { 1, 4, 16, 64 }; - struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm); + struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm); struct tpu_device *tpu = to_tpu_device(chip); unsigned int prescaler; bool duty_only = 
false; u32 clk_rate; - u32 period; + u64 period; u32 duty; int ret; + clk_rate = clk_get_rate(tpu->clk); + if (unlikely(clk_rate > NSEC_PER_SEC)) { + /* + * This won't happen in the nearer future, so this is only a + * safeguard to prevent the following calculation from + * overflowing. With this clk_rate * period_ns / NSEC_PER_SEC is + * not greater than period_ns and so fits into an u64. + */ + return -EINVAL; + } + + period = mul_u64_u64_div_u64(clk_rate, period_ns, NSEC_PER_SEC); + /* - * Pick a prescaler to avoid overflowing the counter. - * TODO: Pick the highest acceptable prescaler. + * Find the minimal prescaler in [0..3] such that + * + * period >> (2 * prescaler) < 0x10000 + * + * This could be calculated using something like: + * + * prescaler = max(ilog2(period) / 2, 7) - 7; + * + * but given there are only four allowed results and that ilog2 isn't + * cheap on all platforms using a switch statement is more effective. */ - clk_rate = clk_get_rate(tpu->clk); + switch (period) { + case 1 ... 0xffff: + prescaler = 0; + break; - for (prescaler = 0; prescaler < ARRAY_SIZE(prescalers); ++prescaler) { - period = clk_rate / prescalers[prescaler] - / (NSEC_PER_SEC / period_ns); - if (period <= 0xffff) - break; - } + case 0x10000 ... 0x3ffff: + prescaler = 1; + break; - if (prescaler == ARRAY_SIZE(prescalers) || period == 0) { - dev_err(&tpu->pdev->dev, "clock rate mismatch\n"); - return -ENOTSUPP; + case 0x40000 ... 0xfffff: + prescaler = 2; + break; + + case 0x100000 ... 0x3fffff: + prescaler = 3; + break; + + default: + return -EINVAL; } - if (duty_ns) { - duty = clk_rate / prescalers[prescaler] - / (NSEC_PER_SEC / duty_ns); - if (duty > period) - return -EINVAL; - } else { + period >>= 2 * prescaler; + + if (duty_ns) + duty = mul_u64_u64_div_u64(clk_rate, duty_ns, + (u64)NSEC_PER_SEC << (2 * prescaler)); + else duty = 0; - } dev_dbg(&tpu->pdev->dev, "rate %u, prescaler %u, period %u, duty %u\n", - clk_rate, prescalers[prescaler], period, duty); + clk_rate, 1 << (2 * prescaler), (u32)period, duty); - if (pwm->prescaler == prescaler && pwm->period == period) + if (tpd->prescaler == prescaler && tpd->period == period) duty_only = true; - pwm->prescaler = prescaler; - pwm->period = period; - pwm->duty = duty; + tpd->prescaler = prescaler; + tpd->period = period; + tpd->duty = duty; /* If the channel is disabled we're done. */ - if (!pwm_is_enabled(_pwm)) + if (!enabled) return 0; - if (duty_only && pwm->timer_on) { + if (duty_only && tpd->timer_on) { /* * If only the duty cycle changed and the timer is already * running, there's no need to reconfigure it completely, Just * modify the duty cycle. */ - tpu_pwm_write(pwm, TPU_TGRAn, pwm->duty); - dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", pwm->channel, - pwm->duty); + tpu_pwm_write(tpd, TPU_TGRAn, tpd->duty); + dev_dbg(&tpu->pdev->dev, "%u: TGRA 0x%04x\n", tpd->channel, + tpd->duty); } else { /* Otherwise perform a full reconfiguration. */ - ret = tpu_pwm_timer_start(pwm); + ret = tpu_pwm_timer_start(tpd); if (ret < 0) return ret; } @@ -317,29 +343,29 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm, * To avoid running the timer when not strictly required, handle * 0% and 100% duty cycles as fixed levels and stop the timer. */ - tpu_pwm_set_pin(pwm, duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE); - tpu_pwm_timer_stop(pwm); + tpu_pwm_set_pin(tpd, duty ? 
TPU_PIN_ACTIVE : TPU_PIN_INACTIVE); + tpu_pwm_timer_stop(tpd); } return 0; } -static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *_pwm, +static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, enum pwm_polarity polarity) { - struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm); + struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm); - pwm->polarity = polarity; + tpd->polarity = polarity; return 0; } -static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm) +static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm); + struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm); int ret; - ret = tpu_pwm_timer_start(pwm); + ret = tpu_pwm_timer_start(tpd); if (ret < 0) return ret; @@ -347,32 +373,64 @@ static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *_pwm) * To avoid running the timer when not strictly required, handle 0% and * 100% duty cycles as fixed levels and stop the timer. */ - if (pwm->duty == 0 || pwm->duty == pwm->period) { - tpu_pwm_set_pin(pwm, pwm->duty ? + if (tpd->duty == 0 || tpd->duty == tpd->period) { + tpu_pwm_set_pin(tpd, tpd->duty ? TPU_PIN_ACTIVE : TPU_PIN_INACTIVE); - tpu_pwm_timer_stop(pwm); + tpu_pwm_timer_stop(tpd); } return 0; } -static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *_pwm) +static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { - struct tpu_pwm_device *pwm = pwm_get_chip_data(_pwm); + struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm); /* The timer must be running to modify the pin output configuration. */ - tpu_pwm_timer_start(pwm); - tpu_pwm_set_pin(pwm, TPU_PIN_INACTIVE); - tpu_pwm_timer_stop(pwm); + tpu_pwm_timer_start(tpd); + tpu_pwm_set_pin(tpd, TPU_PIN_INACTIVE); + tpu_pwm_timer_stop(tpd); +} + +static int tpu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + bool enabled = pwm->state.enabled; + + if (state->polarity != pwm->state.polarity) { + if (enabled) { + tpu_pwm_disable(chip, pwm); + enabled = false; + } + + err = tpu_pwm_set_polarity(chip, pwm, state->polarity); + if (err) + return err; + } + + if (!state->enabled) { + if (enabled) + tpu_pwm_disable(chip, pwm); + + return 0; + } + + err = tpu_pwm_config(pwm->chip, pwm, + state->duty_cycle, state->period, enabled); + if (err) + return err; + + if (!enabled) + err = tpu_pwm_enable(chip, pwm); + + return err; } static const struct pwm_ops tpu_pwm_ops = { .request = tpu_pwm_request, .free = tpu_pwm_free, - .config = tpu_pwm_config, - .set_polarity = tpu_pwm_set_polarity, - .enable = tpu_pwm_enable, - .disable = tpu_pwm_disable, + .apply = tpu_pwm_apply, .owner = THIS_MODULE, }; @@ -398,10 +456,8 @@ static int tpu_probe(struct platform_device *pdev) return PTR_ERR(tpu->base); tpu->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(tpu->clk)) { - dev_err(&pdev->dev, "cannot get clock\n"); - return PTR_ERR(tpu->clk); - } + if (IS_ERR(tpu->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(tpu->clk), "Failed to get clock\n"); /* Initialize and register the device. 
*/ platform_set_drvdata(pdev, tpu); @@ -410,25 +466,13 @@ static int tpu_probe(struct platform_device *pdev) tpu->chip.ops = &tpu_pwm_ops; tpu->chip.npwm = TPU_CHANNEL_MAX; - pm_runtime_enable(&pdev->dev); - - ret = pwmchip_add(&tpu->chip); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register PWM chip\n"); - pm_runtime_disable(&pdev->dev); - return ret; - } - - return 0; -} - -static int tpu_remove(struct platform_device *pdev) -{ - struct tpu_device *tpu = platform_get_drvdata(pdev); - - pwmchip_remove(&tpu->chip); + ret = devm_pm_runtime_enable(&pdev->dev); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to enable runtime PM\n"); - pm_runtime_disable(&pdev->dev); + ret = devm_pwmchip_add(&pdev->dev, &tpu->chip); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to register PWM chip\n"); return 0; } @@ -447,7 +491,6 @@ MODULE_DEVICE_TABLE(of, tpu_of_table); static struct platform_driver tpu_driver = { .probe = tpu_probe, - .remove = tpu_remove, .driver = { .name = "renesas-tpu-pwm", .of_match_table = of_match_ptr(tpu_of_table), diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c index 0a4ff55fad04..9c5b4f515641 100644 --- a/drivers/pwm/pwm-samsung.c +++ b/drivers/pwm/pwm-samsung.c @@ -321,14 +321,6 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm, struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm); u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp; - /* - * We currently avoid using 64bit arithmetic by using the - * fact that anything faster than 1Hz is easily representable - * by 32bits. - */ - if (period_ns > NSEC_PER_SEC) - return -ERANGE; - tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm)); oldtcmp = readl(our_chip->base + REG_TCMPB(pwm->hwpwm)); @@ -438,13 +430,51 @@ static int pwm_samsung_set_polarity(struct pwm_chip *chip, return 0; } +static int pwm_samsung_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err, enabled = pwm->state.enabled; + + if (state->polarity != pwm->state.polarity) { + if (enabled) { + pwm_samsung_disable(chip, pwm); + enabled = false; + } + + err = pwm_samsung_set_polarity(chip, pwm, state->polarity); + if (err) + return err; + } + + if (!state->enabled) { + if (enabled) + pwm_samsung_disable(chip, pwm); + + return 0; + } + + /* + * We currently avoid using 64bit arithmetic by using the + * fact that anything faster than 1Hz is easily representable + * by 32bits. 
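+ * A period of at most NSEC_PER_SEC (10^9 ns) fits comfortably in a
+ * u32, hence the -ERANGE check below.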
+ */ + if (state->period > NSEC_PER_SEC) + return -ERANGE; + + err = pwm_samsung_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = pwm_samsung_enable(chip, pwm); + + return err; +} + static const struct pwm_ops pwm_samsung_ops = { .request = pwm_samsung_request, .free = pwm_samsung_free, - .enable = pwm_samsung_enable, - .disable = pwm_samsung_disable, - .config = pwm_samsung_config, - .set_polarity = pwm_samsung_set_polarity, + .apply = pwm_samsung_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index 253c4a17d255..e6d05a329002 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -138,10 +138,9 @@ static int pwm_sifive_enable(struct pwm_chip *chip, bool enable) dev_err(ddata->chip.dev, "Enable clk failed\n"); return ret; } - } - - if (!enable) + } else { clk_disable(ddata->clk); + } return 0; } diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index f491d56254d7..44b1f93256b3 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -391,11 +391,34 @@ out: return ret; } +static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + sti_pwm_disable(chip, pwm); + + return 0; + } + + err = sti_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = sti_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops sti_pwm_ops = { .capture = sti_pwm_capture, - .config = sti_pwm_config, - .enable = sti_pwm_enable, - .disable = sti_pwm_disable, + .apply = sti_pwm_apply, .free = sti_pwm_free, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c index c4336d3bace3..5d4a4762ce0c 100644 --- a/drivers/pwm/pwm-stmpe.c +++ b/drivers/pwm/pwm-stmpe.c @@ -259,10 +259,33 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } +static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + stmpe_24xx_pwm_disable(chip, pwm); + + return 0; + } + + err = stmpe_24xx_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = stmpe_24xx_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops stmpe_24xx_pwm_ops = { - .config = stmpe_24xx_pwm_config, - .enable = stmpe_24xx_pwm_enable, - .disable = stmpe_24xx_pwm_disable, + .apply = stmpe_24xx_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 16d75f9aa36a..c8445b0a3339 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -89,7 +89,6 @@ struct sun4i_pwm_chip { void __iomem *base; spinlock_t ctrl_lock; const struct sun4i_pwm_data *data; - unsigned long next_period[2]; }; static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip) @@ -236,7 +235,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, u32 ctrl, duty = 0, period = 0, val; int ret; unsigned int delay_us, prescaler = 0; - unsigned long now; bool bypass; pwm_get_state(pwm, &cstate); @@ -284,8 +282,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct 
pwm_device *pwm, val = (duty & PWM_DTY_MASK) | PWM_PRD(period); sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm)); - sun4i_pwm->next_period[pwm->hwpwm] = jiffies + - nsecs_to_jiffies(cstate.period + 1000); if (state->polarity != PWM_POLARITY_NORMAL) ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm); @@ -305,15 +301,11 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; /* We need a full period to elapse before disabling the channel. */ - now = jiffies; - if (time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) { - delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] - - now); - if ((delay_us / 500) > MAX_UDELAY_MS) - msleep(delay_us / 1000 + 1); - else - usleep_range(delay_us, delay_us * 2); - } + delay_us = DIV_ROUND_UP_ULL(cstate.period, NSEC_PER_USEC); + if ((delay_us / 500) > MAX_UDELAY_MS) + msleep(delay_us / 1000 + 1); + else + usleep_range(delay_us, delay_us * 2); spin_lock(&sun4i_pwm->ctrl_lock); ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c new file mode 100644 index 000000000000..e776fd16512d --- /dev/null +++ b/drivers/pwm/pwm-sunplus.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PWM device driver for SUNPLUS SP7021 SoC + * + * Links: + * Reference Manual: + * https://sunplus-tibbo.atlassian.net/wiki/spaces/doc/overview + * + * Reference Manual(PWM module): + * https://sunplus.atlassian.net/wiki/spaces/doc/pages/461144198/12.+Pulse+Width+Modulation+PWM + * + * Limitations: + * - Only supports normal polarity. + * - It output low when PWM channel disabled. + * - When the parameters change, current running period will not be completed + * and run new settings immediately. + * - In .apply() PWM output need to write register FREQ and DUTY. When first write FREQ + * done and not yet write DUTY, it has short timing gap use new FREQ and old DUTY. 
+ * + * Author: Hammer Hsieh <hammerh0314@gmail.com> + */ +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> + +#define SP7021_PWM_MODE0 0x000 +#define SP7021_PWM_MODE0_PWMEN(ch) BIT(ch) +#define SP7021_PWM_MODE0_BYPASS(ch) BIT(8 + (ch)) +#define SP7021_PWM_MODE1 0x004 +#define SP7021_PWM_MODE1_CNT_EN(ch) BIT(ch) +#define SP7021_PWM_FREQ(ch) (0x008 + 4 * (ch)) +#define SP7021_PWM_FREQ_MAX GENMASK(15, 0) +#define SP7021_PWM_DUTY(ch) (0x018 + 4 * (ch)) +#define SP7021_PWM_DUTY_DD_SEL(ch) FIELD_PREP(GENMASK(9, 8), ch) +#define SP7021_PWM_DUTY_MAX GENMASK(7, 0) +#define SP7021_PWM_DUTY_MASK SP7021_PWM_DUTY_MAX +#define SP7021_PWM_FREQ_SCALER 256 +#define SP7021_PWM_NUM 4 + +struct sunplus_pwm { + struct pwm_chip chip; + void __iomem *base; + struct clk *clk; +}; + +static inline struct sunplus_pwm *to_sunplus_pwm(struct pwm_chip *chip) +{ + return container_of(chip, struct sunplus_pwm, chip); +} + +static int sunplus_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + struct sunplus_pwm *priv = to_sunplus_pwm(chip); + u32 dd_freq, duty, mode0, mode1; + u64 clk_rate; + + if (state->polarity != pwm->state.polarity) + return -EINVAL; + + if (!state->enabled) { + /* disable pwm channel output */ + mode0 = readl(priv->base + SP7021_PWM_MODE0); + mode0 &= ~SP7021_PWM_MODE0_PWMEN(pwm->hwpwm); + writel(mode0, priv->base + SP7021_PWM_MODE0); + /* disable pwm channel clk source */ + mode1 = readl(priv->base + SP7021_PWM_MODE1); + mode1 &= ~SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm); + writel(mode1, priv->base + SP7021_PWM_MODE1); + return 0; + } + + clk_rate = clk_get_rate(priv->clk); + + /* + * The following calculations might overflow if clk is bigger + * than 256 GHz. In practise it's 202.5MHz, so this limitation + * is only theoretic. + */ + if (clk_rate > (u64)SP7021_PWM_FREQ_SCALER * NSEC_PER_SEC) + return -EINVAL; + + /* + * With clk_rate limited above we have dd_freq <= state->period, + * so this cannot overflow. + */ + dd_freq = mul_u64_u64_div_u64(clk_rate, state->period, (u64)SP7021_PWM_FREQ_SCALER + * NSEC_PER_SEC); + + if (dd_freq == 0) + return -EINVAL; + + if (dd_freq > SP7021_PWM_FREQ_MAX) + dd_freq = SP7021_PWM_FREQ_MAX; + + writel(dd_freq, priv->base + SP7021_PWM_FREQ(pwm->hwpwm)); + + /* cal and set pwm duty */ + mode0 = readl(priv->base + SP7021_PWM_MODE0); + mode0 |= SP7021_PWM_MODE0_PWMEN(pwm->hwpwm); + mode1 = readl(priv->base + SP7021_PWM_MODE1); + mode1 |= SP7021_PWM_MODE1_CNT_EN(pwm->hwpwm); + if (state->duty_cycle == state->period) { + /* PWM channel output = high */ + mode0 |= SP7021_PWM_MODE0_BYPASS(pwm->hwpwm); + duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | SP7021_PWM_DUTY_MAX; + } else { + mode0 &= ~SP7021_PWM_MODE0_BYPASS(pwm->hwpwm); + /* + * duty_ns <= period_ns 27 bits, clk_rate 28 bits, won't overflow. 
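+ * mul_u64_u64_div_u64() uses a 128-bit intermediate product, so the
+ * multiplication itself cannot overflow either.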
+ */ + duty = mul_u64_u64_div_u64(state->duty_cycle, clk_rate, + (u64)dd_freq * NSEC_PER_SEC); + duty = SP7021_PWM_DUTY_DD_SEL(pwm->hwpwm) | duty; + } + writel(duty, priv->base + SP7021_PWM_DUTY(pwm->hwpwm)); + writel(mode1, priv->base + SP7021_PWM_MODE1); + writel(mode0, priv->base + SP7021_PWM_MODE0); + + return 0; +} + +static void sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state) +{ + struct sunplus_pwm *priv = to_sunplus_pwm(chip); + u32 mode0, dd_freq, duty; + u64 clk_rate; + + mode0 = readl(priv->base + SP7021_PWM_MODE0); + + if (mode0 & BIT(pwm->hwpwm)) { + clk_rate = clk_get_rate(priv->clk); + dd_freq = readl(priv->base + SP7021_PWM_FREQ(pwm->hwpwm)); + duty = readl(priv->base + SP7021_PWM_DUTY(pwm->hwpwm)); + duty = FIELD_GET(SP7021_PWM_DUTY_MASK, duty); + /* + * dd_freq 16 bits, SP7021_PWM_FREQ_SCALER 8 bits + * NSEC_PER_SEC 30 bits, won't overflow. + */ + state->period = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)SP7021_PWM_FREQ_SCALER + * NSEC_PER_SEC, clk_rate); + /* + * dd_freq 16 bits, duty 8 bits, NSEC_PER_SEC 30 bits, won't overflow. + */ + state->duty_cycle = DIV64_U64_ROUND_UP((u64)dd_freq * (u64)duty * NSEC_PER_SEC, + clk_rate); + state->enabled = true; + } else { + state->enabled = false; + } + + state->polarity = PWM_POLARITY_NORMAL; +} + +static const struct pwm_ops sunplus_pwm_ops = { + .apply = sunplus_pwm_apply, + .get_state = sunplus_pwm_get_state, + .owner = THIS_MODULE, +}; + +static void sunplus_pwm_clk_release(void *data) +{ + struct clk *clk = data; + + clk_disable_unprepare(clk); +} + +static int sunplus_pwm_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sunplus_pwm *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "get pwm clock failed\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret < 0) { + dev_err(dev, "failed to enable clock: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(dev, sunplus_pwm_clk_release, priv->clk); + if (ret < 0) { + dev_err(dev, "failed to release clock: %d\n", ret); + return ret; + } + + priv->chip.dev = dev; + priv->chip.ops = &sunplus_pwm_ops; + priv->chip.npwm = SP7021_PWM_NUM; + + ret = devm_pwmchip_add(dev, &priv->chip); + if (ret < 0) + return dev_err_probe(dev, ret, "Cannot register sunplus PWM\n"); + + return 0; +} + +static const struct of_device_id sunplus_pwm_of_match[] = { + { .compatible = "sunplus,sp7021-pwm", }, + {} +}; +MODULE_DEVICE_TABLE(of, sunplus_pwm_of_match); + +static struct platform_driver sunplus_pwm_driver = { + .probe = sunplus_pwm_probe, + .driver = { + .name = "sunplus-pwm", + .of_match_table = sunplus_pwm_of_match, + }, +}; +module_platform_driver(sunplus_pwm_driver); + +MODULE_DESCRIPTION("Sunplus SoC PWM Driver"); +MODULE_AUTHOR("Hammer Hsieh <hammerh0314@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index e5a9ffef4a71..dad9978c9186 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -99,7 +99,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); - unsigned long long c = duty_ns, hz; + unsigned long long c = duty_ns; unsigned long rate, 
required_clk_rate; u32 val = 0; int err; @@ -156,11 +156,9 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, pc->clk_rate = clk_get_rate(pc->clk); } - rate = pc->clk_rate >> PWM_DUTY_WIDTH; - /* Consider precision in PWM_SCALE_WIDTH rate calculation */ - hz = DIV_ROUND_CLOSEST_ULL(100ULL * NSEC_PER_SEC, period_ns); - rate = DIV_ROUND_CLOSEST_ULL(100ULL * rate, hz); + rate = mul_u64_u64_div_u64(pc->clk_rate, period_ns, + (u64)NSEC_PER_SEC << PWM_DUTY_WIDTH); /* * Since the actual PWM divider is the register's frequency divider @@ -169,6 +167,8 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, */ if (rate > 0) rate--; + else + return -EINVAL; /* * Make sure that the rate will fit in the register's frequency @@ -230,10 +230,34 @@ static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) pm_runtime_put_sync(pc->dev); } +static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + bool enabled = pwm->state.enabled; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (enabled) + tegra_pwm_disable(chip, pwm); + + return 0; + } + + err = tegra_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period); + if (err) + return err; + + if (!enabled) + err = tegra_pwm_enable(chip, pwm); + + return err; +} + static const struct pwm_ops tegra_pwm_ops = { - .config = tegra_pwm_config, - .enable = tegra_pwm_enable, - .disable = tegra_pwm_disable, + .apply = tegra_pwm_apply, .owner = THIS_MODULE, }; diff --git a/drivers/pwm/pwm-twl-led.c b/drivers/pwm/pwm-twl-led.c index 49d9f7a78012..ed0b63dd38f1 100644 --- a/drivers/pwm/pwm-twl-led.c +++ b/drivers/pwm/pwm-twl-led.c @@ -137,6 +137,45 @@ out: mutex_unlock(&twl->mutex); } +static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int ret; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + twl4030_pwmled_disable(chip, pwm); + + return 0; + } + + /* + * We cannot skip calling ->config even if state->period == + * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle + * because we might have exited early in the last call to + * pwm_apply_state because of !state->enabled and so the two values in + * pwm->state might not be configured in hardware. 
+ */ + ret = twl4030_pwmled_config(pwm->chip, pwm, + state->duty_cycle, state->period); + if (ret) + return ret; + + if (!pwm->state.enabled) + ret = twl4030_pwmled_enable(chip, pwm); + + return ret; +} + + +static const struct pwm_ops twl4030_pwmled_ops = { + .apply = twl4030_pwmled_apply, + .owner = THIS_MODULE, +}; + static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm, int duty_ns, int period_ns) { @@ -206,6 +245,32 @@ out: mutex_unlock(&twl->mutex); } +static int twl6030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_state *state) +{ + int err; + + if (state->polarity != pwm->state.polarity) + return -EINVAL; + + if (!state->enabled) { + if (pwm->state.enabled) + twl6030_pwmled_disable(chip, pwm); + + return 0; + } + + err = twl6030_pwmled_config(pwm->chip, pwm, + state->duty_cycle, state->period); + if (err) + return err; + + if (!pwm->state.enabled) + err = twl6030_pwmled_enable(chip, pwm); + + return err; +} + static int twl6030_pwmled_request(struct pwm_chip *chip, struct pwm_device *pwm) { struct twl_pwmled_chip *twl = to_twl(chip); @@ -257,17 +322,8 @@ out: mutex_unlock(&twl->mutex); } -static const struct pwm_ops twl4030_pwmled_ops = { - .enable = twl4030_pwmled_enable, - .disable = twl4030_pwmled_disable, - .config = twl4030_pwmled_config, - .owner = THIS_MODULE, -}; - static const struct pwm_ops twl6030_pwmled_ops = { - .enable = twl6030_pwmled_enable, - .disable = twl6030_pwmled_disable, - .config = twl6030_pwmled_config, + .apply = twl6030_pwmled_apply, .request = twl6030_pwmled_request, .free = twl6030_pwmled_free, .owner = THIS_MODULE, diff --git a/drivers/pwm/pwm-xilinx.c b/drivers/pwm/pwm-xilinx.c new file mode 100644 index 000000000000..4dab2b86c427 --- /dev/null +++ b/drivers/pwm/pwm-xilinx.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Sean Anderson <sean.anderson@seco.com> + * + * Limitations: + * - When changing both duty cycle and period, we may end up with one cycle + * with the old duty cycle and the new period. This is because the counters + * may only be reloaded by first stopping them, or by letting them be + * automatically reloaded at the end of a cycle. If this automatic reload + * happens after we set TLR0 but before we set TLR1 then we will have a + * bad cycle. This could probably be fixed by reading TCR0 just before + * reprogramming, but I think it would add complexity for little gain. + * - Cannot produce 100% duty cycle by configuring the TLRs. This might be + * possible by stopping the counters at an appropriate point in the cycle, + * but this is not (yet) implemented. + * - Only produces "normal" output. + * - Always produces low output if disabled. + */ + +#include <clocksource/timer-xilinx.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/regmap.h> + +/* + * The following functions are "common" to drivers for this device, and may be + * exported at a future date. 
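+ * As an illustration of the helpers below: with a 32-bit counter, a period
+ * of 100000 cycles maps to TLR = 99998 when counting up (TCSR_UDT set), or
+ * to priv->max - 99998 when counting down.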
+ */ +u32 xilinx_timer_tlr_cycles(struct xilinx_timer_priv *priv, u32 tcsr, + u64 cycles) +{ + WARN_ON(cycles < 2 || cycles - 2 > priv->max); + + if (tcsr & TCSR_UDT) + return cycles - 2; + return priv->max - cycles + 2; +} + +unsigned int xilinx_timer_get_period(struct xilinx_timer_priv *priv, + u32 tlr, u32 tcsr) +{ + u64 cycles; + + if (tcsr & TCSR_UDT) + cycles = tlr + 2; + else + cycles = (u64)priv->max - tlr + 2; + + /* cycles has a max of 2^32 + 2, so we can't overflow */ + return DIV64_U64_ROUND_UP(cycles * NSEC_PER_SEC, + clk_get_rate(priv->clk)); +} + +/* + * The idea here is to capture whether the PWM is actually running (e.g. + * because we or the bootloader set it up) and we need to be careful to ensure + * we don't cause a glitch. According to the data sheet, to enable the PWM we + * need to + * + * - Set both timers to generate mode (MDT=1) + * - Set both timers to PWM mode (PWMA=1) + * - Enable the generate out signals (GENT=1) + * + * In addition, + * + * - The timer must be running (ENT=1) + * - The timer must auto-reload TLR into TCR (ARHT=1) + * - We must not be in the process of loading TLR into TCR (LOAD=0) + * - Cascade mode must be disabled (CASC=0) + * + * If any of these differ from usual, then the PWM is either disabled, or is + * running in a mode that this driver does not support. + */ +#define TCSR_PWM_SET (TCSR_GENT | TCSR_ARHT | TCSR_ENT | TCSR_PWMA) +#define TCSR_PWM_CLEAR (TCSR_MDT | TCSR_LOAD) +#define TCSR_PWM_MASK (TCSR_PWM_SET | TCSR_PWM_CLEAR) + +struct xilinx_pwm_device { + struct pwm_chip chip; + struct xilinx_timer_priv priv; +}; + +static inline struct xilinx_timer_priv +*xilinx_pwm_chip_to_priv(struct pwm_chip *chip) +{ + return &container_of(chip, struct xilinx_pwm_device, chip)->priv; +} + +static bool xilinx_timer_pwm_enabled(u32 tcsr0, u32 tcsr1) +{ + return ((TCSR_PWM_MASK | TCSR_CASC) & tcsr0) == TCSR_PWM_SET && + (TCSR_PWM_MASK & tcsr1) == TCSR_PWM_SET; +} + +static int xilinx_pwm_apply(struct pwm_chip *chip, struct pwm_device *unused, + const struct pwm_state *state) +{ + struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip); + u32 tlr0, tlr1, tcsr0, tcsr1; + u64 period_cycles, duty_cycles; + unsigned long rate; + + if (state->polarity != PWM_POLARITY_NORMAL) + return -EINVAL; + + /* + * To be representable by TLR, cycles must be between 2 and + * priv->max + 2. To enforce this we can reduce the cycles, but we may + * not increase them. Caveat emptor: while this does result in more + * predictable rounding, it may also result in a completely different + * duty cycle (% high time) than what was requested. + */ + rate = clk_get_rate(priv->clk); + /* Avoid overflow */ + period_cycles = min_t(u64, state->period, U32_MAX * NSEC_PER_SEC); + period_cycles = mul_u64_u32_div(period_cycles, rate, NSEC_PER_SEC); + period_cycles = min_t(u64, period_cycles, priv->max + 2); + if (period_cycles < 2) + return -ERANGE; + + /* Same thing for duty cycles */ + duty_cycles = min_t(u64, state->duty_cycle, U32_MAX * NSEC_PER_SEC); + duty_cycles = mul_u64_u32_div(duty_cycles, rate, NSEC_PER_SEC); + duty_cycles = min_t(u64, duty_cycles, priv->max + 2); + + /* + * If we specify 100% duty cycle, we will get 0% instead, so decrease + * the duty cycle count by one. 
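+	 * For instance, with a 100 MHz s_axi_aclk and a 1 ms period,
+	 * period_cycles = 100000, so requesting duty_cycle == period programs
+	 * duty_cycles = 99999, one clock cycle short of a constant-high output.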
+ */ + if (duty_cycles >= period_cycles) + duty_cycles = period_cycles - 1; + + /* Round down to 0% duty cycle for unrepresentable duty cycles */ + if (duty_cycles < 2) + duty_cycles = period_cycles; + + regmap_read(priv->map, TCSR0, &tcsr0); + regmap_read(priv->map, TCSR1, &tcsr1); + tlr0 = xilinx_timer_tlr_cycles(priv, tcsr0, period_cycles); + tlr1 = xilinx_timer_tlr_cycles(priv, tcsr1, duty_cycles); + regmap_write(priv->map, TLR0, tlr0); + regmap_write(priv->map, TLR1, tlr1); + + if (state->enabled) { + /* + * If the PWM is already running, then the counters will be + * reloaded at the end of the current cycle. + */ + if (!xilinx_timer_pwm_enabled(tcsr0, tcsr1)) { + /* Load TLR into TCR */ + regmap_write(priv->map, TCSR0, tcsr0 | TCSR_LOAD); + regmap_write(priv->map, TCSR1, tcsr1 | TCSR_LOAD); + /* Enable timers all at once with ENALL */ + tcsr0 = (TCSR_PWM_SET & ~TCSR_ENT) | (tcsr0 & TCSR_UDT); + tcsr1 = TCSR_PWM_SET | TCSR_ENALL | (tcsr1 & TCSR_UDT); + regmap_write(priv->map, TCSR0, tcsr0); + regmap_write(priv->map, TCSR1, tcsr1); + } + } else { + regmap_write(priv->map, TCSR0, 0); + regmap_write(priv->map, TCSR1, 0); + } + + return 0; +} + +static void xilinx_pwm_get_state(struct pwm_chip *chip, + struct pwm_device *unused, + struct pwm_state *state) +{ + struct xilinx_timer_priv *priv = xilinx_pwm_chip_to_priv(chip); + u32 tlr0, tlr1, tcsr0, tcsr1; + + regmap_read(priv->map, TLR0, &tlr0); + regmap_read(priv->map, TLR1, &tlr1); + regmap_read(priv->map, TCSR0, &tcsr0); + regmap_read(priv->map, TCSR1, &tcsr1); + state->period = xilinx_timer_get_period(priv, tlr0, tcsr0); + state->duty_cycle = xilinx_timer_get_period(priv, tlr1, tcsr1); + state->enabled = xilinx_timer_pwm_enabled(tcsr0, tcsr1); + state->polarity = PWM_POLARITY_NORMAL; + + /* + * 100% duty cycle results in constant low output. 
This may be (very) + * wrong if rate > 1 GHz, so fix this if you have such hardware :) + */ + if (state->period == state->duty_cycle) + state->duty_cycle = 0; +} + +static const struct pwm_ops xilinx_pwm_ops = { + .apply = xilinx_pwm_apply, + .get_state = xilinx_pwm_get_state, + .owner = THIS_MODULE, +}; + +static const struct regmap_config xilinx_pwm_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .max_register = TCR1, +}; + +static int xilinx_pwm_probe(struct platform_device *pdev) +{ + int ret; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct xilinx_timer_priv *priv; + struct xilinx_pwm_device *xilinx_pwm; + u32 pwm_cells, one_timer, width; + void __iomem *regs; + + /* If there are no PWM cells, this binding is for a timer */ + ret = of_property_read_u32(np, "#pwm-cells", &pwm_cells); + if (ret == -EINVAL) + return -ENODEV; + if (ret) + return dev_err_probe(dev, ret, "could not read #pwm-cells\n"); + + xilinx_pwm = devm_kzalloc(dev, sizeof(*xilinx_pwm), GFP_KERNEL); + if (!xilinx_pwm) + return -ENOMEM; + platform_set_drvdata(pdev, xilinx_pwm); + priv = &xilinx_pwm->priv; + + regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + priv->map = devm_regmap_init_mmio(dev, regs, + &xilinx_pwm_regmap_config); + if (IS_ERR(priv->map)) + return dev_err_probe(dev, PTR_ERR(priv->map), + "Could not create regmap\n"); + + ret = of_property_read_u32(np, "xlnx,one-timer-only", &one_timer); + if (ret) + return dev_err_probe(dev, ret, + "Could not read xlnx,one-timer-only\n"); + + if (one_timer) + return dev_err_probe(dev, -EINVAL, + "Two timers required for PWM mode\n"); + + ret = of_property_read_u32(np, "xlnx,count-width", &width); + if (ret == -EINVAL) + width = 32; + else if (ret) + return dev_err_probe(dev, ret, + "Could not read xlnx,count-width\n"); + + if (width != 8 && width != 16 && width != 32) + return dev_err_probe(dev, -EINVAL, + "Invalid counter width %d\n", width); + priv->max = BIT_ULL(width) - 1; + + /* + * The polarity of the Generate Out signals must be active high for PWM + * mode to work. We could determine this from the device tree, but + * alas, such properties are not allowed to be used. 
+ */ + + priv->clk = devm_clk_get(dev, "s_axi_aclk"); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "Could not get clock\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return dev_err_probe(dev, ret, "Clock enable failed\n"); + clk_rate_exclusive_get(priv->clk); + + xilinx_pwm->chip.dev = dev; + xilinx_pwm->chip.ops = &xilinx_pwm_ops; + xilinx_pwm->chip.npwm = 1; + ret = pwmchip_add(&xilinx_pwm->chip); + if (ret) { + clk_rate_exclusive_put(priv->clk); + clk_disable_unprepare(priv->clk); + return dev_err_probe(dev, ret, "Could not register PWM chip\n"); + } + + return 0; +} + +static int xilinx_pwm_remove(struct platform_device *pdev) +{ + struct xilinx_pwm_device *xilinx_pwm = platform_get_drvdata(pdev); + + pwmchip_remove(&xilinx_pwm->chip); + clk_rate_exclusive_put(xilinx_pwm->priv.clk); + clk_disable_unprepare(xilinx_pwm->priv.clk); + return 0; +} + +static const struct of_device_id xilinx_pwm_of_match[] = { + { .compatible = "xlnx,xps-timer-1.00.a", }, + {}, +}; +MODULE_DEVICE_TABLE(of, xilinx_pwm_of_match); + +static struct platform_driver xilinx_pwm_driver = { + .probe = xilinx_pwm_probe, + .remove = xilinx_pwm_remove, + .driver = { + .name = "xilinx-pwm", + .of_match_table = of_match_ptr(xilinx_pwm_of_match), + }, +}; +module_platform_driver(xilinx_pwm_driver); + +MODULE_ALIAS("platform:xilinx-pwm"); +MODULE_DESCRIPTION("PWM driver for Xilinx LogiCORE IP AXI Timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c index aa55cfca9e40..6b617024a67d 100644 --- a/drivers/regulator/pfuze100-regulator.c +++ b/drivers/regulator/pfuze100-regulator.c @@ -10,6 +10,7 @@ #include <linux/of_device.h> #include <linux/regulator/of_regulator.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/pfuze100.h> @@ -571,10 +572,10 @@ static inline struct device_node *match_of_node(int index) return pfuze_matches[index].of_node; } -static struct pfuze_chip *syspm_pfuze_chip; - -static void pfuze_power_off_prepare(void) +static int pfuze_power_off_prepare(struct sys_off_data *data) { + struct pfuze_chip *syspm_pfuze_chip = data->cb_data; + dev_info(syspm_pfuze_chip->dev, "Configure standby mode for power off"); /* Switch from default mode: APS/APS to APS/Off */ @@ -609,28 +610,30 @@ static void pfuze_power_off_prepare(void) regmap_update_bits(syspm_pfuze_chip->regmap, PFUZE100_VGEN6VOL, PFUZE100_VGENxLPWR | PFUZE100_VGENxSTBY, PFUZE100_VGENxSTBY); + + return NOTIFY_DONE; } static int pfuze_power_off_prepare_init(struct pfuze_chip *pfuze_chip) { + int err; + if (pfuze_chip->chip_id != PFUZE100) { dev_warn(pfuze_chip->dev, "Requested pm_power_off_prepare handler for not supported chip\n"); return -ENODEV; } - if (pm_power_off_prepare) { - dev_warn(pfuze_chip->dev, "pm_power_off_prepare is already registered.\n"); - return -EBUSY; + err = devm_register_sys_off_handler(pfuze_chip->dev, + SYS_OFF_MODE_POWER_OFF_PREPARE, + SYS_OFF_PRIO_DEFAULT, + pfuze_power_off_prepare, + pfuze_chip); + if (err) { + dev_err(pfuze_chip->dev, "failed to register sys-off handler: %d\n", + err); + return err; } - if (syspm_pfuze_chip) { - dev_warn(pfuze_chip->dev, "syspm_pfuze_chip is already set.\n"); - return -EBUSY; - } - - syspm_pfuze_chip = pfuze_chip; - pm_power_off_prepare = pfuze_power_off_prepare; - return 0; } @@ -839,23 +842,12 @@ static int pfuze100_regulator_probe(struct i2c_client *client, return 0; } 
-static int pfuze100_regulator_remove(struct i2c_client *client) -{ - if (syspm_pfuze_chip) { - syspm_pfuze_chip = NULL; - pm_power_off_prepare = NULL; - } - - return 0; -} - static struct i2c_driver pfuze_driver = { .driver = { .name = "pfuze100-regulator", .of_match_table = pfuze_dt_ids, }, .probe = pfuze100_regulator_probe, - .remove = pfuze100_regulator_remove, }; module_i2c_driver(pfuze_driver); diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c index 2abee78df96e..ca0817f8e41e 100644 --- a/drivers/remoteproc/imx_dsp_rproc.c +++ b/drivers/remoteproc/imx_dsp_rproc.c @@ -649,99 +649,6 @@ static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv) return 0; } -/** - * imx_dsp_rproc_elf_load_segments() - load firmware segments to memory - * @rproc: remote processor which will be booted using these fw segments - * @fw: the ELF firmware image - * - * This function specially checks if memsz is zero or not, otherwise it - * is mostly same as rproc_elf_load_segments(). - */ -static int imx_dsp_rproc_elf_load_segments(struct rproc *rproc, - const struct firmware *fw) -{ - struct device *dev = &rproc->dev; - u8 class = fw_elf_get_class(fw); - u32 elf_phdr_get_size = elf_size_of_phdr(class); - const u8 *elf_data = fw->data; - const void *ehdr, *phdr; - int i, ret = 0; - u16 phnum; - - ehdr = elf_data; - phnum = elf_hdr_get_e_phnum(class, ehdr); - phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr); - - /* go through the available ELF segments */ - for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) { - u64 da = elf_phdr_get_p_paddr(class, phdr); - u64 memsz = elf_phdr_get_p_memsz(class, phdr); - u64 filesz = elf_phdr_get_p_filesz(class, phdr); - u64 offset = elf_phdr_get_p_offset(class, phdr); - u32 type = elf_phdr_get_p_type(class, phdr); - void *ptr; - - /* - * There is a case that with PT_LOAD type, the - * filesz = memsz = 0. If memsz = 0, rproc_da_to_va - * should return NULL ptr, then error is returned. - * So this case should be skipped from the loop. - * Add !memsz checking here. - */ - if (type != PT_LOAD || !memsz) - continue; - - dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n", - type, da, memsz, filesz); - - if (filesz > memsz) { - dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n", - filesz, memsz); - ret = -EINVAL; - break; - } - - if (offset + filesz > fw->size) { - dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n", - offset + filesz, fw->size); - ret = -EINVAL; - break; - } - - if (!rproc_u64_fit_in_size_t(memsz)) { - dev_err(dev, "size (%llx) does not fit in size_t type\n", - memsz); - ret = -EOVERFLOW; - break; - } - - /* grab the kernel address for this device address */ - ptr = rproc_da_to_va(rproc, da, memsz, NULL); - if (!ptr) { - dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da, - memsz); - ret = -EINVAL; - break; - } - - /* put the segment where the remote processor expects it */ - if (filesz) - memcpy(ptr, elf_data + offset, filesz); - - /* - * Zero out remaining memory for this segment. - * - * This isn't strictly required since dma_alloc_coherent already - * did this for us. albeit harmless, we may consider removing - * this. 
- */ - if (memsz > filesz) - memset(ptr + filesz, 0, memsz - filesz); - } - - return ret; -} - /* Prepare function for rproc_ops */ static int imx_dsp_rproc_prepare(struct rproc *rproc) { @@ -802,14 +709,22 @@ static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid) dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err); } +static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + if (rproc_elf_load_rsc_table(rproc, fw)) + dev_warn(&rproc->dev, "no resource table found for this firmware\n"); + + return 0; +} + static const struct rproc_ops imx_dsp_rproc_ops = { .prepare = imx_dsp_rproc_prepare, .unprepare = imx_dsp_rproc_unprepare, .start = imx_dsp_rproc_start, .stop = imx_dsp_rproc_stop, .kick = imx_dsp_rproc_kick, - .load = imx_dsp_rproc_elf_load_segments, - .parse_fw = rproc_elf_load_rsc_table, + .load = rproc_elf_load_segments, + .parse_fw = imx_dsp_rproc_parse_fw, .sanity_check = rproc_elf_sanity_check, .get_boot_addr = rproc_elf_get_boot_addr, }; diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 7a096f1891e6..4a3352821b1d 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c @@ -91,6 +91,32 @@ struct imx_rproc { void __iomem *rsc_table; }; +static const struct imx_rproc_att imx_rproc_att_imx93[] = { + /* dev addr , sys addr , size , flags */ + /* TCM CODE NON-SECURE */ + { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + + /* TCM CODE SECURE */ + { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM }, + + /* TCM SYS NON-SECURE*/ + { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM }, + + /* TCM SYS SECURE*/ + { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM }, + { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM }, + + /* DDR */ + { 0x80000000, 0x80000000, 0x10000000, 0 }, + { 0x90000000, 0x80000000, 0x10000000, 0 }, + + { 0xC0000000, 0xa0000000, 0x10000000, 0 }, + { 0xD0000000, 0xa0000000, 0x10000000, 0 }, +}; + static const struct imx_rproc_att imx_rproc_att_imx8mn[] = { /* dev addr , sys addr , size , flags */ /* ITCM */ @@ -261,6 +287,12 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = { .method = IMX_RPROC_MMIO, }; +static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = { + .att = imx_rproc_att_imx93, + .att_size = ARRAY_SIZE(imx_rproc_att_imx93), + .method = IMX_RPROC_SMC, +}; + static int imx_rproc_start(struct rproc *rproc) { struct imx_rproc *priv = rproc->priv; @@ -423,6 +455,9 @@ static int imx_rproc_prepare(struct rproc *rproc) if (!strcmp(it.node->name, "vdev0buffer")) continue; + if (!strcmp(it.node->name, "rsc-table")) + continue; + rmem = of_reserved_mem_lookup(it.node); if (!rmem) { dev_err(priv->dev, "unable to acquire memory-region\n"); @@ -821,6 +856,7 @@ static const struct of_device_id imx_rproc_of_match[] = { { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn }, { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn }, { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp }, + { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 }, {}, }; MODULE_DEVICE_TABLE(of, imx_rproc_of_match); diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h index 71ce4977cb0b..ea6fa1100a00 100644 --- a/drivers/remoteproc/mtk_common.h +++ b/drivers/remoteproc/mtk_common.h @@ 
-54,6 +54,8 @@ #define MT8192_CORE0_WDT_IRQ 0x10030 #define MT8192_CORE0_WDT_CFG 0x10034 +#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS GENMASK(7, 4) + #define SCP_FW_VER_LEN 32 #define SCP_SHARE_BUFFER_SIZE 288 diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index 38609153bf64..47b2a40e1b4a 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -365,22 +365,22 @@ static int mt8183_scp_before_load(struct mtk_scp *scp) return 0; } -static void mt8192_power_on_sram(void __iomem *addr) +static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask) { int i; for (i = 31; i >= 0; i--) - writel(GENMASK(i, 0), addr); + writel(GENMASK(i, 0) & ~reserved_mask, addr); writel(0, addr); } -static void mt8192_power_off_sram(void __iomem *addr) +static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask) { int i; writel(0, addr); for (i = 0; i < 32; i++) - writel(GENMASK(i, 0), addr); + writel(GENMASK(i, 0) & ~reserved_mask, addr); } static int mt8186_scp_before_load(struct mtk_scp *scp) @@ -393,7 +393,7 @@ static int mt8186_scp_before_load(struct mtk_scp *scp) writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL); /* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/ - mt8192_power_on_sram(scp->reg_base + MT8183_SCP_SRAM_PDN); + scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0); /* Initialize TCM before loading FW. */ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD); @@ -412,11 +412,32 @@ static int mt8192_scp_before_load(struct mtk_scp *scp) writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET); /* enable SRAM clock */ - mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0); - mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1); - mt8192_power_on_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2); - mt8192_power_on_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN); - mt8192_power_on_sram(scp->reg_base + MT8192_CPU0_SRAM_PD); + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0); + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0); + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0); + scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0); + scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0); + + /* enable MPU for all memory regions */ + writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF); + + return 0; +} + +static int mt8195_scp_before_load(struct mtk_scp *scp) +{ + /* clear SPM interrupt, SCP2SPM_IPC_CLR */ + writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR); + + writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET); + + /* enable SRAM clock */ + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0); + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0); + scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0); + scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, + MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS); + scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0); /* enable MPU for all memory regions */ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF); @@ -572,11 +593,25 @@ static void mt8183_scp_stop(struct mtk_scp *scp) static void mt8192_scp_stop(struct mtk_scp *scp) { /* Disable SRAM clock */ - mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_0); - mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_1); - mt8192_power_off_sram(scp->reg_base + MT8192_L2TCM_SRAM_PD_2); - mt8192_power_off_sram(scp->reg_base + MT8192_L1TCM_SRAM_PDN); - mt8192_power_off_sram(scp->reg_base + MT8192_CPU0_SRAM_PD); 
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0); + scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0); + scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0); + scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0); + scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0); + + /* Disable SCP watchdog */ + writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG); +} + +static void mt8195_scp_stop(struct mtk_scp *scp) +{ + /* Disable SRAM clock */ + scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0); + scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0); + scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0); + scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, + MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS); + scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0); /* Disable SCP watchdog */ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG); @@ -774,9 +809,13 @@ static int scp_probe(struct platform_device *pdev) struct mtk_scp *scp; struct rproc *rproc; struct resource *res; - char *fw_name = "scp.img"; + const char *fw_name = "scp.img"; int ret, i; + ret = rproc_of_parse_firmware(dev, 0, &fw_name); + if (ret < 0 && ret != -EINVAL) + return ret; + rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp)); if (!rproc) return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n"); @@ -877,7 +916,6 @@ static int scp_remove(struct platform_device *pdev) for (i = 0; i < SCP_IPI_MAX; i++) mutex_destroy(&scp->ipi_desc[i].lock); mutex_destroy(&scp->send_lock); - rproc_free(scp->rproc); return 0; } @@ -922,11 +960,11 @@ static const struct mtk_scp_of_data mt8192_of_data = { static const struct mtk_scp_of_data mt8195_of_data = { .scp_clk_get = mt8195_scp_clk_get, - .scp_before_load = mt8192_scp_before_load, + .scp_before_load = mt8195_scp_before_load, .scp_irq_handler = mt8192_scp_irq_handler, .scp_reset_assert = mt8192_scp_reset_assert, .scp_reset_deassert = mt8192_scp_reset_deassert, - .scp_stop = mt8192_scp_stop, + .scp_stop = mt8195_scp_stop, .scp_da_to_va = mt8192_scp_da_to_va, .host_to_scp_reg = MT8192_GIPC_IN_SET, .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 1ae47cc153e5..6ae39c5653b1 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -704,6 +704,36 @@ static const struct adsp_data sm8250_cdsp_resource = { .ssctl_id = 0x17, }; +static const struct adsp_data sc8280xp_nsp0_resource = { + .crash_reason_smem = 601, + .firmware_name = "cdsp.mdt", + .pas_id = 18, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "nsp", + NULL + }, + .ssr_name = "cdsp0", + .sysmon_name = "cdsp", + .ssctl_id = 0x17, +}; + +static const struct adsp_data sc8280xp_nsp1_resource = { + .crash_reason_smem = 633, + .firmware_name = "cdsp.mdt", + .pas_id = 30, + .has_aggre2_clk = false, + .auto_boot = true, + .proxy_pd_names = (char*[]){ + "nsp", + NULL + }, + .ssr_name = "cdsp1", + .sysmon_name = "cdsp1", + .ssctl_id = 0x20, +}; + static const struct adsp_data sm8350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", @@ -848,6 +878,7 @@ static const struct adsp_data sdx55_mpss_resource = { }; static const struct of_device_id adsp_of_match[] = { + { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init}, { .compatible = "qcom,msm8996-adsp-pil", .data = 
&msm8996_adsp_resource}, { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init}, @@ -861,6 +892,9 @@ static const struct of_device_id adsp_of_match[] = { { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource}, { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource}, { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource}, + { .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource}, + { .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource}, + { .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource}, { .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init}, { .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init}, { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init}, diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c index 906ff3c4dfdd..687f205fd70a 100644 --- a/drivers/remoteproc/remoteproc_cdev.c +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -32,21 +32,10 @@ static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_ return -EFAULT; if (!strncmp(cmd, "start", len)) { - if (rproc->state == RPROC_RUNNING || - rproc->state == RPROC_ATTACHED) - return -EBUSY; - ret = rproc_boot(rproc); } else if (!strncmp(cmd, "stop", len)) { - if (rproc->state != RPROC_RUNNING && - rproc->state != RPROC_ATTACHED) - return -EINVAL; - ret = rproc_shutdown(rproc); } else if (!strncmp(cmd, "detach", len)) { - if (rproc->state != RPROC_ATTACHED) - return -EINVAL; - ret = rproc_detach(rproc); } else { dev_err(&rproc->dev, "Unrecognized option\n"); diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index c510125769b9..02a04ab34a23 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -684,10 +684,6 @@ static int rproc_handle_trace(struct rproc *rproc, void *ptr, /* create the debugfs entry */ trace->tfile = rproc_create_trace_file(name, rproc, trace); - if (!trace->tfile) { - kfree(trace); - return -EINVAL; - } list_add_tail(&trace->node, &rproc->traces); @@ -2075,6 +2071,12 @@ int rproc_shutdown(struct rproc *rproc) return ret; } + if (rproc->state != RPROC_RUNNING && + rproc->state != RPROC_ATTACHED) { + ret = -EINVAL; + goto out; + } + /* if the remote proc is still needed, bail out */ if (!atomic_dec_and_test(&rproc->power)) goto out; @@ -2134,6 +2136,11 @@ int rproc_detach(struct rproc *rproc) return ret; } + if (rproc->state != RPROC_ATTACHED) { + ret = -EINVAL; + goto out; + } + /* if the remote proc is still needed, bail out */ if (!atomic_dec_and_test(&rproc->power)) { ret = 0; diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 581930483ef8..b86c1d09c70c 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -386,16 +386,8 @@ void rproc_remove_trace_file(struct dentry *tfile) struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc, struct rproc_debug_trace *trace) { - struct dentry *tfile; - - tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace, + return debugfs_create_file(name, 0400, rproc->dbg_dir, trace, &trace_rproc_ops); - if (!tfile) { - dev_err(&rproc->dev, "failed to create debugfs trace entry\n"); - return NULL; - } - - return tfile; } void rproc_delete_debug_dir(struct rproc *rproc) @@ -411,8 +403,6 @@ void rproc_create_debug_dir(struct rproc *rproc) return; 
rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg); - if (!rproc->dbg_dir) - return; debugfs_create_file("name", 0400, rproc->dbg_dir, rproc, &rproc_name_ops); @@ -430,11 +420,8 @@ void rproc_create_debug_dir(struct rproc *rproc) void __init rproc_init_debugfs(void) { - if (debugfs_initialized()) { + if (debugfs_initialized()) rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (!rproc_dbg) - pr_err("can't create debugfs dir\n"); - } } void __exit rproc_exit_debugfs(void) diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c index d635d19a5aa8..5a412d7b6e0b 100644 --- a/drivers/remoteproc/remoteproc_elf_loader.c +++ b/drivers/remoteproc/remoteproc_elf_loader.c @@ -181,7 +181,7 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw) bool is_iomem = false; void *ptr; - if (type != PT_LOAD) + if (type != PT_LOAD || !memsz) continue; dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n", diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 51a04bc6ba7a..8c7ea8922638 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -194,23 +194,12 @@ static ssize_t state_store(struct device *dev, int ret = 0; if (sysfs_streq(buf, "start")) { - if (rproc->state == RPROC_RUNNING || - rproc->state == RPROC_ATTACHED) - return -EBUSY; - ret = rproc_boot(rproc); if (ret) dev_err(&rproc->dev, "Boot failed: %d\n", ret); } else if (sysfs_streq(buf, "stop")) { - if (rproc->state != RPROC_RUNNING && - rproc->state != RPROC_ATTACHED) - return -EINVAL; - ret = rproc_shutdown(rproc); } else if (sysfs_streq(buf, "detach")) { - if (rproc->state != RPROC_ATTACHED) - return -EINVAL; - ret = rproc_detach(rproc); } else { dev_err(&rproc->dev, "Unrecognised option: %s\n", buf); diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c index 764c980507be..1957b27c4cf3 100644 --- a/drivers/rpmsg/qcom_smd.c +++ b/drivers/rpmsg/qcom_smd.c @@ -1407,9 +1407,9 @@ static int qcom_smd_parse_edge(struct device *dev, edge->name = node->name; irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { + if (!irq) { dev_err(dev, "required smd interrupt missing\n"); - ret = irq; + ret = -EINVAL; goto put_node; } diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 3ede25b1f2e4..905ac7910c98 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -851,7 +851,7 @@ static struct rpmsg_device *rpmsg_virtio_add_ctrl_dev(struct virtio_device *vdev err = rpmsg_ctrldev_register_device(rpdev_ctrl); if (err) { - kfree(vch); + /* vch will be free in virtio_rpmsg_release_device() */ return ERR_PTR(err); } @@ -862,7 +862,7 @@ static void rpmsg_virtio_del_ctrl_dev(struct rpmsg_device *rpdev_ctrl) { if (!rpdev_ctrl) return; - kfree(to_virtio_rpmsg_channel(rpdev_ctrl)); + device_unregister(&rpdev_ctrl->dev); } static int rpmsg_probe(struct virtio_device *vdev) @@ -973,7 +973,8 @@ static int rpmsg_probe(struct virtio_device *vdev) err = rpmsg_ns_register_device(rpdev_ns); if (err) - goto free_vch; + /* vch will be free in virtio_rpmsg_release_device() */ + goto free_ctrldev; } /* @@ -997,8 +998,6 @@ static int rpmsg_probe(struct virtio_device *vdev) return 0; -free_vch: - kfree(vch); free_ctrldev: rpmsg_virtio_del_ctrl_dev(rpdev_ctrl); free_coherent: diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 41c65b4d2baf..a00f901b5c1d 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -1548,6 
+1548,13 @@ config RTC_DRV_RS5C313 help If you say yes here you get support for the Ricoh RS5C313 RTC chips. +config RTC_DRV_RZN1 + tristate "Renesas RZ/N1 RTC" + depends on ARCH_RZN1 || COMPILE_TEST + depends on OF && HAS_IOMEM + help + If you say yes here you get support for the Renesas RZ/N1 RTC. + config RTC_DRV_GENERIC tristate "Generic RTC support" # Please consider writing a new RTC driver instead of using the generic diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 2d827d8261d5..fb04467b652d 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -151,6 +151,7 @@ obj-$(CONFIG_RTC_DRV_RX6110) += rtc-rx6110.o obj-$(CONFIG_RTC_DRV_RX8010) += rtc-rx8010.o obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o +obj-$(CONFIG_RTC_DRV_RZN1) += rtc-rzn1.o obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c index 53bb08fe1cd4..25c6e7d9570f 100644 --- a/drivers/rtc/rtc-ftrtc010.c +++ b/drivers/rtc/rtc-ftrtc010.c @@ -137,26 +137,34 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev) ret = clk_prepare_enable(rtc->extclk); if (ret) { dev_err(dev, "failed to enable EXTCLK\n"); - return ret; + goto err_disable_pclk; } } rtc->rtc_irq = platform_get_irq(pdev, 0); - if (rtc->rtc_irq < 0) - return rtc->rtc_irq; + if (rtc->rtc_irq < 0) { + ret = rtc->rtc_irq; + goto err_disable_extclk; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; + if (!res) { + ret = -ENODEV; + goto err_disable_extclk; + } rtc->rtc_base = devm_ioremap(dev, res->start, resource_size(res)); - if (!rtc->rtc_base) - return -ENOMEM; + if (!rtc->rtc_base) { + ret = -ENOMEM; + goto err_disable_extclk; + } rtc->rtc_dev = devm_rtc_allocate_device(dev); - if (IS_ERR(rtc->rtc_dev)) - return PTR_ERR(rtc->rtc_dev); + if (IS_ERR(rtc->rtc_dev)) { + ret = PTR_ERR(rtc->rtc_dev); + goto err_disable_extclk; + } rtc->rtc_dev->ops = &ftrtc010_rtc_ops; @@ -172,9 +180,15 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev) ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt, IRQF_SHARED, pdev->name, dev); if (unlikely(ret)) - return ret; + goto err_disable_extclk; return devm_rtc_register_device(rtc->rtc_dev); + +err_disable_extclk: + clk_disable_unprepare(rtc->extclk); +err_disable_pclk: + clk_disable_unprepare(rtc->pclk); + return ret; } static int ftrtc010_rtc_remove(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c index 18ca3b38b2d0..c2717bb52b2b 100644 --- a/drivers/rtc/rtc-gamecube.c +++ b/drivers/rtc/rtc-gamecube.c @@ -267,6 +267,7 @@ static int gamecube_rtc_read_offset_from_sram(struct priv *d) ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); if (ret) { pr_err("failed to get the RTC bias\n"); + iounmap(hw_srnprot); return -1; } diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c index 44bdc8b4a90d..db1d626edca5 100644 --- a/drivers/rtc/rtc-meson.c +++ b/drivers/rtc/rtc-meson.c @@ -399,7 +399,7 @@ static struct platform_driver meson_rtc_driver = { module_platform_driver(meson_rtc_driver); MODULE_DESCRIPTION("Amlogic Meson RTC Driver"); -MODULE_AUTHOR("Ben Dooks <ben.doosk@codethink.co.uk>"); +MODULE_AUTHOR("Ben Dooks <ben.dooks@codethink.co.uk>"); MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:meson-rtc"); diff --git 
a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c index 80dc479a6ff0..1d297af80f87 100644 --- a/drivers/rtc/rtc-mt6397.c +++ b/drivers/rtc/rtc-mt6397.c @@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; rtc->addr_base = res->start; rtc->data = of_device_get_match_data(&pdev->dev); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 0f08f22df869..53d4e253e81f 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -311,7 +311,7 @@ static int mxc_rtc_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; - pdata->devtype = (enum imx_rtc_type)of_device_get_match_data(&pdev->dev); + pdata->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev); pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pdata->ioaddr)) diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 9760824ec199..095891999da1 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -650,6 +650,7 @@ static int pcf85063_probe(struct i2c_client *client) } static const struct i2c_device_id pcf85063_ids[] = { + { "pca85073a", PCF85063A }, { "pcf85063", PCF85063 }, { "pcf85063tp", PCF85063TP }, { "pcf85063a", PCF85063A }, @@ -660,6 +661,7 @@ MODULE_DEVICE_TABLE(i2c, pcf85063_ids); #ifdef CONFIG_OF static const struct of_device_id pcf85063_of_match[] = { + { .compatible = "nxp,pca85073a", .data = &pcf85063_cfg[PCF85063A] }, { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] }, { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] }, { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] }, diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c index 5bfdd34a72ff..b32117ccd74b 100644 --- a/drivers/rtc/rtc-rx8025.c +++ b/drivers/rtc/rtc-rx8025.c @@ -436,7 +436,6 @@ static int rx8025_set_offset(struct device *dev, long offset) { struct i2c_client *client = to_i2c_client(dev); u8 digoff; - int err; offset /= RX8025_ADJ_RESOLUTION; if (offset > RX8025_ADJ_DATA_MAX) @@ -449,11 +448,7 @@ static int rx8025_set_offset(struct device *dev, long offset) offset += 128; digoff = offset; - err = rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff); - if (err) - return err; - - return 0; + return rx8025_write_reg(client, RX8025_REG_DIGOFF, digoff); } static const struct rtc_class_ops rx8025_rtc_ops = { diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c new file mode 100644 index 000000000000..ac788799c8e3 --- /dev/null +++ b/drivers/rtc/rtc-rzn1.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Renesas RZ/N1 Real Time Clock interface for Linux + * + * Copyright: + * - 2014 Renesas Electronics Europe Limited + * - 2022 Schneider Electric + * + * Authors: + * - Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com> + * - Miquel Raynal <miquel.raynal@bootlin.com> + */ + +#include <linux/bcd.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/rtc.h> + +#define RZN1_RTC_CTL0 0x00 +#define RZN1_RTC_CTL0_SLSB_SUBU 0 +#define RZN1_RTC_CTL0_SLSB_SCMP BIT(4) +#define RZN1_RTC_CTL0_AMPM BIT(5) +#define RZN1_RTC_CTL0_CE BIT(7) + +#define RZN1_RTC_CTL1 0x04 +#define RZN1_RTC_CTL1_ALME BIT(4) + +#define RZN1_RTC_CTL2 0x08 +#define RZN1_RTC_CTL2_WAIT BIT(0) +#define RZN1_RTC_CTL2_WST BIT(1) +#define RZN1_RTC_CTL2_WUST BIT(5) 
+#define RZN1_RTC_CTL2_STOPPED (RZN1_RTC_CTL2_WAIT | RZN1_RTC_CTL2_WST) + +#define RZN1_RTC_SEC 0x14 +#define RZN1_RTC_MIN 0x18 +#define RZN1_RTC_HOUR 0x1c +#define RZN1_RTC_WEEK 0x20 +#define RZN1_RTC_DAY 0x24 +#define RZN1_RTC_MONTH 0x28 +#define RZN1_RTC_YEAR 0x2c + +#define RZN1_RTC_SUBU 0x38 +#define RZN1_RTC_SUBU_DEV BIT(7) +#define RZN1_RTC_SUBU_DECR BIT(6) + +#define RZN1_RTC_ALM 0x40 +#define RZN1_RTC_ALH 0x44 +#define RZN1_RTC_ALW 0x48 + +#define RZN1_RTC_SECC 0x4c +#define RZN1_RTC_MINC 0x50 +#define RZN1_RTC_HOURC 0x54 +#define RZN1_RTC_WEEKC 0x58 +#define RZN1_RTC_DAYC 0x5c +#define RZN1_RTC_MONTHC 0x60 +#define RZN1_RTC_YEARC 0x64 + +struct rzn1_rtc { + struct rtc_device *rtcdev; + void __iomem *base; +}; + +static void rzn1_rtc_get_time_snapshot(struct rzn1_rtc *rtc, struct rtc_time *tm) +{ + tm->tm_sec = readl(rtc->base + RZN1_RTC_SECC); + tm->tm_min = readl(rtc->base + RZN1_RTC_MINC); + tm->tm_hour = readl(rtc->base + RZN1_RTC_HOURC); + tm->tm_wday = readl(rtc->base + RZN1_RTC_WEEKC); + tm->tm_mday = readl(rtc->base + RZN1_RTC_DAYC); + tm->tm_mon = readl(rtc->base + RZN1_RTC_MONTHC); + tm->tm_year = readl(rtc->base + RZN1_RTC_YEARC); +} + +static unsigned int rzn1_rtc_tm_to_wday(struct rtc_time *tm) +{ + time64_t time; + unsigned int days; + u32 secs; + + time = rtc_tm_to_time64(tm); + days = div_s64_rem(time, 86400, &secs); + + /* day of the week, 1970-01-01 was a Thursday */ + return (days + 4) % 7; +} + +static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 val, secs; + + /* + * The RTC was not started or is stopped and thus does not carry the + * proper time/date. + */ + val = readl(rtc->base + RZN1_RTC_CTL2); + if (val & RZN1_RTC_CTL2_STOPPED) + return -EINVAL; + + rzn1_rtc_get_time_snapshot(rtc, tm); + secs = readl(rtc->base + RZN1_RTC_SECC); + if (tm->tm_sec != secs) + rzn1_rtc_get_time_snapshot(rtc, tm); + + tm->tm_sec = bcd2bin(tm->tm_sec); + tm->tm_min = bcd2bin(tm->tm_min); + tm->tm_hour = bcd2bin(tm->tm_hour); + tm->tm_wday = bcd2bin(tm->tm_wday); + tm->tm_mday = bcd2bin(tm->tm_mday); + tm->tm_mon = bcd2bin(tm->tm_mon); + tm->tm_year = bcd2bin(tm->tm_year); + + return 0; +} + +static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 val; + int ret; + + tm->tm_sec = bin2bcd(tm->tm_sec); + tm->tm_min = bin2bcd(tm->tm_min); + tm->tm_hour = bin2bcd(tm->tm_hour); + tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm)); + tm->tm_mday = bin2bcd(tm->tm_mday); + tm->tm_mon = bin2bcd(tm->tm_mon); + tm->tm_year = bin2bcd(tm->tm_year); + + val = readl(rtc->base + RZN1_RTC_CTL2); + if (!(val & RZN1_RTC_CTL2_STOPPED)) { + /* Hold the counter if it was counting up */ + writel(RZN1_RTC_CTL2_WAIT, rtc->base + RZN1_RTC_CTL2); + + /* Wait for the counter to stop: two 32k clock cycles */ + usleep_range(61, 100); + ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, val, + val & RZN1_RTC_CTL2_WST, 0, 100); + if (ret) + return ret; + } + + writel(tm->tm_sec, rtc->base + RZN1_RTC_SEC); + writel(tm->tm_min, rtc->base + RZN1_RTC_MIN); + writel(tm->tm_hour, rtc->base + RZN1_RTC_HOUR); + writel(tm->tm_wday, rtc->base + RZN1_RTC_WEEK); + writel(tm->tm_mday, rtc->base + RZN1_RTC_DAY); + writel(tm->tm_mon, rtc->base + RZN1_RTC_MONTH); + writel(tm->tm_year, rtc->base + RZN1_RTC_YEAR); + writel(0, rtc->base + RZN1_RTC_CTL2); + + return 0; +} + +static irqreturn_t rzn1_rtc_alarm_irq(int irq, void *dev_id) +{ + struct rzn1_rtc *rtc = dev_id; + + 
rtc_update_irq(rtc->rtcdev, 1, RTC_AF | RTC_IRQF); + + return IRQ_HANDLED; +} + +static int rzn1_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + u32 ctl1 = readl(rtc->base + RZN1_RTC_CTL1); + + if (enable) + ctl1 |= RZN1_RTC_CTL1_ALME; + else + ctl1 &= ~RZN1_RTC_CTL1_ALME; + + writel(ctl1, rtc->base + RZN1_RTC_CTL1); + + return 0; +} + +static int rzn1_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time; + unsigned int min, hour, wday, delta_days; + time64_t alarm; + u32 ctl1; + int ret; + + ret = rzn1_rtc_read_time(dev, tm); + if (ret) + return ret; + + min = readl(rtc->base + RZN1_RTC_ALM); + hour = readl(rtc->base + RZN1_RTC_ALH); + wday = readl(rtc->base + RZN1_RTC_ALW); + + tm->tm_sec = 0; + tm->tm_min = bcd2bin(min); + tm->tm_hour = bcd2bin(hour); + delta_days = ((fls(wday) - 1) - tm->tm_wday + 7) % 7; + tm->tm_wday = fls(wday) - 1; + + if (delta_days) { + alarm = rtc_tm_to_time64(tm) + (delta_days * 86400); + rtc_time64_to_tm(alarm, tm); + } + + ctl1 = readl(rtc->base + RZN1_RTC_CTL1); + alrm->enabled = !!(ctl1 & RZN1_RTC_CTL1_ALME); + + return 0; +} + +static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alrm->time, tm_now; + unsigned long alarm, farest; + unsigned int days_ahead, wday; + int ret; + + ret = rzn1_rtc_read_time(dev, &tm_now); + if (ret) + return ret; + + /* We cannot set alarms more than one week ahead */ + farest = rtc_tm_to_time64(&tm_now) + (7 * 86400); + alarm = rtc_tm_to_time64(tm); + if (time_after(alarm, farest)) + return -ERANGE; + + /* Convert alarm day into week day */ + days_ahead = tm->tm_mday - tm_now.tm_mday; + wday = (tm_now.tm_wday + days_ahead) % 7; + + writel(bin2bcd(tm->tm_min), rtc->base + RZN1_RTC_ALM); + writel(bin2bcd(tm->tm_hour), rtc->base + RZN1_RTC_ALH); + writel(BIT(wday), rtc->base + RZN1_RTC_ALW); + + rzn1_rtc_alarm_irq_enable(dev, alrm->enabled); + + return 0; +} + +static int rzn1_rtc_read_offset(struct device *dev, long *offset) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + unsigned int ppb_per_step; + bool subtract; + u32 val; + + val = readl(rtc->base + RZN1_RTC_SUBU); + ppb_per_step = val & RZN1_RTC_SUBU_DEV ? 1017 : 3051; + subtract = val & RZN1_RTC_SUBU_DECR; + val &= 0x3F; + + if (!val) + *offset = 0; + else if (subtract) + *offset = -(((~val) & 0x3F) + 1) * ppb_per_step; + else + *offset = (val - 1) * ppb_per_step; + + return 0; +} + +static int rzn1_rtc_set_offset(struct device *dev, long offset) +{ + struct rzn1_rtc *rtc = dev_get_drvdata(dev); + int stepsh, stepsl, steps; + u32 subu = 0, ctl2; + int ret; + + /* + * Check which resolution mode (every 20 or 60s) can be used. + * Between 2 and 124 clock pulses can be added or substracted. + * + * In 20s mode, the minimum resolution is 2 / (32768 * 20) which is + * close to 3051 ppb. In 60s mode, the resolution is closer to 1017. 
+ */ + stepsh = DIV_ROUND_CLOSEST(offset, 1017); + stepsl = DIV_ROUND_CLOSEST(offset, 3051); + + if (stepsh >= -0x3E && stepsh <= 0x3E) { + /* 1017 ppb per step */ + steps = stepsh; + subu |= RZN1_RTC_SUBU_DEV; + } else if (stepsl >= -0x3E && stepsl <= 0x3E) { + /* 3051 ppb per step */ + steps = stepsl; + } else { + return -ERANGE; + } + + if (!steps) + return 0; + + if (steps > 0) { + subu |= steps + 1; + } else { + subu |= RZN1_RTC_SUBU_DECR; + subu |= (~(-steps - 1)) & 0x3F; + } + + ret = readl_poll_timeout(rtc->base + RZN1_RTC_CTL2, ctl2, + !(ctl2 & RZN1_RTC_CTL2_WUST), 100, 2000000); + if (ret) + return ret; + + writel(subu, rtc->base + RZN1_RTC_SUBU); + + return 0; +} + +static const struct rtc_class_ops rzn1_rtc_ops = { + .read_time = rzn1_rtc_read_time, + .set_time = rzn1_rtc_set_time, + .read_alarm = rzn1_rtc_read_alarm, + .set_alarm = rzn1_rtc_set_alarm, + .alarm_irq_enable = rzn1_rtc_alarm_irq_enable, + .read_offset = rzn1_rtc_read_offset, + .set_offset = rzn1_rtc_set_offset, +}; + +static int rzn1_rtc_probe(struct platform_device *pdev) +{ + struct rzn1_rtc *rtc; + int alarm_irq; + int ret; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + platform_set_drvdata(pdev, rtc); + + rtc->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(rtc->base)) + return dev_err_probe(&pdev->dev, PTR_ERR(rtc->base), "Missing reg\n"); + + alarm_irq = platform_get_irq(pdev, 0); + if (alarm_irq < 0) + return alarm_irq; + + rtc->rtcdev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtcdev)) + return PTR_ERR(rtc->rtcdev); + + rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000; + rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099; + rtc->rtcdev->ops = &rzn1_rtc_ops; + set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features); + clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features); + + devm_pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; + + /* + * Ensure the clock counter is enabled. + * Set 24-hour mode and possible oscillator offset compensation in SUBU mode. 
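+	 * (With the definitions above this writes 0xa0: CE | AMPM set, SLSB
+	 * cleared to select SUBU mode.)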
+ */ + writel(RZN1_RTC_CTL0_CE | RZN1_RTC_CTL0_AMPM | RZN1_RTC_CTL0_SLSB_SUBU, + rtc->base + RZN1_RTC_CTL0); + + /* Disable all interrupts */ + writel(0, rtc->base + RZN1_RTC_CTL1); + + ret = devm_request_irq(&pdev->dev, alarm_irq, rzn1_rtc_alarm_irq, 0, + dev_name(&pdev->dev), rtc); + if (ret) { + dev_err(&pdev->dev, "RTC timer interrupt not available\n"); + goto dis_runtime_pm; + } + + ret = devm_rtc_register_device(rtc->rtcdev); + if (ret) + goto dis_runtime_pm; + + return 0; + +dis_runtime_pm: + pm_runtime_put(&pdev->dev); + + return ret; +} + +static int rzn1_rtc_remove(struct platform_device *pdev) +{ + pm_runtime_put(&pdev->dev); + + return 0; +} + +static const struct of_device_id rzn1_rtc_of_match[] = { + { .compatible = "renesas,rzn1-rtc" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rzn1_rtc_of_match); + +static struct platform_driver rzn1_rtc_driver = { + .probe = rzn1_rtc_probe, + .remove = rzn1_rtc_remove, + .driver = { + .name = "rzn1-rtc", + .of_match_table = rzn1_rtc_of_match, + }, +}; +module_platform_driver(rzn1_rtc_driver); + +MODULE_AUTHOR("Michel Pollet <Michel.Pollet@bp.renesas.com"); +MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com"); +MODULE_DESCRIPTION("RZ/N1 RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 5252ce4cbda4..57540727ce1c 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -71,6 +71,10 @@ #define SUN6I_LOSC_OUT_GATING 0x0060 #define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0 +/* General-purpose data */ +#define SUN6I_GP_DATA 0x0100 +#define SUN6I_GP_DATA_SIZE 0x20 + /* * Get date values */ @@ -679,6 +683,39 @@ static const struct rtc_class_ops sun6i_rtc_ops = { .alarm_irq_enable = sun6i_rtc_alarm_irq_enable }; +static int sun6i_rtc_nvmem_read(void *priv, unsigned int offset, void *_val, size_t bytes) +{ + struct sun6i_rtc_dev *chip = priv; + u32 *val = _val; + int i; + + for (i = 0; i < bytes / 4; ++i) + val[i] = readl(chip->base + SUN6I_GP_DATA + offset + 4 * i); + + return 0; +} + +static int sun6i_rtc_nvmem_write(void *priv, unsigned int offset, void *_val, size_t bytes) +{ + struct sun6i_rtc_dev *chip = priv; + u32 *val = _val; + int i; + + for (i = 0; i < bytes / 4; ++i) + writel(val[i], chip->base + SUN6I_GP_DATA + offset + 4 * i); + + return 0; +} + +static struct nvmem_config sun6i_rtc_nvmem_cfg = { + .type = NVMEM_TYPE_BATTERY_BACKED, + .reg_read = sun6i_rtc_nvmem_read, + .reg_write = sun6i_rtc_nvmem_write, + .size = SUN6I_GP_DATA_SIZE, + .word_size = 4, + .stride = 4, +}; + #ifdef CONFIG_PM_SLEEP /* Enable IRQ wake on suspend, to wake up from RTC. 
*/ static int sun6i_rtc_suspend(struct device *dev) @@ -812,6 +849,11 @@ static int sun6i_rtc_probe(struct platform_device *pdev) if (ret) return ret; + sun6i_rtc_nvmem_cfg.priv = chip; + ret = devm_rtc_nvmem_register(chip->rtc, &sun6i_rtc_nvmem_cfg); + if (ret) + return ret; + dev_info(&pdev->dev, "RTC enabled\n"); return 0; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index d614843caf6c..8d0d0eaa3059 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -32,7 +32,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); static void dcssblk_submit_bio(struct bio *bio); static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn); + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -50,7 +51,8 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, long rc; void *kaddr; - rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL); + rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS, + &kaddr, NULL); if (rc < 0) return rc; memset(kaddr, 0, nr_pages << PAGE_SHIFT); @@ -927,7 +929,8 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, - long nr_pages, void **kaddr, pfn_t *pfn) + long nr_pages, enum dax_access_mode mode, void **kaddr, + pfn_t *pfn) { struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev); diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index 8d1b2771c1aa..0c2be9421ab7 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -16,6 +16,7 @@ #include <asm/idals.h> #include "vfio_ccw_cp.h" +#include "vfio_ccw_private.h" struct pfn_array { /* Starting guest physical I/O address. */ @@ -98,17 +99,17 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len) * If the pin request partially succeeds, or fails completely, * all pages are left unpinned and a negative error value is returned. */ -static int pfn_array_pin(struct pfn_array *pa, struct device *mdev) +static int pfn_array_pin(struct pfn_array *pa, struct vfio_device *vdev) { int ret = 0; - ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, + ret = vfio_pin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr, IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); if (ret < 0) { goto err_out; } else if (ret > 0 && ret != pa->pa_nr) { - vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); + vfio_unpin_pages(vdev, pa->pa_iova_pfn, ret); ret = -EINVAL; goto err_out; } @@ -122,11 +123,11 @@ err_out: } /* Unpin the pages before releasing the memory. */ -static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) +static void pfn_array_unpin_free(struct pfn_array *pa, struct vfio_device *vdev) { /* Only unpin if any pages were pinned to begin with */ if (pa->pa_nr) - vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); + vfio_unpin_pages(vdev, pa->pa_iova_pfn, pa->pa_nr); pa->pa_nr = 0; kfree(pa->pa_iova_pfn); } @@ -190,8 +191,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len) * Within the domain (@mdev), copy @n bytes from a guest physical * address (@iova) to a host physical address (@to). 
*/ -static long copy_from_iova(struct device *mdev, - void *to, u64 iova, +static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova, unsigned long n) { struct pfn_array pa = {0}; @@ -203,9 +203,9 @@ static long copy_from_iova(struct device *mdev, if (ret < 0) return ret; - ret = pfn_array_pin(&pa, mdev); + ret = pfn_array_pin(&pa, vdev); if (ret < 0) { - pfn_array_unpin_free(&pa, mdev); + pfn_array_unpin_free(&pa, vdev); return ret; } @@ -226,7 +226,7 @@ static long copy_from_iova(struct device *mdev, break; } - pfn_array_unpin_free(&pa, mdev); + pfn_array_unpin_free(&pa, vdev); return l; } @@ -423,11 +423,13 @@ static int ccwchain_loop_tic(struct ccwchain *chain, static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp) { + struct vfio_device *vdev = + &container_of(cp, struct vfio_ccw_private, cp)->vdev; struct ccwchain *chain; int len, ret; /* Copy 2K (the most we support today) of possible CCWs */ - len = copy_from_iova(cp->mdev, cp->guest_cp, cda, + len = copy_from_iova(vdev, cp->guest_cp, cda, CCWCHAIN_LEN_MAX * sizeof(struct ccw1)); if (len) return len; @@ -508,6 +510,8 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, int idx, struct channel_program *cp) { + struct vfio_device *vdev = + &container_of(cp, struct vfio_ccw_private, cp)->vdev; struct ccw1 *ccw; struct pfn_array *pa; u64 iova; @@ -526,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, if (ccw_is_idal(ccw)) { /* Read first IDAW to see if it's 4K-aligned or not. */ /* All subsequent IDAws will be 4K-aligned. */ - ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova)); + ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova)); if (ret) return ret; } else { @@ -555,7 +559,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, if (ccw_is_idal(ccw)) { /* Copy guest IDAL into host IDAL */ - ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idal_len); + ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len); if (ret) goto out_unpin; @@ -574,7 +578,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, } if (ccw_does_data_transfer(ccw)) { - ret = pfn_array_pin(pa, cp->mdev); + ret = pfn_array_pin(pa, vdev); if (ret < 0) goto out_unpin; } else { @@ -590,7 +594,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, return 0; out_unpin: - pfn_array_unpin_free(pa, cp->mdev); + pfn_array_unpin_free(pa, vdev); out_free_idaws: kfree(idaws); out_init: @@ -632,8 +636,10 @@ static int ccwchain_fetch_one(struct ccwchain *chain, * Returns: * %0 on success and a negative error value on failure. */ -int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) +int cp_init(struct channel_program *cp, union orb *orb) { + struct vfio_device *vdev = + &container_of(cp, struct vfio_ccw_private, cp)->vdev; /* custom ratelimit used to avoid flood during guest IPL */ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1); int ret; @@ -650,11 +656,12 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) * the problem if something does break. 
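[Editorial aside] The prefetch warning issued by cp_init() just below is throttled with the kernel's ratelimit helper so that a guest IPL cannot flood the log. A minimal sketch of that idiom; the condition and device pointer are illustrative names, not taken from the patch:

#include <linux/device.h>
#include <linux/ratelimit.h>

/* Hedged sketch: at most one warning per 5-second window (burst of 1),
 * mirroring the DEFINE_RATELIMIT_STATE(..., 5 * HZ, 1) used in cp_init(). */
static void example_warn_prefetch(struct device *dev, bool orb_lacks_prefetch)
{
        static DEFINE_RATELIMIT_STATE(example_rs, 5 * HZ, 1);

        if (orb_lacks_prefetch && __ratelimit(&example_rs))
                dev_warn(dev, "Prefetching channel program even though prefetch not specified in ORB");
}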
*/ if (!orb->cmd.pfch && __ratelimit(&ratelimit_state)) - dev_warn(mdev, "Prefetching channel program even though prefetch not specified in ORB"); + dev_warn( + vdev->dev, + "Prefetching channel program even though prefetch not specified in ORB"); INIT_LIST_HEAD(&cp->ccwchain_list); memcpy(&cp->orb, orb, sizeof(*orb)); - cp->mdev = mdev; /* Build a ccwchain for the first CCW segment */ ret = ccwchain_handle_ccw(orb->cmd.cpa, cp); @@ -682,6 +689,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) */ void cp_free(struct channel_program *cp) { + struct vfio_device *vdev = + &container_of(cp, struct vfio_ccw_private, cp)->vdev; struct ccwchain *chain, *temp; int i; @@ -691,7 +700,7 @@ void cp_free(struct channel_program *cp) cp->initialized = false; list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { for (i = 0; i < chain->ch_len; i++) { - pfn_array_unpin_free(chain->ch_pa + i, cp->mdev); + pfn_array_unpin_free(chain->ch_pa + i, vdev); ccwchain_cda_free(chain, i); } ccwchain_free(chain); diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h index ba31240ce965..e4c436199b4c 100644 --- a/drivers/s390/cio/vfio_ccw_cp.h +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -37,13 +37,11 @@ struct channel_program { struct list_head ccwchain_list; union orb orb; - struct device *mdev; bool initialized; struct ccw1 *guest_cp; }; -extern int cp_init(struct channel_program *cp, struct device *mdev, - union orb *orb); +extern int cp_init(struct channel_program *cp, union orb *orb); extern void cp_free(struct channel_program *cp); extern int cp_prefetch(struct channel_program *cp); extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index e435a9cd92da..8483a266051c 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -262,8 +262,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, errstr = "transport mode"; goto err_out; } - io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), - orb); + io_region->ret_code = cp_init(&private->cp, orb); if (io_region->ret_code) { VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): cp_init=%d\n", diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index c4d60cdbf247..b49e2e9db2dc 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -183,7 +183,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev) private->nb.notifier_call = vfio_ccw_mdev_notifier; - ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, + ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events, &private->nb); if (ret) return ret; @@ -204,8 +204,7 @@ static int vfio_ccw_mdev_open_device(struct vfio_device *vdev) out_unregister: vfio_ccw_unregister_dev_regions(private); - vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, - &private->nb); + vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb); return ret; } @@ -223,7 +222,7 @@ static void vfio_ccw_mdev_close_device(struct vfio_device *vdev) cp_free(&private->cp); vfio_ccw_unregister_dev_regions(private); - vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &private->nb); + vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &private->nb); } static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private, diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index ee0a3bf8f476..a7d2a95796d3 100644 --- 
a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -124,8 +124,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) q->saved_isc = VFIO_AP_ISC_INVALID; } if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { - vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), - &q->saved_pfn, 1); + vfio_unpin_pages(&q->matrix_mdev->vdev, &q->saved_pfn, 1); q->saved_pfn = 0; } } @@ -258,7 +257,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, return status; } - ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1, + ret = vfio_pin_pages(&q->matrix_mdev->vdev, &g_pfn, 1, IOMMU_READ | IOMMU_WRITE, &h_pfn); switch (ret) { case 1: @@ -301,7 +300,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, break; case AP_RESPONSE_OTHERWISE_CHANGED: /* We could not modify IRQ setings: clear new configuration */ - vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1); + vfio_unpin_pages(&q->matrix_mdev->vdev, &g_pfn, 1); kvm_s390_gisc_unregister(kvm, isc); break; default: @@ -1250,7 +1249,7 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb, struct vfio_iommu_type1_dma_unmap *unmap = data; unsigned long g_pfn = unmap->iova >> PAGE_SHIFT; - vfio_unpin_pages(mdev_dev(matrix_mdev->mdev), &g_pfn, 1); + vfio_unpin_pages(&matrix_mdev->vdev, &g_pfn, 1); return NOTIFY_OK; } @@ -1285,25 +1284,6 @@ static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) } } -static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - int notify_rc = NOTIFY_OK; - struct ap_matrix_mdev *matrix_mdev; - - if (action != VFIO_GROUP_NOTIFY_SET_KVM) - return NOTIFY_OK; - - matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); - - if (!data) - vfio_ap_mdev_unset_kvm(matrix_mdev); - else if (vfio_ap_mdev_set_kvm(matrix_mdev, data)) - notify_rc = NOTIFY_DONE; - - return notify_rc; -} - static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) { struct device *dev; @@ -1403,25 +1383,23 @@ static int vfio_ap_mdev_open_device(struct vfio_device *vdev) unsigned long events; int ret; - matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier; - events = VFIO_GROUP_NOTIFY_SET_KVM; + if (!vdev->kvm) + return -EINVAL; - ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY, - &events, &matrix_mdev->group_notifier); + ret = vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm); if (ret) return ret; matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier; events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; - ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, - &events, &matrix_mdev->iommu_notifier); + ret = vfio_register_notifier(vdev, VFIO_IOMMU_NOTIFY, &events, + &matrix_mdev->iommu_notifier); if (ret) - goto out_unregister_group; + goto err_kvm; return 0; -out_unregister_group: - vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY, - &matrix_mdev->group_notifier); +err_kvm: + vfio_ap_mdev_unset_kvm(matrix_mdev); return ret; } @@ -1430,10 +1408,8 @@ static void vfio_ap_mdev_close_device(struct vfio_device *vdev) struct ap_matrix_mdev *matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev); - vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, + vfio_unregister_notifier(vdev, VFIO_IOMMU_NOTIFY, &matrix_mdev->iommu_notifier); - vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY, - &matrix_mdev->group_notifier); vfio_ap_mdev_unset_kvm(matrix_mdev); } diff --git a/drivers/s390/crypto/vfio_ap_private.h 
b/drivers/s390/crypto/vfio_ap_private.h index 648fcaf8104a..a26efd804d0d 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -81,8 +81,6 @@ struct ap_matrix { * @node: allows the ap_matrix_mdev struct to be added to a list * @matrix: the adapters, usage domains and control domains assigned to the * mediated matrix device. - * @group_notifier: notifier block used for specifying callback function for - * handling the VFIO_GROUP_NOTIFY_SET_KVM event * @iommu_notifier: notifier block used for specifying callback function for * handling the VFIO_IOMMU_NOTIFY_DMA_UNMAP even * @kvm: the struct holding guest's state @@ -94,7 +92,6 @@ struct ap_matrix_mdev { struct vfio_device vdev; struct list_head node; struct ap_matrix matrix; - struct notifier_block group_notifier; struct notifier_block iommu_notifier; struct kvm *kvm; crypto_hook pqap_hook; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 9a0bba5a51a7..08ed059a738b 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -54,7 +54,6 @@ #define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \ (((MINOR_) & 0xff))) - #define VMSTOR_PROTO_VERSION_WIN6 VMSTOR_PROTO_VERSION(2, 0) #define VMSTOR_PROTO_VERSION_WIN7 VMSTOR_PROTO_VERSION(4, 2) #define VMSTOR_PROTO_VERSION_WIN8 VMSTOR_PROTO_VERSION(5, 1) @@ -136,21 +135,11 @@ struct hv_fc_wwn_packet { */ #define STORVSC_MAX_CMD_LEN 0x10 -#define POST_WIN7_STORVSC_SENSE_BUFFER_SIZE 0x14 -#define PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE 0x12 - +/* Sense buffer size is the same for all versions since Windows 8 */ #define STORVSC_SENSE_BUFFER_SIZE 0x14 #define STORVSC_MAX_BUF_LEN_WITH_PADDING 0x14 /* - * Sense buffer size changed in win8; have a run-time - * variable to track the size we should use. This value will - * likely change during protocol negotiation but it is valid - * to start by assuming pre-Win8. - */ -static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE; - -/* * The storage protocol version is determined during the * initial exchange with the host. It will indicate which * storage functionality is available in the host. @@ -177,18 +166,6 @@ do { \ dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ } while (0) -struct vmscsi_win8_extension { - /* - * The following were added in Windows 8 - */ - u16 reserve; - u8 queue_tag; - u8 queue_action; - u32 srb_flags; - u32 time_out_value; - u32 queue_sort_ey; -} __packed; - struct vmscsi_request { u16 length; u8 srb_status; @@ -214,46 +191,23 @@ struct vmscsi_request { /* * The following was added in win8. */ - struct vmscsi_win8_extension win8_extension; + u16 reserve; + u8 queue_tag; + u8 queue_action; + u32 srb_flags; + u32 time_out_value; + u32 queue_sort_ey; } __attribute((packed)); /* - * The list of storage protocols in order of preference. + * The list of windows version in order of preference. 
*/ -struct vmstor_protocol { - int protocol_version; - int sense_buffer_size; - int vmscsi_size_delta; -}; - -static const struct vmstor_protocol vmstor_protocols[] = { - { +static const int protocol_version[] = { VMSTOR_PROTO_VERSION_WIN10, - POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, - 0 - }, - { VMSTOR_PROTO_VERSION_WIN8_1, - POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, - 0 - }, - { VMSTOR_PROTO_VERSION_WIN8, - POST_WIN7_STORVSC_SENSE_BUFFER_SIZE, - 0 - }, - { - VMSTOR_PROTO_VERSION_WIN7, - PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, - sizeof(struct vmscsi_win8_extension), - }, - { - VMSTOR_PROTO_VERSION_WIN6, - PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE, - sizeof(struct vmscsi_win8_extension), - } }; @@ -409,9 +363,7 @@ static void storvsc_on_channel_callback(void *context); #define STORVSC_IDE_MAX_CHANNELS 1 /* - * Upper bound on the size of a storvsc packet. vmscsi_size_delta is not - * included in the calculation because it is set after STORVSC_MAX_PKT_SIZE - * is used in storvsc_connect_to_vsp + * Upper bound on the size of a storvsc packet. */ #define STORVSC_MAX_PKT_SIZE (sizeof(struct vmpacket_descriptor) +\ sizeof(struct vstor_packet)) @@ -453,17 +405,6 @@ struct storvsc_device { unsigned char target_id; /* - * The size of the vmscsi_request has changed in win8. The - * additional size is because of new elements added to the - * structure. These elements are valid only when we are talking - * to a win8 host. - * Track the correction to size we need to apply. This value - * will likely change during protocol negotiation but it is - * valid to start by assuming pre-Win8. - */ - int vmscsi_size_delta; - - /* * Max I/O, the device can support. */ u32 max_transfer_bytes; @@ -795,8 +736,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns) vstor_packet->sub_channel_count = num_sc; ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta), + sizeof(struct vstor_packet), VMBUS_RQST_INIT, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -864,8 +804,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device, vstor_packet->flags = REQUEST_COMPLETION_FLAG; ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta), + sizeof(struct vstor_packet), VMBUS_RQST_INIT, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -915,14 +854,13 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc) * Query host supported protocol version. */ - for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) { + for (i = 0; i < ARRAY_SIZE(protocol_version); i++) { /* reuse the packet for version range supported */ memset(vstor_packet, 0, sizeof(struct vstor_packet)); vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION; - vstor_packet->version.major_minor = - vmstor_protocols[i].protocol_version; + vstor_packet->version.major_minor = protocol_version[i]; /* * The revision number is only used in Windows; set it to 0. 
@@ -936,21 +874,16 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc) return -EINVAL; if (vstor_packet->status == 0) { - vmstor_proto_version = - vmstor_protocols[i].protocol_version; - - sense_buffer_size = - vmstor_protocols[i].sense_buffer_size; - - stor_device->vmscsi_size_delta = - vmstor_protocols[i].vmscsi_size_delta; + vmstor_proto_version = protocol_version[i]; break; } } - if (vstor_packet->status != 0) + if (vstor_packet->status != 0) { + dev_err(&device->device, "Obsolete Hyper-V version\n"); return -EINVAL; + } memset(vstor_packet, 0, sizeof(struct vstor_packet)); @@ -986,11 +919,10 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc) cpumask_set_cpu(device->channel->target_cpu, &stor_device->alloced_cpus); - if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) { - if (vstor_packet->storage_channel_properties.flags & - STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) - process_sub_channels = true; - } + if (vstor_packet->storage_channel_properties.flags & + STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL) + process_sub_channels = true; + stor_device->max_transfer_bytes = vstor_packet->storage_channel_properties.max_transfer_bytes; @@ -1197,7 +1129,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, * Copy over the sense_info_length, but limit to the known max * size if Hyper-V returns a bad value. */ - stor_pkt->vm_srb.sense_info_length = min_t(u8, sense_buffer_size, + stor_pkt->vm_srb.sense_info_length = min_t(u8, STORVSC_SENSE_BUFFER_SIZE, vstor_packet->vm_srb.sense_info_length); if (vstor_packet->vm_srb.scsi_status != 0 || @@ -1289,8 +1221,8 @@ static void storvsc_on_channel_callback(void *context) struct storvsc_cmd_request *request = NULL; u32 pktlen = hv_pkt_datalen(desc); u64 rqst_id = desc->trans_id; - u32 minlen = rqst_id ? sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation); + u32 minlen = rqst_id ? 
sizeof(struct vstor_packet) : + sizeof(enum vstor_packet_operation); if (pktlen < minlen) { dev_err(&device->device, @@ -1346,7 +1278,7 @@ static void storvsc_on_channel_callback(void *context) } memcpy(&request->vstor_packet, packet, - (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta)); + sizeof(struct vstor_packet)); complete(&request->wait_event); } } @@ -1557,11 +1489,10 @@ static int storvsc_do_io(struct hv_device *device, found_channel: vstor_packet->flags |= REQUEST_COMPLETION_FLAG; - vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) - - stor_device->vmscsi_size_delta); + vstor_packet->vm_srb.length = sizeof(struct vmscsi_request); - vstor_packet->vm_srb.sense_info_length = sense_buffer_size; + vstor_packet->vm_srb.sense_info_length = STORVSC_SENSE_BUFFER_SIZE; vstor_packet->vm_srb.data_transfer_length = @@ -1574,13 +1505,11 @@ found_channel: ret = vmbus_sendpacket_mpb_desc(outgoing_channel, request->payload, request->payload_sz, vstor_packet, - (sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta), + sizeof(struct vstor_packet), (unsigned long)request); } else { ret = vmbus_sendpacket(outgoing_channel, vstor_packet, - (sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta), + sizeof(struct vstor_packet), (unsigned long)request, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -1684,8 +1613,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) vstor_packet->vm_srb.path_id = stor_device->path_id; ret = vmbus_sendpacket(device->channel, vstor_packet, - (sizeof(struct vstor_packet) - - stor_device->vmscsi_size_delta), + sizeof(struct vstor_packet), VMBUS_RQST_RESET, VM_PKT_DATA_INBAND, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); @@ -1778,31 +1706,31 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) memset(&cmd_request->vstor_packet, 0, sizeof(struct vstor_packet)); vm_srb = &cmd_request->vstor_packet.vm_srb; - vm_srb->win8_extension.time_out_value = 60; + vm_srb->time_out_value = 60; - vm_srb->win8_extension.srb_flags |= + vm_srb->srb_flags |= SRB_FLAGS_DISABLE_SYNCH_TRANSFER; if (scmnd->device->tagged_supported) { - vm_srb->win8_extension.srb_flags |= + vm_srb->srb_flags |= (SRB_FLAGS_QUEUE_ACTION_ENABLE | SRB_FLAGS_NO_QUEUE_FREEZE); - vm_srb->win8_extension.queue_tag = SP_UNTAGGED; - vm_srb->win8_extension.queue_action = SRB_SIMPLE_TAG_REQUEST; + vm_srb->queue_tag = SP_UNTAGGED; + vm_srb->queue_action = SRB_SIMPLE_TAG_REQUEST; } /* Build the SRB */ switch (scmnd->sc_data_direction) { case DMA_TO_DEVICE: vm_srb->data_in = WRITE_TYPE; - vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT; + vm_srb->srb_flags |= SRB_FLAGS_DATA_OUT; break; case DMA_FROM_DEVICE: vm_srb->data_in = READ_TYPE; - vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN; + vm_srb->srb_flags |= SRB_FLAGS_DATA_IN; break; case DMA_NONE: vm_srb->data_in = UNKNOWN_TYPE; - vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; + vm_srb->srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; break; default: /* @@ -1966,34 +1894,16 @@ static int storvsc_probe(struct hv_device *device, bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false); int target = 0; struct storvsc_device *stor_device; - int max_luns_per_target; - int max_targets; - int max_channels; int max_sub_channels = 0; /* - * Based on the windows host we are running on, - * set state to properly communicate with the host. + * We support sub-channels for storage on SCSI and FC controllers. 
+ * The number of sub-channels offerred is based on the number of + * VCPUs in the guest. */ - - if (vmbus_proto_version < VERSION_WIN8) { - max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET; - max_targets = STORVSC_IDE_MAX_TARGETS; - max_channels = STORVSC_IDE_MAX_CHANNELS; - } else { - max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET; - max_targets = STORVSC_MAX_TARGETS; - max_channels = STORVSC_MAX_CHANNELS; - /* - * On Windows8 and above, we support sub-channels for storage - * on SCSI and FC controllers. - * The number of sub-channels offerred is based on the number of - * VCPUs in the guest. - */ - if (!dev_is_ide) - max_sub_channels = - (num_cpus - 1) / storvsc_vcpus_per_sub_channel; - } + if (!dev_is_ide) + max_sub_channels = + (num_cpus - 1) / storvsc_vcpus_per_sub_channel; scsi_driver.can_queue = max_outstanding_req_per_channel * (max_sub_channels + 1) * @@ -2022,7 +1932,6 @@ static int storvsc_probe(struct hv_device *device, init_waitqueue_head(&stor_device->waiting_to_drain); stor_device->device = device; stor_device->host = host; - stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension); spin_lock_init(&stor_device->lock); hv_set_drvdata(device, stor_device); dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); @@ -2046,9 +1955,9 @@ static int storvsc_probe(struct hv_device *device, break; case SCSI_GUID: - host->max_lun = max_luns_per_target; - host->max_id = max_targets; - host->max_channel = max_channels - 1; + host->max_lun = STORVSC_MAX_LUNS_PER_TARGET; + host->max_id = STORVSC_MAX_TARGETS; + host->max_channel = STORVSC_MAX_CHANNELS - 1; break; default: @@ -2235,10 +2144,6 @@ static int __init storvsc_drv_init(void) * than the ring buffer size since that page is reserved for * the ring buffer indices) by the max request size (which is * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) - * - * The computation underestimates max_outstanding_req_per_channel - * for Win7 and older hosts because it does not take into account - * the vmscsi_size_delta correction to the max request size. 
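[Editorial aside] The sub-channel sizing shown in storvsc_probe() above is a simple division of guest vCPUs. A worked example with assumed values; treating four vCPUs per sub-channel as the module-parameter default is an assumption for illustration:

/* Hedged arithmetic sketch, not driver code: with a 16-vCPU guest and
 * storvsc_vcpus_per_sub_channel assumed to be 4, the driver requests
 * (16 - 1) / 4 = 3 sub-channels, i.e. 4 VMBus channels in total. */
static unsigned int example_max_sub_channels(unsigned int num_cpus,
                                             unsigned int vcpus_per_sub_channel)
{
        return (num_cpus - 1) / vcpus_per_sub_channel;  /* 16, 4 -> 3 */
}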
*/ max_outstanding_req_per_channel = ((storvsc_ringbuffer_size - PAGE_SIZE) / diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index c77ecf61818b..5611d14d3ba2 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -39,6 +39,7 @@ #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_opp.h> +#include <linux/power_supply.h> #include <linux/reboot.h> #include <linux/regmap.h> #include <linux/reset.h> @@ -108,6 +109,7 @@ #define PMC_USB_DEBOUNCE_DEL 0xec #define PMC_USB_AO 0xf0 +#define PMC_SCRATCH37 0x130 #define PMC_SCRATCH41 0x140 #define PMC_WAKE2_MASK 0x160 @@ -1101,8 +1103,7 @@ static struct notifier_block tegra_pmc_reboot_notifier = { .notifier_call = tegra_pmc_reboot_notify, }; -static int tegra_pmc_restart_notify(struct notifier_block *this, - unsigned long action, void *data) +static void tegra_pmc_restart(void) { u32 value; @@ -1110,14 +1111,31 @@ static int tegra_pmc_restart_notify(struct notifier_block *this, value = tegra_pmc_readl(pmc, PMC_CNTRL); value |= PMC_CNTRL_MAIN_RST; tegra_pmc_writel(pmc, value, PMC_CNTRL); +} + +static int tegra_pmc_restart_handler(struct sys_off_data *data) +{ + tegra_pmc_restart(); return NOTIFY_DONE; } -static struct notifier_block tegra_pmc_restart_handler = { - .notifier_call = tegra_pmc_restart_notify, - .priority = 128, -}; +static int tegra_pmc_power_off_handler(struct sys_off_data *data) +{ + /* + * Reboot Nexus 7 into special bootloader mode if USB cable is + * connected in order to display battery status and power off. + */ + if (of_machine_is_compatible("asus,grouper") && + power_supply_is_system_supplied()) { + const u32 go_to_charger_mode = 0xa5a55a5a; + + tegra_pmc_writel(pmc, go_to_charger_mode, PMC_SCRATCH37); + tegra_pmc_restart(); + } + + return NOTIFY_DONE; +} static int powergate_show(struct seq_file *s, void *data) { @@ -2880,6 +2898,42 @@ static int tegra_pmc_probe(struct platform_device *pdev) } /* + * PMC should be last resort for restarting since it soft-resets + * CPU without resetting everything else. + */ + err = devm_register_reboot_notifier(&pdev->dev, + &tegra_pmc_reboot_notifier); + if (err) { + dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", + err); + return err; + } + + err = devm_register_sys_off_handler(&pdev->dev, + SYS_OFF_MODE_RESTART, + SYS_OFF_PRIO_LOW, + tegra_pmc_restart_handler, NULL); + if (err) { + dev_err(&pdev->dev, "failed to register sys-off handler: %d\n", + err); + return err; + } + + /* + * PMC should be primary power-off method if it soft-resets CPU, + * asking bootloader to shutdown hardware. + */ + err = devm_register_sys_off_handler(&pdev->dev, + SYS_OFF_MODE_POWER_OFF, + SYS_OFF_PRIO_FIRMWARE, + tegra_pmc_power_off_handler, NULL); + if (err) { + dev_err(&pdev->dev, "failed to register sys-off handler: %d\n", + err); + return err; + } + + /* * PCLK clock rate can't be retrieved using CLK API because it * causes lockup if CPU enters LP2 idle state from some other * CLK notifier, hence we're caching the rate's value locally. 
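[Editorial aside] The tegra PMC probe path above moves restart and power-off handling onto the sys-off handler API, using priorities (SYS_OFF_PRIO_LOW for restart, SYS_OFF_PRIO_FIRMWARE for power-off) to order the PMC behind more specific handlers. A minimal sketch of that registration pattern; the chip structure and register layout are illustrative assumptions:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct example_chip {
        void __iomem *base;
};

/* Hedged sketch of devm_register_sys_off_handler(): a low-priority
 * restart callback so firmware- or board-specific handlers run first. */
static int example_restart(struct sys_off_data *data)
{
        struct example_chip *chip = data->cb_data;

        /* Assumed register layout: set a main-reset bit, mirroring the PMC. */
        writel(readl(chip->base) | BIT(4), chip->base);

        return NOTIFY_DONE;
}

static int example_register(struct device *dev, struct example_chip *chip)
{
        return devm_register_sys_off_handler(dev, SYS_OFF_MODE_RESTART,
                                             SYS_OFF_PRIO_LOW,
                                             example_restart, chip);
}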
@@ -2910,28 +2964,13 @@ static int tegra_pmc_probe(struct platform_device *pdev) goto cleanup_sysfs; } - err = devm_register_reboot_notifier(&pdev->dev, - &tegra_pmc_reboot_notifier); - if (err) { - dev_err(&pdev->dev, "unable to register reboot notifier, %d\n", - err); - goto cleanup_debugfs; - } - - err = register_restart_handler(&tegra_pmc_restart_handler); - if (err) { - dev_err(&pdev->dev, "unable to register restart handler, %d\n", - err); - goto cleanup_debugfs; - } - err = tegra_pmc_pinctrl_init(pmc); if (err) - goto cleanup_restart_handler; + goto cleanup_debugfs; err = tegra_pmc_regmap_init(pmc); if (err < 0) - goto cleanup_restart_handler; + goto cleanup_debugfs; err = tegra_powergate_init(pmc, pdev->dev.of_node); if (err < 0) @@ -2954,8 +2993,6 @@ static int tegra_pmc_probe(struct platform_device *pdev) cleanup_powergates: tegra_powergate_remove_all(pdev->dev.of_node); -cleanup_restart_handler: - unregister_restart_handler(&tegra_pmc_restart_handler); cleanup_debugfs: debugfs_remove(pmc->debugfs); cleanup_sysfs: diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c index d403a7a3021d..72ab066ce552 100644 --- a/drivers/spi/spi-fsi.c +++ b/drivers/spi/spi-fsi.c @@ -319,12 +319,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); do { + if (time_after(jiffies, end)) + return -ETIMEDOUT; + rc = fsi_spi_status(ctx, &status, "TX"); if (rc) return rc; - - if (time_after(jiffies, end)) - return -ETIMEDOUT; } while (status & SPI_FSI_STATUS_TDR_FULL); sent += nb; @@ -337,12 +337,12 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx, while (transfer->len > recv) { end = jiffies + msecs_to_jiffies(SPI_FSI_STATUS_TIMEOUT_MS); do { + if (time_after(jiffies, end)) + return -ETIMEDOUT; + rc = fsi_spi_status(ctx, &status, "RX"); if (rc) return rc; - - if (time_after(jiffies, end)) - return -ETIMEDOUT; } while (!(status & SPI_FSI_STATUS_RDR_FULL)); rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index fe252a8075a7..b9e2c7e7c580 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1672,7 +1672,8 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) ret = ctlr->transfer_one_message(ctlr, msg); if (ret) { dev_err(&ctlr->dev, - "failed to transfer one message from queue\n"); + "failed to transfer one message from queue: %d\n", + ret); goto out; } diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index 770d2b0299c3..80d4e0676083 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -663,6 +663,7 @@ static const struct acpi_device_id int3400_thermal_match[] = { {"INT3400", 0}, {"INTC1040", 0}, {"INTC1041", 0}, + {"INTC1042", 0}, {"INTC10A0", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index 07e25321dfe3..71d084c4c456 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -285,6 +285,7 @@ static const struct acpi_device_id int3403_device_ids[] = { {"INT3403", 0}, {"INTC1043", 0}, {"INTC1046", 0}, + {"INTC1062", 0}, {"INTC10A1", 0}, {"", 0}, }; diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h index 49932a68abac..7d52fcff4937 
100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h @@ -24,6 +24,7 @@ #define PCI_DEVICE_ID_INTEL_HSB_THERMAL 0x0A03 #define PCI_DEVICE_ID_INTEL_ICL_THERMAL 0x8a03 #define PCI_DEVICE_ID_INTEL_JSL_THERMAL 0x4E03 +#define PCI_DEVICE_ID_INTEL_MTLP_THERMAL 0x7D03 #define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D #define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903 #define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03 diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c index ca40b0967cdd..c2dc4c158b9d 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c @@ -358,6 +358,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend, static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) }, + { PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) }, { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) }, { }, }; diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index 7018d959f775..2889a214dadc 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -7,9 +7,7 @@ */ #include <linux/device.h> -#include <linux/dmar.h> #include <linux/idr.h> -#include <linux/iommu.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -257,13 +255,9 @@ static ssize_t iommu_dma_protection_show(struct device *dev, struct device_attribute *attr, char *buf) { - /* - * Kernel DMA protection is a feature where Thunderbolt security is - * handled natively using IOMMU. It is enabled when IOMMU is - * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set. 
- */ - return sprintf(buf, "%d\n", - iommu_present(&pci_bus_type) && dmar_platform_optin()); + struct tb *tb = container_of(dev, struct tb, dev); + + return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection); } static DEVICE_ATTR_RO(iommu_dma_protection); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 4a582183f675..4bc87b0f003a 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -15,9 +15,11 @@ #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> +#include <linux/iommu.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/property.h> +#include <linux/string_helpers.h> #include "nhi.h" #include "nhi_regs.h" @@ -1103,6 +1105,47 @@ static void nhi_check_quirks(struct tb_nhi *nhi) nhi->quirks |= QUIRK_AUTO_CLEAR_INT; } +static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data) +{ + if (!pdev->external_facing || + !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION)) + return 0; + *(bool *)data = true; + return 1; /* Stop walking */ +} + +static void nhi_check_iommu(struct tb_nhi *nhi) +{ + struct pci_bus *bus = nhi->pdev->bus; + bool port_ok = false; + + /* + * Ideally what we'd do here is grab every PCI device that + * represents a tunnelling adapter for this NHI and check their + * status directly, but unfortunately USB4 seems to make it + * obnoxiously difficult to reliably make any correlation. + * + * So for now we'll have to bodge it... Hoping that the system + * is at least sane enough that an adapter is in the same PCI + * segment as its NHI, if we can find *something* on that segment + * which meets the requirements for Kernel DMA Protection, we'll + * take that to imply that firmware is aware and has (hopefully) + * done the right thing in general. We need to know that the PCI + * layer has seen the ExternalFacingPort property which will then + * inform the IOMMU layer to enforce the complete "untrusted DMA" + * flow, but also that the IOMMU driver itself can be trusted not + * to have been subverted by a pre-boot DMA attack. 
+ */ + while (bus->parent) + bus = bus->parent; + + pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok); + + nhi->iommu_dma_protection = port_ok; + dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n", + str_enabled_disabled(port_ok)); +} + static int nhi_init_msi(struct tb_nhi *nhi) { struct pci_dev *pdev = nhi->pdev; @@ -1220,6 +1263,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; nhi_check_quirks(nhi); + nhi_check_iommu(nhi); res = nhi_init_msi(nhi); if (res) { diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 6771f05e32c2..8873c1644a29 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -19,7 +19,7 @@ struct ucsi_acpi { struct device *dev; struct ucsi *ucsi; - void __iomem *base; + void *base; struct completion complete; unsigned long flags; guid_t guid; @@ -51,7 +51,7 @@ static int ucsi_acpi_read(struct ucsi *ucsi, unsigned int offset, if (ret) return ret; - memcpy(val, (const void __force *)(ua->base + offset), val_len); + memcpy(val, ua->base + offset, val_len); return 0; } @@ -61,7 +61,7 @@ static int ucsi_acpi_async_write(struct ucsi *ucsi, unsigned int offset, { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); - memcpy((void __force *)(ua->base + offset), val, val_len); + memcpy(ua->base + offset, val, val_len); return ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_WRITE); } @@ -132,20 +132,9 @@ static int ucsi_acpi_probe(struct platform_device *pdev) return -ENODEV; } - /* This will make sure we can use ioremap() */ - status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1); - if (ACPI_FAILURE(status)) - return -ENOMEM; - - /* - * NOTE: The memory region for the data structures is used also in an - * operation region, which means ACPI has already reserved it. Therefore - * it can not be requested here, and we can not use - * devm_ioremap_resource(). 
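[Editorial aside] The ucsi_acpi change that follows maps the UCSI mailbox with devm_memremap() instead of ioremap(), since the region is ordinary memory that is also covered by an ACPI operation region. A condensed sketch of the resulting access pattern; function and variable names are illustrative:

/* Hedged sketch: a write-back memremap() mapping is plain kernel memory,
 * so the mailbox can be accessed with memcpy() and no __iomem annotation
 * or __force casts are needed. */
static int example_map_mailbox(struct device *dev, struct resource *res,
                               void **base)
{
        *base = devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
        if (IS_ERR(*base))
                return PTR_ERR(*base);

        return 0;
}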
- */ - ua->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); - if (!ua->base) - return -ENOMEM; + ua->base = devm_memremap(&pdev->dev, res->start, resource_size(res), MEMREMAP_WB); + if (IS_ERR(ua->base)) + return PTR_ERR(ua->base); ret = guid_parse(UCSI_DSM_UUID, &ua->guid); if (ret) diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c index 6e2e62c6f47a..3feff729f3ce 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c @@ -588,6 +588,7 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = { .name = "vfio-fsl-mc", .owner = THIS_MODULE, }, + .driver_managed_dma = true, }; static int __init vfio_fsl_mc_driver_init(void) diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c index 767b5d47631a..4def43f5f7b6 100644 --- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c +++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c @@ -337,6 +337,14 @@ static int vf_qm_cache_wb(struct hisi_qm *qm) return 0; } +static struct hisi_acc_vf_core_device *hssi_acc_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + return container_of(core_device, struct hisi_acc_vf_core_device, + core_device); +} + static void vf_qm_fun_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev, struct hisi_qm *qm) { @@ -962,7 +970,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev, static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev); if (hisi_acc_vdev->core_device.vdev.migration_flags != VFIO_MIGRATION_STOP_COPY) @@ -1274,11 +1282,10 @@ static int hisi_acc_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device &hisi_acc_vfio_pci_ops); } + dev_set_drvdata(&pdev->dev, &hisi_acc_vdev->core_device); ret = vfio_pci_core_register_device(&hisi_acc_vdev->core_device); if (ret) goto out_free; - - dev_set_drvdata(&pdev->dev, hisi_acc_vdev); return 0; out_free: @@ -1289,7 +1296,7 @@ out_free: static void hisi_acc_vfio_pci_remove(struct pci_dev *pdev) { - struct hisi_acc_vf_core_device *hisi_acc_vdev = dev_get_drvdata(&pdev->dev); + struct hisi_acc_vf_core_device *hisi_acc_vdev = hssi_acc_drvdata(pdev); vfio_pci_core_unregister_device(&hisi_acc_vdev->core_device); vfio_pci_core_uninit_device(&hisi_acc_vdev->core_device); @@ -1316,6 +1323,7 @@ static struct pci_driver hisi_acc_vfio_pci_driver = { .probe = hisi_acc_vfio_pci_probe, .remove = hisi_acc_vfio_pci_remove, .err_handler = &hisi_acc_vf_err_handlers, + .driver_managed_dma = true, }; module_pci_driver(hisi_acc_vfio_pci_driver); diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index 5c9f9218cc1d..9b9f33ca270a 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -5,89 +5,157 @@ #include "cmd.h" -int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod) +static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id, + u16 *vhca_id); + +int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); u32 out[MLX5_ST_SZ_DW(suspend_vhca_out)] = {}; u32 in[MLX5_ST_SZ_DW(suspend_vhca_in)] = {}; - int ret; - if (!mdev) + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) return -ENOTCONN; MLX5_SET(suspend_vhca_in, in, opcode, MLX5_CMD_OP_SUSPEND_VHCA); - 
MLX5_SET(suspend_vhca_in, in, vhca_id, vhca_id); + MLX5_SET(suspend_vhca_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(suspend_vhca_in, in, op_mod, op_mod); - ret = mlx5_cmd_exec_inout(mdev, suspend_vhca, in, out); - mlx5_vf_put_core_dev(mdev); - return ret; + return mlx5_cmd_exec_inout(mvdev->mdev, suspend_vhca, in, out); } -int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod) +int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); u32 out[MLX5_ST_SZ_DW(resume_vhca_out)] = {}; u32 in[MLX5_ST_SZ_DW(resume_vhca_in)] = {}; - int ret; - if (!mdev) + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) return -ENOTCONN; MLX5_SET(resume_vhca_in, in, opcode, MLX5_CMD_OP_RESUME_VHCA); - MLX5_SET(resume_vhca_in, in, vhca_id, vhca_id); + MLX5_SET(resume_vhca_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(resume_vhca_in, in, op_mod, op_mod); - ret = mlx5_cmd_exec_inout(mdev, resume_vhca, in, out); - mlx5_vf_put_core_dev(mdev); - return ret; + return mlx5_cmd_exec_inout(mvdev->mdev, resume_vhca, in, out); } -int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id, +int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, size_t *state_size) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); u32 out[MLX5_ST_SZ_DW(query_vhca_migration_state_out)] = {}; u32 in[MLX5_ST_SZ_DW(query_vhca_migration_state_in)] = {}; int ret; - if (!mdev) + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) return -ENOTCONN; MLX5_SET(query_vhca_migration_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE); - MLX5_SET(query_vhca_migration_state_in, in, vhca_id, vhca_id); + MLX5_SET(query_vhca_migration_state_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(query_vhca_migration_state_in, in, op_mod, 0); - ret = mlx5_cmd_exec_inout(mdev, query_vhca_migration_state, in, out); + ret = mlx5_cmd_exec_inout(mvdev->mdev, query_vhca_migration_state, in, + out); if (ret) - goto end; + return ret; *state_size = MLX5_GET(query_vhca_migration_state_out, out, required_umem_size); + return 0; +} + +static int mlx5fv_vf_event(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct mlx5vf_pci_core_device *mvdev = + container_of(nb, struct mlx5vf_pci_core_device, nb); + + mutex_lock(&mvdev->state_mutex); + switch (event) { + case MLX5_PF_NOTIFY_ENABLE_VF: + mvdev->mdev_detach = false; + break; + case MLX5_PF_NOTIFY_DISABLE_VF: + mlx5vf_disable_fds(mvdev); + mvdev->mdev_detach = true; + break; + default: + break; + } + mlx5vf_state_mutex_unlock(mvdev); + return 0; +} + +void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev) +{ + if (!mvdev->migrate_cap) + return; + + mlx5_sriov_blocking_notifier_unregister(mvdev->mdev, mvdev->vf_id, + &mvdev->nb); + destroy_workqueue(mvdev->cb_wq); +} + +void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev) +{ + struct pci_dev *pdev = mvdev->core_device.pdev; + int ret; + + if (!pdev->is_virtfn) + return; + + mvdev->mdev = mlx5_vf_get_core_dev(pdev); + if (!mvdev->mdev) + return; + + if (!MLX5_CAP_GEN(mvdev->mdev, migration)) + goto end; + + mvdev->vf_id = pci_iov_vf_id(pdev); + if (mvdev->vf_id < 0) + goto end; + + if (mlx5vf_cmd_get_vhca_id(mvdev->mdev, mvdev->vf_id + 1, + &mvdev->vhca_id)) + goto end; + + mvdev->cb_wq = alloc_ordered_workqueue("mlx5vf_wq", 0); + if (!mvdev->cb_wq) + goto end; + + mutex_init(&mvdev->state_mutex); + spin_lock_init(&mvdev->reset_lock); + 
mvdev->nb.notifier_call = mlx5fv_vf_event; + ret = mlx5_sriov_blocking_notifier_register(mvdev->mdev, mvdev->vf_id, + &mvdev->nb); + if (ret) { + destroy_workqueue(mvdev->cb_wq); + goto end; + } + + mvdev->migrate_cap = 1; + mvdev->core_device.vdev.migration_flags = + VFIO_MIGRATION_STOP_COPY | + VFIO_MIGRATION_P2P; end: - mlx5_vf_put_core_dev(mdev); - return ret; + mlx5_vf_put_core_dev(mvdev->mdev); } -int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id) +static int mlx5vf_cmd_get_vhca_id(struct mlx5_core_dev *mdev, u16 function_id, + u16 *vhca_id) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {}; int out_size; void *out; int ret; - if (!mdev) - return -ENOTCONN; - out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out); out = kzalloc(out_size, GFP_KERNEL); - if (!out) { - ret = -ENOMEM; - goto end; - } + if (!out) + return -ENOMEM; MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP); MLX5_SET(query_hca_cap_in, in, other_function, 1); @@ -105,8 +173,6 @@ int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id) err_exec: kfree(out); -end: - mlx5_vf_put_core_dev(mdev); return ret; } @@ -151,21 +217,68 @@ static int _create_state_mkey(struct mlx5_core_dev *mdev, u32 pdn, return err; } -int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id, +void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work) +{ + struct mlx5vf_async_data *async_data = container_of(_work, + struct mlx5vf_async_data, work); + struct mlx5_vf_migration_file *migf = container_of(async_data, + struct mlx5_vf_migration_file, async_data); + struct mlx5_core_dev *mdev = migf->mvdev->mdev; + + mutex_lock(&migf->lock); + if (async_data->status) { + migf->is_err = true; + wake_up_interruptible(&migf->poll_wait); + } + mutex_unlock(&migf->lock); + + mlx5_core_destroy_mkey(mdev, async_data->mkey); + dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); + mlx5_core_dealloc_pd(mdev, async_data->pdn); + kvfree(async_data->out); + fput(migf->filp); +} + +static void mlx5vf_save_callback(int status, struct mlx5_async_work *context) +{ + struct mlx5vf_async_data *async_data = container_of(context, + struct mlx5vf_async_data, cb_work); + struct mlx5_vf_migration_file *migf = container_of(async_data, + struct mlx5_vf_migration_file, async_data); + + if (!status) { + WRITE_ONCE(migf->total_length, + MLX5_GET(save_vhca_state_out, async_data->out, + actual_image_size)); + wake_up_interruptible(&migf->poll_wait); + } + + /* + * The error and the cleanup flows can't run from an + * interrupt context + */ + async_data->status = status; + queue_work(migf->mvdev->cb_wq, &async_data->work); +} + +int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, struct mlx5_vf_migration_file *migf) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); - u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {}; + u32 out_size = MLX5_ST_SZ_BYTES(save_vhca_state_out); u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {}; + struct mlx5vf_async_data *async_data; + struct mlx5_core_dev *mdev; u32 pdn, mkey; int err; - if (!mdev) + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) return -ENOTCONN; + mdev = mvdev->mdev; err = mlx5_core_alloc_pd(mdev, &pdn); if (err) - goto end; + return err; err = dma_map_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); @@ -179,45 +292,54 @@ int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id, MLX5_SET(save_vhca_state_in, in, opcode, 
MLX5_CMD_OP_SAVE_VHCA_STATE); MLX5_SET(save_vhca_state_in, in, op_mod, 0); - MLX5_SET(save_vhca_state_in, in, vhca_id, vhca_id); + MLX5_SET(save_vhca_state_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(save_vhca_state_in, in, mkey, mkey); MLX5_SET(save_vhca_state_in, in, size, migf->total_length); - err = mlx5_cmd_exec_inout(mdev, save_vhca_state, in, out); + async_data = &migf->async_data; + async_data->out = kvzalloc(out_size, GFP_KERNEL); + if (!async_data->out) { + err = -ENOMEM; + goto err_out; + } + + /* no data exists till the callback comes back */ + migf->total_length = 0; + get_file(migf->filp); + async_data->mkey = mkey; + async_data->pdn = pdn; + err = mlx5_cmd_exec_cb(&migf->async_ctx, in, sizeof(in), + async_data->out, + out_size, mlx5vf_save_callback, + &async_data->cb_work); if (err) goto err_exec; - migf->total_length = - MLX5_GET(save_vhca_state_out, out, actual_image_size); - - mlx5_core_destroy_mkey(mdev, mkey); - mlx5_core_dealloc_pd(mdev, pdn); - dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); - mlx5_vf_put_core_dev(mdev); - return 0; err_exec: + fput(migf->filp); + kvfree(async_data->out); +err_out: mlx5_core_destroy_mkey(mdev, mkey); err_create_mkey: dma_unmap_sgtable(mdev->device, &migf->table.sgt, DMA_FROM_DEVICE, 0); err_dma_map: mlx5_core_dealloc_pd(mdev, pdn); -end: - mlx5_vf_put_core_dev(mdev); return err; } -int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id, +int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, struct mlx5_vf_migration_file *migf) { - struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(pdev); + struct mlx5_core_dev *mdev; u32 out[MLX5_ST_SZ_DW(save_vhca_state_out)] = {}; u32 in[MLX5_ST_SZ_DW(save_vhca_state_in)] = {}; u32 pdn, mkey; int err; - if (!mdev) + lockdep_assert_held(&mvdev->state_mutex); + if (mvdev->mdev_detach) return -ENOTCONN; mutex_lock(&migf->lock); @@ -226,6 +348,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id, goto end; } + mdev = mvdev->mdev; err = mlx5_core_alloc_pd(mdev, &pdn); if (err) goto end; @@ -241,7 +364,7 @@ int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id, MLX5_SET(load_vhca_state_in, in, opcode, MLX5_CMD_OP_LOAD_VHCA_STATE); MLX5_SET(load_vhca_state_in, in, op_mod, 0); - MLX5_SET(load_vhca_state_in, in, vhca_id, vhca_id); + MLX5_SET(load_vhca_state_in, in, vhca_id, mvdev->vhca_id); MLX5_SET(load_vhca_state_in, in, mkey, mkey); MLX5_SET(load_vhca_state_in, in, size, migf->total_length); @@ -253,7 +376,6 @@ err_mkey: err_reg: mlx5_core_dealloc_pd(mdev, pdn); end: - mlx5_vf_put_core_dev(mdev); mutex_unlock(&migf->lock); return err; } diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h index 1392a11a9cc0..6c3112fdd8b1 100644 --- a/drivers/vfio/pci/mlx5/cmd.h +++ b/drivers/vfio/pci/mlx5/cmd.h @@ -7,12 +7,23 @@ #define MLX5_VFIO_CMD_H #include <linux/kernel.h> +#include <linux/vfio_pci_core.h> #include <linux/mlx5/driver.h> +struct mlx5vf_async_data { + struct mlx5_async_work cb_work; + struct work_struct work; + int status; + u32 pdn; + u32 mkey; + void *out; +}; + struct mlx5_vf_migration_file { struct file *filp; struct mutex lock; - bool disabled; + u8 disabled:1; + u8 is_err:1; struct sg_append_table table; size_t total_length; @@ -22,15 +33,42 @@ struct mlx5_vf_migration_file { struct scatterlist *last_offset_sg; unsigned int sg_last_entry; unsigned long last_offset; + struct mlx5vf_pci_core_device *mvdev; + wait_queue_head_t poll_wait; + struct mlx5_async_ctx async_ctx; + struct mlx5vf_async_data 
async_data; +}; + +struct mlx5vf_pci_core_device { + struct vfio_pci_core_device core_device; + int vf_id; + u16 vhca_id; + u8 migrate_cap:1; + u8 deferred_reset:1; + u8 mdev_detach:1; + /* protect migration state */ + struct mutex state_mutex; + enum vfio_device_mig_state mig_state; + /* protect the reset_done flow */ + spinlock_t reset_lock; + struct mlx5_vf_migration_file *resuming_migf; + struct mlx5_vf_migration_file *saving_migf; + struct workqueue_struct *cb_wq; + struct notifier_block nb; + struct mlx5_core_dev *mdev; }; -int mlx5vf_cmd_suspend_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod); -int mlx5vf_cmd_resume_vhca(struct pci_dev *pdev, u16 vhca_id, u16 op_mod); -int mlx5vf_cmd_query_vhca_migration_state(struct pci_dev *pdev, u16 vhca_id, +int mlx5vf_cmd_suspend_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod); +int mlx5vf_cmd_resume_vhca(struct mlx5vf_pci_core_device *mvdev, u16 op_mod); +int mlx5vf_cmd_query_vhca_migration_state(struct mlx5vf_pci_core_device *mvdev, size_t *state_size); -int mlx5vf_cmd_get_vhca_id(struct pci_dev *pdev, u16 function_id, u16 *vhca_id); -int mlx5vf_cmd_save_vhca_state(struct pci_dev *pdev, u16 vhca_id, +void mlx5vf_cmd_set_migratable(struct mlx5vf_pci_core_device *mvdev); +void mlx5vf_cmd_remove_migratable(struct mlx5vf_pci_core_device *mvdev); +int mlx5vf_cmd_save_vhca_state(struct mlx5vf_pci_core_device *mvdev, struct mlx5_vf_migration_file *migf); -int mlx5vf_cmd_load_vhca_state(struct pci_dev *pdev, u16 vhca_id, +int mlx5vf_cmd_load_vhca_state(struct mlx5vf_pci_core_device *mvdev, struct mlx5_vf_migration_file *migf); +void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev); +void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev); +void mlx5vf_mig_file_cleanup_cb(struct work_struct *_work); #endif /* MLX5_VFIO_CMD_H */ diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c index bbec5d288fee..0558d0649ddb 100644 --- a/drivers/vfio/pci/mlx5/main.c +++ b/drivers/vfio/pci/mlx5/main.c @@ -17,7 +17,6 @@ #include <linux/uaccess.h> #include <linux/vfio.h> #include <linux/sched/mm.h> -#include <linux/vfio_pci_core.h> #include <linux/anon_inodes.h> #include "cmd.h" @@ -25,19 +24,13 @@ /* Arbitrary to prevent userspace from consuming endless memory */ #define MAX_MIGRATION_SIZE (512*1024*1024) -struct mlx5vf_pci_core_device { - struct vfio_pci_core_device core_device; - u16 vhca_id; - u8 migrate_cap:1; - u8 deferred_reset:1; - /* protect migration state */ - struct mutex state_mutex; - enum vfio_device_mig_state mig_state; - /* protect the reset_done flow */ - spinlock_t reset_lock; - struct mlx5_vf_migration_file *resuming_migf; - struct mlx5_vf_migration_file *saving_migf; -}; +static struct mlx5vf_pci_core_device *mlx5vf_drvdata(struct pci_dev *pdev) +{ + struct vfio_pci_core_device *core_device = dev_get_drvdata(&pdev->dev); + + return container_of(core_device, struct mlx5vf_pci_core_device, + core_device); +} static struct page * mlx5vf_get_migration_page(struct mlx5_vf_migration_file *migf, @@ -149,12 +142,22 @@ static ssize_t mlx5vf_save_read(struct file *filp, char __user *buf, size_t len, return -ESPIPE; pos = &filp->f_pos; + if (!(filp->f_flags & O_NONBLOCK)) { + if (wait_event_interruptible(migf->poll_wait, + READ_ONCE(migf->total_length) || migf->is_err)) + return -ERESTARTSYS; + } + mutex_lock(&migf->lock); + if ((filp->f_flags & O_NONBLOCK) && !READ_ONCE(migf->total_length)) { + done = -EAGAIN; + goto out_unlock; + } if (*pos > migf->total_length) { done = -EINVAL; goto out_unlock; } - if 
(migf->disabled) { + if (migf->disabled || migf->is_err) { done = -ENODEV; goto out_unlock; } @@ -194,9 +197,28 @@ out_unlock: return done; } +static __poll_t mlx5vf_save_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct mlx5_vf_migration_file *migf = filp->private_data; + __poll_t pollflags = 0; + + poll_wait(filp, &migf->poll_wait, wait); + + mutex_lock(&migf->lock); + if (migf->disabled || migf->is_err) + pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; + else if (READ_ONCE(migf->total_length)) + pollflags = EPOLLIN | EPOLLRDNORM; + mutex_unlock(&migf->lock); + + return pollflags; +} + static const struct file_operations mlx5vf_save_fops = { .owner = THIS_MODULE, .read = mlx5vf_save_read, + .poll = mlx5vf_save_poll, .release = mlx5vf_release_file, .llseek = no_llseek, }; @@ -222,9 +244,11 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev) stream_open(migf->filp->f_inode, migf->filp); mutex_init(&migf->lock); - - ret = mlx5vf_cmd_query_vhca_migration_state( - mvdev->core_device.pdev, mvdev->vhca_id, &migf->total_length); + init_waitqueue_head(&migf->poll_wait); + mlx5_cmd_init_async_ctx(mvdev->mdev, &migf->async_ctx); + INIT_WORK(&migf->async_data.work, mlx5vf_mig_file_cleanup_cb); + ret = mlx5vf_cmd_query_vhca_migration_state(mvdev, + &migf->total_length); if (ret) goto out_free; @@ -233,8 +257,8 @@ mlx5vf_pci_save_device_data(struct mlx5vf_pci_core_device *mvdev) if (ret) goto out_free; - ret = mlx5vf_cmd_save_vhca_state(mvdev->core_device.pdev, - mvdev->vhca_id, migf); + migf->mvdev = mvdev; + ret = mlx5vf_cmd_save_vhca_state(mvdev, migf); if (ret) goto out_free; return migf; @@ -339,7 +363,7 @@ mlx5vf_pci_resume_device_data(struct mlx5vf_pci_core_device *mvdev) return migf; } -static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev) +void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev) { if (mvdev->resuming_migf) { mlx5vf_disable_fd(mvdev->resuming_migf); @@ -347,6 +371,8 @@ static void mlx5vf_disable_fds(struct mlx5vf_pci_core_device *mvdev) mvdev->resuming_migf = NULL; } if (mvdev->saving_migf) { + mlx5_cmd_cleanup_async_ctx(&mvdev->saving_migf->async_ctx); + cancel_work_sync(&mvdev->saving_migf->async_data.work); mlx5vf_disable_fd(mvdev->saving_migf); fput(mvdev->saving_migf->filp); mvdev->saving_migf = NULL; @@ -361,8 +387,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, int ret; if (cur == VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_STOP) { - ret = mlx5vf_cmd_suspend_vhca( - mvdev->core_device.pdev, mvdev->vhca_id, + ret = mlx5vf_cmd_suspend_vhca(mvdev, MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER); if (ret) return ERR_PTR(ret); @@ -370,8 +395,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, } if (cur == VFIO_DEVICE_STATE_STOP && new == VFIO_DEVICE_STATE_RUNNING_P2P) { - ret = mlx5vf_cmd_resume_vhca( - mvdev->core_device.pdev, mvdev->vhca_id, + ret = mlx5vf_cmd_resume_vhca(mvdev, MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER); if (ret) return ERR_PTR(ret); @@ -379,8 +403,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, } if (cur == VFIO_DEVICE_STATE_RUNNING && new == VFIO_DEVICE_STATE_RUNNING_P2P) { - ret = mlx5vf_cmd_suspend_vhca( - mvdev->core_device.pdev, mvdev->vhca_id, + ret = mlx5vf_cmd_suspend_vhca(mvdev, MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR); if (ret) return ERR_PTR(ret); @@ -388,8 +411,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, } if (cur == 
VFIO_DEVICE_STATE_RUNNING_P2P && new == VFIO_DEVICE_STATE_RUNNING) { - ret = mlx5vf_cmd_resume_vhca( - mvdev->core_device.pdev, mvdev->vhca_id, + ret = mlx5vf_cmd_resume_vhca(mvdev, MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR); if (ret) return ERR_PTR(ret); @@ -424,8 +446,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, } if (cur == VFIO_DEVICE_STATE_RESUMING && new == VFIO_DEVICE_STATE_STOP) { - ret = mlx5vf_cmd_load_vhca_state(mvdev->core_device.pdev, - mvdev->vhca_id, + ret = mlx5vf_cmd_load_vhca_state(mvdev, mvdev->resuming_migf); if (ret) return ERR_PTR(ret); @@ -444,7 +465,7 @@ mlx5vf_pci_step_device_state_locked(struct mlx5vf_pci_core_device *mvdev, * This function is called in all state_mutex unlock cases to * handle a 'deferred_reset' if exists. */ -static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev) +void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev) { again: spin_lock(&mvdev->reset_lock); @@ -505,7 +526,7 @@ static int mlx5vf_pci_get_device_state(struct vfio_device *vdev, static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev) { - struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev); + struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev); if (!mvdev->migrate_cap) return; @@ -532,34 +553,16 @@ static int mlx5vf_pci_open_device(struct vfio_device *core_vdev) struct mlx5vf_pci_core_device *mvdev = container_of( core_vdev, struct mlx5vf_pci_core_device, core_device.vdev); struct vfio_pci_core_device *vdev = &mvdev->core_device; - int vf_id; int ret; ret = vfio_pci_core_enable(vdev); if (ret) return ret; - if (!mvdev->migrate_cap) { - vfio_pci_core_finish_enable(vdev); - return 0; - } - - vf_id = pci_iov_vf_id(vdev->pdev); - if (vf_id < 0) { - ret = vf_id; - goto out_disable; - } - - ret = mlx5vf_cmd_get_vhca_id(vdev->pdev, vf_id + 1, &mvdev->vhca_id); - if (ret) - goto out_disable; - - mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; + if (mvdev->migrate_cap) + mvdev->mig_state = VFIO_DEVICE_STATE_RUNNING; vfio_pci_core_finish_enable(vdev); return 0; -out_disable: - vfio_pci_core_disable(vdev); - return ret; } static void mlx5vf_pci_close_device(struct vfio_device *core_vdev) @@ -596,32 +599,15 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev, if (!mvdev) return -ENOMEM; vfio_pci_core_init_device(&mvdev->core_device, pdev, &mlx5vf_pci_ops); - - if (pdev->is_virtfn) { - struct mlx5_core_dev *mdev = - mlx5_vf_get_core_dev(pdev); - - if (mdev) { - if (MLX5_CAP_GEN(mdev, migration)) { - mvdev->migrate_cap = 1; - mvdev->core_device.vdev.migration_flags = - VFIO_MIGRATION_STOP_COPY | - VFIO_MIGRATION_P2P; - mutex_init(&mvdev->state_mutex); - spin_lock_init(&mvdev->reset_lock); - } - mlx5_vf_put_core_dev(mdev); - } - } - + mlx5vf_cmd_set_migratable(mvdev); + dev_set_drvdata(&pdev->dev, &mvdev->core_device); ret = vfio_pci_core_register_device(&mvdev->core_device); if (ret) goto out_free; - - dev_set_drvdata(&pdev->dev, mvdev); return 0; out_free: + mlx5vf_cmd_remove_migratable(mvdev); vfio_pci_core_uninit_device(&mvdev->core_device); kfree(mvdev); return ret; @@ -629,9 +615,10 @@ out_free: static void mlx5vf_pci_remove(struct pci_dev *pdev) { - struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev); + struct mlx5vf_pci_core_device *mvdev = mlx5vf_drvdata(pdev); vfio_pci_core_unregister_device(&mvdev->core_device); + mlx5vf_cmd_remove_migratable(mvdev); vfio_pci_core_uninit_device(&mvdev->core_device); kfree(mvdev); } @@ -654,6 +641,7 @@ static struct pci_driver mlx5vf_pci_driver = { 
.probe = mlx5vf_pci_probe, .remove = mlx5vf_pci_remove, .err_handler = &mlx5vf_err_handlers, + .driver_managed_dma = true, }; static void __exit mlx5vf_pci_cleanup(void) diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 2b047469e02f..4d1a97415a27 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -151,10 +151,10 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; vfio_pci_core_init_device(vdev, pdev, &vfio_pci_ops); + dev_set_drvdata(&pdev->dev, vdev); ret = vfio_pci_core_register_device(vdev); if (ret) goto out_free; - dev_set_drvdata(&pdev->dev, vdev); return 0; out_free: @@ -174,10 +174,12 @@ static void vfio_pci_remove(struct pci_dev *pdev) static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn) { + struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev); + if (!enable_sriov) return -ENOENT; - return vfio_pci_core_sriov_configure(pdev, nr_virtfn); + return vfio_pci_core_sriov_configure(vdev, nr_virtfn); } static const struct pci_device_id vfio_pci_table[] = { @@ -194,6 +196,7 @@ static struct pci_driver vfio_pci_driver = { .remove = vfio_pci_remove, .sriov_configure = vfio_pci_sriov_configure, .err_handler = &vfio_pci_core_err_handlers, + .driver_managed_dma = true, }; static void __init vfio_pci_fill_ids(void) diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 6e58b4bf7a60..9343f597182d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -402,11 +402,14 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev) u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]); /* + * Memory region cannot be accessed if device power state is D3. + * * SR-IOV VF memory enable is handled by the MSE bit in the * PF SR-IOV capability, there's therefore no need to trigger * faults based on the virtual value. */ - return pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY); + return pdev->current_state < PCI_D3hot && + (pdev->no_command_memory || (cmd & PCI_COMMAND_MEMORY)); } /* @@ -692,6 +695,22 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm) return 0; } +/* + * It takes all the required locks to protect the access of power related + * variables and then invokes vfio_pci_set_power_state(). + */ +static void vfio_lock_and_set_power_state(struct vfio_pci_core_device *vdev, + pci_power_t state) +{ + if (state >= PCI_D3hot) + vfio_pci_zap_and_down_write_memory_lock(vdev); + else + down_write(&vdev->memory_lock); + + vfio_pci_set_power_state(vdev, state); + up_write(&vdev->memory_lock); +} + static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos, int count, struct perm_bits *perm, int offset, __le32 val) @@ -718,7 +737,7 @@ static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos, break; } - vfio_pci_set_power_state(vdev, state); + vfio_lock_and_set_power_state(vdev, state); } return count; @@ -739,11 +758,28 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm) p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE); /* + * The guests can't process PME events. If any PME event will be + * generated, then it will be mostly handled in the host and the + * host will clear the PME_STATUS. So virtualize PME_Support bits. + * The vconfig bits will be cleared during device capability + * initialization. 
+ */ + p_setw(perm, PCI_PM_PMC, PCI_PM_CAP_PME_MASK, NO_WRITE); + + /* * Power management is defined *per function*, so we can let * the user change power state, but we trap and initiate the * change ourselves, so the state bits are read-only. + * + * The guest can't process PME from D3cold so virtualize PME_Status + * and PME_En bits. The vconfig bits will be cleared during device + * capability initialization. */ - p_setd(perm, PCI_PM_CTRL, NO_VIRT, ~PCI_PM_CTRL_STATE_MASK); + p_setd(perm, PCI_PM_CTRL, + PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS, + ~(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS | + PCI_PM_CTRL_STATE_MASK)); + return 0; } @@ -1412,6 +1448,17 @@ static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epo return 0; } +static void vfio_update_pm_vconfig_bytes(struct vfio_pci_core_device *vdev, + int offset) +{ + __le16 *pmc = (__le16 *)&vdev->vconfig[offset + PCI_PM_PMC]; + __le16 *ctrl = (__le16 *)&vdev->vconfig[offset + PCI_PM_CTRL]; + + /* Clear vconfig PME_Support, PME_Status, and PME_En bits */ + *pmc &= ~cpu_to_le16(PCI_PM_CAP_PME_MASK); + *ctrl &= ~cpu_to_le16(PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS); +} + static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev, int offset, int size) { @@ -1535,6 +1582,9 @@ static int vfio_cap_init(struct vfio_pci_core_device *vdev) if (ret) return ret; + if (cap == PCI_CAP_ID_PM) + vfio_update_pm_vconfig_bytes(vdev, pos); + prev = &vdev->vconfig[pos + PCI_CAP_LIST_NEXT]; pos = next; caps++; diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index 06b6f3594a13..a0d69ddaf90d 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -156,7 +156,7 @@ no_mmap: } struct vfio_pci_group_info; -static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set); +static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set); static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set, struct vfio_pci_group_info *groups); @@ -217,6 +217,10 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat bool needs_restore = false, needs_save = false; int ret; + /* Prevent changing power state for PFs with VFs enabled */ + if (pci_num_vf(pdev) && state > PCI_D0) + return -EBUSY; + if (vdev->needs_pm_restore) { if (pdev->current_state < PCI_D3hot && state >= PCI_D3hot) { pci_save_state(pdev); @@ -255,6 +259,17 @@ int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, pci_power_t stat return ret; } +/* + * The dev_pm_ops needs to be provided to make pci-driver runtime PM working, + * so use structure without any callbacks. + * + * The pci-driver core runtime PM routines always save the device state + * before going into suspended state. If the device is going into low power + * state with only with runtime PM ops, then no explicit handling is needed + * for the devices which have NoSoftRst-. 
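The arrangement described above reduces to a simple pattern: an empty dev_pm_ops (defined just below) lets the PCI core's generic runtime-PM handlers save and restore state, while the enable and disable paths only take and drop a usage-count reference. A minimal sketch of that balanced get/put pattern, assuming illustrative helper names my_enable()/my_disable() that are not part of this patch:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int my_enable(struct device *dev)
{
        int ret;

        /* Resume the device if runtime-suspended and hold a usage count. */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;

        /* ... device-specific enable work; call pm_runtime_put() on error ... */
        return 0;
}

static void my_disable(struct device *dev)
{
        /* ... device-specific teardown ... */

        /* Drop the count taken in my_enable(); the device may suspend again. */
        pm_runtime_put(dev);
}

As long as every successful my_enable() is paired with exactly one my_disable(), the device idles into low power automatically once unused, which is what replaces the explicit vfio_pci_set_power_state(..., PCI_D3hot) calls removed in this series.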
+ */ +static const struct dev_pm_ops vfio_pci_core_pm_ops = { }; + int vfio_pci_core_enable(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; @@ -262,21 +277,23 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev) u16 cmd; u8 msix_pos; - vfio_pci_set_power_state(vdev, PCI_D0); + if (!disable_idle_d3) { + ret = pm_runtime_resume_and_get(&pdev->dev); + if (ret < 0) + return ret; + } /* Don't allow our initial saved state to include busmaster */ pci_clear_master(pdev); ret = pci_enable_device(pdev); if (ret) - return ret; + goto out_power; /* If reset fails because of the device lock, fail this path entirely */ ret = pci_try_reset_function(pdev); - if (ret == -EAGAIN) { - pci_disable_device(pdev); - return ret; - } + if (ret == -EAGAIN) + goto out_disable_device; vdev->reset_works = !ret; pci_save_state(pdev); @@ -300,12 +317,8 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev) } ret = vfio_config_init(vdev); - if (ret) { - kfree(vdev->pci_saved_state); - vdev->pci_saved_state = NULL; - pci_disable_device(pdev); - return ret; - } + if (ret) + goto out_free_state; msix_pos = pdev->msix_cap; if (msix_pos) { @@ -326,6 +339,16 @@ int vfio_pci_core_enable(struct vfio_pci_core_device *vdev) return 0; + +out_free_state: + kfree(vdev->pci_saved_state); + vdev->pci_saved_state = NULL; +out_disable_device: + pci_disable_device(pdev); +out_power: + if (!disable_idle_d3) + pm_runtime_put(&pdev->dev); + return ret; } EXPORT_SYMBOL_GPL(vfio_pci_core_enable); @@ -433,8 +456,11 @@ void vfio_pci_core_disable(struct vfio_pci_core_device *vdev) out: pci_disable_device(pdev); - if (!vfio_pci_dev_set_try_reset(vdev->vdev.dev_set) && !disable_idle_d3) - vfio_pci_set_power_state(vdev, PCI_D3hot); + vfio_pci_dev_set_try_reset(vdev->vdev.dev_set); + + /* Put the pm-runtime usage counter acquired during enable */ + if (!disable_idle_d3) + pm_runtime_put(&pdev->dev); } EXPORT_SYMBOL_GPL(vfio_pci_core_disable); @@ -556,7 +582,7 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data) struct vfio_pci_group_info { int count; - struct vfio_group **groups; + struct file **files; }; static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot) @@ -1018,10 +1044,10 @@ reset_info_exit: } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) { struct vfio_pci_hot_reset hdr; int32_t *group_fds; - struct vfio_group **groups; + struct file **files; struct vfio_pci_group_info info; bool slot = false; - int group_idx, count = 0, ret = 0; + int file_idx, count = 0, ret = 0; minsz = offsetofend(struct vfio_pci_hot_reset, count); @@ -1054,17 +1080,17 @@ reset_info_exit: return -EINVAL; group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL); - groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL); - if (!group_fds || !groups) { + files = kcalloc(hdr.count, sizeof(*files), GFP_KERNEL); + if (!group_fds || !files) { kfree(group_fds); - kfree(groups); + kfree(files); return -ENOMEM; } if (copy_from_user(group_fds, (void __user *)(arg + minsz), hdr.count * sizeof(*group_fds))) { kfree(group_fds); - kfree(groups); + kfree(files); return -EFAULT; } @@ -1073,22 +1099,22 @@ reset_info_exit: * user interface and store the group and iommu ID. This * ensures the group is held across the reset. 
*/ - for (group_idx = 0; group_idx < hdr.count; group_idx++) { - struct vfio_group *group; - struct fd f = fdget(group_fds[group_idx]); - if (!f.file) { + for (file_idx = 0; file_idx < hdr.count; file_idx++) { + struct file *file = fget(group_fds[file_idx]); + + if (!file) { ret = -EBADF; break; } - group = vfio_group_get_external_user(f.file); - fdput(f); - if (IS_ERR(group)) { - ret = PTR_ERR(group); + /* Ensure the FD is a vfio group FD.*/ + if (!vfio_file_iommu_group(file)) { + fput(file); + ret = -EINVAL; break; } - groups[group_idx] = group; + files[file_idx] = file; } kfree(group_fds); @@ -1098,15 +1124,15 @@ reset_info_exit: goto hot_reset_release; info.count = hdr.count; - info.groups = groups; + info.files = files; ret = vfio_pci_dev_set_hot_reset(vdev->vdev.dev_set, &info); hot_reset_release: - for (group_idx--; group_idx >= 0; group_idx--) - vfio_group_put_external_user(groups[group_idx]); + for (file_idx--; file_idx >= 0; file_idx--) + fput(files[file_idx]); - kfree(groups); + kfree(files); return ret; } else if (cmd == VFIO_DEVICE_IOEVENTFD) { struct vfio_device_ioeventfd ioeventfd; @@ -1819,8 +1845,13 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device); int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) { struct pci_dev *pdev = vdev->pdev; + struct device *dev = &pdev->dev; int ret; + /* Drivers must set the vfio_pci_core_device to their drvdata */ + if (WARN_ON(vdev != dev_get_drvdata(dev))) + return -EINVAL; + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; @@ -1860,19 +1891,21 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) vfio_pci_probe_power_state(vdev); - if (!disable_idle_d3) { - /* - * pci-core sets the device power state to an unknown value at - * bootup and after being removed from a driver. The only - * transition it allows from this unknown state is to D0, which - * typically happens when a driver calls pci_enable_device(). - * We're not ready to enable the device yet, but we do want to - * be able to get to D3. Therefore first do a D0 transition - * before going to D3. - */ - vfio_pci_set_power_state(vdev, PCI_D0); - vfio_pci_set_power_state(vdev, PCI_D3hot); - } + /* + * pci-core sets the device power state to an unknown value at + * bootup and after being removed from a driver. The only + * transition it allows from this unknown state is to D0, which + * typically happens when a driver calls pci_enable_device(). + * We're not ready to enable the device yet, but we do want to + * be able to get to D3. Therefore first do a D0 transition + * before enabling runtime PM. 
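Earlier in this hunk, vfio_pci_core_register_device() gains a WARN_ON() that spells out a new contract: the driver must point its PCI drvdata at the embedded vfio_pci_core_device before registering, which in turn lets a variant driver recover its wrapper with container_of(), as mlx5vf_drvdata() does earlier in this diff. A minimal sketch of the convention; my_variant_device and my_variant_drvdata() are illustrative names, not code from this series:

#include <linux/pci.h>
#include <linux/vfio_pci_core.h>

struct my_variant_device {
        struct vfio_pci_core_device core_device;
        /* variant-specific state goes here */
};

/* Recover the wrapper from the drvdata the probe routine installed. */
static struct my_variant_device *my_variant_drvdata(struct pci_dev *pdev)
{
        struct vfio_pci_core_device *core = dev_get_drvdata(&pdev->dev);

        return container_of(core, struct my_variant_device, core_device);
}

The probe routine is expected to install the embedded core_device as drvdata before calling vfio_pci_core_register_device(), exactly as the mlx5 and vfio-pci probe changes in this diff do.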
+ */ + vfio_pci_set_power_state(vdev, PCI_D0); + + dev->driver->pm = &vfio_pci_core_pm_ops; + pm_runtime_allow(dev); + if (!disable_idle_d3) + pm_runtime_put(dev); ret = vfio_register_group_dev(&vdev->vdev); if (ret) @@ -1881,7 +1914,9 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev) out_power: if (!disable_idle_d3) - vfio_pci_set_power_state(vdev, PCI_D0); + pm_runtime_get_noresume(dev); + + pm_runtime_forbid(dev); out_vf: vfio_pci_vf_uninit(vdev); return ret; @@ -1890,9 +1925,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_register_device); void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) { - struct pci_dev *pdev = vdev->pdev; - - vfio_pci_core_sriov_configure(pdev, 0); + vfio_pci_core_sriov_configure(vdev, 0); vfio_unregister_group_dev(&vdev->vdev); @@ -1900,21 +1933,16 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev) vfio_pci_vga_uninit(vdev); if (!disable_idle_d3) - vfio_pci_set_power_state(vdev, PCI_D0); + pm_runtime_get_noresume(&vdev->pdev->dev); + + pm_runtime_forbid(&vdev->pdev->dev); } EXPORT_SYMBOL_GPL(vfio_pci_core_unregister_device); pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { - struct vfio_pci_core_device *vdev; - struct vfio_device *device; - - device = vfio_device_get_from_dev(&pdev->dev); - if (device == NULL) - return PCI_ERS_RESULT_DISCONNECT; - - vdev = container_of(device, struct vfio_pci_core_device, vdev); + struct vfio_pci_core_device *vdev = dev_get_drvdata(&pdev->dev); mutex_lock(&vdev->igate); @@ -1923,26 +1951,18 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, mutex_unlock(&vdev->igate); - vfio_device_put(device); - return PCI_ERS_RESULT_CAN_RECOVER; } EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected); -int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) +int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev, + int nr_virtfn) { - struct vfio_pci_core_device *vdev; - struct vfio_device *device; + struct pci_dev *pdev = vdev->pdev; int ret = 0; device_lock_assert(&pdev->dev); - device = vfio_device_get_from_dev(&pdev->dev); - if (!device) - return -ENODEV; - - vdev = container_of(device, struct vfio_pci_core_device, vdev); - if (nr_virtfn) { mutex_lock(&vfio_pci_sriov_pfs_mutex); /* @@ -1957,22 +1977,42 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn) } list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs); mutex_unlock(&vfio_pci_sriov_pfs_mutex); - ret = pci_enable_sriov(pdev, nr_virtfn); + + /* + * The PF power state should always be higher than the VF power + * state. The PF can be in low power state either with runtime + * power management (when there is no user) or PCI_PM_CTRL + * register write by the user. If PF is in the low power state, + * then change the power state to D0 first before enabling + * SR-IOV. Also, this function can be called at any time, and + * userspace PCI_PM_CTRL write can race against this code path, + * so protect the same with 'memory_lock'. 
+ */ + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret) goto out_del; - ret = nr_virtfn; - goto out_put; + + down_write(&vdev->memory_lock); + vfio_pci_set_power_state(vdev, PCI_D0); + ret = pci_enable_sriov(pdev, nr_virtfn); + up_write(&vdev->memory_lock); + if (ret) { + pm_runtime_put(&pdev->dev); + goto out_del; + } + return nr_virtfn; } - pci_disable_sriov(pdev); + if (pci_num_vf(pdev)) { + pci_disable_sriov(pdev); + pm_runtime_put(&pdev->dev); + } out_del: mutex_lock(&vfio_pci_sriov_pfs_mutex); list_del_init(&vdev->sriov_pfs_item); out_unlock: mutex_unlock(&vfio_pci_sriov_pfs_mutex); -out_put: - vfio_device_put(device); return ret; } EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure); @@ -1988,7 +2028,7 @@ static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev, unsigned int i; for (i = 0; i < groups->count; i++) - if (groups->groups[i] == vdev->vdev.group) + if (vfio_file_has_dev(groups->files[i], &vdev->vdev)) return true; return false; } @@ -2041,6 +2081,27 @@ vfio_pci_dev_set_resettable(struct vfio_device_set *dev_set) return pdev; } +static int vfio_pci_dev_set_pm_runtime_get(struct vfio_device_set *dev_set) +{ + struct vfio_pci_core_device *cur; + int ret; + + list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) { + ret = pm_runtime_resume_and_get(&cur->pdev->dev); + if (ret) + goto unwind; + } + + return 0; + +unwind: + list_for_each_entry_continue_reverse(cur, &dev_set->device_list, + vdev.dev_set_list) + pm_runtime_put(&cur->pdev->dev); + + return ret; +} + /* * We need to get memory_lock for each device, but devices can share mmap_lock, * therefore we need to zap and hold the vma_lock for each device, and only then @@ -2147,43 +2208,38 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set) * - At least one of the affected devices is marked dirty via * needs_reset (such as by lack of FLR support) * Then attempt to perform that bus or slot reset. - * Returns true if the dev_set was reset. */ -static bool vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set) +static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set) { struct vfio_pci_core_device *cur; struct pci_dev *pdev; - int ret; + bool reset_done = false; if (!vfio_pci_dev_set_needs_reset(dev_set)) - return false; + return; pdev = vfio_pci_dev_set_resettable(dev_set); if (!pdev) - return false; + return; /* - * The pci_reset_bus() will reset all the devices in the bus. - * The power state can be non-D0 for some of the devices in the bus. - * For these devices, the pci_reset_bus() will internally set - * the power state to D0 without vfio driver involvement. - * For the devices which have NoSoftRst-, the reset function can - * cause the PCI config space reset without restoring the original - * state (saved locally in 'vdev->pm_save'). + * Some of the devices in the bus can be in the runtime suspended + * state. Increment the usage count for all the devices in the dev_set + * before reset and decrement the same after reset. 
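vfio_pci_dev_set_pm_runtime_get(), added earlier in this hunk, is an instance of the usual acquire-all-or-unwind idiom: take a runtime-PM reference per device while walking the list, and on failure walk back over only the entries already processed. A stripped-down sketch of the idiom with a hypothetical resource type (my_res, my_acquire() and my_release() are placeholders):

#include <linux/list.h>

struct my_res {
        struct list_head node;
};

static int my_acquire(struct my_res *r) { return 0; }  /* placeholder */
static void my_release(struct my_res *r) { }           /* placeholder */

static int acquire_all(struct list_head *head)
{
        struct my_res *cur;
        int ret;

        list_for_each_entry(cur, head, node) {
                ret = my_acquire(cur);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        /* Releases only the entries before 'cur', in reverse order. */
        list_for_each_entry_continue_reverse(cur, head, node)
                my_release(cur);
        return ret;
}

list_for_each_entry_continue_reverse() starts at the entry before the one that failed, so the failing element itself is never released.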
*/ - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) - vfio_pci_set_power_state(cur, PCI_D0); + if (!disable_idle_d3 && vfio_pci_dev_set_pm_runtime_get(dev_set)) + return; - ret = pci_reset_bus(pdev); - if (ret) - return false; + if (!pci_reset_bus(pdev)) + reset_done = true; list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) { - cur->needs_reset = false; + if (reset_done) + cur->needs_reset = false; + if (!disable_idle_d3) - vfio_pci_set_power_state(cur, PCI_D3hot); + pm_runtime_put(&cur->pdev->dev); } - return true; } void vfio_pci_core_set_params(bool is_nointxmask, bool is_disable_vga, diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c index badfffea14fb..1aaa4f721bd2 100644 --- a/drivers/vfio/platform/vfio_amba.c +++ b/drivers/vfio/platform/vfio_amba.c @@ -95,6 +95,7 @@ static struct amba_driver vfio_amba_driver = { .name = "vfio-amba", .owner = THIS_MODULE, }, + .driver_managed_dma = true, }; module_amba_driver(vfio_amba_driver); diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index 68a1c87066d7..04f40c5acfd6 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -76,6 +76,7 @@ static struct platform_driver vfio_platform_driver = { .driver = { .name = "vfio-platform", }, + .driver_managed_dma = true, }; module_platform_driver(vfio_platform_driver); diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index a4555014bd1e..61e71c1154be 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -62,30 +62,22 @@ struct vfio_container { bool noiommu; }; -struct vfio_unbound_dev { - struct device *dev; - struct list_head unbound_next; -}; - struct vfio_group { struct device dev; struct cdev cdev; refcount_t users; - atomic_t container_users; + unsigned int container_users; struct iommu_group *iommu_group; struct vfio_container *container; struct list_head device_list; struct mutex device_lock; - struct notifier_block nb; struct list_head vfio_next; struct list_head container_next; - struct list_head unbound_list; - struct mutex unbound_lock; - atomic_t opened; - wait_queue_head_t container_q; enum vfio_group_type type; unsigned int dev_counter; + struct rw_semaphore group_rwsem; struct kvm *kvm; + struct file *opened_file; struct blocking_notifier_head notifier; }; @@ -281,8 +273,6 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops) } EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver); -static int vfio_iommu_group_notifier(struct notifier_block *nb, - unsigned long action, void *data); static void vfio_group_get(struct vfio_group *group); /* @@ -340,16 +330,8 @@ vfio_group_get_from_iommu(struct iommu_group *iommu_group) static void vfio_group_release(struct device *dev) { struct vfio_group *group = container_of(dev, struct vfio_group, dev); - struct vfio_unbound_dev *unbound, *tmp; - - list_for_each_entry_safe(unbound, tmp, - &group->unbound_list, unbound_next) { - list_del(&unbound->unbound_next); - kfree(unbound); - } mutex_destroy(&group->device_lock); - mutex_destroy(&group->unbound_lock); iommu_group_put(group->iommu_group); ida_free(&vfio.group_ida, MINOR(group->dev.devt)); kfree(group); @@ -379,11 +361,9 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group, group->cdev.owner = THIS_MODULE; refcount_set(&group->users, 1); + init_rwsem(&group->group_rwsem); INIT_LIST_HEAD(&group->device_list); mutex_init(&group->device_lock); - INIT_LIST_HEAD(&group->unbound_list); - 
mutex_init(&group->unbound_lock); - init_waitqueue_head(&group->container_q); group->iommu_group = iommu_group; /* put in vfio_group_release() */ iommu_group_ref_get(iommu_group); @@ -412,13 +392,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, goto err_put; } - group->nb.notifier_call = vfio_iommu_group_notifier; - err = iommu_group_register_notifier(iommu_group, &group->nb); - if (err) { - ret = ERR_PTR(err); - goto err_put; - } - mutex_lock(&vfio.group_lock); /* Did we race creating this group? */ @@ -439,7 +412,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group, err_unlock: mutex_unlock(&vfio.group_lock); - iommu_group_unregister_notifier(group->iommu_group, &group->nb); err_put: put_device(&group->dev); return ret; @@ -457,14 +429,13 @@ static void vfio_group_put(struct vfio_group *group) * properly hold the group reference. */ WARN_ON(!list_empty(&group->device_list)); - WARN_ON(atomic_read(&group->container_users)); + WARN_ON(group->container || group->container_users); WARN_ON(group->notifier.head); list_del(&group->vfio_next); cdev_device_del(&group->cdev, &group->dev); mutex_unlock(&vfio.group_lock); - iommu_group_unregister_notifier(group->iommu_group, &group->nb); put_device(&group->dev); } @@ -473,31 +444,15 @@ static void vfio_group_get(struct vfio_group *group) refcount_inc(&group->users); } -static struct vfio_group *vfio_group_get_from_dev(struct device *dev) -{ - struct iommu_group *iommu_group; - struct vfio_group *group; - - iommu_group = iommu_group_get(dev); - if (!iommu_group) - return NULL; - - group = vfio_group_get_from_iommu(iommu_group); - iommu_group_put(iommu_group); - - return group; -} - /* * Device objects - create, release, get, put, search */ /* Device reference always implies a group reference */ -void vfio_device_put(struct vfio_device *device) +static void vfio_device_put(struct vfio_device *device) { if (refcount_dec_and_test(&device->refcount)) complete(&device->comp); } -EXPORT_SYMBOL_GPL(vfio_device_put); static bool vfio_device_try_get(struct vfio_device *device) { @@ -521,175 +476,6 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group, } /* - * Some drivers, like pci-stub, are only used to prevent other drivers from - * claiming a device and are therefore perfectly legitimate for a user owned - * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping - * of the device, but it does prevent the user from having direct access to - * the device, which is useful in some circumstances. - * - * We also assume that we can include PCI interconnect devices, ie. bridges. - * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge - * then all of the downstream devices will be part of the same IOMMU group as - * the bridge. Thus, if placing the bridge into the user owned IOVA space - * breaks anything, it only does so for user owned devices downstream. Note - * that error notification via MSI can be affected for platforms that handle - * MSI within the same IOVA space as DMA. 
- */ -static const char * const vfio_driver_allowed[] = { "pci-stub" }; - -static bool vfio_dev_driver_allowed(struct device *dev, - struct device_driver *drv) -{ - if (dev_is_pci(dev)) { - struct pci_dev *pdev = to_pci_dev(dev); - - if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) - return true; - } - - return match_string(vfio_driver_allowed, - ARRAY_SIZE(vfio_driver_allowed), - drv->name) >= 0; -} - -/* - * A vfio group is viable for use by userspace if all devices are in - * one of the following states: - * - driver-less - * - bound to a vfio driver - * - bound to an otherwise allowed driver - * - a PCI interconnect device - * - * We use two methods to determine whether a device is bound to a vfio - * driver. The first is to test whether the device exists in the vfio - * group. The second is to test if the device exists on the group - * unbound_list, indicating it's in the middle of transitioning from - * a vfio driver to driver-less. - */ -static int vfio_dev_viable(struct device *dev, void *data) -{ - struct vfio_group *group = data; - struct vfio_device *device; - struct device_driver *drv = READ_ONCE(dev->driver); - struct vfio_unbound_dev *unbound; - int ret = -EINVAL; - - mutex_lock(&group->unbound_lock); - list_for_each_entry(unbound, &group->unbound_list, unbound_next) { - if (dev == unbound->dev) { - ret = 0; - break; - } - } - mutex_unlock(&group->unbound_lock); - - if (!ret || !drv || vfio_dev_driver_allowed(dev, drv)) - return 0; - - device = vfio_group_get_device(group, dev); - if (device) { - vfio_device_put(device); - return 0; - } - - return ret; -} - -/* - * Async device support - */ -static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) -{ - struct vfio_device *device; - - /* Do we already know about it? We shouldn't */ - device = vfio_group_get_device(group, dev); - if (WARN_ON_ONCE(device)) { - vfio_device_put(device); - return 0; - } - - /* Nothing to do for idle groups */ - if (!atomic_read(&group->container_users)) - return 0; - - /* TODO Prevent device auto probing */ - dev_WARN(dev, "Device added to live group %d!\n", - iommu_group_id(group->iommu_group)); - - return 0; -} - -static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) -{ - /* We don't care what happens when the group isn't in use */ - if (!atomic_read(&group->container_users)) - return 0; - - return vfio_dev_viable(dev, group); -} - -static int vfio_iommu_group_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - struct vfio_group *group = container_of(nb, struct vfio_group, nb); - struct device *dev = data; - struct vfio_unbound_dev *unbound; - - switch (action) { - case IOMMU_GROUP_NOTIFY_ADD_DEVICE: - vfio_group_nb_add_dev(group, dev); - break; - case IOMMU_GROUP_NOTIFY_DEL_DEVICE: - /* - * Nothing to do here. If the device is in use, then the - * vfio sub-driver should block the remove callback until - * it is unused. If the device is unused or attached to a - * stub driver, then it should be released and we don't - * care that it will be going away. 
- */ - break; - case IOMMU_GROUP_NOTIFY_BIND_DRIVER: - dev_dbg(dev, "%s: group %d binding to driver\n", __func__, - iommu_group_id(group->iommu_group)); - break; - case IOMMU_GROUP_NOTIFY_BOUND_DRIVER: - dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__, - iommu_group_id(group->iommu_group), dev->driver->name); - BUG_ON(vfio_group_nb_verify(group, dev)); - break; - case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER: - dev_dbg(dev, "%s: group %d unbinding from driver %s\n", - __func__, iommu_group_id(group->iommu_group), - dev->driver->name); - break; - case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER: - dev_dbg(dev, "%s: group %d unbound from driver\n", __func__, - iommu_group_id(group->iommu_group)); - /* - * XXX An unbound device in a live group is ok, but we'd - * really like to avoid the above BUG_ON by preventing other - * drivers from binding to it. Once that occurs, we have to - * stop the system to maintain isolation. At a minimum, we'd - * want a toggle to disable driver auto probe for this device. - */ - - mutex_lock(&group->unbound_lock); - list_for_each_entry(unbound, - &group->unbound_list, unbound_next) { - if (dev == unbound->dev) { - list_del(&unbound->unbound_next); - kfree(unbound); - break; - } - } - mutex_unlock(&group->unbound_lock); - break; - } - return NOTIFY_OK; -} - -/* * VFIO driver API */ void vfio_init_group_dev(struct vfio_device *device, struct device *dev, @@ -745,11 +531,11 @@ static struct vfio_group *vfio_group_find_or_alloc(struct device *dev) iommu_group = iommu_group_get(dev); #ifdef CONFIG_VFIO_NOIOMMU - if (!iommu_group && noiommu && !iommu_present(dev->bus)) { + if (!iommu_group && noiommu) { /* * With noiommu enabled, create an IOMMU group for devices that - * don't already have one and don't have an iommu_ops on their - * bus. Taint the kernel because we're about to give a DMA + * don't already have one, implying no IOMMU hardware/driver + * exists. Taint the kernel because we're about to give a DMA * capable device to a user without IOMMU protection. */ group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU); @@ -815,6 +601,13 @@ static int __vfio_register_dev(struct vfio_device *device, int vfio_register_group_dev(struct vfio_device *device) { + /* + * VFIO always sets IOMMU_CACHE because we offer no way for userspace to + * restore cache coherency. + */ + if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY)) + return -EINVAL; + return __vfio_register_dev(device, vfio_group_find_or_alloc(device->dev)); } @@ -831,29 +624,6 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device) } EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev); -/* - * Get a reference to the vfio_device for a device. Even if the - * caller thinks they own the device, they could be racing with a - * release call path, so we can't trust drvdata for the shortcut. - * Go the long way around, from the iommu_group to the vfio_group - * to the vfio_device. 
- */ -struct vfio_device *vfio_device_get_from_dev(struct device *dev) -{ - struct vfio_group *group; - struct vfio_device *device; - - group = vfio_group_get_from_dev(dev); - if (!group) - return NULL; - - device = vfio_group_get_device(group, dev); - vfio_group_put(group); - - return device; -} -EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); - static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, char *buf) { @@ -889,29 +659,10 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, void vfio_unregister_group_dev(struct vfio_device *device) { struct vfio_group *group = device->group; - struct vfio_unbound_dev *unbound; unsigned int i = 0; bool interrupted = false; long rc; - /* - * When the device is removed from the group, the group suddenly - * becomes non-viable; the device has a driver (until the unbind - * completes), but it's not present in the group. This is bad news - * for any external users that need to re-acquire a group reference - * in order to match and release their existing reference. To - * solve this, we track such devices on the unbound_list to bridge - * the gap until they're fully unbound. - */ - unbound = kzalloc(sizeof(*unbound), GFP_KERNEL); - if (unbound) { - unbound->dev = device->dev; - mutex_lock(&group->unbound_lock); - list_add(&unbound->unbound_next, &group->unbound_list); - mutex_unlock(&group->unbound_lock); - } - WARN_ON(!unbound); - vfio_device_put(device); rc = try_wait_for_completion(&device->comp); while (rc <= 0) { @@ -940,23 +691,6 @@ void vfio_unregister_group_dev(struct vfio_device *device) group->dev_counter--; mutex_unlock(&group->device_lock); - /* - * In order to support multiple devices per group, devices can be - * plucked from the group while other devices in the group are still - * in use. The container persists with this group and those remaining - * devices still attached. If the user creates an isolation violation - * by binding this device to another driver while the group is still in - * use, that's their fault. However, in the case of removing the last, - * or potentially the only, device in the group there can be no other - * in-use devices in the group. The user has done their due diligence - * and we should lay no claims to those devices. In order to do that, - * we need to make sure the group is detached from the container. - * Without this stall, we're potentially racing with a user process - * that may attempt to immediately bind this device to another driver. 
- */ - if (list_empty(&group->device_list)) - wait_event(group->container_q, !group->container); - if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU) iommu_group_remove_device(device->dev); @@ -1191,6 +925,8 @@ static void __vfio_group_unset_container(struct vfio_group *group) struct vfio_container *container = group->container; struct vfio_iommu_driver *driver; + lockdep_assert_held_write(&group->group_rwsem); + down_write(&container->group_lock); driver = container->iommu_driver; @@ -1198,8 +934,11 @@ static void __vfio_group_unset_container(struct vfio_group *group) driver->ops->detach_group(container->iommu_data, group->iommu_group); + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner(group->iommu_group); + group->container = NULL; - wake_up(&group->container_q); + group->container_users = 0; list_del(&group->container_next); /* Detaching the last group deprivileges a container, remove iommu */ @@ -1223,30 +962,16 @@ static void __vfio_group_unset_container(struct vfio_group *group) */ static int vfio_group_unset_container(struct vfio_group *group) { - int users = atomic_cmpxchg(&group->container_users, 1, 0); + lockdep_assert_held_write(&group->group_rwsem); - if (!users) + if (!group->container) return -EINVAL; - if (users != 1) + if (group->container_users != 1) return -EBUSY; - __vfio_group_unset_container(group); - return 0; } -/* - * When removing container users, anything that removes the last user - * implicitly removes the group from the container. That is, if the - * group file descriptor is closed, as well as any device file descriptors, - * the group is free. - */ -static void vfio_group_try_dissolve_container(struct vfio_group *group) -{ - if (0 == atomic_dec_if_positive(&group->container_users)) - __vfio_group_unset_container(group); -} - static int vfio_group_set_container(struct vfio_group *group, int container_fd) { struct fd f; @@ -1254,7 +979,9 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd) struct vfio_iommu_driver *driver; int ret = 0; - if (atomic_read(&group->container_users)) + lockdep_assert_held_write(&group->group_rwsem); + + if (group->container || WARN_ON(group->container_users)) return -EINVAL; if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) @@ -1282,22 +1009,32 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd) goto unlock_out; } + if (group->type == VFIO_IOMMU) { + ret = iommu_group_claim_dma_owner(group->iommu_group, f.file); + if (ret) + goto unlock_out; + } + driver = container->iommu_driver; if (driver) { ret = driver->ops->attach_group(container->iommu_data, group->iommu_group, group->type); - if (ret) + if (ret) { + if (group->type == VFIO_IOMMU) + iommu_group_release_dma_owner( + group->iommu_group); goto unlock_out; + } } group->container = container; + group->container_users = 1; container->noiommu = (group->type == VFIO_NO_IOMMU); list_add(&group->container_next, &container->group_list); /* Get a reference on the container and mark a user within the group */ vfio_container_get(container); - atomic_inc(&group->container_users); unlock_out: up_write(&container->group_lock); @@ -1305,60 +1042,74 @@ unlock_out: return ret; } -static bool vfio_group_viable(struct vfio_group *group) +static const struct file_operations vfio_device_fops; + +/* true if the vfio_device has open_device() called but not close_device() */ +static bool vfio_assert_device_open(struct vfio_device *device) { - return (iommu_group_for_each_dev(group->iommu_group, - 
group, vfio_dev_viable) == 0); + return !WARN_ON_ONCE(!READ_ONCE(device->open_count)); } -static int vfio_group_add_container_user(struct vfio_group *group) +static int vfio_device_assign_container(struct vfio_device *device) { - if (!atomic_inc_not_zero(&group->container_users)) + struct vfio_group *group = device->group; + + lockdep_assert_held_write(&group->group_rwsem); + + if (!group->container || !group->container->iommu_driver || + WARN_ON(!group->container_users)) return -EINVAL; - if (group->type == VFIO_NO_IOMMU) { - atomic_dec(&group->container_users); + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) return -EPERM; - } - if (!group->container->iommu_driver || !vfio_group_viable(group)) { - atomic_dec(&group->container_users); - return -EINVAL; - } + get_file(group->opened_file); + group->container_users++; return 0; } -static const struct file_operations vfio_device_fops; +static void vfio_device_unassign_container(struct vfio_device *device) +{ + down_write(&device->group->group_rwsem); + WARN_ON(device->group->container_users <= 1); + device->group->container_users--; + fput(device->group->opened_file); + up_write(&device->group->group_rwsem); +} -static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) +static struct file *vfio_device_open(struct vfio_device *device) { - struct vfio_device *device; struct file *filep; - int fdno; - int ret = 0; - - if (0 == atomic_read(&group->container_users) || - !group->container->iommu_driver || !vfio_group_viable(group)) - return -EINVAL; - - if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) - return -EPERM; + int ret; - device = vfio_device_get_from_name(group, buf); - if (IS_ERR(device)) - return PTR_ERR(device); + down_write(&device->group->group_rwsem); + ret = vfio_device_assign_container(device); + up_write(&device->group->group_rwsem); + if (ret) + return ERR_PTR(ret); if (!try_module_get(device->dev->driver->owner)) { ret = -ENODEV; - goto err_device_put; + goto err_unassign_container; } mutex_lock(&device->dev_set->lock); device->open_count++; - if (device->open_count == 1 && device->ops->open_device) { - ret = device->ops->open_device(device); - if (ret) - goto err_undo_count; + if (device->open_count == 1) { + /* + * Here we pass the KVM pointer with the group under the read + * lock. If the device driver will use it, it must obtain a + * reference and release it during close_device. 
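The comment above places the burden on drivers that want to keep using device->kvm after open: take a reference during open_device() and drop it in close_device(). A hedged sketch of what such a driver might do; the kvm_get_kvm()/kvm_put_kvm() pairing and the decision to fail open without a KVM are driver policy assumed here for illustration, not something this patch mandates:

#include <linux/kvm_host.h>
#include <linux/vfio.h>

static int my_open_device(struct vfio_device *vdev)
{
        /* device->kvm is only guaranteed stable while open_device() runs... */
        if (!vdev->kvm)
                return -EINVAL;

        /* ...so take our own reference if it is needed after open. */
        kvm_get_kvm(vdev->kvm);
        return 0;
}

static void my_close_device(struct vfio_device *vdev)
{
        /* Drop the reference taken in my_open_device(). */
        kvm_put_kvm(vdev->kvm);
}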
+ */ + down_read(&device->group->group_rwsem); + device->kvm = device->group->kvm; + + if (device->ops->open_device) { + ret = device->ops->open_device(device); + if (ret) + goto err_undo_count; + } + up_read(&device->group->group_rwsem); } mutex_unlock(&device->dev_set->lock); @@ -1366,15 +1117,11 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) * We can't use anon_inode_getfd() because we need to modify * the f_mode flags directly to allow more than just ioctls */ - fdno = ret = get_unused_fd_flags(O_CLOEXEC); - if (ret < 0) - goto err_close_device; - filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, device, O_RDWR); if (IS_ERR(filep)) { ret = PTR_ERR(filep); - goto err_fd; + goto err_close_device; } /* @@ -1384,26 +1131,61 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) */ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - atomic_inc(&group->container_users); - - fd_install(fdno, filep); - - if (group->type == VFIO_NO_IOMMU) + if (device->group->type == VFIO_NO_IOMMU) dev_warn(device->dev, "vfio-noiommu device opened by user " "(%s:%d)\n", current->comm, task_pid_nr(current)); - return fdno; + /* + * On success the ref of device is moved to the file and + * put in vfio_device_fops_release() + */ + return filep; -err_fd: - put_unused_fd(fdno); err_close_device: mutex_lock(&device->dev_set->lock); + down_read(&device->group->group_rwsem); if (device->open_count == 1 && device->ops->close_device) device->ops->close_device(device); err_undo_count: device->open_count--; + if (device->open_count == 0 && device->kvm) + device->kvm = NULL; + up_read(&device->group->group_rwsem); mutex_unlock(&device->dev_set->lock); module_put(device->dev->driver->owner); -err_device_put: +err_unassign_container: + vfio_device_unassign_container(device); + return ERR_PTR(ret); +} + +static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) +{ + struct vfio_device *device; + struct file *filep; + int fdno; + int ret; + + device = vfio_device_get_from_name(group, buf); + if (IS_ERR(device)) + return PTR_ERR(device); + + fdno = get_unused_fd_flags(O_CLOEXEC); + if (fdno < 0) { + ret = fdno; + goto err_put_device; + } + + filep = vfio_device_open(device); + if (IS_ERR(filep)) { + ret = PTR_ERR(filep); + goto err_put_fdno; + } + + fd_install(fdno, filep); + return fdno; + +err_put_fdno: + put_unused_fd(fdno); +err_put_device: vfio_device_put(device); return ret; } @@ -1430,11 +1212,13 @@ static long vfio_group_fops_unl_ioctl(struct file *filep, status.flags = 0; - if (vfio_group_viable(group)) - status.flags |= VFIO_GROUP_FLAGS_VIABLE; - + down_read(&group->group_rwsem); if (group->container) - status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET; + status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET | + VFIO_GROUP_FLAGS_VIABLE; + else if (!iommu_group_dma_owner_claimed(group->iommu_group)) + status.flags |= VFIO_GROUP_FLAGS_VIABLE; + up_read(&group->group_rwsem); if (copy_to_user((void __user *)arg, &status, minsz)) return -EFAULT; @@ -1452,11 +1236,15 @@ static long vfio_group_fops_unl_ioctl(struct file *filep, if (fd < 0) return -EINVAL; + down_write(&group->group_rwsem); ret = vfio_group_set_container(group, fd); + up_write(&group->group_rwsem); break; } case VFIO_GROUP_UNSET_CONTAINER: + down_write(&group->group_rwsem); ret = vfio_group_unset_container(group); + up_write(&group->group_rwsem); break; case VFIO_GROUP_GET_DEVICE_FD: { @@ -1479,38 +1267,38 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep) { 
struct vfio_group *group = container_of(inode->i_cdev, struct vfio_group, cdev); - int opened; + int ret; - /* users can be zero if this races with vfio_group_put() */ - if (!refcount_inc_not_zero(&group->users)) - return -ENODEV; + down_write(&group->group_rwsem); - if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) { - vfio_group_put(group); - return -EPERM; + /* users can be zero if this races with vfio_group_put() */ + if (!refcount_inc_not_zero(&group->users)) { + ret = -ENODEV; + goto err_unlock; } - /* Do we need multiple instances of the group open? Seems not. */ - opened = atomic_cmpxchg(&group->opened, 0, 1); - if (opened) { - vfio_group_put(group); - return -EBUSY; + if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) { + ret = -EPERM; + goto err_put; } - /* Is something still in use from a previous open? */ - if (group->container) { - atomic_dec(&group->opened); - vfio_group_put(group); - return -EBUSY; + /* + * Do we need multiple instances of the group open? Seems not. + */ + if (group->opened_file) { + ret = -EBUSY; + goto err_put; } - - /* Warn if previous user didn't cleanup and re-init to drop them */ - if (WARN_ON(group->notifier.head)) - BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); - + group->opened_file = filep; filep->private_data = group; + up_write(&group->group_rwsem); return 0; +err_put: + vfio_group_put(group); +err_unlock: + up_write(&group->group_rwsem); + return ret; } static int vfio_group_fops_release(struct inode *inode, struct file *filep) @@ -1519,9 +1307,18 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) filep->private_data = NULL; - vfio_group_try_dissolve_container(group); - - atomic_dec(&group->opened); + down_write(&group->group_rwsem); + /* + * Device FDs hold a group file reference, therefore the group release + * is only called when there are no open devices. + */ + WARN_ON(group->notifier.head); + if (group->container) { + WARN_ON(group->container_users != 1); + __vfio_group_unset_container(group); + } + group->opened_file = NULL; + up_write(&group->group_rwsem); vfio_group_put(group); @@ -1544,13 +1341,19 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) struct vfio_device *device = filep->private_data; mutex_lock(&device->dev_set->lock); - if (!--device->open_count && device->ops->close_device) + vfio_assert_device_open(device); + down_read(&device->group->group_rwsem); + if (device->open_count == 1 && device->ops->close_device) device->ops->close_device(device); + up_read(&device->group->group_rwsem); + device->open_count--; + if (device->open_count == 0) + device->kvm = NULL; mutex_unlock(&device->dev_set->lock); module_put(device->dev->driver->owner); - vfio_group_try_dissolve_container(device->group); + vfio_device_unassign_container(device); vfio_device_put(device); @@ -1899,119 +1702,94 @@ static const struct file_operations vfio_device_fops = { .mmap = vfio_device_fops_mmap, }; -/* - * External user API, exported by symbols to be linked dynamically. - * - * The protocol includes: - * 1. do normal VFIO init operation: - * - opening a new container; - * - attaching group(s) to it; - * - setting an IOMMU driver for a container. - * When IOMMU is set for a container, all groups in it are - * considered ready to use by an external user. - * - * 2. User space passes a group fd to an external user. - * The external user calls vfio_group_get_external_user() - * to verify that: - * - the group is initialized; - * - IOMMU is set for it. 
- * If both checks passed, vfio_group_get_external_user() - * increments the container user counter to prevent - * the VFIO group from disposal before KVM exits. +/** + * vfio_file_iommu_group - Return the struct iommu_group for the vfio group file + * @file: VFIO group file * - * 3. The external user calls vfio_external_user_iommu_id() - * to know an IOMMU ID. - * - * 4. When the external KVM finishes, it calls - * vfio_group_put_external_user() to release the VFIO group. - * This call decrements the container user counter. + * The returned iommu_group is valid as long as a ref is held on the file. */ -struct vfio_group *vfio_group_get_external_user(struct file *filep) +struct iommu_group *vfio_file_iommu_group(struct file *file) { - struct vfio_group *group = filep->private_data; - int ret; + struct vfio_group *group = file->private_data; - if (filep->f_op != &vfio_group_fops) - return ERR_PTR(-EINVAL); - - ret = vfio_group_add_container_user(group); - if (ret) - return ERR_PTR(ret); - - /* - * Since the caller holds the fget on the file group->users must be >= 1 - */ - vfio_group_get(group); - - return group; + if (file->f_op != &vfio_group_fops) + return NULL; + return group->iommu_group; } -EXPORT_SYMBOL_GPL(vfio_group_get_external_user); +EXPORT_SYMBOL_GPL(vfio_file_iommu_group); -/* - * External user API, exported by symbols to be linked dynamically. - * The external user passes in a device pointer - * to verify that: - * - A VFIO group is assiciated with the device; - * - IOMMU is set for the group. - * If both checks passed, vfio_group_get_external_user_from_dev() - * increments the container user counter to prevent the VFIO group - * from disposal before external user exits and returns the pointer - * to the VFIO group. - * - * When the external user finishes using the VFIO group, it calls - * vfio_group_put_external_user() to release the VFIO group and - * decrement the container user counter. +/** + * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file + * is always CPU cache coherent + * @file: VFIO group file * - * @dev [in] : device - * Return error PTR or pointer to VFIO group. + * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop + * bit in DMA transactions. A return of false indicates that the user has + * rights to access additional instructions such as wbinvd on x86. */ - -struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev) +bool vfio_file_enforced_coherent(struct file *file) { - struct vfio_group *group; - int ret; + struct vfio_group *group = file->private_data; + bool ret; - group = vfio_group_get_from_dev(dev); - if (!group) - return ERR_PTR(-ENODEV); + if (file->f_op != &vfio_group_fops) + return true; - ret = vfio_group_add_container_user(group); - if (ret) { - vfio_group_put(group); - return ERR_PTR(ret); + down_read(&group->group_rwsem); + if (group->container) { + ret = vfio_ioctl_check_extension(group->container, + VFIO_DMA_CC_IOMMU); + } else { + /* + * Since the coherency state is determined only once a container + * is attached the user must do so before they can prove they + * have permission. 
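Together with vfio_file_set_kvm() and vfio_file_has_dev() added just below, these helpers let an external user such as KVM work purely with the group file descriptor it was handed, replacing the vfio_group_get_external_user() machinery removed above. A hedged sketch of a consumer built only on the signatures introduced in this diff; my_consumer_attach() and its error policy are illustrative:

#include <linux/file.h>
#include <linux/vfio.h>

static int my_consumer_attach(struct file *filp, struct vfio_device *vdev,
                              struct kvm *kvm)
{
        /* Reject anything that is not a VFIO group file. */
        if (!vfio_file_iommu_group(filp))
                return -EINVAL;

        /* Does this group file actually cover the device we care about? */
        if (!vfio_file_has_dev(filp, vdev))
                return -ENODEV;

        /*
         * If the IOMMU does not enforce coherency, the user may need extra
         * rights (for example wbinvd emulation on x86).
         */
        if (!vfio_file_enforced_coherent(filp))
                pr_info("group DMA is not enforced cache coherent\n");

        /* Make the KVM visible to drivers when their device is opened. */
        vfio_file_set_kvm(filp, kvm);
        return 0;
}

The caller is expected to hold its own reference on the file for as long as it relies on the returned state, per the kerneldoc above.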
+ */ + ret = true; } - - return group; + up_read(&group->group_rwsem); + return ret; } -EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev); +EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent); -void vfio_group_put_external_user(struct vfio_group *group) +/** + * vfio_file_set_kvm - Link a kvm with VFIO drivers + * @file: VFIO group file + * @kvm: KVM to link + * + * When a VFIO device is first opened the KVM will be available in + * device->kvm if one was associated with the group. + */ +void vfio_file_set_kvm(struct file *file, struct kvm *kvm) { - vfio_group_try_dissolve_container(group); - vfio_group_put(group); -} -EXPORT_SYMBOL_GPL(vfio_group_put_external_user); + struct vfio_group *group = file->private_data; -bool vfio_external_group_match_file(struct vfio_group *test_group, - struct file *filep) -{ - struct vfio_group *group = filep->private_data; + if (file->f_op != &vfio_group_fops) + return; - return (filep->f_op == &vfio_group_fops) && (group == test_group); + down_write(&group->group_rwsem); + group->kvm = kvm; + up_write(&group->group_rwsem); } -EXPORT_SYMBOL_GPL(vfio_external_group_match_file); +EXPORT_SYMBOL_GPL(vfio_file_set_kvm); -int vfio_external_user_iommu_id(struct vfio_group *group) +/** + * vfio_file_has_dev - True if the VFIO file is a handle for device + * @file: VFIO file to check + * @device: Device that must be part of the file + * + * Returns true if given file has permission to manipulate the given device. + */ +bool vfio_file_has_dev(struct file *file, struct vfio_device *device) { - return iommu_group_id(group->iommu_group); -} -EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id); + struct vfio_group *group = file->private_data; -long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) -{ - return vfio_ioctl_check_extension(group->container, arg); + if (file->f_op != &vfio_group_fops) + return false; + + return group == device->group; } -EXPORT_SYMBOL_GPL(vfio_external_check_extension); +EXPORT_SYMBOL_GPL(vfio_file_has_dev); /* * Sub-module support @@ -2134,7 +1912,7 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); /* * Pin a set of guest PFNs and return their associated host PFNs for local * domain only. - * @dev [in] : device + * @device [in] : device * @user_pfn [in]: array of user/guest PFNs to be pinned. * @npage [in] : count of elements in user_pfn array. This count should not * be greater VFIO_PIN_PAGES_MAX_ENTRIES. @@ -2142,33 +1920,25 @@ EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare); * @phys_pfn[out]: array of host PFNs * Return error or number of pages pinned. 
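With the pin/unpin entry points below now keyed to the vfio_device rather than a bare struct device, a caller (typically an emulated-IOMMU/mdev driver) no longer performs any group lookup of its own. A hedged usage sketch against the prototypes shown here; the single-page granularity and the IOMMU_READ|IOMMU_WRITE protection choice are illustrative:

#include <linux/iommu.h>
#include <linux/vfio.h>

static int my_pin_one_page(struct vfio_device *vdev, unsigned long gfn,
                           unsigned long *hpfn)
{
        unsigned long user_pfn = gfn;
        int ret;

        /* Returns the number of pages pinned, or a negative error. */
        ret = vfio_pin_pages(vdev, &user_pfn, 1,
                             IOMMU_READ | IOMMU_WRITE, hpfn);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;
        return 0;
}

static void my_unpin_one_page(struct vfio_device *vdev, unsigned long gfn)
{
        unsigned long user_pfn = gfn;

        vfio_unpin_pages(vdev, &user_pfn, 1);
}

Both calls are only valid while the device is open, which is what the vfio_assert_device_open() checks added below enforce.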
*/ -int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, - int prot, unsigned long *phys_pfn) +int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn, + int npage, int prot, unsigned long *phys_pfn) { struct vfio_container *container; - struct vfio_group *group; + struct vfio_group *group = device->group; struct vfio_iommu_driver *driver; int ret; - if (!dev || !user_pfn || !phys_pfn || !npage) + if (!user_pfn || !phys_pfn || !npage || + !vfio_assert_device_open(device)) return -EINVAL; if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) return -E2BIG; - group = vfio_group_get_from_dev(dev); - if (!group) - return -ENODEV; - - if (group->dev_counter > 1) { - ret = -EINVAL; - goto err_pin_pages; - } - - ret = vfio_group_add_container_user(group); - if (ret) - goto err_pin_pages; + if (group->dev_counter > 1) + return -EINVAL; + /* group->container cannot change while a vfio device is open */ container = group->container; driver = container->iommu_driver; if (likely(driver && driver->ops->pin_pages)) @@ -2178,45 +1948,34 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, else ret = -ENOTTY; - vfio_group_try_dissolve_container(group); - -err_pin_pages: - vfio_group_put(group); return ret; } EXPORT_SYMBOL(vfio_pin_pages); /* * Unpin set of host PFNs for local domain only. - * @dev [in] : device + * @device [in] : device * @user_pfn [in]: array of user/guest PFNs to be unpinned. Number of user/guest * PFNs should not be greater than VFIO_PIN_PAGES_MAX_ENTRIES. * @npage [in] : count of elements in user_pfn array. This count should not * be greater than VFIO_PIN_PAGES_MAX_ENTRIES. * Return error or number of pages unpinned. */ -int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) +int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn, + int npage) { struct vfio_container *container; - struct vfio_group *group; struct vfio_iommu_driver *driver; int ret; - if (!dev || !user_pfn || !npage) + if (!user_pfn || !npage || !vfio_assert_device_open(device)) return -EINVAL; if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) return -E2BIG; - group = vfio_group_get_from_dev(dev); - if (!group) - return -ENODEV; - - ret = vfio_group_add_container_user(group); - if (ret) - goto err_unpin_pages; - - container = group->container; + /* group->container cannot change while a vfio device is open */ + container = device->group->container; driver = container->iommu_driver; if (likely(driver && driver->ops->unpin_pages)) ret = driver->ops->unpin_pages(container->iommu_data, user_pfn, @@ -2224,110 +1983,11 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) else ret = -ENOTTY; - vfio_group_try_dissolve_container(group); - -err_unpin_pages: - vfio_group_put(group); return ret; } EXPORT_SYMBOL(vfio_unpin_pages); /* - * Pin a set of guest IOVA PFNs and return their associated host PFNs for a - * VFIO group. - * - * The caller needs to call vfio_group_get_external_user() or - * vfio_group_get_external_user_from_dev() prior to calling this interface, - * so as to prevent the VFIO group from disposal in the middle of the call. - * But it can keep the reference to the VFIO group for several calls into - * this interface. - * After finishing using of the VFIO group, the caller needs to release the - * VFIO group by calling vfio_group_put_external_user(). - * - * @group [in] : VFIO group - * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned. - * @npage [in] : count of elements in user_iova_pfn array. 
- * This count should not be greater - * VFIO_PIN_PAGES_MAX_ENTRIES. - * @prot [in] : protection flags - * @phys_pfn [out] : array of host PFNs - * Return error or number of pages pinned. - */ -int vfio_group_pin_pages(struct vfio_group *group, - unsigned long *user_iova_pfn, int npage, - int prot, unsigned long *phys_pfn) -{ - struct vfio_container *container; - struct vfio_iommu_driver *driver; - int ret; - - if (!group || !user_iova_pfn || !phys_pfn || !npage) - return -EINVAL; - - if (group->dev_counter > 1) - return -EINVAL; - - if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) - return -E2BIG; - - container = group->container; - driver = container->iommu_driver; - if (likely(driver && driver->ops->pin_pages)) - ret = driver->ops->pin_pages(container->iommu_data, - group->iommu_group, user_iova_pfn, - npage, prot, phys_pfn); - else - ret = -ENOTTY; - - return ret; -} -EXPORT_SYMBOL(vfio_group_pin_pages); - -/* - * Unpin a set of guest IOVA PFNs for a VFIO group. - * - * The caller needs to call vfio_group_get_external_user() or - * vfio_group_get_external_user_from_dev() prior to calling this interface, - * so as to prevent the VFIO group from disposal in the middle of the call. - * But it can keep the reference to the VFIO group for several calls into - * this interface. - * After finishing using of the VFIO group, the caller needs to release the - * VFIO group by calling vfio_group_put_external_user(). - * - * @group [in] : vfio group - * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned. - * @npage [in] : count of elements in user_iova_pfn array. - * This count should not be greater than - * VFIO_PIN_PAGES_MAX_ENTRIES. - * Return error or number of pages unpinned. - */ -int vfio_group_unpin_pages(struct vfio_group *group, - unsigned long *user_iova_pfn, int npage) -{ - struct vfio_container *container; - struct vfio_iommu_driver *driver; - int ret; - - if (!group || !user_iova_pfn || !npage) - return -EINVAL; - - if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) - return -E2BIG; - - container = group->container; - driver = container->iommu_driver; - if (likely(driver && driver->ops->unpin_pages)) - ret = driver->ops->unpin_pages(container->iommu_data, - user_iova_pfn, npage); - else - ret = -ENOTTY; - - return ret; -} -EXPORT_SYMBOL(vfio_group_unpin_pages); - - -/* * This interface allows the CPUs to perform some sort of virtual DMA on * behalf of the device. * @@ -2337,32 +1997,25 @@ EXPORT_SYMBOL(vfio_group_unpin_pages); * As the read/write of user space memory is conducted via the CPUs and is * not a real device DMA, it is not necessary to pin the user space memory. * - * The caller needs to call vfio_group_get_external_user() or - * vfio_group_get_external_user_from_dev() prior to calling this interface, - * so as to prevent the VFIO group from disposal in the middle of the call. - * But it can keep the reference to the VFIO group for several calls into - * this interface. - * After finishing using of the VFIO group, the caller needs to release the - * VFIO group by calling vfio_group_put_external_user(). - * - * @group [in] : VFIO group + * @device [in] : VFIO device * @user_iova [in] : base IOVA of a user space buffer * @data [in] : pointer to kernel buffer * @len [in] : kernel buffer length * @write : indicate read or write * Return error code on failure or 0 on success. 
*/ -int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, - void *data, size_t len, bool write) +int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, void *data, + size_t len, bool write) { struct vfio_container *container; struct vfio_iommu_driver *driver; int ret = 0; - if (!group || !data || len <= 0) + if (!data || len <= 0 || !vfio_assert_device_open(device)) return -EINVAL; - container = group->container; + /* group->container cannot change while a vfio device is open */ + container = device->group->container; driver = container->iommu_driver; if (likely(driver && driver->ops->dma_rw)) @@ -2370,7 +2023,6 @@ int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, user_iova, data, len, write); else ret = -ENOTTY; - return ret; } EXPORT_SYMBOL(vfio_dma_rw); @@ -2383,9 +2035,7 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, struct vfio_iommu_driver *driver; int ret; - ret = vfio_group_add_container_user(group); - if (ret) - return -EINVAL; + lockdep_assert_held_read(&group->group_rwsem); container = group->container; driver = container->iommu_driver; @@ -2395,8 +2045,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - vfio_group_try_dissolve_container(group); - return ret; } @@ -2407,9 +2055,7 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, struct vfio_iommu_driver *driver; int ret; - ret = vfio_group_add_container_user(group); - if (ret) - return -EINVAL; + lockdep_assert_held_read(&group->group_rwsem); container = group->container; driver = container->iommu_driver; @@ -2419,147 +2065,52 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - vfio_group_try_dissolve_container(group); - - return ret; -} - -void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm) -{ - group->kvm = kvm; - blocking_notifier_call_chain(&group->notifier, - VFIO_GROUP_NOTIFY_SET_KVM, kvm); -} -EXPORT_SYMBOL_GPL(vfio_group_set_kvm); - -static int vfio_register_group_notifier(struct vfio_group *group, - unsigned long *events, - struct notifier_block *nb) -{ - int ret; - bool set_kvm = false; - - if (*events & VFIO_GROUP_NOTIFY_SET_KVM) - set_kvm = true; - - /* clear known events */ - *events &= ~VFIO_GROUP_NOTIFY_SET_KVM; - - /* refuse to continue if still events remaining */ - if (*events) - return -EINVAL; - - ret = vfio_group_add_container_user(group); - if (ret) - return -EINVAL; - - ret = blocking_notifier_chain_register(&group->notifier, nb); - - /* - * The attaching of kvm and vfio_group might already happen, so - * here we replay once upon registration. 
- */ - if (!ret && set_kvm && group->kvm) - blocking_notifier_call_chain(&group->notifier, - VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); - - vfio_group_try_dissolve_container(group); - - return ret; -} - -static int vfio_unregister_group_notifier(struct vfio_group *group, - struct notifier_block *nb) -{ - int ret; - - ret = vfio_group_add_container_user(group); - if (ret) - return -EINVAL; - - ret = blocking_notifier_chain_unregister(&group->notifier, nb); - - vfio_group_try_dissolve_container(group); - return ret; } -int vfio_register_notifier(struct device *dev, enum vfio_notify_type type, - unsigned long *events, struct notifier_block *nb) +int vfio_register_notifier(struct vfio_device *device, + enum vfio_notify_type type, unsigned long *events, + struct notifier_block *nb) { - struct vfio_group *group; + struct vfio_group *group = device->group; int ret; - if (!dev || !nb || !events || (*events == 0)) + if (!nb || !events || (*events == 0) || + !vfio_assert_device_open(device)) return -EINVAL; - group = vfio_group_get_from_dev(dev); - if (!group) - return -ENODEV; - switch (type) { case VFIO_IOMMU_NOTIFY: ret = vfio_register_iommu_notifier(group, events, nb); break; - case VFIO_GROUP_NOTIFY: - ret = vfio_register_group_notifier(group, events, nb); - break; default: ret = -EINVAL; } - - vfio_group_put(group); return ret; } EXPORT_SYMBOL(vfio_register_notifier); -int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type, +int vfio_unregister_notifier(struct vfio_device *device, + enum vfio_notify_type type, struct notifier_block *nb) { - struct vfio_group *group; + struct vfio_group *group = device->group; int ret; - if (!dev || !nb) + if (!nb || !vfio_assert_device_open(device)) return -EINVAL; - group = vfio_group_get_from_dev(dev); - if (!group) - return -ENODEV; - switch (type) { case VFIO_IOMMU_NOTIFY: ret = vfio_unregister_iommu_notifier(group, nb); break; - case VFIO_GROUP_NOTIFY: - ret = vfio_unregister_group_notifier(group, nb); - break; default: ret = -EINVAL; } - - vfio_group_put(group); return ret; } EXPORT_SYMBOL(vfio_unregister_notifier); -struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group) -{ - struct vfio_container *container; - struct vfio_iommu_driver *driver; - - if (!group) - return ERR_PTR(-EINVAL); - - container = group->container; - driver = container->iommu_driver; - if (likely(driver && driver->ops->group_iommu_domain)) - return driver->ops->group_iommu_domain(container->iommu_data, - group->iommu_group); - - return ERR_PTR(-ENOTTY); -} -EXPORT_SYMBOL_GPL(vfio_group_iommu_domain); - /* * Module/class support */ diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 9394aa9444c1..c13b9290e357 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -84,8 +84,8 @@ struct vfio_domain { struct iommu_domain *domain; struct list_head next; struct list_head group_list; - int prot; /* IOMMU_CACHE */ - bool fgsp; /* Fine-grained super pages */ + bool fgsp : 1; /* Fine-grained super pages */ + bool enforce_cache_coherency : 1; }; struct vfio_dma { @@ -1461,7 +1461,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, list_for_each_entry(d, &iommu->domain_list, next) { ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, - npage << PAGE_SHIFT, prot | d->prot); + npage << PAGE_SHIFT, prot | IOMMU_CACHE); if (ret) goto unwind; @@ -1771,7 +1771,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, } ret = iommu_map(domain->domain, iova, phys, - 
size, dma->prot | domain->prot); + size, dma->prot | IOMMU_CACHE); if (ret) { if (!dma->iommu_mapped) { vfio_unpin_pages_remote(dma, iova, @@ -1859,7 +1859,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain) return; ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, - IOMMU_READ | IOMMU_WRITE | domain->prot); + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); if (!ret) { size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); @@ -2267,8 +2267,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, goto out_detach; } - if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) - domain->prot |= IOMMU_CACHE; + /* + * If the IOMMU can block non-coherent operations (ie PCIe TLPs with + * no-snoop set) then VFIO always turns this feature on because on Intel + * platforms it optimizes KVM to disable wbinvd emulation. + */ + if (domain->domain->ops->enforce_cache_coherency) + domain->enforce_cache_coherency = + domain->domain->ops->enforce_cache_coherency( + domain->domain); /* * Try to match an existing compatible domain. We don't want to @@ -2279,7 +2286,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, */ list_for_each_entry(d, &iommu->domain_list, next) { if (d->domain->ops == domain->domain->ops && - d->prot == domain->prot) { + d->enforce_cache_coherency == + domain->enforce_cache_coherency) { iommu_detach_group(domain->domain, group->iommu_group); if (!iommu_attach_group(d->domain, group->iommu_group)) { @@ -2611,14 +2619,14 @@ static void vfio_iommu_type1_release(void *iommu_data) kfree(iommu); } -static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) +static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu) { struct vfio_domain *domain; int ret = 1; mutex_lock(&iommu->lock); list_for_each_entry(domain, &iommu->domain_list, next) { - if (!(domain->prot & IOMMU_CACHE)) { + if (!(domain->enforce_cache_coherency)) { ret = 0; break; } @@ -2641,7 +2649,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu, case VFIO_DMA_CC_IOMMU: if (!iommu) return 0; - return vfio_domains_have_iommu_cache(iommu); + return vfio_domains_have_enforce_cache_coherency(iommu); default: return 0; } diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index f869b723494f..837011be9978 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -549,6 +549,26 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name) } #endif +static void sti_dump_font(struct sti_cooked_font *font) +{ +#ifdef STI_DUMP_FONT + unsigned char *p = (unsigned char *)font->raw; + int n; + + p += sizeof(struct sti_rom_font); + pr_debug(" w %d h %d bpc %d\n", font->width, font->height, + font->raw->bytes_per_char); + + for (n = 0; n < 256 * font->raw->bytes_per_char; n += 16, p += 16) { + pr_debug(" 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x," + " 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x," + " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n", + p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8], + p[9], p[10], p[11], p[12], p[13], p[14], p[15]); + } +#endif +} + static int sti_search_font(struct sti_cooked_rom *rom, int height, int width) { struct sti_cooked_font *font; @@ -796,6 +816,7 @@ static int sti_read_rom(int wordmode, struct sti_struct *sti, sti->font->width = sti->font->raw->width; sti->font->height = sti->font->raw->height; sti_font_convert_bytemode(sti, sti->font); + sti_dump_font(sti->font); sti->sti_mem_request = raw->sti_mem_req; sti->graphics_id[0] = 
raw->graphics_id[0]; diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 9ec969e136bf..8080116aea84 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -758,12 +758,15 @@ static int clcdfb_of_vram_setup(struct clcd_fb *fb) return -ENODEV; fb->fb.screen_base = of_iomap(memory, 0); - if (!fb->fb.screen_base) + if (!fb->fb.screen_base) { + of_node_put(memory); return -ENOMEM; + } fb->fb.fix.smem_start = of_translate_address(memory, of_get_address(memory, 0, &size, NULL)); fb->fb.fix.smem_len = size; + of_node_put(memory); return 0; } diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 8359a513b600..886c564787f1 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -63,6 +63,7 @@ #define MAX_VMBUS_PKT_SIZE 0x4000 #define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major)) +/* Support for VERSION_WIN7 is removed. #define is retained for reference. */ #define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0) #define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2) #define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5) @@ -70,13 +71,7 @@ #define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff) #define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16) -#define SYNTHVID_DEPTH_WIN7 16 #define SYNTHVID_DEPTH_WIN8 32 - -#define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024) -#define SYNTHVID_WIDTH_MAX_WIN7 1600 -#define SYNTHVID_HEIGHT_MAX_WIN7 1200 - #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024) #define PCI_VENDOR_ID_MICROSOFT 0x1414 @@ -643,12 +638,6 @@ static int synthvid_connect_vsp(struct hv_device *hdev) case VERSION_WIN8: case VERSION_WIN8_1: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8); - if (!ret) - break; - fallthrough; - case VERSION_WS2008: - case VERSION_WIN7: - ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7); break; default: ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10); @@ -660,11 +649,7 @@ static int synthvid_connect_vsp(struct hv_device *hdev) goto error; } - if (par->synthvid_version == SYNTHVID_VERSION_WIN7) - screen_depth = SYNTHVID_DEPTH_WIN7; - else - screen_depth = SYNTHVID_DEPTH_WIN8; - + screen_depth = SYNTHVID_DEPTH_WIN8; if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) { ret = synthvid_get_supported_resolution(hdev); if (ret) @@ -933,9 +918,7 @@ static void hvfb_get_option(struct fb_info *info) (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) && (x * y * screen_depth / 8 > screen_fb_size)) || (par->synthvid_version == SYNTHVID_VERSION_WIN8 && - x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) || - (par->synthvid_version == SYNTHVID_VERSION_WIN7 && - (x > SYNTHVID_WIDTH_MAX_WIN7 || y > SYNTHVID_HEIGHT_MAX_WIN7))) { + x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8)) { pr_err("Screen resolution option is out of range: skipped\n"); return; } @@ -1009,7 +992,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) struct pci_dev *pdev = NULL; void __iomem *fb_virt; int gen2vm = efi_enabled(EFI_BOOT); - resource_size_t pot_start, pot_end; phys_addr_t paddr; int ret; @@ -1060,23 +1042,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) dio_fb_size = screen_width * screen_height * screen_depth / 8; - if (gen2vm) { - pot_start = 0; - pot_end = -1; - } else { - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || - pci_resource_len(pdev, 0) < screen_fb_size) { - pr_err("Resource not available or (0x%lx < 0x%lx)\n", - (unsigned long) 
pci_resource_len(pdev, 0), - (unsigned long) screen_fb_size); - goto err1; - } - - pot_end = pci_resource_end(pdev, 0); - pot_start = pot_end - screen_fb_size + 1; - } - - ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end, + ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1, screen_fb_size, 0x100000, true); if (ret != 0) { pr_err("Unable to allocate framebuffer memory\n"); diff --git a/drivers/video/fbdev/omap/omapfb.h b/drivers/video/fbdev/omap/omapfb.h index 313a051fe7a4..beb841ccb99c 100644 --- a/drivers/video/fbdev/omap/omapfb.h +++ b/drivers/video/fbdev/omap/omapfb.h @@ -231,5 +231,9 @@ extern int omapfb_update_window_async(struct fb_info *fbi, struct omapfb_update_window *win, void (*callback)(void *), void *callback_data); +extern int hwa742_update_window_async(struct fb_info *fbi, + struct omapfb_update_window *win, + void (*callback)(void *), + void *callback_data); #endif /* __OMAPFB_H */ diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c index be9910ff6e62..b407173e27b1 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c @@ -117,16 +117,11 @@ static int nec_8048_connect(struct omap_dss_device *dssdev) { struct panel_drv_data *ddata = to_panel_data(dssdev); struct omap_dss_device *in = ddata->in; - int r; if (omapdss_device_is_connected(dssdev)) return 0; - r = in->ops.dpi->connect(in, dssdev); - if (r) - return r; - - return 0; + return in->ops.dpi->connect(in, dssdev); } static void nec_8048_disconnect(struct omap_dss_device *dssdev) diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c index c5f89129dcdd..531b36d2232b 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c @@ -173,7 +173,6 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data { struct dss_pll *pll = &hpll->pll; struct clk *clk; - int r; clk = devm_clk_get(&pdev->dev, "sys_clk"); if (IS_ERR(clk)) { @@ -203,12 +202,7 @@ static int dsi_init_pll_data(struct platform_device *pdev, struct hdmi_pll_data } pll->ops = &dsi_pll_ops; - - r = dss_pll_register(pll); - if (r) - return r; - - return 0; + return dss_pll_register(pll); } int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 350b3139c863..043cc8f9ef1c 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -646,6 +646,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) for (i = 0; i < 8; i++) { ret = pxa3xx_gcu_add_buffer(dev, priv); if (ret) { + pxa3xx_gcu_free_buffers(dev, priv); dev_err(dev, "failed to allocate DMA memory\n"); goto err_disable_clk; } @@ -662,15 +663,15 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) SHARED_SIZE, irq); return 0; -err_free_dma: - dma_free_coherent(dev, SHARED_SIZE, - priv->shared, priv->shared_phys); +err_disable_clk: + clk_disable_unprepare(priv->clk); err_misc_deregister: misc_deregister(&priv->misc_dev); -err_disable_clk: - clk_disable_unprepare(priv->clk); +err_free_dma: + dma_free_coherent(dev, SHARED_SIZE, + priv->shared, priv->shared_phys); return ret; } @@ -683,6 +684,7 @@ static int pxa3xx_gcu_remove(struct platform_device *pdev) pxa3xx_gcu_wait_idle(priv); misc_deregister(&priv->misc_dev); dma_free_coherent(dev, 
SHARED_SIZE, priv->shared, priv->shared_phys); + clk_disable_unprepare(priv->clk); pxa3xx_gcu_free_buffers(dev, priv); return 0; diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c index e25e8de5ff67..929d4775cb4b 100644 --- a/drivers/video/fbdev/vesafb.c +++ b/drivers/video/fbdev/vesafb.c @@ -490,11 +490,12 @@ static int vesafb_remove(struct platform_device *pdev) { struct fb_info *info = platform_get_drvdata(pdev); - /* vesafb_destroy takes care of info cleanup */ - unregister_framebuffer(info); if (((struct vesafb_par *)(info->par))->region) release_region(0x3c0, 32); + /* vesafb_destroy takes care of info cleanup */ + unregister_framebuffer(info); + return 0; } diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c index 3bed357a9870..4d2694d904aa 100644 --- a/drivers/video/fbdev/xen-fbfront.c +++ b/drivers/video/fbdev/xen-fbfront.c @@ -223,7 +223,6 @@ static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green, red = CNVT_TOHW(red, info->var.red.length); green = CNVT_TOHW(green, info->var.green.length); blue = CNVT_TOHW(blue, info->var.blue.length); - transp = CNVT_TOHW(transp, info->var.transp.length); #undef CNVT_TOHW v = (red << info->var.red.offset) | diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c4e82a8d863f..001ae1be9b61 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -883,6 +883,14 @@ config RENESAS_RZAWDT This driver adds watchdog support for the integrated watchdogs in the Renesas RZ/A SoCs. These watchdogs can be used to reset a system. +config RENESAS_RZN1WDT + tristate "Renesas RZ/N1 watchdog" + depends on ARCH_RENESAS || COMPILE_TEST + select WATCHDOG_CORE + help + This driver adds watchdog support for the integrated watchdogs in the + Renesas RZ/N1 SoCs. These watchdogs can be used to reset a system. + config RENESAS_RZG2LWDT tristate "Renesas RZ/G2L WDT Watchdog" depends on ARCH_RENESAS || COMPILE_TEST @@ -1011,6 +1019,17 @@ config APPLE_WATCHDOG To compile this driver as a module, choose M here: the module will be called apple_wdt. +config SUNPLUS_WATCHDOG + tristate "Sunplus watchdog support" + depends on ARCH_SUNPLUS || COMPILE_TEST + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Sunplus SoCs. + + To compile this driver as a module, choose M here: the + module will be called sunplus_wdt. 
+ # X86 (i386 + ia64 + x86_64) Architecture config ACQUIRE_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index f7da867e8782..5f88a6237f7c 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o +obj-$(CONFIG_RENESAS_RZN1WDT) += rzn1_wdt.o obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o @@ -95,6 +96,7 @@ obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o +obj-$(CONFIG_SUNPLUS_WATCHDOG) += sunplus_wdt.o # X86 (i386 + ia64 + x86_64) Architecture obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c index 8656a137e9a4..1ffcf6aca6ae 100644 --- a/drivers/watchdog/bcm7038_wdt.c +++ b/drivers/watchdog/bcm7038_wdt.c @@ -218,6 +218,7 @@ static SIMPLE_DEV_PM_OPS(bcm7038_wdt_pm_ops, bcm7038_wdt_suspend, bcm7038_wdt_resume); static const struct of_device_id bcm7038_wdt_match[] = { + { .compatible = "brcm,bcm6345-wdt" }, { .compatible = "brcm,bcm7038-wdt" }, {}, }; diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index 9adad1862bbd..09a4af4c58fc 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -18,6 +18,7 @@ #include <linux/delay.h> #include <linux/mfd/da9063/registers.h> #include <linux/mfd/da9063/core.h> +#include <linux/property.h> #include <linux/regmap.h> /* @@ -26,6 +27,8 @@ * others: timeout = 2048 ms * 2^(TWDSCALE-1). 
*/ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; +static bool use_sw_pm; + #define DA9063_TWDSCALE_DISABLE 0 #define DA9063_TWDSCALE_MIN 1 #define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1) @@ -218,6 +221,8 @@ static int da9063_wdt_probe(struct platform_device *pdev) if (!wdd) return -ENOMEM; + use_sw_pm = device_property_present(dev, "dlg,use-sw-pm"); + wdd->info = &da9063_watchdog_info; wdd->ops = &da9063_watchdog_ops; wdd->min_timeout = DA9063_WDT_MIN_TIMEOUT; @@ -228,6 +233,7 @@ static int da9063_wdt_probe(struct platform_device *pdev) watchdog_set_restart_priority(wdd, 128); watchdog_set_drvdata(wdd, da9063); + dev_set_drvdata(dev, wdd); wdd->timeout = DA9063_WDG_TIMEOUT; @@ -249,10 +255,40 @@ static int da9063_wdt_probe(struct platform_device *pdev) return devm_watchdog_register_device(dev, wdd); } +static int __maybe_unused da9063_wdt_suspend(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + if (!use_sw_pm) + return 0; + + if (watchdog_active(wdd)) + return da9063_wdt_stop(wdd); + + return 0; +} + +static int __maybe_unused da9063_wdt_resume(struct device *dev) +{ + struct watchdog_device *wdd = dev_get_drvdata(dev); + + if (!use_sw_pm) + return 0; + + if (watchdog_active(wdd)) + return da9063_wdt_start(wdd); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(da9063_wdt_pm_ops, + da9063_wdt_suspend, da9063_wdt_resume); + static struct platform_driver da9063_wdt_driver = { .probe = da9063_wdt_probe, .driver = { .name = DA9063_DRVNAME_WATCHDOG, + .pm = &da9063_wdt_pm_ops, }, }; module_platform_driver(da9063_wdt_driver); diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 3f2f4343644f..34693f11385f 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -596,7 +596,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP /* * Suspend-to-idle requires this, because it stops the ticks and timekeeping, so * the watchdog cannot be pinged while in that state. 
In ACPI sleep states the @@ -604,15 +603,15 @@ static int iTCO_wdt_probe(struct platform_device *pdev) */ #ifdef CONFIG_ACPI -static inline bool need_suspend(void) +static inline bool __maybe_unused need_suspend(void) { return acpi_target_system_state() == ACPI_STATE_S0; } #else -static inline bool need_suspend(void) { return true; } +static inline bool __maybe_unused need_suspend(void) { return true; } #endif -static int iTCO_wdt_suspend_noirq(struct device *dev) +static int __maybe_unused iTCO_wdt_suspend_noirq(struct device *dev) { struct iTCO_wdt_private *p = dev_get_drvdata(dev); int ret = 0; @@ -626,7 +625,7 @@ static int iTCO_wdt_suspend_noirq(struct device *dev) return ret; } -static int iTCO_wdt_resume_noirq(struct device *dev) +static int __maybe_unused iTCO_wdt_resume_noirq(struct device *dev) { struct iTCO_wdt_private *p = dev_get_drvdata(dev); @@ -637,20 +636,15 @@ static int iTCO_wdt_resume_noirq(struct device *dev) } static const struct dev_pm_ops iTCO_wdt_pm = { - .suspend_noirq = iTCO_wdt_suspend_noirq, - .resume_noirq = iTCO_wdt_resume_noirq, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(iTCO_wdt_suspend_noirq, + iTCO_wdt_resume_noirq) }; -#define ITCO_WDT_PM_OPS (&iTCO_wdt_pm) -#else -#define ITCO_WDT_PM_OPS NULL -#endif /* CONFIG_PM_SLEEP */ - static struct platform_driver iTCO_wdt_driver = { .probe = iTCO_wdt_probe, .driver = { .name = DRV_NAME, - .pm = ITCO_WDT_PM_OPS, + .pm = &iTCO_wdt_pm, }, }; diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index 4577a76dd464..f0d4e3cc7459 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -10,7 +10,9 @@ */ #include <dt-bindings/reset/mt2712-resets.h> +#include <dt-bindings/reset/mt7986-resets.h> #include <dt-bindings/reset/mt8183-resets.h> +#include <dt-bindings/reset/mt8186-resets.h> #include <dt-bindings/reset/mt8192-resets.h> #include <dt-bindings/reset/mt8195-resets.h> #include <linux/delay.h> @@ -76,10 +78,18 @@ static const struct mtk_wdt_data mt2712_data = { .toprgu_sw_rst_num = MT2712_TOPRGU_SW_RST_NUM, }; +static const struct mtk_wdt_data mt7986_data = { + .toprgu_sw_rst_num = MT7986_TOPRGU_SW_RST_NUM, +}; + static const struct mtk_wdt_data mt8183_data = { .toprgu_sw_rst_num = MT8183_TOPRGU_SW_RST_NUM, }; +static const struct mtk_wdt_data mt8186_data = { + .toprgu_sw_rst_num = MT8186_TOPRGU_SW_RST_NUM, +}; + static const struct mtk_wdt_data mt8192_data = { .toprgu_sw_rst_num = MT8192_TOPRGU_SW_RST_NUM, }; @@ -418,7 +428,9 @@ static int mtk_wdt_resume(struct device *dev) static const struct of_device_id mtk_wdt_dt_ids[] = { { .compatible = "mediatek,mt2712-wdt", .data = &mt2712_data }, { .compatible = "mediatek,mt6589-wdt" }, + { .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data }, { .compatible = "mediatek,mt8183-wdt", .data = &mt8183_data }, + { .compatible = "mediatek,mt8186-wdt", .data = &mt8186_data }, { .compatible = "mediatek,mt8192-wdt", .data = &mt8192_data }, { .compatible = "mediatek,mt8195-wdt", .data = &mt8195_data }, { /* sentinel */ } diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index db843f825860..053ef3bde12d 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -226,7 +226,7 @@ static int rti_wdt_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret) { + if (ret < 0) { pm_runtime_put_noidle(dev); pm_runtime_disable(&pdev->dev); return dev_err_probe(dev, ret, "runtime pm failed\n"); @@ -253,6 +253,7 @@ static int rti_wdt_probe(struct platform_device *pdev) } if 
(readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) { + int preset_heartbeat; u32 time_left_ms; u64 heartbeat_ms; u32 wsize; @@ -263,11 +264,12 @@ static int rti_wdt_probe(struct platform_device *pdev) heartbeat_ms <<= WDT_PRELOAD_SHIFT; heartbeat_ms *= 1000; do_div(heartbeat_ms, wdt->freq); - if (heartbeat_ms != heartbeat * 1000) + preset_heartbeat = heartbeat_ms + 500; + preset_heartbeat /= 1000; + if (preset_heartbeat != heartbeat) dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n"); - heartbeat = heartbeat_ms; - heartbeat /= 1000; + heartbeat = preset_heartbeat; wsize = readl(wdt->base + RTIWWDSIZECTRL); ret = rti_wdt_setup_hw_hb(wdd, wsize); diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c index 6b426df34fd6..6eea0ee4af49 100644 --- a/drivers/watchdog/rzg2l_wdt.c +++ b/drivers/watchdog/rzg2l_wdt.c @@ -21,8 +21,11 @@ #define WDTSET 0x04 #define WDTTIM 0x08 #define WDTINT 0x0C +#define PECR 0x10 +#define PEEN 0x14 #define WDTCNT_WDTEN BIT(0) #define WDTINT_INTDISP BIT(0) +#define PEEN_FORCE BIT(0) #define WDT_DEFAULT_TIMEOUT 60U @@ -43,6 +46,8 @@ struct rzg2l_wdt_priv { struct reset_control *rstc; unsigned long osc_clk_rate; unsigned long delay; + struct clk *pclk; + struct clk *osc_clk; }; static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv) @@ -53,7 +58,7 @@ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv) static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime) { - u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO; + u64 timer_cycle_us = 1024 * 1024ULL * (wdttime + 1) * MICRO; return div64_ul(timer_cycle_us, cycle); } @@ -86,7 +91,6 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev) { struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); - reset_control_deassert(priv->rstc); pm_runtime_get_sync(wdev->parent); /* Initialize time out */ @@ -106,7 +110,26 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev) struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); pm_runtime_put(wdev->parent); - reset_control_assert(priv->rstc); + reset_control_reset(priv->rstc); + + return 0; +} + +static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout) +{ + struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + + wdev->timeout = timeout; + + /* + * If the watchdog is active, reset the module for updating the WDTSET + * register so that it is updated with new timeout values. 
+ */ + if (watchdog_active(wdev)) { + pm_runtime_put(wdev->parent); + reset_control_reset(priv->rstc); + rzg2l_wdt_start(wdev); + } return 0; } @@ -116,15 +139,14 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev, { struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); - /* Reset the module before we modify any register */ - reset_control_reset(priv->rstc); - pm_runtime_get_sync(wdev->parent); + clk_prepare_enable(priv->pclk); + clk_prepare_enable(priv->osc_clk); - /* smallest counter value to reboot soon */ - rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET); + /* Generate Reset (WDTRSTB) Signal on parity error */ + rzg2l_wdt_write(priv, 0, PECR); - /* Enable watchdog timer*/ - rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT); + /* Force parity error */ + rzg2l_wdt_write(priv, PEEN_FORCE, PEEN); return 0; } @@ -148,15 +170,15 @@ static const struct watchdog_ops rzg2l_wdt_ops = { .start = rzg2l_wdt_start, .stop = rzg2l_wdt_stop, .ping = rzg2l_wdt_ping, + .set_timeout = rzg2l_wdt_set_timeout, .restart = rzg2l_wdt_restart, }; -static void rzg2l_wdt_reset_assert_pm_disable_put(void *data) +static void rzg2l_wdt_reset_assert_pm_disable(void *data) { struct watchdog_device *wdev = data; struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); - pm_runtime_put(wdev->parent); pm_runtime_disable(wdev->parent); reset_control_assert(priv->rstc); } @@ -166,7 +188,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct rzg2l_wdt_priv *priv; unsigned long pclk_rate; - struct clk *wdt_clk; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -178,22 +199,20 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) return PTR_ERR(priv->base); /* Get watchdog main clock */ - wdt_clk = clk_get(&pdev->dev, "oscclk"); - if (IS_ERR(wdt_clk)) - return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk"); + priv->osc_clk = devm_clk_get(&pdev->dev, "oscclk"); + if (IS_ERR(priv->osc_clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->osc_clk), "no oscclk"); - priv->osc_clk_rate = clk_get_rate(wdt_clk); - clk_put(wdt_clk); + priv->osc_clk_rate = clk_get_rate(priv->osc_clk); if (!priv->osc_clk_rate) return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0"); /* Get Peripheral clock */ - wdt_clk = clk_get(&pdev->dev, "pclk"); - if (IS_ERR(wdt_clk)) - return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk"); + priv->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(priv->pclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(priv->pclk), "no pclk"); - pclk_rate = clk_get_rate(wdt_clk); - clk_put(wdt_clk); + pclk_rate = clk_get_rate(priv->pclk); if (!pclk_rate) return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0"); @@ -204,13 +223,11 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc), "failed to get cpg reset"); - reset_control_deassert(priv->rstc); + ret = reset_control_deassert(priv->rstc); + if (ret) + return dev_err_probe(dev, ret, "failed to deassert"); + pm_runtime_enable(&pdev->dev); - ret = pm_runtime_resume_and_get(&pdev->dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret)); - goto out_pm_get; - } priv->wdev.info = &rzg2l_wdt_ident; priv->wdev.ops = &rzg2l_wdt_ops; @@ -222,7 +239,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) watchdog_set_drvdata(&priv->wdev, priv); ret = devm_add_action_or_reset(&pdev->dev, - rzg2l_wdt_reset_assert_pm_disable_put, + rzg2l_wdt_reset_assert_pm_disable, &priv->wdev); 
if (ret < 0) return ret; @@ -235,12 +252,6 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) dev_warn(dev, "Specified timeout invalid, using default"); return devm_watchdog_register_device(&pdev->dev, &priv->wdev); - -out_pm_get: - pm_runtime_disable(dev); - reset_control_assert(priv->rstc); - - return ret; } static const struct of_device_id rzg2l_wdt_ids[] = { diff --git a/drivers/watchdog/rzn1_wdt.c b/drivers/watchdog/rzn1_wdt.c new file mode 100644 index 000000000000..55ab384b9965 --- /dev/null +++ b/drivers/watchdog/rzn1_wdt.c @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas RZ/N1 Watchdog timer. + * This is a 12-bit timer driver from a (62.5/16384) MHz clock. It can't even + * cope with 2 seconds. + * + * Copyright 2018 Renesas Electronics Europe Ltd. + * + * Derived from Ralink RT288x watchdog timer. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/watchdog.h> + +#define DEFAULT_TIMEOUT 60 + +#define RZN1_WDT_RETRIGGER 0x0 +#define RZN1_WDT_RETRIGGER_RELOAD_VAL 0 +#define RZN1_WDT_RETRIGGER_RELOAD_VAL_MASK 0xfff +#define RZN1_WDT_RETRIGGER_PRESCALE BIT(12) +#define RZN1_WDT_RETRIGGER_ENABLE BIT(13) +#define RZN1_WDT_RETRIGGER_WDSI (0x2 << 14) + +#define RZN1_WDT_PRESCALER 16384 +#define RZN1_WDT_MAX 4095 + +struct rzn1_watchdog { + struct watchdog_device wdtdev; + void __iomem *base; + unsigned long clk_rate_khz; +}; + +static inline uint32_t max_heart_beat_ms(unsigned long clk_rate_khz) +{ + return (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz; +} + +static inline uint32_t compute_reload_value(uint32_t tick_ms, + unsigned long clk_rate_khz) +{ + return (tick_ms * clk_rate_khz) / RZN1_WDT_PRESCALER; +} + +static int rzn1_wdt_ping(struct watchdog_device *w) +{ + struct rzn1_watchdog *wdt = watchdog_get_drvdata(w); + + /* Any value retrigggers the watchdog */ + writel(0, wdt->base + RZN1_WDT_RETRIGGER); + + return 0; +} + +static int rzn1_wdt_start(struct watchdog_device *w) +{ + struct rzn1_watchdog *wdt = watchdog_get_drvdata(w); + u32 val; + + /* + * The hardware allows you to write to this reg only once. + * Since this includes the reload value, there is no way to change the + * timeout once started. Also note that the WDT clock is half the bus + * fabric clock rate, so if the bus fabric clock rate is changed after + * the WDT is started, the WDT interval will be wrong. + */ + val = RZN1_WDT_RETRIGGER_WDSI; + val |= RZN1_WDT_RETRIGGER_ENABLE; + val |= RZN1_WDT_RETRIGGER_PRESCALE; + val |= compute_reload_value(w->max_hw_heartbeat_ms, wdt->clk_rate_khz); + writel(val, wdt->base + RZN1_WDT_RETRIGGER); + + return 0; +} + +static irqreturn_t rzn1_wdt_irq(int irq, void *_wdt) +{ + pr_crit("RZN1 Watchdog. 
Initiating system reboot\n"); + emergency_restart(); + + return IRQ_HANDLED; +} + +static struct watchdog_info rzn1_wdt_info = { + .identity = "RZ/N1 Watchdog", + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, +}; + +static const struct watchdog_ops rzn1_wdt_ops = { + .owner = THIS_MODULE, + .start = rzn1_wdt_start, + .ping = rzn1_wdt_ping, +}; + +static void rzn1_wdt_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static int rzn1_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rzn1_watchdog *wdt; + struct device_node *np = dev->of_node; + struct clk *clk; + unsigned long clk_rate; + int ret; + int irq; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + ret = devm_request_irq(dev, irq, rzn1_wdt_irq, 0, + np->name, wdt); + if (ret) { + dev_err(dev, "failed to request irq %d\n", irq); + return ret; + } + + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get the clock\n"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(dev, "failed to prepare/enable the clock\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, rzn1_wdt_clk_disable_unprepare, + clk); + if (ret) + return ret; + + clk_rate = clk_get_rate(clk); + if (!clk_rate) { + dev_err(dev, "failed to get the clock rate\n"); + return -EINVAL; + } + + wdt->clk_rate_khz = clk_rate / 1000; + wdt->wdtdev.info = &rzn1_wdt_info, + wdt->wdtdev.ops = &rzn1_wdt_ops, + wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS, + wdt->wdtdev.parent = dev; + /* + * The period of the watchdog cannot be changed once set + * and is limited to a very short period. + * Configure it for a 1s period once and for all, and + * rely on the heart-beat provided by the watchdog core + * to make this usable by the user-space. + */ + wdt->wdtdev.max_hw_heartbeat_ms = max_heart_beat_ms(wdt->clk_rate_khz); + if (wdt->wdtdev.max_hw_heartbeat_ms > 1000) + wdt->wdtdev.max_hw_heartbeat_ms = 1000; + + wdt->wdtdev.timeout = DEFAULT_TIMEOUT; + ret = watchdog_init_timeout(&wdt->wdtdev, 0, dev); + if (ret) + return ret; + + watchdog_set_drvdata(&wdt->wdtdev, wdt); + + return devm_watchdog_register_device(dev, &wdt->wdtdev); +} + + +static const struct of_device_id rzn1_wdt_match[] = { + { .compatible = "renesas,rzn1-wdt" }, + {}, +}; +MODULE_DEVICE_TABLE(of, rzn1_wdt_match); + +static struct platform_driver rzn1_wdt_driver = { + .probe = rzn1_wdt_probe, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = rzn1_wdt_match, + }, +}; + +module_platform_driver(rzn1_wdt_driver); + +MODULE_DESCRIPTION("Renesas RZ/N1 hardware watchdog"); +MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index dbeb2146c968..f9479a3fe2a6 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -272,6 +272,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) watchdog_set_nowayout(&wdt->wdd, nowayout); watchdog_set_drvdata(&wdt->wdd, wdt); watchdog_set_restart_priority(&wdt->wdd, 128); + watchdog_stop_on_unregister(&wdt->wdd); /* * If 'timeout-sec' devicetree property is specified, use that. 
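A quick worked example for the RZ/N1 watchdog timing in the new rzn1_wdt.c above (a standalone sketch, not part of the patch set; the 62.5 MHz bus clock is the figure quoted in the driver's own header comment, and the constants are copied from the driver):

/*
 * Sketch: reproduces the rzn1_wdt.c arithmetic assuming the 62.5 MHz input
 * clock mentioned in the driver's header comment, i.e. clk_rate_khz = 62500.
 * Everything outside the two #defines is illustrative only.
 */
#include <stdio.h>

#define RZN1_WDT_PRESCALER	16384
#define RZN1_WDT_MAX		4095	/* 12-bit reload field */

int main(void)
{
	unsigned long clk_rate_khz = 62500;	/* assumed 62.5 MHz bus clock */

	/* Longest hardware period: 4095 * 16384 / 62500 = 1073 ms, which is
	 * why probe clamps max_hw_heartbeat_ms to 1000 ms and leaves longer
	 * user timeouts to the watchdog core's soft re-ping. */
	unsigned long max_ms = (RZN1_WDT_MAX * RZN1_WDT_PRESCALER) / clk_rate_khz;

	/* Reload value programmed for a 1000 ms period:
	 * 62500000 / 16384 = 3814, which fits the 12-bit reload field. */
	unsigned long reload = (1000UL * clk_rate_khz) / RZN1_WDT_PRESCALER;

	printf("max_hw_heartbeat_ms=%lu reload=%lu\n", max_ms, reload);
	return 0;
}

The same numbers explain the header comment that the timer cannot cover even a 2 second period on its own.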
diff --git a/drivers/watchdog/sunplus_wdt.c b/drivers/watchdog/sunplus_wdt.c new file mode 100644 index 000000000000..e2d8c532bcb1 --- /dev/null +++ b/drivers/watchdog/sunplus_wdt.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sunplus Watchdog Driver + * + * Copyright (C) 2021 Sunplus Technology Co., Ltd. + * + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/watchdog.h> + +#define WDT_CTRL 0x00 +#define WDT_CNT 0x04 + +#define WDT_STOP 0x3877 +#define WDT_RESUME 0x4A4B +#define WDT_CLRIRQ 0x7482 +#define WDT_UNLOCK 0xAB00 +#define WDT_LOCK 0xAB01 +#define WDT_CONMAX 0xDEAF + +/* TIMEOUT_MAX = ffff0/90kHz =11.65, so longer than 11 seconds will time out. */ +#define SP_WDT_MAX_TIMEOUT 11U +#define SP_WDT_DEFAULT_TIMEOUT 10 + +#define STC_CLK 90000 + +#define DEVICE_NAME "sunplus-wdt" + +static unsigned int timeout; +module_param(timeout, int, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct sp_wdt_priv { + struct watchdog_device wdev; + void __iomem *base; + struct clk *clk; + struct reset_control *rstc; +}; + +static int sp_wdt_restart(struct watchdog_device *wdev, + unsigned long action, void *data) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_STOP, base + WDT_CTRL); + writel(WDT_UNLOCK, base + WDT_CTRL); + writel(0x0001, base + WDT_CNT); + writel(WDT_LOCK, base + WDT_CTRL); + writel(WDT_RESUME, base + WDT_CTRL); + + return 0; +} + +static int sp_wdt_ping(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + u32 count; + + if (wdev->timeout > SP_WDT_MAX_TIMEOUT) { + /* WDT_CONMAX sets the count to the maximum (down-counting). */ + writel(WDT_CONMAX, base + WDT_CTRL); + } else { + writel(WDT_UNLOCK, base + WDT_CTRL); + /* + * Watchdog timer is a 20-bit down-counting based on STC_CLK. + * This register bits[16:0] is from bit[19:4] of the watchdog + * timer counter. 
+ */ + count = (wdev->timeout * STC_CLK) >> 4; + writel(count, base + WDT_CNT); + writel(WDT_LOCK, base + WDT_CTRL); + } + + return 0; +} + +static int sp_wdt_stop(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_STOP, base + WDT_CTRL); + + return 0; +} + +static int sp_wdt_start(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + + writel(WDT_RESUME, base + WDT_CTRL); + + return 0; +} + +static unsigned int sp_wdt_get_timeleft(struct watchdog_device *wdev) +{ + struct sp_wdt_priv *priv = watchdog_get_drvdata(wdev); + void __iomem *base = priv->base; + u32 val; + + val = readl(base + WDT_CNT); + val &= 0xffff; + val = val << 4; + + return val; +} + +static const struct watchdog_info sp_wdt_info = { + .identity = DEVICE_NAME, + .options = WDIOF_SETTIMEOUT | + WDIOF_MAGICCLOSE | + WDIOF_KEEPALIVEPING, +}; + +static const struct watchdog_ops sp_wdt_ops = { + .owner = THIS_MODULE, + .start = sp_wdt_start, + .stop = sp_wdt_stop, + .ping = sp_wdt_ping, + .get_timeleft = sp_wdt_get_timeleft, + .restart = sp_wdt_restart, +}; + +static void sp_clk_disable_unprepare(void *data) +{ + clk_disable_unprepare(data); +} + +static void sp_reset_control_assert(void *data) +{ + reset_control_assert(data); +} + +static int sp_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sp_wdt_priv *priv; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n"); + + ret = clk_prepare_enable(priv->clk); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable clock\n"); + + ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk); + if (ret) + return ret; + + /* The timer and watchdog shared the STC reset */ + priv->rstc = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(priv->rstc)) + return dev_err_probe(dev, PTR_ERR(priv->rstc), "Failed to get reset\n"); + + reset_control_deassert(priv->rstc); + + ret = devm_add_action_or_reset(dev, sp_reset_control_assert, priv->rstc); + if (ret) + return ret; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->wdev.info = &sp_wdt_info; + priv->wdev.ops = &sp_wdt_ops; + priv->wdev.timeout = SP_WDT_DEFAULT_TIMEOUT; + priv->wdev.max_hw_heartbeat_ms = SP_WDT_MAX_TIMEOUT * 1000; + priv->wdev.min_timeout = 1; + priv->wdev.parent = dev; + + watchdog_set_drvdata(&priv->wdev, priv); + watchdog_init_timeout(&priv->wdev, timeout, dev); + watchdog_set_nowayout(&priv->wdev, nowayout); + watchdog_stop_on_reboot(&priv->wdev); + watchdog_set_restart_priority(&priv->wdev, 128); + + return devm_watchdog_register_device(dev, &priv->wdev); +} + +static const struct of_device_id sp_wdt_of_match[] = { + {.compatible = "sunplus,sp7021-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sp_wdt_of_match); + +static struct platform_driver sp_wdt_driver = { + .probe = sp_wdt_probe, + .driver = { + .name = DEVICE_NAME, + .of_match_table = sp_wdt_of_match, + }, +}; + +module_platform_driver(sp_wdt_driver); + +MODULE_AUTHOR("Xiantao Hu <xt.hu@cqplus1.com>"); +MODULE_DESCRIPTION("Sunplus Watchdog Timer Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/ts4800_wdt.c b/drivers/watchdog/ts4800_wdt.c index 
c137ad2bd5c3..0ea554c7cda5 100644 --- a/drivers/watchdog/ts4800_wdt.c +++ b/drivers/watchdog/ts4800_wdt.c @@ -125,13 +125,16 @@ static int ts4800_wdt_probe(struct platform_device *pdev) ret = of_property_read_u32_index(np, "syscon", 1, ®); if (ret < 0) { dev_err(dev, "no offset in syscon\n"); + of_node_put(syscon_np); return ret; } /* allocate memory for watchdog struct */ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); - if (!wdt) + if (!wdt) { + of_node_put(syscon_np); return -ENOMEM; + } /* set regmap and offset to know where to write */ wdt->feed_offset = reg; diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c index 195c8c004b69..e6f95e99156d 100644 --- a/drivers/watchdog/wdat_wdt.c +++ b/drivers/watchdog/wdat_wdt.c @@ -344,6 +344,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) wdat->period = tbl->timer_period; wdat->wdd.min_hw_heartbeat_ms = wdat->period * tbl->min_count; wdat->wdd.max_hw_heartbeat_ms = wdat->period * tbl->max_count; + wdat->wdd.min_timeout = 1; wdat->stopped_in_sleep = tbl->flags & ACPI_WDAT_STOPPED; wdat->wdd.info = &wdat_wdt_info; wdat->wdd.ops = &wdat_wdt_ops; @@ -450,8 +451,7 @@ static int wdat_wdt_probe(struct platform_device *pdev) * watchdog properly after it has opened the device. In some cases * the BIOS default is too short and causes immediate reboot. */ - if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms || - timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) { + if (watchdog_timeout_invalid(&wdat->wdd, timeout)) { dev_warn(dev, "Invalid timeout %d given, using %d\n", timeout, WDAT_DEFAULT_TIMEOUT); timeout = WDAT_DEFAULT_TIMEOUT; @@ -462,6 +462,8 @@ static int wdat_wdt_probe(struct platform_device *pdev) return ret; watchdog_set_nowayout(&wdat->wdd, nowayout); + watchdog_stop_on_reboot(&wdat->wdd); + watchdog_stop_on_unregister(&wdat->wdd); return devm_watchdog_register_device(dev, &wdat->wdd); } |
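To close, a rough caller-side view of the VFIO conversion at the start of this section, where vfio_pin_pages(), vfio_unpin_pages() and vfio_dma_rw() now take a struct vfio_device and the old vfio_group_get_external_user*() reference handling is gone. This is a sketch only: struct my_mdev_state and the helper names are hypothetical; just the vfio_* prototypes are taken from the diff above.

/*
 * Hypothetical mdev vendor-driver fragment using the device-based helpers.
 * Only the vfio_pin_pages()/vfio_unpin_pages()/vfio_dma_rw() signatures are
 * from the series above; the rest is made up for illustration.
 */
#include <linux/iommu.h>
#include <linux/vfio.h>

struct my_mdev_state {
	struct vfio_device vdev;	/* embedded, registered via vfio_register_group_dev() */
};

/* Pin one guest page; no vfio_group lookup or container-user reference is
 * needed any more, the open vfio_device keeps the container alive. */
static int my_mdev_pin_one(struct my_mdev_state *state, unsigned long gfn,
			   unsigned long *hpfn)
{
	int ret;

	ret = vfio_pin_pages(&state->vdev, &gfn, 1,
			     IOMMU_READ | IOMMU_WRITE, hpfn);
	return ret == 1 ? 0 : ret;
}

static void my_mdev_unpin_one(struct my_mdev_state *state, unsigned long gfn)
{
	vfio_unpin_pages(&state->vdev, &gfn, 1);
}

/* CPU copy from guest memory by IOVA through the container's dma_rw op. */
static int my_mdev_read_guest(struct my_mdev_state *state, dma_addr_t iova,
			      void *buf, size_t len)
{
	return vfio_dma_rw(&state->vdev, iova, buf, len, false);
}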