-rw-r--r--Documentation/ABI/testing/sysfs-class-cxl7
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt16
-rw-r--r--Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt20
-rw-r--r--Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt108
-rw-r--r--Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt23
-rw-r--r--Documentation/devicetree/bindings/ipmi/ipmi-smic.txt (renamed from Documentation/devicetree/bindings/ipmi.txt)0
-rw-r--r--Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt52
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt20
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt4
-rw-r--r--Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt7
-rw-r--r--Documentation/devicetree/bindings/reset/oxnas,reset.txt44
-rw-r--r--Documentation/devicetree/bindings/reset/st,sti-softreset.txt8
-rw-r--r--Documentation/devicetree/bindings/sram/sram.txt2
-rw-r--r--Documentation/devicetree/bindings/timer/jcore,pit.txt24
-rw-r--r--Documentation/filesystems/proc.txt26
-rw-r--r--MAINTAINERS8
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/ptrace.c9
-rw-r--r--arch/arm/kvm/arm.c7
-rw-r--r--arch/arm/mach-sti/Kconfig2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/exec.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h11
-rw-r--r--arch/arm64/include/asm/module.h5
-rw-r--r--arch/arm64/include/asm/percpu.h120
-rw-r--r--arch/arm64/include/asm/processor.h6
-rw-r--r--arch/arm64/include/asm/sysreg.h2
-rw-r--r--arch/arm64/include/asm/uaccess.h8
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c36
-rw-r--r--arch/arm64/kernel/cpu_errata.c3
-rw-r--r--arch/arm64/kernel/cpufeature.c10
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/kernel/process.c18
-rw-r--r--arch/arm64/kernel/sleep.S2
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/suspend.c11
-rw-r--r--arch/arm64/kernel/traps.c30
-rw-r--r--arch/arm64/mm/fault.c15
-rw-r--r--arch/arm64/mm/init.c26
-rw-r--r--arch/blackfin/kernel/ptrace.c5
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c4
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c4
-rw-r--r--arch/ia64/kernel/err_inject.c2
-rw-r--r--arch/ia64/kernel/ptrace.c14
-rw-r--r--arch/m32r/kernel/ptrace.c15
-rw-r--r--arch/mips/kernel/ptrace32.c5
-rw-r--r--arch/mips/kvm/mips.c1
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/powerpc/boot/main.c18
-rw-r--r--arch/powerpc/include/asm/unistd.h4
-rw-r--r--arch/powerpc/kernel/ptrace32.c5
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/numa.c46
-rw-r--r--arch/s390/kvm/intercept.c9
-rw-r--r--arch/s390/mm/gup.c3
-rw-r--r--arch/score/kernel/ptrace.c10
-rw-r--r--arch/sh/Makefile2
-rw-r--r--arch/sh/boards/Kconfig10
-rw-r--r--arch/sh/configs/j2_defconfig2
-rw-r--r--arch/sh/mm/gup.c3
-rw-r--r--arch/sparc/kernel/ptrace_64.c24
-rw-r--r--arch/sparc/mm/gup.c3
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl2
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/events/intel/core.c3
-rw-r--r--arch/x86/events/intel/lbr.c4
-rw-r--r--arch/x86/events/intel/rapl.c1
-rw-r--r--arch/x86/events/intel/uncore.c1
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/intel-family.h1
-rw-r--r--arch/x86/include/asm/msr-index.h1
-rw-r--r--arch/x86/include/asm/rwsem.h6
-rw-r--r--arch/x86/include/asm/thread_info.h9
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c5
-rw-r--r--arch/x86/kernel/e820.c2
-rw-r--r--arch/x86/kernel/fpu/xstate.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/signal_compat.c3
-rw-r--r--arch/x86/kernel/smp.c2
-rw-r--r--arch/x86/kernel/smpboot.c16
-rw-r--r--arch/x86/kernel/step.c3
-rw-r--r--arch/x86/kvm/ioapic.c2
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/mpx.c5
-rw-r--r--arch/x86/platform/uv/bios_uv.c10
-rw-r--r--arch/x86/um/ptrace_32.c3
-rw-r--r--arch/x86/um/ptrace_64.c3
-rw-r--r--block/badblocks.c6
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/block/rbd.c50
-rw-r--r--drivers/bus/Kconfig9
-rw-r--r--drivers/bus/Makefile2
-rw-r--r--drivers/bus/da8xx-mstpri.c269
-rw-r--r--drivers/char/ipmi/Kconfig8
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/bt-bmc.c505
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c7
-rw-r--r--drivers/clocksource/Kconfig10
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/jcore-pit.c249
-rw-r--r--drivers/clocksource/timer-sun5i.c16
-rw-r--r--drivers/firewire/nosy.c13
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/libstub/Makefile5
-rw-r--r--drivers/firmware/qcom_scm.c4
-rw-r--r--drivers/firmware/tegra/Kconfig25
-rw-r--r--drivers/firmware/tegra/Makefile2
-rw-r--r--drivers/firmware/tegra/bpmp.c868
-rw-r--r--drivers/firmware/tegra/ivc.c695
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c2
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c18
-rw-r--r--drivers/gpu/drm/drm_info.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c23
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c39
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/si.c1
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c145
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c56
-rw-r--r--drivers/hid/hid-dr.c83
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-led.c23
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hwmon/adm9240.c6
-rw-r--r--drivers/hwmon/max31790.c4
-rw-r--r--drivers/infiniband/core/umem.c6
-rw-r--r--drivers/infiniband/core/umem_odp.c7
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c5
-rw-r--r--drivers/irqchip/Kconfig4
-rw-r--r--drivers/irqchip/irq-eznps.c6
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c10
-rw-r--r--drivers/irqchip/irq-gic-v3.c2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-jcore-aic.c20
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/tegra-hsp.c479
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c5
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c6
-rw-r--r--drivers/memory/Kconfig8
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/atmel-ebi.c2
-rw-r--r--drivers/memory/atmel-sdramc.c6
-rw-r--r--drivers/memory/da8xx-ddrctl.c175
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c6
-rw-r--r--drivers/misc/cxl/api.c9
-rw-r--r--drivers/misc/cxl/context.c3
-rw-r--r--drivers/misc/cxl/cxl.h24
-rw-r--r--drivers/misc/cxl/file.c11
-rw-r--r--drivers/misc/cxl/guest.c3
-rw-r--r--drivers/misc/cxl/main.c42
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c27
-rw-r--r--drivers/misc/mic/scif/scif_rma.c3
-rw-r--r--drivers/misc/sgi-gru/grufault.c2
-rw-r--r--drivers/misc/sram.c42
-rw-r--r--drivers/mmc/card/block.c3
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/mmc.c12
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c7
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c23
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c26
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c54
-rw-r--r--drivers/mmc/host/sdhci-pci.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci.c42
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mtd/ubi/eba.c1
-rw-r--r--drivers/mtd/ubi/fastmap.c2
-rw-r--r--drivers/nvme/host/core.c14
-rw-r--r--drivers/nvme/host/pci.c77
-rw-r--r--drivers/nvme/host/scsi.c4
-rw-r--r--drivers/nvme/target/admin-cmd.c8
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/discovery.c4
-rw-r--r--drivers/pci/host/pci-layerscape.c2
-rw-r--r--drivers/pci/host/pcie-designware-plat.c2
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c100
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c12
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c25
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c3
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/reset/Kconfig1
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c43
-rw-r--r--drivers/reset/reset-berlin.c12
-rw-r--r--drivers/reset/reset-lpc18xx.c32
-rw-r--r--drivers/reset/reset-oxnas.c1
-rw-r--r--drivers/reset/reset-socfpga.c10
-rw-r--r--drivers/reset/reset-sunxi.c9
-rw-r--r--drivers/reset/reset-zynq.c10
-rw-r--r--drivers/reset/sti/Kconfig8
-rw-r--r--drivers/reset/sti/Makefile2
-rw-r--r--drivers/reset/sti/reset-stih415.c112
-rw-r--r--drivers/reset/sti/reset-stih416.c143
-rw-r--r--drivers/reset/tegra/Kconfig2
-rw-r--r--drivers/reset/tegra/Makefile1
-rw-r--r--drivers/reset/tegra/reset-bpmp.c71
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/scsi/ipr.c3
-rw-r--r--drivers/scsi/scsi_dh.c6
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/soc/mediatek/Kconfig2
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c465
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a7743-sysc.c32
-rw-r--r--drivers/soc/renesas/rcar-sysc.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/rockchip/pm_domains.c81
-rw-r--r--drivers/soc/tegra/Kconfig14
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c3
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c3
-rw-r--r--drivers/target/iscsi/iscsi_target.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c4
-rw-r--r--drivers/target/target_core_transport.c39
-rw-r--r--drivers/target/target_core_user.c50
-rw-r--r--drivers/target/target_core_xcopy.c34
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c4
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c42
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/watchdog/wdat_wdt.c4
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/ceph/file.c3
-rw-r--r--fs/ceph/inode.c3
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/ceph/xattr.c4
-rw-r--r--fs/crypto/crypto.c15
-rw-r--r--fs/crypto/policy.c4
-rw-r--r--fs/exec.c9
-rw-r--r--fs/ext2/inode.c7
-rw-r--r--fs/ext4/block_validity.c4
-rw-r--r--fs/ext4/mballoc.h17
-rw-r--r--fs/ext4/namei.c18
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/ext4/sysfs.c4
-rw-r--r--fs/ext4/xattr.c20
-rw-r--r--fs/f2fs/gc.c10
-rw-r--r--fs/isofs/inode.c8
-rw-r--r--fs/jbd2/transaction.c3
-rw-r--r--fs/locks.c6
-rw-r--r--fs/nfs/blocklayout/blocklayout.c3
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/proc/array.c9
-rw-r--r--fs/proc/base.c19
-rw-r--r--fs/proc/task_mmu.c29
-rw-r--r--fs/proc/task_nommu.c28
-rw-r--r--fs/ubifs/dir.c20
-rw-r--r--fs/ubifs/xattr.c2
-rw-r--r--include/acpi/pcc.h2
-rw-r--r--include/dt-bindings/clock/tegra186-clock.h940
-rw-r--r--include/dt-bindings/mailbox/tegra186-hsp.h24
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-gpio.h4
-rw-r--r--include/dt-bindings/pinctrl/qcom,pmic-mpp.h6
-rw-r--r--include/dt-bindings/power/r8a7743-sysc.h25
-rw-r--r--include/dt-bindings/power/tegra186-powergate.h39
-rw-r--r--include/dt-bindings/reset/oxsemi,ox810se.h53
-rw-r--r--include/dt-bindings/reset/oxsemi,ox820.h53
-rw-r--r--include/dt-bindings/reset/tegra186-reset.h217
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/mm.h21
-rw-r--r--include/linux/nvme.h49
-rw-r--r--include/linux/soc/qcom/wcnss_ctrl.h13
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/thread_info.h11
-rw-r--r--include/soc/at91/atmel-secumod.h19
-rw-r--r--include/soc/tegra/bpmp-abi.h1601
-rw-r--r--include/soc/tegra/bpmp.h141
-rw-r--r--include/soc/tegra/ivc.h109
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/bt-bmc.h18
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/events/uprobes.c6
-rw-r--r--kernel/irq/manage.c1
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/ptrace.c16
-rw-r--r--kernel/sched/fair.c20
-rw-r--r--kernel/time/alarmtimer.c2
-rw-r--r--mm/frame_vector.c9
-rw-r--r--mm/gup.c64
-rw-r--r--mm/kasan/kasan.c22
-rw-r--r--mm/memory.c16
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/mprotect.c1
-rw-r--r--mm/nommu.c38
-rw-r--r--mm/process_vm_access.c7
-rw-r--r--mm/util.c12
-rw-r--r--net/ceph/pagevec.c2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/tomoyo/domain.c2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/objtool/arch/x86/decode.c9
-rw-r--r--tools/objtool/builtin-check.c68
-rw-r--r--tools/perf/jvmti/Makefile2
-rw-r--r--tools/perf/ui/browsers/hists.c3
-rw-r--r--tools/perf/util/header.c2
-rw-r--r--tools/perf/util/parse-events.l4
-rw-r--r--virt/kvm/async_pf.c3
-rw-r--r--virt/kvm/kvm_main.c11
354 files changed, 9494 insertions, 1747 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
index 4ba0a2a61926..640f65e79ef1 100644
--- a/Documentation/ABI/testing/sysfs-class-cxl
+++ b/Documentation/ABI/testing/sysfs-class-cxl
@@ -220,8 +220,11 @@ What: /sys/class/cxl/<card>/reset
Date: October 2014
Contact: linuxppc-dev@lists.ozlabs.org
Description: write only
- Writing 1 will issue a PERST to card which may cause the card
- to reload the FPGA depending on load_image_on_perst.
+ Writing 1 will issue a PERST to card provided there are no
+ contexts active on any one of the card AFUs. This may cause
+ the card to reload the FPGA depending on load_image_on_perst.
+ Writing -1 will do a force PERST irrespective of any active
+ contexts on the card AFUs.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/perst_reloads_same_image (not in a guest)
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index e1f5ad855f14..29737b9b616e 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -225,3 +225,19 @@ required properties:
compatible = "atmel,sama5d3-sfr", "syscon";
reg = <0xf0038000 0x60>;
};
+
+Security Module (SECUMOD)
+
+The Security Module macrocell provides all necessary secure functions to avoid
+voltage, temperature, frequency and mechanical attacks on the chip. It also
+embeds secure memories that can be scrambled
+
+required properties:
+- compatible: Should be "atmel,<chip>-secumod", "syscon".
+ <chip> can be "sama5d2".
+- reg: Should contain registers location and length
+
+ secumod@fc040000 {
+ compatible = "atmel,sama5d2-secumod", "syscon";
+ reg = <0xfc040000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt b/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt
new file mode 100644
index 000000000000..72daefc6b4a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/ti,da850-mstpri.txt
@@ -0,0 +1,20 @@
+* Device tree bindings for Texas Instruments da8xx master peripheral
+ priority driver
+
+DA8XX SoCs feature a set of registers allowing to change the priority of all
+peripherals classified as masters.
+
+Documentation:
+OMAP-L138 (DA850) - http://www.ti.com/lit/ug/spruh82c/spruh82c.pdf
+
+Required properties:
+
+- compatible: "ti,da850-mstpri" - for da850 based boards
+- reg: offset and length of the mstpri registers
+
+Example for da850-lcdk is shown below.
+
+mstpri {
+ compatible = "ti,da850-mstpri";
+ reg = <0x14110 0x0c>;
+};
diff --git a/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
new file mode 100644
index 000000000000..e821e16ad65b
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/nvidia,tegra186-bpmp.txt
@@ -0,0 +1,108 @@
+NVIDIA Tegra Boot and Power Management Processor (BPMP)
+
+The BPMP is a specific processor in Tegra chip, which is designed for
+booting process handling and offloading the power management, clock
+management, and reset control tasks from the CPU. The binding document
+defines the resources that would be used by the BPMP firmware driver,
+which can create the interprocessor communication (IPC) between the CPU
+and BPMP.
+
+Required properties:
+- name : Should be bpmp
+- compatible
+ Array of strings
+ One of:
+ - "nvidia,tegra186-bpmp"
+- mboxes : The phandle of mailbox controller and the mailbox specifier.
+- shmem : List of the phandle of the TX and RX shared memory area that
+ the IPC between CPU and BPMP is based on.
+- #clock-cells : Should be 1.
+- #power-domain-cells : Should be 1.
+- #reset-cells : Should be 1.
+
+This node is a mailbox consumer. See the following files for details of
+the mailbox subsystem, and the specifiers implemented by the relevant
+provider(s):
+
+- .../mailbox/mailbox.txt
+- .../mailbox/nvidia,tegra186-hsp.txt
+
+This node is a clock, power domain, and reset provider. See the following
+files for general documentation of those features, and the specifiers
+implemented by this node:
+
+- .../clock/clock-bindings.txt
+- <dt-bindings/clock/tegra186-clock.h>
+- ../power/power_domain.txt
+- <dt-bindings/power/tegra186-powergate.h>
+- .../reset/reset.txt
+- <dt-bindings/reset/tegra186-reset.h>
+
+The BPMP implements some services which must be represented by separate nodes.
+For example, it can provide access to certain I2C controllers, and the I2C
+bindings represent each I2C controller as a device tree node. Such nodes should
+be nested directly inside the main BPMP node.
+
+Software can determine whether a child node of the BPMP node represents a device
+by checking for a compatible property. Any node with a compatible property
+represents a device that can be instantiated. Nodes without a compatible
+property may be used to provide configuration information regarding the BPMP
+itself, although no such configuration nodes are currently defined by this
+binding.
+
+The BPMP firmware defines no single global name-/numbering-space for such
+services. Put another way, the numbering scheme for I2C buses is distinct from
+the numbering scheme for any other service the BPMP may provide (e.g. a future
+hypothetical SPI bus service). As such, child device nodes will have no reg
+property, and the BPMP node will have no #address-cells or #size-cells property.
+
+The shared memory bindings for BPMP
+-----------------------------------
+
+The shared memory area for the IPC TX and RX between CPU and BPMP are
+predefined and work on top of sysram, which is an SRAM inside the chip.
+
+See ".../sram/sram.txt" for the bindings.
+
+Example:
+
+hsp_top0: hsp@03c00000 {
+ ...
+ #mbox-cells = <2>;
+};
+
+sysram@30000000 {
+ compatible = "nvidia,tegra186-sysram", "mmio-sram";
+ reg = <0x0 0x30000000 0x0 0x50000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0x0 0x0 0x30000000 0x0 0x50000>;
+
+ cpu_bpmp_tx: shmem@4e000 {
+ compatible = "nvidia,tegra186-bpmp-shmem";
+ reg = <0x0 0x4e000 0x0 0x1000>;
+ label = "cpu-bpmp-tx";
+ pool;
+ };
+
+ cpu_bpmp_rx: shmem@4f000 {
+ compatible = "nvidia,tegra186-bpmp-shmem";
+ reg = <0x0 0x4f000 0x0 0x1000>;
+ label = "cpu-bpmp-rx";
+ pool;
+ };
+};
+
+bpmp {
+ compatible = "nvidia,tegra186-bpmp";
+ mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>;
+ shmem = <&cpu_bpmp_tx &cpu_bpmp_rx>;
+ #clock-cells = <1>;
+ #power-domain-cells = <1>;
+ #reset-cells = <1>;
+
+ i2c {
+ compatible = "...";
+ ...
+ };
+};
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
new file mode 100644
index 000000000000..fbbacd958240
--- /dev/null
+++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt
@@ -0,0 +1,23 @@
+* Aspeed BT (Block Transfer) IPMI interface
+
+The Aspeed SOCs (AST2400 and AST2500) are commonly used as BMCs
+(BaseBoard Management Controllers) and the BT interface can be used to
+perform in-band IPMI communication with their host.
+
+Required properties:
+
+- compatible : should be "aspeed,ast2400-bt-bmc"
+- reg: physical address and size of the registers
+
+Optional properties:
+
+- interrupts: interrupt generated by the BT interface. without an
+ interrupt, the driver will operate in poll mode.
+
+Example:
+
+ ibt@1e789140 {
+ compatible = "aspeed,ast2400-bt-bmc";
+ reg = <0x1e789140 0x18>;
+ interrupts = <8>;
+ };
diff --git a/Documentation/devicetree/bindings/ipmi.txt b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
index d5f1a877ed3e..d5f1a877ed3e 100644
--- a/Documentation/devicetree/bindings/ipmi.txt
+++ b/Documentation/devicetree/bindings/ipmi/ipmi-smic.txt
diff --git a/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt
new file mode 100644
index 000000000000..b99d25fc2f26
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/nvidia,tegra186-hsp.txt
@@ -0,0 +1,52 @@
+NVIDIA Tegra Hardware Synchronization Primitives (HSP)
+
+The HSP modules are used for the processors to share resources and communicate
+together. It provides a set of hardware synchronization primitives for
+interprocessor communication. So the interprocessor communication (IPC)
+protocols can use hardware synchronization primitives, when operating between
+two processors not in an SMP relationship.
+
+The features that HSP supported are shared mailboxes, shared semaphores,
+arbitrated semaphores and doorbells.
+
+Required properties:
+- name : Should be hsp
+- compatible
+ Array of strings.
+ one of:
+ - "nvidia,tegra186-hsp"
+- reg : Offset and length of the register set for the device.
+- interrupt-names
+ Array of strings.
+ Contains a list of names for the interrupts described by the interrupt
+ property. May contain the following entries, in any order:
+ - "doorbell"
+ Users of this binding MUST look up entries in the interrupt property
+ by name, using this interrupt-names property to do so.
+- interrupts
+ Array of interrupt specifiers.
+ Must contain one entry per entry in the interrupt-names property,
+ in a matching order.
+- #mbox-cells : Should be 2.
+
+The mbox specifier of the "mboxes" property in the client node should
+contain two data. The first one should be the HSP type and the second
+one should be the ID that the client is going to use. Those information
+can be found in the following file.
+
+- <dt-bindings/mailbox/tegra186-hsp.h>.
+
+Example:
+
+hsp_top0: hsp@3c00000 {
+ compatible = "nvidia,tegra186-hsp";
+ reg = <0x0 0x03c00000 0x0 0xa0000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "doorbell";
+ #mbox-cells = <2>;
+};
+
+client {
+ ...
+ mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_XXX>;
+};
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt b/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt
new file mode 100644
index 000000000000..ec1dd408d573
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/ti-da8xx-ddrctl.txt
@@ -0,0 +1,20 @@
+* Device tree bindings for Texas Instruments da8xx DDR2/mDDR memory controller
+
+The DDR2/mDDR memory controller present on Texas Instruments da8xx SoCs features
+a set of registers which allow to tweak the controller's behavior.
+
+Documentation:
+OMAP-L138 (DA850) - http://www.ti.com/lit/ug/spruh82c/spruh82c.pdf
+
+Required properties:
+
+- compatible: "ti,da850-ddr-controller" - for da850 SoC based boards
+- reg: a tuple containing the base address of the memory
+ controller and the size of the memory area to map
+
+Example for da850 shown below.
+
+ddrctl {
+ compatible = "ti,da850-ddr-controller";
+ reg = <0xb0000000 0xe8>;
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
index 5e60ad18f147..2ad18c4ea55c 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-aspeed.txt
@@ -43,7 +43,9 @@ aspeed,ast2500-pinctrl, aspeed,g5-pinctrl:
GPID0 GPID2 GPIE0 I2C10 I2C11 I2C12 I2C13 I2C14 I2C3 I2C4 I2C5 I2C6 I2C7 I2C8
I2C9 MAC1LINK MDIO1 MDIO2 OSCCLK PEWAKE PWM0 PWM1 PWM2 PWM3 PWM4 PWM5 PWM6 PWM7
-RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 TIMER4 TIMER5 TIMER6 TIMER7 TIMER8
+RGMII1 RGMII2 RMII1 RMII2 SD1 SPI1 SPI1DEBUG SPI1PASSTHRU TIMER4 TIMER5 TIMER6
+TIMER7 TIMER8 VGABIOSROM
+
Examples:
diff --git a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
index 0725fb37a973..c16ec1866ac4 100644
--- a/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
+++ b/Documentation/devicetree/bindings/power/renesas,rcar-sysc.txt
@@ -1,12 +1,13 @@
-DT bindings for the Renesas R-Car System Controller
+DT bindings for the Renesas R-Car (RZ/G) System Controller
== System Controller Node ==
-The R-Car System Controller provides power management for the CPU cores and
-various coprocessors.
+The R-Car (RZ/G) System Controller provides power management for the CPU cores
+and various coprocessors.
Required properties:
- compatible: Must contain exactly one of the following:
+ - "renesas,r8a7743-sysc" (RZ/G1M)
- "renesas,r8a7779-sysc" (R-Car H1)
- "renesas,r8a7790-sysc" (R-Car H2)
- "renesas,r8a7791-sysc" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/reset/oxnas,reset.txt b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
index 6f06db930030..d27ccb5d04fc 100644
--- a/Documentation/devicetree/bindings/reset/oxnas,reset.txt
+++ b/Documentation/devicetree/bindings/reset/oxnas,reset.txt
@@ -5,45 +5,19 @@ Please also refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be "oxsemi,ox810se-reset"
+- compatible: For OX810SE, should be "oxsemi,ox810se-reset"
+ For OX820, should be "oxsemi,ox820-reset"
- #reset-cells: 1, see below
Parent node should have the following properties :
-- compatible: Should be "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+- compatible: For OX810SE, should be :
+ "oxsemi,ox810se-sys-ctrl", "syscon", "simple-mfd"
+ For OX820, should be :
+ "oxsemi,ox820-sys-ctrl", "syscon", "simple-mfd"
-For OX810SE, the indices are :
- - 0 : ARM
- - 1 : COPRO
- - 2 : Reserved
- - 3 : Reserved
- - 4 : USBHS
- - 5 : USBHSPHY
- - 6 : MAC
- - 7 : PCI
- - 8 : DMA
- - 9 : DPE
- - 10 : DDR
- - 11 : SATA
- - 12 : SATA_LINK
- - 13 : SATA_PHY
- - 14 : Reserved
- - 15 : NAND
- - 16 : GPIO
- - 17 : UART1
- - 18 : UART2
- - 19 : MISC
- - 20 : I2S
- - 21 : AHB_MON
- - 22 : UART3
- - 23 : UART4
- - 24 : SGDMA
- - 25 : Reserved
- - 26 : Reserved
- - 27 : Reserved
- - 28 : Reserved
- - 29 : Reserved
- - 30 : Reserved
- - 31 : BUS
+Reset indices are in dt-bindings include files :
+- For OX810SE: include/dt-bindings/reset/oxsemi,ox810se.h
+- For OX820: include/dt-bindings/reset/oxsemi,ox820.h
example:
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index 891a2fd85ed6..a21658f18fe6 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
@@ -15,15 +15,14 @@ Please refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be "st,<chip>-softreset" example:
- "st,stih415-softreset" or "st,stih416-softreset";
+- compatible: Should be st,stih407-softreset";
- #reset-cells: 1, see below
example:
softreset: softreset-controller {
#reset-cells = <1>;
- compatible = "st,stih415-softreset";
+ compatible = "st,stih407-softreset";
};
@@ -42,5 +41,4 @@ example:
Macro definitions for the supported reset channels can be found in:
-include/dt-bindings/reset/stih415-resets.h
-include/dt-bindings/reset/stih416-resets.h
+include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt
index add48f09015e..068c2c03c38f 100644
--- a/Documentation/devicetree/bindings/sram/sram.txt
+++ b/Documentation/devicetree/bindings/sram/sram.txt
@@ -4,7 +4,7 @@ Simple IO memory regions to be managed by the genalloc API.
Required properties:
-- compatible : mmio-sram
+- compatible : mmio-sram or atmel,sama5d2-securam
- reg : SRAM iomem address range
diff --git a/Documentation/devicetree/bindings/timer/jcore,pit.txt b/Documentation/devicetree/bindings/timer/jcore,pit.txt
new file mode 100644
index 000000000000..af5dd35469d7
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/jcore,pit.txt
@@ -0,0 +1,24 @@
+J-Core Programmable Interval Timer and Clocksource
+
+Required properties:
+
+- compatible: Must be "jcore,pit".
+
+- reg: Memory region(s) for timer/clocksource registers. For SMP,
+ there should be one region per cpu, indexed by the sequential,
+ zero-based hardware cpu number.
+
+- interrupts: An interrupt to assign for the timer. The actual pit
+ core is integrated with the aic and allows the timer interrupt
+ assignment to be programmed by software, but this property is
+ required in order to reserve an interrupt number that doesn't
+ conflict with other devices.
+
+
+Example:
+
+timer@200 {
+ compatible = "jcore,pit";
+ reg = < 0x200 0x30 0x500 0x30 >;
+ interrupts = < 0x48 >;
+};
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 219ffd41a911..74329fd0add2 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -395,32 +395,6 @@ is not associated with a file:
or if empty, the mapping is anonymous.
-The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
-of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. Hence, for the example above, the
-task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
-
-08048000-08049000 r-xp 00000000 03:00 8312 /opt/test
-08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test
-0804a000-0806b000 rw-p 00000000 00:00 0 [heap]
-a7cb1000-a7cb2000 ---p 00000000 00:00 0
-a7cb2000-a7eb2000 rw-p 00000000 00:00 0
-a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack]
-a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6
-a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6
-a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6
-a800b000-a800e000 rw-p 00000000 00:00 0
-a800e000-a8022000 r-xp 00000000 03:00 14462 /lib/libpthread.so.0
-a8022000-a8023000 r--p 00013000 03:00 14462 /lib/libpthread.so.0
-a8023000-a8024000 rw-p 00014000 03:00 14462 /lib/libpthread.so.0
-a8024000-a8027000 rw-p 00000000 00:00 0
-a8027000-a8043000 r-xp 00000000 03:00 8317 /lib/ld-linux.so.2
-a8043000-a8044000 r--p 0001b000 03:00 8317 /lib/ld-linux.so.2
-a8044000-a8045000 rw-p 0001c000 03:00 8317 /lib/ld-linux.so.2
-aff35000-aff4a000 rw-p 00000000 00:00 0
-ffffe000-fffff000 r-xp 00000000 00:00 0 [vdso]
-
The /proc/PID/smaps is an extension based on maps, showing the memory
consumption for each of the process's mappings. For each of mappings there
is a series of lines such as the following:
diff --git a/MAINTAINERS b/MAINTAINERS
index 1cd38a7e0064..7b3256abfc38 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1600,6 +1600,7 @@ F: arch/arm/mach-qcom/
F: arch/arm64/boot/dts/qcom/*
F: drivers/i2c/busses/i2c-qup.c
F: drivers/clk/qcom/
+F: drivers/pinctrl/qcom/
F: drivers/soc/qcom/
F: drivers/spi/spi-qup.c
F: drivers/tty/serial/msm_serial.h
@@ -4620,8 +4621,9 @@ F: sound/usb/misc/ua101.c
EXTENSIBLE FIRMWARE INTERFACE (EFI)
M: Matt Fleming <matt@codeblueprint.co.uk>
+M: Ard Biesheuvel <ard.biesheuvel@linaro.org>
L: linux-efi@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git
S: Maintained
F: Documentation/efi-stub.txt
F: arch/ia64/kernel/efi.c
@@ -8212,7 +8214,7 @@ F: include/linux/mfd/
MULTIMEDIA CARD (MMC), SECURE DIGITAL (SD) AND SDIO SUBSYSTEM
M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-mmc@vger.kernel.org
-T: git git://git.linaro.org/people/ulf.hansson/mmc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
S: Maintained
F: Documentation/devicetree/bindings/mmc/
F: drivers/mmc/
@@ -9299,7 +9301,7 @@ S: Maintained
F: drivers/pci/host/*designware*
PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
-M: Joao Pinto <jpinto@synopsys.com>
+M: Jose Abreu <Jose.Abreu@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
diff --git a/Makefile b/Makefile
index 512e47a53e9a..93beca4312c4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
index d9ee81769899..940dfb406591 100644
--- a/arch/alpha/kernel/ptrace.c
+++ b/arch/alpha/kernel/ptrace.c
@@ -157,14 +157,16 @@ put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
static inline int
read_int(struct task_struct *task, unsigned long addr, int * data)
{
- int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+ int copied = access_process_vm(task, addr, data, sizeof(int),
+ FOLL_FORCE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
static inline int
write_int(struct task_struct *task, unsigned long addr, int data)
{
- int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+ int copied = access_process_vm(task, addr, &data, sizeof(int),
+ FOLL_FORCE | FOLL_WRITE);
return (copied == sizeof(int)) ? 0 : -EIO;
}
@@ -281,7 +283,8 @@ long arch_ptrace(struct task_struct *child, long request,
/* When I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA:
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp),
+ FOLL_FORCE);
ret = -EIO;
if (copied != sizeof(tmp))
break;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 03e9273f1876..08bb84f2ad58 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1312,6 +1312,13 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+ kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+ if (err) {
+ kvm_err("Cannot map bss section\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 119e1108b1f8..f8eeeffddaff 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -28,7 +28,6 @@ if ARCH_STI
config SOC_STIH415
bool "STiH415 STMicroelectronics Consumer Electronics family"
default y
- select STIH415_RESET
help
This enables support for STMicroelectronics Digital Consumer
Electronics family StiH415 parts, primarily targeted at set-top-box
@@ -38,7 +37,6 @@ config SOC_STIH415
config SOC_STIH416
bool "STiH416 STMicroelectronics Consumer Electronics family"
default y
- select STIH416_RESET
help
This enables support for STMicroelectronics Digital Consumer
Electronics family StiH416 parts, primarily targeted at set-top-box
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30398dbc940a..969ef880d234 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -915,7 +915,7 @@ config RANDOMIZE_BASE
config RANDOMIZE_MODULE_REGION_FULL
bool "Randomize the module region independently from the core kernel"
- depends on RANDOMIZE_BASE
+ depends on RANDOMIZE_BASE && !DYNAMIC_FTRACE
default y
help
Randomizes the location of the module region without considering the
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index ab51aed6b6c1..3635b8662724 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -15,7 +15,7 @@ CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
ifneq ($(CONFIG_RELOCATABLE),)
-LDFLAGS_vmlinux += -pie -Bsymbolic
+LDFLAGS_vmlinux += -pie -shared -Bsymbolic
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 758d74fedfad..a27c3245ba21 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
u16 capability;
int def_scope; /* default scope */
bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
- void (*enable)(void *); /* Called on all active CPUs */
+ int (*enable)(void *); /* Called on all active CPUs */
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
index db0563c23482..f7865dd9d868 100644
--- a/arch/arm64/include/asm/exec.h
+++ b/arch/arm64/include/asm/exec.h
@@ -18,6 +18,9 @@
#ifndef __ASM_EXEC_H
#define __ASM_EXEC_H
+#include <linux/sched.h>
+
extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd9d5fd788f5..f5ea0ba70f07 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -178,11 +178,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
-{
- return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
-}
-
static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
@@ -203,6 +198,12 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
+static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+ kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
+}
+
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index e12af6754634..06ff7fd9e81f 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -17,6 +17,7 @@
#define __ASM_MODULE_H
#include <asm-generic/module.h>
+#include <asm/memory.h>
#define MODULE_ARCH_VERMAGIC "aarch64"
@@ -32,6 +33,10 @@ u64 module_emit_plt_entry(struct module *mod, const Elf64_Rela *rela,
Elf64_Sym *sym);
#ifdef CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_MODVERSIONS
+#define ARCH_RELOCATES_KCRCTAB
+#define reloc_start (kimage_vaddr - KIMAGE_VADDR)
+#endif
extern u64 module_alloc_base;
#else
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 2fee2f59288c..5394c8405e66 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -44,48 +44,44 @@ static inline unsigned long __percpu_##op(void *ptr, \
\
switch (size) { \
case 1: \
- do { \
- asm ("//__per_cpu_" #op "_1\n" \
- "ldxrb %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_1\n" \
+ "1: ldxrb %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrb %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u8 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrb %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u8 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 2: \
- do { \
- asm ("//__per_cpu_" #op "_2\n" \
- "ldxrh %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_2\n" \
+ "1: ldxrh %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxrh %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u16 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxrh %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u16 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 4: \
- do { \
- asm ("//__per_cpu_" #op "_4\n" \
- "ldxr %w[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_4\n" \
+ "1: ldxr %w[ret], %[ptr]\n" \
#asm_op " %w[ret], %w[ret], %w[val]\n" \
- "stxr %w[loop], %w[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u32 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %w[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u32 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
case 8: \
- do { \
- asm ("//__per_cpu_" #op "_8\n" \
- "ldxr %[ret], %[ptr]\n" \
+ asm ("//__per_cpu_" #op "_8\n" \
+ "1: ldxr %[ret], %[ptr]\n" \
#asm_op " %[ret], %[ret], %[val]\n" \
- "stxr %w[loop], %[ret], %[ptr]\n" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u64 *)ptr) \
- : [val] "Ir" (val)); \
- } while (loop); \
+ " stxr %w[loop], %[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b" \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u64 *)ptr) \
+ : [val] "Ir" (val)); \
break; \
default: \
BUILD_BUG(); \
@@ -150,44 +146,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
switch (size) {
case 1:
- do {
- asm ("//__percpu_xchg_1\n"
- "ldxrb %w[ret], %[ptr]\n"
- "stxrb %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u8 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_1\n"
+ "1: ldxrb %w[ret], %[ptr]\n"
+ " stxrb %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u8 *)ptr)
+ : [val] "r" (val));
break;
case 2:
- do {
- asm ("//__percpu_xchg_2\n"
- "ldxrh %w[ret], %[ptr]\n"
- "stxrh %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u16 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_2\n"
+ "1: ldxrh %w[ret], %[ptr]\n"
+ " stxrh %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u16 *)ptr)
+ : [val] "r" (val));
break;
case 4:
- do {
- asm ("//__percpu_xchg_4\n"
- "ldxr %w[ret], %[ptr]\n"
- "stxr %w[loop], %w[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u32 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_4\n"
+ "1: ldxr %w[ret], %[ptr]\n"
+ " stxr %w[loop], %w[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u32 *)ptr)
+ : [val] "r" (val));
break;
case 8:
- do {
- asm ("//__percpu_xchg_8\n"
- "ldxr %[ret], %[ptr]\n"
- "stxr %w[loop], %[val], %[ptr]\n"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u64 *)ptr)
- : [val] "r" (val));
- } while (loop);
+ asm ("//__percpu_xchg_8\n"
+ "1: ldxr %[ret], %[ptr]\n"
+ " stxr %w[loop], %[val], %[ptr]\n"
+ " cbnz %w[loop], 1b"
+ : [loop] "=&r"(loop), [ret] "=&r"(ret),
+ [ptr] "+Q"(*(u64 *)ptr)
+ : [val] "r" (val));
break;
default:
BUILD_BUG();
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index df2e53d3a969..60e34824e18c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index e8d46e8e6079..6c80b3699cb8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -286,7 +286,7 @@ asm(
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)v; \
- asm volatile("msr_s " __stringify(r) ", %0" : : "rZ" (__val)); \
+ asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
static inline void config_sctlr_el1(u32 clear, u32 set)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index bcaf6fba1b65..55d0adbf6509 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -21,6 +21,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -102,6 +103,13 @@ static inline void set_fs(mm_segment_t fs)
flag; \
})
+/*
+ * When dealing with data aborts or instruction traps we may end up with
+ * a tagged userland pointer. Clear the tag to get a sane pointer to pass
+ * on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 42ffdb54e162..b0988bb1bf64 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -280,35 +280,43 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
/*
* Error-checking SWP macros implemented using ldxr{b}/stxr{b}
*/
-#define __user_swpX_asm(data, addr, res, temp, B) \
+
+/* Arbitrary constant to ensure forward-progress of the LL/SC loop */
+#define __SWP_LL_SC_LOOPS 4
+
+#define __user_swpX_asm(data, addr, res, temp, temp2, B) \
__asm__ __volatile__( \
+ " mov %w3, %w7\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- "0: ldxr"B" %w2, [%3]\n" \
- "1: stxr"B" %w0, %w1, [%3]\n" \
+ "0: ldxr"B" %w2, [%4]\n" \
+ "1: stxr"B" %w0, %w1, [%4]\n" \
" cbz %w0, 2f\n" \
- " mov %w0, %w4\n" \
+ " sub %w3, %w3, #1\n" \
+ " cbnz %w3, 0b\n" \
+ " mov %w0, %w5\n" \
" b 3f\n" \
"2:\n" \
" mov %w1, %w2\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
- "4: mov %w0, %w5\n" \
+ "4: mov %w0, %w6\n" \
" b 3b\n" \
" .popsection" \
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
- : "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
+ : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
+ "i" (__SWP_LL_SC_LOOPS) \
: "memory")
-#define __user_swp_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "")
-#define __user_swpb_asm(data, addr, res, temp) \
- __user_swpX_asm(data, addr, res, temp, "b")
+#define __user_swp_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "")
+#define __user_swpb_asm(data, addr, res, temp, temp2) \
+ __user_swpX_asm(data, addr, res, temp, temp2, "b")
/*
* Bit 22 of the instruction encoding distinguishes between
@@ -328,12 +336,12 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
}
while (1) {
- unsigned long temp;
+ unsigned long temp, temp2;
if (type == TYPE_SWPB)
- __user_swpb_asm(*data, address, res, temp);
+ __user_swpb_asm(*data, address, res, temp, temp2);
else
- __user_swp_asm(*data, address, res, temp);
+ __user_swp_asm(*data, address, res, temp, temp2);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 0150394f4cab..b75e917aac46 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
{
/* Clear SCTLR_EL1.UCT */
config_sctlr_el1(SCTLR_EL1_UCT, 0);
+ return 0;
}
#define MIDR_RANGE(model, min, max) \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d577f263cc4a..c02504ea304b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -19,7 +19,9 @@
#define pr_fmt(fmt) "CPU features: " fmt
#include <linux/bsearch.h>
+#include <linux/cpumask.h>
#include <linux/sort.h>
+#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
if (caps->enable && cpus_have_cap(caps->capability))
- on_each_cpu(caps->enable, NULL, true);
+ /*
+ * Use stop_machine() as it schedules the work allowing
+ * us to modify PSTATE, instead of on_each_cpu() which
+ * uses an IPI, giving us a PSTATE that disappears when
+ * we return.
+ */
+ stop_machine(caps->enable, NULL, cpu_online_mask);
}
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 427f6d3f084c..332e33193ccf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -586,8 +586,9 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
- msr mdcr_el2, x0 // all PMU counters from EL1
4:
+ csel x0, xzr, x0, lt // all PMU counters from EL1
+ msr mdcr_el2, x0 // (if they exist)
/* Stage-2 translation */
msr vttbr_el2, xzr
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 27b2f1387df4..01753cd7d3f0 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -49,6 +49,7 @@
#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
+#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
@@ -186,10 +187,19 @@ void __show_regs(struct pt_regs *regs)
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
regs->pc, lr, regs->pstate);
printk("sp : %016llx\n", sp);
- for (i = top_reg; i >= 0; i--) {
+
+ i = top_reg;
+
+ while (i >= 0) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
- if (i % 2 == 0)
- printk("\n");
+ i--;
+
+ if (i % 2 == 0) {
+ pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
+ i--;
+ }
+
+ pr_cont("\n");
}
printk("\n");
}
@@ -301,7 +311,7 @@ static void tls_thread_switch(struct task_struct *next)
}
/* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
{
if (IS_ENABLED(CONFIG_ARM64_UAO)) {
if (task_thread_info(next)->addr_limit == KERNEL_DS)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index b8799e7c79de..1bec41b5fda3 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -135,7 +135,7 @@ ENTRY(_cpu_resume)
#ifdef CONFIG_KASAN
mov x0, sp
- bl kasan_unpoison_remaining_stack
+ bl kasan_unpoison_task_stack_below
#endif
ldp x19, x20, [x29, #16]
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index d3f151cfd4a1..8507703dabe4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -544,6 +544,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
bootcpu_valid = true;
+ early_map_cpu_to_node(0, acpi_numa_get_nid(0, hwid));
return;
}
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index ad734142070d..bb0cd787a9d3 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,8 +1,11 @@
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <asm/alternative.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
+#include <asm/exec.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
@@ -50,6 +53,14 @@ void notrace __cpu_suspend_exit(void)
set_my_cpu_offset(per_cpu_offset(cpu));
/*
+ * PSTATE was not saved over suspend/resume, re-enable any detected
+ * features that might not have been set correctly.
+ */
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
+ CONFIG_ARM64_PAN));
+ uao_thread_switch(current);
+
+ /*
* Restore HW breakpoint registers to sane values
* before debug exceptions are possibly reenabled
* through local_dbg_restore.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..c9986b3e0a96 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -428,24 +428,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
{
config_sctlr_el1(SCTLR_EL1_UCI, 0);
+ return 0;
}
#define __user_cache_maint(insn, address, res) \
- asm volatile ( \
- "1: " insn ", %1\n" \
- " mov %w0, #0\n" \
- "2:\n" \
- " .pushsection .fixup,\"ax\"\n" \
- " .align 2\n" \
- "3: mov %w0, %w2\n" \
- " b 2b\n" \
- " .popsection\n" \
- _ASM_EXTABLE(1b, 3b) \
- : "=r" (res) \
- : "r" (address), "i" (-EFAULT) )
+ if (untagged_addr(address) >= user_addr_max()) \
+ res = -EFAULT; \
+ else \
+ asm volatile ( \
+ "1: " insn ", %1\n" \
+ " mov %w0, #0\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %w0, %w2\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "=r" (res) \
+ : "r" (address), "i" (-EFAULT) )
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 53d9159662fe..0f8788374815 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -29,7 +29,9 @@
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <linux/preempt.h>
+#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
@@ -670,9 +672,17 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+ return 0;
}
#endif /* CONFIG_ARM64_PAN */
@@ -683,8 +693,9 @@ void cpu_enable_pan(void *__unused)
* We need to enable the feature at runtime (instead of adding it to
* PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
*/
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
{
asm(SET_PSTATE_UAO(1));
+ return 0;
}
#endif /* CONFIG_ARM64_UAO */
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 21c489bdeb4e..212c4d1e2f26 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -421,35 +421,35 @@ void __init mem_init(void)
pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
- pr_cont(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" kasan : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
- pr_cont(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" modules : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(MODULES_VADDR, MODULES_END));
- pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
+ pr_notice(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
MLG(VMALLOC_START, VMALLOC_END));
- pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_text, _etext));
- pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__start_rodata, __init_begin));
- pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__init_begin, __init_end));
- pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(_sdata, _edata));
- pr_cont(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
+ pr_notice(" .bss : 0x%p" " - 0x%p" " (%6ld KB)\n",
MLK_ROUNDUP(__bss_start, __bss_stop));
- pr_cont(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
+ pr_notice(" fixed : 0x%16lx - 0x%16lx (%6ld KB)\n",
MLK(FIXADDR_START, FIXADDR_TOP));
- pr_cont(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- pr_cont(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
+ pr_notice(" vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n",
MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
- pr_cont(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
+ pr_notice(" 0x%16lx - 0x%16lx (%6ld MB actual)\n",
MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
(unsigned long)virt_to_page(high_memory)));
#endif
- pr_cont(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
+ pr_notice(" memory : 0x%16lx - 0x%16lx (%6ld MB)\n",
MLM(__phys_to_virt(memblock_start_of_DRAM()),
(unsigned long)high_memory));
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 8b8fe671b1a6..8d79286ee4e8 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -271,7 +271,7 @@ long arch_ptrace(struct task_struct *child, long request,
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &tmp,
- to_copy, 0);
+ to_copy, FOLL_FORCE);
if (copied)
break;
@@ -324,7 +324,8 @@ long arch_ptrace(struct task_struct *child, long request,
case BFIN_MEM_ACCESS_CORE:
case BFIN_MEM_ACCESS_CORE_ONLY:
copied = access_process_vm(child, addr, &data,
- to_copy, 1);
+ to_copy,
+ FOLL_FORCE | FOLL_WRITE);
break;
case BFIN_MEM_ACCESS_DMA:
if (safe_dma_memcpy(paddr, &data, to_copy))
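
This hunk is the first of many in the series that convert the last argument of access_process_vm() from an int write flag to a gup_flags bitmask. A hedged before/after sketch of the calling convention, using a hypothetical caller; as the converted ptrace sites suggest, the forced access that the helper previously applied on its own now has to be requested explicitly with FOLL_FORCE:

    /* old convention: 0 = read, 1 = write */
    copied = access_process_vm(child, addr, &val, sizeof(val), 1);

    /* new convention: pass FOLL_* flags; add FOLL_WRITE only for writes */
    copied = access_process_vm(child, addr, &val, sizeof(val),
                               FOLL_FORCE | FOLL_WRITE);
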
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index b5698c876fcc..099e170a93ee 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2722,7 +2722,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);
@@ -2736,8 +2735,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
if (oper.do_cipher){
err = get_user_pages((unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f085229cf870..f0df654ac6fc 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -147,7 +147,7 @@ long arch_ptrace(struct task_struct *child, long request,
/* The trampoline page is globally mapped, no page table to traverse.*/
tmp = *(unsigned long*)addr;
} else {
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
@@ -279,7 +279,7 @@ static int insn_size(struct task_struct *child, unsigned long pc)
int opsize = 0;
/* Read the opcode at pc (do what PTRACE_PEEKTEXT would do). */
- copied = access_process_vm(child, pc, &opcode, sizeof(opcode), 0);
+ copied = access_process_vm(child, pc, &opcode, sizeof(opcode), FOLL_FORCE);
if (copied != sizeof(opcode))
return 0;
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 09f845793d12..5ed0ea92c5bf 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -142,7 +142,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
u64 virt_addr=simple_strtoull(buf, NULL, 16);
int ret;
- ret = get_user_pages(virt_addr, 1, VM_READ, 0, NULL, NULL);
+ ret = get_user_pages(virt_addr, 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6f54d511cc50..31aa8c0f68e1 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -453,7 +453,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
return 0;
}
}
- copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
+ copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
if (copied != sizeof(ret))
return -EIO;
*val = ret;
@@ -489,7 +489,8 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
*ia64_rse_skip_regs(krbs, regnum) = val;
}
}
- } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
+ } else if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
return 0;
@@ -543,7 +544,8 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
if (ret < 0)
return ret;
- if (access_process_vm(child, addr, &val, sizeof(val), 1)
+ if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(val))
return -EIO;
}
@@ -559,7 +561,8 @@ ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
/* now copy word for word from user rbs to kernel rbs: */
for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
- if (access_process_vm(child, addr, &val, sizeof(val), 0)
+ if (access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE)
!= sizeof(val))
return -EIO;
@@ -1156,7 +1159,8 @@ arch_ptrace (struct task_struct *child, long request,
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
/* read word at location addr */
- if (access_process_vm(child, addr, &data, sizeof(data), 0)
+ if (access_process_vm(child, addr, &data, sizeof(data),
+ FOLL_FORCE)
!= sizeof(data))
return -EIO;
/* ensure return value is not mistaken for error code */
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 51f5e9aa4901..c145605a981f 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -493,7 +493,8 @@ unregister_all_debug_traps(struct task_struct *child)
int i;
for (i = 0; i < p->nr_trap; i++)
- access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
+ access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]),
+ FOLL_FORCE | FOLL_WRITE);
p->nr_trap = 0;
}
@@ -537,7 +538,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
unsigned long next_insn, code;
unsigned long addr = next_pc & ~3;
- if (access_process_vm(child, addr, &next_insn, sizeof(next_insn), 0)
+ if (access_process_vm(child, addr, &next_insn, sizeof(next_insn),
+ FOLL_FORCE)
!= sizeof(next_insn)) {
return -1; /* error */
}
@@ -546,7 +548,8 @@ embed_debug_trap(struct task_struct *child, unsigned long next_pc)
if (register_debug_trap(child, next_pc, next_insn, &code)) {
return -1; /* error */
}
- if (access_process_vm(child, addr, &code, sizeof(code), 1)
+ if (access_process_vm(child, addr, &code, sizeof(code),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(code)) {
return -1; /* error */
}
@@ -562,7 +565,8 @@ withdraw_debug_trap(struct pt_regs *regs)
addr = (regs->bpc - 2) & ~3;
regs->bpc -= 2;
if (unregister_debug_trap(current, addr, &code)) {
- access_process_vm(current, addr, &code, sizeof(code), 1);
+ access_process_vm(current, addr, &code, sizeof(code),
+ FOLL_FORCE | FOLL_WRITE);
invalidate_cache();
}
}
@@ -589,7 +593,8 @@ void user_enable_single_step(struct task_struct *child)
/* Compute next pc. */
pc = get_stack_long(child, PT_BPC);
- if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
+ if (access_process_vm(child, pc&~3, &insn, sizeof(insn),
+ FOLL_FORCE)
!= sizeof(insn))
return;
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 283b5a1967d1..7e71a4e0281b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -70,7 +70,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 0);
+ sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *) (unsigned long) data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &data,
- sizeof(data), 1) == sizeof(data))
+ sizeof(data),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(data))
break;
ret = -EIO;
break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index ce961495b5e1..622037d851a3 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 42d124fb6474..d8c3c159289a 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -287,7 +287,7 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
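
The per-arch fast-GUP slow paths get the same treatment: get_user_pages_unlocked() loses its separate write/force ints and takes a gup_flags argument after the pages array. A brief sketch of the two call shapes, with hypothetical variables:

    /* old: ..., int write, int force, struct page **pages */
    ret = get_user_pages_unlocked(start, nr_pages, write, 0, pages);

    /* new: ..., struct page **pages, unsigned int gup_flags */
    ret = get_user_pages_unlocked(start, nr_pages, pages,
                                  write ? FOLL_WRITE : 0);
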
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index f7a184b6c35b..57d42d129033 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -32,9 +32,16 @@ static struct addr_range prep_kernel(void)
void *addr = 0;
struct elf_info ei;
long len;
+ int uncompressed_image = 0;
- partial_decompress(vmlinuz_addr, vmlinuz_size,
+ len = partial_decompress(vmlinuz_addr, vmlinuz_size,
elfheader, sizeof(elfheader), 0);
+ /* assume uncompressed data if -1 is returned */
+ if (len == -1) {
+ uncompressed_image = 1;
+ memcpy(elfheader, vmlinuz_addr, sizeof(elfheader));
+ printf("No valid compressed data found, assume uncompressed data\n\r");
+ }
if (!parse_elf64(elfheader, &ei) && !parse_elf32(elfheader, &ei))
fatal("Error: not a valid PPC32 or PPC64 ELF file!\n\r");
@@ -67,6 +74,13 @@ static struct addr_range prep_kernel(void)
"device tree\n\r");
}
+ if (uncompressed_image) {
+ memcpy(addr, vmlinuz_addr + ei.elfoffset, ei.loadsize);
+ printf("0x%lx bytes of uncompressed data copied\n\r",
+ ei.loadsize);
+ goto out;
+ }
+
/* Finally, decompress the kernel */
printf("Decompressing (0x%p <- 0x%p:0x%p)...\n\r", addr,
vmlinuz_addr, vmlinuz_addr+vmlinuz_size);
@@ -82,7 +96,7 @@ static struct addr_range prep_kernel(void)
len, ei.loadsize);
printf("Done! Decompressed 0x%lx bytes\n\r", len);
-
+out:
flush_cache(addr, ei.loadsize);
return (struct addr_range){addr, ei.memsize};
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index cf12c580f6b2..e8cdfec8d512 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -16,6 +16,10 @@
#define __NR__exit __NR_exit
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db327c8..010b7b310237 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
copied = access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 0);
+ sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
break;
ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
ret = 0;
if (access_process_vm(child, (u64)addrOthers, &tmp,
- sizeof(tmp), 1) == sizeof(tmp))
+ sizeof(tmp),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
break;
ret = -EIO;
break;
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index bb0354222b11..362954f98029 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
switch (REGION_ID(ea)) {
case USER_REGION_ID:
pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
+ if (mm == NULL)
+ return 1;
psize = get_slice_psize(mm, ea);
ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 75b9cd6150cc..a51c188b81f3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -845,7 +845,7 @@ void __init dump_numa_cpu_topology(void)
return;
for_each_online_node(node) {
- printk(KERN_DEBUG "Node %d CPUs:", node);
+ pr_info("Node %d CPUs:", node);
count = 0;
/*
@@ -856,52 +856,18 @@ void __init dump_numa_cpu_topology(void)
if (cpumask_test_cpu(cpu,
node_to_cpumask_map[node])) {
if (count == 0)
- printk(" %u", cpu);
+ pr_cont(" %u", cpu);
++count;
} else {
if (count > 1)
- printk("-%u", cpu - 1);
+ pr_cont("-%u", cpu - 1);
count = 0;
}
}
if (count > 1)
- printk("-%u", nr_cpu_ids - 1);
- printk("\n");
- }
-}
-
-static void __init dump_numa_memory_topology(void)
-{
- unsigned int node;
- unsigned int count;
-
- if (min_common_depth == -1 || !numa_enabled)
- return;
-
- for_each_online_node(node) {
- unsigned long i;
-
- printk(KERN_DEBUG "Node %d Memory:", node);
-
- count = 0;
-
- for (i = 0; i < memblock_end_of_DRAM();
- i += (1 << SECTION_SIZE_BITS)) {
- if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
- if (count == 0)
- printk(" 0x%lx", i);
- ++count;
- } else {
- if (count > 0)
- printk("-0x%lx", i);
- count = 0;
- }
- }
-
- if (count > 0)
- printk("-0x%lx", i);
- printk("\n");
+ pr_cont("-%u", nr_cpu_ids - 1);
+ pr_cont("\n");
}
}
@@ -947,8 +913,6 @@ void __init initmem_init(void)
if (parse_numa_properties())
setup_nonnuma();
- else
- dump_numa_memory_topology();
memblock_dump_all();
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 1cab8a177d0e..7a27eebab28a 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -119,8 +119,13 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
- WARN_ONCE(true, "kvm: unhandled validity intercept 0x%x\n", viwhy);
- return -EOPNOTSUPP;
+ KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
+ current->pid, vcpu->kvm);
+
+ /* do not warn on invalid runtime instrumentation mode */
+ WARN_ONCE(viwhy != 0x44, "kvm: unhandled validity intercept 0x%x\n",
+ viwhy);
+ return -EINVAL;
}
static int handle_instruction(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index adb0c34bf431..18d4107e10ee 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -266,7 +266,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+ ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
diff --git a/arch/score/kernel/ptrace.c b/arch/score/kernel/ptrace.c
index 55836188b217..4f7314d5f334 100644
--- a/arch/score/kernel/ptrace.c
+++ b/arch/score/kernel/ptrace.c
@@ -131,7 +131,7 @@ read_tsk_long(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+ copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
return copied != sizeof(*res) ? -EIO : 0;
}
@@ -142,7 +142,7 @@ read_tsk_short(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, res, sizeof(*res), 0);
+ copied = access_process_vm(child, addr, res, sizeof(*res), FOLL_FORCE);
return copied != sizeof(*res) ? -EIO : 0;
}
@@ -153,7 +153,8 @@ write_tsk_short(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+ copied = access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE);
return copied != sizeof(val) ? -EIO : 0;
}
@@ -164,7 +165,8 @@ write_tsk_long(struct task_struct *child,
{
int copied;
- copied = access_process_vm(child, addr, &val, sizeof(val), 1);
+ copied = access_process_vm(child, addr, &val, sizeof(val),
+ FOLL_FORCE | FOLL_WRITE);
return copied != sizeof(val) ? -EIO : 0;
}
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 00476662ac2c..336f33a419d9 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -31,7 +31,7 @@ isa-y := $(isa-y)-up
endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
-cflags-$(CONFIG_CPU_J2) := $(call cc-option,-mj2,)
+cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \
$(call cc-option,-m2a-nofpu,) \
$(call cc-option,-m4-nofpu,)
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index e9c2c42031fe..4e21949593cf 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -22,6 +22,16 @@ config SH_DEVICE_TREE
have sufficient driver coverage to use this option; do not
select it if you are using original SuperH hardware.
+config SH_JCORE_SOC
+ bool "J-Core SoC"
+ depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+ select CLKSRC_JCORE_PIT
+ select JCORE_AIC
+ default y if CPU_J2
+ help
+ Select this option to include drivers for core components of the
+ J-Core SoC, including interrupt controllers and timers.
+
config SH_SOLUTION_ENGINE
bool "SolutionEngine"
select SOLUTION_ENGINE
diff --git a/arch/sh/configs/j2_defconfig b/arch/sh/configs/j2_defconfig
index 94d1eca52f72..2eb81ebe3888 100644
--- a/arch/sh/configs/j2_defconfig
+++ b/arch/sh/configs/j2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MEMORY_START=0x10000000
CONFIG_MEMORY_SIZE=0x04000000
CONFIG_CPU_BIG_ENDIAN=y
CONFIG_SH_DEVICE_TREE=y
+CONFIG_SH_JCORE_SOC=y
CONFIG_HZ_100=y
CONFIG_CMDLINE_OVERWRITE=y
CONFIG_CMDLINE="console=ttyUL0 earlycon"
@@ -20,6 +21,7 @@ CONFIG_INET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_NETDEVICES=y
+CONFIG_SERIAL_EARLYCON=y
CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_I2C=y
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index 40fa6c8adc43..063c298ba56c 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc4928a089..ac082dd8c67d 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -127,7 +127,8 @@ static int get_from_target(struct task_struct *target, unsigned long uaddr,
if (copy_from_user(kbuf, (void __user *) uaddr, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 0);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE);
if (len2 != len)
return -EFAULT;
}
@@ -141,7 +142,8 @@ static int set_to_target(struct task_struct *target, unsigned long uaddr,
if (copy_to_user((void __user *) uaddr, kbuf, len))
return -EFAULT;
} else {
- int len2 = access_process_vm(target, uaddr, kbuf, len, 1);
+ int len2 = access_process_vm(target, uaddr, kbuf, len,
+ FOLL_FORCE | FOLL_WRITE);
if (len2 != len)
return -EFAULT;
}
@@ -505,7 +507,8 @@ static int genregs32_get(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- k, sizeof(*k), 0)
+ k, sizeof(*k),
+ FOLL_FORCE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -531,12 +534,14 @@ static int genregs32_get(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- &reg, sizeof(reg), 0)
+ &reg, sizeof(reg),
+ FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long) u,
- &reg, sizeof(reg), 1)
+ &reg, sizeof(reg),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
@@ -615,7 +620,8 @@ static int genregs32_set(struct task_struct *target,
(unsigned long)
&reg_window[pos],
(void *) k,
- sizeof(*k), 1)
+ sizeof(*k),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(*k))
return -EFAULT;
k++;
@@ -642,13 +648,15 @@ static int genregs32_set(struct task_struct *target,
if (access_process_vm(target,
(unsigned long)
u,
- &reg, sizeof(reg), 0)
+ &reg, sizeof(reg),
+ FOLL_FORCE)
!= sizeof(reg))
return -EFAULT;
if (access_process_vm(target,
(unsigned long)
&reg_window[pos],
- &reg, sizeof(reg), 1)
+ &reg, sizeof(reg),
+ FOLL_FORCE | FOLL_WRITE)
!= sizeof(reg))
return -EFAULT;
pos++;
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 4e06750a5d29..cd0e32bbcb1d 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -238,7 +238,8 @@ slow:
pages += nr;
ret = get_user_pages_unlocked(start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index ff6ef7b30822..2b3618542544 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -389,5 +389,3 @@
380 i386 pkey_mprotect sys_pkey_mprotect
381 i386 pkey_alloc sys_pkey_alloc
382 i386 pkey_free sys_pkey_free
-#383 i386 pkey_get sys_pkey_get
-#384 i386 pkey_set sys_pkey_set
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 2f024d02511d..e93ef0b38db8 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -338,8 +338,6 @@
329 common pkey_mprotect sys_pkey_mprotect
330 common pkey_alloc sys_pkey_alloc
331 common pkey_free sys_pkey_free
-#332 common pkey_get sys_pkey_get
-#333 common pkey_set sys_pkey_set
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a3a9eb84b5cf..eab0915f5995 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3898,6 +3898,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_XEON_PHI_KNL:
+ case INTEL_FAM6_XEON_PHI_KNM:
memcpy(hw_cache_event_ids,
slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs,
@@ -3912,7 +3913,7 @@ __init int intel_pmu_init(void)
x86_pmu.flags |= PMU_FL_HAS_RSP_1;
x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
- pr_cont("Knights Landing events, ");
+ pr_cont("Knights Landing/Mill events, ");
break;
case INTEL_FAM6_SKYLAKE_MOBILE:
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index fc6cf21c535e..81b321ace8e0 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -458,8 +458,8 @@ void intel_pmu_lbr_del(struct perf_event *event)
if (!x86_pmu.lbr_nr)
return;
- if (branch_user_callstack(cpuc->br_sel) && event->ctx &&
- event->ctx->task_ctx_data) {
+ if (branch_user_callstack(cpuc->br_sel) &&
+ event->ctx->task_ctx_data) {
task_ctx = event->ctx->task_ctx_data;
task_ctx->lbr_callstack_users--;
}
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index b0f0e835a770..0a535cea8ff3 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -763,6 +763,7 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
+ X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_rapl_init),
X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index d9844cc74486..efca2685d876 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1349,6 +1349,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X, bdx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_uncore_init),
+ X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X, skx_uncore_init),
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9ae5ab80a497..34a46dc076d3 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -64,5 +64,6 @@
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 56f4c6676b29..78f3760ca1f2 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -88,7 +88,6 @@
#define MSR_IA32_RTIT_CTL 0x00000570
#define MSR_IA32_RTIT_STATUS 0x00000571
-#define MSR_IA32_RTIT_STATUS 0x00000571
#define MSR_IA32_RTIT_ADDR0_A 0x00000580
#define MSR_IA32_RTIT_ADDR0_B 0x00000581
#define MSR_IA32_RTIT_ADDR1_A 0x00000582
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index 3d33a719f5c1..a34e0d4b957d 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -103,8 +103,10 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
({ \
long tmp; \
struct rw_semaphore* ret; \
+ register void *__sp asm(_ASM_SP); \
+ \
asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%3)\n\t" \
+ LOCK_PREFIX " xadd %1,(%4)\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
@@ -112,7 +114,7 @@ static inline bool __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), "=a" (ret) \
+ : "+m" (sem->count), "=d" (tmp), "=a" (ret), "+r" (__sp) \
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2aaca53c0974..ad6f5eb07a95 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -52,6 +52,15 @@ struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>
+struct thread_info {
+ unsigned long flags; /* low level flags */
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+}
+
#define init_stack (init_thread_union.stack)
#else /* !__ASSEMBLY__ */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 8cb57df9398d..1db8dc490b66 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -32,6 +32,8 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4VNNIW, CR_EDX, 2, 0x00000007, 0 },
+ { X86_FEATURE_AVX512_4FMAPS, CR_EDX, 3, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 81160578b91a..5130985b758b 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
+#include <asm/timer.h>
#include <asm/apic.h>
#define CPUID_VMWARE_INFO_LEAF 0x40000000
@@ -94,6 +95,10 @@ static void __init vmware_platform_setup(void)
} else {
pr_warn("Failed to get TSC freq from the hypervisor\n");
}
+
+#ifdef CONFIG_X86_IO_APIC
+ no_timer_check = 1;
+#endif
}
/*
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index b85fe5f91c3f..90e8dde3ec26 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -350,7 +350,7 @@ int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
* continue building up new bios map based on this
* information
*/
- if (current_type != last_type) {
+ if (current_type != last_type || current_type == E820_PRAM) {
if (last_type != 0) {
new_bios[new_bios_entry].size =
change_point[chgidx]->addr - last_addr;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 124aa5c593f8..095ef7ddd6ae 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -74,6 +74,8 @@ void fpu__xstate_clear_all_cpu_caps(void)
setup_clear_cpu_cap(X86_FEATURE_MPX);
setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
setup_clear_cpu_cap(X86_FEATURE_PKU);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4VNNIW);
+ setup_clear_cpu_cap(X86_FEATURE_AVX512_4FMAPS);
}
/*
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 28cee019209c..d9d8d16b69db 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -50,6 +50,7 @@
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/frame.h>
+#include <linux/kasan.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
@@ -1057,9 +1058,10 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
* tailcall optimization. So, to be absolutely safe
* we also save and restore enough stack bytes to cover
* the argument area.
+ * Use __memcpy() to avoid KASAN stack out-of-bounds reports as we copy
+ * raw stack chunk with redzones:
*/
- memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
- MIN_STACK_SIZE(addr));
+ __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr));
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
@@ -1080,6 +1082,9 @@ void jprobe_return(void)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ /* Unpoison stack redzones in the frames we are going to jump over. */
+ kasan_unpoison_stack_above_sp_to(kcb->jprobe_saved_sp);
+
asm volatile (
#ifdef CONFIG_X86_64
" xchg %%rbx,%%rsp \n"
@@ -1118,7 +1123,7 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
/* It's OK to start function graph tracing again */
unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
+ __memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index 40df33753bae..ec1f756f9dc9 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -105,9 +105,6 @@ void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact)
/* Don't let flags to be set from userspace */
act->sa.sa_flags &= ~(SA_IA32_ABI | SA_X32_ABI);
- if (user_64bit_mode(current_pt_regs()))
- return;
-
if (in_ia32_syscall())
act->sa.sa_flags |= SA_IA32_ABI;
if (in_x32_syscall())
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 68f8cc222f25..c00cb64bc0a1 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -261,8 +261,10 @@ static inline void __smp_reschedule_interrupt(void)
__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
+ irq_enter();
ack_APIC_irq();
__smp_reschedule_interrupt();
+ irq_exit();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 951f093a96fe..42f5eb7b4f6c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1409,15 +1409,17 @@ __init void prefill_possible_map(void)
/* No boot processor was found in mptable or ACPI MADT */
if (!num_processors) {
- int apicid = boot_cpu_physical_apicid;
- int cpu = hard_smp_processor_id();
+ if (boot_cpu_has(X86_FEATURE_APIC)) {
+ int apicid = boot_cpu_physical_apicid;
+ int cpu = hard_smp_processor_id();
- pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
+ pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);
- /* Make sure boot cpu is enumerated */
- if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
- apic->apic_id_valid(apicid))
- generic_processor_info(apicid, boot_cpu_apic_version);
+ /* Make sure boot cpu is enumerated */
+ if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
+ apic->apic_id_valid(apicid))
+ generic_processor_info(apicid, boot_cpu_apic_version);
+ }
if (!num_processors)
num_processors = 1;
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index c9a073866ca7..a23ce84a3f6c 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -57,7 +57,8 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
unsigned char opcode[15];
unsigned long addr = convert_ip_to_linear(child, regs);
- copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
+ copied = access_process_vm(child, addr, opcode, sizeof(opcode),
+ FOLL_FORCE);
for (i = 0; i < copied; i++) {
switch (opcode[i]) {
/* popf and iret */
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index c7220ba94aa7..1a22de70f7f7 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -594,7 +594,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->irr = 0;
ioapic->irr_delivered = 0;
ioapic->id = 0;
- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
+ memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
rtc_irq_eoi_tracking_reset(ioapic);
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c633de84dd7..e375235d81c9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5733,13 +5733,13 @@ static int kvmclock_cpu_online(unsigned int cpu)
static void kvm_timer_init(void)
{
- int cpu;
-
max_tsc_khz = tsc_khz;
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
+ int cpu;
+
memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
cpufreq_get_policy(&policy, cpu);
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index b8b6a60b32cf..0d4fb3ebbbac 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -435,7 +435,7 @@ slow_irqon:
ret = get_user_pages_unlocked(start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 80476878eb4c..e4f800999b32 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -544,10 +544,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
{
long gup_ret;
int nr_pages = 1;
- int force = 0;
- gup_ret = get_user_pages((unsigned long)addr, nr_pages, write,
- force, NULL, NULL);
+ gup_ret = get_user_pages((unsigned long)addr, nr_pages,
+ write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index b4d5e95fe4df..4a6a5a26c582 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -40,7 +40,15 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
*/
return BIOS_STATUS_UNIMPLEMENTED;
- ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+ /*
+ * If EFI_OLD_MEMMAP is set, we need to fall back to using our old EFI
+ * callback method, which uses efi_call() directly, with the kernel page tables:
+ */
+ if (unlikely(test_bit(EFI_OLD_MEMMAP, &efi.flags)))
+ ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
+ else
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+
return ret;
}
EXPORT_SYMBOL_GPL(uv_bios_call);
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 5766ead6fdb9..60a5a5a85505 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -36,7 +36,8 @@ int is_syscall(unsigned long addr)
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
- n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+ n = access_process_vm(current, addr, &instr, sizeof(instr),
+ FOLL_FORCE);
if (n != sizeof(instr)) {
printk(KERN_ERR "is_syscall : failed to read "
"instruction from 0x%lx\n", addr);
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index 0b5c184dd5b3..e30202b1716e 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -212,7 +212,8 @@ int is_syscall(unsigned long addr)
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
*/
- n = access_process_vm(current, addr, &instr, sizeof(instr), 0);
+ n = access_process_vm(current, addr, &instr, sizeof(instr),
+ FOLL_FORCE);
if (n != sizeof(instr)) {
printk("is_syscall : failed to read instruction from "
"0x%lx\n", addr);
diff --git a/block/badblocks.c b/block/badblocks.c
index 7be53cb1cc3c..6610e282a03e 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -354,7 +354,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
* current range. Earlier ranges could also overlap,
* but only this one can overlap the end of the range.
*/
- if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
+ if ((BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* Partial overlap, leave the tail of this range */
int ack = BB_ACK(p[lo]);
sector_t a = BB_OFFSET(p[lo]);
@@ -377,7 +378,8 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
lo--;
}
while (lo >= 0 &&
- BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
+ (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) &&
+ (BB_OFFSET(p[lo]) < target)) {
/* This range does overlap */
if (BB_OFFSET(p[lo]) < s) {
/* Keep the early parts of this range. */
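
Both hunks add the same extra condition, BB_OFFSET(p[lo]) < target, so that an entry starting at or beyond the end of the cleared range is no longer treated as overlapping. A small worked example with made-up sector numbers, ignoring the shift handling for 512-byte granularity:

    /* Clearing s = 100, sectors = 8, so target = 108.
     * Entry p[lo]: offset 108, length 8 (covers [108, 116)).
     * Old test:  108 + 8 > 108                  -> true, wrongly "overlaps".
     * New test: (108 + 8 > 108) && (108 < 108)  -> false, entry untouched. */
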
diff --git a/drivers/Makefile b/drivers/Makefile
index f0afdfb3c7df..194d20bee7dc 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -21,7 +21,7 @@ obj-y += video/
obj-y += idle/
# IPMI must come before ACPI in order to provide IPMI opregion support
-obj-$(CONFIG_IPMI_HANDLER) += char/ipmi/
+obj-y += char/ipmi/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_SFI) += sfi/
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index abb71628ab61..7b274ff4632c 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -415,15 +415,15 @@ struct rbd_device {
};
/*
- * Flag bits for rbd_dev->flags. If atomicity is required,
- * rbd_dev->lock is used to protect access.
- *
- * Currently, only the "removing" flag (which is coupled with the
- * "open_count" field) requires atomic access.
+ * Flag bits for rbd_dev->flags:
+ * - REMOVING (which is coupled with rbd_dev->open_count) is protected
+ * by rbd_dev->lock
+ * - BLACKLISTED is protected by rbd_dev->lock_rwsem
*/
enum rbd_dev_flags {
RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
+ RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
@@ -3926,6 +3926,7 @@ static void rbd_reregister_watch(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, watch_dwork);
bool was_lock_owner = false;
+ bool need_to_wake = false;
int ret;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
@@ -3935,19 +3936,27 @@ static void rbd_reregister_watch(struct work_struct *work)
was_lock_owner = rbd_release_lock(rbd_dev);
mutex_lock(&rbd_dev->watch_mutex);
- if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR)
- goto fail_unlock;
+ if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
+ }
ret = __rbd_register_watch(rbd_dev);
if (ret) {
rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
- if (ret != -EBLACKLISTED)
+ if (ret == -EBLACKLISTED || ret == -ENOENT) {
+ set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
+ need_to_wake = true;
+ } else {
queue_delayed_work(rbd_dev->task_wq,
&rbd_dev->watch_dwork,
RBD_RETRY_DELAY);
- goto fail_unlock;
+ }
+ mutex_unlock(&rbd_dev->watch_mutex);
+ goto out;
}
+ need_to_wake = true;
rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
mutex_unlock(&rbd_dev->watch_mutex);
@@ -3963,13 +3972,10 @@ static void rbd_reregister_watch(struct work_struct *work)
ret);
}
+out:
up_write(&rbd_dev->lock_rwsem);
- wake_requests(rbd_dev, true);
- return;
-
-fail_unlock:
- mutex_unlock(&rbd_dev->watch_mutex);
- up_write(&rbd_dev->lock_rwsem);
+ if (need_to_wake)
+ wake_requests(rbd_dev, true);
}
/*
@@ -4074,7 +4080,9 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
up_read(&rbd_dev->lock_rwsem);
schedule();
down_read(&rbd_dev->lock_rwsem);
- } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
+ } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+
finish_wait(&rbd_dev->lock_waitq, &wait);
}
@@ -4166,8 +4174,16 @@ static void rbd_queue_workfn(struct work_struct *work)
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
- if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
+ if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
rbd_wait_state_locked(rbd_dev);
+
+ WARN_ON((rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) ^
+ !test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
+ if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
+ result = -EBLACKLISTED;
+ goto err_unlock;
+ }
}
img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 0c6da1efa216..647d95d98196 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -173,4 +173,13 @@ config VEXPRESS_CONFIG
help
Platform configuration infrastructure for the ARM Ltd.
Versatile Express.
+
+config DA8XX_MSTPRI
+ bool "TI da8xx master peripheral priority driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ Driver for Texas Instruments da8xx master peripheral priority
+ configuration. Allows adjusting the priorities of all master
+ peripherals.
+
endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index a679d00fc4eb..cc6364bec054 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o
obj-$(CONFIG_TEGRA_GMI) += tegra-gmi.o
obj-$(CONFIG_UNIPHIER_SYSTEM_BUS) += uniphier-system-bus.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
+
+obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
diff --git a/drivers/bus/da8xx-mstpri.c b/drivers/bus/da8xx-mstpri.c
new file mode 100644
index 000000000000..85f0b533f8e4
--- /dev/null
+++ b/drivers/bus/da8xx-mstpri.c
@@ -0,0 +1,269 @@
+/*
+ * TI da8xx master peripheral priority driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/regmap.h>
+#include <linux/of_fdt.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties as it deals
+ * with hardware configuration rather than description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+#define DA8XX_MSTPRI0_OFFSET 0
+#define DA8XX_MSTPRI1_OFFSET 4
+#define DA8XX_MSTPRI2_OFFSET 8
+
+enum {
+ DA8XX_MSTPRI_ARM_I = 0,
+ DA8XX_MSTPRI_ARM_D,
+ DA8XX_MSTPRI_UPP,
+ DA8XX_MSTPRI_SATA,
+ DA8XX_MSTPRI_PRU0,
+ DA8XX_MSTPRI_PRU1,
+ DA8XX_MSTPRI_EDMA30TC0,
+ DA8XX_MSTPRI_EDMA30TC1,
+ DA8XX_MSTPRI_EDMA31TC0,
+ DA8XX_MSTPRI_VPIF_DMA_0,
+ DA8XX_MSTPRI_VPIF_DMA_1,
+ DA8XX_MSTPRI_EMAC,
+ DA8XX_MSTPRI_USB0CFG,
+ DA8XX_MSTPRI_USB0CDMA,
+ DA8XX_MSTPRI_UHPI,
+ DA8XX_MSTPRI_USB1,
+ DA8XX_MSTPRI_LCDC,
+};
+
+struct da8xx_mstpri_descr {
+ int reg;
+ int shift;
+ int mask;
+};
+
+static const struct da8xx_mstpri_descr da8xx_mstpri_priority_list[] = {
+ [DA8XX_MSTPRI_ARM_I] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_ARM_D] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_UPP] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_SATA] = {
+ .reg = DA8XX_MSTPRI0_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_PRU0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_PRU1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 4,
+ .mask = 0x000000f0,
+ },
+ [DA8XX_MSTPRI_EDMA30TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_EDMA30TC1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_EDMA31TC0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 16,
+ .mask = 0x000f0000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_0] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_VPIF_DMA_1] = {
+ .reg = DA8XX_MSTPRI1_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+ [DA8XX_MSTPRI_EMAC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 0,
+ .mask = 0x0000000f,
+ },
+ [DA8XX_MSTPRI_USB0CFG] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 8,
+ .mask = 0x00000f00,
+ },
+ [DA8XX_MSTPRI_USB0CDMA] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 12,
+ .mask = 0x0000f000,
+ },
+ [DA8XX_MSTPRI_UHPI] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 20,
+ .mask = 0x00f00000,
+ },
+ [DA8XX_MSTPRI_USB1] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 24,
+ .mask = 0x0f000000,
+ },
+ [DA8XX_MSTPRI_LCDC] = {
+ .reg = DA8XX_MSTPRI2_OFFSET,
+ .shift = 28,
+ .mask = 0xf0000000,
+ },
+};
+
+struct da8xx_mstpri_priority {
+ int which;
+ u32 val;
+};
+
+struct da8xx_mstpri_board_priorities {
+ const char *board;
+ const struct da8xx_mstpri_priority *priorities;
+ size_t numprio;
+};
+
+/*
+ * Default memory settings of da850 do not meet the throughput/latency
+ * requirements of tilcdc. This results in the image displayed being
+ * incorrect and the following warning being displayed by the LCDC
+ * drm driver:
+ *
+ * tilcdc da8xx_lcdc.0: tilcdc_crtc_irq(0x00000020): FIFO underfow
+ */
+static const struct da8xx_mstpri_priority da850_lcdk_priorities[] = {
+ {
+ .which = DA8XX_MSTPRI_LCDC,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC1,
+ .val = 0,
+ },
+ {
+ .which = DA8XX_MSTPRI_EDMA30TC0,
+ .val = 1,
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities da8xx_mstpri_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .priorities = da850_lcdk_priorities,
+ .numprio = ARRAY_SIZE(da850_lcdk_priorities),
+ },
+};
+
+static const struct da8xx_mstpri_board_priorities *
+da8xx_mstpri_get_board_prio(void)
+{
+ const struct da8xx_mstpri_board_priorities *board_prio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_mstpri_board_confs); i++) {
+ board_prio = &da8xx_mstpri_board_confs[i];
+
+ if (of_machine_is_compatible(board_prio->board))
+ return board_prio;
+ }
+
+ return NULL;
+}
+
+static int da8xx_mstpri_probe(struct platform_device *pdev)
+{
+ const struct da8xx_mstpri_board_priorities *prio_list;
+ const struct da8xx_mstpri_descr *prio_descr;
+ const struct da8xx_mstpri_priority *prio;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *mstpri;
+ u32 reg;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mstpri = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mstpri)) {
+ dev_err(dev, "unable to map MSTPRI registers\n");
+ return PTR_ERR(mstpri);
+ }
+
+ prio_list = da8xx_mstpri_get_board_prio();
+ if (!prio_list) {
+ dev_err(dev, "no master priotities defined for board '%s'\n",
+ of_flat_dt_get_machine_name());
+ return -EINVAL;
+ }
+
+ for (i = 0; i < prio_list->numprio; i++) {
+ prio = &prio_list->priorities[i];
+ prio_descr = &da8xx_mstpri_priority_list[prio->which];
+
+ if (prio_descr->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev, "register offset out of range\n");
+ continue;
+ }
+
+ reg = readl(mstpri + prio_descr->reg);
+ reg &= ~prio_descr->mask;
+ reg |= prio->val << prio_descr->shift;
+
+ writel(reg, mstpri + prio_descr->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_mstpri_of_match[] = {
+ { .compatible = "ti,da850-mstpri", },
+ { },
+};
+
+static struct platform_driver da8xx_mstpri_driver = {
+ .probe = da8xx_mstpri_probe,
+ .driver = {
+ .name = "da8xx-mstpri",
+ .of_match_table = da8xx_mstpri_of_match,
+ },
+};
+module_platform_driver(da8xx_mstpri_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx master peripheral priority driver");
+MODULE_LICENSE("GPL v2");
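
As the REVISIT comment in the new file notes, board support is hardcoded in driver tables rather than described in the device tree. Under that scheme, supporting another board would mean adding one more table entry; a hypothetical sketch, with the board compatible and priority values invented purely for illustration:

    static const struct da8xx_mstpri_priority some_board_priorities[] = {
            { .which = DA8XX_MSTPRI_EMAC, .val = 0, },      /* hypothetical */
    };

    /* ...plus one more element in da8xx_mstpri_board_confs[]: */
            {
                    .board          = "ti,some-other-board",  /* hypothetical */
                    .priorities     = some_board_priorities,
                    .numprio        = ARRAY_SIZE(some_board_priorities),
            },
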
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 5a9350b1069a..7f816655cbbf 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -76,3 +76,11 @@ config IPMI_POWEROFF
the IPMI management controller is capable of this.
endif # IPMI_HANDLER
+
+config ASPEED_BT_IPMI_BMC
+ depends on ARCH_ASPEED
+ tristate "BT IPMI bmc driver"
+ help
+ Provides a driver for the BT (Block Transfer) IPMI interface
+ found on Aspeed SOCs (AST2400 and AST2500). The driver
+ implements the BMC side of the BT interface.
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index f3ffde1f5f1f..0d98cd91def1 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
+obj-$(CONFIG_ASPEED_BT_IPMI_BMC) += bt-bmc.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
new file mode 100644
index 000000000000..b49e61320952
--- /dev/null
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/bt-bmc.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+/*
+ * This is a BMC device used to communicate to the host
+ */
+#define DEVICE_NAME "ipmi-bt-host"
+
+#define BT_IO_BASE 0xe4
+#define BT_IRQ 10
+
+#define BT_CR0 0x0
+#define BT_CR0_IO_BASE 16
+#define BT_CR0_IRQ 12
+#define BT_CR0_EN_CLR_SLV_RDP 0x8
+#define BT_CR0_EN_CLR_SLV_WRP 0x4
+#define BT_CR0_ENABLE_IBT 0x1
+#define BT_CR1 0x4
+#define BT_CR1_IRQ_H2B 0x01
+#define BT_CR1_IRQ_HBUSY 0x40
+#define BT_CR2 0x8
+#define BT_CR2_IRQ_H2B 0x01
+#define BT_CR2_IRQ_HBUSY 0x40
+#define BT_CR3 0xc
+#define BT_CTRL 0x10
+#define BT_CTRL_B_BUSY 0x80
+#define BT_CTRL_H_BUSY 0x40
+#define BT_CTRL_OEM0 0x20
+#define BT_CTRL_SMS_ATN 0x10
+#define BT_CTRL_B2H_ATN 0x08
+#define BT_CTRL_H2B_ATN 0x04
+#define BT_CTRL_CLR_RD_PTR 0x02
+#define BT_CTRL_CLR_WR_PTR 0x01
+#define BT_BMC2HOST 0x14
+#define BT_INTMASK 0x18
+#define BT_INTMASK_B2H_IRQEN 0x01
+#define BT_INTMASK_B2H_IRQ 0x02
+#define BT_INTMASK_BMC_HWRST 0x80
+
+#define BT_BMC_BUFFER_SIZE 256
+
+struct bt_bmc {
+ struct device dev;
+ struct miscdevice miscdev;
+ void __iomem *base;
+ int irq;
+ wait_queue_head_t queue;
+ struct timer_list poll_timer;
+ struct mutex mutex;
+};
+
+static atomic_t open_count = ATOMIC_INIT(0);
+
+static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
+{
+ return ioread8(bt_bmc->base + reg);
+}
+
+static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
+{
+ iowrite8(data, bt_bmc->base + reg);
+}
+
+static void clr_rd_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_RD_PTR, BT_CTRL);
+}
+
+static void clr_wr_ptr(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_CLR_WR_PTR, BT_CTRL);
+}
+
+static void clr_h2b_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_H2B_ATN, BT_CTRL);
+}
+
+static void set_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY))
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void clr_b_busy(struct bt_bmc *bt_bmc)
+{
+ if (bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_B_BUSY)
+ bt_outb(bt_bmc, BT_CTRL_B_BUSY, BT_CTRL);
+}
+
+static void set_b2h_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_B2H_ATN, BT_CTRL);
+}
+
+static u8 bt_read(struct bt_bmc *bt_bmc)
+{
+ return bt_inb(bt_bmc, BT_BMC2HOST);
+}
+
+static ssize_t bt_readn(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ buf[i] = bt_read(bt_bmc);
+ return n;
+}
+
+static void bt_write(struct bt_bmc *bt_bmc, u8 c)
+{
+ bt_outb(bt_bmc, c, BT_BMC2HOST);
+}
+
+static ssize_t bt_writen(struct bt_bmc *bt_bmc, u8 *buf, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ bt_write(bt_bmc, buf[i]);
+ return n;
+}
+
+static void set_sms_atn(struct bt_bmc *bt_bmc)
+{
+ bt_outb(bt_bmc, BT_CTRL_SMS_ATN, BT_CTRL);
+}
+
+static struct bt_bmc *file_bt_bmc(struct file *file)
+{
+ return container_of(file->private_data, struct bt_bmc, miscdev);
+}
+
+static int bt_bmc_open(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ if (atomic_inc_return(&open_count) == 1) {
+ clr_b_busy(bt_bmc);
+ return 0;
+ }
+
+ atomic_dec(&open_count);
+ return -EBUSY;
+}
+
+/*
+ * The BT (Block Transfer) interface means that entire messages are
+ * buffered by the host before a notification is sent to the BMC that
+ * there is data to be read. The first byte is the length and the
+ * message data follows. The read operation just tries to capture the
+ * whole before returning it to userspace.
+ *
+ * BT Message format :
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5:N
+ * Length NetFn/LUN Seq Cmd Data
+ *
+ */
+static ssize_t bt_bmc_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 len;
+ int len_byte = 1;
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nread;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ if (wait_event_interruptible(bt_bmc->queue,
+ bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(!(bt_inb(bt_bmc, BT_CTRL) & BT_CTRL_H2B_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ set_b_busy(bt_bmc);
+ clr_h2b_atn(bt_bmc);
+ clr_rd_ptr(bt_bmc);
+
+ /*
+ * The BT frames start with the message length, which does not
+ * include the length byte.
+ */
+ kbuffer[0] = bt_read(bt_bmc);
+ len = kbuffer[0];
+
+ /* We pass the length back to userspace as well */
+ if (len + 1 > count)
+ len = count - 1;
+
+ while (len) {
+ nread = min_t(ssize_t, len, sizeof(kbuffer) - len_byte);
+
+ bt_readn(bt_bmc, kbuffer + len_byte, nread);
+
+ if (copy_to_user(buf, kbuffer, nread + len_byte)) {
+ ret = -EFAULT;
+ break;
+ }
+ len -= nread;
+ buf += nread + len_byte;
+ ret += nread + len_byte;
+ len_byte = 0;
+ }
+
+ clr_b_busy(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+/*
+ * BT Message response format:
+ *
+ * Byte 1 Byte 2 Byte 3 Byte 4 Byte 5 Byte 6:N
+ * Length NetFn/LUN Seq Cmd Code Data
+ */
+static ssize_t bt_bmc_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ u8 kbuffer[BT_BMC_BUFFER_SIZE];
+ ssize_t ret = 0;
+ ssize_t nwritten;
+
+ /*
+ * Require at least the minimum response size: length, NetFn/LUN,
+ * Seq, Cmd and completion code.
+ */
+ if (count < 5)
+ return -EINVAL;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ WARN_ON(*ppos);
+
+ /*
+ * There's no interrupt for clearing bmc busy so we have to
+ * poll
+ */
+ if (wait_event_interruptible(bt_bmc->queue,
+ !(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))))
+ return -ERESTARTSYS;
+
+ mutex_lock(&bt_bmc->mutex);
+
+ if (unlikely(bt_inb(bt_bmc, BT_CTRL) &
+ (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN))) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ clr_wr_ptr(bt_bmc);
+
+ while (count) {
+ nwritten = min_t(ssize_t, count, sizeof(kbuffer));
+ if (copy_from_user(&kbuffer, buf, nwritten)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ bt_writen(bt_bmc, kbuffer, nwritten);
+
+ count -= nwritten;
+ buf += nwritten;
+ ret += nwritten;
+ }
+
+ set_b2h_atn(bt_bmc);
+
+out_unlock:
+ mutex_unlock(&bt_bmc->mutex);
+ return ret;
+}
+
+static long bt_bmc_ioctl(struct file *file, unsigned int cmd,
+ unsigned long param)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ switch (cmd) {
+ case BT_BMC_IOCTL_SMS_ATN:
+ set_sms_atn(bt_bmc);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int bt_bmc_release(struct inode *inode, struct file *file)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+
+ atomic_dec(&open_count);
+ set_b_busy(bt_bmc);
+ return 0;
+}
+
+static unsigned int bt_bmc_poll(struct file *file, poll_table *wait)
+{
+ struct bt_bmc *bt_bmc = file_bt_bmc(file);
+ unsigned int mask = 0;
+ u8 ctrl;
+
+ poll_wait(file, &bt_bmc->queue, wait);
+
+ ctrl = bt_inb(bt_bmc, BT_CTRL);
+
+ if (ctrl & BT_CTRL_H2B_ATN)
+ mask |= POLLIN;
+
+ if (!(ctrl & (BT_CTRL_H_BUSY | BT_CTRL_B2H_ATN)))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+static const struct file_operations bt_bmc_fops = {
+ .owner = THIS_MODULE,
+ .open = bt_bmc_open,
+ .read = bt_bmc_read,
+ .write = bt_bmc_write,
+ .release = bt_bmc_release,
+ .poll = bt_bmc_poll,
+ .unlocked_ioctl = bt_bmc_ioctl,
+};
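+
+/*
+ * Intended userspace flow for the misc device registered below (a sketch
+ * only; the device node path follows DEVICE_NAME and is an assumption):
+ *
+ * int fd = open("/dev/<DEVICE_NAME>", O_RDWR);
+ * struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ * unsigned char req[256], rsp[5];
+ *
+ * poll(&pfd, 1, -1);
+ * read(fd, req, sizeof(req));
+ *
+ * req[0] is the length byte, req[1..3] are NetFn/LUN, Seq and Cmd. A
+ * minimal success response echoes Seq and Cmd, bumps the NetFn and adds
+ * a completion code:
+ *
+ * rsp[0] = 4;
+ * rsp[1] = req[1] | 0x04;
+ * rsp[2] = req[2];
+ * rsp[3] = req[3];
+ * rsp[4] = 0x00;
+ * write(fd, rsp, sizeof(rsp));
+ */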
+
+static void poll_timer(unsigned long data)
+{
+ struct bt_bmc *bt_bmc = (void *)data;
+
+ bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
+ wake_up(&bt_bmc->queue);
+ add_timer(&bt_bmc->poll_timer);
+}
+
+static irqreturn_t bt_bmc_irq(int irq, void *arg)
+{
+ struct bt_bmc *bt_bmc = arg;
+ u32 reg;
+
+ reg = ioread32(bt_bmc->base + BT_CR2);
+ reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
+ if (!reg)
+ return IRQ_NONE;
+
+ /* ack pending IRQs */
+ iowrite32(reg, bt_bmc->base + BT_CR2);
+
+ wake_up(&bt_bmc->queue);
+ return IRQ_HANDLED;
+}
+
+static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ u32 reg;
+ int rc;
+
+ bt_bmc->irq = platform_get_irq(pdev, 0);
+ if (bt_bmc->irq <= 0) {
+ bt_bmc->irq = 0;
+ return -ENODEV;
+ }
+
+ rc = devm_request_irq(dev, bt_bmc->irq, bt_bmc_irq, IRQF_SHARED,
+ DEVICE_NAME, bt_bmc);
+ if (rc < 0) {
+ dev_warn(dev, "Unable to request IRQ %d\n", bt_bmc->irq);
+ bt_bmc->irq = 0;
+ return rc;
+ }
+
+ /*
+ * Configure IRQs on the bmc clearing the H2B and HBUSY bits;
+ * H2B will be asserted when the bmc has data for us; HBUSY
+ * will be cleared (along with B2H) when we can write the next
+ * message to the BT buffer
+ */
+ reg = ioread32(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ iowrite32(reg, bt_bmc->base + BT_CR1);
+
+ return 0;
+}
+
+static int bt_bmc_probe(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc;
+ struct device *dev;
+ struct resource *res;
+ int rc;
+
+ if (!pdev || !pdev->dev.of_node)
+ return -ENODEV;
+
+ dev = &pdev->dev;
+ dev_info(dev, "Found bt bmc device\n");
+
+ bt_bmc = devm_kzalloc(dev, sizeof(*bt_bmc), GFP_KERNEL);
+ if (!bt_bmc)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, bt_bmc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bt_bmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
+
+ mutex_init(&bt_bmc->mutex);
+ init_waitqueue_head(&bt_bmc->queue);
+
+ bt_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ bt_bmc->miscdev.name = DEVICE_NAME;
+ bt_bmc->miscdev.fops = &bt_bmc_fops;
+ bt_bmc->miscdev.parent = dev;
+ rc = misc_register(&bt_bmc->miscdev);
+ if (rc) {
+ dev_err(dev, "Unable to register misc device\n");
+ return rc;
+ }
+
+ bt_bmc_config_irq(bt_bmc, pdev);
+
+ if (bt_bmc->irq) {
+ dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
+ } else {
+ dev_info(dev, "No IRQ; using timer\n");
+ setup_timer(&bt_bmc->poll_timer, poll_timer,
+ (unsigned long)bt_bmc);
+ bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
+ add_timer(&bt_bmc->poll_timer);
+ }
+
+ iowrite32((BT_IO_BASE << BT_CR0_IO_BASE) |
+ (BT_IRQ << BT_CR0_IRQ) |
+ BT_CR0_EN_CLR_SLV_RDP |
+ BT_CR0_EN_CLR_SLV_WRP |
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
+
+ clr_b_busy(bt_bmc);
+
+ return 0;
+}
+
+static int bt_bmc_remove(struct platform_device *pdev)
+{
+ struct bt_bmc *bt_bmc = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&bt_bmc->miscdev);
+ if (!bt_bmc->irq)
+ del_timer_sync(&bt_bmc->poll_timer);
+ return 0;
+}
+
+static const struct of_device_id bt_bmc_match[] = {
+ { .compatible = "aspeed,ast2400-bt-bmc" },
+ { },
+};
+
+static struct platform_driver bt_bmc_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = bt_bmc_match,
+ },
+ .probe = bt_bmc_probe,
+ .remove = bt_bmc_remove,
+};
+
+module_platform_driver(bt_bmc_driver);
+
+MODULE_DEVICE_TABLE(of, bt_bmc_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>");
+MODULE_DESCRIPTION("Linux device interface to the BT interface");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index d8619998cfb5..fcdd886819f5 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2891,11 +2891,11 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
intf->curr_channel = IPMI_MAX_CHANNELS;
}
+ rv = ipmi_bmc_register(intf, i);
+
if (rv == 0)
rv = add_proc_entries(intf, i);
- rv = ipmi_bmc_register(intf, i);
-
out:
if (rv) {
if (intf->proc_dir)
@@ -2982,8 +2982,6 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
int intf_num = intf->intf_num;
ipmi_user_t user;
- ipmi_bmc_unregister(intf);
-
mutex_lock(&smi_watchers_mutex);
mutex_lock(&ipmi_interfaces_mutex);
intf->intf_num = -1;
@@ -3007,6 +3005,7 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
mutex_unlock(&ipmi_interfaces_mutex);
remove_proc_entries(intf);
+ ipmi_bmc_unregister(intf);
/*
* Call all the watcher interfaces to tell them that
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 245190839359..e2c6e43cf8ca 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -417,6 +417,16 @@ config SYS_SUPPORTS_SH_TMU
config SYS_SUPPORTS_EM_STI
bool
+config CLKSRC_JCORE_PIT
+ bool "J-Core PIT timer driver" if COMPILE_TEST
+ depends on OF
+ depends on GENERIC_CLOCKEVENTS
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
+ help
+ This enables building the clocksource and clockevent driver for
+ the integrated PIT in the J-Core synthesizable, open source SoC.
+
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fd9d6df0bbc0..cf87f407f1ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+obj-$(CONFIG_CLKSRC_JCORE_PIT) += jcore-pit.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
new file mode 100644
index 000000000000..54e1665aa03c
--- /dev/null
+++ b/drivers/clocksource/jcore-pit.c
@@ -0,0 +1,249 @@
+/*
+ * J-Core SoC PIT/clocksource driver
+ *
+ * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/cpuhotplug.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#define PIT_IRQ_SHIFT 12
+#define PIT_PRIO_SHIFT 20
+#define PIT_ENABLE_SHIFT 26
+#define PIT_PRIO_MASK 0xf
+
+#define REG_PITEN 0x00
+#define REG_THROT 0x10
+#define REG_COUNT 0x14
+#define REG_BUSPD 0x18
+#define REG_SECHI 0x20
+#define REG_SECLO 0x24
+#define REG_NSEC 0x28
+
+struct jcore_pit {
+ struct clock_event_device ced;
+ void __iomem *base;
+ unsigned long periodic_delta;
+ u32 enable_val;
+};
+
+static void __iomem *jcore_pit_base;
+static struct jcore_pit __percpu *jcore_pit_percpu;
+
+static notrace u64 jcore_sched_clock_read(void)
+{
+ u32 seclo, nsec, seclo0;
+ __iomem void *base = jcore_pit_base;
+
+ seclo = readl(base + REG_SECLO);
+ do {
+ seclo0 = seclo;
+ nsec = readl(base + REG_NSEC);
+ seclo = readl(base + REG_SECLO);
+ } while (seclo0 != seclo);
+
+ return seclo * NSEC_PER_SEC + nsec;
+}
+
+static cycle_t jcore_clocksource_read(struct clocksource *cs)
+{
+ return jcore_sched_clock_read();
+}
+
+static int jcore_pit_disable(struct jcore_pit *pit)
+{
+ writel(0, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set(unsigned long delta, struct jcore_pit *pit)
+{
+ jcore_pit_disable(pit);
+ writel(delta, pit->base + REG_THROT);
+ writel(pit->enable_val, pit->base + REG_PITEN);
+ return 0;
+}
+
+static int jcore_pit_set_state_shutdown(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_oneshot(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_disable(pit);
+}
+
+static int jcore_pit_set_state_periodic(struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(pit->periodic_delta, pit);
+}
+
+static int jcore_pit_set_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct jcore_pit *pit = container_of(ced, struct jcore_pit, ced);
+
+ return jcore_pit_set(delta, pit);
+}
+
+static int jcore_pit_local_init(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+ unsigned buspd, freq;
+
+ pr_info("Local J-Core PIT init on cpu %u\n", cpu);
+
+ buspd = readl(pit->base + REG_BUSPD);
+ freq = DIV_ROUND_CLOSEST(NSEC_PER_SEC, buspd);
+ pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
+
+ clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static irqreturn_t jcore_timer_interrupt(int irq, void *dev_id)
+{
+ struct jcore_pit *pit = this_cpu_ptr(dev_id);
+
+ if (clockevent_state_oneshot(&pit->ced))
+ jcore_pit_disable(pit);
+
+ pit->ced.event_handler(&pit->ced);
+
+ return IRQ_HANDLED;
+}
+
+static int __init jcore_pit_init(struct device_node *node)
+{
+ int err;
+ unsigned pit_irq, cpu;
+ unsigned long hwirq;
+ u32 irqprio, enable_val;
+
+ jcore_pit_base = of_iomap(node, 0);
+ if (!jcore_pit_base) {
+ pr_err("Error: Cannot map base address for J-Core PIT\n");
+ return -ENXIO;
+ }
+
+ pit_irq = irq_of_parse_and_map(node, 0);
+ if (!pit_irq) {
+ pr_err("Error: J-Core PIT has no IRQ\n");
+ return -ENXIO;
+ }
+
+ pr_info("Initializing J-Core PIT at %p IRQ %d\n",
+ jcore_pit_base, pit_irq);
+
+ err = clocksource_mmio_init(jcore_pit_base, "jcore_pit_cs",
+ NSEC_PER_SEC, 400, 32,
+ jcore_clocksource_read);
+ if (err) {
+ pr_err("Error registering clocksource device: %d\n", err);
+ return err;
+ }
+
+ sched_clock_register(jcore_sched_clock_read, 32, NSEC_PER_SEC);
+
+ jcore_pit_percpu = alloc_percpu(struct jcore_pit);
+ if (!jcore_pit_percpu) {
+ pr_err("Failed to allocate memory for clock event device\n");
+ return -ENOMEM;
+ }
+
+ err = request_irq(pit_irq, jcore_timer_interrupt,
+ IRQF_TIMER | IRQF_PERCPU,
+ "jcore_pit", jcore_pit_percpu);
+ if (err) {
+ pr_err("pit irq request failed: %d\n", err);
+ free_percpu(jcore_pit_percpu);
+ return err;
+ }
+
+ /*
+ * The J-Core PIT is not hard-wired to a particular IRQ, but
+ * integrated with the interrupt controller such that the IRQ it
+ * generates is programmable, as follows:
+ *
+ * The bit layout of the PIT enable register is:
+ *
+ * .....e..ppppiiiiiiii............
+ *
+ * where the .'s indicate unrelated/unused bits, e is enable,
+ * p is priority, and i is hard irq number.
+ *
+ * For the PIT included in AIC1 (obsolete but still in use),
+ * any hard irq (trap number) can be programmed via the 8
+ * iiiiiiii bits, and a priority (0-15) is programmable
+ * separately in the pppp bits.
+ *
+ * For the PIT included in AIC2 (current), the programming
+ * interface is equivalent modulo interrupt mapping. This is
+ * why a different compatible tag was not used. However only
+ * traps 64-127 (the ones actually intended to be used for
+ * interrupts, rather than syscalls/exceptions/etc.) can be
+ * programmed (the high 2 bits of i are ignored) and the
+ * priority pppp is <<2'd and or'd onto the irq number. This
+ * choice seems to have been made on the hardware engineering
+ * side under an assumption that preserving old AIC1 priority
+ * mappings was important. Future models will likely ignore
+ * the pppp field.
+ */
+ hwirq = irq_get_irq_data(pit_irq)->hwirq;
+ irqprio = (hwirq >> 2) & PIT_PRIO_MASK;
+ enable_val = (1U << PIT_ENABLE_SHIFT)
+ | (hwirq << PIT_IRQ_SHIFT)
+ | (irqprio << PIT_PRIO_SHIFT);
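+
+ /*
+ * Worked example (illustrative values only): a hardware irq of 78
+ * (0x4e, within the 64-127 interrupt trap range) gives a derived
+ * priority of (78 >> 2) & 0xf = 3 and an enable value of
+ * (1 << 26) | (78 << 12) | (3 << 20) = 0x0434e000.
+ */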
+
+ for_each_present_cpu(cpu) {
+ struct jcore_pit *pit = per_cpu_ptr(jcore_pit_percpu, cpu);
+
+ pit->base = of_iomap(node, cpu);
+ if (!pit->base) {
+ pr_err("Unable to map PIT for cpu %u\n", cpu);
+ continue;
+ }
+
+ pit->ced.name = "jcore_pit";
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_PERCPU;
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.rating = 400;
+ pit->ced.irq = pit_irq;
+ pit->ced.set_state_shutdown = jcore_pit_set_state_shutdown;
+ pit->ced.set_state_periodic = jcore_pit_set_state_periodic;
+ pit->ced.set_state_oneshot = jcore_pit_set_state_oneshot;
+ pit->ced.set_next_event = jcore_pit_set_next_event;
+
+ pit->enable_val = enable_val;
+ }
+
+ cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
+ "AP_JCORE_TIMER_STARTING",
+ jcore_pit_local_init, NULL);
+
+ return 0;
+}
+
+CLOCKSOURCE_OF_DECLARE(jcore_pit, "jcore,pit", jcore_pit_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index c184eb84101e..4f87f3e76d83 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -152,6 +152,13 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static cycle_t sun5i_clksrc_read(struct clocksource *clksrc)
+{
+ struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+
+ return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+}
+
static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -210,8 +217,13 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- ret = clocksource_mmio_init(base + TIMER_CNTVAL_LO_REG(1), node->name,
- rate, 340, 32, clocksource_mmio_readl_down);
+ cs->clksrc.name = node->name;
+ cs->clksrc.rating = 340;
+ cs->clksrc.read = sun5i_clksrc_read;
+ cs->clksrc.mask = CLOCKSOURCE_MASK(32);
+ cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
pr_err("Couldn't register clock source.\n");
goto err_remove_notifier;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 631c977b0da5..180f0a96528c 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -566,6 +566,11 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
+ if (lynx->registers == NULL) {
+ dev_err(&dev->dev, "Failed to map registers\n");
+ ret = -ENOMEM;
+ goto fail_deallocate_lynx;
+ }
lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
@@ -578,7 +583,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
@@ -641,7 +646,7 @@ add_card(struct pci_dev *dev, const struct pci_device_id *unused)
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
- goto fail_deallocate;
+ goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
@@ -668,7 +673,7 @@ fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
-fail_deallocate:
+fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
@@ -679,6 +684,8 @@ fail_deallocate:
pci_free_consistent(lynx->pci_device, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
+
+fail_deallocate_lynx:
kfree(lynx);
fail_disable:
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d42c74..9968de04d1d5 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -210,5 +210,6 @@ source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/tegra/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41fa8b3..2afe75c52ac2 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -26,3 +26,4 @@ obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
obj-$(CONFIG_UEFI_CPER) += efi/
+obj-y += tegra/
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c06945160a41..5e23e2d305e7 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,7 +11,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS))
-cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
+cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) -g0 \
-fno-builtin -fpic -mno-single-pic-base
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
@@ -79,5 +79,6 @@ quiet_cmd_stubcopy = STUBCPY $@
# decompressor. So move our .data to .data.efistub, which is preserved
# explicitly by the decompressor linker script.
#
-STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
+STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub \
+ -R ___ksymtab+sort -R ___kcrctab+sort
STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index d95c70227c05..9bf66aefdbd0 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -356,7 +356,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
scm->reset.ops = &qcom_scm_pas_reset_ops;
scm->reset.nr_resets = 1;
scm->reset.of_node = pdev->dev.of_node;
- reset_controller_register(&scm->reset);
+ ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
+ if (ret)
+ return ret;
/* vote for max clk rate for highest performance */
ret = clk_set_rate(scm->core_clk, INT_MAX);
diff --git a/drivers/firmware/tegra/Kconfig b/drivers/firmware/tegra/Kconfig
new file mode 100644
index 000000000000..ff2730d5c468
--- /dev/null
+++ b/drivers/firmware/tegra/Kconfig
@@ -0,0 +1,25 @@
+menu "Tegra firmware driver"
+
+config TEGRA_IVC
+ bool "Tegra IVC protocol"
+ depends on ARCH_TEGRA
+ help
+ The IVC (Inter-VM Communication) protocol is part of the IPC
+ (Inter Processor Communication) framework on Tegra. It maintains the
+ data and the different communication channels in SysRAM or RAM and
+ keeps the content synchronized between the host CPU and remote
+ processors.
+
+config TEGRA_BPMP
+ bool "Tegra BPMP driver"
+ depends on ARCH_TEGRA && TEGRA_HSP_MBOX && TEGRA_IVC
+ help
+ The BPMP (Boot and Power Management Processor) is designed to off-load
+ PM functions such as clock/DVFS/thermal/power management from the CPU.
+ It needs the HSP module for HW synchronization and notification and the
+ IVC module as the message communication protocol.
+
+ This driver manages the IPC interface between host CPU and the
+ firmware running on BPMP.
+
+endmenu
diff --git a/drivers/firmware/tegra/Makefile b/drivers/firmware/tegra/Makefile
new file mode 100644
index 000000000000..e34a2f79e1ad
--- /dev/null
+++ b/drivers/firmware/tegra/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_TEGRA_BPMP) += bpmp.o
+obj-$(CONFIG_TEGRA_IVC) += ivc.o
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
new file mode 100644
index 000000000000..4ff02d310868
--- /dev/null
+++ b/drivers/firmware/tegra/bpmp.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/clk/tegra.h>
+#include <linux/genalloc.h>
+#include <linux/mailbox_client.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/semaphore.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+#include <soc/tegra/ivc.h>
+
+#define MSG_ACK BIT(0)
+#define MSG_RING BIT(1)
+
+static inline struct tegra_bpmp *
+mbox_client_to_bpmp(struct mbox_client *client)
+{
+ return container_of(client, struct tegra_bpmp, mbox.client);
+}
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct tegra_bpmp *bpmp;
+ struct device_node *np;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,bpmp", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ bpmp = ERR_PTR(-ENODEV);
+ goto put;
+ }
+
+ bpmp = platform_get_drvdata(pdev);
+ if (!bpmp) {
+ bpmp = ERR_PTR(-EPROBE_DEFER);
+ put_device(&pdev->dev);
+ goto put;
+ }
+
+put:
+ of_node_put(np);
+ return bpmp;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_get);
+
+void tegra_bpmp_put(struct tegra_bpmp *bpmp)
+{
+ if (bpmp)
+ put_device(bpmp->dev);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_put);
+
+static int tegra_bpmp_channel_get_index(struct tegra_bpmp_channel *channel)
+{
+ return channel - channel->bpmp->channels;
+}
+
+static int
+tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned int offset, count;
+ int index;
+
+ offset = bpmp->soc->channels.thread.offset;
+ count = bpmp->soc->channels.thread.count;
+
+ index = tegra_bpmp_channel_get_index(channel);
+ if (index < 0)
+ return index;
+
+ if (index < offset || index >= offset + count)
+ return -EINVAL;
+
+ return index - offset;
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_thread(struct tegra_bpmp *bpmp, unsigned int index)
+{
+ unsigned int offset = bpmp->soc->channels.thread.offset;
+ unsigned int count = bpmp->soc->channels.thread.count;
+
+ if (index >= count)
+ return NULL;
+
+ return &bpmp->channels[offset + index];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_tx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_tx.offset;
+
+ return &bpmp->channels[offset + smp_processor_id()];
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_channel_get_rx(struct tegra_bpmp *bpmp)
+{
+ unsigned int offset = bpmp->soc->channels.cpu_rx.offset;
+
+ return &bpmp->channels[offset];
+}
+
+static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message *msg)
+{
+ return (msg->tx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->rx.size <= MSG_DATA_MIN_SZ) &&
+ (msg->tx.size == 0 || msg->tx.data) &&
+ (msg->rx.size == 0 || msg->rx.data);
+}
+
+static bool tegra_bpmp_master_acked(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_read_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ib = NULL;
+ return false;
+ }
+
+ channel->ib = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_ack(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t end;
+
+ end = ktime_add_us(ktime_get(), timeout);
+
+ do {
+ if (tegra_bpmp_master_acked(channel))
+ return 0;
+ } while (ktime_before(ktime_get(), end));
+
+ return -ETIMEDOUT;
+}
+
+static bool tegra_bpmp_master_free(struct tegra_bpmp_channel *channel)
+{
+ void *frame;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (IS_ERR(frame)) {
+ channel->ob = NULL;
+ return false;
+ }
+
+ channel->ob = frame;
+
+ return true;
+}
+
+static int tegra_bpmp_wait_master_free(struct tegra_bpmp_channel *channel)
+{
+ unsigned long timeout = channel->bpmp->soc->channels.cpu_tx.timeout;
+ ktime_t start, now;
+
+ start = ns_to_ktime(local_clock());
+
+ do {
+ if (tegra_bpmp_master_free(channel))
+ return 0;
+
+ now = ns_to_ktime(local_clock());
+ } while (ktime_us_delta(now, start) < timeout);
+
+ return -ETIMEDOUT;
+}
+
+static ssize_t __tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ if (data && size > 0)
+ memcpy(data, channel->ib->data, size);
+
+ return tegra_ivc_read_advance(channel->ivc);
+}
+
+static ssize_t tegra_bpmp_channel_read(struct tegra_bpmp_channel *channel,
+ void *data, size_t size)
+{
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ unsigned long flags;
+ ssize_t err;
+ int index;
+
+ index = tegra_bpmp_channel_get_thread_index(channel);
+ if (index < 0)
+ return index;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+ err = __tegra_bpmp_channel_read(channel, data, size);
+ clear_bit(index, bpmp->threaded.allocated);
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ up(&bpmp->threaded.lock);
+
+ return err;
+}
+
+static ssize_t __tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ channel->ob->code = mrq;
+ channel->ob->flags = flags;
+
+ if (data && size > 0)
+ memcpy(channel->ob->data, data, size);
+
+ return tegra_ivc_write_advance(channel->ivc);
+}
+
+static struct tegra_bpmp_channel *
+tegra_bpmp_write_threaded(struct tegra_bpmp *bpmp, unsigned int mrq,
+ const void *data, size_t size)
+{
+ unsigned long timeout = bpmp->soc->channels.thread.timeout;
+ unsigned int count = bpmp->soc->channels.thread.count;
+ struct tegra_bpmp_channel *channel;
+ unsigned long flags;
+ unsigned int index;
+ int err;
+
+ err = down_timeout(&bpmp->threaded.lock, usecs_to_jiffies(timeout));
+ if (err < 0)
+ return ERR_PTR(err);
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ index = find_first_zero_bit(bpmp->threaded.allocated, count);
+ if (index == count) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, index);
+ if (!channel) {
+ channel = ERR_PTR(-EINVAL);
+ goto unlock;
+ }
+
+ if (!tegra_bpmp_master_free(channel)) {
+ channel = ERR_PTR(-EBUSY);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.allocated);
+
+ err = __tegra_bpmp_channel_write(channel, mrq, MSG_ACK | MSG_RING,
+ data, size);
+ if (err < 0) {
+ clear_bit(index, bpmp->threaded.allocated);
+ goto unlock;
+ }
+
+ set_bit(index, bpmp->threaded.busy);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+ return channel;
+}
+
+static ssize_t tegra_bpmp_channel_write(struct tegra_bpmp_channel *channel,
+ unsigned int mrq, unsigned long flags,
+ const void *data, size_t size)
+{
+ int err;
+
+ err = tegra_bpmp_wait_master_free(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_write(channel, mrq, flags, data, size);
+}
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ int err;
+
+ if (WARN_ON(!irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_channel_get_tx(bpmp);
+
+ err = tegra_bpmp_channel_write(channel, msg->mrq, MSG_ACK,
+ msg->tx.data, msg->tx.size);
+ if (err < 0)
+ return err;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ err = tegra_bpmp_wait_ack(channel);
+ if (err < 0)
+ return err;
+
+ return __tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic);
+
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg)
+{
+ struct tegra_bpmp_channel *channel;
+ unsigned long timeout;
+ int err;
+
+ if (WARN_ON(irqs_disabled()))
+ return -EPERM;
+
+ if (!tegra_bpmp_message_valid(msg))
+ return -EINVAL;
+
+ channel = tegra_bpmp_write_threaded(bpmp, msg->mrq, msg->tx.data,
+ msg->tx.size);
+ if (IS_ERR(channel))
+ return PTR_ERR(channel);
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return err;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+
+ timeout = usecs_to_jiffies(bpmp->soc->channels.thread.timeout);
+
+ err = wait_for_completion_timeout(&channel->completion, timeout);
+ if (err == 0)
+ return -ETIMEDOUT;
+
+ return tegra_bpmp_channel_read(channel, msg->rx.data, msg->rx.size);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_transfer);
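+
+/*
+ * Typical consumer usage, as a sketch (the request/response types and the
+ * MRQ number come from the BPMP ABI and are placeholders here); must be
+ * called from process context since the transfer may sleep:
+ *
+ * struct tegra_bpmp *bpmp = tegra_bpmp_get(dev);
+ * struct tegra_bpmp_message msg;
+ *
+ * memset(&msg, 0, sizeof(msg));
+ * msg.mrq = MRQ_PING;
+ * msg.tx.data = &request;
+ * msg.tx.size = sizeof(request);
+ * msg.rx.data = &response;
+ * msg.rx.size = sizeof(response);
+ *
+ * err = tegra_bpmp_transfer(bpmp, &msg);
+ * tegra_bpmp_put(bpmp);
+ */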
+
+static struct tegra_bpmp_mrq *tegra_bpmp_find_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq)
+{
+ struct tegra_bpmp_mrq *entry;
+
+ list_for_each_entry(entry, &bpmp->mrqs, list)
+ if (entry->mrq == mrq)
+ return entry;
+
+ return NULL;
+}
+
+static void tegra_bpmp_mrq_return(struct tegra_bpmp_channel *channel,
+ int code, const void *data, size_t size)
+{
+ unsigned long flags = channel->ib->flags;
+ struct tegra_bpmp *bpmp = channel->bpmp;
+ struct tegra_bpmp_mb_data *frame;
+ int err;
+
+ if (WARN_ON(size > MSG_DATA_MIN_SZ))
+ return;
+
+ err = tegra_ivc_read_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if ((flags & MSG_ACK) == 0)
+ return;
+
+ frame = tegra_ivc_write_get_next_frame(channel->ivc);
+ if (WARN_ON(IS_ERR(frame)))
+ return;
+
+ frame->code = code;
+
+ if (data && size > 0)
+ memcpy(frame->data, data, size);
+
+ err = tegra_ivc_write_advance(channel->ivc);
+ if (WARN_ON(err < 0))
+ return;
+
+ if (flags & MSG_RING) {
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (WARN_ON(err < 0))
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+ }
+}
+
+static void tegra_bpmp_handle_mrq(struct tegra_bpmp *bpmp,
+ unsigned int mrq,
+ struct tegra_bpmp_channel *channel)
+{
+ struct tegra_bpmp_mrq *entry;
+ u32 zero = 0;
+
+ spin_lock(&bpmp->lock);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry) {
+ spin_unlock(&bpmp->lock);
+ tegra_bpmp_mrq_return(channel, -EINVAL, &zero, sizeof(zero));
+ return;
+ }
+
+ entry->handler(mrq, channel, entry->data);
+
+ spin_unlock(&bpmp->lock);
+}
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ if (!handler)
+ return -EINVAL;
+
+ entry = devm_kzalloc(bpmp->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry->mrq = mrq;
+ entry->handler = handler;
+ entry->data = data;
+ list_add(&entry->list, &bpmp->mrqs);
+
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq);
+
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq, void *data)
+{
+ struct tegra_bpmp_mrq *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bpmp->lock, flags);
+
+ entry = tegra_bpmp_find_mrq(bpmp, mrq);
+ if (!entry)
+ goto unlock;
+
+ list_del(&entry->list);
+ devm_kfree(bpmp->dev, entry);
+
+unlock:
+ spin_unlock_irqrestore(&bpmp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq);
+
+static void tegra_bpmp_mrq_handle_ping(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data)
+{
+ struct mrq_ping_request *request;
+ struct mrq_ping_response response;
+
+ request = (struct mrq_ping_request *)channel->ib->data;
+
+ memset(&response, 0, sizeof(response));
+ response.reply = request->challenge << 1;
+
+ tegra_bpmp_mrq_return(channel, 0, &response, sizeof(response));
+}
+
+static int tegra_bpmp_ping(struct tegra_bpmp *bpmp)
+{
+ struct mrq_ping_response response;
+ struct mrq_ping_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ ktime_t start, end;
+ int err;
+
+ memset(&request, 0, sizeof(request));
+ request.challenge = 1;
+
+ memset(&response, 0, sizeof(response));
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_PING;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+ msg.rx.data = &response;
+ msg.rx.size = sizeof(response);
+
+ local_irq_save(flags);
+ start = ktime_get();
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ end = ktime_get();
+ local_irq_restore(flags);
+
+ if (!err)
+ dev_dbg(bpmp->dev,
+ "ping ok: challenge: %u, response: %u, time: %lld\n",
+ request.challenge, response.reply,
+ ktime_to_us(ktime_sub(end, start)));
+
+ return err;
+}
+
+static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp *bpmp, char *tag,
+ size_t size)
+{
+ struct mrq_query_tag_request request;
+ struct tegra_bpmp_message msg;
+ unsigned long flags;
+ dma_addr_t phys;
+ void *virt;
+ int err;
+
+ virt = dma_alloc_coherent(bpmp->dev, MSG_DATA_MIN_SZ, &phys,
+ GFP_KERNEL | GFP_DMA32);
+ if (!virt)
+ return -ENOMEM;
+
+ memset(&request, 0, sizeof(request));
+ request.addr = phys;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_QUERY_TAG;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ local_irq_save(flags);
+ err = tegra_bpmp_transfer_atomic(bpmp, &msg);
+ local_irq_restore(flags);
+
+ if (err == 0)
+ strlcpy(tag, virt, size);
+
+ dma_free_coherent(bpmp->dev, MSG_DATA_MIN_SZ, virt, phys);
+
+ return err;
+}
+
+static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel *channel)
+{
+ unsigned long flags = channel->ob->flags;
+
+ if ((flags & MSG_RING) == 0)
+ return;
+
+ complete(&channel->completion);
+}
+
+static void tegra_bpmp_handle_rx(struct mbox_client *client, void *data)
+{
+ struct tegra_bpmp *bpmp = mbox_client_to_bpmp(client);
+ struct tegra_bpmp_channel *channel;
+ unsigned int i, count;
+ unsigned long *busy;
+
+ channel = tegra_bpmp_channel_get_rx(bpmp);
+ count = bpmp->soc->channels.thread.count;
+ busy = bpmp->threaded.busy;
+
+ if (tegra_bpmp_master_acked(channel))
+ tegra_bpmp_handle_mrq(bpmp, channel->ib->code, channel);
+
+ spin_lock(&bpmp->lock);
+
+ for_each_set_bit(i, busy, count) {
+ struct tegra_bpmp_channel *channel;
+
+ channel = tegra_bpmp_channel_get_thread(bpmp, i);
+ if (!channel)
+ continue;
+
+ if (tegra_bpmp_master_acked(channel)) {
+ tegra_bpmp_channel_signal(channel);
+ clear_bit(i, busy);
+ }
+ }
+
+ spin_unlock(&bpmp->lock);
+}
+
+static void tegra_bpmp_ivc_notify(struct tegra_ivc *ivc, void *data)
+{
+ struct tegra_bpmp *bpmp = data;
+ int err;
+
+ if (WARN_ON(bpmp->mbox.channel == NULL))
+ return;
+
+ err = mbox_send_message(bpmp->mbox.channel, NULL);
+ if (err < 0)
+ return;
+
+ mbox_client_txdone(bpmp->mbox.channel, 0);
+}
+
+static int tegra_bpmp_channel_init(struct tegra_bpmp_channel *channel,
+ struct tegra_bpmp *bpmp,
+ unsigned int index)
+{
+ size_t message_size, queue_size;
+ unsigned int offset;
+ int err;
+
+ channel->ivc = devm_kzalloc(bpmp->dev, sizeof(*channel->ivc),
+ GFP_KERNEL);
+ if (!channel->ivc)
+ return -ENOMEM;
+
+ message_size = tegra_ivc_align(MSG_MIN_SZ);
+ queue_size = tegra_ivc_total_queue_size(message_size);
+ offset = queue_size * index;
+
+ err = tegra_ivc_init(channel->ivc, NULL,
+ bpmp->rx.virt + offset, bpmp->rx.phys + offset,
+ bpmp->tx.virt + offset, bpmp->tx.phys + offset,
+ 1, message_size, tegra_bpmp_ivc_notify,
+ bpmp);
+ if (err < 0) {
+ dev_err(bpmp->dev, "failed to setup IVC for channel %u: %d\n",
+ index, err);
+ return err;
+ }
+
+ init_completion(&channel->completion);
+ channel->bpmp = bpmp;
+
+ return 0;
+}
+
+static void tegra_bpmp_channel_reset(struct tegra_bpmp_channel *channel)
+{
+ /* reset the channel state */
+ tegra_ivc_reset(channel->ivc);
+
+ /* sync the channel state with BPMP */
+ while (tegra_ivc_notified(channel->ivc))
+ ;
+}
+
+static void tegra_bpmp_channel_cleanup(struct tegra_bpmp_channel *channel)
+{
+ tegra_ivc_cleanup(channel->ivc);
+}
+
+static int tegra_bpmp_probe(struct platform_device *pdev)
+{
+ struct tegra_bpmp_channel *channel;
+ struct tegra_bpmp *bpmp;
+ unsigned int i;
+ char tag[32];
+ size_t size;
+ int err;
+
+ bpmp = devm_kzalloc(&pdev->dev, sizeof(*bpmp), GFP_KERNEL);
+ if (!bpmp)
+ return -ENOMEM;
+
+ bpmp->soc = of_device_get_match_data(&pdev->dev);
+ bpmp->dev = &pdev->dev;
+
+ bpmp->tx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 0);
+ if (!bpmp->tx.pool) {
+ dev_err(&pdev->dev, "TX shmem pool not found\n");
+ return -ENOMEM;
+ }
+
+ bpmp->tx.virt = gen_pool_dma_alloc(bpmp->tx.pool, 4096, &bpmp->tx.phys);
+ if (!bpmp->tx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from TX pool\n");
+ return -ENOMEM;
+ }
+
+ bpmp->rx.pool = of_gen_pool_get(pdev->dev.of_node, "shmem", 1);
+ if (!bpmp->rx.pool) {
+ dev_err(&pdev->dev, "RX shmem pool not found\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ bpmp->rx.virt = gen_pool_dma_alloc(bpmp->rx.pool, 4096, &bpmp->rx.phys);
+ if (!bpmp->rx.virt) {
+ dev_err(&pdev->dev, "failed to allocate from RX pool\n");
+ err = -ENOMEM;
+ goto free_tx;
+ }
+
+ INIT_LIST_HEAD(&bpmp->mrqs);
+ spin_lock_init(&bpmp->lock);
+
+ bpmp->threaded.count = bpmp->soc->channels.thread.count;
+ sema_init(&bpmp->threaded.lock, bpmp->threaded.count);
+
+ size = BITS_TO_LONGS(bpmp->threaded.count) * sizeof(long);
+
+ bpmp->threaded.allocated = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.allocated) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->threaded.busy = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!bpmp->threaded.busy) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ bpmp->num_channels = bpmp->soc->channels.cpu_tx.count +
+ bpmp->soc->channels.thread.count +
+ bpmp->soc->channels.cpu_rx.count;
+
+ bpmp->channels = devm_kcalloc(&pdev->dev, bpmp->num_channels,
+ sizeof(*channel), GFP_KERNEL);
+ if (!bpmp->channels) {
+ err = -ENOMEM;
+ goto free_rx;
+ }
+
+ /* message channel initialization */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ err = tegra_bpmp_channel_init(channel, bpmp, i);
+ if (err < 0)
+ goto cleanup_channels;
+ }
+
+ /* mbox registration */
+ bpmp->mbox.client.dev = &pdev->dev;
+ bpmp->mbox.client.rx_callback = tegra_bpmp_handle_rx;
+ bpmp->mbox.client.tx_block = false;
+ bpmp->mbox.client.knows_txdone = false;
+
+ bpmp->mbox.channel = mbox_request_channel(&bpmp->mbox.client, 0);
+ if (IS_ERR(bpmp->mbox.channel)) {
+ err = PTR_ERR(bpmp->mbox.channel);
+ dev_err(&pdev->dev, "failed to get HSP mailbox: %d\n", err);
+ goto cleanup_channels;
+ }
+
+ /* reset message channels */
+ for (i = 0; i < bpmp->num_channels; i++) {
+ struct tegra_bpmp_channel *channel = &bpmp->channels[i];
+
+ tegra_bpmp_channel_reset(channel);
+ }
+
+ err = tegra_bpmp_request_mrq(bpmp, MRQ_PING,
+ tegra_bpmp_mrq_handle_ping, bpmp);
+ if (err < 0)
+ goto free_mbox;
+
+ err = tegra_bpmp_ping(bpmp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to ping BPMP: %d\n", err);
+ goto free_mrq;
+ }
+
+ err = tegra_bpmp_get_firmware_tag(bpmp, tag, sizeof(tag) - 1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get firmware tag: %d\n", err);
+ goto free_mrq;
+ }
+
+ dev_info(&pdev->dev, "firmware: %s\n", tag);
+
+ err = of_platform_default_populate(pdev->dev.of_node, NULL, &pdev->dev);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_clocks(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ err = tegra_bpmp_init_resets(bpmp);
+ if (err < 0)
+ goto free_mrq;
+
+ platform_set_drvdata(pdev, bpmp);
+
+ return 0;
+
+free_mrq:
+ tegra_bpmp_free_mrq(bpmp, MRQ_PING, bpmp);
+free_mbox:
+ mbox_free_channel(bpmp->mbox.channel);
+cleanup_channels:
+ while (i--)
+ tegra_bpmp_channel_cleanup(&bpmp->channels[i]);
+free_rx:
+ gen_pool_free(bpmp->rx.pool, (unsigned long)bpmp->rx.virt, 4096);
+free_tx:
+ gen_pool_free(bpmp->tx.pool, (unsigned long)bpmp->tx.virt, 4096);
+ return err;
+}
+
+static const struct tegra_bpmp_soc tegra186_soc = {
+ .channels = {
+ .cpu_tx = {
+ .offset = 0,
+ .count = 6,
+ .timeout = 60 * USEC_PER_SEC,
+ },
+ .thread = {
+ .offset = 6,
+ .count = 7,
+ .timeout = 600 * USEC_PER_SEC,
+ },
+ .cpu_rx = {
+ .offset = 13,
+ .count = 1,
+ .timeout = 0,
+ },
+ },
+ .num_resets = 193,
+};
+
+static const struct of_device_id tegra_bpmp_match[] = {
+ { .compatible = "nvidia,tegra186-bpmp", .data = &tegra186_soc },
+ { }
+};
+
+static struct platform_driver tegra_bpmp_driver = {
+ .driver = {
+ .name = "tegra-bpmp",
+ .of_match_table = tegra_bpmp_match,
+ },
+ .probe = tegra_bpmp_probe,
+};
+
+static int __init tegra_bpmp_init(void)
+{
+ return platform_driver_register(&tegra_bpmp_driver);
+}
+core_initcall(tegra_bpmp_init);
diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c
new file mode 100644
index 000000000000..29ecfd815320
--- /dev/null
+++ b/drivers/firmware/tegra/ivc.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <soc/tegra/ivc.h>
+
+#define TEGRA_IVC_ALIGN 64
+
+/*
+ * IVC channel reset protocol.
+ *
+ * Each end uses its tx_channel.state to indicate its synchronization state.
+ */
+enum tegra_ivc_state {
+ /*
+ * This value is zero for backwards compatibility with services that
+ * assume channels to be initially zeroed. Such channels are in an
+ * initially valid state, but cannot be asynchronously reset, and must
+ * maintain a valid state at all times.
+ *
+ * The transmitting end can enter the established state from the sync or
+ * ack state when it observes the receiving endpoint in the ack or
+ * established state, indicating that it has cleared the counters in our
+ * rx_channel.
+ */
+ TEGRA_IVC_STATE_ESTABLISHED = 0,
+
+ /*
+ * If an endpoint is observed in the sync state, the remote endpoint is
+ * allowed to clear the counters it owns asynchronously with respect to
+ * the current endpoint. Therefore, the current endpoint is no longer
+ * allowed to communicate.
+ */
+ TEGRA_IVC_STATE_SYNC,
+
+ /*
+ * When the transmitting end observes the receiving end in the sync
+ * state, it can clear the counters it owns and transition to the ack
+ * state. If the remote endpoint observes us in the ack state, it can
+ * return to the established state once it has cleared its counters.
+ */
+ TEGRA_IVC_STATE_ACK
+};
+
+/*
+ * This structure is divided into two cache-aligned parts; the first is only
+ * written through the tx.channel pointer, while the second is only written
+ * through the rx.channel pointer. This delineates ownership of the cache
+ * lines, which is critical to performance and necessary in non-cache coherent
+ * implementations.
+ */
+struct tegra_ivc_header {
+ union {
+ struct {
+ /* fields owned by the transmitting end */
+ u32 count;
+ u32 state;
+ };
+
+ u8 pad[TEGRA_IVC_ALIGN];
+ } tx;
+
+ union {
+ /* fields owned by the receiving end */
+ u32 count;
+ u8 pad[TEGRA_IVC_ALIGN];
+ } rx;
+};
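+
+/*
+ * One IVC queue in shared memory therefore looks like this (frames
+ * immediately follow the header, as computed by tegra_ivc_frame_virt()
+ * and tegra_ivc_frame_phys() below):
+ *
+ * queue base (TEGRA_IVC_ALIGN aligned)
+ * -> struct tegra_ivc_header
+ * -> frame 0 (frame_size bytes)
+ * -> frame 1
+ * -> ...
+ * -> frame num_frames - 1
+ */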
+
+static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
+{
+ if (!ivc->peer)
+ return;
+
+ dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
+ DMA_TO_DEVICE);
+}
+
+static inline bool tegra_ivc_empty(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ /*
+ * This function performs multiple checks on the same values with
+ * security implications, so create snapshots with ACCESS_ONCE() to
+ * ensure that these checks use the same values.
+ */
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Perform an over-full check to prevent denial of service attacks
+ * where a server could be easily fooled into believing that there's
+ * an extremely large number of frames ready, since receivers are not
+ * expected to check for full or over-full conditions.
+ *
+ * Although the channel isn't empty, this is an invalid case caused by
+ * a potentially malicious peer, so returning empty is safer, because
+ * it gives the impression that the channel has gone silent.
+ */
+ if (tx - rx > ivc->num_frames)
+ return true;
+
+ return tx == rx;
+}
+
+static inline bool tegra_ivc_full(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * Invalid cases where the counters indicate that the queue is over
+ * capacity also appear full.
+ */
+ return tx - rx >= ivc->num_frames;
+}
+
+static inline u32 tegra_ivc_available(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header)
+{
+ u32 tx = ACCESS_ONCE(header->tx.count);
+ u32 rx = ACCESS_ONCE(header->rx.count);
+
+ /*
+ * This function isn't expected to be used in scenarios where an
+ * over-full situation can lead to denial of service attacks. See the
+ * comment in tegra_ivc_empty() for an explanation about special
+ * over-full considerations.
+ */
+ return tx - rx;
+}
+
+static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->tx.channel->tx.count) =
+ ACCESS_ONCE(ivc->tx.channel->tx.count) + 1;
+
+ if (ivc->tx.position == ivc->num_frames - 1)
+ ivc->tx.position = 0;
+ else
+ ivc->tx.position++;
+}
+
+static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
+{
+ ACCESS_ONCE(ivc->rx.channel->rx.count) =
+ ACCESS_ONCE(ivc->rx.channel->rx.count) + 1;
+
+ if (ivc->rx.position == ivc->num_frames - 1)
+ ivc->rx.position = 0;
+ else
+ ivc->rx.position++;
+}
+
+static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * tx.channel->state is set locally, so it is not synchronized with
+ * state from the remote peer. The remote peer cannot reset its
+ * transmit counters until we've acknowledged its synchronization
+ * request, so no additional synchronization is required because an
+ * asynchronous transition of rx.channel->state to
+ * TEGRA_IVC_STATE_ACK is not allowed.
+ */
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ /*
+ * Avoid unnecessary invalidations when performing repeated accesses
+ * to an IVC channel by checking the old queue pointers first.
+ *
+ * Synchronization is only necessary when these pointers indicate
+ * empty or full.
+ */
+ if (!tegra_ivc_empty(ivc, ivc->rx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+
+ if (tegra_ivc_empty(ivc, ivc->rx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -ECONNRESET;
+
+ if (!tegra_ivc_full(ivc, ivc->tx.channel))
+ return 0;
+
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);
+
+ if (tegra_ivc_full(ivc, ivc->tx.channel))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static void *tegra_ivc_frame_virt(struct tegra_ivc *ivc,
+ struct tegra_ivc_header *header,
+ unsigned int frame)
+{
+ if (WARN_ON(frame >= ivc->num_frames))
+ return ERR_PTR(-EINVAL);
+
+ return (void *)(header + 1) + ivc->frame_size * frame;
+}
+
+static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame)
+{
+ unsigned long offset;
+
+ offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;
+
+ return phys + offset;
+}
+
+static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
+}
+
+static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
+ dma_addr_t phys,
+ unsigned int frame,
+ unsigned int offset,
+ size_t size)
+{
+ if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
+ return;
+
+ phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;
+
+ dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
+}
+
+/* directly peek at the next frame rx'ed */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ if (WARN_ON(ivc == NULL))
+ return ERR_PTR(-EINVAL);
+
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /*
+ * Order observation of ivc->rx.position potentially indicating new
+ * data before data read.
+ */
+ smp_rmb();
+
+ tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
+ ivc->frame_size);
+
+ return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
+
+int tegra_ivc_read_advance(struct tegra_ivc *ivc)
+{
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ int err;
+
+ /*
+ * No read barriers or synchronization here: the caller is expected to
+ * have already observed the channel non-empty. This check is just to
+ * catch programming errors.
+ */
+ err = tegra_ivc_check_read(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_advance_rx(ivc);
+
+ tegra_ivc_flush(ivc, ivc->rx.phys + rx);
+
+ /*
+ * Ensure our write to ivc->rx.position occurs before our read from
+ * ivc->tx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from full to non-full. The available
+ * count can only asynchronously increase, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);
+
+ if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_read_advance);
+
+/* directly poke at the next frame to be tx'ed */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc)
+{
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position);
+}
+EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
+
+/* advance the tx buffer */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc)
+{
+ unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
+ unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
+ int err;
+
+ err = tegra_ivc_check_write(ivc);
+ if (err < 0)
+ return err;
+
+ tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
+ ivc->frame_size);
+
+ /*
+ * Order any possible stores to the frame before update of
+ * ivc->tx.position.
+ */
+ smp_wmb();
+
+ tegra_ivc_advance_tx(ivc);
+ tegra_ivc_flush(ivc, ivc->tx.phys + tx);
+
+ /*
+ * Ensure our write to ivc->tx.position occurs before our read from
+ * ivc->rx.position.
+ */
+ smp_mb();
+
+ /*
+ * Notify only upon transition from empty to non-empty. The available
+ * count can only asynchronously decrease, so the worst possible
+ * side-effect will be a spurious notification.
+ */
+ tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);
+
+ if (tegra_ivc_available(ivc, ivc->tx.channel) == 1)
+ ivc->notify(ivc, ivc->notify_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_write_advance);
+
+void tegra_ivc_reset(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+ ivc->notify(ivc, ivc->notify_data);
+}
+EXPORT_SYMBOL(tegra_ivc_reset);
+
+/*
+ * =======================================================
+ * IVC State Transition Table - see tegra_ivc_notified()
+ * =======================================================
+ *
+ * local remote action
+ * ----- ------ -----------------------------------
+ * SYNC EST <none>
+ * SYNC ACK reset counters; move to EST; notify
+ * SYNC SYNC reset counters; move to ACK; notify
+ * ACK EST move to EST; notify
+ * ACK ACK move to EST; notify
+ * ACK SYNC reset counters; move to ACK; notify
+ * EST EST <none>
+ * EST ACK <none>
+ * EST SYNC reset counters; move to ACK; notify
+ *
+ * ===============================================================
+ */
+
+int tegra_ivc_notified(struct tegra_ivc *ivc)
+{
+ unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
+ enum tegra_ivc_state state;
+
+ /* Copy the receiver's state out of shared memory. */
+ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
+ state = ACCESS_ONCE(ivc->rx.channel->tx.state);
+
+ if (state == TEGRA_IVC_STATE_SYNC) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of TEGRA_IVC_STATE_SYNC before stores
+ * clearing tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the SYNC
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ACK state. We have just cleared our counters, so it
+ * is now safe for the remote end to start using these values.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC &&
+ state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * Order observation of ivc_state_sync before stores clearing
+ * tx_channel.
+ */
+ smp_rmb();
+
+ /*
+ * Reset tx.channel counters. The remote end is in the ACK
+ * state and won't make progress until we change our state,
+ * so the counters are not in use at this time.
+ */
+ ivc->tx.channel->tx.count = 0;
+ ivc->rx.channel->rx.count = 0;
+
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ /*
+ * Ensure that counters appear cleared before new state can be
+ * observed.
+ */
+ smp_wmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that the remote end has
+ * already cleared its counters, so it is safe to start
+ * writing/reading on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) {
+ offset = offsetof(struct tegra_ivc_header, tx.count);
+
+ /*
+ * At this point, we have observed the peer to be in either
+ * the ACK or ESTABLISHED state. Next, order observation of
+ * peer state before storing to tx.channel.
+ */
+ smp_rmb();
+
+ /*
+ * Move to ESTABLISHED state. We know that we have previously
+ * cleared our counters, and we know that the remote end has
+ * cleared its counters, so it is safe to start writing/reading
+ * on this channel.
+ */
+ ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED;
+ tegra_ivc_flush(ivc, ivc->tx.phys + offset);
+
+ /*
+ * Notify remote end to observe state transition.
+ */
+ ivc->notify(ivc, ivc->notify_data);
+
+ } else {
+ /*
+ * There is no need to handle any further action. Either the
+ * channel is already fully established, or we are waiting for
+ * the remote end to catch up with our current state. Refer
+ * to the "IVC State Transition Table" above.
+ */
+ }
+
+ if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED)
+ return -EAGAIN;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_notified);
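A minimal sketch of the establishment flow described by the state table above; both ends run the same logic, and the doorbell is whatever notify callback the caller registered.

	/* Illustrative only: assumes "ivc" was prepared with tegra_ivc_init(). */
	tegra_ivc_reset(ivc);			/* enter SYNC and ring the peer */

	/* in the notification/interrupt handler on either end: */
	if (tegra_ivc_notified(ivc) == 0) {
		/* ESTABLISHED: frames may now be read and written */
	} else {
		/* -EAGAIN: still in SYNC/ACK, wait for the next notification */
	}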
+
+size_t tegra_ivc_align(size_t size)
+{
+ return ALIGN(size, TEGRA_IVC_ALIGN);
+}
+EXPORT_SYMBOL(tegra_ivc_align);
+
+unsigned tegra_ivc_total_queue_size(unsigned queue_size)
+{
+ if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
+ pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
+ __func__, queue_size, TEGRA_IVC_ALIGN);
+ return 0;
+ }
+
+ return queue_size + sizeof(struct tegra_ivc_header);
+}
+EXPORT_SYMBOL(tegra_ivc_total_queue_size);
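As an illustrative calculation (the constants are defined elsewhere in this file), the per-direction footprint is the aligned payload area plus one channel header:

	/* Hypothetical 16-frame queue with 128-byte frames. */
	size_t frame_size = tegra_ivc_align(128);
	unsigned int total = tegra_ivc_total_queue_size(16 * frame_size);
	/* total == 16 * frame_size + sizeof(struct tegra_ivc_header);
	 * an unaligned queue size would instead return 0 with an error log. */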
+
+static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
+ unsigned int num_frames, size_t frame_size)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
+ TEGRA_IVC_ALIGN));
+ BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
+ TEGRA_IVC_ALIGN));
+
+ if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
+ pr_err("num_frames * frame_size overflows\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
+ pr_err("frame size not adequately aligned: %zu\n", frame_size);
+ return -EINVAL;
+ }
+
+ /*
+ * The headers must at least be aligned enough for counters
+ * to be accessed atomically.
+ */
+ if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", rx);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
+ pr_err("IVC channel start not aligned: %#lx\n", tx);
+ return -EINVAL;
+ }
+
+ if (rx < tx) {
+ if (rx + frame_size * num_frames > tx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ rx, frame_size * num_frames, tx);
+ return -EINVAL;
+ }
+ } else {
+ if (tx + frame_size * num_frames > rx) {
+ pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
+ tx, frame_size * num_frames, rx);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data)
+{
+ size_t queue_size;
+ int err;
+
+ if (WARN_ON(!ivc || !notify))
+ return -EINVAL;
+
+ /*
+ * All sizes that can be returned by communication functions should
+ * fit in an int.
+ */
+ if (frame_size > INT_MAX)
+ return -E2BIG;
+
+ err = tegra_ivc_check_params((unsigned long)rx, (unsigned long)tx,
+ num_frames, frame_size);
+ if (err < 0)
+ return err;
+
+ queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);
+
+ if (peer) {
+ ivc->rx.phys = dma_map_single(peer, rx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->rx.phys == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ ivc->tx.phys = dma_map_single(peer, tx, queue_size,
+ DMA_BIDIRECTIONAL);
+ if (ivc->tx.phys == DMA_ERROR_CODE) {
+ dma_unmap_single(peer, ivc->rx.phys, queue_size,
+ DMA_BIDIRECTIONAL);
+ return -ENOMEM;
+ }
+ } else {
+ ivc->rx.phys = rx_phys;
+ ivc->tx.phys = tx_phys;
+ }
+
+ ivc->rx.channel = rx;
+ ivc->tx.channel = tx;
+ ivc->peer = peer;
+ ivc->notify = notify;
+ ivc->notify_data = data;
+ ivc->frame_size = frame_size;
+ ivc->num_frames = num_frames;
+
+ /*
+ * These values aren't necessarily correct until the channel has been
+ * reset.
+ */
+ ivc->tx.position = 0;
+ ivc->rx.position = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_ivc_init);
+
+void tegra_ivc_cleanup(struct tegra_ivc *ivc)
+{
+ if (ivc->peer) {
+ size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
+ ivc->frame_size);
+
+ dma_unmap_single(ivc->peer, ivc->rx.phys, size,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(ivc->peer, ivc->tx.phys, size,
+ DMA_BIDIRECTIONAL);
+ }
+}
+EXPORT_SYMBOL(tegra_ivc_cleanup);
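For context, a hedged sketch of the init/cleanup pairing; "doorbell", the rx/tx buffers and the platform device are placeholders, and passing a non-NULL peer makes tegra_ivc_init() perform the dma_map_single() calls shown above.

	/* Illustrative only, not part of the patch. */
	err = tegra_ivc_init(&ivc, &pdev->dev, rx, 0, tx, 0,
			     num_frames, frame_size, doorbell, NULL);
	if (err < 0)
		return err;

	tegra_ivc_reset(&ivc);		/* start the SYNC handshake */
	/* ... use the channel ... */
	tegra_ivc_cleanup(&ivc);	/* unmaps both queues again */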
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2e3a0543760d..e3281d4e3e41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -765,7 +765,7 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
-static void amdgpu_connector_destroy(struct drm_connector *connector)
+static void amdgpu_connector_unregister(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -773,6 +773,12 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = false;
}
+}
+
+static void amdgpu_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_connector_free_edid(connector);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
@@ -826,6 +832,7 @@ static const struct drm_connector_funcs amdgpu_connector_lvds_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_lcd_property,
};
@@ -936,6 +943,7 @@ static const struct drm_connector_funcs amdgpu_connector_vga_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = amdgpu_connector_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.set_property = amdgpu_connector_set_property,
};
@@ -1203,6 +1211,7 @@ static const struct drm_connector_funcs amdgpu_connector_dvi_funcs = {
.detect = amdgpu_connector_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1493,6 +1502,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
@@ -1502,6 +1512,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.detect = amdgpu_connector_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = amdgpu_connector_set_lcd_property,
+ .early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e203e5561107..a5e2fcbef0f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,6 +43,9 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
+
+ ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7dbe85d67d26..b4f4a9239069 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1408,16 +1408,6 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_status[i].valid)
continue;
- if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_UVD ||
- adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_VCE)
- continue;
- /* enable clockgating to save power */
- r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
- return r;
- }
if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) {
@@ -1426,6 +1416,18 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
}
adev->ip_block_status[i].late_initialized = true;
}
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+ /* enable clockgating to save power */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_GATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ }
}
return 0;
@@ -1435,6 +1437,30 @@ static int amdgpu_fini(struct amdgpu_device *adev)
{
int i, r;
+ /* need to disable SMC first */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].hw)
+ continue;
+ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+ /* ungate blocks before hw fini so that we can shut down the blocks safely */
+ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ return r;
+ }
+ r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].funcs->name, r);
+ }
+ adev->ip_block_status[i].hw = false;
+ break;
+ }
+ }
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_status[i].hw)
continue;
@@ -2073,7 +2099,8 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
if (!adev->ip_block_status[i].valid)
continue;
if (adev->ip_blocks[i].funcs->check_soft_reset)
- adev->ip_blocks[i].funcs->check_soft_reset(adev);
+ adev->ip_block_status[i].hang =
+ adev->ip_blocks[i].funcs->check_soft_reset(adev);
if (adev->ip_block_status[i].hang) {
DRM_INFO("IP block:%d is hang!\n", i);
asic_hang = true;
@@ -2102,12 +2129,20 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
{
- if (adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SMC].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_ACP].hang ||
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang) {
- DRM_INFO("Some block need full reset!\n");
- return true;
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_block_status[i].valid)
+ continue;
+ if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
+ (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
+ if (adev->ip_block_status[i].hang) {
+ DRM_INFO("Some block need full reset!\n");
+ return true;
+ }
+ }
}
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index fe36caf1b7d7..14f57d9915e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -113,24 +113,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
printk("\n");
}
+
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
break;
}
}
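The reordering above avoids truncating the per-line time to whole microseconds before multiplying. A worked example with made-up 1080p60-style timings (htotal 2200, 45 vblank lines, 148500 kHz pixel clock):

	line_time_us  = 2200 * 1000 / 148500;		/* = 14, truncated from 14.8 */
	old_vblank_us = 45 * line_time_us;		/* = 630 */
	new_vblank_us = 2200 * 45 * 1000 / 148500;	/* = 666, close to the true 666.7 */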
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e1fa8731d1e2..3cb5e903cd62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -345,8 +345,8 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
ent = debugfs_create_file(name,
S_IFREG | S_IRUGO, root,
ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
+ if (!ent)
+ return -ENOMEM;
i_size_write(ent->d_inode, ring->ring_size + 12);
ring->ent = ent;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 887483b8b818..dcaf691f56b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,13 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
unsigned pinned = 0;
int r;
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ flags |= FOLL_WRITE;
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -581,7 +584,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
list_add(&guptask.list, &gtt->guptasks);
spin_unlock(&gtt->guptasklock);
- r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+ r = get_user_pages(userptr, num_pages, flags, p, NULL);
spin_lock(&gtt->guptasklock);
list_del(&guptask.list);
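The same conversion recurs across the GPU drivers in this series: the old write/force integer arguments collapse into a single gup_flags word. A generic, illustrative mapping:

	unsigned int gup_flags = 0;

	if (write)			/* caller wants the pages writable */
		gup_flags |= FOLL_WRITE;
	if (force)			/* override VMA protection, the old "force" */
		gup_flags |= FOLL_FORCE;

	pinned = get_user_pages(start, nr_pages, gup_flags, pages, NULL);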
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index f80a0834e889..3c082e143730 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1514,14 +1514,16 @@ static int cz_dpm_set_powergating_state(void *handle,
return 0;
}
-/* borrowed from KV, need future unify */
static int cz_dpm_get_temperature(struct amdgpu_device *adev)
{
int actual_temp = 0;
- uint32_t temp = RREG32_SMC(0xC0300E0C);
+ uint32_t val = RREG32_SMC(ixTHM_TCON_CUR_TMP);
+ uint32_t temp = REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
- if (temp)
+ if (REG_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
actual_temp = 1000 * ((temp / 8) - 49);
+ else
+ actual_temp = 1000 * (temp / 8);
return actual_temp;
}
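A worked reading of the new formula with a made-up register value: the raw field counts eighths of a degree, and the 49 degree offset applies only when the range-select bit is set.

	/* e.g. CUR_TEMP = 520 with CUR_TEMP_RANGE_SEL set:
	 *   actual_temp = 1000 * ((520 / 8) - 49) = 16000, i.e. 16 degrees C in millidegrees;
	 * with the bit clear: 1000 * (520 / 8) = 65000, i.e. 65 degrees C. */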
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 613ebb7ed50f..4108c686aa7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3188,16 +3188,11 @@ static int dce_v10_0_wait_for_idle(void *handle)
return 0;
}
-static int dce_v10_0_check_soft_reset(void *handle)
+static bool dce_v10_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (dce_v10_0_is_display_hung(adev))
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = true;
- else
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang = false;
-
- return 0;
+ return dce_v10_0_is_display_hung(adev);
}
static int dce_v10_0_soft_reset(void *handle)
@@ -3205,9 +3200,6 @@ static int dce_v10_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0, tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_DCE].hang)
- return 0;
-
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6c6ff57b1c95..ee6a48a09214 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4087,14 +4087,21 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
+ u32 tmp;
gfx_v8_0_rlc_stop(adev);
/* disable CG */
- WREG32(mmRLC_CGCG_CGLS_CTRL, 0);
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL);
+ tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK |
+ RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
+ WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10)
- WREG32(mmRLC_CGCG_CGLS_CTRL_3D, 0);
+ adev->asic_type == CHIP_POLARIS10) {
+ tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
+ tmp &= ~0x3;
+ WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
+ }
/* disable PG */
WREG32(mmRLC_PG_CNTL, 0);
@@ -5137,7 +5144,7 @@ static int gfx_v8_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int gfx_v8_0_check_soft_reset(void *handle)
+static bool gfx_v8_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
@@ -5189,16 +5196,14 @@ static int gfx_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_SEM, 1);
if (grbm_soft_reset || srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = true;
adev->gfx.grbm_soft_reset = grbm_soft_reset;
adev->gfx.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang = false;
adev->gfx.grbm_soft_reset = 0;
adev->gfx.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
@@ -5226,7 +5231,8 @@ static int gfx_v8_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5264,7 +5270,8 @@ static int gfx_v8_0_soft_reset(void *handle)
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
@@ -5334,7 +5341,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GFX].hang)
+ if ((!adev->gfx.grbm_soft_reset) &&
+ (!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1b319f5bc696..c22ef140a542 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1099,7 +1099,7 @@ static int gmc_v8_0_wait_for_idle(void *handle)
}
-static int gmc_v8_0_check_soft_reset(void *handle)
+static bool gmc_v8_0_check_soft_reset(void *handle)
{
u32 srbm_soft_reset = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1116,20 +1116,19 @@ static int gmc_v8_0_check_soft_reset(void *handle)
SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = true;
adev->mc.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang = false;
adev->mc.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
static int gmc_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_stop(adev, &adev->mc.save);
@@ -1145,7 +1144,7 @@ static int gmc_v8_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->mc.srbm_soft_reset;
@@ -1175,7 +1174,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_GMC].hang)
+ if (!adev->mc.srbm_soft_reset)
return 0;
gmc_v8_0_mc_resume(adev, &adev->mc.save);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index f325fd86430b..a9d10941fb53 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1268,7 +1268,7 @@ static int sdma_v3_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v3_0_check_soft_reset(void *handle)
+static bool sdma_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -1281,14 +1281,12 @@ static int sdma_v3_0_check_soft_reset(void *handle)
}
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = true;
adev->sdma.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang = false;
adev->sdma.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int sdma_v3_0_pre_soft_reset(void *handle)
@@ -1296,7 +1294,7 @@ static int sdma_v3_0_pre_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1315,7 +1313,7 @@ static int sdma_v3_0_post_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
@@ -1335,7 +1333,7 @@ static int sdma_v3_0_soft_reset(void *handle)
u32 srbm_soft_reset = 0;
u32 tmp;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_SDMA].hang)
+ if (!adev->sdma.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->sdma.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 8bd08925b370..3de7bca5854b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3499,6 +3499,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ /* Limit clocks for some HD8600 parts */
+ if (adev->pdev->device == 0x6660 &&
+ adev->pdev->revision == 0x83) {
+ max_sclk = 75000;
+ max_mclk = 80000;
+ }
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index d127d59f953a..b4ea229bb449 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -373,7 +373,7 @@ static int tonga_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int tonga_ih_check_soft_reset(void *handle)
+static bool tonga_ih_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -384,21 +384,19 @@ static int tonga_ih_check_soft_reset(void *handle)
SOFT_RESET_IH, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = true;
adev->irq.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang = false;
adev->irq.srbm_soft_reset = 0;
+ return false;
}
-
- return 0;
}
static int tonga_ih_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_fini(adev);
@@ -408,7 +406,7 @@ static int tonga_ih_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
return tonga_ih_hw_init(adev);
@@ -419,7 +417,7 @@ static int tonga_ih_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_IH].hang)
+ if (!adev->irq.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->irq.srbm_soft_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0fd9f21ed95..ab3df6d75656 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -770,7 +770,7 @@ static int uvd_v6_0_wait_for_idle(void *handle)
}
#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
-static int uvd_v6_0_check_soft_reset(void *handle)
+static bool uvd_v6_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -782,19 +782,19 @@ static int uvd_v6_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = true;
adev->uvd.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang = false;
adev->uvd.srbm_soft_reset = 0;
+ return false;
}
- return 0;
}
+
static int uvd_v6_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
uvd_v6_0_stop(adev);
@@ -806,7 +806,7 @@ static int uvd_v6_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->uvd.srbm_soft_reset;
@@ -836,7 +836,7 @@ static int uvd_v6_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_UVD].hang)
+ if (!adev->uvd.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 3f6db4ec0102..8533269ec160 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -561,7 +561,7 @@ static int vce_v3_0_wait_for_idle(void *handle)
#define AMDGPU_VCE_STATUS_BUSY_MASK (VCE_STATUS_VCPU_REPORT_AUTO_BUSY_MASK | \
VCE_STATUS_VCPU_REPORT_RB0_BUSY_MASK)
-static int vce_v3_0_check_soft_reset(void *handle)
+static bool vce_v3_0_check_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset = 0;
@@ -591,16 +591,15 @@ static int vce_v3_0_check_soft_reset(void *handle)
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = true;
adev->vce.srbm_soft_reset = srbm_soft_reset;
+ return true;
} else {
- adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang = false;
adev->vce.srbm_soft_reset = 0;
+ return false;
}
- mutex_unlock(&adev->grbm_idx_mutex);
- return 0;
}
static int vce_v3_0_soft_reset(void *handle)
@@ -608,7 +607,7 @@ static int vce_v3_0_soft_reset(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 srbm_soft_reset;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
srbm_soft_reset = adev->vce.srbm_soft_reset;
@@ -638,7 +637,7 @@ static int vce_v3_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
@@ -651,7 +650,7 @@ static int vce_v3_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!adev->ip_block_status[AMD_IP_BLOCK_TYPE_VCE].hang)
+ if (!adev->vce.srbm_soft_reset)
return 0;
mdelay(5);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c934b78c9e2f..bec8125bceb0 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -165,7 +165,7 @@ struct amd_ip_funcs {
/* poll for idle */
int (*wait_for_idle)(void *handle);
/* check soft reset the IP block */
- int (*check_soft_reset)(void *handle);
+ bool (*check_soft_reset)(void *handle);
/* pre soft reset the IP block */
int (*pre_soft_reset)(void *handle);
/* soft reset the IP block */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 92b117843875..8cee4e0f9fde 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -49,6 +49,7 @@ static const pem_event_action * const uninitialize_event[] = {
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
+ uninitialize_thermal_controller_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 7e4fcbbbe086..960424913496 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1785,6 +1785,21 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
+{
+ int actual_temp = 0;
+ uint32_t val = cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
+ uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
+
+ if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
+ actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ else
+ actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return actual_temp;
+}
+
static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -1881,6 +1896,9 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
case AMDGPU_PP_SENSOR_VCE_POWER:
*value = cz_hwmgr->vce_power_gated ? 0 : 1;
return 0;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ *value = cz_thermal_get_temperature(hwmgr);
+ return 0;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 508245d49d33..609996c84ad5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1030,20 +1030,19 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
/* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -EINVAL);
+ if (!data->sclk_dpm_key_disabled) {
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable SCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ }
/* disable MCLK dpm */
if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -EINVAL);
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable MCLK DPM when DPM is disabled",
+ return 0);
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1069,10 +1068,13 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
return -EINVAL);
}
- if (smu7_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -EINVAL;
- }
+ smu7_disable_sclk_mclk_dpm(hwmgr);
+
+ PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
+ "Trying to disable voltage DPM when DPM is disabled",
+ return 0);
+
+ smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1228,7 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay);
+ smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1306,6 +1308,12 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to disable thermal auto throttle!", result = tmp_result);
+ if (1 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableAvfs)),
+ "Failed to disable AVFS!",
+ return -EINVAL);
+ }
+
tmp_result = smu7_stop_dpm(hwmgr);
PP_ASSERT_WITH_CODE((tmp_result == 0),
"Failed to stop DPM!", result = tmp_result);
@@ -1452,8 +1460,10 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
- if (table_info != NULL)
- sclk_table = table_info->vdd_dep_on_sclk;
+ if (table_info == NULL)
+ return -EINVAL;
+
+ sclk_table = table_info->vdd_dep_on_sclk;
for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
@@ -3802,13 +3812,15 @@ static inline bool smu7_are_power_levels_equal(const struct smu7_performance_lev
int smu7_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal)
{
- const struct smu7_power_state *psa = cast_const_phw_smu7_power_state(pstate1);
- const struct smu7_power_state *psb = cast_const_phw_smu7_power_state(pstate2);
+ const struct smu7_power_state *psa;
+ const struct smu7_power_state *psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
+ psa = cast_const_phw_smu7_power_state(pstate1);
+ psb = cast_const_phw_smu7_power_state(pstate2);
/* If the two states don't even have the same number of performance levels they cannot be the same state. */
if (psa->performance_level_count != psb->performance_level_count) {
*equal = false;
@@ -4324,6 +4336,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_mclk_od = smu7_set_mclk_od,
.get_clock_by_type = smu7_get_clock_by_type,
.read_sensor = smu7_read_sensor,
+ .dynamic_state_management_disable = smu7_disable_dpm_tasks,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index eda802bc63c8..8c889caba420 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -2458,7 +2458,7 @@ static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
"Invalid VramInfo table.", return -EINVAL);
- if (!data->is_memory_gddr5) {
+ if (!data->is_memory_gddr5 && j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
for (k = 0; k < table->num_entries; k++) {
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- armada_drm_crtc_update(dcrtc);
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
- clk_disable_unprepare(dcrtc->clk);
+ if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
if (dpms_blanked(dpms))
armada_drm_vblank_off(dcrtc);
- else
+ else if (!IS_ERR(dcrtc->clk))
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ dcrtc->dpms = dpms;
+ armada_drm_crtc_update(dcrtc);
+ if (!dpms_blanked(dpms))
drm_crtc_vblank_on(&dcrtc->crtc);
+ else if (!IS_ERR(dcrtc->clk))
+ clk_disable_unprepare(dcrtc->clk);
+ } else if (dcrtc->dpms != dpms) {
+ dcrtc->dpms = dpms;
}
}
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 1df2d33d0b40..ffb2ab389d1d 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -54,9 +54,6 @@ int drm_name_info(struct seq_file *m, void *data)
mutex_lock(&dev->master_mutex);
master = dev->master;
- if (!master)
- goto out_unlock;
-
seq_printf(m, "%s", dev->driver->name);
if (dev->dev)
seq_printf(m, " dev=%s", dev_name(dev->dev));
@@ -65,7 +62,6 @@ int drm_name_info(struct seq_file *m, void *data)
if (dev->unique)
seq_printf(m, " unique=%s", dev->unique);
seq_printf(m, "\n");
-out_unlock:
mutex_unlock(&dev->master_mutex);
return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer. return_target is the ring target address.
- * We need three dwords: event, wait, link.
+ * We need at most 7 dwords in the return target: 2 cache flush +
+ * 2 semaphore stall + 1 event + 1 wait + 1 link.
*/
- return_dwords = 3;
+ return_dwords = 7;
return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
CMD_LINK(cmdbuf, return_dwords, return_target);
/*
- * Append event, wait and link pointing back to the wait
- * command to the ring buffer.
+ * Append a cache flush, stall, event, wait and link pointing back to
+ * the wait command to the ring buffer.
*/
+ if (gpu->exec_state == ETNA_PIPE_2D) {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_PE2D);
+ } else {
+ CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+ VIVS_GL_FLUSH_CACHE_DEPTH |
+ VIVS_GL_FLUSH_CACHE_COLOR);
+ CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+ VIVS_TS_FLUSH_CACHE_FLUSH);
+ }
+ CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+ CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
VIVS_GL_EVENT_FROM_PE);
CMD_WAIT(buffer);
- CMD_LINK(buffer, 2, return_target + 8);
+ CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+ buffer->user_size - 4);
if (drm_debug & DRM_UT_DRIVER)
pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5ce3603e6eac..0370b842d9cc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -748,19 +748,22 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
struct page **pvec;
uintptr_t ptr;
+ unsigned int flags = 0;
pvec = drm_malloc_ab(npages, sizeof(struct page *));
if (!pvec)
return ERR_PTR(-ENOMEM);
+ if (!etnaviv_obj->userptr.ro)
+ flags |= FOLL_WRITE;
+
pinned = 0;
ptr = etnaviv_obj->userptr.ptr;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
- !etnaviv_obj->userptr.ro, 0,
- pvec + pinned, NULL);
+ flags, pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
return (u32)buf->vram_node.start;
mutex_lock(&mmu->lock);
- ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+ ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+ buf->size + SZ_64K);
if (ret < 0) {
mutex_unlock(&mmu->lock);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index aa92decf4233..fbd13fabdf2d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -488,7 +488,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
DCU_MODE_DCU_MODE(DCU_MODE_OFF));
regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
DCU_UPDATE_MODE_READREG);
+ clk_disable_unprepare(fsl_dev->pix_clk);
}
static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
+ clk_prepare_enable(fsl_dev->pix_clk);
regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
DCU_MODE_DCU_MODE_MASK,
DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
return;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto disable_dcu_clk;
- }
-
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
enable_irq(fsl_dev->irq);
return 0;
-
-disable_dcu_clk:
- clk_disable_unprepare(fsl_dev->clk);
- return ret;
}
#endif
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = clk_prepare_enable(fsl_dev->pix_clk);
- if (ret < 0) {
- dev_err(dev, "failed to enable pix clk\n");
- goto unregister_pix_clk;
- }
-
fsl_dev->tcon = fsl_tcon_init(dev);
drm = drm_dev_alloc(driver, dev);
if (IS_ERR(drm)) {
ret = PTR_ERR(drm);
- goto disable_pix_clk;
+ goto unregister_pix_clk;
}
fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
unref:
drm_dev_unref(drm);
-disable_pix_clk:
- clk_disable_unprepare(fsl_dev->pix_clk);
unregister_pix_clk:
clk_unregister(fsl_dev->pix_clk);
disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
clk_disable_unprepare(fsl_dev->clk);
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_unregister(fsl_dev->pix_clk);
drm_put_dev(fsl_dev->drm);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
}
- regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
- DCU_MODE_DCU_MODE_MASK,
- DCU_MODE_DCU_MODE(DCU_MODE_OFF));
- regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
- DCU_UPDATE_MODE_READREG);
}
struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- if (fsl_dev->tcon)
- fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .atomic_check = fsl_dcu_drm_encoder_atomic_check,
- .disable = fsl_dcu_drm_encoder_disable,
- .enable = fsl_dcu_drm_encoder_enable,
-};
-
static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
int ret;
encoder->possible_crtcs = 1;
+
+ /* Use bypass mode for parallel RGB/LVDS encoder */
+ if (fsl_dev->tcon)
+ fsl_tcon_bypass_enable(fsl_dev->tcon);
+
ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
DRM_MODE_ENCODER_LVDS, NULL);
if (ret < 0)
return ret;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e537930c64b5..c6f780f5abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -508,6 +508,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -517,7 +521,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 6a4b020dd0b4..5a26eb4545aa 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
- u32 line_time_us, vblank_lines;
+ u32 vblank_in_pixels;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
- radeon_crtc->hw_mode.clock;
- vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
- radeon_crtc->hw_mode.crtc_vdisplay +
- (radeon_crtc->v_border * 2);
- vblank_time_us = vblank_lines * line_time_us;
+ vblank_in_pixels =
+ radeon_crtc->hw_mode.crtc_htotal *
+ (radeon_crtc->hw_mode.crtc_vblank_end -
+ radeon_crtc->hw_mode.crtc_vdisplay +
+ (radeon_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 50e96d2c593d..e18839d52e3e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -927,6 +927,16 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
return ret;
}
+static void radeon_connector_unregister(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->ddc_bus->has_aux) {
+ drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
+ radeon_connector->ddc_bus->has_aux = false;
+ }
+}
+
static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -984,6 +994,7 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_lvds_set_property,
};
@@ -1111,6 +1122,7 @@ static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1188,6 +1200,7 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.set_property = radeon_connector_set_property,
};
@@ -1519,6 +1532,7 @@ static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1832,6 +1846,7 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_connector_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1841,6 +1856,7 @@ static const struct drm_connector_funcs radeon_edp_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
@@ -1850,6 +1866,7 @@ static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = radeon_lvds_set_property,
+ .early_unregister = radeon_connector_unregister,
.destroy = radeon_connector_destroy,
.force = radeon_dvi_force,
};
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index b8ab30a7dd6d..cdb8cb568c15 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1675,20 +1675,20 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
- radeon_fbdev_fini(rdev);
- kfree(rdev->mode_info.bios_hardcoded_edid);
-
- /* free i2c buses */
- radeon_i2c_fini(rdev);
-
if (rdev->mode_info.mode_config_initialized) {
- radeon_afmt_fini(rdev);
drm_kms_helper_poll_fini(rdev->ddev);
radeon_hpd_fini(rdev);
drm_crtc_force_disable_all(rdev->ddev);
+ radeon_fbdev_fini(rdev);
+ radeon_afmt_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
static bool is_hdtv_mode(const struct drm_display_mode *mode)
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 91c8f4339566..00ea0002b539 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -96,9 +96,10 @@
* 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
+ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 47
+#define KMS_DRIVER_MINOR 48
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 021aa005623f..29f7817af821 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -982,9 +982,8 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ WARN_ON(i2c->has_aux);
i2c_del_adapter(&i2c->adapter);
- if (i2c->has_aux)
- drm_dp_aux_unregister(&i2c->aux);
kfree(i2c);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 455268214b89..3de5e6e21662 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -566,7 +566,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
- r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
+ r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
+ pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7ee9aafbdf74..e402be8821c4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4431,6 +4431,7 @@ static bool si_vm_reg_valid(u32 reg)
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
+ case TA_CS_BC_BASE_ADDR:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index eb220eecba78..65a911ddd509 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1145,6 +1145,7 @@
#define SPI_LB_CU_MASK 0x9354
#define TA_CNTL_AUX 0x9508
+#define TA_CS_BC_BASE_ADDR 0x950C
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 7e2a12c4fed2..1a3ad769f8c8 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -241,8 +241,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
down_read(&current->mm->mmap_sem);
ret = get_user_pages((unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
void *ptr);
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
#define VMWGFX_DRIVER_DATE "20160210"
#define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
#define VMW_RES_HT_ORDER 12
/**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+ vmw_res_rel_normal,
+ vmw_res_rel_nop,
+ vmw_res_rel_cond_nop,
+ vmw_res_rel_max
+};
+
+/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Byte offset into the command buffer where the
* id that needs fixup is located.
+ * @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
- unsigned long offset;
+ u32 offset:29;
+ enum vmw_resource_relocation_type rel_type:3;
};
/**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct vmw_dma_buffer *vbo,
bool validate_as_mob,
uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+ return (unsigned long) b - (unsigned long) a;
+}
/**
* vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
- unsigned long offset)
+ unsigned long offset,
+ enum vmw_resource_relocation_type
+ rel_type)
{
struct vmw_resource_relocation *rel;
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
rel->res = res;
rel->offset = offset;
+ rel->rel_type = rel_type;
list_add_tail(&rel->head, list);
return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
+ /* Validate the struct vmw_resource_relocation member size */
+ BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+ BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
list_for_each_entry(rel, list, head) {
- if (likely(rel->res != NULL))
- cb[rel->offset] = rel->res->id;
- else
- cb[rel->offset] = SVGA_3D_CMD_NOP;
+ u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+ switch (rel->rel_type) {
+ case vmw_res_rel_normal:
+ *addr = rel->res->id;
+ break;
+ case vmw_res_rel_nop:
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ default:
+ if (rel->res->id == -1)
+ *addr = SVGA_3D_CMD_NOP;
+ break;
+ }
}
}
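
The relocation entry now stores a byte-granularity offset packed into a 29-bit field together with a 3-bit relocation type, which is exactly what the two BUILD_BUG_ON() checks above guard: SVGA_CB_MAX_SIZE must fit in 29 bits and vmw_res_rel_max in 3. A standalone sketch of the same pattern, with illustrative names rather than the driver's own structures:

#include <stdint.h>

enum rel_type { REL_NORMAL, REL_NOP, REL_COND_NOP, REL_MAX };

struct reloc {
        uint32_t offset:29;             /* byte offset into the command buffer */
        uint32_t type:3;                /* holds an enum rel_type value */
};

static void apply_reloc(void *cmd_buf, const struct reloc *rel,
                        uint32_t id, uint32_t nop_cmd)
{
        /* byte granularity, so the offset is added to the raw pointer */
        uint32_t *addr = (uint32_t *)((char *)cmd_buf + rel->offset);

        switch (rel->type) {
        case REL_NORMAL:
                *addr = id;             /* patch in the validated resource id */
                break;
        case REL_NOP:
                *addr = nop_cmd;        /* unconditionally neuter the command */
                break;
        default:                        /* REL_COND_NOP */
                if (id == (uint32_t)-1)
                        *addr = nop_cmd;        /* neuter only if validation failed */
                break;
        }
}
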
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
*p_val = NULL;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start,
+ id_loc),
+ vmw_res_rel_normal);
if (unlikely(ret != 0))
return ret;
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id_loc - sw_context->buf_start);
+ vmw_ptr_diff(sw_context->buf_start, id_loc),
+ vmw_res_rel_normal);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
- NULL, &cmd->header.id -
- sw_context->buf_start);
-
- return 0;
+ NULL,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_nop);
}
/**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
* @header: Pointer to the command header in the command stream.
*
* Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
return ret;
/*
- * Add view to the validate list iff it was not created using this
- * command batch.
+ * If the view wasn't created during this command batch, it might
+ * have been removed due to a context swapout, so add a
+ * relocation to conditionally make this command a NOP to avoid
+ * device errors.
*/
- return vmw_view_res_val_add(sw_context, view);
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ view,
+ vmw_ptr_diff(sw_context->buf_start,
+ &cmd->header.id),
+ vmw_res_rel_cond_nop);
}
/**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
cmd->body.shaderResourceViewId);
}
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXTransferFromBuffer body;
+ } *cmd = container_of(header, typeof(*cmd), header);
+ int ret;
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.srcSid, NULL);
+ if (ret != 0)
+ return ret;
+
+ return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ user_surface_converter,
+ &cmd->body.destSid, NULL);
+}
+
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_buffer_copy_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
&vmw_cmd_pred_copy_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+ &vmw_cmd_dx_transfer_from_buffer,
+ true, false, true),
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
int ret;
*header = NULL;
- if (!dev_priv->cman || kernel_commands)
- return kernel_commands;
-
if (command_size > SVGA_CB_MAX_SIZE) {
DRM_ERROR("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
+ if (!dev_priv->cman || kernel_commands)
+ return kernel_commands;
+
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
ttm_bo_unref(&query_val.bo);
ttm_bo_unref(&pinned_val.bo);
vmw_dmabuf_unreference(&dev_priv->pinned_bo);
- DRM_INFO("Dummy query bo pin count: %d\n",
- dev_priv->dummy_query_bo->pin_count);
-
out_unlock:
return;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
long lret;
- if (nonblock)
- return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
- lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+ lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+ nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
submit_size = vmw_surface_define_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
uint8_t *cmd;
struct vmw_private *dev_priv = res->dev_priv;
- BUG_ON(val_buf->bo == NULL);
-
+ BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"DMA.\n");
return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
submit_size = vmw_surface_destroy_size();
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"eviction.\n");
return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
int ret;
struct vmw_resource *res = &srf->res;
- BUG_ON(res_free == NULL);
+ BUG_ON(!res_free);
if (!dev_priv->has_mob)
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_surface_create_req *req = &arg->req;
struct drm_vmw_surface_arg *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
int ret;
int i, j;
uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = num_sizes;
user_srf->size = size;
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
+ srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+ req->size_addr,
+ sizeof(*srf->sizes) * srf->num_sizes);
+ if (IS_ERR(srf->sizes)) {
+ ret = PTR_ERR(srf->sizes);
goto out_no_sizes;
}
- srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
- GFP_KERNEL);
- if (unlikely(srf->offsets == NULL)) {
+ srf->offsets = kmalloc_array(srf->num_sizes,
+ sizeof(*srf->offsets),
+ GFP_KERNEL);
+ if (unlikely(!srf->offsets)) {
ret = -ENOMEM;
goto out_no_offsets;
}
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_no_copy;
- }
-
srf->base_size = *srf->sizes;
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
- if (unlikely(base == NULL)) {
+ if (unlikely(!base)) {
DRM_ERROR("Could not find surface to reference.\n");
goto out_no_lookup;
}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
cmd = vmw_fifo_reserve(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"creation.\n");
ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd1 == NULL)) {
+ if (unlikely(!cmd1)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"binding.\n");
return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"unbinding.\n");
return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
+ if (unlikely(!cmd)) {
DRM_ERROR("Failed reserving FIFO space for surface "
"destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (srf->res.backup == NULL) {
+ if (!srf->res.backup) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
}
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
- if (unlikely(user_srf == NULL)) {
+ if (unlikely(!user_srf)) {
ret = -ENOMEM;
goto out_no_user_srf;
}
diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c
index 8fd4bf77f264..818ea7d93533 100644
--- a/drivers/hid/hid-dr.c
+++ b/drivers/hid/hid-dr.c
@@ -234,58 +234,6 @@ static __u8 pid0011_rdesc_fixed[] = {
0xC0 /* End Collection */
};
-static __u8 pid0006_rdesc_fixed[] = {
- 0x05, 0x01, /* Usage Page (Generic Desktop) */
- 0x09, 0x04, /* Usage (Joystick) */
- 0xA1, 0x01, /* Collection (Application) */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x05, /* Report Count (5) */
- 0x15, 0x00, /* Logical Minimum (0) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x35, 0x00, /* Physical Minimum (0) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x09, 0x30, /* Usage (X) */
- 0x09, 0x33, /* Usage (Ry) */
- 0x09, 0x32, /* Usage (Z) */
- 0x09, 0x31, /* Usage (Y) */
- 0x09, 0x34, /* Usage (Ry) */
- 0x81, 0x02, /* Input (Variable) */
- 0x75, 0x04, /* Report Size (4) */
- 0x95, 0x01, /* Report Count (1) */
- 0x25, 0x07, /* Logical Maximum (7) */
- 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
- 0x65, 0x14, /* Unit (Centimeter) */
- 0x09, 0x39, /* Usage (Hat switch) */
- 0x81, 0x42, /* Input (Variable) */
- 0x65, 0x00, /* Unit (None) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x0C, /* Report Count (12) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x05, 0x09, /* Usage Page (Button) */
- 0x19, 0x01, /* Usage Minimum (0x01) */
- 0x29, 0x0C, /* Usage Maximum (0x0C) */
- 0x81, 0x02, /* Input (Variable) */
- 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined) */
- 0x75, 0x01, /* Report Size (1) */
- 0x95, 0x08, /* Report Count (8) */
- 0x25, 0x01, /* Logical Maximum (1) */
- 0x45, 0x01, /* Physical Maximum (1) */
- 0x09, 0x01, /* Usage (0x01) */
- 0x81, 0x02, /* Input (Variable) */
- 0xC0, /* End Collection */
- 0xA1, 0x02, /* Collection (Logical) */
- 0x75, 0x08, /* Report Size (8) */
- 0x95, 0x07, /* Report Count (7) */
- 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
- 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
- 0x09, 0x02, /* Usage (0x02) */
- 0x91, 0x02, /* Output (Variable) */
- 0xC0, /* End Collection */
- 0xC0 /* End Collection */
-};
-
static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -296,16 +244,34 @@ static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pid0011_rdesc_fixed);
}
break;
- case 0x0006:
- if (*rsize == sizeof(pid0006_rdesc_fixed)) {
- rdesc = pid0006_rdesc_fixed;
- *rsize = sizeof(pid0006_rdesc_fixed);
- }
- break;
}
return rdesc;
}
+#define map_abs(c) hid_map_usage(hi, usage, bit, max, EV_ABS, (c))
+#define map_rel(c) hid_map_usage(hi, usage, bit, max, EV_REL, (c))
+
+static int dr_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid) {
+ /*
+ * revert to the old hid-input behavior where axes
+ * can be randomly assigned when hid->usage is reused.
+ */
+ case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE)
+ map_rel(usage->hid & 0xf);
+ else
+ map_abs(usage->hid & 0xf);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
@@ -352,6 +318,7 @@ static struct hid_driver dr_driver = {
.id_table = dr_devices,
.report_fixup = dr_report_fixup,
.probe = dr_probe,
+ .input_mapping = dr_input_mapping,
};
module_hid_driver(dr_driver);
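
The mapping added above works because the Generic Desktop usages X..Rz (0x30..0x35) and the Linux ABS_*/REL_* axis codes share the same low nibble, so masking the usage with 0xf yields the matching axis code. A tiny illustrative check, using the standard constant values rather than the kernel headers:

#include <stdio.h>

int main(void)
{
        unsigned int hid_gd_x  = 0x00010030;    /* Generic Desktop / X  */
        unsigned int hid_gd_rz = 0x00010035;    /* Generic Desktop / Rz */

        printf("%u\n", hid_gd_x  & 0xf);        /* 0 == ABS_X  / REL_X  */
        printf("%u\n", hid_gd_rz & 0xf);        /* 5 == ABS_RZ / REL_RZ */
        return 0;
}
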
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd59c79eebdd..6cfb5cacc253 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -64,6 +64,9 @@
#define USB_VENDOR_ID_AKAI 0x2011
#define USB_DEVICE_ID_AKAI_MPKMINI2 0x0715
+#define USB_VENDOR_ID_AKAI_09E8 0x09E8
+#define USB_DEVICE_ID_AKAI_09E8_MIDIMIX 0x0031
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
diff --git a/drivers/hid/hid-led.c b/drivers/hid/hid-led.c
index d8d55f37b4f5..d3e1ab162f7c 100644
--- a/drivers/hid/hid-led.c
+++ b/drivers/hid/hid-led.c
@@ -100,6 +100,7 @@ struct hidled_device {
const struct hidled_config *config;
struct hid_device *hdev;
struct hidled_rgb *rgb;
+ u8 *buf;
struct mutex lock;
};
@@ -118,13 +119,19 @@ static int hidled_send(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
+ /*
+ * buffer provided to hid_hw_raw_request must not be on the stack
+ * and must not be part of a data structure
+ */
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
if (ldev->config->report_type == RAW_REQUEST)
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
else if (ldev->config->report_type == OUTPUT_REPORT)
- ret = hid_hw_output_report(ldev->hdev, buf,
+ ret = hid_hw_output_report(ldev->hdev, ldev->buf,
ldev->config->report_size);
else
ret = -EINVAL;
@@ -147,17 +154,21 @@ static int hidled_recv(struct hidled_device *ldev, __u8 *buf)
mutex_lock(&ldev->lock);
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ memcpy(ldev->buf, buf, ldev->config->report_size);
+
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
if (ret < 0)
goto err;
- ret = hid_hw_raw_request(ldev->hdev, buf[0], buf,
+ ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf,
ldev->config->report_size,
HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
+
+ memcpy(buf, ldev->buf, ldev->config->report_size);
err:
mutex_unlock(&ldev->lock);
@@ -447,6 +458,10 @@ static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ldev)
return -ENOMEM;
+ ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL);
+ if (!ldev->buf)
+ return -ENOMEM;
+
ret = hid_parse(hdev);
if (ret)
return ret;
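
The comment added in hidled_send() states the key constraint: buffers handed to hid_hw_raw_request() may be used for DMA by the transport driver, so they must be heap allocations in their own right, not stack variables or members of a larger structure. A minimal sketch of the resulting bounce-buffer pattern, assuming a hypothetical driver-private struct; this is not the hid-led code itself:

struct my_dev {
        struct hid_device *hdev;
        u8 *buf;                        /* devm_kmalloc()'d, DMA-safe */
        struct mutex lock;
};

static int my_send_report(struct my_dev *dev, const u8 *report, size_t len)
{
        int ret;

        mutex_lock(&dev->lock);
        memcpy(dev->buf, report, len);  /* never pass 'report' directly */
        ret = hid_hw_raw_request(dev->hdev, dev->buf[0], dev->buf, len,
                                 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
        mutex_unlock(&dev->lock);

        return ret;
}
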
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0a0eca5da47d..354d49ea36dd 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -56,6 +56,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 98114cef1e43..2fe1828bd10b 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -194,10 +194,10 @@ static struct adm9240_data *adm9240_update_device(struct device *dev)
* 0.5'C per two measurement cycles thus ignore possible
* but unlikely aliasing error on lsb reading. --Grant
*/
- data->temp = ((i2c_smbus_read_byte_data(client,
+ data->temp = (i2c_smbus_read_byte_data(client,
ADM9240_REG_TEMP) << 8) |
i2c_smbus_read_byte_data(client,
- ADM9240_REG_TEMP_CONF)) / 128;
+ ADM9240_REG_TEMP_CONF);
for (i = 0; i < 2; i++) { /* read fans */
data->fan[i] = i2c_smbus_read_byte_data(client,
@@ -263,7 +263,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct adm9240_data *data = adm9240_update_device(dev);
- return sprintf(buf, "%d\n", data->temp * 500); /* 9-bit value */
+ return sprintf(buf, "%d\n", data->temp / 128 * 500); /* 9-bit value */
}
static ssize_t show_max(struct device *dev, struct device_attribute *devattr,
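
With this change the driver caches the raw 16-bit register pair and scales it only when the sysfs attribute is read. Assuming, as the code does, that the MSB register holds whole degrees and the top bit of TEMP_CONF adds 0.5 degC, a quick worked check of the /128 * 500 conversion:

#include <stdio.h>

int main(void)
{
        int msb  = 0x19;                /* 25 degC from ADM9240_REG_TEMP   */
        int conf = 0x80;                /* half-degree bit from TEMP_CONF  */
        int temp = (msb << 8) | conf;   /* cached raw value: 0x1980 (6528) */

        /* /128 keeps the 9 temperature bits, *500 converts to millidegrees */
        printf("%d\n", temp / 128 * 500);       /* prints 25500, i.e. 25.5 degC */
        return 0;
}
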
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index bef84e085973..c1b9275978f9 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -268,11 +268,13 @@ static int max31790_read_pwm(struct device *dev, u32 attr, int channel,
long *val)
{
struct max31790_data *data = max31790_update_device(dev);
- u8 fan_config = data->fan_config[channel];
+ u8 fan_config;
if (IS_ERR(data))
return PTR_ERR(data);
+ fan_config = data->fan_config[channel];
+
switch (attr) {
case hwmon_pwm_input:
*val = data->pwm[channel] >> 8;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c68746ce6624..224ad274ea0b 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -94,6 +94,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_attrs |= DMA_ATTR_WRITE_BARRIER;
@@ -183,6 +184,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -190,7 +194,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 75077a018675..1f0fe3217f23 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages_remote(owning_process, owning_mm,
user_virt, gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT,
- 0, local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6c00d04b8b28..c6fe89d79248 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL);
+ ret = get_user_pages(uaddr & PAGE_MASK, 1, FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2d2b94fd3633..75f08624ac05 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -67,7 +67,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index a0b6ebee4d8a..1ccee6ea5bc3 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -111,6 +111,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int i;
int flags;
dma_addr_t pa;
+ unsigned int gup_flags;
if (!can_do_mlock())
return -EPERM;
@@ -135,6 +136,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
@@ -142,7 +145,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = get_user_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 82b0b5daf3f5..bc0af3307bbf 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -158,8 +158,8 @@ config PIC32_EVIC
select IRQ_DOMAIN
config JCORE_AIC
- bool "J-Core integrated AIC"
- depends on OF && (SUPERH || COMPILE_TEST)
+ bool "J-Core integrated AIC" if COMPILE_TEST
+ depends on OF
select IRQ_DOMAIN
help
Support for the J-Core integrated AIC.
diff --git a/drivers/irqchip/irq-eznps.c b/drivers/irqchip/irq-eznps.c
index efbf0e4304b7..2a7a38830a8d 100644
--- a/drivers/irqchip/irq-eznps.c
+++ b/drivers/irqchip/irq-eznps.c
@@ -85,7 +85,7 @@ static void nps400_irq_eoi_global(struct irq_data *irqd)
nps_ack_gic();
}
-static void nps400_irq_eoi(struct irq_data *irqd)
+static void nps400_irq_ack(struct irq_data *irqd)
{
unsigned int __maybe_unused irq = irqd_to_hwirq(irqd);
@@ -103,7 +103,7 @@ static struct irq_chip nps400_irq_chip_percpu = {
.name = "NPS400 IC",
.irq_mask = nps400_irq_mask,
.irq_unmask = nps400_irq_unmask,
- .irq_eoi = nps400_irq_eoi,
+ .irq_ack = nps400_irq_ack,
};
static int nps400_irq_map(struct irq_domain *d, unsigned int virq,
@@ -135,7 +135,7 @@ static const struct irq_domain_ops nps400_irq_ops = {
static int __init nps400_of_init(struct device_node *node,
struct device_node *parent)
{
- static struct irq_domain *nps400_root_domain;
+ struct irq_domain *nps400_root_domain;
if (parent) {
pr_err("DeviceTree incore ic not a root irq controller\n");
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 003495d91f9c..c5dee300e8a3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1023,7 +1023,7 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = readq_relaxed(its->base + GITS_TYPER);
+ u64 typer = gic_read_typer(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
@@ -1198,7 +1198,7 @@ static void its_cpu_init_collection(void)
* We now have to bind each collection to its target
* redistributor.
*/
- if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
+ if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
/*
* This ITS wants the physical address of the
* redistributor.
@@ -1208,7 +1208,7 @@ static void its_cpu_init_collection(void)
/*
* This ITS wants a linear CPU number.
*/
- target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
+ target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
@@ -1691,7 +1691,7 @@ static int __init its_probe_one(struct resource *res,
INIT_LIST_HEAD(&its->its_device_list);
its->base = its_base;
its->phys_base = res->start;
- its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
+ its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
its->numa_node = numa_node;
its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
@@ -1763,7 +1763,7 @@ out_unmap:
static bool gic_rdists_supports_plpis(void)
{
- return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
+ return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9b81bd8b929c..19d642eae096 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -153,7 +153,7 @@ static void gic_enable_redist(bool enable)
return; /* No PM support in this redistributor */
}
- while (count--) {
+ while (--count) {
val = readl_relaxed(rbase + GICR_WAKER);
if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
break;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 58e5b4e87056..d6c404b3584d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1279,7 +1279,7 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
*/
*base += 0xf000;
cpuif_res.start += 0xf000;
- pr_warn("GIC: Adjusting CPU interface base to %pa",
+ pr_warn("GIC: Adjusting CPU interface base to %pa\n",
&cpuif_res.start);
}
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index 84b01dec277d..033bccb41455 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -25,12 +25,30 @@
static struct irq_chip jcore_aic;
+/*
+ * The J-Core AIC1 and AIC2 are cpu-local interrupt controllers and do
+ * not distinguish or use distinct irq number ranges for per-cpu event
+ * interrupts (timer, IPI). Since information to determine whether a
+ * particular irq number should be treated as per-cpu is not available
+ * at mapping time, we use a wrapper handler function which chooses
+ * the right handler at runtime based on whether IRQF_PERCPU was used
+ * when requesting the irq.
+ */
+
+static void handle_jcore_irq(struct irq_desc *desc)
+{
+ if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
+ handle_percpu_irq(desc);
+ else
+ handle_simple_irq(desc);
+}
+
static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
struct irq_chip *aic = d->host_data;
- irq_set_chip_and_handler(irq, aic, handle_simple_irq);
+ irq_set_chip_and_handler(irq, aic, handle_jcore_irq);
return 0;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe8a4cb..ceff415f201c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -124,6 +124,15 @@ config MAILBOX_TEST
Test client to help with testing new Controller driver
implementations.
+config TEGRA_HSP_MBOX
+ bool "Tegra HSP (Hardware Synchronization Primitives) Driver"
+ depends on ARCH_TEGRA_186_SOC
+ help
+ The Tegra HSP driver provides interprocessor communication
+ between the remote processors and the host processor on Tegra186
+ and later SoCs. Say Y here if you want this support.
+ If unsure, say N.
+
config XGENE_SLIMPRO_MBOX
tristate "APM SoC X-Gene SLIMpro Mailbox Controller"
depends on ARCH_XGENE
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed8fea9..7dde4f609ae8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
+
+obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
new file mode 100644
index 000000000000..0cde356c11ab
--- /dev/null
+++ b/drivers/mailbox/tegra-hsp.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/mailbox/tegra186-hsp.h>
+
+#define HSP_INT_DIMENSIONING 0x380
+#define HSP_nSM_SHIFT 0
+#define HSP_nSS_SHIFT 4
+#define HSP_nAS_SHIFT 8
+#define HSP_nDB_SHIFT 12
+#define HSP_nSI_SHIFT 16
+#define HSP_nINT_MASK 0xf
+
+#define HSP_DB_TRIGGER 0x0
+#define HSP_DB_ENABLE 0x4
+#define HSP_DB_RAW 0x8
+#define HSP_DB_PENDING 0xc
+
+#define HSP_DB_CCPLEX 1
+#define HSP_DB_BPMP 3
+#define HSP_DB_MAX 7
+
+struct tegra_hsp_channel;
+struct tegra_hsp;
+
+struct tegra_hsp_channel {
+ struct tegra_hsp *hsp;
+ struct mbox_chan *chan;
+ void __iomem *regs;
+};
+
+struct tegra_hsp_doorbell {
+ struct tegra_hsp_channel channel;
+ struct list_head list;
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_db_map {
+ const char *name;
+ unsigned int master;
+ unsigned int index;
+};
+
+struct tegra_hsp_soc {
+ const struct tegra_hsp_db_map *map;
+};
+
+struct tegra_hsp {
+ const struct tegra_hsp_soc *soc;
+ struct mbox_controller mbox;
+ void __iomem *regs;
+ unsigned int irq;
+ unsigned int num_sm;
+ unsigned int num_as;
+ unsigned int num_ss;
+ unsigned int num_db;
+ unsigned int num_si;
+ spinlock_t lock;
+
+ struct list_head doorbells;
+};
+
+static inline struct tegra_hsp *
+to_tegra_hsp(struct mbox_controller *mbox)
+{
+ return container_of(mbox, struct tegra_hsp, mbox);
+}
+
+static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
+{
+ return readl(hsp->regs + offset);
+}
+
+static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
+ unsigned int offset)
+{
+ writel(value, hsp->regs + offset);
+}
+
+static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
+ unsigned int offset)
+{
+ return readl(channel->regs + offset);
+}
+
+static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
+ u32 value, unsigned int offset)
+{
+ writel(value, channel->regs + offset);
+}
+
+static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
+{
+ u32 value;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);
+
+ return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
+}
+
+static struct tegra_hsp_doorbell *
+__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *entry;
+
+ list_for_each_entry(entry, &hsp->doorbells, list)
+ if (entry->master == master)
+ return entry;
+
+ return NULL;
+}
+
+static struct tegra_hsp_doorbell *
+tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return db;
+}
+
+static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
+{
+ struct tegra_hsp *hsp = data;
+ struct tegra_hsp_doorbell *db;
+ unsigned long master, value;
+
+ db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!db)
+ return IRQ_NONE;
+
+ value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
+ tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);
+
+ spin_lock(&hsp->lock);
+
+ for_each_set_bit(master, &value, hsp->mbox.num_chans) {
+ struct tegra_hsp_doorbell *db;
+
+ db = __tegra_hsp_doorbell_get(hsp, master);
+ /*
+ * Depending on the bootloader chain, the CCPLEX doorbell controller
+ * may come up with some doorbells already enabled, which means that
+ * the interrupt can fire as soon as it is requested.
+ *
+ * In that case, db->channel.chan will still be NULL here and
+ * cause a crash if not properly guarded.
+ *
+ * It remains to be seen if ignoring the doorbell in that case
+ * is the correct solution.
+ */
+ if (db && db->channel.chan)
+ mbox_chan_received_data(db->channel.chan, NULL);
+ }
+
+ spin_unlock(&hsp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct tegra_hsp_channel *
+tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
+ unsigned int master, unsigned int index)
+{
+ struct tegra_hsp_doorbell *db;
+ unsigned int offset;
+ unsigned long flags;
+
+ db = kzalloc(sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return ERR_PTR(-ENOMEM);
+
+ offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) << 16;
+ offset += index * 0x100;
+
+ db->channel.regs = hsp->regs + offset;
+ db->channel.hsp = hsp;
+
+ db->name = kstrdup_const(name, GFP_KERNEL);
+ db->master = master;
+ db->index = index;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+ list_add_tail(&db->list, &hsp->doorbells);
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return &db->channel;
+}
+
+static void __tegra_hsp_doorbell_destroy(struct tegra_hsp_doorbell *db)
+{
+ list_del(&db->list);
+ kfree_const(db->name);
+ kfree(db);
+}
+
+static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+
+ tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);
+
+ return 0;
+}
+
+static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ if (db->master >= hsp->mbox.num_chans) {
+ dev_err(hsp->mbox.dev,
+ "invalid master ID %u for HSP channel\n",
+ db->master);
+ return -EINVAL;
+ }
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return -ENODEV;
+
+ if (!tegra_hsp_doorbell_can_ring(db))
+ return -ENODEV;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value |= BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return 0;
+}
+
+static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
+{
+ struct tegra_hsp_doorbell *db = chan->con_priv;
+ struct tegra_hsp *hsp = db->channel.hsp;
+ struct tegra_hsp_doorbell *ccplex;
+ unsigned long flags;
+ u32 value;
+
+ ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
+ if (!ccplex)
+ return;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
+ value &= ~BIT(db->master);
+ tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static const struct mbox_chan_ops tegra_hsp_doorbell_ops = {
+ .send_data = tegra_hsp_doorbell_send_data,
+ .startup = tegra_hsp_doorbell_startup,
+ .shutdown = tegra_hsp_doorbell_shutdown,
+};
+
+static struct mbox_chan *of_tegra_hsp_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
+ struct tegra_hsp *hsp = to_tegra_hsp(mbox);
+ unsigned int type = args->args[0];
+ unsigned int master = args->args[1];
+ struct tegra_hsp_doorbell *db;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ unsigned int i;
+
+ switch (type) {
+ case TEGRA_HSP_MBOX_TYPE_DB:
+ db = tegra_hsp_doorbell_get(hsp, master);
+ if (db)
+ channel = &db->channel;
+
+ break;
+
+ default:
+ break;
+ }
+
+ if (IS_ERR(channel))
+ return ERR_CAST(channel);
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ for (i = 0; i < hsp->mbox.num_chans; i++) {
+ chan = &hsp->mbox.chans[i];
+ if (!chan->con_priv) {
+ chan->con_priv = channel;
+ channel->chan = chan;
+ break;
+ }
+
+ chan = NULL;
+ }
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+
+ return chan ?: ERR_PTR(-EBUSY);
+}
+
+static void tegra_hsp_remove_doorbells(struct tegra_hsp *hsp)
+{
+ struct tegra_hsp_doorbell *db, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hsp->lock, flags);
+
+ list_for_each_entry_safe(db, tmp, &hsp->doorbells, list)
+ __tegra_hsp_doorbell_destroy(db);
+
+ spin_unlock_irqrestore(&hsp->lock, flags);
+}
+
+static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
+{
+ const struct tegra_hsp_db_map *map = hsp->soc->map;
+ struct tegra_hsp_channel *channel;
+
+ while (map->name) {
+ channel = tegra_hsp_doorbell_create(hsp, map->name,
+ map->master, map->index);
+ if (IS_ERR(channel)) {
+ tegra_hsp_remove_doorbells(hsp);
+ return PTR_ERR(channel);
+ }
+
+ map++;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_probe(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp;
+ struct resource *res;
+ u32 value;
+ int err;
+
+ hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
+ if (!hsp)
+ return -ENOMEM;
+
+ hsp->soc = of_device_get_match_data(&pdev->dev);
+ INIT_LIST_HEAD(&hsp->doorbells);
+ spin_lock_init(&hsp->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hsp->regs))
+ return PTR_ERR(hsp->regs);
+
+ value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
+ hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
+ hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+ err = platform_get_irq_byname(pdev, "doorbell");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get doorbell IRQ: %d\n", err);
+ return err;
+ }
+
+ hsp->irq = err;
+
+ hsp->mbox.of_xlate = of_tegra_hsp_xlate;
+ hsp->mbox.num_chans = 32;
+ hsp->mbox.dev = &pdev->dev;
+ hsp->mbox.txdone_irq = false;
+ hsp->mbox.txdone_poll = false;
+ hsp->mbox.ops = &tegra_hsp_doorbell_ops;
+
+ hsp->mbox.chans = devm_kcalloc(&pdev->dev, hsp->mbox.num_chans,
+ sizeof(*hsp->mbox.chans),
+ GFP_KERNEL);
+ if (!hsp->mbox.chans)
+ return -ENOMEM;
+
+ err = tegra_hsp_add_doorbells(hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to add doorbells: %d\n", err);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, hsp);
+
+ err = mbox_controller_register(&hsp->mbox);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register mailbox: %d\n", err);
+ tegra_hsp_remove_doorbells(hsp);
+ return err;
+ }
+
+ err = devm_request_irq(&pdev->dev, hsp->irq, tegra_hsp_doorbell_irq,
+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), hsp);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
+ hsp->irq, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int tegra_hsp_remove(struct platform_device *pdev)
+{
+ struct tegra_hsp *hsp = platform_get_drvdata(pdev);
+
+ mbox_controller_unregister(&hsp->mbox);
+ tegra_hsp_remove_doorbells(hsp);
+
+ return 0;
+}
+
+static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
+ { "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
+ { "bpmp", TEGRA_HSP_DB_MASTER_BPMP, HSP_DB_BPMP, },
+ { /* sentinel */ }
+};
+
+static const struct tegra_hsp_soc tegra186_hsp_soc = {
+ .map = tegra186_hsp_db_map,
+};
+
+static const struct of_device_id tegra_hsp_match[] = {
+ { .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
+ { }
+};
+
+static struct platform_driver tegra_hsp_driver = {
+ .driver = {
+ .name = "tegra-hsp",
+ .of_match_table = tegra_hsp_match,
+ },
+ .probe = tegra_hsp_probe,
+ .remove = tegra_hsp_remove,
+};
+
+static int __init tegra_hsp_init(void)
+{
+ return platform_driver_register(&tegra_hsp_driver);
+}
+core_initcall(tegra_hsp_init);
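
of_tegra_hsp_xlate() expects two specifier cells per client (a channel type and a master ID), so a consumer would reference a doorbell as, say, mboxes = <&hsp TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP> and then go through the generic mailbox client API. A minimal client sketch; the names and device-tree wiring are illustrative assumptions, not part of this patch:

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

static void my_rx(struct mbox_client *cl, void *data)
{
        /* doorbell channels deliver no payload; data is NULL */
        dev_info(cl->dev, "doorbell rang\n");
}

static int my_probe(struct platform_device *pdev)
{
        struct mbox_client *cl;
        struct mbox_chan *chan;
        int ret;

        cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        cl->dev = &pdev->dev;
        cl->rx_callback = my_rx;

        chan = mbox_request_channel(cl, 0);     /* first "mboxes" entry */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* ring the remote side's doorbell; the message pointer is unused */
        ret = mbox_send_message(chan, NULL);
        return ret < 0 ? ret : 0;
}
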
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 4769469fe842..2c9232ef7baa 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -124,8 +124,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Get user pages for DMA Xfer */
- err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count, 0,
- 1, dma->map);
+ err = get_user_pages_unlocked(user_dma.uaddr, user_dma.page_count,
+ dma->map, FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index b094054cda6e..f7299d3d8244 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,11 +76,12 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Get user pages for DMA Xfer */
y_pages = get_user_pages_unlocked(y_dma.uaddr,
- y_dma.page_count, 0, 1, &dma->map[0]);
+ y_dma.page_count, &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages_unlocked(uv_dma.uaddr,
- uv_dma.page_count, 0, 1, &dma->map[y_pages]);
+ uv_dma.page_count, &dma->map[y_pages],
+ FOLL_FORCE);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index e668dde6d857..a31b95cb3b09 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
if (!vec)
return -ENOMEM;
- ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
if (ret != 1) {
frame_vector_destroy(vec);
return -EINVAL;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f300f060b3f3..1db0af6c7f94 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
@@ -178,12 +179,14 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1c..1cd322e939c7 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 4b4c0c3c3d2f..ec80e35c8dfe 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -134,6 +134,14 @@ config MTK_SMI
mainly help enable/disable iommu and control the power domain and
clocks for each local arbiter.
+config DA8XX_DDRCTL
+ bool "Texas Instruments da8xx DDR2/mDDR driver"
+ depends on ARCH_DAVINCI_DA8XX
+ help
+ This driver is for the DDR2/mDDR Memory Controller present on
+ Texas Instruments da8xx SoCs. It's used to tweak various memory
+ controller configuration options.
+
source "drivers/memory/samsung/Kconfig"
source "drivers/memory/tegra/Kconfig"
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index b20ae38b5bfb..e88097fbc085 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
obj-$(CONFIG_JZ4780_NEMC) += jz4780-nemc.o
obj-$(CONFIG_MTK_SMI) += mtk-smi.o
+obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o
obj-$(CONFIG_SAMSUNG_MC) += samsung/
obj-$(CONFIG_TEGRA_MC) += tegra/
diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c
index b5ed3bd082b5..047d6fcdcec2 100644
--- a/drivers/memory/atmel-ebi.c
+++ b/drivers/memory/atmel-ebi.c
@@ -657,7 +657,7 @@ static int at91_ebi_dev_disable(struct at91_ebi *ebi, struct device_node *np)
return -ENOMEM;
newprop->value = devm_kstrdup(dev, "disabled", GFP_KERNEL);
- if (!newprop->name)
+ if (!newprop->value)
return -ENOMEM;
newprop->length = sizeof("disabled");
diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c
index 12080b05e3e6..b418b39af180 100644
--- a/drivers/memory/atmel-sdramc.c
+++ b/drivers/memory/atmel-sdramc.c
@@ -85,8 +85,4 @@ static struct platform_driver atmel_ramc_driver = {
},
};
-static int __init atmel_ramc_init(void)
-{
- return platform_driver_register(&atmel_ramc_driver);
-}
-device_initcall(atmel_ramc_init);
+builtin_platform_driver(atmel_ramc_driver);
diff --git a/drivers/memory/da8xx-ddrctl.c b/drivers/memory/da8xx-ddrctl.c
new file mode 100644
index 000000000000..a20e7bbbcbe0
--- /dev/null
+++ b/drivers/memory/da8xx-ddrctl.c
@@ -0,0 +1,175 @@
+/*
+ * TI da8xx DDR2/mDDR controller driver
+ *
+ * Copyright (C) 2016 BayLibre SAS
+ *
+ * Author:
+ * Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+/*
+ * REVISIT: Linux doesn't have a good framework for the kind of performance
+ * knobs this driver controls. We can't use device tree properties, as this is
+ * hardware configuration rather than hardware description. We also don't want to
+ * commit to maintaining some random sysfs attributes.
+ *
+ * For now we just hardcode the register values for the boards that need
+ * some changes (as is the case for the LCD controller on da850-lcdk - the
+ * first board we support here). When linux gets an appropriate framework,
+ * we'll easily convert the driver to it.
+ */
+
+struct da8xx_ddrctl_config_knob {
+ const char *name;
+ u32 reg;
+ u32 mask;
+ u32 shift;
+};
+
+static const struct da8xx_ddrctl_config_knob da8xx_ddrctl_knobs[] = {
+ {
+ .name = "da850-pbbpr",
+ .reg = 0x20,
+ .mask = 0xffffff00,
+ .shift = 0,
+ },
+};
+
+struct da8xx_ddrctl_setting {
+ const char *name;
+ u32 val;
+};
+
+struct da8xx_ddrctl_board_settings {
+ const char *board;
+ const struct da8xx_ddrctl_setting *settings;
+};
+
+static const struct da8xx_ddrctl_setting da850_lcdk_ddrctl_settings[] = {
+ {
+ .name = "da850-pbbpr",
+ .val = 0x20,
+ },
+ { }
+};
+
+static const struct da8xx_ddrctl_board_settings da8xx_ddrctl_board_confs[] = {
+ {
+ .board = "ti,da850-lcdk",
+ .settings = da850_lcdk_ddrctl_settings,
+ },
+};
+
+static const struct da8xx_ddrctl_config_knob *
+da8xx_ddrctl_match_knob(const struct da8xx_ddrctl_setting *setting)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_knobs); i++) {
+ knob = &da8xx_ddrctl_knobs[i];
+
+ if (strcmp(knob->name, setting->name) == 0)
+ return knob;
+ }
+
+ return NULL;
+}
+
+static const struct da8xx_ddrctl_setting *da8xx_ddrctl_get_board_settings(void)
+{
+ const struct da8xx_ddrctl_board_settings *board_settings;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(da8xx_ddrctl_board_confs); i++) {
+ board_settings = &da8xx_ddrctl_board_confs[i];
+
+ if (of_machine_is_compatible(board_settings->board))
+ return board_settings->settings;
+ }
+
+ return NULL;
+}
+
+static int da8xx_ddrctl_probe(struct platform_device *pdev)
+{
+ const struct da8xx_ddrctl_config_knob *knob;
+ const struct da8xx_ddrctl_setting *setting;
+ struct device_node *node;
+ struct resource *res;
+ void __iomem *ddrctl;
+ struct device *dev;
+ u32 reg;
+
+ dev = &pdev->dev;
+ node = dev->of_node;
+
+ setting = da8xx_ddrctl_get_board_settings();
+ if (!setting) {
+ dev_err(dev, "no settings for board '%s'\n",
+ of_flat_dt_get_machine_name());
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ddrctl = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ddrctl)) {
+ dev_err(dev, "unable to map memory controller registers\n");
+ return PTR_ERR(ddrctl);
+ }
+
+ for (; setting->name; setting++) {
+ knob = da8xx_ddrctl_match_knob(setting);
+ if (!knob) {
+ dev_warn(dev,
+ "no such config option: %s\n", setting->name);
+ continue;
+ }
+
+ if (knob->reg + sizeof(u32) > resource_size(res)) {
+ dev_warn(dev,
+ "register offset of '%s' exceeds mapped memory size\n",
+ knob->name);
+ continue;
+ }
+
+ reg = readl(ddrctl + knob->reg);
+ reg &= knob->mask;
+ reg |= setting->val << knob->shift;
+
+ dev_dbg(dev, "writing 0x%08x to %s\n", reg, setting->name);
+
+ writel(reg, ddrctl + knob->reg);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id da8xx_ddrctl_of_match[] = {
+ { .compatible = "ti,da850-ddr-controller", },
+ { },
+};
+
+static struct platform_driver da8xx_ddrctl_driver = {
+ .probe = da8xx_ddrctl_probe,
+ .driver = {
+ .name = "da850-ddr-controller",
+ .of_match_table = da8xx_ddrctl_of_match,
+ },
+};
+module_platform_driver(da8xx_ddrctl_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("TI da8xx DDR2/mDDR controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
index d34bc3530385..2e3cf012ef48 100644
--- a/drivers/memstick/host/rtsx_usb_ms.c
+++ b/drivers/memstick/host/rtsx_usb_ms.c
@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
int rc;
if (!host->req) {
+ pm_runtime_get_sync(ms_dev(host));
do {
rc = memstick_next_req(msh, &host->req);
dev_dbg(ms_dev(host), "next req %d\n", rc);
@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
host->req->error);
}
} while (!rc);
+ pm_runtime_put(ms_dev(host));
}
}
@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
__func__, param, value);
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
}
out:
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(ms_dev(host));
/* power-on delay */
if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
int err;
for (;;) {
+ pm_runtime_get_sync(ms_dev(host));
mutex_lock(&ucr->dev_mutex);
/* Check pending MS card changes */
@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
}
poll_again:
+ pm_runtime_put(ms_dev(host));
if (host->eject)
break;
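All of the rtsx_usb hunks in this series (here and in rtsx_usb_sdmmc.c below) apply the same fix: hold a runtime-PM reference across each USB transaction so the reader is not autosuspended mid-request. As a stand-alone sketch of the idiom (dev stands for ms_dev(host) or sdmmc_dev(host), ucr for the shared rtsx_ucr handle):

	pm_runtime_get_sync(dev);	/* resume the parent USB device if needed */
	mutex_lock(&ucr->dev_mutex);

	/* ... perform the card transfer ... */

	mutex_unlock(&ucr->dev_mutex);
	pm_runtime_put(dev);		/* drop the reference, allow autosuspend again */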
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index f3d34b941f85..af23d7dfe752 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -229,6 +229,14 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if (ctx->status == STARTED)
goto out; /* already started */
+ /*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc)
+ goto out;
+
if (task) {
ctx->pid = get_task_pid(task, PIDTYPE_PID);
ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
@@ -240,6 +248,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
put_pid(ctx->pid);
+ cxl_adapter_context_put(ctx->afu->adapter);
cxl_ctx_put();
goto out;
}
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index c466ee2b0c97..5e506c19108a 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -238,6 +238,9 @@ int __detach_context(struct cxl_context *ctx)
put_pid(ctx->glpid);
cxl_ctx_put();
+
+ /* Decrease the attached context count on the adapter */
+ cxl_adapter_context_put(ctx->afu->adapter);
return 0;
}
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 01d372aba131..a144073593fa 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -618,6 +618,14 @@ struct cxl {
bool perst_select_user;
bool perst_same_image;
bool psl_timebase_synced;
+
+ /*
+ * Number of contexts mapped onto this card. Possible values are:
+ * >0: Number of contexts mapped and new ones can be mapped.
+ * 0: No active contexts and new ones can be mapped.
+ * -1: No contexts mapped and new ones cannot be mapped.
+ */
+ atomic_t contexts_num;
};
int cxl_pci_alloc_one_irq(struct cxl *adapter);
@@ -944,4 +952,20 @@ bool cxl_pci_is_vphb_device(struct pci_dev *dev);
/* decode AFU error bits in the PSL register PSL_SERR_An */
void cxl_afu_decode_psl_serr(struct cxl_afu *afu, u64 serr);
+
+/*
+ * Increments the number of attached contexts on an adapter.
+ * If the adapter_context_lock is taken, this returns -EBUSY.
+ */
+int cxl_adapter_context_get(struct cxl *adapter);
+
+/* Decrements the number of attached contexts on an adapter */
+void cxl_adapter_context_put(struct cxl *adapter);
+
+/* If no active contexts then prevents contexts from being attached */
+int cxl_adapter_context_lock(struct cxl *adapter);
+
+/* Unlock the contexts-lock if taken. Warn and force unlock otherwise */
+void cxl_adapter_context_unlock(struct cxl *adapter);
+
#endif
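Taken together, the new helpers give every attach path the same protocol; a sketch of the expected call sequence, mirroring the api.c hunk above and the file.c hunk below:

	/* attach */
	rc = cxl_adapter_context_get(ctx->afu->adapter);	/* -EBUSY while the adapter is locked */
	if (rc)
		goto out;

	rc = cxl_ops->attach_process(ctx, kernel, wed, 0);
	if (rc) {
		cxl_adapter_context_put(ctx->afu->adapter);	/* undo on failed attach */
		goto out;
	}

	/* detach (__detach_context) */
	cxl_adapter_context_put(ctx->afu->adapter);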
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 5fb9894b157f..d0b421f49b39 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -205,11 +205,22 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
ctx->pid = get_task_pid(current, PIDTYPE_PID);
ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ /*
+ * Increment the mapped context count for adapter. This also checks
+ * if adapter_context_lock is taken.
+ */
+ rc = cxl_adapter_context_get(ctx->afu->adapter);
+ if (rc) {
+ afu_release_irqs(ctx, ctx);
+ goto out;
+ }
+
trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
amr))) {
afu_release_irqs(ctx, ctx);
+ cxl_adapter_context_put(ctx->afu->adapter);
goto out;
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 9aa58a77a24d..3e102cd6ed91 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1152,6 +1152,9 @@ struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_devic
if ((rc = cxl_sysfs_adapter_add(adapter)))
goto err_put1;
+ /* release the context lock as the adapter is configured */
+ cxl_adapter_context_unlock(adapter);
+
return adapter;
err_put1:
diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
index d9be23b24aa3..62e0dfb5f15b 100644
--- a/drivers/misc/cxl/main.c
+++ b/drivers/misc/cxl/main.c
@@ -243,8 +243,10 @@ struct cxl *cxl_alloc_adapter(void)
if (dev_set_name(&adapter->dev, "card%i", adapter->adapter_num))
goto err2;
- return adapter;
+ /* start with context lock taken */
+ atomic_set(&adapter->contexts_num, -1);
+ return adapter;
err2:
cxl_remove_adapter_nr(adapter);
err1:
@@ -286,6 +288,44 @@ int cxl_afu_select_best_mode(struct cxl_afu *afu)
return 0;
}
+int cxl_adapter_context_get(struct cxl *adapter)
+{
+ int rc;
+
+ rc = atomic_inc_unless_negative(&adapter->contexts_num);
+ /* atomic_inc_unless_negative() returns non-zero only on success */
+ return rc ? 0 : -EBUSY;
+}
+
+void cxl_adapter_context_put(struct cxl *adapter)
+{
+ atomic_dec_if_positive(&adapter->contexts_num);
+}
+
+int cxl_adapter_context_lock(struct cxl *adapter)
+{
+ int rc;
+ /* no active contexts -> contexts_num == 0 */
+ rc = atomic_cmpxchg(&adapter->contexts_num, 0, -1);
+ return rc ? -EBUSY : 0;
+}
+
+void cxl_adapter_context_unlock(struct cxl *adapter)
+{
+ int val = atomic_cmpxchg(&adapter->contexts_num, -1, 0);
+
+ /*
+ * contexts lock taken -> contexts_num == -1
+ * If not true then show a warning and force reset the lock.
+ * This can happen when context_unlock is requested without
+ * a preceding context_lock.
+ */
+ if (val != -1) {
+ atomic_set(&adapter->contexts_num, 0);
+ WARN(1, "Adapter context unlocked with %d active contexts",
+ val);
+ }
+}
+
static int __init init_cxl(void)
{
int rc = 0;
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 7afad8477ad5..e96be9ca4e60 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1487,6 +1487,8 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
if ((rc = cxl_native_register_psl_err_irq(adapter)))
goto err;
+ /* Release the context lock as adapter is configured */
+ cxl_adapter_context_unlock(adapter);
return 0;
err:
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index b043c20f158f..a8b6d6a635e9 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -75,12 +75,31 @@ static ssize_t reset_adapter_store(struct device *device,
int val;
rc = sscanf(buf, "%i", &val);
- if ((rc != 1) || (val != 1))
+ if ((rc != 1) || (val != 1 && val != -1))
return -EINVAL;
- if ((rc = cxl_ops->adapter_reset(adapter)))
- return rc;
- return count;
+ /*
+ * See if we can take the context-mapping lock; this is only allowed
+ * when there are no contexts attached to the adapter. Once taken, the
+ * lock also prevents any new context from being activated.
+ */
+ if (val == 1) {
+ rc = cxl_adapter_context_lock(adapter);
+ if (rc)
+ goto out;
+
+ rc = cxl_ops->adapter_reset(adapter);
+ /* If the reset failed, release the context lock */
+ if (rc)
+ cxl_adapter_context_unlock(adapter);
+
+ } else if (val == -1) {
+ /* Perform a forced adapter reset */
+ rc = cxl_ops->adapter_reset(adapter);
+ }
+
+out:
+ return rc ? rc : count;
}
static ssize_t load_image_on_perst_show(struct device *device,
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index e0203b1a20fd..f806a4471eb9 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -1396,8 +1396,7 @@ retry:
pinned_pages->nr_pages = get_user_pages(
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a2d97b9b17e3..6fb773dbcd0c 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -198,7 +198,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
#else
*pageshift = PAGE_SHIFT;
#endif
- if (get_user_pages(vaddr, 1, write, 0, &page, NULL) <= 0)
+ if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
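This series converts several get_user_pages() callers from the old write/force integer pair to a single gup_flags bitmask; the change is mechanical, as a generic before/after sketch shows:

	/* before: separate "write" and "force" arguments */
	ret = get_user_pages(addr, 1, write, 0, &page, NULL);

	/* after: one gup_flags argument */
	ret = get_user_pages(addr, 1, write ? FOLL_WRITE : 0, &page, NULL);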
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index f84b53d6ce50..b0d4dd9b0586 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -19,12 +19,17 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/mfd/syscon.h>
+#include <soc/at91/atmel-secumod.h>
#define SRAM_GRANULARITY 32
@@ -334,12 +339,35 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
return ret;
}
+static int atmel_securam_wait(void)
+{
+ struct regmap *regmap;
+ u32 val;
+
+ regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
+ if (IS_ERR(regmap))
+ return -ENODEV;
+
+ return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
+ val & AT91_SECUMOD_RAMRDY_READY,
+ 10000, 500000);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id sram_dt_ids[] = {
+ { .compatible = "mmio-sram" },
+ { .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
+ {}
+};
+#endif
+
static int sram_probe(struct platform_device *pdev)
{
struct sram_dev *sram;
struct resource *res;
size_t size;
int ret;
+ int (*init_func)(void);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
@@ -384,6 +412,13 @@ static int sram_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sram);
+ init_func = of_device_get_match_data(&pdev->dev);
+ if (init_func) {
+ ret = init_func();
+ if (ret)
+ return ret;
+ }
+
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
@@ -405,13 +440,6 @@ static int sram_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sram_dt_ids[] = {
- { .compatible = "mmio-sram" },
- {}
-};
-#endif
-
static struct platform_driver sram_driver = {
.driver = {
.name = "sram",
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c3335112e68c..709a872ed484 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
#include <asm/uaccess.h>
#include "queue.h"
+#include "block.h"
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
@@ -1786,7 +1787,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_data *md = mq->data;
struct mmc_packed *packed = mqrq->packed;
bool do_rel_wr, do_data_tag;
- u32 *packed_cmd_hdr;
+ __le32 *packed_cmd_hdr;
u8 hdr_blocks;
u8 i = 1;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 3c15a75bae86..342f1e3f301e 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -31,7 +31,7 @@ enum mmc_packed_type {
struct mmc_packed {
struct list_head list;
- u32 cmd_hdr[1024];
+ __le32 cmd_hdr[1024];
unsigned int blocks;
u8 nr_entries;
u8 retries;
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 3486bc7fbb64..39fc5b2b96c5 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1263,6 +1263,16 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_2V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400_1_8V)
+ err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
+
+ /* If this fails, try again during the next card power cycle */
+ if (err)
+ goto out_err;
+
err = mmc_select_bus_width(card);
if (err < 0)
goto out_err;
@@ -1272,6 +1282,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
if (err)
goto out_err;
+ mmc_set_clock(host, card->ext_csd.hs_max_dtr);
+
err = mmc_switch_status(card);
if (err)
goto out_err;
diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
index 4106295527b9..6e9c0f8fddb1 100644
--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
+++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
dev_dbg(sdmmc_dev(host), "%s\n", __func__);
mutex_lock(&ucr->dev_mutex);
- if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
- mutex_unlock(&ucr->dev_mutex);
- return;
- }
-
sd_set_power_mode(host, ios->power_mode);
sd_set_bus_width(host, ios->bus_width);
sd_set_timing(host, ios->timing, &host->ddr_mode);
@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
container_of(work, struct rtsx_usb_sdmmc, led_work);
struct rtsx_ucr *ucr = host->ucr;
+ pm_runtime_get_sync(sdmmc_dev(host));
mutex_lock(&ucr->dev_mutex);
if (host->led.brightness == LED_OFF)
@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
rtsx_usb_turn_on_led(ucr);
mutex_unlock(&ucr->dev_mutex);
+ pm_runtime_put(sdmmc_dev(host));
}
#endif
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f54fd8755c8..7123ef96ed18 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -346,7 +346,8 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 data;
- if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) {
+ if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE ||
+ reg == SDHCI_INT_STATUS)) {
if ((val & SDHCI_INT_CARD_INT) && !esdhc_is_usdhc(imx_data)) {
/*
* Clear and then set D3CD bit to avoid missing the
@@ -555,6 +556,25 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
esdhc_clrset_le(host, 0xffff, val, reg);
}
+static u8 esdhc_readb_le(struct sdhci_host *host, int reg)
+{
+ u8 ret;
+ u32 val;
+
+ switch (reg) {
+ case SDHCI_HOST_CONTROL:
+ val = readl(host->ioaddr + reg);
+
+ ret = val & SDHCI_CTRL_LED;
+ ret |= (val >> 5) & SDHCI_CTRL_DMA_MASK;
+ ret |= (val & ESDHC_CTRL_4BITBUS);
+ ret |= (val & ESDHC_CTRL_8BITBUS) << 3;
+ return ret;
+ }
+
+ return readb(host->ioaddr + reg);
+}
+
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -947,6 +967,7 @@ static void esdhc_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
+ .read_b = esdhc_readb_le,
.write_l = esdhc_writel_le,
.write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le,
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index da8e40af6f85..410a55b1c25f 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -250,7 +250,7 @@ static void sdhci_arasan_hs400_enhanced_strobe(struct mmc_host *mmc,
writel(vendor, host->ioaddr + SDHCI_ARASAN_VENDOR_REGISTER);
}
-void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
+static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
{
u8 ctrl;
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -265,6 +265,28 @@ void sdhci_arasan_reset(struct sdhci_host *host, u8 mask)
}
}
+static int sdhci_arasan_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios)
+{
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_180:
+ /*
+ * Please don't switch to 1V8: the arasan,5.1 controller doesn't
+ * actually use this setting to indicate the signal voltage, and
+ * the state machine would break if we forced 1V8 here. It's
+ * effectively a broken quirk, but we can work around it this way.
+ */
+ return 0;
+ case MMC_SIGNAL_VOLTAGE_330:
+ case MMC_SIGNAL_VOLTAGE_120:
+ /* We don't support 3V3 and 1V2 */
+ break;
+ }
+
+ return -EINVAL;
+}
+
static struct sdhci_ops sdhci_arasan_ops = {
.set_clock = sdhci_arasan_set_clock,
.get_max_clock = sdhci_pltfm_clk_get_max_clock,
@@ -661,6 +683,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.hs400_enhanced_strobe =
sdhci_arasan_hs400_enhanced_strobe;
+ host->mmc_host_ops.start_signal_voltage_switch =
+ sdhci_arasan_voltage_switch;
}
ret = sdhci_add_host(host);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 72a1f1f5180a..1d9e00a00e9f 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -32,6 +32,14 @@
#include "sdhci-pci.h"
#include "sdhci-pci-o2micro.h"
+static int sdhci_pci_enable_dma(struct sdhci_host *host);
+static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width);
+static void sdhci_pci_hw_reset(struct sdhci_host *host);
+static int sdhci_pci_select_drive_strength(struct sdhci_host *host,
+ struct mmc_card *card,
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -390,6 +398,45 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
+#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
+#define SDHCI_INTEL_PWR_TIMEOUT_UDELAY 100
+
+static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
+{
+ int cntr;
+ u8 reg;
+
+ sdhci_set_power(host, mode, vdd);
+
+ if (mode == MMC_POWER_OFF)
+ return;
+
+ /*
+ * Bus power might not enable after D3 -> D0 transition due to the
+ * present state not yet having propagated. Retry for up to 2ms.
+ */
+ for (cntr = 0; cntr < SDHCI_INTEL_PWR_TIMEOUT_CNT; cntr++) {
+ reg = sdhci_readb(host, SDHCI_POWER_CONTROL);
+ if (reg & SDHCI_POWER_ON)
+ break;
+ udelay(SDHCI_INTEL_PWR_TIMEOUT_UDELAY);
+ reg |= SDHCI_POWER_ON;
+ sdhci_writeb(host, reg, SDHCI_POWER_CONTROL);
+ }
+}
+
+static const struct sdhci_ops sdhci_intel_byt_ops = {
+ .set_clock = sdhci_set_clock,
+ .set_power = sdhci_intel_set_power,
+ .enable_dma = sdhci_pci_enable_dma,
+ .set_bus_width = sdhci_pci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .hw_reset = sdhci_pci_hw_reset,
+ .select_drive_strength = sdhci_pci_select_drive_strength,
+};
+
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.allow_runtime_pm = true,
.probe_slot = byt_emmc_probe_slot,
@@ -397,6 +444,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 |
SDHCI_QUIRK2_STOP_WITH_TC,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
@@ -405,6 +453,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
.allow_runtime_pm = true,
.probe_slot = byt_sdio_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
@@ -415,6 +464,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
.allow_runtime_pm = true,
.own_cd_for_runtime_pm = true,
.probe_slot = byt_sd_probe_slot,
+ .ops = &sdhci_intel_byt_ops,
};
/* Define Host controllers for Intel Merrifield platform */
@@ -1648,7 +1698,9 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
}
host->hw_name = "PCI";
- host->ops = &sdhci_pci_ops;
+ host->ops = chip->fixes && chip->fixes->ops ?
+ chip->fixes->ops :
+ &sdhci_pci_ops;
host->quirks = chip->quirks;
host->quirks2 = chip->quirks2;
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 9c7c08b93223..6bccf56bc5ff 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -65,6 +65,8 @@ struct sdhci_pci_fixes {
int (*suspend) (struct sdhci_pci_chip *);
int (*resume) (struct sdhci_pci_chip *);
+
+ const struct sdhci_ops *ops;
};
struct sdhci_pci_slot {
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index dd1938d341f7..d0f5c05fbc19 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -315,7 +315,7 @@ static void pxav3_set_power(struct sdhci_host *host, unsigned char mode,
struct mmc_host *mmc = host->mmc;
u8 pwr = host->pwr;
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_noreg(host, mode, vdd);
if (host->pwr == pwr)
return;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 48055666c655..71654b90227f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -687,7 +687,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* host->clock is in Hz. target_timeout is in us.
* Hence, us = 1000000 * cycles / Hz. Round up.
*/
- val = 1000000 * data->timeout_clks;
+ val = 1000000ULL * data->timeout_clks;
if (do_div(val, host->clock))
target_timeout++;
target_timeout += val;
@@ -1077,6 +1077,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
/* Initially, a command has no error */
cmd->error = 0;
+ if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
+ cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd->flags |= MMC_RSP_BUSY;
+
/* Wait max 10 ms */
timeout = 10;
@@ -1390,8 +1394,8 @@ static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
-void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
u8 pwr = 0;
@@ -1455,20 +1459,17 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
mdelay(10);
}
}
-EXPORT_SYMBOL_GPL(sdhci_set_power);
+EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
-static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode,
- unsigned short vdd)
+void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd)
{
- struct mmc_host *mmc = host->mmc;
-
- if (host->ops->set_power)
- host->ops->set_power(host, mode, vdd);
- else if (!IS_ERR(mmc->supply.vmmc))
- sdhci_set_power_reg(host, mode, vdd);
+ if (IS_ERR(host->mmc->supply.vmmc))
+ sdhci_set_power_noreg(host, mode, vdd);
else
- sdhci_set_power(host, mode, vdd);
+ sdhci_set_power_reg(host, mode, vdd);
}
+EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
* *
@@ -1609,7 +1610,10 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- __sdhci_set_power(host, ios->power_mode, ios->vdd);
+ if (host->ops->set_power)
+ host->ops->set_power(host, ios->power_mode, ios->vdd);
+ else
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -2409,7 +2413,7 @@ static void sdhci_timeout_data_timer(unsigned long data)
* *
\*****************************************************************************/
-static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
+static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
if (!host->cmd) {
/*
@@ -2453,11 +2457,6 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
return;
}
- if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
- !(host->cmd->flags & MMC_RSP_BUSY) && !host->data &&
- host->cmd->opcode == MMC_STOP_TRANSMISSION)
- *mask &= ~SDHCI_INT_DATA_END;
-
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
@@ -2680,8 +2679,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
if (intmask & SDHCI_INT_CMD_MASK)
- sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
- &intmask);
+ sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
if (intmask & SDHCI_INT_DATA_MASK)
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index c722cd23205c..766df17fb7eb 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -683,6 +683,8 @@ u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
void sdhci_reset(struct sdhci_host *host, u8 mask);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
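With this split, sdhci_set_ios() prefers the host's ->set_power callback, and such a callback can reuse sdhci_set_power_noreg() to touch only SDHCI_POWER_CONTROL while handling the supply itself; the pxav3 hunk above is the in-tree user. A sketch of such a callback (only the sdhci_set_power_noreg() call is taken from the hunk above; the name and regulator handling are illustrative):

	static void example_set_power(struct sdhci_host *host, unsigned char mode,
				      unsigned short vdd)
	{
		struct mmc_host *mmc = host->mmc;

		/* update the power-control register, skip the vmmc regulator path */
		sdhci_set_power_noreg(host, mode, vdd);

		/* illustrative: drive the supply however the platform requires */
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
					      mode == MMC_POWER_OFF ? 0 : vdd);
	}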
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 95c4048a371e..388e46be6ad9 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -741,6 +741,7 @@ static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
goto out_put;
}
+ vid_hdr = ubi_get_vid_hdr(vidb);
ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
mutex_lock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index d6384d965788..2ff62157d3bb 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -287,7 +287,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
/* new_aeb is newer */
if (cmp_res & 1) {
- victim = ubi_alloc_aeb(ai, aeb->ec, aeb->pnum);
+ victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
if (!victim)
return -ENOMEM;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(1);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
struct nvme_command c = { };
c.identify.opcode = nvme_admin_identify;
- c.identify.cns = cpu_to_le32(2);
+ c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
c.identify.nsid = cpu_to_le32(nsid);
return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
}
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
return -ENODEV;
}
- if (ns->ctrl->vs >= NVME_VS(1, 1))
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
- if (ns->ctrl->vs >= NVME_VS(1, 2))
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
int ret;
while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+ if (csts == ~0)
+ return -ENODEV;
if ((csts & NVME_CSTS_RDY) == bit)
break;
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
}
page_shift = NVME_CAP_MPSMIN(cap) + 12;
- if (ctrl->vs >= NVME_VS(1, 1))
+ if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(cap);
ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
return;
nn = le32_to_cpu(id->nn);
- if (ctrl->vs >= NVME_VS(1, 1) &&
+ if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
if (!nvme_scan_ns_list(ctrl, nn))
goto done;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
dma_addr_t cmb_dma_addr;
u64 cmb_size;
u32 cmbsz;
+ u32 cmbloc;
struct nvme_ctrl ctrl;
struct completion ioq_wait;
};
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
/*
* Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
struct nvme_queue *nvmeq;
- dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+ dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
NVME_CAP_NSSRC(cap) : 0;
if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
/* Skip controllers under certain specific conditions. */
if (nvme_should_reset(dev, csts)) {
- if (queue_work(nvme_workq, &dev->reset_work))
+ if (!nvme_reset(dev))
dev_warn(dev->dev,
"Failed status: 0x%x, reset controller.\n",
csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
return ret >= 0 ? 0 : ret;
}
+static ssize_t nvme_cmb_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+ return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
{
u64 szu, size, offset;
- u32 cmbloc;
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
dma_addr_t dma_addr;
- if (!use_cmb_sqes)
- return NULL;
-
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
return NULL;
+ dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+ if (!use_cmb_sqes)
+ return NULL;
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
- offset = szu * NVME_CMB_OFST(cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+ offset = szu * NVME_CMB_OFST(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
if (offset > bar_size)
return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+ dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
cmb = ioremap_wc(dma_addr, size);
if (!cmb)
return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
{
- int pass, queues = dev->online_queues - 1;
+ int pass;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev->q_depth);
}
- if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+ /*
+ * CMBs can currently only exist on >=1.2 PCIe devices. We only
+ * populate sysfs if a CMB is implemented. Note that we add the
+ * CMB attribute to the nvme_ctrl kobj, so there is no need to remove
+ * it explicitly on exit. Since nvme_dev_attrs_group has no name, we
+ * can pass NULL as the final argument to sysfs_add_file_to_group.
+ */
+
+ if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
dev->cmb = nvme_map_cmb(dev);
+ if (dev->cmbsz) {
+ if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+ &dev_attr_cmb.attr, NULL))
+ dev_warn(dev->dev,
+ "failed to add sysfs attribute for CMB\n");
+ }
+ }
+
pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
{
- int i;
+ int i, queues;
u32 csts = -1;
del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
csts = readl(dev->bar + NVME_REG_CSTS);
}
+ queues = dev->online_queues - 1;
for (i = dev->queue_count - 1; i > 0; i--)
nvme_suspend_queue(dev->queues[i]);
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (dev->queue_count)
nvme_suspend_queue(dev->queues[0]);
} else {
- nvme_disable_io_queues(dev);
+ nvme_disable_io_queues(dev, queues);
nvme_disable_admin_queue(dev, shutdown);
}
nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
{
if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
return -ENODEV;
-
+ if (work_busy(&dev->reset_work))
+ return -ENODEV;
if (!queue_work(nvme_workq, &dev->reset_work))
return -EBUSY;
-
- flush_work(&dev->reset_work);
return 0;
}
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
{
- return nvme_reset(to_nvme_dev(ctrl));
+ struct nvme_dev *dev = to_nvme_dev(ctrl);
+ int ret = nvme_reset(dev);
+
+ if (!ret)
+ flush_work(&dev->reset_work);
+ return ret;
}
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
if (prepare)
nvme_dev_disable(dev, false);
else
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
}
static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- queue_work(nvme_workq, &ndev->reset_work);
+ nvme_reset(ndev);
return 0;
}
#endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
dev_info(dev->ctrl.device, "restart after slot reset\n");
pci_restore_state(pdev);
- queue_work(nvme_workq, &dev->reset_work);
+ nvme_reset(dev);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
eui = id_ns->eui64;
len = sizeof(id_ns->eui64);
- if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
if (bitmap_empty(eui, len * 8)) {
eui = id_ns->nguid;
len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
{
int res;
- if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+ if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
if (res != -EOPNOTSUPP)
return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
*/
/* we support multiple ports and multiple hosts: */
- id->mic = (1 << 0) | (1 << 1);
+ id->cmic = (1 << 0) | (1 << 1);
/* no limit on data transfer sizes for now */
id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x00:
+ case NVME_ID_CNS_NS:
req->execute = nvmet_execute_identify_ns;
return 0;
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute = nvmet_execute_identify_ctrl;
return 0;
- case 0x02:
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
req->execute = nvmet_execute_identify_nslist;
return 0;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
if (!subsys)
return NULL;
- subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+ subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
switch (type) {
case NVME_NQN_NVME:
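The subsys->ver change above also shows the encoding behind the three-argument NVME_VS() used throughout this series: major in bits 31:16, minor in 15:8, tertiary in 7:0. Assuming the include/linux/nvme.h definition follows that layout, the macro is simply:

	#define NVME_VS(major, minor, tertiary) \
		(((major) << 16) | ((minor) << 8) | (tertiary))

	/* e.g. NVME_VS(1, 2, 1) == 0x010201 and NVME_VS(1, 1, 0) == 0x010100 */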
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
/* we support only dynamic controllers */
e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
- e->nqntype = type;
+ e->subtype = type;
memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
case nvme_admin_identify:
req->data_len = 4096;
switch (le32_to_cpu(cmd->identify.cns)) {
- case 0x01:
+ case NVME_ID_CNS_CTRL:
req->execute =
nvmet_execute_identify_disc_ctrl;
return 0;
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 2cb7315e26d0..653707996342 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -247,6 +247,7 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
pp = &pcie->pp;
pp->dev = dev;
+ pcie->drvdata = match->data;
pp->ops = pcie->drvdata->ops;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
@@ -256,7 +257,6 @@ static int __init ls_pcie_probe(struct platform_device *pdev)
return PTR_ERR(pcie->pp.dbi_base);
}
- pcie->drvdata = match->data;
pcie->lut = pcie->pp.dbi_base + pcie->drvdata->lut_offset;
if (!ls_pcie_is_bridge(pcie))
diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c
index 537f58a664fa..8df6312ed300 100644
--- a/drivers/pci/host/pcie-designware-plat.c
+++ b/drivers/pci/host/pcie-designware-plat.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
- * Authors: Joao Pinto <jpinto@synopsys.com>
+ * Authors: Joao Pinto <jpmpinto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index c2ac7646b99f..a8ac4bcef2c0 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1011,7 +1011,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
rc = acpi_dev_get_resources(adev, &resource_list,
acpi_pmu_dev_add_resource, &res);
acpi_dev_free_resource_list(&resource_list);
- if (rc < 0 || IS_ERR(&res)) {
+ if (rc < 0) {
dev_err(dev, "PMU type %d: No resource address found\n", type);
goto err;
}
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index e1ab864e1a7f..c8c72e8259d3 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -151,21 +151,21 @@ FUNC_GROUP_DECL(GPID0, F19, E21);
#define GPID2_DESC SIG_DESC_SET(SCU8C, 9)
-#define D20 26
+#define F20 26
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT0, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2IN, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2IN, GPID2, GPID);
-MS_PIN_DECL(D20, GPIOD2, SD2DAT0, GPID2IN);
+MS_PIN_DECL(F20, GPIOD2, SD2DAT0, GPID2IN);
-#define D21 27
+#define D20 27
SIG_EXPR_LIST_DECL_SINGLE(SD2DAT1, SD2, SD2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID2, GPID2_DESC);
SIG_EXPR_DECL(GPID2OUT, GPID, GPID_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPID2OUT, GPID2, GPID);
-MS_PIN_DECL(D21, GPIOD3, SD2DAT1, GPID2OUT);
+MS_PIN_DECL(D20, GPIOD3, SD2DAT1, GPID2OUT);
-FUNC_GROUP_DECL(GPID2, D20, D21);
+FUNC_GROUP_DECL(GPID2, F20, D20);
#define GPIE_DESC SIG_DESC_SET(HW_STRAP1, 21)
#define GPIE0_DESC SIG_DESC_SET(SCU8C, 12)
@@ -182,28 +182,88 @@ SIG_EXPR_LIST_DECL_SINGLE(NDCD3, NDCD3, SIG_DESC_SET(SCU80, 17));
SIG_EXPR_DECL(GPIE0OUT, GPIE0, GPIE0_DESC);
SIG_EXPR_DECL(GPIE0OUT, GPIE, GPIE_DESC);
SIG_EXPR_LIST_DECL_DUAL(GPIE0OUT, GPIE0, GPIE);
-MS_PIN_DECL(C20, GPIE0, NDCD3, GPIE0OUT);
+MS_PIN_DECL(C20, GPIOE1, NDCD3, GPIE0OUT);
FUNC_GROUP_DECL(GPIE0, B20, C20);
-#define SPI1_DESC SIG_DESC_SET(HW_STRAP1, 13)
+#define SPI1_DESC { HW_STRAP1, GENMASK(13, 12), 1, 0 }
+#define SPI1DEBUG_DESC { HW_STRAP1, GENMASK(13, 12), 2, 0 }
+#define SPI1PASSTHRU_DESC { HW_STRAP1, GENMASK(13, 12), 3, 0 }
+
#define C18 64
-SIG_EXPR_LIST_DECL_SINGLE(SYSCS, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCS, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCS, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C18, GPIOI0, SYSCS);
#define E15 65
-SIG_EXPR_LIST_DECL_SINGLE(SYSCK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSCK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSCK, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(E15, GPIOI1, SYSCK);
-#define A14 66
-SIG_EXPR_LIST_DECL_SINGLE(SYSMOSI, SPI1, COND1, SPI1_DESC);
-SS_PIN_DECL(A14, GPIOI2, SYSMOSI);
+#define B16 66
+SIG_EXPR_DECL(SYSMOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMOSI, SPI1DEBUG, SPI1PASSTHRU);
+SS_PIN_DECL(B16, GPIOI2, SYSMOSI);
#define C16 67
-SIG_EXPR_LIST_DECL_SINGLE(SYSMISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SYSMISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL_DUAL(SYSMISO, SPI1DEBUG, SPI1PASSTHRU);
SS_PIN_DECL(C16, GPIOI3, SYSMISO);
-FUNC_GROUP_DECL(SPI1, C18, E15, A14, C16);
+#define VB_DESC SIG_DESC_SET(HW_STRAP1, 5)
+
+#define B15 68
+SIG_EXPR_DECL(SPI1CS0, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CS0, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CS0, SIG_EXPR_PTR(SPI1CS0, SPI1),
+ SIG_EXPR_PTR(SPI1CS0, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CS0, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCS, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(B15, GPIOI4, SPI1CS0, VBCS);
+
+#define C15 69
+SIG_EXPR_DECL(SPI1CK, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1CK, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1CK, SIG_EXPR_PTR(SPI1CK, SPI1),
+ SIG_EXPR_PTR(SPI1CK, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1CK, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBCK, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(C15, GPIOI5, SPI1CK, VBCK);
+
+#define A14 70
+SIG_EXPR_DECL(SPI1MOSI, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MOSI, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MOSI, SIG_EXPR_PTR(SPI1MOSI, SPI1),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MOSI, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMOSI, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A14, GPIOI6, SPI1MOSI, VBMOSI);
+
+#define A15 71
+SIG_EXPR_DECL(SPI1MISO, SPI1, COND1, SPI1_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1DEBUG, COND1, SPI1DEBUG_DESC);
+SIG_EXPR_DECL(SPI1MISO, SPI1PASSTHRU, COND1, SPI1PASSTHRU_DESC);
+SIG_EXPR_LIST_DECL(SPI1MISO, SIG_EXPR_PTR(SPI1MISO, SPI1),
+ SIG_EXPR_PTR(SPI1MISO, SPI1DEBUG),
+ SIG_EXPR_PTR(SPI1MISO, SPI1PASSTHRU));
+SIG_EXPR_LIST_DECL_SINGLE(VBMISO, VGABIOSROM, COND1, VB_DESC);
+MS_PIN_DECL(A15, GPIOI7, SPI1MISO, VBMISO);
+
+FUNC_GROUP_DECL(SPI1, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1DEBUG, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(SPI1PASSTHRU, C18, E15, B16, C16, B15, C15, A14, A15);
+FUNC_GROUP_DECL(VGABIOSROM, B15, C15, A14, A15);
+
+#define R2 72
+SIG_EXPR_LIST_DECL_SINGLE(SGPMCK, SGPM, SIG_DESC_SET(SCU84, 8));
+SS_PIN_DECL(R2, GPIOJ0, SGPMCK);
#define L2 73
SIG_EXPR_LIST_DECL_SINGLE(SGPMLD, SGPM, SIG_DESC_SET(SCU84, 9));
@@ -580,6 +640,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(A12),
ASPEED_PINCTRL_PIN(A13),
ASPEED_PINCTRL_PIN(A14),
+ ASPEED_PINCTRL_PIN(A15),
ASPEED_PINCTRL_PIN(A2),
ASPEED_PINCTRL_PIN(A3),
ASPEED_PINCTRL_PIN(A4),
@@ -592,6 +653,8 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(B12),
ASPEED_PINCTRL_PIN(B13),
ASPEED_PINCTRL_PIN(B14),
+ ASPEED_PINCTRL_PIN(B15),
+ ASPEED_PINCTRL_PIN(B16),
ASPEED_PINCTRL_PIN(B2),
ASPEED_PINCTRL_PIN(B20),
ASPEED_PINCTRL_PIN(B3),
@@ -603,6 +666,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(C12),
ASPEED_PINCTRL_PIN(C13),
ASPEED_PINCTRL_PIN(C14),
+ ASPEED_PINCTRL_PIN(C15),
ASPEED_PINCTRL_PIN(C16),
ASPEED_PINCTRL_PIN(C18),
ASPEED_PINCTRL_PIN(C2),
@@ -614,7 +678,6 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(D10),
ASPEED_PINCTRL_PIN(D2),
ASPEED_PINCTRL_PIN(D20),
- ASPEED_PINCTRL_PIN(D21),
ASPEED_PINCTRL_PIN(D4),
ASPEED_PINCTRL_PIN(D5),
ASPEED_PINCTRL_PIN(D6),
@@ -630,6 +693,7 @@ static struct pinctrl_pin_desc aspeed_g5_pins[ASPEED_G5_NR_PINS] = {
ASPEED_PINCTRL_PIN(E7),
ASPEED_PINCTRL_PIN(E9),
ASPEED_PINCTRL_PIN(F19),
+ ASPEED_PINCTRL_PIN(F20),
ASPEED_PINCTRL_PIN(F9),
ASPEED_PINCTRL_PIN(H20),
ASPEED_PINCTRL_PIN(L1),
@@ -691,11 +755,14 @@ static const struct aspeed_pin_group aspeed_g5_groups[] = {
ASPEED_PINCTRL_GROUP(RMII2),
ASPEED_PINCTRL_GROUP(SD1),
ASPEED_PINCTRL_GROUP(SPI1),
+ ASPEED_PINCTRL_GROUP(SPI1DEBUG),
+ ASPEED_PINCTRL_GROUP(SPI1PASSTHRU),
ASPEED_PINCTRL_GROUP(TIMER4),
ASPEED_PINCTRL_GROUP(TIMER5),
ASPEED_PINCTRL_GROUP(TIMER6),
ASPEED_PINCTRL_GROUP(TIMER7),
ASPEED_PINCTRL_GROUP(TIMER8),
+ ASPEED_PINCTRL_GROUP(VGABIOSROM),
};
static const struct aspeed_pin_function aspeed_g5_functions[] = {
@@ -733,11 +800,14 @@ static const struct aspeed_pin_function aspeed_g5_functions[] = {
ASPEED_PINCTRL_FUNC(RMII2),
ASPEED_PINCTRL_FUNC(SD1),
ASPEED_PINCTRL_FUNC(SPI1),
+ ASPEED_PINCTRL_FUNC(SPI1DEBUG),
+ ASPEED_PINCTRL_FUNC(SPI1PASSTHRU),
ASPEED_PINCTRL_FUNC(TIMER4),
ASPEED_PINCTRL_FUNC(TIMER5),
ASPEED_PINCTRL_FUNC(TIMER6),
ASPEED_PINCTRL_FUNC(TIMER7),
ASPEED_PINCTRL_FUNC(TIMER8),
+ ASPEED_PINCTRL_FUNC(VGABIOSROM),
};
static struct aspeed_pinctrl_data aspeed_g5_pinctrl_data = {
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 0391f9f13f3e..49aeba912531 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -166,13 +166,9 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
bool enable, struct regmap *map)
{
int i;
- bool ret;
-
- ret = aspeed_sig_expr_eval(expr, enable, map);
- if (ret)
- return ret;
for (i = 0; i < expr->ndescs; i++) {
+ bool ret;
const struct aspeed_sig_desc *desc = &expr->descs[i];
u32 pattern = enable ? desc->enable : desc->disable;
@@ -199,12 +195,18 @@ static bool aspeed_sig_expr_set(const struct aspeed_sig_expr *expr,
static bool aspeed_sig_expr_enable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, true, map);
}
static bool aspeed_sig_expr_disable(const struct aspeed_sig_expr *expr,
struct regmap *map)
{
+ if (!aspeed_sig_expr_eval(expr, true, map))
+ return true;
+
return aspeed_sig_expr_set(expr, false, map);
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index d22a9fe2e6df..71bbeb9321ba 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1808,6 +1808,8 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
+ raw_spin_lock_init(&vg->lock);
+
ret = byt_gpio_probe(vg);
if (ret) {
pinctrl_unregister(vg->pctl_dev);
@@ -1815,7 +1817,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, vg);
- raw_spin_lock_init(&vg->lock);
pm_runtime_enable(&pdev->dev);
return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 63387a40b973..01443762e570 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
+#include "../core.h"
#include "pinctrl-intel.h"
/* Offset from regs */
@@ -1056,6 +1057,26 @@ int intel_pinctrl_remove(struct platform_device *pdev)
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
+static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned pin)
+{
+ const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
+
+ if (!pd || !intel_pad_usable(pctrl, pin))
+ return false;
+
+ /*
+ * Only restore the pin if it is actually in use by the kernel (or
+ * by userspace). It is possible that some pins are used by the
+ * BIOS during resume and those are not always locked down so leave
+ * them alone.
+ */
+ if (pd->mux_owner || pd->gpio_owner ||
+ gpiochip_line_is_irq(&pctrl->chip, pin))
+ return true;
+
+ return false;
+}
+
int intel_pinctrl_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1069,7 +1090,7 @@ int intel_pinctrl_suspend(struct device *dev)
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
@@ -1130,7 +1151,7 @@ int intel_pinctrl_resume(struct device *dev)
void __iomem *padcfg;
u32 val;
- if (!intel_pad_usable(pctrl, desc->number))
+ if (!intel_pinctrl_should_save(pctrl, desc->number))
continue;
padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 07462d79d040..1aba2c74160e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -309,7 +309,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
* much memory to the process.
*/
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(address, 1, !is_write, 0, &page, NULL);
+ ret = get_user_pages(address, 1, is_write ? 0 : FOLL_WRITE,
+ &page, NULL);
up_read(&current->mm->mmap_sem);
if (ret < 0)
break;
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 81b8dcca8891..b8a21d7b25d4 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -576,6 +576,7 @@ config ASUS_WMI
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
+ depends on SERIO_I8042 || SERIO_I8042 = n
---help---
This is a driver for newer Asus notebooks. It adds extra features
like wireless radio and bluetooth control, leds, hotkeys, backlight...
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index d1a091b93192..a2323941e677 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -933,6 +933,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
},
},
+ {
+ .ident = "Lenovo YOGA 910-13IKB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"),
+ },
+ },
{}
};
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 436dfe871d32..9013a585507e 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -892,7 +892,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
down_read(&current->mm->mmap_sem);
pinned = get_user_pages(
(unsigned long)xfer->loc_addr & PAGE_MASK,
- nr_pages, dir == DMA_FROM_DEVICE, 0,
+ nr_pages,
+ dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
page_list, NULL);
up_read(&current->mm->mmap_sem);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 06d9fa2f3bc0..172dc966a01f 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -94,5 +94,6 @@ config RESET_ZYNQ
source "drivers/reset/sti/Kconfig"
source "drivers/reset/hisilicon/Kconfig"
+source "drivers/reset/tegra/Kconfig"
endif
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index bbe7026617fc..13b346e03d84 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -1,6 +1,7 @@
obj-y += core.o
obj-y += hisilicon/
obj-$(CONFIG_ARCH_STI) += sti/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index b8ae1dbd4c17..10368ed8fd13 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -32,6 +32,9 @@ static LIST_HEAD(reset_controller_list);
* @refcnt: Number of gets of this reset_control
* @shared: Is this a shared (1), or an exclusive (0) reset_control?
* @deassert_cnt: Number of times this reset line has been deasserted
+ * @triggered_count: Number of times this reset line has been reset. Currently
+ * only used for shared resets, which means that the value
+ * will be either 0 or 1.
*/
struct reset_control {
struct reset_controller_dev *rcdev;
@@ -40,6 +43,7 @@ struct reset_control {
unsigned int refcnt;
int shared;
atomic_t deassert_count;
+ atomic_t triggered_count;
};
/**
@@ -134,18 +138,35 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register);
* reset_control_reset - reset the controlled device
* @rstc: reset controller
*
- * Calling this on a shared reset controller is an error.
+ * On a shared reset line the actual reset pulse is only triggered once for the
+ * lifetime of the reset_control instance: for all but the first caller this is
+ * a no-op.
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset has been used.
*/
int reset_control_reset(struct reset_control *rstc)
{
- if (WARN_ON(IS_ERR_OR_NULL(rstc)) ||
- WARN_ON(rstc->shared))
+ int ret;
+
+ if (WARN_ON(IS_ERR_OR_NULL(rstc)))
return -EINVAL;
- if (rstc->rcdev->ops->reset)
- return rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (!rstc->rcdev->ops->reset)
+ return -ENOTSUPP;
- return -ENOTSUPP;
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ if (atomic_inc_return(&rstc->triggered_count) != 1)
+ return 0;
+ }
+
+ ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
+ if (rstc->shared && !ret)
+ atomic_dec(&rstc->triggered_count);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
@@ -159,6 +180,8 @@ EXPORT_SYMBOL_GPL(reset_control_reset);
*
* For shared reset controls a driver cannot expect the hw's registers and
* internal state to be reset, but must be prepared for this to happen.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_assert(struct reset_control *rstc)
{
@@ -169,6 +192,9 @@ int reset_control_assert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
return -EINVAL;
@@ -185,6 +211,8 @@ EXPORT_SYMBOL_GPL(reset_control_assert);
* @rstc: reset controller
*
* After calling this function, the reset is guaranteed to be deasserted.
+ * Consumers must not use reset_control_reset on shared reset lines when
+ * reset_control_(de)assert has been used.
*/
int reset_control_deassert(struct reset_control *rstc)
{
@@ -195,6 +223,9 @@ int reset_control_deassert(struct reset_control *rstc)
return -ENOTSUPP;
if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
+ return -EINVAL;
+
if (atomic_inc_return(&rstc->deassert_count) != 1)
return 0;
}
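
The core changes above alter the consumer contract for shared reset lines: a shared
reset control may now be pulsed with reset_control_reset(), where only the first
caller triggers the actual pulse, but such a handle must then never be mixed with
reset_control_assert()/reset_control_deassert(). A minimal consumer-side sketch of
the intended usage (hypothetical "foo" driver, not part of this series):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

static int foo_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	/* Shared handle: other consumers may reference the same physical line. */
	rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/*
	 * Pulse the line. Only the first consumer actually triggers the reset;
	 * later callers get 0 without touching the hardware. Do not call
	 * reset_control_(de)assert() on this handle afterwards.
	 */
	ret = reset_control_reset(rst);
	if (ret)
		return ret;

	return 0;
}
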
diff --git a/drivers/reset/reset-berlin.c b/drivers/reset/reset-berlin.c
index 369f3917fd8e..371197bbd055 100644
--- a/drivers/reset/reset-berlin.c
+++ b/drivers/reset/reset-berlin.c
@@ -1,6 +1,8 @@
/*
* Copyright (C) 2014 Marvell Technology Group Ltd.
*
+ * Marvell Berlin reset driver
+ *
* Antoine Tenart <antoine.tenart@free-electrons.com>
* Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
*
@@ -12,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -91,7 +93,6 @@ static const struct of_device_id berlin_reset_dt_match[] = {
{ .compatible = "marvell,berlin2-reset" },
{ },
};
-MODULE_DEVICE_TABLE(of, berlin_reset_dt_match);
static struct platform_driver berlin_reset_driver = {
.probe = berlin2_reset_probe,
@@ -100,9 +101,4 @@ static struct platform_driver berlin_reset_driver = {
.of_match_table = berlin_reset_dt_match,
},
};
-module_platform_driver(berlin_reset_driver);
-
-MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
-MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>");
-MODULE_DESCRIPTION("Marvell Berlin reset driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(berlin_reset_driver);
diff --git a/drivers/reset/reset-lpc18xx.c b/drivers/reset/reset-lpc18xx.c
index 54cca0055171..a62ad52e262b 100644
--- a/drivers/reset/reset-lpc18xx.c
+++ b/drivers/reset/reset-lpc18xx.c
@@ -13,7 +13,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -218,39 +218,17 @@ dis_clk_reg:
return ret;
}
-static int lpc18xx_rgu_remove(struct platform_device *pdev)
-{
- struct lpc18xx_rgu_data *rc = platform_get_drvdata(pdev);
- int ret;
-
- ret = unregister_restart_handler(&rc->restart_nb);
- if (ret)
- dev_warn(&pdev->dev, "failed to unregister restart handler\n");
-
- reset_controller_unregister(&rc->rcdev);
-
- clk_disable_unprepare(rc->clk_delay);
- clk_disable_unprepare(rc->clk_reg);
-
- return 0;
-}
-
static const struct of_device_id lpc18xx_rgu_match[] = {
{ .compatible = "nxp,lpc1850-rgu" },
{ }
};
-MODULE_DEVICE_TABLE(of, lpc18xx_rgu_match);
static struct platform_driver lpc18xx_rgu_driver = {
.probe = lpc18xx_rgu_probe,
- .remove = lpc18xx_rgu_remove,
.driver = {
- .name = "lpc18xx-reset",
- .of_match_table = lpc18xx_rgu_match,
+ .name = "lpc18xx-reset",
+ .of_match_table = lpc18xx_rgu_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(lpc18xx_rgu_driver);
-
-MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
-MODULE_DESCRIPTION("Reset driver for LPC18xx/43xx RGU");
-MODULE_LICENSE("GPL v2");
+builtin_platform_driver(lpc18xx_rgu_driver);
diff --git a/drivers/reset/reset-oxnas.c b/drivers/reset/reset-oxnas.c
index 944980572f79..0d9036dea010 100644
--- a/drivers/reset/reset-oxnas.c
+++ b/drivers/reset/reset-oxnas.c
@@ -80,6 +80,7 @@ static const struct reset_control_ops oxnas_reset_ops = {
static const struct of_device_id oxnas_reset_dt_ids[] = {
{ .compatible = "oxsemi,ox810se-reset", },
+ { .compatible = "oxsemi,ox820-reset", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, oxnas_reset_dt_ids);
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 78ebf8424375..43e4a9f39b9b 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -1,4 +1,6 @@
/*
+ * Socfpga Reset Controller Driver
+ *
* Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
*
* based on
@@ -16,7 +18,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
@@ -148,8 +150,4 @@ static struct platform_driver socfpga_reset_driver = {
.of_match_table = socfpga_reset_dt_ids,
},
};
-module_platform_driver(socfpga_reset_driver);
-
-MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de");
-MODULE_DESCRIPTION("Socfpga Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(socfpga_reset_driver);
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index 3080190b3f90..b44f6b5f87b6 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
@@ -142,7 +142,6 @@ static const struct of_device_id sunxi_reset_dt_ids[] = {
{ .compatible = "allwinner,sun6i-a31-clock-reset", },
{ /* sentinel */ },
};
-MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids);
static int sunxi_reset_probe(struct platform_device *pdev)
{
@@ -175,8 +174,4 @@ static struct platform_driver sunxi_reset_driver = {
.of_match_table = sunxi_reset_dt_ids,
},
};
-module_platform_driver(sunxi_reset_driver);
-
-MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
-MODULE_DESCRIPTION("Allwinner SoCs Reset Controller Driver");
-MODULE_LICENSE("GPL");
+builtin_platform_driver(sunxi_reset_driver);
diff --git a/drivers/reset/reset-zynq.c b/drivers/reset/reset-zynq.c
index 138f2f205662..87a4e355578f 100644
--- a/drivers/reset/reset-zynq.c
+++ b/drivers/reset/reset-zynq.c
@@ -3,6 +3,8 @@
*
* Xilinx Zynq Reset controller driver
*
+ * Author: Moritz Fischer <moritz.fischer@ettus.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
@@ -15,7 +17,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -137,8 +139,4 @@ static struct platform_driver zynq_reset_driver = {
.of_match_table = zynq_reset_dt_ids,
},
};
-module_platform_driver(zynq_reset_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
-MODULE_DESCRIPTION("Zynq Reset Controller Driver");
+builtin_platform_driver(zynq_reset_driver);
diff --git a/drivers/reset/sti/Kconfig b/drivers/reset/sti/Kconfig
index 613178553612..71592b5bfd14 100644
--- a/drivers/reset/sti/Kconfig
+++ b/drivers/reset/sti/Kconfig
@@ -3,14 +3,6 @@ if ARCH_STI
config STI_RESET_SYSCFG
bool
-config STIH415_RESET
- bool
- select STI_RESET_SYSCFG
-
-config STIH416_RESET
- bool
- select STI_RESET_SYSCFG
-
config STIH407_RESET
bool
select STI_RESET_SYSCFG
diff --git a/drivers/reset/sti/Makefile b/drivers/reset/sti/Makefile
index dc85dfbe56a9..f9d82411f29e 100644
--- a/drivers/reset/sti/Makefile
+++ b/drivers/reset/sti/Makefile
@@ -1,5 +1,3 @@
obj-$(CONFIG_STI_RESET_SYSCFG) += reset-syscfg.o
-obj-$(CONFIG_STIH415_RESET) += reset-stih415.o
-obj-$(CONFIG_STIH416_RESET) += reset-stih416.o
obj-$(CONFIG_STIH407_RESET) += reset-stih407.o
diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c
deleted file mode 100644
index 6f220cdbef46..000000000000
--- a/drivers/reset/sti/reset-stih415.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih415-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH415 Peripheral powerdown definitions.
- */
-static const char stih415_front[] = "st,stih415-front-syscfg";
-static const char stih415_rear[] = "st,stih415-rear-syscfg";
-static const char stih415_sbc[] = "st,stih415-sbc-syscfg";
-static const char stih415_lpm[] = "st,stih415-lpm-syscfg";
-
-#define STIH415_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih415_front, SYSCFG_114, _bit, SYSSTAT_187, _bit)
-
-#define STIH415_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih415_rear, SYSCFG_336, _cntl, SYSSTAT_384, _stat)
-
-#define STIH415_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_rear, _reg, _bit)
-
-#define STIH415_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_sbc, _reg, _bit)
-
-#define STIH415_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_front, _reg, _bit)
-
-#define STIH415_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih415_lpm, _reg, _bit)
-
-#define SYSCFG_114 0x38 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_187 0x15c /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_336 0x90 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_384 0x150 /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_376 0x130 /* Reset generator 0 control 0 */
-#define SYSCFG_166 0x108 /* Softreset Ethernet 0 */
-#define SYSCFG_31 0x7c /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-
-static const struct syscfg_reset_channel_data stih415_powerdowns[] = {
- [STIH415_EMISS_POWERDOWN] = STIH415_PDN_FRONT(0),
- [STIH415_NAND_POWERDOWN] = STIH415_PDN_FRONT(1),
- [STIH415_KEYSCAN_POWERDOWN] = STIH415_PDN_FRONT(2),
- [STIH415_USB0_POWERDOWN] = STIH415_PDN_REAR(0, 0),
- [STIH415_USB1_POWERDOWN] = STIH415_PDN_REAR(1, 1),
- [STIH415_USB2_POWERDOWN] = STIH415_PDN_REAR(2, 2),
- [STIH415_SATA0_POWERDOWN] = STIH415_PDN_REAR(3, 3),
- [STIH415_SATA1_POWERDOWN] = STIH415_PDN_REAR(4, 4),
- [STIH415_PCIE_POWERDOWN] = STIH415_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih415_softresets[] = {
- [STIH415_ETH0_SOFTRESET] = STIH415_SRST_FRONT(SYSCFG_166, 0),
- [STIH415_ETH1_SOFTRESET] = STIH415_SRST_SBC(SYSCFG_31, 0),
- [STIH415_IRB_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9),
- [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10),
- [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11),
- [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih415_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih415_powerdowns),
- .channels = stih415_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih415_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih415_softresets),
- .channels = stih415_softresets,
-};
-
-static const struct of_device_id stih415_reset_match[] = {
- { .compatible = "st,stih415-powerdown",
- .data = &stih415_powerdown_controller, },
- { .compatible = "st,stih415-softreset",
- .data = &stih415_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih415_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih415",
- .of_match_table = stih415_reset_match,
- },
-};
-
-static int __init stih415_reset_init(void)
-{
- return platform_driver_register(&stih415_reset_driver);
-}
-arch_initcall(stih415_reset_init);
diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c
deleted file mode 100644
index c581d606ef0f..000000000000
--- a/drivers/reset/sti/reset-stih416.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2013 STMicroelectronics (R&D) Limited
- * Author: Stephen Gallimore <stephen.gallimore@st.com>
- * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include <dt-bindings/reset/stih416-resets.h>
-
-#include "reset-syscfg.h"
-
-/*
- * STiH416 Peripheral powerdown definitions.
- */
-static const char stih416_front[] = "st,stih416-front-syscfg";
-static const char stih416_rear[] = "st,stih416-rear-syscfg";
-static const char stih416_sbc[] = "st,stih416-sbc-syscfg";
-static const char stih416_lpm[] = "st,stih416-lpm-syscfg";
-static const char stih416_cpu[] = "st,stih416-cpu-syscfg";
-
-#define STIH416_PDN_FRONT(_bit) \
- _SYSCFG_RST_CH(stih416_front, SYSCFG_1500, _bit, SYSSTAT_1578, _bit)
-
-#define STIH416_PDN_REAR(_cntl, _stat) \
- _SYSCFG_RST_CH(stih416_rear, SYSCFG_2525, _cntl, SYSSTAT_2583, _stat)
-
-#define SYSCFG_1500 0x7d0 /* Powerdown request EMI/NAND/Keyscan */
-#define SYSSTAT_1578 0x908 /* Powerdown status EMI/NAND/Keyscan */
-
-#define SYSCFG_2525 0x834 /* Powerdown request USB/SATA/PCIe */
-#define SYSSTAT_2583 0x91c /* Powerdown status USB/SATA/PCIe */
-
-#define SYSCFG_2552 0x8A0 /* Reset Generator control 0 */
-#define SYSCFG_1539 0x86c /* Softreset Ethernet 0 */
-#define SYSCFG_510 0x7f8 /* Softreset Ethernet 1 */
-#define LPM_SYSCFG_1 0x4 /* Softreset IRB */
-#define SYSCFG_2553 0x8a4 /* Softreset SATA0/1, PCIE0/1 */
-#define SYSCFG_7563 0x8cc /* MPE softresets 0 */
-#define SYSCFG_7564 0x8d0 /* MPE softresets 1 */
-
-#define STIH416_SRST_CPU(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_cpu, _reg, _bit)
-
-#define STIH416_SRST_FRONT(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_front, _reg, _bit)
-
-#define STIH416_SRST_REAR(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_rear, _reg, _bit)
-
-#define STIH416_SRST_LPM(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_lpm, _reg, _bit)
-
-#define STIH416_SRST_SBC(_reg, _bit) \
- _SYSCFG_RST_CH_NO_ACK(stih416_sbc, _reg, _bit)
-
-static const struct syscfg_reset_channel_data stih416_powerdowns[] = {
- [STIH416_EMISS_POWERDOWN] = STIH416_PDN_FRONT(0),
- [STIH416_NAND_POWERDOWN] = STIH416_PDN_FRONT(1),
- [STIH416_KEYSCAN_POWERDOWN] = STIH416_PDN_FRONT(2),
- [STIH416_USB0_POWERDOWN] = STIH416_PDN_REAR(0, 0),
- [STIH416_USB1_POWERDOWN] = STIH416_PDN_REAR(1, 1),
- [STIH416_USB2_POWERDOWN] = STIH416_PDN_REAR(2, 2),
- [STIH416_USB3_POWERDOWN] = STIH416_PDN_REAR(6, 5),
- [STIH416_SATA0_POWERDOWN] = STIH416_PDN_REAR(3, 3),
- [STIH416_SATA1_POWERDOWN] = STIH416_PDN_REAR(4, 4),
- [STIH416_PCIE0_POWERDOWN] = STIH416_PDN_REAR(7, 9),
- [STIH416_PCIE1_POWERDOWN] = STIH416_PDN_REAR(5, 8),
-};
-
-static const struct syscfg_reset_channel_data stih416_softresets[] = {
- [STIH416_ETH0_SOFTRESET] = STIH416_SRST_FRONT(SYSCFG_1539, 0),
- [STIH416_ETH1_SOFTRESET] = STIH416_SRST_SBC(SYSCFG_510, 0),
- [STIH416_IRB_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 6),
- [STIH416_USB0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 9),
- [STIH416_USB1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 10),
- [STIH416_USB2_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 11),
- [STIH416_USB3_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 28),
- [STIH416_SATA0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 7),
- [STIH416_SATA1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 3),
- [STIH416_PCIE0_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 15),
- [STIH416_PCIE1_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 2),
- [STIH416_AUD_DAC_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 14),
- [STIH416_HDTVOUT_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 5),
- [STIH416_VTAC_M_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 25),
- [STIH416_VTAC_A_RX_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2552, 26),
- [STIH416_SYNC_HD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 5),
- [STIH416_SYNC_SD_SOFTRESET] = STIH416_SRST_REAR(SYSCFG_2553, 6),
- [STIH416_BLITTER_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 10),
- [STIH416_GPU_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 11),
- [STIH416_VTAC_M_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 18),
- [STIH416_VTAC_A_TX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 19),
- [STIH416_VTG_AUX_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 21),
- [STIH416_JPEG_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7563, 23),
- [STIH416_HVA_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 2),
- [STIH416_COMPO_M_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 3),
- [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4),
- [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10),
- [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16),
- [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8),
-};
-
-static struct syscfg_reset_controller_data stih416_powerdown_controller = {
- .wait_for_ack = true,
- .nr_channels = ARRAY_SIZE(stih416_powerdowns),
- .channels = stih416_powerdowns,
-};
-
-static struct syscfg_reset_controller_data stih416_softreset_controller = {
- .wait_for_ack = false,
- .active_low = true,
- .nr_channels = ARRAY_SIZE(stih416_softresets),
- .channels = stih416_softresets,
-};
-
-static const struct of_device_id stih416_reset_match[] = {
- { .compatible = "st,stih416-powerdown",
- .data = &stih416_powerdown_controller, },
- { .compatible = "st,stih416-softreset",
- .data = &stih416_softreset_controller, },
- {},
-};
-
-static struct platform_driver stih416_reset_driver = {
- .probe = syscfg_reset_probe,
- .driver = {
- .name = "reset-stih416",
- .of_match_table = stih416_reset_match,
- },
-};
-
-static int __init stih416_reset_init(void)
-{
- return platform_driver_register(&stih416_reset_driver);
-}
-arch_initcall(stih416_reset_init);
diff --git a/drivers/reset/tegra/Kconfig b/drivers/reset/tegra/Kconfig
new file mode 100644
index 000000000000..d2afa293df7d
--- /dev/null
+++ b/drivers/reset/tegra/Kconfig
@@ -0,0 +1,2 @@
+config RESET_TEGRA_BPMP
+ def_bool TEGRA_BPMP
diff --git a/drivers/reset/tegra/Makefile b/drivers/reset/tegra/Makefile
new file mode 100644
index 000000000000..775243ab7383
--- /dev/null
+++ b/drivers/reset/tegra/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RESET_TEGRA_BPMP) += reset-bpmp.o
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
new file mode 100644
index 000000000000..5daf2ee1a396
--- /dev/null
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/reset-controller.h>
+
+#include <soc/tegra/bpmp.h>
+#include <soc/tegra/bpmp-abi.h>
+
+static struct tegra_bpmp *to_tegra_bpmp(struct reset_controller_dev *rstc)
+{
+ return container_of(rstc, struct tegra_bpmp, rstc);
+}
+
+static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
+ enum mrq_reset_commands command,
+ unsigned int id)
+{
+ struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
+ struct mrq_reset_request request;
+ struct tegra_bpmp_message msg;
+
+ memset(&request, 0, sizeof(request));
+ request.cmd = command;
+ request.reset_id = id;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_RESET;
+ msg.tx.data = &request;
+ msg.tx.size = sizeof(request);
+
+ return tegra_bpmp_transfer(bpmp, &msg);
+}
+
+static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_MODULE, id);
+}
+
+static int tegra_bpmp_reset_assert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_ASSERT, id);
+}
+
+static int tegra_bpmp_reset_deassert(struct reset_controller_dev *rstc,
+ unsigned long id)
+{
+ return tegra_bpmp_reset_common(rstc, CMD_RESET_DEASSERT, id);
+}
+
+static const struct reset_control_ops tegra_bpmp_reset_ops = {
+ .reset = tegra_bpmp_reset_module,
+ .assert = tegra_bpmp_reset_assert,
+ .deassert = tegra_bpmp_reset_deassert,
+};
+
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ bpmp->rstc.ops = &tegra_bpmp_reset_ops;
+ bpmp->rstc.owner = THIS_MODULE;
+ bpmp->rstc.of_node = bpmp->dev->of_node;
+ bpmp->rstc.nr_resets = bpmp->soc->num_resets;
+
+ return devm_reset_controller_register(bpmp->dev, &bpmp->rstc);
+}
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 637cf8973c9e..581001989937 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -384,7 +384,7 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
/* if (len > rec_len):
* dump data up to cap_len ignoring small duplicate in rec->payload
*/
- spin_lock_irqsave(&dbf->pay_lock, flags);
+ spin_lock(&dbf->pay_lock);
memset(payload, 0, sizeof(*payload));
memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
payload->fsf_req_id = req_id;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a8762a3efeef..532474109624 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2586,7 +2586,6 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
u32 fd_ioasc;
- char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
if (ioa_cfg->sis64)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
@@ -2607,8 +2606,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
}
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
+ schedule_work(&ioa_cfg->work_q);
hostrcb = ipr_get_free_hostrcb(ioa_cfg);
- kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 54d446c9f56e..b8d3b97b217a 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -36,9 +36,9 @@ struct scsi_dh_blist {
};
static const struct scsi_dh_blist scsi_dh_blist[] = {
- {"DGC", "RAID", "clariion" },
- {"DGC", "DISK", "clariion" },
- {"DGC", "VRAID", "clariion" },
+ {"DGC", "RAID", "emc" },
+ {"DGC", "DISK", "emc" },
+ {"DGC", "VRAID", "emc" },
{"COMPAQ", "MSA1000 VOLUME", "hp_sw" },
{"COMPAQ", "HSV110", "hp_sw" },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 212e98d940bc..6f7128f49c30 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1307,7 +1307,6 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
enum scsi_scan_mode rescan)
{
- char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
unsigned int length;
u64 lun;
@@ -1349,9 +1348,6 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
}
}
- sprintf(devname, "host %d channel %d id %d",
- shost->host_no, sdev->channel, sdev->id);
-
/*
* Allocate enough to hold the header (the same size as one scsi_lun)
* plus the number of luns we are requesting. 511 was the default
@@ -1470,12 +1466,12 @@ retry:
out_err:
kfree(lun_data);
out:
- scsi_device_put(sdev);
if (scsi_device_created(sdev))
/*
* the sdev we used didn't appear in the report luns scan
*/
__scsi_remove_device(sdev);
+ scsi_device_put(sdev);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 7af5226aa55b..618422ea3a41 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4922,9 +4922,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
res = get_user_pages_unlocked(
uaddr,
nr_pages,
- rw == READ,
- 0, /* don't force */
- pages);
+ pages,
+ rw == READ ? FOLL_WRITE : 0); /* don't force */
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 0a4ea809a61b..609bb3424c14 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -23,7 +23,7 @@ config MTK_PMIC_WRAP
config MTK_SCPSYS
bool "MediaTek SCPSYS Support"
depends on ARCH_MEDIATEK || COMPILE_TEST
- default ARM64 && ARCH_MEDIATEK
+ default ARCH_MEDIATEK
select REGMAP
select MTK_INFRACFG
select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 837effe19907..beb79162369a 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -11,17 +11,16 @@
* GNU General Public License for more details.
*/
#include <linux/clk.h>
-#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
-#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
-#include <linux/regmap.h>
-#include <linux/soc/mediatek/infracfg.h>
#include <linux/regulator/consumer.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include <dt-bindings/power/mt2701-power.h>
#include <dt-bindings/power/mt8173-power.h>
#define SPM_VDE_PWR_CON 0x0210
@@ -29,11 +28,17 @@
#define SPM_VEN_PWR_CON 0x0230
#define SPM_ISP_PWR_CON 0x0238
#define SPM_DIS_PWR_CON 0x023c
+#define SPM_CONN_PWR_CON 0x0280
#define SPM_VEN2_PWR_CON 0x0298
-#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_AUDIO_PWR_CON 0x029c /* MT8173 */
+#define SPM_BDP_PWR_CON 0x029c /* MT2701 */
+#define SPM_ETH_PWR_CON 0x02a0
+#define SPM_HIF_PWR_CON 0x02a4
+#define SPM_IFR_MSC_PWR_CON 0x02a8
#define SPM_MFG_2D_PWR_CON 0x02c0
#define SPM_MFG_ASYNC_PWR_CON 0x02c4
#define SPM_USB_PWR_CON 0x02cc
+
#define SPM_PWR_STATUS 0x060c
#define SPM_PWR_STATUS_2ND 0x0610
@@ -43,10 +48,15 @@
#define PWR_ON_2ND_BIT BIT(3)
#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_STATUS_CONN BIT(1)
#define PWR_STATUS_DISP BIT(3)
#define PWR_STATUS_MFG BIT(4)
#define PWR_STATUS_ISP BIT(5)
#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_BDP BIT(14)
+#define PWR_STATUS_ETH BIT(15)
+#define PWR_STATUS_HIF BIT(16)
+#define PWR_STATUS_IFR_MSC BIT(17)
#define PWR_STATUS_VENC_LT BIT(20)
#define PWR_STATUS_VENC BIT(21)
#define PWR_STATUS_MFG_2D BIT(22)
@@ -55,12 +65,23 @@
#define PWR_STATUS_USB BIT(25)
enum clk_id {
- MT8173_CLK_NONE,
- MT8173_CLK_MM,
- MT8173_CLK_MFG,
- MT8173_CLK_VENC,
- MT8173_CLK_VENC_LT,
- MT8173_CLK_MAX,
+ CLK_NONE,
+ CLK_MM,
+ CLK_MFG,
+ CLK_VENC,
+ CLK_VENC_LT,
+ CLK_ETHIF,
+ CLK_MAX,
+};
+
+static const char * const clk_names[] = {
+ NULL,
+ "mm",
+ "mfg",
+ "venc",
+ "venc_lt",
+ "ethif",
+ NULL,
};
#define MAX_CLKS 2
@@ -76,98 +97,6 @@ struct scp_domain_data {
bool active_wakeup;
};
-static const struct scp_domain_data scp_domain_data[] = {
- [MT8173_POWER_DOMAIN_VDEC] = {
- .name = "vdec",
- .sta_mask = PWR_STATUS_VDEC,
- .ctl_offs = SPM_VDE_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_VENC] = {
- .name = "venc",
- .sta_mask = PWR_STATUS_VENC,
- .ctl_offs = SPM_VEN_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC},
- },
- [MT8173_POWER_DOMAIN_ISP] = {
- .name = "isp",
- .sta_mask = PWR_STATUS_ISP,
- .ctl_offs = SPM_ISP_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_MM},
- },
- [MT8173_POWER_DOMAIN_MM] = {
- .name = "mm",
- .sta_mask = PWR_STATUS_DISP,
- .ctl_offs = SPM_DIS_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(12, 12),
- .clk_id = {MT8173_CLK_MM},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
- MT8173_TOP_AXI_PROT_EN_MM_M1,
- },
- [MT8173_POWER_DOMAIN_VENC_LT] = {
- .name = "venc_lt",
- .sta_mask = PWR_STATUS_VENC_LT,
- .ctl_offs = SPM_VEN2_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_MM, MT8173_CLK_VENC_LT},
- },
- [MT8173_POWER_DOMAIN_AUDIO] = {
- .name = "audio",
- .sta_mask = PWR_STATUS_AUDIO,
- .ctl_offs = SPM_AUDIO_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_USB] = {
- .name = "usb",
- .sta_mask = PWR_STATUS_USB,
- .ctl_offs = SPM_USB_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(15, 12),
- .clk_id = {MT8173_CLK_NONE},
- .active_wakeup = true,
- },
- [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
- .name = "mfg_async",
- .sta_mask = PWR_STATUS_MFG_ASYNC,
- .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = 0,
- .clk_id = {MT8173_CLK_MFG},
- },
- [MT8173_POWER_DOMAIN_MFG_2D] = {
- .name = "mfg_2d",
- .sta_mask = PWR_STATUS_MFG_2D,
- .ctl_offs = SPM_MFG_2D_PWR_CON,
- .sram_pdn_bits = GENMASK(11, 8),
- .sram_pdn_ack_bits = GENMASK(13, 12),
- .clk_id = {MT8173_CLK_NONE},
- },
- [MT8173_POWER_DOMAIN_MFG] = {
- .name = "mfg",
- .sta_mask = PWR_STATUS_MFG,
- .ctl_offs = SPM_MFG_PWR_CON,
- .sram_pdn_bits = GENMASK(13, 8),
- .sram_pdn_ack_bits = GENMASK(21, 16),
- .clk_id = {MT8173_CLK_NONE},
- .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
- MT8173_TOP_AXI_PROT_EN_MFG_M0 |
- MT8173_TOP_AXI_PROT_EN_MFG_M1 |
- MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
- },
-};
-
-#define NUM_DOMAINS ARRAY_SIZE(scp_domain_data)
-
struct scp;
struct scp_domain {
@@ -179,7 +108,7 @@ struct scp_domain {
};
struct scp {
- struct scp_domain domains[NUM_DOMAINS];
+ struct scp_domain *domains;
struct genpd_onecell_data pd_data;
struct device *dev;
void __iomem *base;
@@ -408,57 +337,55 @@ static bool scpsys_active_wakeup(struct device *dev)
return scpd->data->active_wakeup;
}
-static int scpsys_probe(struct platform_device *pdev)
+static void init_clks(struct platform_device *pdev, struct clk **clk)
+{
+ int i;
+
+ for (i = CLK_NONE + 1; i < CLK_MAX; i++)
+ clk[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+}
+
+static struct scp *init_scp(struct platform_device *pdev,
+ const struct scp_domain_data *scp_domain_data, int num)
{
struct genpd_onecell_data *pd_data;
struct resource *res;
- int i, j, ret;
+ int i, j;
struct scp *scp;
- struct clk *clk[MT8173_CLK_MAX];
+ struct clk *clk[CLK_MAX];
scp = devm_kzalloc(&pdev->dev, sizeof(*scp), GFP_KERNEL);
if (!scp)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
scp->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
scp->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(scp->base))
- return PTR_ERR(scp->base);
+ return ERR_CAST(scp->base);
+
+ scp->domains = devm_kzalloc(&pdev->dev,
+ sizeof(*scp->domains) * num, GFP_KERNEL);
+ if (!scp->domains)
+ return ERR_PTR(-ENOMEM);
pd_data = &scp->pd_data;
pd_data->domains = devm_kzalloc(&pdev->dev,
- sizeof(*pd_data->domains) * NUM_DOMAINS, GFP_KERNEL);
+ sizeof(*pd_data->domains) * num, GFP_KERNEL);
if (!pd_data->domains)
- return -ENOMEM;
-
- clk[MT8173_CLK_MM] = devm_clk_get(&pdev->dev, "mm");
- if (IS_ERR(clk[MT8173_CLK_MM]))
- return PTR_ERR(clk[MT8173_CLK_MM]);
-
- clk[MT8173_CLK_MFG] = devm_clk_get(&pdev->dev, "mfg");
- if (IS_ERR(clk[MT8173_CLK_MFG]))
- return PTR_ERR(clk[MT8173_CLK_MFG]);
-
- clk[MT8173_CLK_VENC] = devm_clk_get(&pdev->dev, "venc");
- if (IS_ERR(clk[MT8173_CLK_VENC]))
- return PTR_ERR(clk[MT8173_CLK_VENC]);
-
- clk[MT8173_CLK_VENC_LT] = devm_clk_get(&pdev->dev, "venc_lt");
- if (IS_ERR(clk[MT8173_CLK_VENC_LT]))
- return PTR_ERR(clk[MT8173_CLK_VENC_LT]);
+ return ERR_PTR(-ENOMEM);
scp->infracfg = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"infracfg");
if (IS_ERR(scp->infracfg)) {
dev_err(&pdev->dev, "Cannot find infracfg controller: %ld\n",
PTR_ERR(scp->infracfg));
- return PTR_ERR(scp->infracfg);
+ return ERR_CAST(scp->infracfg);
}
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -467,13 +394,15 @@ static int scpsys_probe(struct platform_device *pdev)
if (PTR_ERR(scpd->supply) == -ENODEV)
scpd->supply = NULL;
else
- return PTR_ERR(scpd->supply);
+ return ERR_CAST(scpd->supply);
}
}
- pd_data->num_domains = NUM_DOMAINS;
+ pd_data->num_domains = num;
+
+ init_clks(pdev, clk);
- for (i = 0; i < NUM_DOMAINS; i++) {
+ for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
const struct scp_domain_data *data = &scp_domain_data[i];
@@ -482,13 +411,37 @@ static int scpsys_probe(struct platform_device *pdev)
scpd->scp = scp;
scpd->data = data;
- for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++)
- scpd->clk[j] = clk[data->clk_id[j]];
+
+ for (j = 0; j < MAX_CLKS && data->clk_id[j]; j++) {
+ struct clk *c = clk[data->clk_id[j]];
+
+ if (IS_ERR(c)) {
+ dev_err(&pdev->dev, "%s: clk unavailable\n",
+ data->name);
+ return ERR_CAST(c);
+ }
+
+ scpd->clk[j] = c;
+ }
genpd->name = data->name;
genpd->power_off = scpsys_power_off;
genpd->power_on = scpsys_power_on;
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
+ }
+
+ return scp;
+}
+
+static void mtk_register_power_domains(struct platform_device *pdev,
+ struct scp *scp, int num)
+{
+ struct genpd_onecell_data *pd_data;
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ struct scp_domain *scpd = &scp->domains[i];
+ struct generic_pm_domain *genpd = &scpd->genpd;
/*
* Initially turn on all domains to make the domains usable
@@ -507,6 +460,222 @@ static int scpsys_probe(struct platform_device *pdev)
* valid.
*/
+ pd_data = &scp->pd_data;
+
+ ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
+}
+
+/*
+ * MT2701 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt2701[] = {
+ [MT2701_POWER_DOMAIN_CONN] = {
+ .name = "conn",
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = SPM_CONN_PWR_CON,
+ .bus_prot_mask = 0x0104,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_DISP] = {
+ .name = "disp",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = 0x0002,
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MFG},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_BDP] = {
+ .name = "bdp",
+ .sta_mask = PWR_STATUS_BDP,
+ .ctl_offs = SPM_BDP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_ETH] = {
+ .name = "eth",
+ .sta_mask = PWR_STATUS_ETH,
+ .ctl_offs = SPM_ETH_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_HIF] = {
+ .name = "hif",
+ .sta_mask = PWR_STATUS_HIF,
+ .ctl_offs = SPM_HIF_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_ETHIF},
+ .active_wakeup = true,
+ },
+ [MT2701_POWER_DOMAIN_IFR_MSC] = {
+ .name = "ifr_msc",
+ .sta_mask = PWR_STATUS_IFR_MSC,
+ .ctl_offs = SPM_IFR_MSC_PWR_CON,
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+};
+
+#define NUM_DOMAINS_MT2701 ARRAY_SIZE(scp_domain_data_mt2701)
+
+static int __init scpsys_probe_mt2701(struct platform_device *pdev)
+{
+ struct scp *scp;
+
+ scp = init_scp(pdev, scp_domain_data_mt2701, NUM_DOMAINS_MT2701);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT2701);
+
+ return 0;
+}
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scp_domain_data scp_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .name = "vdec",
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .name = "venc",
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC},
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .name = "isp",
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_MM},
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .name = "mm",
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .clk_id = {CLK_MM},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1,
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .name = "venc_lt",
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_MM, CLK_VENC_LT},
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .name = "audio",
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .name = "usb",
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .clk_id = {CLK_NONE},
+ .active_wakeup = true,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .name = "mfg_async",
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ .clk_id = {CLK_MFG},
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .name = "mfg_2d",
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .clk_id = {CLK_NONE},
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .name = "mfg",
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .clk_id = {CLK_NONE},
+ .bus_prot_mask = MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT,
+ },
+};
+
+#define NUM_DOMAINS_MT8173 ARRAY_SIZE(scp_domain_data_mt8173)
+
+static int __init scpsys_probe_mt8173(struct platform_device *pdev)
+{
+ struct scp *scp;
+ struct genpd_onecell_data *pd_data;
+ int ret;
+
+ scp = init_scp(pdev, scp_domain_data_mt8173, NUM_DOMAINS_MT8173);
+ if (IS_ERR(scp))
+ return PTR_ERR(scp);
+
+ mtk_register_power_domains(pdev, scp, NUM_DOMAINS_MT8173);
+
+ pd_data = &scp->pd_data;
+
ret = pm_genpd_add_subdomain(pd_data->domains[MT8173_POWER_DOMAIN_MFG_ASYNC],
pd_data->domains[MT8173_POWER_DOMAIN_MFG_2D]);
if (ret && IS_ENABLED(CONFIG_PM))
@@ -517,21 +686,39 @@ static int scpsys_probe(struct platform_device *pdev)
if (ret && IS_ENABLED(CONFIG_PM))
dev_err(&pdev->dev, "Failed to add subdomain: %d\n", ret);
- ret = of_genpd_add_provider_onecell(pdev->dev.of_node, pd_data);
- if (ret)
- dev_err(&pdev->dev, "Failed to add OF provider: %d\n", ret);
-
return 0;
}
+/*
+ * scpsys driver init
+ */
+
static const struct of_device_id of_scpsys_match_tbl[] = {
{
+ .compatible = "mediatek,mt2701-scpsys",
+ .data = scpsys_probe_mt2701,
+ }, {
.compatible = "mediatek,mt8173-scpsys",
+ .data = scpsys_probe_mt8173,
}, {
/* sentinel */
}
};
+static int scpsys_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *);
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(of_scpsys_match_tbl, pdev->dev.of_node);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ probe = of_id->data;
+
+ return probe(pdev);
+}
+
static struct platform_driver scpsys_drv = {
.probe = scpsys_probe,
.driver = {
diff --git a/drivers/soc/renesas/Makefile b/drivers/soc/renesas/Makefile
index 623039c3514c..9e0bb329594c 100644
--- a/drivers/soc/renesas/Makefile
+++ b/drivers/soc/renesas/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_ARCH_R8A7743) += rcar-sysc.o r8a7743-sysc.o
obj-$(CONFIG_ARCH_R8A7779) += rcar-sysc.o r8a7779-sysc.o
obj-$(CONFIG_ARCH_R8A7790) += rcar-sysc.o r8a7790-sysc.o
obj-$(CONFIG_ARCH_R8A7791) += rcar-sysc.o r8a7791-sysc.o
diff --git a/drivers/soc/renesas/r8a7743-sysc.c b/drivers/soc/renesas/r8a7743-sysc.c
new file mode 100644
index 000000000000..9583a327d90c
--- /dev/null
+++ b/drivers/soc/renesas/r8a7743-sysc.c
@@ -0,0 +1,32 @@
+/*
+ * Renesas RZ/G1M System Controller
+ *
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bug.h>
+#include <linux/kernel.h>
+
+#include <dt-bindings/power/r8a7743-sysc.h>
+
+#include "rcar-sysc.h"
+
+static const struct rcar_sysc_area r8a7743_areas[] __initconst = {
+ { "always-on", 0, 0, R8A7743_PD_ALWAYS_ON, -1, PD_ALWAYS_ON },
+ { "ca15-scu", 0x180, 0, R8A7743_PD_CA15_SCU, R8A7743_PD_ALWAYS_ON,
+ PD_SCU },
+ { "ca15-cpu0", 0x40, 0, R8A7743_PD_CA15_CPU0, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "ca15-cpu1", 0x40, 1, R8A7743_PD_CA15_CPU1, R8A7743_PD_CA15_SCU,
+ PD_CPU_NOCR },
+ { "sgx", 0xc0, 0, R8A7743_PD_SGX, R8A7743_PD_ALWAYS_ON },
+};
+
+const struct rcar_sysc_info r8a7743_sysc_info __initconst = {
+ .areas = r8a7743_areas,
+ .num_areas = ARRAY_SIZE(r8a7743_areas),
+};
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 65c8e1eb90c0..71acd45b13f0 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -275,6 +275,9 @@ finalize:
}
static const struct of_device_id rcar_sysc_matches[] = {
+#ifdef CONFIG_ARCH_R8A7743
+ { .compatible = "renesas,r8a7743-sysc", .data = &r8a7743_sysc_info },
+#endif
#ifdef CONFIG_ARCH_R8A7779
{ .compatible = "renesas,r8a7779-sysc", .data = &r8a7779_sysc_info },
#endif
diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h
index 77dbe861473f..8ab9ca8a825a 100644
--- a/drivers/soc/renesas/rcar-sysc.h
+++ b/drivers/soc/renesas/rcar-sysc.h
@@ -50,6 +50,7 @@ struct rcar_sysc_info {
unsigned int num_areas;
};
+extern const struct rcar_sysc_info r8a7743_sysc_info;
extern const struct rcar_sysc_info r8a7779_sysc_info;
extern const struct rcar_sysc_info r8a7790_sysc_info;
extern const struct rcar_sysc_info r8a7791_sysc_info;
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 7acd1517dd37..1c78c42416c6 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -9,6 +9,7 @@
*/
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/err.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
@@ -105,12 +106,24 @@ static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
return (val & pd_info->idle_mask) == pd_info->idle_mask;
}
+static unsigned int rockchip_pmu_read_ack(struct rockchip_pmu *pmu)
+{
+ unsigned int val;
+
+ regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
+ return val;
+}
+
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
bool idle)
{
const struct rockchip_domain_info *pd_info = pd->info;
+ struct generic_pm_domain *genpd = &pd->genpd;
struct rockchip_pmu *pmu = pd->pmu;
+ unsigned int target_ack;
unsigned int val;
+ bool is_idle;
+ int ret;
if (pd_info->req_mask == 0)
return 0;
@@ -120,12 +133,26 @@ static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
dsb(sy);
- do {
- regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
- } while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));
+ /* Wait until idle_ack = 1 */
+ target_ack = idle ? pd_info->ack_mask : 0;
+ ret = readx_poll_timeout_atomic(rockchip_pmu_read_ack, pmu, val,
+ (val & pd_info->ack_mask) == target_ack,
+ 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to get ack on domain '%s', val=0x%x\n",
+ genpd->name, val);
+ return ret;
+ }
- while (rockchip_pmu_domain_is_idle(pd) != idle)
- cpu_relax();
+ ret = readx_poll_timeout_atomic(rockchip_pmu_domain_is_idle, pd,
+ is_idle, is_idle == idle, 0, 10000);
+ if (ret) {
+ dev_err(pmu->dev,
+ "failed to set idle on domain '%s', val=%d\n",
+ genpd->name, is_idle);
+ return ret;
+ }
return 0;
}
@@ -198,6 +225,8 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
bool on)
{
struct rockchip_pmu *pmu = pd->pmu;
+ struct generic_pm_domain *genpd = &pd->genpd;
+ bool is_on;
if (pd->info->pwr_mask == 0)
return;
@@ -207,8 +236,13 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
dsb(sy);
- while (rockchip_pmu_domain_is_on(pd) != on)
- cpu_relax();
+ if (readx_poll_timeout_atomic(rockchip_pmu_domain_is_on, pd, is_on,
+ is_on == on, 0, 10000)) {
+ dev_err(pmu->dev,
+ "failed to set domain '%s', val=%d\n",
+ genpd->name, is_on);
+ return;
+ }
}
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
@@ -445,7 +479,16 @@ err_out:
static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
{
- int i;
+ int i, ret;
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->pmu->dev, "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
for (i = 0; i < pd->num_clks; i++) {
clk_unprepare(pd->clks[i]);
@@ -597,10 +640,12 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
* Configure power up and down transition delays for CORE
* and GPU domains.
*/
- rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
- pmu_info->core_power_transition_time);
- rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
- pmu_info->gpu_power_transition_time);
+ if (pmu_info->core_power_transition_time)
+ rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
+ pmu_info->core_power_transition_time);
+ if (pmu_info->gpu_pwrcnt_offset)
+ rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
+ pmu_info->gpu_power_transition_time);
error = -ENODEV;
@@ -627,7 +672,11 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
goto err_out;
}
- of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ error = of_genpd_add_provider_onecell(np, &pmu->genpd_data);
+ if (error) {
+ dev_err(dev, "failed to add provider: %d\n", error);
+ goto err_out;
+ }
return 0;
@@ -722,11 +771,7 @@ static const struct rockchip_pmu_info rk3399_pmu = {
.idle_offset = 0x64,
.ack_offset = 0x68,
- .core_pwrcnt_offset = 0x9c,
- .gpu_pwrcnt_offset = 0xa4,
-
- .core_power_transition_time = 24,
- .gpu_power_transition_time = 24,
+ /* ARM Trusted Firmware manages power transition times */
.num_domains = ARRAY_SIZE(rk3399_pm_domains),
.domain_info = rk3399_pm_domains,
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index 03089ad2fc65..e5e124c07066 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -77,5 +77,19 @@ config ARCH_TEGRA_210_SOC
controllers, such as GPIO, I2C, SPI, SDHCI, PCIe, SATA and XHCI, to
name only a few.
+config ARCH_TEGRA_186_SOC
+ bool "NVIDIA Tegra186 SoC"
+ select MAILBOX
+ select TEGRA_BPMP
+ select TEGRA_HSP_MBOX
+ select TEGRA_IVC
+ help
+ Enable support for the NVIDIA Tegra186 SoC. The Tegra186 features a
+ combination of Denver and Cortex-A57 CPU cores and a GPU based on
+ the Pascal architecture. It contains an ADSP with a Cortex-A9 CPU
+ used for audio processing, hardware video encoders/decoders with
+ multi-format support, ISP for image capture processing and BPMP for
+ power management.
+
endif
endif
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
index c29040fdf9a7..1091b9f1dd07 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
@@ -423,8 +423,7 @@ create_pagelist(char __user *buf, size_t count, unsigned short type,
actual_pages = get_user_pages(task, task->mm,
(unsigned long)buf & ~(PAGE_SIZE - 1),
num_pages,
- (type == PAGELIST_READ) /*Write */ ,
- 0 /*Force */ ,
+ (type == PAGELIST_READ) ? FOLL_WRITE : 0,
pages,
NULL /*vmas */);
up_read(&task->mm->mmap_sem);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index e11c0e07471b..7b6cd4d80621 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1477,8 +1477,7 @@ dump_phys_mem(void *virt_addr, uint32_t num_bytes)
current->mm, /* mm */
(unsigned long)virt_addr, /* start */
num_pages, /* len */
- 0, /* write */
- 0, /* force */
+ 0, /* gup_flags */
pages, /* pages (array of page pointers) */
NULL); /* vmas */
up_read(&current->mm->mmap_sem);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 39b928c2849d..b7d747e92c7a 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1804,6 +1804,10 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 * Otherwise, initiator is not expecting a NOPIN in response.
* Just ignore for now.
*/
+
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+
return 0;
}
EXPORT_SYMBOL(iscsit_process_nop_out);
@@ -2982,7 +2986,7 @@ iscsit_build_nopin_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
pr_debug("Built NOPIN %s Response ITT: 0x%08x, TTT: 0x%08x,"
" StatSN: 0x%08x, Length %u\n", (nopout_response) ?
- "Solicitied" : "Unsolicitied", cmd->init_task_tag,
+ "Solicited" : "Unsolicited", cmd->init_task_tag,
cmd->targ_xfer_tag, cmd->stat_sn, cmd->buf_ptr_size);
}
EXPORT_SYMBOL(iscsit_build_nopin_rsp);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index adf419fa4291..15f79a2ca34a 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -434,7 +434,7 @@ static int iscsi_login_zero_tsih_s2(
/*
* Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
- * Immediate Data + Unsolicitied Data-OUT if necessary..
+ * Immediate Data + Unsolicited Data-OUT if necessary..
*/
param = iscsi_find_param_from_key("MaxRecvDataSegmentLength",
conn->param_list);
@@ -646,7 +646,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
/*
- * FIXME: Unsolicitied NopIN support for ISER
+ * FIXME: Unsolicited NopIN support for ISER
*/
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND)
return;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 6094a6beddde..7dfefd66df93 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -754,15 +754,7 @@ EXPORT_SYMBOL(target_complete_cmd);
void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
- if (scsi_status != SAM_STAT_GOOD) {
- return;
- }
-
- /*
- * Calculate new residual count based upon length of SCSI data
- * transferred.
- */
- if (length < cmd->data_length) {
+ if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
cmd->residual_count += cmd->data_length - length;
} else {
@@ -771,12 +763,6 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len
}
cmd->data_length = length;
- } else if (length > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = length - cmd->data_length;
- } else {
- cmd->se_cmd_flags &= ~(SCF_OVERFLOW_BIT | SCF_UNDERFLOW_BIT);
- cmd->residual_count = 0;
}
target_complete_cmd(cmd, scsi_status);
@@ -1706,6 +1692,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
+ case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2547,8 +2534,12 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
* fabric acknowledgement that requires two target_put_sess_cmd()
* invocations before se_cmd descriptor release.
*/
- if (ack_kref)
- kref_get(&se_cmd->cmd_kref);
+ if (ack_kref) {
+ if (!kref_get_unless_zero(&se_cmd->cmd_kref))
+ return -EINVAL;
+
+ se_cmd->se_cmd_flags |= SCF_ACK_KREF;
+ }
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2627,7 +2618,7 @@ EXPORT_SYMBOL(target_put_sess_cmd);
*/
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
- struct se_cmd *se_cmd;
+ struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
int rc;
@@ -2639,14 +2630,16 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ list_for_each_entry_safe(se_cmd, tmp_cmd,
+ &se_sess->sess_wait_list, se_cmd_list) {
rc = kref_get_unless_zero(&se_cmd->cmd_kref);
if (rc) {
se_cmd->cmd_wait_set = 1;
spin_lock(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
spin_unlock(&se_cmd->t_state_lock);
- }
+ } else
+ list_del_init(&se_cmd->se_cmd_list);
}
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
@@ -2871,6 +2864,12 @@ static const struct sense_info sense_info_table[] = {
.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
.add_sector_info = true,
},
+ [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
+ .key = COPY_ABORTED,
+ .asc = 0x0d,
+ .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
+
+ },
[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
/*
* Returning ILLEGAL REQUEST would cause immediate IO errors on
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 62bf4fe5704a..47562509b489 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -96,7 +96,7 @@ struct tcmu_dev {
size_t dev_size;
u32 cmdr_size;
u32 cmdr_last_cleaned;
- /* Offset of data ring from start of mb */
+ /* Offset of data area from start of mb */
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
@@ -349,7 +349,7 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap)
/*
* We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data ring.
+ * space available on the data area.
*
* Called with ring lock held.
*/
@@ -389,7 +389,8 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
return true;
}
-static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static sense_reason_t
+tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
@@ -405,7 +406,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return -EINVAL;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
/*
* Must be a certain minimum size for response sense info, but
@@ -432,11 +433,14 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
data_length += se_cmd->t_bidi_data_sg->length;
}
- if ((command_size > (udev->cmdr_size / 2))
- || data_length > udev->data_size)
- pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
- "cmd/data ring buffers\n", command_size, data_length,
+ if ((command_size > (udev->cmdr_size / 2)) ||
+ data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
+ "cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
+ spin_unlock_irq(&udev->cmdr_lock);
+ return TCM_INVALID_CDB_FIELD;
+ }
while (!is_ring_space_avail(udev, command_size, data_length)) {
int ret;
@@ -450,7 +454,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
- return -ETIMEDOUT;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
spin_lock_irq(&udev->cmdr_lock);
@@ -487,9 +491,7 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bitmap_copy(old_bitmap, udev->data_bitmap, DATA_BLOCK_BITS);
- /*
- * Fix up iovecs, and handle if allocation in data ring wrapped.
- */
+ /* Handle allocating space from the data area */
iov = &entry->req.iov[0];
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
@@ -526,10 +528,11 @@ static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
mod_timer(&udev->timeout,
round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
- return 0;
+ return TCM_NO_SENSE;
}
-static int tcmu_queue_cmd(struct se_cmd *se_cmd)
+static sense_reason_t
+tcmu_queue_cmd(struct se_cmd *se_cmd)
{
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
@@ -538,10 +541,10 @@ static int tcmu_queue_cmd(struct se_cmd *se_cmd)
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
- return -ENOMEM;
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret < 0) {
+ if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
spin_lock_irq(&udev->commands_lock);
idr_remove(&udev->commands, tcmu_cmd->cmd_id);
@@ -561,7 +564,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
/*
* cmd has been completed already from timeout, just reclaim
- * data ring space and free cmd
+ * data area space and free cmd
*/
free_data_area(udev, cmd);
@@ -1129,20 +1132,9 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
}
static sense_reason_t
-tcmu_pass_op(struct se_cmd *se_cmd)
-{
- int ret = tcmu_queue_cmd(se_cmd);
-
- if (ret != 0)
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- else
- return TCM_NO_SENSE;
-}
-
-static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
- return passthrough_parse_cdb(cmd, tcmu_pass_op);
+ return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
static const struct target_backend_ops tcmu_ops = {
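
The tcmu hunks above switch the queueing path from -errno returns to sense_reason_t, so a failure maps directly onto SCSI sense data (TCM_INVALID_CDB_FIELD for oversized requests, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE for a broken device or a timeout) and the tcmu_pass_op() translation wrapper can be dropped. A minimal userspace sketch of that calling convention; the enum values and queue routine below are illustrative stand-ins, not the kernel definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the kernel's sense_reason_t values. */
    typedef enum {
            EX_TCM_NO_SENSE = 0,
            EX_TCM_LUN_COMM_FAILURE,
            EX_TCM_INVALID_CDB_FIELD,
    } ex_sense_reason_t;

    /* Hypothetical queue routine: reports *why* queueing failed, not just -errno. */
    static ex_sense_reason_t ex_queue_cmd(int cmd_too_big, int dev_broken)
    {
            if (dev_broken)
                    return EX_TCM_LUN_COMM_FAILURE;
            if (cmd_too_big)
                    return EX_TCM_INVALID_CDB_FIELD;
            return EX_TCM_NO_SENSE;
    }

    int main(void)
    {
            /* Callers test against the "no sense" value instead of ret < 0. */
            if (ex_queue_cmd(0, 0) != EX_TCM_NO_SENSE)
                    fprintf(stderr, "could not queue command\n");
            return 0;
    }
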
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 75cd85426ae3..094a1440eacb 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -104,7 +104,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
mutex_unlock(&g_device_mutex);
- pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
return -EINVAL;
}
@@ -185,7 +185,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned short tdll)
+ unsigned short tdll, sense_reason_t *sense_ret)
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
@@ -193,6 +193,8 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
unsigned short start = 0;
bool src = true;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
@@ -243,9 +245,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
else
rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
-
- if (rc < 0)
+ /*
+ * If a matching IEEE NAA 0x83 descriptor for the requested device
+ * is not located on this node, return COPY_ABORTED with ASC/ASCQ
+ * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request that the
+ * initiator fall back to the normal copy method.
+ */
+ if (rc < 0) {
+ *sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
goto out;
+ }
pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->src_dev, &xop->src_tid_wwn[0]);
@@ -653,6 +662,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
remote_port, true);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -664,6 +674,7 @@ static int target_xcopy_read_source(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
transport_generic_free_cmd(se_cmd, 0);
return rc;
}
@@ -714,6 +725,7 @@ static int target_xcopy_write_destination(
remote_port, false);
if (rc < 0) {
struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
/*
* If the failure happened before the t_mem_list hand-off in
* target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
@@ -729,6 +741,7 @@ static int target_xcopy_write_destination(
rc = target_xcopy_issue_pt_cmd(xpt_cmd);
if (rc < 0) {
+ ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
transport_generic_free_cmd(se_cmd, 0);
return rc;
@@ -815,9 +828,14 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
kfree(xop);
-
- pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
- ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ /*
+ * Don't override an error scsi status if it has already been set
+ */
+ if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
+ pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
+ " CHECK_CONDITION -> sending response\n", rc);
+ ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ }
target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
@@ -875,7 +893,7 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
- rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
+ rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
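
The xcopy hunks above let target_xcopy_parse_target_descriptors() report a specific sense code through a new *sense_ret argument while keeping its rc < 0 return for flow control. A small sketch of that out-parameter pattern, with hypothetical names:

    #include <stdio.h>

    enum ex_sense { EX_INVALID_PARAM_LIST, EX_COPY_TARGET_UNREACHABLE };

    /* Hypothetical parser: rc signals failure, *sense says which sense to report. */
    static int ex_parse_descriptors(int dev_found, enum ex_sense *sense)
    {
            *sense = EX_INVALID_PARAM_LIST;     /* default for malformed lists */
            if (!dev_found) {
                    *sense = EX_COPY_TARGET_UNREACHABLE;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            enum ex_sense sense;

            if (ex_parse_descriptors(0, &sense) < 0)
                    printf("parse failed, sense=%d\n", sense);
            return 0;
    }
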
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 216e18cc9133..ff5de9a96643 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -572,10 +572,10 @@ static void ft_send_work(struct work_struct *work)
if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
&cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
ntohl(fcp->fc_dl), task_attr, data_dir,
- TARGET_SCF_ACK_KREF))
+ TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
goto err;
- pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
+ pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 6ffbb603d912..fd5c3de79470 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -39,6 +39,11 @@
#include "tcm_fc.h"
+#define TFC_SESS_DBG(lport, fmt, args...) \
+ pr_debug("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args)
+
static void ft_sess_delete_all(struct ft_tport *);
/*
@@ -167,24 +172,29 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
struct ft_tport *tport;
struct hlist_head *head;
struct ft_sess *sess;
+ char *reason = "no session created";
rcu_read_lock();
tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
- if (!tport)
+ if (!tport) {
+ reason = "not an FCP port";
goto out;
+ }
head = &tport->hash[ft_sess_hash(port_id)];
hlist_for_each_entry_rcu(sess, head, hash) {
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- pr_debug("port_id %x found %p\n", port_id, sess);
+ TFC_SESS_DBG(lport, "port_id %x found %p\n",
+ port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- pr_debug("port_id %x not found\n", port_id);
+ TFC_SESS_DBG(lport, "port_id %x not found, %s\n",
+ port_id, reason);
return NULL;
}
@@ -195,7 +205,7 @@ static int ft_sess_alloc_cb(struct se_portal_group *se_tpg,
struct ft_tport *tport = sess->tport;
struct hlist_head *head = &tport->hash[ft_sess_hash(sess->port_id)];
- pr_debug("port_id %x sess %p\n", sess->port_id, sess);
+ TFC_SESS_DBG(tport->lport, "port_id %x sess %p\n", sess->port_id, sess);
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
@@ -223,7 +233,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess)
- return NULL;
+ return ERR_PTR(-ENOMEM);
kref_init(&sess->kref); /* ref for table entry */
sess->tport = tport;
@@ -234,8 +244,9 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
TARGET_PROT_NORMAL, &initiatorname[0],
sess, ft_sess_alloc_cb);
if (IS_ERR(sess->se_sess)) {
+ int rc = PTR_ERR(sess->se_sess);
kfree(sess);
- return NULL;
+ sess = ERR_PTR(rc);
}
return sess;
}
@@ -319,7 +330,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- pr_debug("port_id %x\n", port_id);
+ TFC_SESS_DBG(sess->tport->lport, "port_id %x close session\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
ft_close_sess(sess);
@@ -379,8 +390,13 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
if (!(fcp_parm & FCP_SPPF_INIT_FCN))
return FC_SPP_RESP_CONF;
sess = ft_sess_create(tport, rdata->ids.port_id, rdata);
- if (!sess)
- return FC_SPP_RESP_RES;
+ if (IS_ERR(sess)) {
+ if (PTR_ERR(sess) == -EACCES) {
+ spp->spp_flags &= ~FC_SPP_EST_IMG_PAIR;
+ return FC_SPP_RESP_CONF;
+ } else
+ return FC_SPP_RESP_RES;
+ }
if (!sess->params)
rdata->prli_count++;
sess->params = fcp_parm;
@@ -423,8 +439,8 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- pr_debug("port_id %x flags %x ret %x\n",
- rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
+ TFC_SESS_DBG(rdata->local_port, "port_id %x flags %x ret %x\n",
+ rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -477,11 +493,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- pr_debug("sid %x\n", sid);
+ TFC_SESS_DBG(lport, "recv sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- pr_debug("sid %x sess lookup failed\n", sid);
+ TFC_SESS_DBG(lport, "sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
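
ft_sess_create() above now returns ERR_PTR(-ENOMEM) or ERR_PTR(rc) instead of a bare NULL, so ft_prli_locked() can tell an -EACCES rejection apart from a resource failure. A simplified userspace sketch of the encoded-error-pointer idea; the helpers below are stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(), assuming the usual trick of reserving the top page-worth of addresses for errno values:

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* Simplified stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static inline void *ex_err_ptr(long err) { return (void *)(intptr_t)err; }
    static inline int ex_is_err(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-4095;
    }
    static inline long ex_ptr_err(const void *p) { return (long)(intptr_t)p; }

    /* Hypothetical session constructor mirroring the new calling convention. */
    static void *ex_sess_create(int allowed)
    {
            if (!allowed)
                    return ex_err_ptr(-EACCES);     /* reject, and say why */
            return ex_err_ptr(-ENOMEM);             /* pretend allocation failed */
    }

    int main(void)
    {
            void *sess = ex_sess_create(0);

            if (ex_is_err(sess))
                    printf("create failed: %ld\n", ex_ptr_err(sess));
            return 0;
    }
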
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 3b1ca4411073..a2564ab91e62 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -686,8 +686,8 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
if (!pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, WRITE,
- 0, pages);
+ ret = get_user_pages_unlocked((unsigned long)buf, nr_pages, pages,
+ FOLL_WRITE);
if (ret < nr_pages) {
nr_pages = ret;
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 60bdad3a689b..150ce2abf6c8 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -245,8 +245,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
/* Get the physical addresses of the source buffer */
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(param.local_vaddr - lb_offset,
- num_pages, (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ num_pages, (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
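
The pvr2fb and fsl_hypervisor hunks above are converted to the get_user_pages() interface that takes a single gup_flags bitmask (FOLL_WRITE and friends) instead of separate write/force ints. A small sketch of folding boolean intent into such a flags word; the EX_FOLL_* values are illustrative placeholders, not the kernel's definitions:

    #include <stdio.h>

    /* Illustrative flag bits in the style of the kernel's FOLL_* flags. */
    #define EX_FOLL_WRITE 0x01
    #define EX_FOLL_FORCE 0x10

    static unsigned int ex_gup_flags(int write, int force)
    {
            unsigned int flags = 0;

            if (write)
                    flags |= EX_FOLL_WRITE;  /* caller intends to modify the pages */
            if (force)
                    flags |= EX_FOLL_FORCE;  /* override VMA protections (ptrace-style) */
            return flags;
    }

    int main(void)
    {
            printf("flags=%#x\n", ex_gup_flags(1, 0));
            return 0;
    }
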
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index e473e3b23720..6d1fbda0f461 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -499,6 +499,10 @@ static int wdat_wdt_resume_noirq(struct device *dev)
ret = wdat_wdt_enable_reboot(wdat);
if (ret)
return ret;
+
+ ret = wdat_wdt_ping(&wdat->wdd);
+ if (ret)
+ return ret;
}
return wdat_wdt_start(&wdat->wdd);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index ccc70d96958d..d4d8b7e36b2f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -698,7 +698,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
- bio->bi_error = ret;
+ comp_bio->bi_error = ret;
bio_endio(comp_bio);
}
@@ -728,7 +728,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
- bio->bi_error = ret;
+ comp_bio->bi_error = ret;
bio_endio(comp_bio);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 7bf08825cc11..18630e800208 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1272,7 +1272,8 @@ again:
statret = __ceph_do_getattr(inode, page,
CEPH_STAT_CAP_INLINE_DATA, !!page);
if (statret < 0) {
- __free_page(page);
+ if (page)
+ __free_page(page);
if (statret == -ENODATA) {
BUG_ON(retry_op != READ_INLINE);
goto again;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index bca1b49c1c4b..ef4d04647325 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1511,7 +1511,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
}
- if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
+ if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
+ !(rinfo->hash_order && req->r_path2)) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a29ffce98187..b382e5910eea 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -845,6 +845,8 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
err = ceph_fs_debugfs_init(fsc);
if (err < 0)
goto fail;
+ } else {
+ root = dget(fsc->sb->s_root);
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 40b703217977..febc28f9e2c2 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -16,7 +16,7 @@
static int __remove_xattr(struct ceph_inode_info *ci,
struct ceph_inode_xattr *xattr);
-const struct xattr_handler ceph_other_xattr_handler;
+static const struct xattr_handler ceph_other_xattr_handler;
/*
* List of handlers for synthetic system.* attributes. Other
@@ -1086,7 +1086,7 @@ static int ceph_set_xattr_handler(const struct xattr_handler *handler,
return __ceph_setxattr(inode, name, value, size, flags);
}
-const struct xattr_handler ceph_other_xattr_handler = {
+static const struct xattr_handler ceph_other_xattr_handler = {
.prefix = "", /* match any name => handlers called with full name */
.get = ceph_get_xattr_handler,
.set = ceph_set_xattr_handler,
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 61057b7dbddb..98f87fe8f186 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -151,7 +151,10 @@ static int do_page_crypto(struct inode *inode,
struct page *src_page, struct page *dest_page,
gfp_t gfp_flags)
{
- u8 xts_tweak[FS_XTS_TWEAK_SIZE];
+ struct {
+ __le64 index;
+ u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
+ } xts_tweak;
struct skcipher_request *req = NULL;
DECLARE_FS_COMPLETION_RESULT(ecr);
struct scatterlist dst, src;
@@ -171,17 +174,15 @@ static int do_page_crypto(struct inode *inode,
req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
page_crypt_complete, &ecr);
- BUILD_BUG_ON(FS_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- FS_XTS_TWEAK_SIZE - sizeof(index));
+ BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
+ xts_tweak.index = cpu_to_le64(index);
+ memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
sg_init_table(&dst, 1);
sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
sg_init_table(&src, 1);
sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
- xts_tweak);
+ skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
if (rw == FS_DECRYPT)
res = crypto_skcipher_decrypt(req);
else
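
The fs/crypto hunk above replaces the memcpy/memset construction of the XTS tweak with a struct whose first field is the little-endian page index, so the layout is explicit and its size is checked at compile time. A hedged userspace sketch of the same idea; the size constant and helper are assumptions for illustration, and where the patch uses cpu_to_le64() this sketch simply assumes host byte order:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define EX_XTS_TWEAK_SIZE 16    /* assumed tweak size for this sketch */

    struct ex_xts_tweak {
            uint64_t index;                                 /* page index */
            uint8_t padding[EX_XTS_TWEAK_SIZE - sizeof(uint64_t)];
    };

    static void ex_fill_tweak(struct ex_xts_tweak *t, uint64_t index)
    {
            /* Compile-time layout check, like the BUILD_BUG_ON() in the patch. */
            _Static_assert(sizeof(*t) == EX_XTS_TWEAK_SIZE, "tweak size mismatch");
            t->index = index;
            memset(t->padding, 0, sizeof(t->padding));
    }

    int main(void)
    {
            struct ex_xts_tweak t;

            ex_fill_tweak(&t, 42);
            printf("tweak size: %zu bytes\n", sizeof(t));
            return 0;
    }
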
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
index ed115acb5dee..6865663aac69 100644
--- a/fs/crypto/policy.c
+++ b/fs/crypto/policy.c
@@ -109,6 +109,8 @@ int fscrypt_process_policy(struct file *filp,
if (ret)
return ret;
+ inode_lock(inode);
+
if (!inode_has_encryption_context(inode)) {
if (!S_ISDIR(inode->i_mode))
ret = -EINVAL;
@@ -127,6 +129,8 @@ int fscrypt_process_policy(struct file *filp,
ret = -EINVAL;
}
+ inode_unlock(inode);
+
mnt_drop_write_file(filp);
return ret;
}
diff --git a/fs/exec.c b/fs/exec.c
index 6fcfb3f7b137..4e497b9ee71e 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -191,6 +191,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
struct page *page;
int ret;
+ unsigned int gup_flags = FOLL_FORCE;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
@@ -199,12 +200,16 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return NULL;
}
#endif
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
/*
* We are doing an exec(). 'current' is the process
* doing the exec and bprm->mm is the new process's mm.
*/
- ret = get_user_pages_remote(current, bprm->mm, pos, 1, write,
- 1, &page, NULL);
+ ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
+ &page, NULL);
if (ret <= 0)
return NULL;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index d831e24dc885..41b8b44a391c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -622,7 +622,7 @@ static int ext2_get_blocks(struct inode *inode,
u32 *bno, bool *new, bool *boundary,
int create)
{
- int err = -EIO;
+ int err;
int offsets[4];
Indirect chain[4];
Indirect *partial;
@@ -639,7 +639,7 @@ static int ext2_get_blocks(struct inode *inode,
depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
if (depth == 0)
- return (err);
+ return -EIO;
partial = ext2_get_branch(inode, depth, offsets, chain, &err);
/* Simplest case - block found, no allocation needed */
@@ -761,7 +761,6 @@ static int ext2_get_blocks(struct inode *inode,
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
got_it:
- *bno = le32_to_cpu(chain[depth-1].key);
if (count > blocks_to_boundary)
*boundary = true;
err = count;
@@ -772,6 +771,8 @@ cleanup:
brelse(partial->bh);
partial--;
}
+ if (err > 0)
+ *bno = le32_to_cpu(chain[depth-1].key);
return err;
}
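
The ext2_get_blocks() fix above stops writing *bno before the error paths have run, so callers only see a block number when the lookup actually succeeded. A tiny sketch of that "fill outputs only on success" discipline, with hypothetical names:

    #include <stdio.h>

    /* Hypothetical lookup: *bno is touched only when the result is valid. */
    static int ex_get_block(int want_fail, unsigned int *bno)
    {
            if (want_fail)
                    return -5;      /* -EIO-style failure, *bno left untouched */
            *bno = 1234;            /* publish the result only on success */
            return 1;               /* number of blocks mapped */
    }

    int main(void)
    {
            unsigned int bno = 0;

            if (ex_get_block(1, &bno) < 0)
                    printf("lookup failed, bno still %u\n", bno);
            return 0;
    }
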
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 02ddec6d8a7d..fdb19543af1e 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -128,12 +128,12 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
node = rb_first(&sbi->system_blks);
while (node) {
entry = rb_entry(node, struct ext4_system_zone, node);
- printk("%s%llu-%llu", first ? "" : ", ",
+ printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",
entry->start_blk, entry->start_blk + entry->count - 1);
first = 0;
node = rb_next(node);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
int ext4_setup_system_zone(struct super_block *sb)
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 3ef1df6ae9ec..1aba469f8220 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -27,16 +27,15 @@
#ifdef CONFIG_EXT4_DEBUG
extern ushort ext4_mballoc_debug;
-#define mb_debug(n, fmt, a...) \
- do { \
- if ((n) <= ext4_mballoc_debug) { \
- printk(KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __func__); \
- printk(fmt, ## a); \
- } \
- } while (0)
+#define mb_debug(n, fmt, ...) \
+do { \
+ if ((n) <= ext4_mballoc_debug) { \
+ printk(KERN_DEBUG "(%s, %d): %s: " fmt, \
+ __FILE__, __LINE__, __func__, ##__VA_ARGS__); \
+ } \
+} while (0)
#else
-#define mb_debug(n, fmt, a...) no_printk(fmt, ## a)
+#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
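
The mb_debug() rewrite above folds the caller's format string into a single printk() call, so the "(file, line): func:" prefix and the message can no longer be split into separate log records. The same forwarding works in plain C with ##__VA_ARGS__; a minimal sketch with fprintf standing in for printk:

    #include <stdio.h>

    /* Single-call debug macro: prefix and message are emitted together. */
    #define ex_debug(fmt, ...) \
            fprintf(stderr, "(%s, %d): %s: " fmt, \
                    __FILE__, __LINE__, __func__, ##__VA_ARGS__)

    int main(void)
    {
            ex_debug("allocated %d blocks\n", 8);
            return 0;
    }
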
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index f92f10d4f66a..104f8bfba718 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -577,12 +577,13 @@ static inline unsigned dx_node_limit(struct inode *dir)
static void dx_show_index(char * label, struct dx_entry *entries)
{
int i, n = dx_get_count (entries);
- printk(KERN_DEBUG "%s index ", label);
+ printk(KERN_DEBUG "%s index", label);
for (i = 0; i < n; i++) {
- printk("%x->%lu ", i ? dx_get_hash(entries + i) :
- 0, (unsigned long)dx_get_block(entries + i));
+ printk(KERN_CONT " %x->%lu",
+ i ? dx_get_hash(entries + i) : 0,
+ (unsigned long)dx_get_block(entries + i));
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
struct stats
@@ -679,7 +680,7 @@ static struct stats dx_show_leaf(struct inode *dir,
}
de = ext4_next_entry(de, size);
}
- printk("(%i)\n", names);
+ printk(KERN_CONT "(%i)\n", names);
return (struct stats) { names, space, 1 };
}
@@ -798,7 +799,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
q = entries + count - 1;
while (p <= q) {
m = p + (q - p) / 2;
- dxtrace(printk("."));
+ dxtrace(printk(KERN_CONT "."));
if (dx_get_hash(m) > hash)
q = m - 1;
else
@@ -810,7 +811,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
at = entries;
while (n--)
{
- dxtrace(printk(","));
+ dxtrace(printk(KERN_CONT ","));
if (dx_get_hash(++at) > hash)
{
at--;
@@ -821,7 +822,8 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
}
at = p - 1;
- dxtrace(printk(" %x->%u\n", at == entries ? 0 : dx_get_hash(at),
+ dxtrace(printk(KERN_CONT " %x->%u\n",
+ at == entries ? 0 : dx_get_hash(at),
dx_get_block(at)));
frame->entries = entries;
frame->at = at;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 6db81fbcbaa6..20da99da0a34 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -597,14 +597,15 @@ void __ext4_std_error(struct super_block *sb, const char *function,
void __ext4_abort(struct super_block *sb, const char *function,
unsigned int line, const char *fmt, ...)
{
+ struct va_format vaf;
va_list args;
save_error_info(sb, function, line);
va_start(args, fmt);
- printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
- function, line);
- vprintk(fmt, args);
- printk("\n");
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: %pV\n",
+ sb->s_id, function, line, &vaf);
va_end(args);
if ((sb->s_flags & MS_RDONLY) == 0) {
@@ -2715,12 +2716,12 @@ static void print_daily_error_info(unsigned long arg)
es->s_first_error_func,
le32_to_cpu(es->s_first_error_line));
if (es->s_first_error_ino)
- printk(": inode %u",
+ printk(KERN_CONT ": inode %u",
le32_to_cpu(es->s_first_error_ino));
if (es->s_first_error_block)
- printk(": block %llu", (unsigned long long)
+ printk(KERN_CONT ": block %llu", (unsigned long long)
le64_to_cpu(es->s_first_error_block));
- printk("\n");
+ printk(KERN_CONT "\n");
}
if (es->s_last_error_time) {
printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
@@ -2729,12 +2730,12 @@ static void print_daily_error_info(unsigned long arg)
es->s_last_error_func,
le32_to_cpu(es->s_last_error_line));
if (es->s_last_error_ino)
- printk(": inode %u",
+ printk(KERN_CONT ": inode %u",
le32_to_cpu(es->s_last_error_ino));
if (es->s_last_error_block)
- printk(": block %llu", (unsigned long long)
+ printk(KERN_CONT ": block %llu", (unsigned long long)
le64_to_cpu(es->s_last_error_block));
- printk("\n");
+ printk(KERN_CONT "\n");
}
mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */
}
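
The ext4/super.c hunks above hand the caller's format to printk through struct va_format and %pV, and mark the follow-on fragments with KERN_CONT, so each message reaches the log as one record rather than several pieces. A hedged userspace sketch of the "assemble first, emit once" approach, with vsnprintf standing in for the kernel's %pV handling:

    #include <stdio.h>
    #include <stdarg.h>

    /* Format the caller's message first, then emit one complete log line. */
    static void ex_abort_msg(const char *func, int line, const char *fmt, ...)
    {
            char buf[256];
            va_list args;

            va_start(args, fmt);
            vsnprintf(buf, sizeof(buf), fmt, args);
            va_end(args);
            fprintf(stderr, "EXT4-fs error: %s:%d: %s\n", func, line, buf);
    }

    int main(void)
    {
            ex_abort_msg("ext4_map_blocks", 123, "bad block %lu", 42UL);
            return 0;
    }
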
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 73bcfd41f5f2..42145be5c6b4 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -223,14 +223,18 @@ static struct attribute *ext4_attrs[] = {
EXT4_ATTR_FEATURE(lazy_itable_init);
EXT4_ATTR_FEATURE(batched_discard);
EXT4_ATTR_FEATURE(meta_bg_resize);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
EXT4_ATTR_FEATURE(encryption);
+#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(lazy_itable_init),
ATTR_LIST(batched_discard),
ATTR_LIST(meta_bg_resize),
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
ATTR_LIST(encryption),
+#endif
ATTR_LIST(metadata_csum_seed),
NULL,
};
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c15d63389957..d77be9e9f535 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -61,18 +61,12 @@
#include "acl.h"
#ifdef EXT4_XATTR_DEBUG
-# define ea_idebug(inode, f...) do { \
- printk(KERN_DEBUG "inode %s:%lu: ", \
- inode->i_sb->s_id, inode->i_ino); \
- printk(f); \
- printk("\n"); \
- } while (0)
-# define ea_bdebug(bh, f...) do { \
- printk(KERN_DEBUG "block %pg:%lu: ", \
- bh->b_bdev, (unsigned long) bh->b_blocknr); \
- printk(f); \
- printk("\n"); \
- } while (0)
+# define ea_idebug(inode, fmt, ...) \
+ printk(KERN_DEBUG "inode %s:%lu: " fmt "\n", \
+ inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
+# define ea_bdebug(bh, fmt, ...) \
+ printk(KERN_DEBUG "block %pg:%lu: " fmt "\n", \
+ bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
@@ -241,7 +235,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
int error = -EFSCORRUPTED;
if (((void *) header >= end) ||
- (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC)))
+ (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
goto errout;
error = ext4_xattr_check_names(entry, end, entry);
errout:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 93985c64d8a8..6f14ee923acd 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -852,16 +852,16 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
for (segno = start_segno; segno < end_segno; segno++) {
- if (get_valid_blocks(sbi, segno, 1) == 0 ||
- unlikely(f2fs_cp_error(sbi)))
- goto next;
-
/* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
- f2fs_bug_on(sbi, !PageUptodate(sum_page));
f2fs_put_page(sum_page, 0);
+ if (get_valid_blocks(sbi, segno, 1) == 0 ||
+ !PageUptodate(sum_page) ||
+ unlikely(f2fs_cp_error(sbi)))
+ goto next;
+
sum = page_address(sum_page);
f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index ad0c745ebad7..871c8b392099 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -687,6 +687,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
pri_bh = NULL;
root_found:
+ /* We don't support read-write mounts */
+ if (!(s->s_flags & MS_RDONLY)) {
+ error = -EACCES;
+ goto out_freebh;
+ }
if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
@@ -1501,9 +1506,6 @@ struct inode *__isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- /* We don't support read-write mounts */
- if (!(flags & MS_RDONLY))
- return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3d8246a9faa4..e1652665bd93 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1149,6 +1149,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "file as BJ_Reserved");
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
+ spin_unlock(&journal->j_list_lock);
} else if (jh->b_transaction == journal->j_committing_transaction) {
/* first access by this transaction */
jh->b_modified = 0;
@@ -1156,8 +1157,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "set next transaction");
spin_lock(&journal->j_list_lock);
jh->b_next_transaction = transaction;
+ spin_unlock(&journal->j_list_lock);
}
- spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
/*
diff --git a/fs/locks.c b/fs/locks.c
index ce93b416b490..22c5b4aa4961 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1609,6 +1609,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
+ percpu_down_read_preempt_disable(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1618,6 +1619,8 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
+ percpu_up_read_preempt_enable(&file_rwsem);
+
locks_dispose_list(&dispose);
}
return type;
@@ -2529,11 +2532,14 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
if (list_empty(&ctx->flc_lease))
return;
+ percpu_down_read_preempt_disable(&file_rwsem);
spin_lock(&ctx->flc_lock);
list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
if (filp == fl->fl_file)
lease_modify(fl, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
+ percpu_up_read_preempt_enable(&file_rwsem);
+
locks_dispose_list(&dispose);
}
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 217847679f0e..2905479f214a 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -344,9 +344,10 @@ static void bl_write_cleanup(struct work_struct *work)
u64 start = hdr->args.offset & (loff_t)PAGE_MASK;
u64 end = (hdr->args.offset + hdr->args.count +
PAGE_SIZE - 1) & (loff_t)PAGE_MASK;
+ u64 lwb = hdr->args.offset + hdr->args.count;
ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
- (end - start) >> SECTOR_SHIFT, end);
+ (end - start) >> SECTOR_SHIFT, lwb);
}
pnfs_ld_write_done(hdr);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ad917bd72b38..7897826d7c51 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1545,7 +1545,7 @@ static int update_open_stateid(struct nfs4_state *state,
struct nfs_client *clp = server->nfs_client;
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_delegation *deleg_cur;
- nfs4_stateid freeme = {0};
+ nfs4_stateid freeme = { };
int ret = 0;
fmode &= (FMODE_READ|FMODE_WRITE);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 89600fd5963d..81818adb8e9e 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -412,10 +412,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
vsize = task_vsize(mm);
- if (permitted) {
- eip = KSTK_EIP(task);
- esp = KSTK_ESP(task);
- }
+ /*
+ * esp and eip are intentionally zeroed out. There is no
+ * non-racy way to read them without freezing the task.
+ * Programs that need reliable values can use ptrace(2).
+ */
}
get_task_comm(tcomm, task);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c2964d890c9a..8e654468ab67 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -252,7 +252,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_FORCE);
if (rv <= 0)
goto out_free_page;
@@ -270,7 +270,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
int nr_read;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count,
+ FOLL_FORCE);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -305,7 +306,8 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count,
+ FOLL_FORCE);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -354,7 +356,8 @@ skip_argv:
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count,
+ FOLL_FORCE);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -832,6 +835,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
unsigned long addr = *ppos;
ssize_t copied;
char *page;
+ unsigned int flags = FOLL_FORCE;
if (!mm)
return 0;
@@ -844,6 +848,9 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
+ if (write)
+ flags |= FOLL_WRITE;
+
while (count > 0) {
int this_len = min_t(int, count, PAGE_SIZE);
@@ -852,7 +859,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
break;
}
- this_len = access_remote_vm(mm, addr, page, this_len, write);
+ this_len = access_remote_vm(mm, addr, page, this_len, flags);
if (!this_len) {
if (!copied)
copied = -EIO;
@@ -965,7 +972,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
this_len = min(max_len, this_len);
retval = access_remote_vm(mm, (env_start + src),
- page, this_len, 0);
+ page, this_len, FOLL_FORCE);
if (retval <= 0) {
ret = retval;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6909582ce5e5..35b92d81692f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -266,24 +266,15 @@ static int do_maps_open(struct inode *inode, struct file *file,
* /proc/PID/maps that is the stack of the main task.
*/
static int is_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, int is_pid)
+ struct vm_area_struct *vma)
{
- int stack = 0;
-
- if (is_pid) {
- stack = vma->vm_start <= vma->vm_mm->start_stack &&
- vma->vm_end >= vma->vm_mm->start_stack;
- } else {
- struct inode *inode = priv->inode;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task)
- stack = vma_is_stack_for_task(vma, task);
- rcu_read_unlock();
- }
- return stack;
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+ * in languages like Go.
+ */
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
}
static void
@@ -354,7 +345,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
goto done;
}
- if (is_stack(priv, vma, is_pid))
+ if (is_stack(priv, vma))
name = "[stack]";
}
@@ -1669,7 +1660,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
seq_file_path(m, file, "\n\t= ");
} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
seq_puts(m, " heap");
- } else if (is_stack(proc_priv, vma, is_pid)) {
+ } else if (is_stack(proc_priv, vma)) {
seq_puts(m, " stack");
}
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index faacb0c0d857..37175621e890 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -124,25 +124,17 @@ unsigned long task_statm(struct mm_struct *mm,
}
static int is_stack(struct proc_maps_private *priv,
- struct vm_area_struct *vma, int is_pid)
+ struct vm_area_struct *vma)
{
struct mm_struct *mm = vma->vm_mm;
- int stack = 0;
-
- if (is_pid) {
- stack = vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack;
- } else {
- struct inode *inode = priv->inode;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(proc_pid(inode), PIDTYPE_PID);
- if (task)
- stack = vma_is_stack_for_task(vma, task);
- rcu_read_unlock();
- }
- return stack;
+
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+ * in languages like Go.
+ */
+ return vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack;
}
/*
@@ -184,7 +176,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
if (file) {
seq_pad(m, ' ');
seq_file_path(m, file, "");
- } else if (mm && is_stack(priv, vma, is_pid)) {
+ } else if (mm && is_stack(priv, vma)) {
seq_pad(m, ' ');
seq_printf(m, "[stack]");
}
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index c8f60df2733e..bd4a5e8ce441 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -439,7 +439,7 @@ static unsigned int vfs_dent_type(uint8_t type)
*/
static int ubifs_readdir(struct file *file, struct dir_context *ctx)
{
- int err;
+ int err = 0;
struct qstr nm;
union ubifs_key key;
struct ubifs_dent_node *dent;
@@ -541,14 +541,12 @@ out:
kfree(file->private_data);
file->private_data = NULL;
- if (err != -ENOENT) {
+ if (err != -ENOENT)
ubifs_err(c, "cannot find next direntry, error %d", err);
- return err;
- }
/* 2 is a special value indicating that there are no more direntries */
ctx->pos = 2;
- return 0;
+ return err;
}
/* Free saved readdir() state when the directory is closed */
@@ -1060,9 +1058,9 @@ static void unlock_4_inodes(struct inode *inode1, struct inode *inode2,
mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
-static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned int flags)
+static int do_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
struct ubifs_info *c = old_dir->i_sb->s_fs_info;
struct inode *old_inode = d_inode(old_dentry);
@@ -1323,7 +1321,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
-static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
+static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
@@ -1336,7 +1334,7 @@ static int ubifs_rename2(struct inode *old_dir, struct dentry *old_dentry,
if (flags & RENAME_EXCHANGE)
return ubifs_xrename(old_dir, old_dentry, new_dir, new_dentry);
- return ubifs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+ return do_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -1387,7 +1385,7 @@ const struct inode_operations ubifs_dir_inode_operations = {
.mkdir = ubifs_mkdir,
.rmdir = ubifs_rmdir,
.mknod = ubifs_mknod,
- .rename = ubifs_rename2,
+ .rename = ubifs_rename,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
.listxattr = ubifs_listxattr,
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c2f4d41ed73..d9f9615bfd71 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -172,6 +172,7 @@ out_cancel:
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
+ host_ui->xattr_names -= nm->len;
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -478,6 +479,7 @@ out_cancel:
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
+ host_ui->xattr_names += nm->len;
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
index 17a940a14477..8caa79c61703 100644
--- a/include/acpi/pcc.h
+++ b/include/acpi/pcc.h
@@ -21,7 +21,7 @@ extern void pcc_mbox_free_channel(struct mbox_chan *chan);
static inline struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
int subspace_id)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void pcc_mbox_free_channel(struct mbox_chan *chan) { }
#endif
diff --git a/include/dt-bindings/clock/tegra186-clock.h b/include/dt-bindings/clock/tegra186-clock.h
new file mode 100644
index 000000000000..f73d32098f99
--- /dev/null
+++ b/include/dt-bindings/clock/tegra186-clock.h
@@ -0,0 +1,940 @@
+/** @file */
+
+#ifndef _MACH_T186_CLK_T186_H
+#define _MACH_T186_CLK_T186_H
+
+/**
+ * @defgroup clock_ids Clock Identifiers
+ * @{
+ * @defgroup extern_input external input clocks
+ * @{
+ * @def TEGRA186_CLK_OSC
+ * @def TEGRA186_CLK_CLK_32K
+ * @def TEGRA186_CLK_DTV_INPUT
+ * @def TEGRA186_CLK_SOR0_PAD_CLKOUT
+ * @def TEGRA186_CLK_SOR1_PAD_CLKOUT
+ * @def TEGRA186_CLK_I2S1_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S2_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S3_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S4_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S5_SYNC_INPUT
+ * @def TEGRA186_CLK_I2S6_SYNC_INPUT
+ * @def TEGRA186_CLK_SPDIFIN_SYNC_INPUT
+ * @}
+ *
+ * @defgroup extern_output external output clocks
+ * @{
+ * @def TEGRA186_CLK_EXTPERIPH1
+ * @def TEGRA186_CLK_EXTPERIPH2
+ * @def TEGRA186_CLK_EXTPERIPH3
+ * @def TEGRA186_CLK_EXTPERIPH4
+ * @}
+ *
+ * @defgroup display_clks display related clocks
+ * @{
+ * @def TEGRA186_CLK_CEC
+ * @def TEGRA186_CLK_DSIC
+ * @def TEGRA186_CLK_DSIC_LP
+ * @def TEGRA186_CLK_DSID
+ * @def TEGRA186_CLK_DSID_LP
+ * @def TEGRA186_CLK_DPAUX1
+ * @def TEGRA186_CLK_DPAUX
+ * @def TEGRA186_CLK_HDA2HDMICODEC
+ * @def TEGRA186_CLK_NVDISPLAY_DISP
+ * @def TEGRA186_CLK_NVDISPLAY_DSC
+ * @def TEGRA186_CLK_NVDISPLAY_P0
+ * @def TEGRA186_CLK_NVDISPLAY_P1
+ * @def TEGRA186_CLK_NVDISPLAY_P2
+ * @def TEGRA186_CLK_NVDISPLAYHUB
+ * @def TEGRA186_CLK_SOR_SAFE
+ * @def TEGRA186_CLK_SOR0
+ * @def TEGRA186_CLK_SOR0_OUT
+ * @def TEGRA186_CLK_SOR1
+ * @def TEGRA186_CLK_SOR1_OUT
+ * @def TEGRA186_CLK_DSI
+ * @def TEGRA186_CLK_MIPI_CAL
+ * @def TEGRA186_CLK_DSIA_LP
+ * @def TEGRA186_CLK_DSIB
+ * @def TEGRA186_CLK_DSIB_LP
+ * @}
+ *
+ * @defgroup camera_clks camera related clocks
+ * @{
+ * @def TEGRA186_CLK_NVCSI
+ * @def TEGRA186_CLK_NVCSILP
+ * @def TEGRA186_CLK_VI
+ * @}
+ *
+ * @defgroup audio_clks audio related clocks
+ * @{
+ * @def TEGRA186_CLK_ACLK
+ * @def TEGRA186_CLK_ADSP
+ * @def TEGRA186_CLK_ADSPNEON
+ * @def TEGRA186_CLK_AHUB
+ * @def TEGRA186_CLK_APE
+ * @def TEGRA186_CLK_APB2APE
+ * @def TEGRA186_CLK_AUD_MCLK
+ * @def TEGRA186_CLK_DMIC1
+ * @def TEGRA186_CLK_DMIC2
+ * @def TEGRA186_CLK_DMIC3
+ * @def TEGRA186_CLK_DMIC4
+ * @def TEGRA186_CLK_DSPK1
+ * @def TEGRA186_CLK_DSPK2
+ * @def TEGRA186_CLK_HDA
+ * @def TEGRA186_CLK_HDA2CODEC_2X
+ * @def TEGRA186_CLK_I2S1
+ * @def TEGRA186_CLK_I2S2
+ * @def TEGRA186_CLK_I2S3
+ * @def TEGRA186_CLK_I2S4
+ * @def TEGRA186_CLK_I2S5
+ * @def TEGRA186_CLK_I2S6
+ * @def TEGRA186_CLK_MAUD
+ * @def TEGRA186_CLK_PLL_A_OUT0
+ * @def TEGRA186_CLK_SPDIF_DOUBLER
+ * @def TEGRA186_CLK_SPDIF_IN
+ * @def TEGRA186_CLK_SPDIF_OUT
+ * @def TEGRA186_CLK_SYNC_DMIC1
+ * @def TEGRA186_CLK_SYNC_DMIC2
+ * @def TEGRA186_CLK_SYNC_DMIC3
+ * @def TEGRA186_CLK_SYNC_DMIC4
+ * @def TEGRA186_CLK_SYNC_DMIC5
+ * @def TEGRA186_CLK_SYNC_DSPK1
+ * @def TEGRA186_CLK_SYNC_DSPK2
+ * @def TEGRA186_CLK_SYNC_I2S1
+ * @def TEGRA186_CLK_SYNC_I2S2
+ * @def TEGRA186_CLK_SYNC_I2S3
+ * @def TEGRA186_CLK_SYNC_I2S4
+ * @def TEGRA186_CLK_SYNC_I2S5
+ * @def TEGRA186_CLK_SYNC_I2S6
+ * @def TEGRA186_CLK_SYNC_SPDIF
+ * @}
+ *
+ * @defgroup uart_clks UART clocks
+ * @{
+ * @def TEGRA186_CLK_AON_UART_FST_MIPI_CAL
+ * @def TEGRA186_CLK_UARTA
+ * @def TEGRA186_CLK_UARTB
+ * @def TEGRA186_CLK_UARTC
+ * @def TEGRA186_CLK_UARTD
+ * @def TEGRA186_CLK_UARTE
+ * @def TEGRA186_CLK_UARTF
+ * @def TEGRA186_CLK_UARTG
+ * @def TEGRA186_CLK_UART_FST_MIPI_CAL
+ * @}
+ *
+ * @defgroup i2c_clks I2C clocks
+ * @{
+ * @def TEGRA186_CLK_AON_I2C_SLOW
+ * @def TEGRA186_CLK_I2C1
+ * @def TEGRA186_CLK_I2C2
+ * @def TEGRA186_CLK_I2C3
+ * @def TEGRA186_CLK_I2C4
+ * @def TEGRA186_CLK_I2C5
+ * @def TEGRA186_CLK_I2C6
+ * @def TEGRA186_CLK_I2C8
+ * @def TEGRA186_CLK_I2C9
+ * @def TEGRA186_CLK_I2C1
+ * @def TEGRA186_CLK_I2C12
+ * @def TEGRA186_CLK_I2C13
+ * @def TEGRA186_CLK_I2C14
+ * @def TEGRA186_CLK_I2C_SLOW
+ * @def TEGRA186_CLK_VI_I2C
+ * @}
+ *
+ * @defgroup spi_clks SPI clocks
+ * @{
+ * @def TEGRA186_CLK_SPI1
+ * @def TEGRA186_CLK_SPI2
+ * @def TEGRA186_CLK_SPI3
+ * @def TEGRA186_CLK_SPI4
+ * @}
+ *
+ * @defgroup storage storage related clocks
+ * @{
+ * @def TEGRA186_CLK_SATA
+ * @def TEGRA186_CLK_SATA_OOB
+ * @def TEGRA186_CLK_SATA_IOBIST
+ * @def TEGRA186_CLK_SDMMC_LEGACY_TM
+ * @def TEGRA186_CLK_SDMMC1
+ * @def TEGRA186_CLK_SDMMC2
+ * @def TEGRA186_CLK_SDMMC3
+ * @def TEGRA186_CLK_SDMMC4
+ * @def TEGRA186_CLK_QSPI
+ * @def TEGRA186_CLK_QSPI_OUT
+ * @def TEGRA186_CLK_UFSDEV_REF
+ * @def TEGRA186_CLK_UFSHC
+ * @}
+ *
+ * @defgroup pwm_clks PWM clocks
+ * @{
+ * @def TEGRA186_CLK_PWM1
+ * @def TEGRA186_CLK_PWM2
+ * @def TEGRA186_CLK_PWM3
+ * @def TEGRA186_CLK_PWM4
+ * @def TEGRA186_CLK_PWM5
+ * @def TEGRA186_CLK_PWM6
+ * @def TEGRA186_CLK_PWM7
+ * @def TEGRA186_CLK_PWM8
+ * @}
+ *
+ * @defgroup plls PLLs and related clocks
+ * @{
+ * @def TEGRA186_CLK_PLLREFE_OUT_GATED
+ * @def TEGRA186_CLK_PLLREFE_OUT1
+ * @def TEGRA186_CLK_PLLD_OUT1
+ * @def TEGRA186_CLK_PLLP_OUT0
+ * @def TEGRA186_CLK_PLLP_OUT5
+ * @def TEGRA186_CLK_PLLA
+ * @def TEGRA186_CLK_PLLE_PWRSEQ
+ * @def TEGRA186_CLK_PLLA_OUT1
+ * @def TEGRA186_CLK_PLLREFE_REF
+ * @def TEGRA186_CLK_UPHY_PLL0_PWRSEQ
+ * @def TEGRA186_CLK_UPHY_PLL1_PWRSEQ
+ * @def TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH
+ * @def TEGRA186_CLK_PLLREFE_PEX
+ * @def TEGRA186_CLK_PLLREFE_IDDQ
+ * @def TEGRA186_CLK_PLLC_OUT_AON
+ * @def TEGRA186_CLK_PLLC_OUT_ISP
+ * @def TEGRA186_CLK_PLLC_OUT_VE
+ * @def TEGRA186_CLK_PLLC4_OUT
+ * @def TEGRA186_CLK_PLLREFE_OUT
+ * @def TEGRA186_CLK_PLLREFE_PLL_REF
+ * @def TEGRA186_CLK_PLLE
+ * @def TEGRA186_CLK_PLLC
+ * @def TEGRA186_CLK_PLLP
+ * @def TEGRA186_CLK_PLLD
+ * @def TEGRA186_CLK_PLLD2
+ * @def TEGRA186_CLK_PLLREFE_VCO
+ * @def TEGRA186_CLK_PLLC2
+ * @def TEGRA186_CLK_PLLC3
+ * @def TEGRA186_CLK_PLLDP
+ * @def TEGRA186_CLK_PLLC4_VCO
+ * @def TEGRA186_CLK_PLLA1
+ * @def TEGRA186_CLK_PLLNVCSI
+ * @def TEGRA186_CLK_PLLDISPHUB
+ * @def TEGRA186_CLK_PLLD3
+ * @def TEGRA186_CLK_PLLBPMPCAM
+ * @def TEGRA186_CLK_PLLAON
+ * @def TEGRA186_CLK_PLLU
+ * @def TEGRA186_CLK_PLLC4_VCO_DIV2
+ * @def TEGRA186_CLK_PLL_REF
+ * @def TEGRA186_CLK_PLLREFE_OUT1_DIV5
+ * @def TEGRA186_CLK_UTMIP_PLL_PWRSEQ
+ * @def TEGRA186_CLK_PLL_U_48M
+ * @def TEGRA186_CLK_PLL_U_480M
+ * @def TEGRA186_CLK_PLLC4_OUT0
+ * @def TEGRA186_CLK_PLLC4_OUT1
+ * @def TEGRA186_CLK_PLLC4_OUT2
+ * @def TEGRA186_CLK_PLLC4_OUT_MUX
+ * @def TEGRA186_CLK_DFLLDISP_DIV
+ * @def TEGRA186_CLK_PLLDISPHUB_DIV
+ * @def TEGRA186_CLK_PLLP_DIV8
+ * @}
+ *
+ * @defgroup nafll_clks NAFLL clock sources
+ * @{
+ * @def TEGRA186_CLK_NAFLL_AXI_CBB
+ * @def TEGRA186_CLK_NAFLL_BCPU
+ * @def TEGRA186_CLK_NAFLL_BPMP
+ * @def TEGRA186_CLK_NAFLL_DISP
+ * @def TEGRA186_CLK_NAFLL_GPU
+ * @def TEGRA186_CLK_NAFLL_ISP
+ * @def TEGRA186_CLK_NAFLL_MCPU
+ * @def TEGRA186_CLK_NAFLL_NVDEC
+ * @def TEGRA186_CLK_NAFLL_NVENC
+ * @def TEGRA186_CLK_NAFLL_NVJPG
+ * @def TEGRA186_CLK_NAFLL_SCE
+ * @def TEGRA186_CLK_NAFLL_SE
+ * @def TEGRA186_CLK_NAFLL_TSEC
+ * @def TEGRA186_CLK_NAFLL_TSECB
+ * @def TEGRA186_CLK_NAFLL_VI
+ * @def TEGRA186_CLK_NAFLL_VIC
+ * @}
+ *
+ * @defgroup mphy MPHY related clocks
+ * @{
+ * @def TEGRA186_CLK_MPHY_L0_RX_SYMB
+ * @def TEGRA186_CLK_MPHY_L0_RX_LS_BIT
+ * @def TEGRA186_CLK_MPHY_L0_TX_SYMB
+ * @def TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT
+ * @def TEGRA186_CLK_MPHY_L0_RX_ANA
+ * @def TEGRA186_CLK_MPHY_L1_RX_ANA
+ * @def TEGRA186_CLK_MPHY_IOBIST
+ * @def TEGRA186_CLK_MPHY_TX_1MHZ_REF
+ * @def TEGRA186_CLK_MPHY_CORE_PLL_FIXED
+ * @}
+ *
+ * @defgroup eavb EAVB related clocks
+ * @{
+ * @def TEGRA186_CLK_EQOS_AXI
+ * @def TEGRA186_CLK_EQOS_PTP_REF
+ * @def TEGRA186_CLK_EQOS_RX
+ * @def TEGRA186_CLK_EQOS_RX_INPUT
+ * @def TEGRA186_CLK_EQOS_TX
+ * @}
+ *
+ * @defgroup usb USB related clocks
+ * @{
+ * @def TEGRA186_CLK_PEX_USB_PAD0_MGMT
+ * @def TEGRA186_CLK_PEX_USB_PAD1_MGMT
+ * @def TEGRA186_CLK_HSIC_TRK
+ * @def TEGRA186_CLK_USB2_TRK
+ * @def TEGRA186_CLK_USB2_HSIC_TRK
+ * @def TEGRA186_CLK_XUSB_CORE_SS
+ * @def TEGRA186_CLK_XUSB_CORE_DEV
+ * @def TEGRA186_CLK_XUSB_FALCON
+ * @def TEGRA186_CLK_XUSB_FS
+ * @def TEGRA186_CLK_XUSB
+ * @def TEGRA186_CLK_XUSB_DEV
+ * @def TEGRA186_CLK_XUSB_HOST
+ * @def TEGRA186_CLK_XUSB_SS
+ * @}
+ *
+ * @defgroup bigblock compute block related clocks
+ * @{
+ * @def TEGRA186_CLK_GPCCLK
+ * @def TEGRA186_CLK_GPC2CLK
+ * @def TEGRA186_CLK_GPU
+ * @def TEGRA186_CLK_HOST1X
+ * @def TEGRA186_CLK_ISP
+ * @def TEGRA186_CLK_NVDEC
+ * @def TEGRA186_CLK_NVENC
+ * @def TEGRA186_CLK_NVJPG
+ * @def TEGRA186_CLK_SE
+ * @def TEGRA186_CLK_TSEC
+ * @def TEGRA186_CLK_TSECB
+ * @def TEGRA186_CLK_VIC
+ * @}
+ *
+ * @defgroup can CAN bus related clocks
+ * @{
+ * @def TEGRA186_CLK_CAN1
+ * @def TEGRA186_CLK_CAN1_HOST
+ * @def TEGRA186_CLK_CAN2
+ * @def TEGRA186_CLK_CAN2_HOST
+ * @}
+ *
+ * @defgroup system basic system clocks
+ * @{
+ * @def TEGRA186_CLK_ACTMON
+ * @def TEGRA186_CLK_AON_APB
+ * @def TEGRA186_CLK_AON_CPU_NIC
+ * @def TEGRA186_CLK_AON_NIC
+ * @def TEGRA186_CLK_AXI_CBB
+ * @def TEGRA186_CLK_BPMP_APB
+ * @def TEGRA186_CLK_BPMP_CPU_NIC
+ * @def TEGRA186_CLK_BPMP_NIC_RATE
+ * @def TEGRA186_CLK_CLK_M
+ * @def TEGRA186_CLK_EMC
+ * @def TEGRA186_CLK_MSS_ENCRYPT
+ * @def TEGRA186_CLK_SCE_APB
+ * @def TEGRA186_CLK_SCE_CPU_NIC
+ * @def TEGRA186_CLK_SCE_NIC
+ * @def TEGRA186_CLK_TSC
+ * @}
+ *
+ * @defgroup pcie_clks PCIe related clocks
+ * @{
+ * @def TEGRA186_CLK_AFI
+ * @def TEGRA186_CLK_PCIE
+ * @def TEGRA186_CLK_PCIE2_IOBIST
+ * @def TEGRA186_CLK_PCIERX0
+ * @def TEGRA186_CLK_PCIERX1
+ * @def TEGRA186_CLK_PCIERX2
+ * @def TEGRA186_CLK_PCIERX3
+ * @def TEGRA186_CLK_PCIERX4
+ * @}
+ */
+
+/** @brief output of gate CLK_ENB_FUSE */
+#define TEGRA186_CLK_FUSE 0
+/**
+ * @brief It's not what you think
+ * @details output of gate CLK_ENB_GPU. This output connects to the GPU
+ * pwrclk. @warning: This is almost certainly not the clock you think
+ * it is. If you're looking for the clock of the graphics engine, see
+ * TEGRA186_GPCCLK
+ */
+#define TEGRA186_CLK_GPU 1
+/** @brief output of gate CLK_ENB_PCIE */
+#define TEGRA186_CLK_PCIE 3
+/** @brief output of the divider IPFS_CLK_DIVISOR */
+#define TEGRA186_CLK_AFI 4
+/** @brief output of gate CLK_ENB_PCIE2_IOBIST */
+#define TEGRA186_CLK_PCIE2_IOBIST 5
+/** @brief output of gate CLK_ENB_PCIERX0*/
+#define TEGRA186_CLK_PCIERX0 6
+/** @brief output of gate CLK_ENB_PCIERX1*/
+#define TEGRA186_CLK_PCIERX1 7
+/** @brief output of gate CLK_ENB_PCIERX2*/
+#define TEGRA186_CLK_PCIERX2 8
+/** @brief output of gate CLK_ENB_PCIERX3*/
+#define TEGRA186_CLK_PCIERX3 9
+/** @brief output of gate CLK_ENB_PCIERX4*/
+#define TEGRA186_CLK_PCIERX4 10
+/** @brief output branch of PLL_C for ISP, controlled by gate CLK_ENB_PLLC_OUT_ISP */
+#define TEGRA186_CLK_PLLC_OUT_ISP 11
+/** @brief output branch of PLL_C for VI, controlled by gate CLK_ENB_PLLC_OUT_VE */
+#define TEGRA186_CLK_PLLC_OUT_VE 12
+/** @brief output branch of PLL_C for AON domain, controlled by gate CLK_ENB_PLLC_OUT_AON */
+#define TEGRA186_CLK_PLLC_OUT_AON 13
+/** @brief output of gate CLK_ENB_SOR_SAFE */
+#define TEGRA186_CLK_SOR_SAFE 39
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */
+#define TEGRA186_CLK_I2S2 42
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */
+#define TEGRA186_CLK_I2S3 43
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDF_IN */
+#define TEGRA186_CLK_SPDIF_IN 44
+/** @brief output of gate CLK_ENB_SPDIF_DOUBLER */
+#define TEGRA186_CLK_SPDIF_DOUBLER 45
+/** @clkdesc{spi_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_SPI3} */
+#define TEGRA186_CLK_SPI3 46
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C1} */
+#define TEGRA186_CLK_I2C1 47
+/** @clkdesc{i2c_clks, out, mux, CLK_RST_CONTROLLER_CLK_SOURCE_I2C5} */
+#define TEGRA186_CLK_I2C5 48
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI1 */
+#define TEGRA186_CLK_SPI1 49
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ISP */
+#define TEGRA186_CLK_ISP 50
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI */
+#define TEGRA186_CLK_VI 51
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC1 */
+#define TEGRA186_CLK_SDMMC1 52
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC2 */
+#define TEGRA186_CLK_SDMMC2 53
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */
+#define TEGRA186_CLK_SDMMC4 54
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */
+#define TEGRA186_CLK_UARTA 55
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTB */
+#define TEGRA186_CLK_UARTB 56
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HOST1X */
+#define TEGRA186_CLK_HOST1X 57
+/**
+ * @brief controls the EMC clock frequency.
+ * @details Doing a clk_set_rate on this clock will select the
+ * appropriate clock source, program the source rate and execute a
+ * specific sequence to switch to the new clock source for both memory
+ * controllers. This can be used to control the balance between memory
+ * throughput and memory controller power.
+ */
+#define TEGRA186_CLK_EMC 58
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH4 */
+#define TEGRA186_CLK_EXTPERIPH4 73
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI4 */
+#define TEGRA186_CLK_SPI4 74
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */
+#define TEGRA186_CLK_I2C3 75
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC3 */
+#define TEGRA186_CLK_SDMMC3 76
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTD */
+#define TEGRA186_CLK_UARTD 77
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */
+#define TEGRA186_CLK_I2S1 79
+/** output of gate CLK_ENB_DTV */
+#define TEGRA186_CLK_DTV 80
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSEC */
+#define TEGRA186_CLK_TSEC 81
+/** @brief output of gate CLK_ENB_DP2 */
+#define TEGRA186_CLK_DP2 82
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */
+#define TEGRA186_CLK_I2S4 84
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */
+#define TEGRA186_CLK_I2S5 85
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */
+#define TEGRA186_CLK_I2C4 86
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */
+#define TEGRA186_CLK_AHUB 87
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA2CODEC_2X */
+#define TEGRA186_CLK_HDA2CODEC_2X 88
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH1 */
+#define TEGRA186_CLK_EXTPERIPH1 89
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH2 */
+#define TEGRA186_CLK_EXTPERIPH2 90
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_EXTPERIPH3 */
+#define TEGRA186_CLK_EXTPERIPH3 91
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C_SLOW */
+#define TEGRA186_CLK_I2C_SLOW 92
+/** @brief output of the SOR1_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1 93
+/** @brief output of gate CLK_ENB_CEC */
+#define TEGRA186_CLK_CEC 94
+/** @brief output of gate CLK_ENB_DPAUX1 */
+#define TEGRA186_CLK_DPAUX1 95
+/** @brief output of gate CLK_ENB_DPAUX */
+#define TEGRA186_CLK_DPAUX 96
+/** @brief output of the SOR0_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0 97
+/** @brief output of gate CLK_ENB_HDA2HDMICODEC */
+#define TEGRA186_CLK_HDA2HDMICODEC 98
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SATA */
+#define TEGRA186_CLK_SATA 99
+/** @brief output of gate CLK_ENB_SATA_OOB */
+#define TEGRA186_CLK_SATA_OOB 100
+/** @brief output of gate CLK_ENB_SATA_IOBIST */
+#define TEGRA186_CLK_SATA_IOBIST 101
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_HDA */
+#define TEGRA186_CLK_HDA 102
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SE */
+#define TEGRA186_CLK_SE 103
+/** @brief output of gate CLK_ENB_APB2APE */
+#define TEGRA186_CLK_APB2APE 104
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */
+#define TEGRA186_CLK_APE 105
+/** @brief output of gate CLK_ENB_IQC1 */
+#define TEGRA186_CLK_IQC1 106
+/** @brief output of gate CLK_ENB_IQC2 */
+#define TEGRA186_CLK_IQC2 107
+/** divide by 2 version of TEGRA186_CLK_PLLREFE_VCO */
+#define TEGRA186_CLK_PLLREFE_OUT 108
+/** @brief output of gate CLK_ENB_PLLREFE_PLL_REF */
+#define TEGRA186_CLK_PLLREFE_PLL_REF 109
+/** @brief output of gate CLK_ENB_PLLC4_OUT */
+#define TEGRA186_CLK_PLLC4_OUT 110
+/** @brief output of mux xusb_core_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB 111
+/** controls xusb_dev_ce signal on page 66 and 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_DEV 112
+/** controls xusb_host_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_HOST 113
+/** controls xusb_ss_ce signal on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_SS 114
+/** @brief output of gate CLK_ENB_DSI */
+#define TEGRA186_CLK_DSI 115
+/** @brief output of gate CLK_ENB_MIPI_CAL */
+#define TEGRA186_CLK_MIPI_CAL 116
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIA_LP */
+#define TEGRA186_CLK_DSIA_LP 117
+/** @brief output of gate CLK_ENB_DSIB */
+#define TEGRA186_CLK_DSIB 118
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIB_LP */
+#define TEGRA186_CLK_DSIB_LP 119
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */
+#define TEGRA186_CLK_DMIC1 122
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */
+#define TEGRA186_CLK_DMIC2 123
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */
+#define TEGRA186_CLK_AUD_MCLK 124
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */
+#define TEGRA186_CLK_I2C6 125
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_UART_FST_MIPI_CAL 126
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VIC */
+#define TEGRA186_CLK_VIC 127
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM */
+#define TEGRA186_CLK_SDMMC_LEGACY_TM 128
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDEC */
+#define TEGRA186_CLK_NVDEC 129
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVJPG */
+#define TEGRA186_CLK_NVJPG 130
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVENC */
+#define TEGRA186_CLK_NVENC 131
+/** @brief output of the QSPI_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI 132
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_VI_I2C */
+#define TEGRA186_CLK_VI_I2C 133
+/** @brief output of gate CLK_ENB_HSIC_TRK */
+#define TEGRA186_CLK_HSIC_TRK 134
+/** @brief output of gate CLK_ENB_USB2_TRK */
+#define TEGRA186_CLK_USB2_TRK 135
+/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MAUD */
+#define TEGRA186_CLK_MAUD 136
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSECB */
+#define TEGRA186_CLK_TSECB 137
+/** @brief output of gate CLK_ENB_ADSP */
+#define TEGRA186_CLK_ADSP 138
+/** @brief output of gate CLK_ENB_ADSPNEON */
+#define TEGRA186_CLK_ADSPNEON 139
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_RX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_RX_SYMB 140
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_LS_BIT */
+#define TEGRA186_CLK_MPHY_L0_RX_LS_BIT 141
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_L0_TX_LS_SYMB */
+#define TEGRA186_CLK_MPHY_L0_TX_SYMB 142
+/** @brief output of gate CLK_ENB_MPHY_L0_TX_LS_3XBIT */
+#define TEGRA186_CLK_MPHY_L0_TX_LS_3XBIT 143
+/** @brief output of gate CLK_ENB_MPHY_L0_RX_ANA */
+#define TEGRA186_CLK_MPHY_L0_RX_ANA 144
+/** @brief output of gate CLK_ENB_MPHY_L1_RX_ANA */
+#define TEGRA186_CLK_MPHY_L1_RX_ANA 145
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_IOBIST */
+#define TEGRA186_CLK_MPHY_IOBIST 146
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_TX_1MHZ_REF */
+#define TEGRA186_CLK_MPHY_TX_1MHZ_REF 147
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_MPHY_CORE_PLL_FIXED */
+#define TEGRA186_CLK_MPHY_CORE_PLL_FIXED 148
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AXI_CBB */
+#define TEGRA186_CLK_AXI_CBB 149
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */
+#define TEGRA186_CLK_DMIC3 150
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */
+#define TEGRA186_CLK_DMIC4 151
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */
+#define TEGRA186_CLK_DSPK1 152
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */
+#define TEGRA186_CLK_DSPK2 153
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */
+#define TEGRA186_CLK_I2S6 154
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P0 */
+#define TEGRA186_CLK_NVDISPLAY_P0 155
+/** @brief output of the NVDISPLAY_DISP_CLK_SRC mux in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP */
+#define TEGRA186_CLK_NVDISPLAY_DISP 156
+/** @brief output of gate CLK_ENB_NVDISPLAY_DSC */
+#define TEGRA186_CLK_NVDISPLAY_DSC 157
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAYHUB */
+#define TEGRA186_CLK_NVDISPLAYHUB 158
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P1 */
+#define TEGRA186_CLK_NVDISPLAY_P1 159
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_P2 */
+#define TEGRA186_CLK_NVDISPLAY_P2 160
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TACH */
+#define TEGRA186_CLK_TACH 166
+/** @brief output of gate CLK_ENB_EQOS */
+#define TEGRA186_CLK_EQOS_AXI 167
+/** @brief output of gate CLK_ENB_EQOS_RX */
+#define TEGRA186_CLK_EQOS_RX 168
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSHC_CG_SYS */
+#define TEGRA186_CLK_UFSHC 178
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UFSDEV_REF */
+#define TEGRA186_CLK_UFSDEV_REF 179
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSI */
+#define TEGRA186_CLK_NVCSI 180
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_NVCSILP */
+#define TEGRA186_CLK_NVCSILP 181
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */
+#define TEGRA186_CLK_I2C7 182
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */
+#define TEGRA186_CLK_I2C9 183
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C12 */
+#define TEGRA186_CLK_I2C12 184
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C13 */
+#define TEGRA186_CLK_I2C13 185
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C14 */
+#define TEGRA186_CLK_I2C14 186
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */
+#define TEGRA186_CLK_PWM1 187
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */
+#define TEGRA186_CLK_PWM2 188
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */
+#define TEGRA186_CLK_PWM3 189
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */
+#define TEGRA186_CLK_PWM5 190
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */
+#define TEGRA186_CLK_PWM6 191
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */
+#define TEGRA186_CLK_PWM7 192
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */
+#define TEGRA186_CLK_PWM8 193
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTE */
+#define TEGRA186_CLK_UARTE 194
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTF */
+#define TEGRA186_CLK_UARTF 195
+/** @deprecated */
+#define TEGRA186_CLK_DBGAPB 196
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_CPU_NIC */
+#define TEGRA186_CLK_BPMP_CPU_NIC 197
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_BPMP_APB */
+#define TEGRA186_CLK_BPMP_APB 199
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_ACTMON */
+#define TEGRA186_CLK_ACTMON 201
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_CPU_NIC */
+#define TEGRA186_CLK_AON_CPU_NIC 208
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN1 */
+#define TEGRA186_CLK_CAN1 210
+/** @brief output of gate CLK_ENB_CAN1_HOST */
+#define TEGRA186_CLK_CAN1_HOST 211
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_CAN2 */
+#define TEGRA186_CLK_CAN2 212
+/** @brief output of gate CLK_ENB_CAN2_HOST */
+#define TEGRA186_CLK_CAN2_HOST 213
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_APB */
+#define TEGRA186_CLK_AON_APB 214
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTC */
+#define TEGRA186_CLK_UARTC 215
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTG */
+#define TEGRA186_CLK_UARTG 216
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_UART_FST_MIPI_CAL */
+#define TEGRA186_CLK_AON_UART_FST_MIPI_CAL 217
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */
+#define TEGRA186_CLK_I2C2 218
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */
+#define TEGRA186_CLK_I2C8 219
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C10 */
+#define TEGRA186_CLK_I2C10 220
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_I2C_SLOW */
+#define TEGRA186_CLK_AON_I2C_SLOW 221
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPI2 */
+#define TEGRA186_CLK_SPI2 222
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC5 */
+#define TEGRA186_CLK_DMIC5 223
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AON_TOUCH */
+#define TEGRA186_CLK_AON_TOUCH 224
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */
+#define TEGRA186_CLK_PWM4 225
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_TSC. This clock object is read only and is used for all timers in the system. */
+#define TEGRA186_CLK_TSC 226
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_MSS_ENCRYPT */
+#define TEGRA186_CLK_MSS_ENCRYPT 227
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_CPU_NIC */
+#define TEGRA186_CLK_SCE_CPU_NIC 228
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SCE_APB */
+#define TEGRA186_CLK_SCE_APB 230
+/** @brief output of gate CLK_ENB_DSIC */
+#define TEGRA186_CLK_DSIC 231
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSIC_LP */
+#define TEGRA186_CLK_DSIC_LP 232
+/** @brief output of gate CLK_ENB_DSID */
+#define TEGRA186_CLK_DSID 233
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSID_LP */
+#define TEGRA186_CLK_DSID_LP 234
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_SATA_USB_RX_BYP */
+#define TEGRA186_CLK_PEX_SATA_USB_RX_BYP 236
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SPDIF_OUT */
+#define TEGRA186_CLK_SPDIF_OUT 238
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_PTP_REF_CLK_0 */
+#define TEGRA186_CLK_EQOS_PTP_REF 239
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_EQOS_TX_CLK */
+#define TEGRA186_CLK_EQOS_TX 240
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_USB2_HSIC_TRK */
+#define TEGRA186_CLK_USB2_HSIC_TRK 241
+/** @brief output of mux xusb_ss_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_SS 242
+/** @brief output of mux xusb_core_dev_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_CORE_DEV 243
+/** @brief output of mux xusb_core_falcon_clk_switch on page 67 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FALCON 244
+/** @brief output of mux xusb_fs_clk_switch on page 66 of T186_Clocks_IAS.doc */
+#define TEGRA186_CLK_XUSB_FS 245
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */
+#define TEGRA186_CLK_PLL_A_OUT0 246
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */
+#define TEGRA186_CLK_SYNC_I2S1 247
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */
+#define TEGRA186_CLK_SYNC_I2S2 248
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */
+#define TEGRA186_CLK_SYNC_I2S3 249
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */
+#define TEGRA186_CLK_SYNC_I2S4 250
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */
+#define TEGRA186_CLK_SYNC_I2S5 251
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */
+#define TEGRA186_CLK_SYNC_I2S6 252
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */
+#define TEGRA186_CLK_SYNC_DSPK1 253
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */
+#define TEGRA186_CLK_SYNC_DSPK2 254
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */
+#define TEGRA186_CLK_SYNC_DMIC1 255
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */
+#define TEGRA186_CLK_SYNC_DMIC2 256
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */
+#define TEGRA186_CLK_SYNC_DMIC3 257
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */
+#define TEGRA186_CLK_SYNC_DMIC4 259
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_SPDIF */
+#define TEGRA186_CLK_SYNC_SPDIF 260
+/** @brief output of gate CLK_ENB_PLLREFE_OUT */
+#define TEGRA186_CLK_PLLREFE_OUT_GATED 261
+/** @brief output of the divider PLLREFE_DIVP in CLK_RST_CONTROLLER_PLLREFE_BASE. PLLREFE has 2 outputs:
+ * * VCO/pdiv defined by this clock object
+ * * VCO/2 defined by TEGRA186_CLK_PLLREFE_OUT
+ */
+#define TEGRA186_CLK_PLLREFE_OUT1 262
+#define TEGRA186_CLK_PLLD_OUT1 267
+/** @brief output of the divider PLLP_DIVP in CLK_RST_CONTROLLER_PLLP_BASE */
+#define TEGRA186_CLK_PLLP_OUT0 269
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLP_OUTC */
+#define TEGRA186_CLK_PLLP_OUT5 270
+/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA 271
+/** @brief output of mux controlled by CLK_RST_CONTROLLER_ACLK_BURST_POLICY divided by the divider controlled by ACLK_CLK_DIVISOR in CLK_RST_CONTROLLER_SUPER_ACLK_DIVIDER */
+#define TEGRA186_CLK_ACLK 273
+/** fixed 48MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_48M 274
+/** fixed 480MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLL_U_480M 275
+/** @brief output of the divider PLLC4_DIVP in CLK_RST_CONTROLLER_PLLC4_BASE. Output frequency is TEGRA186_CLK_PLLC4_VCO/PLLC4_DIVP */
+#define TEGRA186_CLK_PLLC4_OUT0 276
+/** fixed /3 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/3 */
+#define TEGRA186_CLK_PLLC4_OUT1 277
+/** fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLC4_VCO/5 */
+#define TEGRA186_CLK_PLLC4_OUT2 278
+/** @brief output of mux controlled by PLLC4_CLK_SEL in CLK_RST_CONTROLLER_PLLC4_MISC1 */
+#define TEGRA186_CLK_PLLC4_OUT_MUX 279
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when DFLLDISP_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_DFLLDISP_DIV 284
+/** @brief output of divider NVDISPLAY_DISP_CLK_DIVISOR in CLK_RST_CONTROLLER_CLK_SOURCE_NVDISPLAY_DISP when PLLDISPHUB_DIV is selected in NVDISPLAY_DISP_CLK_SRC */
+#define TEGRA186_CLK_PLLDISPHUB_DIV 285
+/** fixed /8 divider which is used as the input for TEGRA186_CLK_SOR_SAFE */
+#define TEGRA186_CLK_PLLP_DIV8 286
+/** @brief output of divider CLK_RST_CONTROLLER_BPMP_NIC_RATE */
+#define TEGRA186_CLK_BPMP_NIC 287
+/** @brief output of the divider CLK_RST_CONTROLLER_PLLA1_OUT1 */
+#define TEGRA186_CLK_PLL_A_OUT1 288
+/** @deprecated */
+#define TEGRA186_CLK_GPC2CLK 289
+/** A fake clock which must be enabled during KFUSE read operations to ensure adequate VDD_CORE voltage. */
+#define TEGRA186_CLK_KFUSE 293
+/**
+ * @brief controls the PLLE hardware sequencer.
+ * @details This clock only has enable and disable methods. When the
+ * PLLE hw sequencer is enabled, PLLE will be enabled or disabled by
+ * hw based on the control signals from the PCIe, SATA and XUSB
+ * clocks. When the PLLE hw sequencer is disabled, the state of PLLE
+ * is controlled by sw using clk_enable/clk_disable on
+ * TEGRA186_CLK_PLLE.
+ */
+#define TEGRA186_CLK_PLLE_PWRSEQ 294
+/** fixed 60MHz clock divided down from TEGRA186_CLK_PLL_U */
+#define TEGRA186_CLK_PLLREFE_REF 295
+/** @brief output of mux controlled by SOR0_CLK_SEL0 and SOR0_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_OUT 296
+/** @brief output of mux controlled by SOR1_CLK_SEL0 and SOR1_CLK_SEL1 in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_OUT 297
+/** @brief fixed /5 divider. Output frequency of this clock is TEGRA186_CLK_PLLREFE_OUT1/5. Used as input for TEGRA186_CLK_EQOS_AXI */
+#define TEGRA186_CLK_PLLREFE_OUT1_DIV5 298
+/** @brief controls the UTMIP_PLL (aka PLLU) hardware sequencer */
+#define TEGRA186_CLK_UTMIP_PLL_PWRSEQ 301
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL0_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD0_MGMT 302
+/** @brief output of the divider CLK_RST_CONTROLLER_CLK_SOURCE_PEX_USB_PAD_PLL1_MGMT */
+#define TEGRA186_CLK_PEX_USB_PAD1_MGMT 303
+/** @brief controls the UPHY_PLL0 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL0_PWRSEQ 304
+/** @brief controls the UPHY_PLL1 hardware sequencer */
+#define TEGRA186_CLK_UPHY_PLL1_PWRSEQ 305
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC so the bypass output can be used even when the PLL is disabled */
+#define TEGRA186_CLK_PLLREFE_PLLE_PASSTHROUGH 306
+/** @brief output of the mux controlled by PLLREFE_SEL_CLKIN_PEX in CLK_RST_CONTROLLER_PLLREFE_MISC */
+#define TEGRA186_CLK_PLLREFE_PEX 307
+/** @brief control for PLLREFE_IDDQ in CLK_RST_CONTROLLER_PLLREFE_MISC to turn on the PLL when enabled */
+#define TEGRA186_CLK_PLLREFE_IDDQ 308
+/** @brief output of the divider QSPI_CLK_DIV2_SEL in CLK_RST_CONTROLLER_CLK_SOURCE_QSPI */
+#define TEGRA186_CLK_QSPI_OUT 309
+/**
+ * @brief GPC2CLK-div-2
+ * @details fixed /2 divider. Output frequency is
+ * TEGRA186_CLK_GPC2CLK/2. The frequency of this clock is the
+ * frequency at which the GPU graphics engine runs. */
+#define TEGRA186_CLK_GPCCLK 310
+/** @brief output of divider CLK_RST_CONTROLLER_AON_NIC_RATE */
+#define TEGRA186_CLK_AON_NIC 450
+/** @brief output of divider CLK_RST_CONTROLLER_SCE_NIC_RATE */
+#define TEGRA186_CLK_SCE_NIC 451
+/** Fixed 100MHz PLL for PCIe, SATA and superspeed USB */
+#define TEGRA186_CLK_PLLE 512
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC_BASE */
+#define TEGRA186_CLK_PLLC 513
+/** Fixed 408MHz PLL for use by peripheral clocks */
+#define TEGRA186_CLK_PLLP 516
+/** @deprecated */
+#define TEGRA186_CLK_PLL_P TEGRA186_CLK_PLLP
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD_BASE for use by DSI */
+#define TEGRA186_CLK_PLLD 518
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD2_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD2 519
+/**
+ * @brief PLL controlled by CLK_RST_CONTROLLER_PLLREFE_BASE.
+ * @details Note that this clock only controls the VCO output, before
+ * the post-divider. See TEGRA186_CLK_PLLREFE_OUT1 for more
+ * information.
+ */
+#define TEGRA186_CLK_PLLREFE_VCO 520
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC2_BASE */
+#define TEGRA186_CLK_PLLC2 521
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC3_BASE */
+#define TEGRA186_CLK_PLLC3 522
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDP_BASE for use as the DP link clock */
+#define TEGRA186_CLK_PLLDP 523
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */
+#define TEGRA186_CLK_PLLC4_VCO 524
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLA1_BASE for use by audio clocks */
+#define TEGRA186_CLK_PLLA1 525
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLNVCSI_BASE */
+#define TEGRA186_CLK_PLLNVCSI 526
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLDISPHUB_BASE */
+#define TEGRA186_CLK_PLLDISPHUB 527
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLD3_BASE for use by HDMI or DP */
+#define TEGRA186_CLK_PLLD3 528
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLBPMPCAM_BASE */
+#define TEGRA186_CLK_PLLBPMPCAM 531
+/** @brief PLL controlled by CLK_RST_CONTROLLER_PLLAON_BASE for use by IP blocks in the AON domain */
+#define TEGRA186_CLK_PLLAON 532
+/** Fixed frequency 960MHz PLL for USB and EAVB */
+#define TEGRA186_CLK_PLLU 533
+/** fixed /2 divider. Output frequency is TEGRA186_CLK_PLLC4_VCO/2 */
+#define TEGRA186_CLK_PLLC4_VCO_DIV2 535
+/** @brief NAFLL clock source for AXI_CBB */
+#define TEGRA186_CLK_NAFLL_AXI_CBB 564
+/** @brief NAFLL clock source for BPMP */
+#define TEGRA186_CLK_NAFLL_BPMP 565
+/** @brief NAFLL clock source for ISP */
+#define TEGRA186_CLK_NAFLL_ISP 566
+/** @brief NAFLL clock source for NVDEC */
+#define TEGRA186_CLK_NAFLL_NVDEC 567
+/** @brief NAFLL clock source for NVENC */
+#define TEGRA186_CLK_NAFLL_NVENC 568
+/** @brief NAFLL clock source for NVJPG */
+#define TEGRA186_CLK_NAFLL_NVJPG 569
+/** @brief NAFLL clock source for SCE */
+#define TEGRA186_CLK_NAFLL_SCE 570
+/** @brief NAFLL clock source for SE */
+#define TEGRA186_CLK_NAFLL_SE 571
+/** @brief NAFLL clock source for TSEC */
+#define TEGRA186_CLK_NAFLL_TSEC 572
+/** @brief NAFLL clock source for TSECB */
+#define TEGRA186_CLK_NAFLL_TSECB 573
+/** @brief NAFLL clock source for VI */
+#define TEGRA186_CLK_NAFLL_VI 574
+/** @brief NAFLL clock source for VIC */
+#define TEGRA186_CLK_NAFLL_VIC 575
+/** @brief NAFLL clock source for DISP */
+#define TEGRA186_CLK_NAFLL_DISP 576
+/** @brief NAFLL clock source for GPU */
+#define TEGRA186_CLK_NAFLL_GPU 577
+/** @brief NAFLL clock source for M-CPU cluster */
+#define TEGRA186_CLK_NAFLL_MCPU 578
+/** @brief NAFLL clock source for B-CPU cluster */
+#define TEGRA186_CLK_NAFLL_BCPU 579
+/** @brief input from Tegra's CLK_32K_IN pad */
+#define TEGRA186_CLK_CLK_32K 608
+/** @brief output of divider CLK_RST_CONTROLLER_CLK_M_DIVIDE */
+#define TEGRA186_CLK_CLK_M 609
+/** @brief output of divider PLL_REF_DIV in CLK_RST_CONTROLLER_OSC_CTRL */
+#define TEGRA186_CLK_PLL_REF 610
+/** @brief input from Tegra's XTAL_IN */
+#define TEGRA186_CLK_OSC 612
+/** @brief clock recovered from EAVB input */
+#define TEGRA186_CLK_EQOS_RX_INPUT 613
+/** @brief clock recovered from DTV input */
+#define TEGRA186_CLK_DTV_INPUT 614
+/** @brief SOR0 brick output which feeds into SOR0_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR0 */
+#define TEGRA186_CLK_SOR0_PAD_CLKOUT 615
+/** @brief SOR1 brick output which feeds into SOR1_CLK_SEL mux in CLK_RST_CONTROLLER_CLK_SOURCE_SOR1 */
+#define TEGRA186_CLK_SOR1_PAD_CLKOUT 616
+/** @brief clock recovered from I2S1 input */
+#define TEGRA186_CLK_I2S1_SYNC_INPUT 617
+/** @brief clock recovered from I2S2 input */
+#define TEGRA186_CLK_I2S2_SYNC_INPUT 618
+/** @brief clock recovered from I2S3 input */
+#define TEGRA186_CLK_I2S3_SYNC_INPUT 619
+/** @brief clock recovered from I2S4 input */
+#define TEGRA186_CLK_I2S4_SYNC_INPUT 620
+/** @brief clock recovered from I2S5 input */
+#define TEGRA186_CLK_I2S5_SYNC_INPUT 621
+/** @brief clock recovered from I2S6 input */
+#define TEGRA186_CLK_I2S6_SYNC_INPUT 622
+/** @brief clock recovered from SPDIFIN input */
+#define TEGRA186_CLK_SPDIFIN_SYNC_INPUT 623
+
+/**
+ * @brief subject to change
+ * @details maximum clock identifier value plus one.
+ */
+#define TEGRA186_CLK_CLK_MAX 624
+
+/** @} */
+
+#endif
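
As an illustration (an editorial sketch, not part of the patch itself): the TEGRA186_CLK_PLLE_PWRSEQ comment above notes that, with the hardware sequencer disabled, software drives PLLE through clk_enable/clk_disable on TEGRA186_CLK_PLLE. A minimal consumer-side view of that flow through the common clock framework might look as follows; the "plle" clock-name lookup and the surrounding driver are assumptions made only for this example.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: acquire and enable whatever clock the device's DT node maps to
 * TEGRA186_CLK_PLLE. The "plle" clock name is hypothetical. */
static int example_enable_plle(struct device *dev)
{
	struct clk *plle;
	int err;

	plle = devm_clk_get(dev, "plle");
	if (IS_ERR(plle))
		return PTR_ERR(plle);

	/* Pairs with clk_disable_unprepare() once PLLE is no longer needed. */
	err = clk_prepare_enable(plle);
	if (err)
		return err;

	return 0;
}
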
diff --git a/include/dt-bindings/mailbox/tegra186-hsp.h b/include/dt-bindings/mailbox/tegra186-hsp.h
new file mode 100644
index 000000000000..f5d66e5f5f10
--- /dev/null
+++ b/include/dt-bindings/mailbox/tegra186-hsp.h
@@ -0,0 +1,24 @@
+/*
+ * This header provides constants for binding nvidia,tegra186-hsp.
+ */
+
+#ifndef _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+#define _DT_BINDINGS_MAILBOX_TEGRA186_HSP_H
+
+/*
+ * These define the type of mailbox that is to be used (doorbell, shared
+ * mailbox, shared semaphore or arbitrated semaphore).
+ */
+#define TEGRA_HSP_MBOX_TYPE_DB 0x0
+#define TEGRA_HSP_MBOX_TYPE_SM 0x1
+#define TEGRA_HSP_MBOX_TYPE_SS 0x2
+#define TEGRA_HSP_MBOX_TYPE_AS 0x3
+
+/*
+ * These defines represent the bit associated with the given master ID in the
+ * doorbell registers.
+ */
+#define TEGRA_HSP_DB_MASTER_CCPLEX 17
+#define TEGRA_HSP_DB_MASTER_BPMP 19
+
+#endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index aafa76cb569d..d33f17c8a515 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -89,6 +89,10 @@
#define PMA8084_GPIO_S4 2
#define PMA8084_GPIO_L6 3
+#define PM8994_GPIO_VPH 0
+#define PM8994_GPIO_S4 2
+#define PM8994_GPIO_L12 3
+
/* To be used with "function" */
#define PMIC_GPIO_FUNC_NORMAL "normal"
#define PMIC_GPIO_FUNC_PAIRED "paired"
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
index a15c1704d0ec..2e360d8f7801 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-mpp.h
@@ -65,6 +65,12 @@
#define PMA8084_MPP_S4 2
#define PMA8084_MPP_L6 3
+#define PM8994_MPP_VPH 0
+/* Only supported for MPP_05-MPP_08 */
+#define PM8994_MPP_L19 1
+#define PM8994_MPP_S4 2
+#define PM8994_MPP_L12 3
+
/*
* Analog Input - Set the source for analog input.
* To be used with "qcom,amux-route" property
diff --git a/include/dt-bindings/power/r8a7743-sysc.h b/include/dt-bindings/power/r8a7743-sysc.h
new file mode 100644
index 000000000000..61cfbb2907ea
--- /dev/null
+++ b/include/dt-bindings/power/r8a7743-sysc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Cogent Embedded Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7743_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7743_PD_CA15_CPU0 0
+#define R8A7743_PD_CA15_CPU1 1
+#define R8A7743_PD_CA15_SCU 12
+#define R8A7743_PD_SGX 20
+
+/* Always-on power area */
+#define R8A7743_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7743_SYSC_H__ */
diff --git a/include/dt-bindings/power/tegra186-powergate.h b/include/dt-bindings/power/tegra186-powergate.h
new file mode 100644
index 000000000000..388d6e228dc8
--- /dev/null
+++ b/include/dt-bindings/power/tegra186-powergate.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+#define _DT_BINDINGS_POWER_TEGRA186_POWERGATE_H
+
+#define TEGRA186_POWER_DOMAIN_AUD 0
+#define TEGRA186_POWER_DOMAIN_DFD 1
+#define TEGRA186_POWER_DOMAIN_DISP 2
+#define TEGRA186_POWER_DOMAIN_DISPB 3
+#define TEGRA186_POWER_DOMAIN_DISPC 4
+#define TEGRA186_POWER_DOMAIN_ISPA 5
+#define TEGRA186_POWER_DOMAIN_NVDEC 6
+#define TEGRA186_POWER_DOMAIN_NVJPG 7
+#define TEGRA186_POWER_DOMAIN_MPE 8
+#define TEGRA186_POWER_DOMAIN_PCX 9
+#define TEGRA186_POWER_DOMAIN_SAX 10
+#define TEGRA186_POWER_DOMAIN_VE 11
+#define TEGRA186_POWER_DOMAIN_VIC 12
+#define TEGRA186_POWER_DOMAIN_XUSBA 13
+#define TEGRA186_POWER_DOMAIN_XUSBB 14
+#define TEGRA186_POWER_DOMAIN_XUSBC 15
+#define TEGRA186_POWER_DOMAIN_GPU 43
+#define TEGRA186_POWER_DOMAIN_MAX 44
+
+#endif
diff --git a/include/dt-bindings/reset/oxsemi,ox810se.h b/include/dt-bindings/reset/oxsemi,ox810se.h
new file mode 100644
index 000000000000..960c26e4504a
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox810se.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX810SE_H
+#define DT_RESET_OXSEMI_OX810SE_H
+
+#define RESET_ARM 0
+#define RESET_COPRO 1
+/* Reserved 2 */
+/* Reserved 3 */
+#define RESET_USBHS 4
+#define RESET_USBHSPHY 5
+#define RESET_MAC 6
+#define RESET_PCI 7
+#define RESET_DMA 8
+#define RESET_DPE 9
+#define RESET_DDR 10
+#define RESET_SATA 11
+#define RESET_SATA_LINK 12
+#define RESET_SATA_PHY 13
+/* Reserved 14 */
+#define RESET_NAND 15
+#define RESET_GPIO 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_MISC 19
+#define RESET_I2S 20
+#define RESET_AHB_MON 21
+#define RESET_UART3 22
+#define RESET_UART4 23
+#define RESET_SGDMA 24
+/* Reserved 25 */
+/* Reserved 26 */
+/* Reserved 27 */
+/* Reserved 28 */
+/* Reserved 29 */
+/* Reserved 30 */
+#define RESET_BUS 31
+
+#endif /* DT_RESET_OXSEMI_OX810SE_H */
diff --git a/include/dt-bindings/reset/oxsemi,ox820.h b/include/dt-bindings/reset/oxsemi,ox820.h
new file mode 100644
index 000000000000..cc6797bf01d8
--- /dev/null
+++ b/include/dt-bindings/reset/oxsemi,ox820.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_RESET_OXSEMI_OX820_H
+#define DT_RESET_OXSEMI_OX820_H
+
+#define RESET_SCU 0
+#define RESET_LEON 1
+#define RESET_ARM0 2
+#define RESET_ARM1 3
+#define RESET_USBHS 4
+#define RESET_USBPHYA 5
+#define RESET_MAC 6
+#define RESET_PCIEA 7
+#define RESET_SGDMA 8
+#define RESET_CIPHER 9
+#define RESET_DDR 10
+#define RESET_SATA 11
+#define RESET_SATA_LINK 12
+#define RESET_SATA_PHY 13
+#define RESET_PCIEPHY 14
+#define RESET_NAND 15
+#define RESET_GPIO 16
+#define RESET_UART1 17
+#define RESET_UART2 18
+#define RESET_MISC 19
+#define RESET_I2S 20
+#define RESET_SD 21
+#define RESET_MAC_2 22
+#define RESET_PCIEB 23
+#define RESET_VIDEO 24
+#define RESET_DDR_PHY 25
+#define RESET_USBPHYB 26
+#define RESET_USBDEV 27
+/* Reserved 28 */
+#define RESET_ARMDBG 29
+#define RESET_PLLA 30
+#define RESET_PLLB 31
+
+#endif /* DT_RESET_OXSEMI_OX820_H */
diff --git a/include/dt-bindings/reset/tegra186-reset.h b/include/dt-bindings/reset/tegra186-reset.h
new file mode 100644
index 000000000000..8a184e357955
--- /dev/null
+++ b/include/dt-bindings/reset/tegra186-reset.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_MACH_T186_RESET_T186_H_
+#define _ABI_MACH_T186_RESET_T186_H_
+
+
+#define TEGRA186_RESET_ACTMON 0
+#define TEGRA186_RESET_AFI 1
+#define TEGRA186_RESET_CEC 2
+#define TEGRA186_RESET_CSITE 3
+#define TEGRA186_RESET_DP2 4
+#define TEGRA186_RESET_DPAUX 5
+#define TEGRA186_RESET_DSI 6
+#define TEGRA186_RESET_DSIB 7
+#define TEGRA186_RESET_DTV 8
+#define TEGRA186_RESET_DVFS 9
+#define TEGRA186_RESET_ENTROPY 10
+#define TEGRA186_RESET_EXTPERIPH1 11
+#define TEGRA186_RESET_EXTPERIPH2 12
+#define TEGRA186_RESET_EXTPERIPH3 13
+#define TEGRA186_RESET_GPU 14
+#define TEGRA186_RESET_HDA 15
+#define TEGRA186_RESET_HDA2CODEC_2X 16
+#define TEGRA186_RESET_HDA2HDMICODEC 17
+#define TEGRA186_RESET_HOST1X 18
+#define TEGRA186_RESET_I2C1 19
+#define TEGRA186_RESET_I2C2 20
+#define TEGRA186_RESET_I2C3 21
+#define TEGRA186_RESET_I2C4 22
+#define TEGRA186_RESET_I2C5 23
+#define TEGRA186_RESET_I2C6 24
+#define TEGRA186_RESET_ISP 25
+#define TEGRA186_RESET_KFUSE 26
+#define TEGRA186_RESET_LA 27
+#define TEGRA186_RESET_MIPI_CAL 28
+#define TEGRA186_RESET_PCIE 29
+#define TEGRA186_RESET_PCIEXCLK 30
+#define TEGRA186_RESET_SATA 31
+#define TEGRA186_RESET_SATACOLD 32
+#define TEGRA186_RESET_SDMMC1 33
+#define TEGRA186_RESET_SDMMC2 34
+#define TEGRA186_RESET_SDMMC3 35
+#define TEGRA186_RESET_SDMMC4 36
+#define TEGRA186_RESET_SE 37
+#define TEGRA186_RESET_SOC_THERM 38
+#define TEGRA186_RESET_SOR0 39
+#define TEGRA186_RESET_SPI1 40
+#define TEGRA186_RESET_SPI2 41
+#define TEGRA186_RESET_SPI3 42
+#define TEGRA186_RESET_SPI4 43
+#define TEGRA186_RESET_TMR 44
+#define TEGRA186_RESET_TRIG_SYS 45
+#define TEGRA186_RESET_TSEC 46
+#define TEGRA186_RESET_UARTA 47
+#define TEGRA186_RESET_UARTB 48
+#define TEGRA186_RESET_UARTC 49
+#define TEGRA186_RESET_UARTD 50
+#define TEGRA186_RESET_VI 51
+#define TEGRA186_RESET_VIC 52
+#define TEGRA186_RESET_XUSB_DEV 53
+#define TEGRA186_RESET_XUSB_HOST 54
+#define TEGRA186_RESET_XUSB_PADCTL 55
+#define TEGRA186_RESET_XUSB_SS 56
+#define TEGRA186_RESET_AON_APB 57
+#define TEGRA186_RESET_AXI_CBB 58
+#define TEGRA186_RESET_BPMP_APB 59
+#define TEGRA186_RESET_CAN1 60
+#define TEGRA186_RESET_CAN2 61
+#define TEGRA186_RESET_DMIC5 62
+#define TEGRA186_RESET_DSIC 63
+#define TEGRA186_RESET_DSID 64
+#define TEGRA186_RESET_EMC_EMC 65
+#define TEGRA186_RESET_EMC_MEM 66
+#define TEGRA186_RESET_EMCSB_EMC 67
+#define TEGRA186_RESET_EMCSB_MEM 68
+#define TEGRA186_RESET_EQOS 69
+#define TEGRA186_RESET_GPCDMA 70
+#define TEGRA186_RESET_GPIO_CTL0 71
+#define TEGRA186_RESET_GPIO_CTL1 72
+#define TEGRA186_RESET_GPIO_CTL2 73
+#define TEGRA186_RESET_GPIO_CTL3 74
+#define TEGRA186_RESET_GPIO_CTL4 75
+#define TEGRA186_RESET_GPIO_CTL5 76
+#define TEGRA186_RESET_I2C10 77
+#define TEGRA186_RESET_I2C12 78
+#define TEGRA186_RESET_I2C13 79
+#define TEGRA186_RESET_I2C14 80
+#define TEGRA186_RESET_I2C7 81
+#define TEGRA186_RESET_I2C8 82
+#define TEGRA186_RESET_I2C9 83
+#define TEGRA186_RESET_JTAG2AXI 84
+#define TEGRA186_RESET_MPHY_IOBIST 85
+#define TEGRA186_RESET_MPHY_L0_RX 86
+#define TEGRA186_RESET_MPHY_L0_TX 87
+#define TEGRA186_RESET_NVCSI 88
+#define TEGRA186_RESET_NVDISPLAY0_HEAD0 89
+#define TEGRA186_RESET_NVDISPLAY0_HEAD1 90
+#define TEGRA186_RESET_NVDISPLAY0_HEAD2 91
+#define TEGRA186_RESET_NVDISPLAY0_MISC 92
+#define TEGRA186_RESET_NVDISPLAY0_WGRP0 93
+#define TEGRA186_RESET_NVDISPLAY0_WGRP1 94
+#define TEGRA186_RESET_NVDISPLAY0_WGRP2 95
+#define TEGRA186_RESET_NVDISPLAY0_WGRP3 96
+#define TEGRA186_RESET_NVDISPLAY0_WGRP4 97
+#define TEGRA186_RESET_NVDISPLAY0_WGRP5 98
+#define TEGRA186_RESET_PWM1 99
+#define TEGRA186_RESET_PWM2 100
+#define TEGRA186_RESET_PWM3 101
+#define TEGRA186_RESET_PWM4 102
+#define TEGRA186_RESET_PWM5 103
+#define TEGRA186_RESET_PWM6 104
+#define TEGRA186_RESET_PWM7 105
+#define TEGRA186_RESET_PWM8 106
+#define TEGRA186_RESET_SCE_APB 107
+#define TEGRA186_RESET_SOR1 108
+#define TEGRA186_RESET_TACH 109
+#define TEGRA186_RESET_TSC 110
+#define TEGRA186_RESET_UARTF 111
+#define TEGRA186_RESET_UARTG 112
+#define TEGRA186_RESET_UFSHC 113
+#define TEGRA186_RESET_UFSHC_AXI_M 114
+#define TEGRA186_RESET_UPHY 115
+#define TEGRA186_RESET_ADSP 116
+#define TEGRA186_RESET_ADSPDBG 117
+#define TEGRA186_RESET_ADSPINTF 118
+#define TEGRA186_RESET_ADSPNEON 119
+#define TEGRA186_RESET_ADSPPERIPH 120
+#define TEGRA186_RESET_ADSPSCU 121
+#define TEGRA186_RESET_ADSPWDT 122
+#define TEGRA186_RESET_APE 123
+#define TEGRA186_RESET_DPAUX1 124
+#define TEGRA186_RESET_NVDEC 125
+#define TEGRA186_RESET_NVENC 126
+#define TEGRA186_RESET_NVJPG 127
+#define TEGRA186_RESET_PEX_USB_UPHY 128
+#define TEGRA186_RESET_QSPI 129
+#define TEGRA186_RESET_TSECB 130
+#define TEGRA186_RESET_VI_I2C 131
+#define TEGRA186_RESET_UARTE 132
+#define TEGRA186_RESET_TOP_GTE 133
+#define TEGRA186_RESET_SHSP 134
+#define TEGRA186_RESET_PEX_USB_UPHY_L5 135
+#define TEGRA186_RESET_PEX_USB_UPHY_L4 136
+#define TEGRA186_RESET_PEX_USB_UPHY_L3 137
+#define TEGRA186_RESET_PEX_USB_UPHY_L2 138
+#define TEGRA186_RESET_PEX_USB_UPHY_L1 139
+#define TEGRA186_RESET_PEX_USB_UPHY_L0 140
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL1 141
+#define TEGRA186_RESET_PEX_USB_UPHY_PLL0 142
+#define TEGRA186_RESET_TSCTNVI 143
+#define TEGRA186_RESET_EXTPERIPH4 144
+#define TEGRA186_RESET_DSIPADCTL 145
+#define TEGRA186_RESET_AUD_MCLK 146
+#define TEGRA186_RESET_MPHY_CLK_CTL 147
+#define TEGRA186_RESET_MPHY_L1_RX 148
+#define TEGRA186_RESET_MPHY_L1_TX 149
+#define TEGRA186_RESET_UFSHC_LP 150
+#define TEGRA186_RESET_BPMP_NIC 151
+#define TEGRA186_RESET_BPMP_NSYSPORESET 152
+#define TEGRA186_RESET_BPMP_NRESET 153
+#define TEGRA186_RESET_BPMP_DBGRESETN 154
+#define TEGRA186_RESET_BPMP_PRESETDBGN 155
+#define TEGRA186_RESET_BPMP_PM 156
+#define TEGRA186_RESET_BPMP_CVC 157
+#define TEGRA186_RESET_BPMP_DMA 158
+#define TEGRA186_RESET_BPMP_HSP 159
+#define TEGRA186_RESET_TSCTNBPMP 160
+#define TEGRA186_RESET_BPMP_TKE 161
+#define TEGRA186_RESET_BPMP_GTE 162
+#define TEGRA186_RESET_BPMP_PM_ACTMON 163
+#define TEGRA186_RESET_AON_NIC 164
+#define TEGRA186_RESET_AON_NSYSPORESET 165
+#define TEGRA186_RESET_AON_NRESET 166
+#define TEGRA186_RESET_AON_DBGRESETN 167
+#define TEGRA186_RESET_AON_PRESETDBGN 168
+#define TEGRA186_RESET_AON_ACTMON 169
+#define TEGRA186_RESET_AOPM 170
+#define TEGRA186_RESET_AOVC 171
+#define TEGRA186_RESET_AON_DMA 172
+#define TEGRA186_RESET_AON_GPIO 173
+#define TEGRA186_RESET_AON_HSP 174
+#define TEGRA186_RESET_TSCTNAON 175
+#define TEGRA186_RESET_AON_TKE 176
+#define TEGRA186_RESET_AON_GTE 177
+#define TEGRA186_RESET_SCE_NIC 178
+#define TEGRA186_RESET_SCE_NSYSPORESET 179
+#define TEGRA186_RESET_SCE_NRESET 180
+#define TEGRA186_RESET_SCE_DBGRESETN 181
+#define TEGRA186_RESET_SCE_PRESETDBGN 182
+#define TEGRA186_RESET_SCE_ACTMON 183
+#define TEGRA186_RESET_SCE_PM 184
+#define TEGRA186_RESET_SCE_DMA 185
+#define TEGRA186_RESET_SCE_HSP 186
+#define TEGRA186_RESET_TSCTNSCE 187
+#define TEGRA186_RESET_SCE_TKE 188
+#define TEGRA186_RESET_SCE_GTE 189
+#define TEGRA186_RESET_SCE_CFG 190
+#define TEGRA186_RESET_ADSP_ALL 191
+/** @brief controls the power up/down sequence of UFSHC PSW partition. Controls LP_PWR_READY, LP_ISOL_EN, and LP_RESET_N signals */
+#define TEGRA186_RESET_UFSHC_LP_SEQ 192
+#define TEGRA186_RESET_SIZE 193
+
+#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 5fa55fc56e18..32dc0cbd51ca 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
if (best == table - 1)
return pos - table;
- return best - pos;
+ return best - table;
}
- return best - pos;
+ return best - table;
}
/* Works only on sorted freq-tables */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 9b207a8c5af3..afe641c02dca 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -81,6 +81,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_DUMMY_TIMER_STARTING,
+ CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
CPUHP_AP_METAG_TIMER_STARTING,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8361c8d3edd1..b7e34313cdfe 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -290,7 +290,7 @@
#define GITS_BASER_TYPE_SHIFT (56)
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
-#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d600303306eb..820c0ad54a01 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -44,6 +44,7 @@ static inline void kasan_disable_current(void)
void kasan_unpoison_shadow(const void *address, size_t size);
void kasan_unpoison_task_stack(struct task_struct *task);
+void kasan_unpoison_stack_above_sp_to(const void *watermark);
void kasan_alloc_pages(struct page *page, unsigned int order);
void kasan_free_pages(struct page *page, unsigned int order);
@@ -85,6 +86,7 @@ size_t kasan_metadata_size(struct kmem_cache *cache);
static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {}
static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6a51e9..3a191853faaa 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1266,9 +1266,10 @@ static inline int fixup_user_fault(struct task_struct *tsk,
}
#endif
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+ unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write);
+ void *buf, int len, unsigned int gup_flags);
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -1276,19 +1277,18 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages, int *locked);
+ unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags);
+ struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
+ struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -1306,7 +1306,7 @@ struct frame_vector {
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- bool write, bool force, struct frame_vector *vec);
+ unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
@@ -1391,7 +1391,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
+int vma_is_stack_for_current(struct vm_area_struct *vma);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 7676557ce357..fc3c24206593 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -16,7 +16,6 @@
#define _LINUX_NVME_H
#include <linux/types.h>
-#include <linux/uuid.h>
/* NQN names in commands fields specified one size */
#define NVMF_NQN_FIELD_LEN 256
@@ -182,7 +181,7 @@ struct nvme_id_ctrl {
char fr[8];
__u8 rab;
__u8 ieee[3];
- __u8 mic;
+ __u8 cmic;
__u8 mdts;
__le16 cntlid;
__le32 ver;
@@ -202,7 +201,13 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
- __u8 rsvd270[50];
+ __le16 mtfa;
+ __le32 hmpre;
+ __le32 hmmin;
+ __u8 tnvmcap[16];
+ __u8 unvmcap[16];
+ __le32 rpmbs;
+ __u8 rsvd316[4];
__le16 kas;
__u8 rsvd322[190];
__u8 sqes;
@@ -267,7 +272,7 @@ struct nvme_id_ns {
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
- __le64 nvmcap[2];
+ __u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
__u8 eui64[8];
@@ -277,6 +282,16 @@ struct nvme_id_ns {
};
enum {
+ NVME_ID_CNS_NS = 0x00,
+ NVME_ID_CNS_CTRL = 0x01,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
+ NVME_ID_CNS_NS_PRESENT_LIST = 0x10,
+ NVME_ID_CNS_NS_PRESENT = 0x11,
+ NVME_ID_CNS_CTRL_NS_LIST = 0x12,
+ NVME_ID_CNS_CTRL_LIST = 0x13,
+};
+
+enum {
NVME_NS_FEAT_THIN = 1 << 0,
NVME_NS_FLBAS_LBA_MASK = 0xf,
NVME_NS_FLBAS_META_EXT = 0x10,
@@ -556,8 +571,10 @@ enum nvme_admin_opcode {
nvme_admin_set_features = 0x09,
nvme_admin_get_features = 0x0a,
nvme_admin_async_event = 0x0c,
+ nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -583,6 +600,7 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
@@ -745,7 +763,7 @@ struct nvmf_common_command {
struct nvmf_disc_rsp_page_entry {
__u8 trtype;
__u8 adrfam;
- __u8 nqntype;
+ __u8 subtype;
__u8 treq;
__le16 portid;
__le16 cntlid;
@@ -794,7 +812,7 @@ struct nvmf_connect_command {
};
struct nvmf_connect_data {
- uuid_be hostid;
+ __u8 hostid[16];
__le16 cntlid;
char resv4[238];
char subsysnqn[NVMF_NQN_FIELD_LEN];
@@ -905,12 +923,23 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
- NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
NVME_SC_INVALID_QUEUE = 0x10c,
NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
- NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+ NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
+ NVME_SC_FW_NEEDS_RESET = 0x111,
+ NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
+ NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_OVERLAPPING_RANGE = 0x114,
+ NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_ID_UNAVAILABLE = 0x116,
+ NVME_SC_NS_ALREADY_ATTACHED = 0x118,
+ NVME_SC_NS_IS_PRIVATE = 0x119,
+ NVME_SC_NS_NOT_ATTACHED = 0x11a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+ NVME_SC_CTRL_LIST_INVALID = 0x11c,
/*
* I/O Command Set Specific - NVM commands:
@@ -941,6 +970,7 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
+ NVME_SC_UNWRITTEN_BLOCK = 0x287,
NVME_SC_DNR = 0x4000,
};
@@ -960,6 +990,7 @@ struct nvme_completion {
__le16 status; /* did the command fail, and if so, why? */
};
-#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+#define NVME_VS(major, minor, tertiary) \
+ (((major) << 16) | ((minor) << 8) | (tertiary))
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
index a37bc5538f19..eab64976a73b 100644
--- a/include/linux/soc/qcom/wcnss_ctrl.h
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -3,6 +3,19 @@
#include <linux/soc/qcom/smd.h>
+#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
+
struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+#else
+
+static inline struct qcom_smd_channel*
+qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+{
+ WARN_ON(1);
+ return ERR_PTR(-ENXIO);
+}
+
+#endif
+
#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0d7abb8b7315..91a740f6b884 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -902,8 +902,5 @@ asmlinkage long sys_pkey_mprotect(unsigned long start, size_t len,
unsigned long prot, int pkey);
asmlinkage long sys_pkey_alloc(unsigned long flags, unsigned long init_val);
asmlinkage long sys_pkey_free(int pkey);
-//asmlinkage long sys_pkey_get(int pkey, unsigned long flags);
-//asmlinkage long sys_pkey_set(int pkey, unsigned long access_rights,
-// unsigned long flags);
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 45f004e9cc59..2873baf5372a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -14,17 +14,6 @@ struct timespec;
struct compat_timespec;
#ifdef CONFIG_THREAD_INFO_IN_TASK
-struct thread_info {
- unsigned long flags; /* low level flags */
-};
-
-#define INIT_THREAD_INFO(tsk) \
-{ \
- .flags = 0, \
-}
-#endif
-
-#ifdef CONFIG_THREAD_INFO_IN_TASK
#define current_thread_info() ((struct thread_info *)current)
#endif
diff --git a/include/soc/at91/atmel-secumod.h b/include/soc/at91/atmel-secumod.h
new file mode 100644
index 000000000000..22cd5d506926
--- /dev/null
+++ b/include/soc/at91/atmel-secumod.h
@@ -0,0 +1,19 @@
+/*
+ * Atmel Security Module register offsets and bit definitions.
+ *
+ * Copyright (C) 2016 Atmel
+ *
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+#define _LINUX_SOC_AT91_ATMEL_SECUMOD_H
+
+#define AT91_SECUMOD_RAMRDY 0x14
+#define AT91_SECUMOD_RAMRDY_READY BIT(0)
+
+#endif /* _LINUX_SOC_AT91_ATMEL_SECUMOD_H */
diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h
new file mode 100644
index 000000000000..0aaef5960e29
--- /dev/null
+++ b/include/soc/tegra/bpmp-abi.h
@@ -0,0 +1,1601 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ABI_BPMP_ABI_H_
+#define _ABI_BPMP_ABI_H_
+
+#ifdef LK
+#include <stdint.h>
+#endif
+
+#ifndef __ABI_PACKED
+#define __ABI_PACKED __attribute__((packed))
+#endif
+
+#ifdef NO_GCC_EXTENSIONS
+#define EMPTY char empty;
+#define EMPTY_ARRAY 1
+#else
+#define EMPTY
+#define EMPTY_ARRAY 0
+#endif
+
+#ifndef __UNION_ANON
+#define __UNION_ANON
+#endif
+/**
+ * @file
+ */
+
+
+/**
+ * @defgroup MRQ MRQ Messages
+ * @brief Messages sent to/from BPMP via IPC
+ * @{
+ * @defgroup MRQ_Format Message Format
+ * @defgroup MRQ_Codes Message Request (MRQ) Codes
+ * @defgroup MRQ_Payloads Message Payloads
+ * @defgroup Error_Codes Error Codes
+ * @}
+ */
+
+/**
+ * @addtogroup MRQ_Format Message Format
+ * @{
+ * The CPU requests the BPMP to perform a particular service by
+ * sending it an IVC frame containing a single MRQ message. An MRQ
+ * message consists of a @ref mrq_request followed by a payload whose
+ * format depends on mrq_request::mrq.
+ *
+ * The BPMP processes the data and replies with an IVC frame (on the
+ * same IVC channel) containing an MRQ response. An MRQ response
+ * consists of a @ref mrq_response followed by a payload whose format
+ * depends on the associated mrq_request::mrq.
+ *
+ * A well-defined subset of the MRQ messages that the CPU sends to the
+ * BPMP can lead to BPMP eventually sending an MRQ message to the
+ * CPU. For example, when the CPU uses an #MRQ_THERMAL message to set
+ * a thermal trip point, the BPMP may eventually send a single
+ * #MRQ_THERMAL message of its own to the CPU indicating that the trip
+ * point has been crossed.
+ * @}
+ */
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ message
+ *
+ * Provides the MRQ number for the MRQ message: #mrq. The remainder of
+ * the MRQ message is a payload (immediately following the
+ * mrq_request) whose format depends on mrq.
+ *
+ * @todo document the flags
+ */
+struct mrq_request {
+ /** @brief MRQ number of the request */
+ uint32_t mrq;
+ /** @brief flags for the request */
+ uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * @brief header for an MRQ response
+ *
+ * Provides an error code for the associated MRQ message. The
+ * remainder of the MRQ response is a payload (immediately following
+ * the mrq_response) whose format depends on the associated
+ * mrq_request::mrq
+ *
+ * @todo document the flags
+ */
+struct mrq_response {
+ /** @brief error code for the MRQ request itself */
+ int32_t err;
+ /** @brief flags for the response */
+ uint32_t flags;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Format
+ * Minimum needed size for an IPC message buffer
+ */
+#define MSG_MIN_SZ 128
+/**
+ * @ingroup MRQ_Format
+ * Minimum size guaranteed for data in an IPC message buffer
+ */
+#define MSG_DATA_MIN_SZ 120
+
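
As an illustration (an editorial sketch, not part of the patch itself): the message-format description above says every MRQ message is a struct mrq_request header immediately followed by an MRQ-specific payload, carried in an IPC buffer of at least MSG_MIN_SZ bytes. The frame type below is an assumption introduced only to show that layout.

struct example_frame {
	struct mrq_request hdr;		/* 8 bytes: MRQ number and flags */
	uint8_t data[MSG_DATA_MIN_SZ];	/* MRQ-specific request payload */
} __ABI_PACKED;

/* 8 + 120 == 128, i.e. sizeof(struct example_frame) == MSG_MIN_SZ. The
 * reply direction has the same shape with struct mrq_response as header. */
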
+/**
+ * @ingroup MRQ_Codes
+ * @name Legal MRQ codes
+ * These are the legal values for mrq_request::mrq
+ * @{
+ */
+
+#define MRQ_PING 0
+#define MRQ_QUERY_TAG 1
+#define MRQ_MODULE_LOAD 4
+#define MRQ_MODULE_UNLOAD 5
+#define MRQ_TRACE_MODIFY 7
+#define MRQ_WRITE_TRACE 8
+#define MRQ_THREADED_PING 9
+#define MRQ_MODULE_MAIL 11
+#define MRQ_DEBUGFS 19
+#define MRQ_RESET 20
+#define MRQ_I2C 21
+#define MRQ_CLK 22
+#define MRQ_QUERY_ABI 23
+#define MRQ_PG_READ_STATE 25
+#define MRQ_PG_UPDATE_STATE 26
+#define MRQ_THERMAL 27
+#define MRQ_CPU_VHINT 28
+#define MRQ_ABI_RATCHET 29
+#define MRQ_EMC_DVFS_LATENCY 31
+#define MRQ_TRACE_ITER 64
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @brief Maximum MRQ code to be sent by CPU software to
+ * BPMP. Subject to change in future
+ */
+#define MAX_CPU_MRQ_ID 64
+
+/**
+ * @addtogroup MRQ_Payloads Message Payloads
+ * @{
+ * @defgroup Ping
+ * @defgroup Query_Tag Query Tag
+ * @defgroup Module Loadable Modules
+ * @defgroup Trace
+ * @defgroup Debugfs
+ * @defgroup Reset
+ * @defgroup I2C
+ * @defgroup Clocks
+ * @defgroup ABI_info ABI Info
+ * @defgroup MC_Flush MC Flush
+ * @defgroup Powergating
+ * @defgroup Thermal
+ * @defgroup Vhint CPU Voltage hint
+ * @defgroup MRQ_Deprecated Deprecated MRQ messages
+ * @defgroup EMC
+ * @}
+ */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PING
+ * @brief A simple ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * @ingroup MRQ_Codes
+ * @def MRQ_THREADED_PING
+ * @brief A deeper ping
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_ping_request
+ * * Response Payload: @ref mrq_ping_response
+ *
+ * Behavior is equivalent to a simple #MRQ_PING except that BPMP
+ * responds from a thread context (providing a slightly more robust
+ * sign of life).
+ *
+ */
+
+/**
+ * @ingroup Ping
+ * @brief request with #MRQ_PING
+ *
+ * Used by the sender of an #MRQ_PING message to request a pong from the
+ * recipient. The response from the recipient is computed based on
+ * #challenge.
+ */
+struct mrq_ping_request {
+ /** @brief arbitrarily chosen value */
+ uint32_t challenge;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Ping
+ * @brief response to #MRQ_PING
+ *
+ * Sent in response to an #MRQ_PING message. #reply should be the
+ * mrq_ping_request challenge left shifted by 1 with the carry-bit
+ * dropped.
+ *
+ */
+struct mrq_ping_response {
+ /** @brief response to the MRQ_PING challenge */
+ uint32_t reply;
+} __ABI_PACKED;
+
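
As an illustration (an editorial sketch, not part of the patch itself): the reply rule above, "challenge left shifted by 1 with the carry-bit dropped", amounts to a shift truncated to 32 bits, e.g. a challenge of 0x80000001 yields a reply of 0x00000002. A check the CPU side might apply:

static inline int example_pong_is_valid(const struct mrq_ping_request *req,
					const struct mrq_ping_response *rsp)
{
	/* Truncation to 32 bits drops the bit carried out of the shift. */
	return rsp->reply == (uint32_t)(req->challenge << 1);
}
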
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_TAG
+ * @brief Query BPMP firmware's tag (i.e. version information)
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_query_tag_request
+ * * Response Payload: N/A
+ *
+ */
+
+/**
+ * @ingroup Query_Tag
+ * @brief request with #MRQ_QUERY_TAG
+ *
+ * Used by the #MRQ_QUERY_TAG call to ask BPMP to fill in the memory
+ * pointed to by #addr with the BPMP firmware header.
+ *
+ * The sender is responsible for ensuring that #addr is mapped into
+ * the recipient's address map.
+ */
+struct mrq_query_tag_request {
+ /** @brief base address to store the firmware header */
+ uint32_t addr;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_LOAD
+ * @brief dynamically load a BPMP code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_load_request
+ * * Response Payload: @ref mrq_module_load_response
+ *
+ * @note This MRQ is disabled on production systems
+ *
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_LOAD
+ *
+ * Used by #MRQ_MODULE_LOAD calls to ask the recipient to dynamically
+ * load the #size bytes of code located at #phys_addr. #phys_addr is
+ * treated as a void pointer.
+ *
+ * The recipient copies the code from #phys_addr to locally allocated
+ * memory prior to responding to this message.
+ *
+ * @todo document the module header format
+ *
+ * The sender is responsible for ensuring that the code is mapped in
+ * the recipient's address map.
+ *
+ */
+struct mrq_module_load_request {
+ /** @brief base address of the code to load. Treated as (void *) */
+ uint32_t phys_addr; /* (void *) */
+ /** @brief size in bytes of code to load */
+ uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_LOAD
+ *
+ * @todo document mrq_response::err
+ */
+struct mrq_module_load_response {
+ /** @brief handle to the loaded module */
+ uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_UNLOAD
+ * @brief unload a previously loaded code module
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_unload_request
+ * * Response Payload: N/A
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_UNLOAD
+ *
+ * Used by #MRQ_MODULE_UNLOAD calls to request that a previously loaded
+ * module be unloaded.
+ */
+struct mrq_module_unload_request {
+ /** @brief handle of the module to unload */
+ uint32_t base;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_MODIFY
+ * @brief modify the set of enabled trace events
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_trace_modify_request
+ * * Response Payload: @ref mrq_trace_modify_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_TRACE_MODIFY
+ *
+ * Used by %MRQ_TRACE_MODIFY calls to enable or disable specific trace
+ * events. #set takes precedence for any bit set in both #set and
+ * #clr.
+ */
+struct mrq_trace_modify_request {
+ /** @brief bit mask of trace events to disable */
+ uint32_t clr;
+ /** @brief bit mask of trace events to enable */
+ uint32_t set;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_TRACE_MODIFY
+ *
+ * Sent in response to an #MRQ_TRACE_MODIFY message. #mask reflects the
+ * state of which events are enabled after the recipient acted on the
+ * message.
+ *
+ */
+struct mrq_trace_modify_response {
+ /** @brief bit mask of trace event enable states */
+ uint32_t mask;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_WRITE_TRACE
+ * @brief Write trace data to a buffer
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_write_trace_request
+ * * Response Payload: @ref mrq_write_trace_response
+ *
+ * mrq_response::err depends on the @ref mrq_write_trace_request field
+ * values. err is -#BPMP_EINVAL if size is zero or area is NULL or
+ * area is in an illegal range. A positive value for err indicates the
+ * number of bytes written to area.
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Trace
+ * @brief request with #MRQ_WRITE_TRACE
+ *
+ * Used by MRQ_WRITE_TRACE calls to ask the recipient to copy trace
+ * data from the recipient's local buffer to the output buffer. #area
+ * is treated as a byte-aligned pointer in the recipient's address
+ * space.
+ *
+ * The sender is responsible for ensuring that the output
+ * buffer is mapped in the recipient's address map. The recipient is
+ * responsible for protecting its own code and data from accidental
+ * overwrites.
+ */
+struct mrq_write_trace_request {
+ /** @brief base address of output buffer */
+ uint32_t area;
+ /** @brief size in bytes of the output buffer */
+ uint32_t size;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Trace
+ * @brief response to #MRQ_WRITE_TRACE
+ *
+ * Once this response is sent, the respondent will not access the
+ * output buffer further.
+ */
+struct mrq_write_trace_response {
+ /**
+ * @brief flag whether more data remains in the local buffer
+ *
+ * Value is 1 if the entire local trace buffer has been
+ * drained to the output buffer. Value is 0 otherwise.
+ */
+ uint32_t eof;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_request {
+ uint32_t challenge;
+} __ABI_PACKED;
+
+/** @private */
+struct mrq_threaded_ping_response {
+ uint32_t reply;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_MODULE_MAIL
+ * @brief send a message to a loadable module
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_module_mail_request
+ * * Response Payload: @ref mrq_module_mail_response
+ *
+ * @note This MRQ is disabled on production systems
+ */
+
+/**
+ * @ingroup Module
+ * @brief request with #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_request {
+ /** @brief handle to the previously loaded module */
+ uint32_t base;
+ /** @brief module-specific mail payload
+ *
+ * The length of data[ ] is unknown to the BPMP core firmware
+ * but it is limited to the size of an IPC message.
+ */
+ uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup Module
+ * @brief response to #MRQ_MODULE_MAIL
+ */
+struct mrq_module_mail_response {
+ /** @brief module-specific mail payload
+ *
+ * The length of data[ ] is unknown to the BPMP core firmware
+ * but it is limited to the size of an IPC message.
+ */
+ uint8_t data[EMPTY_ARRAY];
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_DEBUGFS
+ * @brief Interact with BPMP's debugfs file nodes
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_debugfs_request
+ * * Response Payload: @ref mrq_debugfs_response
+ */
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ *
+ * The BPMP firmware implements a pseudo-filesystem called
+ * debugfs. Any driver within the firmware may register with debugfs
+ * to expose an arbitrary set of "files" in the filesystem. When
+ * software on the CPU writes to a debugfs file, debugfs passes the
+ * written data to a callback provided by the driver. When software on
+ * the CPU reads a debugfs file, debugfs queries the driver for the
+ * data to return to the CPU. The intention of the debugfs filesystem
+ * is to provide information useful for debugging the system at
+ * runtime.
+ *
+ * @note The files exposed via debugfs are not part of the
+ * BPMP firmware's ABI. debugfs files may be added or removed in any
+ * given version of the firmware. Typically the semantics of a debugfs
+ * file are consistent from version to version but even that is not
+ * guaranteed.
+ *
+ * @}
+ */
+/** @ingroup Debugfs */
+enum mrq_debugfs_commands {
+ CMD_DEBUGFS_READ = 1,
+ CMD_DEBUGFS_WRITE = 2,
+ CMD_DEBUGFS_DUMPDIR = 3,
+ CMD_DEBUGFS_MAX
+};
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_request {
+ /** @brief physical address pointing at filename */
+ uint32_t fnameaddr;
+ /** @brief length in bytes of filename buffer */
+ uint32_t fnamelen;
+ /** @brief physical address pointing to data buffer */
+ uint32_t dataaddr;
+ /** @brief length in bytes of data buffer */
+ uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief parameters for the CMD_DEBUGFS_DUMPDIR command
+ */
+struct cmd_debugfs_dumpdir_request {
+ /** @brief physical address pointing to data buffer */
+ uint32_t dataaddr;
+ /** @brief length in bytes of data buffer */
+ uint32_t datalen;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_READ/WRITE command
+ */
+struct cmd_debugfs_fileop_response {
+ /** @brief always 0 */
+ uint32_t reserved;
+ /** @brief number of bytes read from or written to data buffer */
+ uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief response data for CMD_DEBUGFS_DUMPDIR command
+ */
+struct cmd_debugfs_dumpdir_response {
+ /** @brief always 0 */
+ uint32_t reserved;
+ /** @brief number of bytes read from or written to data buffer */
+ uint32_t nbytes;
+} __ABI_PACKED;
+
+/**
+ * @ingroup Debugfs
+ * @brief request with #MRQ_DEBUGFS.
+ *
+ * The sender of an MRQ_DEBUGFS message uses #cmd to specify a debugfs
+ * command to execute. Legal commands are the values of @ref
+ * mrq_debugfs_commands. Each command requires a specific additional
+ * payload of data.
+ *
+ * |command |payload|
+ * |-------------------|-------|
+ * |CMD_DEBUGFS_READ |fop |
+ * |CMD_DEBUGFS_WRITE |fop |
+ * |CMD_DEBUGFS_DUMPDIR|dumpdir|
+ */
+struct mrq_debugfs_request {
+ uint32_t cmd;
+ union {
+ struct cmd_debugfs_fileop_request fop;
+ struct cmd_debugfs_dumpdir_request dumpdir;
+ } __UNION_ANON;
+} __ABI_PACKED;
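+
+/*
+ * Illustrative sketch (not part of the ABI): filling in a CMD_DEBUGFS_READ
+ * request. fname_phys, fname_len, data_phys and data_len are hypothetical
+ * and refer to buffers that the sender has already mapped for the BPMP.
+ *
+ *     struct mrq_debugfs_request req = {
+ *             .cmd = CMD_DEBUGFS_READ,
+ *             .fop = {
+ *                     .fnameaddr = fname_phys,
+ *                     .fnamelen = fname_len,
+ *                     .dataaddr = data_phys,
+ *                     .datalen = data_len,
+ *             },
+ *     };
+ */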
+
+/**
+ * @ingroup Debugfs
+ */
+struct mrq_debugfs_response {
+ /** @brief always 0 */
+ int32_t reserved;
+ union {
+ /** @brief response data for CMD_DEBUGFS_READ or
+ * CMD_DEBUGFS_WRITE command
+ */
+ struct cmd_debugfs_fileop_response fop;
+ /** @brief response data for CMD_DEBUGFS_DUMPDIR command */
+ struct cmd_debugfs_dumpdir_response dumpdir;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @addtogroup Debugfs
+ * @{
+ */
+#define DEBUGFS_S_ISDIR (1 << 9)
+#define DEBUGFS_S_IRUSR (1 << 8)
+#define DEBUGFS_S_IWUSR (1 << 7)
+/** @} */
+
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_RESET
+ * @brief reset an IP block
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_reset_request
+ * * Response Payload: N/A
+ */
+
+/**
+ * @ingroup Reset
+ */
+enum mrq_reset_commands {
+ CMD_RESET_ASSERT = 1,
+ CMD_RESET_DEASSERT = 2,
+ CMD_RESET_MODULE = 3,
+ CMD_RESET_MAX, /* not part of ABI and subject to change */
+};
+
+/**
+ * @ingroup Reset
+ * @brief request with MRQ_RESET
+ *
+ * Used by the sender of an #MRQ_RESET message to request BPMP to
+ * assert or deassert a given reset line.
+ */
+struct mrq_reset_request {
+ /** @brief reset action to perform (see @ref mrq_reset_commands) */
+ uint32_t cmd;
+ /** @brief id of the reset to affect */
+ uint32_t reset_id;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_I2C
+ * @brief issue an i2c transaction
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_i2c_request
+ * * Response Payload: @ref mrq_i2c_response
+ */
+
+/**
+ * @addtogroup I2C
+ * @{
+ */
+#define TEGRA_I2C_IPC_MAX_IN_BUF_SIZE (MSG_DATA_MIN_SZ - 12)
+#define TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE (MSG_DATA_MIN_SZ - 4)
+/** @} */
+
+/**
+ * @ingroup I2C
+ * @name Serial I2C flags
+ * Use these flags with serial_i2c_request::flags
+ * @{
+ */
+#define SERIALI2C_TEN 0x0010
+#define SERIALI2C_RD 0x0001
+#define SERIALI2C_STOP 0x8000
+#define SERIALI2C_NOSTART 0x4000
+#define SERIALI2C_REV_DIR_ADDR 0x2000
+#define SERIALI2C_IGNORE_NAK 0x1000
+#define SERIALI2C_NO_RD_ACK 0x0800
+#define SERIALI2C_RECV_LEN 0x0400
+/** @} */
+/** @ingroup I2C */
+enum {
+ CMD_I2C_XFER = 1
+};
+
+/**
+ * @ingroup I2C
+ * @brief serializable i2c request
+ *
+ * Instances of this structure are packed (little-endian) into
+ * cmd_i2c_xfer_request::data_buf. Each instance represents a single
+ * transaction (or a portion of a transaction with repeated starts) on
+ * an i2c bus.
+ *
+ * Because these structures are packed, some instances are likely to
+ * be misaligned. Additionally because #data is variable length, it is
+ * not possible to iterate through a serialized list of these
+ * structures without inspecting #len in each instance. It may be
+ * easier to serialize or deserialize cmd_i2c_xfer_request::data_buf
+ * manually rather than using this structure definition.
+*/
+struct serial_i2c_request {
+ /** @brief I2C slave address */
+ uint16_t addr;
+ /** @brief bitmask of SERIALI2C_ flags */
+ uint16_t flags;
+ /** @brief length of I2C transaction in bytes */
+ uint16_t len;
+ /** @brief for write transactions only, #len bytes of data */
+ uint8_t data[];
+} __ABI_PACKED;
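+
+/*
+ * Illustrative sketch (not part of the ABI) of the manual serialization
+ * suggested above: appending one little-endian write transaction to a
+ * cmd_i2c_xfer_request::data_buf. The function name is hypothetical.
+ *
+ *     static int serialize_i2c_write(uint8_t *buf, size_t space, uint16_t addr,
+ *                                    const uint8_t *data, uint16_t len)
+ *     {
+ *             size_t i, need = 6 + len;     // 3 x uint16_t header + payload
+ *
+ *             if (space < need)
+ *                     return -1;
+ *             buf[0] = addr & 0xff;
+ *             buf[1] = addr >> 8;
+ *             buf[2] = 0;                   // flags: no SERIALI2C_ bits set
+ *             buf[3] = 0;
+ *             buf[4] = len & 0xff;
+ *             buf[5] = len >> 8;
+ *             for (i = 0; i < len; i++)
+ *                     buf[6 + i] = data[i];
+ *             return (int)need;
+ *     }
+ */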
+
+/**
+ * @ingroup I2C
+ * @brief trigger one or more i2c transactions
+ */
+struct cmd_i2c_xfer_request {
+ /** @brief valid bus number from mach-t186/i2c-t186.h*/
+ uint32_t bus_id;
+
+ /** @brief count of valid bytes in #data_buf*/
+ uint32_t data_size;
+
+ /** @brief serialized packed instances of @ref serial_i2c_request*/
+ uint8_t data_buf[TEGRA_I2C_IPC_MAX_IN_BUF_SIZE];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief container for data read from the i2c bus
+ *
+ * Processing an cmd_i2c_xfer_request::data_buf causes BPMP to execute
+ * zero or more I2C reads. The data read from the bus is serialized
+ * into #data_buf.
+ */
+struct cmd_i2c_xfer_response {
+ /** @brief count of valid bytes in #data_buf*/
+ uint32_t data_size;
+ /** @brief i2c read data */
+ uint8_t data_buf[TEGRA_I2C_IPC_MAX_OUT_BUF_SIZE];
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief request with #MRQ_I2C
+ */
+struct mrq_i2c_request {
+ /** @brief always CMD_I2C_XFER (i.e. 1) */
+ uint32_t cmd;
+ /** @brief parameters of the transfer request */
+ struct cmd_i2c_xfer_request xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup I2C
+ * @brief response to #MRQ_I2C
+ */
+struct mrq_i2c_response {
+ struct cmd_i2c_xfer_response xfer;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CLK
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_clk_request
+ * * Response Payload: @ref mrq_clk_response
+ * @addtogroup Clocks
+ * @{
+ */
+
+/**
+ * @name MRQ_CLK sub-commands
+ * @{
+ */
+enum {
+ CMD_CLK_GET_RATE = 1,
+ CMD_CLK_SET_RATE = 2,
+ CMD_CLK_ROUND_RATE = 3,
+ CMD_CLK_GET_PARENT = 4,
+ CMD_CLK_SET_PARENT = 5,
+ CMD_CLK_IS_ENABLED = 6,
+ CMD_CLK_ENABLE = 7,
+ CMD_CLK_DISABLE = 8,
+ CMD_CLK_GET_ALL_INFO = 14,
+ CMD_CLK_GET_MAX_CLK_ID = 15,
+ CMD_CLK_MAX,
+};
+/** @} */
+
+#define MRQ_CLK_NAME_MAXLEN 40
+#define MRQ_CLK_MAX_PARENTS 16
+
+/** @private */
+struct cmd_clk_get_rate_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_request {
+ int32_t unused;
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_set_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_request {
+ int32_t unused;
+ int64_t rate;
+} __ABI_PACKED;
+
+struct cmd_clk_round_rate_response {
+ int64_t rate;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_parent_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_parent_response {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_request {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+struct cmd_clk_set_parent_response {
+ uint32_t parent_id;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_is_enabled_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_is_enabled_response {
+ int32_t state;
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_request {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_enable_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_request {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_disable_response {
+ EMPTY
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_all_info_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_all_info_response {
+ uint32_t flags;
+ uint32_t parent;
+ uint32_t parents[MRQ_CLK_MAX_PARENTS];
+ uint8_t num_parents;
+ uint8_t name[MRQ_CLK_NAME_MAXLEN];
+} __ABI_PACKED;
+
+/** @private */
+struct cmd_clk_get_max_clk_id_request {
+ EMPTY
+} __ABI_PACKED;
+
+struct cmd_clk_get_max_clk_id_response {
+ uint32_t max_id;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup Clocks
+ * @brief request with #MRQ_CLK
+ *
+ * Used by the sender of an #MRQ_CLK message to control clocks. The
+ * clk_request is split into several sub-commands. Some sub-commands
+ * require no additional data. Others have a sub-command specific
+ * payload
+ *
+ * |sub-command |payload |
+ * |----------------------------|-----------------------|
+ * |CMD_CLK_GET_RATE |- |
+ * |CMD_CLK_SET_RATE |clk_set_rate |
+ * |CMD_CLK_ROUND_RATE |clk_round_rate |
+ * |CMD_CLK_GET_PARENT |- |
+ * |CMD_CLK_SET_PARENT |clk_set_parent |
+ * |CMD_CLK_IS_ENABLED |- |
+ * |CMD_CLK_ENABLE |- |
+ * |CMD_CLK_DISABLE |- |
+ * |CMD_CLK_GET_ALL_INFO |- |
+ * |CMD_CLK_GET_MAX_CLK_ID |- |
+ *
+ */
+
+struct mrq_clk_request {
+ /** @brief sub-command and clock id concatenated into a 32-bit word.
+ * - bits[31..24] is the sub-cmd.
+ * - bits[23..0] is the clock id
+ */
+ uint32_t cmd_and_id;
+
+ union {
+ /** @private */
+ struct cmd_clk_get_rate_request clk_get_rate;
+ struct cmd_clk_set_rate_request clk_set_rate;
+ struct cmd_clk_round_rate_request clk_round_rate;
+ /** @private */
+ struct cmd_clk_get_parent_request clk_get_parent;
+ struct cmd_clk_set_parent_request clk_set_parent;
+ /** @private */
+ struct cmd_clk_enable_request clk_enable;
+ /** @private */
+ struct cmd_clk_disable_request clk_disable;
+ /** @private */
+ struct cmd_clk_is_enabled_request clk_is_enabled;
+ /** @private */
+ struct cmd_clk_get_all_info_request clk_get_all_info;
+ /** @private */
+ struct cmd_clk_get_max_clk_id_request clk_get_max_clk_id;
+ } __UNION_ANON;
+} __ABI_PACKED;
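+
+/*
+ * Illustrative sketch (not part of the ABI): packing cmd_and_id from a
+ * sub-command and a clock id as described above. clk_id is hypothetical.
+ *
+ *     static uint32_t clk_cmd_and_id(uint32_t cmd, uint32_t clk_id)
+ *     {
+ *             return (cmd << 24) | (clk_id & 0xffffff);
+ *     }
+ *
+ *     struct mrq_clk_request req = {
+ *             .cmd_and_id = clk_cmd_and_id(CMD_CLK_GET_RATE, clk_id),
+ *     };
+ */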
+
+/**
+ * @ingroup Clocks
+ * @brief response to MRQ_CLK
+ *
+ * Each sub-command supported by @ref mrq_clk_request may return
+ * sub-command-specific data. Some do and some do not as indicated in
+ * the following table
+ *
+ * |sub-command |payload |
+ * |----------------------------|------------------------|
+ * |CMD_CLK_GET_RATE |clk_get_rate |
+ * |CMD_CLK_SET_RATE |clk_set_rate |
+ * |CMD_CLK_ROUND_RATE |clk_round_rate |
+ * |CMD_CLK_GET_PARENT |clk_get_parent |
+ * |CMD_CLK_SET_PARENT |clk_set_parent |
+ * |CMD_CLK_IS_ENABLED |clk_is_enabled |
+ * |CMD_CLK_ENABLE |- |
+ * |CMD_CLK_DISABLE |- |
+ * |CMD_CLK_GET_ALL_INFO |clk_get_all_info |
+ * |CMD_CLK_GET_MAX_CLK_ID |clk_get_max_clk_id |
+ *
+ */
+
+struct mrq_clk_response {
+ union {
+ struct cmd_clk_get_rate_response clk_get_rate;
+ struct cmd_clk_set_rate_response clk_set_rate;
+ struct cmd_clk_round_rate_response clk_round_rate;
+ struct cmd_clk_get_parent_response clk_get_parent;
+ struct cmd_clk_set_parent_response clk_set_parent;
+ /** @private */
+ struct cmd_clk_enable_response clk_enable;
+ /** @private */
+ struct cmd_clk_disable_response clk_disable;
+ struct cmd_clk_is_enabled_response clk_is_enabled;
+ struct cmd_clk_get_all_info_response clk_get_all_info;
+ struct cmd_clk_get_max_clk_id_response clk_get_max_clk_id;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_QUERY_ABI
+ * @brief check if an MRQ is implemented
+ *
+ * * Platforms: All
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: @ref mrq_query_abi_request
+ * * Response Payload: @ref mrq_query_abi_response
+ */
+
+/**
+ * @ingroup ABI_info
+ * @brief request with MRQ_QUERY_ABI
+ *
+ * Used by #MRQ_QUERY_ABI call to check if MRQ code #mrq is supported
+ * by the recipient.
+ */
+struct mrq_query_abi_request {
+ /** @brief MRQ code to query */
+ uint32_t mrq;
+} __ABI_PACKED;
+
+/**
+ * @ingroup ABI_info
+ * @brief response to MRQ_QUERY_ABI
+ */
+struct mrq_query_abi_response {
+ /** @brief 0 if queried MRQ is supported. Else, -#BPMP_ENODEV */
+ int32_t status;
+} __ABI_PACKED;
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_READ_STATE
+ * @brief read the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_read_state_request
+ * * Response Payload: @ref mrq_pg_read_state_response
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_READ_STATE
+ *
+ * Used by MRQ_PG_READ_STATE call to read the current state of a
+ * partition.
+ */
+struct mrq_pg_read_state_request {
+ /** @brief ID of partition */
+ uint32_t partition_id;
+} __ABI_PACKED;
+
+/**
+ * @brief response to MRQ_PG_READ_STATE
+ * @todo define possible errors.
+ */
+struct mrq_pg_read_state_response {
+ /** @brief read as don't care */
+ uint32_t sram_state;
+ /** @brief state of power partition
+ * * 0 : off
+ * * 1 : on
+ */
+ uint32_t logic_state;
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_PG_UPDATE_STATE
+ * @brief modify the power-gating state of a partition
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_pg_update_state_request
+ * * Response Payload: N/A
+ * @addtogroup Powergating
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_PG_UPDATE_STATE
+ *
+ * Used by #MRQ_PG_UPDATE_STATE call to request BPMP to change the
+ * state of a power partition #partition_id.
+ */
+struct mrq_pg_update_state_request {
+ /** @brief ID of partition */
+ uint32_t partition_id;
+ /** @brief secondary control of power partition
+ * @details Ignored by many versions of the BPMP
+ * firmware. For maximum compatibility, set the value
+ * according to @ref logic_state
+ * * 0x1: power ON partition (@ref logic_state == 0x3)
+ * * 0x3: power OFF partition (@ref logic_state == 0x1)
+ */
+ uint32_t sram_state;
+ /** @brief controls state of power partition, legal values are
+ * * 0x1 : power OFF partition
+ * * 0x3 : power ON partition
+ */
+ uint32_t logic_state;
+ /** @brief change state of clocks of the power partition, legal values
+ * * 0x0 : do not change clock state
+ * * 0x1 : disable partition clocks (only applicable when
+ * @ref logic_state == 0x1)
+ * * 0x3 : enable partition clocks (only applicable when
+ * @ref logic_state == 0x3)
+ */
+ uint32_t clock_state;
+} __ABI_PACKED;
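+
+/*
+ * Illustrative sketch (not part of the ABI), following the field
+ * descriptions above: a request that powers a partition ON with its
+ * clocks enabled. part_id is hypothetical and the sram_state value
+ * simply mirrors the pairing given in the comments above.
+ *
+ *     struct mrq_pg_update_state_request req = {
+ *             .partition_id = part_id,
+ *             .sram_state = 0x1,
+ *             .logic_state = 0x3,
+ *             .clock_state = 0x3,
+ *     };
+ */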
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_THERMAL
+ * @brief interact with BPMP thermal framework
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: Any
+ * * Request Payload: TODO
+ * * Response Payload: TODO
+ *
+ * @addtogroup Thermal
+ *
+ * The BPMP firmware includes a thermal framework. Drivers within the
+ * bpmp firmware register with the framework to provide thermal
+ * zones. Each thermal zone corresponds to an entity whose temperature
+ * can be measured. The framework also has a notion of trip points. A
+ * trip point consists of a thermal zone id, a temperature, and a
+ * callback routine. The framework invokes the callback when the zone
+ * hits the indicated temperature. The BPMP firmware uses this thermal
+ * framework internally to implement various temperature-dependent
+ * functions.
+ *
+ * Software on the CPU can use #MRQ_THERMAL (with payload @ref
+ * mrq_thermal_host_to_bpmp_request) to interact with the BPMP thermal
+ * framework. It can query the number of supported zones,
+ * query zone temperatures, and set trip points.
+ *
+ * When a trip point set by the CPU gets crossed, BPMP firmware issues
+ * an IPC to the CPU having mrq_request::mrq = #MRQ_THERMAL and a
+ * payload of @ref mrq_thermal_bpmp_to_host_request.
+ * @{
+ */
+enum mrq_thermal_host_to_bpmp_cmd {
+ /**
+ * @brief Check whether the BPMP driver supports the specified
+ * request type.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is 0 if the specified request is
+ * supported and -#BPMP_ENODEV otherwise.
+ */
+ CMD_THERMAL_QUERY_ABI = 0,
+
+ /**
+ * @brief Get the current temperature of the specified zone.
+ *
+ * Host needs to supply request parameters.
+ *
+ * mrq_response::err is
+ * * 0: Temperature query succeeded.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_EFAULT: Problem reading temperature measurement.
+ */
+ CMD_THERMAL_GET_TEMP = 1,
+
+ /**
+ * @brief Enable or disable and set the lower and upper
+ * thermal limits for a thermal trip point. Each zone has
+ * one trip point.
+ *
+ * Host needs to supply request parameters. Once the
+ * temperature hits a trip point, the BPMP will send a message
+ * to the CPU having MRQ=MRQ_THERMAL and
+ * type=CMD_THERMAL_HOST_TRIP_REACHED
+ *
+ * mrq_response::err is
+ * * 0: Trip successfully set.
+ * * -#BPMP_EINVAL: Invalid request parameters.
+ * * -#BPMP_ENOENT: No driver registered for thermal zone.
+ * * -#BPMP_EFAULT: Problem setting trip point.
+ */
+ CMD_THERMAL_SET_TRIP = 2,
+
+ /**
+ * @brief Get the number of supported thermal zones.
+ *
+ * No request parameters required.
+ *
+ * mrq_response::err is always 0, indicating success.
+ */
+ CMD_THERMAL_GET_NUM_ZONES = 3,
+
+ /** @brief number of supported host-to-bpmp commands. May
+ * increase in future
+ */
+ CMD_THERMAL_HOST_TO_BPMP_NUM
+};
+
+enum mrq_thermal_bpmp_to_host_cmd {
+ /**
+ * @brief Indication that the temperature for a zone has
+ * exceeded the range indicated in the thermal trip point
+ * for the zone.
+ *
+ * BPMP needs to supply request parameters. Host only needs to
+ * acknowledge.
+ */
+ CMD_THERMAL_HOST_TRIP_REACHED = 100,
+
+ /** @brief number of supported bpmp-to-host commands. May
+ * increase in future
+ */
+ CMD_THERMAL_BPMP_TO_HOST_NUM
+};
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_QUERY_ABI
+ *
+ * type: Request type for which to check existence.
+ */
+struct cmd_thermal_query_abi_request {
+ uint32_t type;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_GET_TEMP
+ *
+ * zone: Number of thermal zone.
+ */
+struct cmd_thermal_get_temp_request {
+ uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request CMD_THERMAL_GET_TEMP
+ *
+ * error: 0 if request succeeded.
+ * -BPMP_EINVAL if request parameters were invalid.
+ * -BPMP_ENOENT if no driver was registered for the specified thermal zone.
+ * -BPMP_EFAULT for other thermal zone driver errors.
+ * temp: Current temperature in millicelsius.
+ */
+struct cmd_thermal_get_temp_response {
+ int32_t temp;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data for request type CMD_THERMAL_SET_TRIP
+ *
+ * zone: Number of thermal zone.
+ * low: Temperature of lower trip point in millicelsius
+ * high: Temperature of upper trip point in millicelsius
+ * enabled: 1 to enable trip point, 0 to disable trip point
+ */
+struct cmd_thermal_set_trip_request {
+ uint32_t zone;
+ int32_t low;
+ int32_t high;
+ uint32_t enabled;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host request data for request type CMD_THERMAL_HOST_TRIP_REACHED
+ *
+ * zone: Number of thermal zone where trip point was reached.
+ */
+struct cmd_thermal_host_trip_reached_request {
+ uint32_t zone;
+} __ABI_PACKED;
+
+/*
+ * BPMP->Host reply data for request type CMD_THERMAL_GET_NUM_ZONES
+ *
+ * num: Number of supported thermal zones. The thermal zones are indexed
+ * starting from zero.
+ */
+struct cmd_thermal_get_num_zones_response {
+ uint32_t num;
+} __ABI_PACKED;
+
+/*
+ * Host->BPMP request data.
+ *
+ * Reply type is union mrq_thermal_bpmp_to_host_response.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_host_to_bpmp_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_host_to_bpmp_request {
+ uint32_t type;
+ union {
+ struct cmd_thermal_query_abi_request query_abi;
+ struct cmd_thermal_get_temp_request get_temp;
+ struct cmd_thermal_set_trip_request set_trip;
+ } __UNION_ANON;
+} __ABI_PACKED;
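+
+/*
+ * Illustrative sketch (not part of the ABI): a CMD_THERMAL_GET_TEMP request
+ * for a hypothetical zone index zone_idx.
+ *
+ *     struct mrq_thermal_host_to_bpmp_request req = {
+ *             .type = CMD_THERMAL_GET_TEMP,
+ *             .get_temp = {
+ *                     .zone = zone_idx,
+ *             },
+ *     };
+ */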
+
+/*
+ * BPMP->Host request data.
+ *
+ * type: Type of request. Values listed in enum mrq_thermal_bpmp_to_host_cmd.
+ * data: Request type specific parameters.
+ */
+struct mrq_thermal_bpmp_to_host_request {
+ uint32_t type;
+ union {
+ struct cmd_thermal_host_trip_reached_request host_trip_reached;
+ } __UNION_ANON;
+} __ABI_PACKED;
+
+/*
+ * Data in reply to a Host->BPMP request.
+ */
+union mrq_thermal_bpmp_to_host_response {
+ struct cmd_thermal_get_temp_response get_temp;
+ struct cmd_thermal_get_num_zones_response get_num_zones;
+} __ABI_PACKED;
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_CPU_VHINT
+ * @brief Query CPU voltage hint data
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_cpu_vhint_request
+ * * Response Payload: N/A
+ *
+ * @addtogroup Vhint CPU Voltage hint
+ * @{
+ */
+
+/**
+ * @brief request with #MRQ_CPU_VHINT
+ *
+ * Used by the #MRQ_CPU_VHINT call from CCPLEX to retrieve voltage hint
+ * data from BPMP into the memory pointed to by #addr. CCPLEX is
+ * responsible for allocating a sizeof(struct cpu_vhint_data) sized block
+ * of memory and mapping it appropriately for BPMP before sending the request.
+ */
+struct mrq_cpu_vhint_request {
+ /** @brief IOVA address for the #cpu_vhint_data */
+ uint32_t addr; /* struct cpu_vhint_data * */
+ /** @brief ID of the cluster whose data is requested */
+ uint32_t cluster_id; /* enum cluster_id */
+} __ABI_PACKED;
+
+/**
+ * @brief description of the CPU v/f relation
+ *
+ * Used by #MRQ_CPU_VHINT call to carry data pointed by #addr of
+ * struct mrq_cpu_vhint_request
+ */
+struct cpu_vhint_data {
+ uint32_t ref_clk_hz; /**< reference frequency in Hz */
+ uint16_t pdiv; /**< post divider value */
+ uint16_t mdiv; /**< input divider value */
+ uint16_t ndiv_max; /**< fMAX expressed with max NDIV value */
+ /** table of ndiv values as a function of vINDEX (voltage index) */
+ uint16_t ndiv[80];
+ /** minimum allowed NDIV value */
+ uint16_t ndiv_min;
+ /** minimum allowed voltage hint value (as in vINDEX) */
+ uint16_t vfloor;
+ /** maximum allowed voltage hint value (as in vINDEX) */
+ uint16_t vceil;
+ /** post-multiplier for vindex value */
+ uint16_t vindex_mult;
+ /** post-divider for vindex value */
+ uint16_t vindex_div;
+ /** reserved for future use */
+ uint16_t reserved[328];
+} __ABI_PACKED;
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_ABI_RATCHET
+ * @brief ABI ratchet value query
+ *
+ * * Platforms: T186
+ * * Initiators: Any
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_abi_ratchet_request
+ * * Response Payload: @ref mrq_abi_ratchet_response
+ * @addtogroup ABI_info
+ * @{
+ */
+
+/**
+ * @brief an ABI compatibility mechanism
+ *
+ * BPMP_ABI_RATCHET_VALUE may increase in a future revision of this
+ * header file for any of the following reasons:
+ * 1. The future revision deprecates some MRQ.
+ * 2. The future revision introduces a breaking change to an existing
+ *    MRQ.
+ * 3. A bug is discovered in an existing implementation of the BPMP-FW
+ *    (or possibly one of its clients) which warrants deprecating that
+ *    implementation.
+ */
+#define BPMP_ABI_RATCHET_VALUE 3
+
+/**
+ * @brief request with #MRQ_ABI_RATCHET.
+ *
+ * #ratchet should be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which the requester was compiled.
+ *
+ * If ratchet is less than BPMP's #BPMP_ABI_RATCHET_VALUE, BPMP may
+ * reply with mrq_response::err = -#BPMP_ERANGE to indicate that
+ * BPMP-FW cannot interoperate correctly with the requester. Requester
+ * should cease further communication with BPMP.
+ *
+ * Otherwise, err shall be 0.
+ */
+struct mrq_abi_ratchet_request {
+ /** @brief requester's ratchet value */
+ uint16_t ratchet;
+};
+
+/**
+ * @brief response to #MRQ_ABI_RATCHET
+ *
+ * #ratchet shall be #BPMP_ABI_RATCHET_VALUE from the ABI header
+ * against which BPMP firmware was compiled.
+ *
+ * If #ratchet is less than the requester's #BPMP_ABI_RATCHET_VALUE,
+ * the requester must either interoperate with BPMP according to an ABI
+ * header version with BPMP_ABI_RATCHET_VALUE = ratchet or cease
+ * communication with BPMP.
+ *
+ * If mrq_response::err is 0 and ratchet is greater than or equal to the
+ * requester's BPMP_ABI_RATCHET_VALUE, the requester should continue
+ * normal operation.
+ */
+struct mrq_abi_ratchet_response {
+ /** @brief BPMP's ratchet value */
+ uint16_t ratchet;
+};
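+
+/*
+ * Illustrative sketch (not part of the ABI) of the requester-side check
+ * described above. err is mrq_response::err and ratchet is
+ * mrq_abi_ratchet_response::ratchet; the helper name is hypothetical.
+ *
+ *     static int can_use_this_abi(int32_t err, uint16_t ratchet)
+ *     {
+ *             return err == 0 && ratchet >= BPMP_ABI_RATCHET_VALUE;
+ *     }
+ */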
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_EMC_DVFS_LATENCY
+ * @brief query frequency dependent EMC DVFS latency
+ *
+ * * Platforms: T186
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: N/A
+ * * Response Payload: @ref mrq_emc_dvfs_latency_response
+ * @addtogroup EMC
+ * @{
+ */
+
+/**
+ * @brief used by @ref mrq_emc_dvfs_latency_response
+ */
+struct emc_dvfs_latency {
+ /** @brief EMC frequency in kHz */
+ uint32_t freq;
+ /** @brief EMC DVFS latency in nanoseconds */
+ uint32_t latency;
+} __ABI_PACKED;
+
+#define EMC_DVFS_LATENCY_MAX_SIZE 14
+/**
+ * @brief response to #MRQ_EMC_DVFS_LATENCY
+ */
+struct mrq_emc_dvfs_latency_response {
+ /** @brief the number of valid entries in #pairs */
+ uint32_t num_pairs;
+ /** @brief EMC <frequency, latency> information */
+ struct emc_dvfs_latency pairs[EMC_DVFS_LATENCY_MAX_SIZE];
+} __ABI_PACKED;
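+
+/*
+ * Illustrative sketch (not part of the ABI): walking the <frequency, latency>
+ * pairs in a response. use_pair() is a hypothetical consumer.
+ *
+ *     uint32_t i;
+ *
+ *     for (i = 0; i < resp.num_pairs && i < EMC_DVFS_LATENCY_MAX_SIZE; i++)
+ *             use_pair(resp.pairs[i].freq, resp.pairs[i].latency);
+ */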
+
+/** @} */
+
+/**
+ * @ingroup MRQ_Codes
+ * @def MRQ_TRACE_ITER
+ * @brief manage the trace iterator
+ *
+ * * Platforms: All
+ * * Initiators: CCPLEX
+ * * Targets: BPMP
+ * * Request Payload: @ref mrq_trace_iter_request
+ * * Response Payload: N/A
+ * @addtogroup Trace
+ * @{
+ */
+enum {
+ /** @brief (re)start the tracing now. Ignore older events */
+ TRACE_ITER_INIT = 0,
+ /** @brief clobber all events in the trace buffer */
+ TRACE_ITER_CLEAN = 1
+};
+
+/**
+ * @brief request with #MRQ_TRACE_ITER
+ */
+struct mrq_trace_iter_request {
+ /** @brief TRACE_ITER_INIT or TRACE_ITER_CLEAN */
+ uint32_t cmd;
+} __ABI_PACKED;
+
+/** @} */
+
+/*
+ * 4. Enumerations
+ */
+
+/*
+ * 4.1 CPU enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ * 4.2 CPU Cluster enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ *
+ * 4.3 System low power state enumerations
+ *
+ * See <mach-t186/system-t186.h>
+ */
+
+/*
+ * 4.4 Clock enumerations
+ *
+ * For clock enumerations, see <mach-t186/clk-t186.h>
+ */
+
+/*
+ * 4.5 Reset enumerations
+ *
+ * For reset enumerations, see <mach-t186/reset-t186.h>
+ */
+
+/*
+ * 4.6 Thermal sensor enumerations
+ *
+ * For thermal sensor enumerations, see <mach-t186/thermal-t186.h>
+ */
+
+/**
+ * @defgroup Error_Codes
+ * Negative values for mrq_response::err generally indicate some
+ * error. The ABI defines the following error codes. Negating these
+ * defines is an exercise left to the user.
+ * @{
+ */
+/** @brief No such file or directory */
+#define BPMP_ENOENT 2
+/** @brief No MRQ handler */
+#define BPMP_ENOHANDLER 3
+/** @brief I/O error */
+#define BPMP_EIO 5
+/** @brief Bad sub-MRQ command */
+#define BPMP_EBADCMD 6
+/** @brief Not enough memory */
+#define BPMP_ENOMEM 12
+/** @brief Permission denied */
+#define BPMP_EACCES 13
+/** @brief Bad address */
+#define BPMP_EFAULT 14
+/** @brief No such device */
+#define BPMP_ENODEV 19
+/** @brief Argument is a directory */
+#define BPMP_EISDIR 21
+/** @brief Invalid argument */
+#define BPMP_EINVAL 22
+/** @brief Timeout during operation */
+#define BPMP_ETIMEDOUT 23
+/** @brief Out of range */
+#define BPMP_ERANGE 34
+/** @} */
+/** @} */
+#endif
diff --git a/include/soc/tegra/bpmp.h b/include/soc/tegra/bpmp.h
new file mode 100644
index 000000000000..13dcd44e91bb
--- /dev/null
+++ b/include/soc/tegra/bpmp.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __SOC_TEGRA_BPMP_H
+#define __SOC_TEGRA_BPMP_H
+
+#include <linux/mailbox_client.h>
+#include <linux/reset-controller.h>
+#include <linux/semaphore.h>
+#include <linux/types.h>
+
+#include <soc/tegra/bpmp-abi.h>
+
+struct tegra_bpmp_clk;
+
+struct tegra_bpmp_soc {
+ struct {
+ struct {
+ unsigned int offset;
+ unsigned int count;
+ unsigned int timeout;
+ } cpu_tx, thread, cpu_rx;
+ } channels;
+ unsigned int num_resets;
+};
+
+struct tegra_bpmp_mb_data {
+ u32 code;
+ u32 flags;
+ u8 data[MSG_DATA_MIN_SZ];
+} __packed;
+
+struct tegra_bpmp_channel {
+ struct tegra_bpmp *bpmp;
+ struct tegra_bpmp_mb_data *ib;
+ struct tegra_bpmp_mb_data *ob;
+ struct completion completion;
+ struct tegra_ivc *ivc;
+};
+
+typedef void (*tegra_bpmp_mrq_handler_t)(unsigned int mrq,
+ struct tegra_bpmp_channel *channel,
+ void *data);
+
+struct tegra_bpmp_mrq {
+ struct list_head list;
+ unsigned int mrq;
+ tegra_bpmp_mrq_handler_t handler;
+ void *data;
+};
+
+struct tegra_bpmp {
+ const struct tegra_bpmp_soc *soc;
+ struct device *dev;
+
+ struct {
+ struct gen_pool *pool;
+ dma_addr_t phys;
+ void *virt;
+ } tx, rx;
+
+ struct {
+ struct mbox_client client;
+ struct mbox_chan *channel;
+ } mbox;
+
+ struct tegra_bpmp_channel *channels;
+ unsigned int num_channels;
+
+ struct {
+ unsigned long *allocated;
+ unsigned long *busy;
+ unsigned int count;
+ struct semaphore lock;
+ } threaded;
+
+ struct list_head mrqs;
+ spinlock_t lock;
+
+ struct tegra_bpmp_clk **clocks;
+ unsigned int num_clocks;
+
+ struct reset_controller_dev rstc;
+};
+
+struct tegra_bpmp *tegra_bpmp_get(struct device *dev);
+void tegra_bpmp_put(struct tegra_bpmp *bpmp);
+
+struct tegra_bpmp_message {
+ unsigned int mrq;
+
+ struct {
+ const void *data;
+ size_t size;
+ } tx;
+
+ struct {
+ void *data;
+ size_t size;
+ } rx;
+};
+
+int tegra_bpmp_transfer_atomic(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg);
+int tegra_bpmp_transfer(struct tegra_bpmp *bpmp,
+ struct tegra_bpmp_message *msg);
+
+int tegra_bpmp_request_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ tegra_bpmp_mrq_handler_t handler, void *data);
+void tegra_bpmp_free_mrq(struct tegra_bpmp *bpmp, unsigned int mrq,
+ void *data);
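+
+/*
+ * Illustrative sketch (not part of this header): sending an MRQ_PING over
+ * the interfaces above, with mrq_ping_request/mrq_ping_response coming from
+ * soc/tegra/bpmp-abi.h. Error handling is omitted and bpmp is assumed to
+ * have been obtained via tegra_bpmp_get().
+ *
+ *     struct mrq_ping_request ping = { .challenge = 1 };
+ *     struct mrq_ping_response pong;
+ *     struct tegra_bpmp_message msg = {
+ *             .mrq = MRQ_PING,
+ *             .tx = { .data = &ping, .size = sizeof(ping) },
+ *             .rx = { .data = &pong, .size = sizeof(pong) },
+ *     };
+ *     int err = tegra_bpmp_transfer(bpmp, &msg);
+ */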
+
+#if IS_ENABLED(CONFIG_CLK_TEGRA_BPMP)
+int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_clocks(struct tegra_bpmp *bpmp)
+{
+ return 0;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_RESET_TEGRA_BPMP)
+int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp);
+#else
+static inline int tegra_bpmp_init_resets(struct tegra_bpmp *bpmp)
+{
+ return 0;
+}
+#endif
+
+#endif /* __SOC_TEGRA_BPMP_H */
diff --git a/include/soc/tegra/ivc.h b/include/soc/tegra/ivc.h
new file mode 100644
index 000000000000..b13cc43ad9d8
--- /dev/null
+++ b/include/soc/tegra/ivc.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_IVC_H
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+struct tegra_ivc_header;
+
+struct tegra_ivc {
+ struct device *peer;
+
+ struct {
+ struct tegra_ivc_header *channel;
+ unsigned int position;
+ dma_addr_t phys;
+ } rx, tx;
+
+ void (*notify)(struct tegra_ivc *ivc, void *data);
+ void *notify_data;
+
+ unsigned int num_frames;
+ size_t frame_size;
+};
+
+/**
+ * tegra_ivc_read_get_next_frame - Peek at the next frame to receive
+ * @ivc: pointer to the IVC channel
+ *
+ * Peek at the next frame to be received, without removing it from
+ * the queue.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_read_advance - Advance the read queue
+ * @ivc: pointer to the IVC channel
+ *
+ * Advance the read queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_read_advance(struct tegra_ivc *ivc);
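+
+/*
+ * Illustrative sketch (not part of this header): draining received frames by
+ * peeking and then advancing the read queue. IS_ERR() comes from
+ * <linux/err.h> and process_frame() is a hypothetical consumer.
+ *
+ *     void *frame;
+ *
+ *     while (!IS_ERR(frame = tegra_ivc_read_get_next_frame(ivc))) {
+ *             process_frame(frame, ivc->frame_size);
+ *             tegra_ivc_read_advance(ivc);
+ *     }
+ */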
+
+/**
+ * tegra_ivc_write_get_next_frame - Poke at the next frame to transmit
+ * @ivc: pointer to the IVC channel
+ *
+ * Get access to the next frame.
+ *
+ * Returns a pointer to the frame, or an error encoded pointer.
+ */
+void *tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_write_advance - Advance the write queue
+ * @ivc: pointer to the IVC channel
+ *
+ * Advance the write queue
+ *
+ * Returns 0, or a negative error value if failed.
+ */
+int tegra_ivc_write_advance(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_notified - handle internal messages
+ * @ivc: pointer to the IVC channel
+ *
+ * This function must be called following every notification.
+ *
+ * Returns 0 if the channel is ready for communication, or -EAGAIN if a channel
+ * reset is in progress.
+ */
+int tegra_ivc_notified(struct tegra_ivc *ivc);
+
+/**
+ * tegra_ivc_reset - initiates a reset of the shared memory state
+ * @ivc: pointer to the IVC channel
+ *
+ * This function must be called after a channel is reserved before it is used
+ * for communication. The channel is ready for use once a subsequent call to
+ * tegra_ivc_notified() (made after the remote has been notified of the reset)
+ * returns 0.
+ */
+void tegra_ivc_reset(struct tegra_ivc *ivc);
+
+size_t tegra_ivc_align(size_t size);
+unsigned tegra_ivc_total_queue_size(unsigned queue_size);
+int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
+ dma_addr_t rx_phys, void *tx, dma_addr_t tx_phys,
+ unsigned int num_frames, size_t frame_size,
+ void (*notify)(struct tegra_ivc *ivc, void *data),
+ void *data);
+void tegra_ivc_cleanup(struct tegra_ivc *ivc);
+
+#endif /* __TEGRA_IVC_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index fb8e3b6febdf..c2119008990a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -177,6 +177,7 @@ enum tcm_sense_reason_table {
TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED = R(0x15),
TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
+ TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
#undef R
};
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index dbfee7e86ba6..9b1462e38b82 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -730,10 +730,6 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect)
__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc)
#define __NR_pkey_free 290
__SYSCALL(__NR_pkey_free, sys_pkey_free)
-#define __NR_pkey_get 291
-//__SYSCALL(__NR_pkey_get, sys_pkey_get)
-#define __NR_pkey_set 292
-//__SYSCALL(__NR_pkey_set, sys_pkey_set)
#undef __NR_syscalls
#define __NR_syscalls 291
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6965d0909554..cd2be1c8e9fb 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -75,6 +75,7 @@ header-y += bpf_perf_event.h
header-y += bpf.h
header-y += bpqether.h
header-y += bsg.h
+header-y += bt-bmc.h
header-y += btrfs.h
header-y += can.h
header-y += capability.h
diff --git a/include/uapi/linux/bt-bmc.h b/include/uapi/linux/bt-bmc.h
new file mode 100644
index 000000000000..d9ec766a63d0
--- /dev/null
+++ b/include/uapi/linux/bt-bmc.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015-2016, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_BT_BMC_H
+#define _UAPI_LINUX_BT_BMC_H
+
+#include <linux/ioctl.h>
+
+#define __BT_BMC_IOCTL_MAGIC 0xb1
+#define BT_BMC_IOCTL_SMS_ATN _IO(__BT_BMC_IOCTL_MAGIC, 0x00)
+
+#endif /* _UAPI_LINUX_BT_BMC_H */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5df20d6d1520..29de1a9352c0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -228,7 +228,7 @@ static struct {
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = {.name = "cpu_hotplug.lock" },
+ .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d4129bb05e5d..f9ec9add2164 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -300,7 +300,8 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+ ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
+ &vma);
if (ret <= 0)
return ret;
@@ -1710,7 +1711,8 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
* but we treat this as a 'remote' access since it is
* essentially a kernel access to the memory.
*/
- result = get_user_pages_remote(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
+ NULL);
if (result < 0)
return result;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0c5f1a5db654..9c4d30483264 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -721,6 +721,7 @@ int irq_set_parent(int irq, int parent_irq)
irq_put_desc_unlock(desc, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
/*
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index d5e397315473..de08fc90baaf 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1769,6 +1769,10 @@ static size_t log_output(int facility, int level, enum log_flags lflags, const c
cont_flush();
}
+ /* Skip empty continuation lines that couldn't be added - they just flush */
+ if (!text_len && (lflags & LOG_CONT))
+ return 0;
+
/* If it doesn't end in a newline, try to buffer the current line */
if (!(lflags & LOG_NEWLINE)) {
if (cont_add(facility, level, lflags, text, text_len))
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2a99027312a6..e6474f7272ec 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
int this_len, retval;
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
- retval = access_process_vm(tsk, src, buf, this_len, 0);
+ retval = access_process_vm(tsk, src, buf, this_len, FOLL_FORCE);
if (!retval) {
if (copied)
break;
@@ -564,7 +564,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
if (copy_from_user(buf, src, this_len))
return -EFAULT;
- retval = access_process_vm(tsk, dst, buf, this_len, 1);
+ retval = access_process_vm(tsk, dst, buf, this_len,
+ FOLL_FORCE | FOLL_WRITE);
if (!retval) {
if (copied)
break;
@@ -1127,7 +1128,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
unsigned long tmp;
int copied;
- copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
+ copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), FOLL_FORCE);
if (copied != sizeof(tmp))
return -EIO;
return put_user(tmp, (unsigned long __user *)data);
@@ -1138,7 +1139,8 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
{
int copied;
- copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
+ copied = access_process_vm(tsk, addr, &data, sizeof(data),
+ FOLL_FORCE | FOLL_WRITE);
return (copied == sizeof(data)) ? 0 : -EIO;
}
@@ -1155,7 +1157,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
switch (request) {
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA:
- ret = access_process_vm(child, addr, &word, sizeof(word), 0);
+ ret = access_process_vm(child, addr, &word, sizeof(word),
+ FOLL_FORCE);
if (ret != sizeof(word))
ret = -EIO;
else
@@ -1164,7 +1167,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA:
- ret = access_process_vm(child, addr, &data, sizeof(data), 1);
+ ret = access_process_vm(child, addr, &data, sizeof(data),
+ FOLL_FORCE | FOLL_WRITE);
ret = (ret != sizeof(data) ? -EIO : 0);
break;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2d4ad72f8f3c..d941c97dfbc3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -690,7 +690,14 @@ void init_entity_runnable_average(struct sched_entity *se)
* will definitely be update (after enqueue).
*/
sa->period_contrib = 1023;
- sa->load_avg = scale_load_down(se->load.weight);
+ /*
+ * Tasks are initialized with full load to be seen as heavy tasks until
+ * they get a chance to stabilize to their real load level.
+ * Group entities are initialized with zero load to reflect the fact that
+ * nothing has been attached to the task group yet.
+ */
+ if (entity_is_task(se))
+ sa->load_avg = scale_load_down(se->load.weight);
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* At this point, util_avg won't be used in select_task_rq_fair anyway
@@ -5471,13 +5478,18 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
*/
static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
- struct sched_domain *this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
- u64 avg_idle = this_rq()->avg_idle;
- u64 avg_cost = this_sd->avg_scan_cost;
+ struct sched_domain *this_sd;
+ u64 avg_cost, avg_idle = this_rq()->avg_idle;
u64 time, cost;
s64 delta;
int cpu, wrap;
+ this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
+ if (!this_sd)
+ return -1;
+
+ avg_cost = this_sd->avg_scan_cost;
+
/*
* Due to large variance we need a large fuzz factor; hackbench in
* particularly is sensitive here.
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index c3aad685bbc0..12dd190634ab 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -542,7 +542,6 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
static int alarm_timer_create(struct k_itimer *new_timer)
{
enum alarmtimer_type type;
- struct alarm_base *base;
if (!alarmtimer_get_rtcdev())
return -ENOTSUPP;
@@ -551,7 +550,6 @@ static int alarm_timer_create(struct k_itimer *new_timer)
return -EPERM;
type = clock2alarm(new_timer->it_clock);
- base = &alarm_bases[type];
alarm_init(&new_timer->it.alarm.alarmtimer, type, alarm_handle_timer);
return 0;
}
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 381bb07ed14f..db77dcb38afd 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
* get_vaddr_frames() - map virtual addresses to pfns
* @start: starting user address
* @nr_frames: number of pages / pfns from start to map
- * @write: whether pages will be written to by the caller
- * @force: whether to force write access even if user mapping is
- * readonly. See description of the same argument of
- get_user_pages().
+ * @gup_flags: flags modifying lookup behaviour
* @vec: structure which receives pages / pfns of the addresses mapped.
* It should have space for at least nr_frames entries.
*
@@ -34,7 +31,7 @@
* This function takes care of grabbing mmap_sem as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
- bool write, bool force, struct frame_vector *vec)
+ unsigned int gup_flags, struct frame_vector *vec)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
vec->got_ref = true;
vec->is_pfns = false;
ret = get_user_pages_locked(start, nr_frames,
- write, force, (struct page **)(vec->ptrs), &locked);
+ gup_flags, (struct page **)(vec->ptrs), &locked);
goto out;
}
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2fd0fbd..7aa113c2d373 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
return -EEXIST;
}
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
@@ -95,7 +105,7 @@ retry:
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
- *flags &= ~FOLL_WRITE;
+ *flags |= FOLL_COW;
return 0;
}
@@ -729,7 +739,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
- int write, int force,
struct page **pages,
struct vm_area_struct **vmas,
int *locked, bool notify_drop,
@@ -747,10 +756,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
if (pages)
flags |= FOLL_GET;
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
pages_done = 0;
lock_dropped = false;
@@ -843,12 +848,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
* up_read(&mm->mmap_sem);
*/
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- write, force, pages, NULL, locked, true,
- FOLL_TOUCH);
+ pages, NULL, locked, true,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
@@ -864,14 +869,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
*/
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
int locked = 1;
+
down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, NULL, &locked, false, gup_flags);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+ &locked, false, gup_flags);
if (locked)
up_read(&mm->mmap_sem);
return ret;
@@ -896,10 +901,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
* "force" parameter).
*/
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- write, force, pages, FOLL_TOUCH);
+ pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -910,9 +915,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* @mm: mm_struct of target mm
* @start: starting user address
* @nr_pages: number of pages from start to pin
- * @write: whether pages will be written to by the caller
- * @force: whether to force access even when user mapping is currently
- * protected (but never forces write access to shared mapping).
+ * @gup_flags: flags modifying lookup behaviour
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long. Or NULL, if caller
* only intends to ensure the pages are faulted in.
@@ -941,9 +944,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* or similar operation cannot guarantee anything stronger anyway because
* locks can't be held over the syscall boundary.
*
- * If write=0, the page must not be written to. If the page is written to,
- * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
- * after the page is finished with, and before put_page is called.
+ * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
+ * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
+ * be called after the page is finished with, and before put_page is called.
*
* get_user_pages is typically used for fewer-copy IO operations, to get a
* handle on the memory by some means other than accesses via the user virtual
@@ -960,12 +963,12 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
*/
long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, vmas, NULL, false,
- FOLL_TOUCH | FOLL_REMOTE);
+ return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
+ NULL, false,
+ gup_flags | FOLL_TOUCH | FOLL_REMOTE);
}
EXPORT_SYMBOL(get_user_pages_remote);
@@ -976,12 +979,12 @@ EXPORT_SYMBOL(get_user_pages_remote);
* obviously don't pass FOLL_REMOTE in here.
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
return __get_user_pages_locked(current, current->mm, start, nr_pages,
- write, force, pages, vmas, NULL, false,
- FOLL_TOUCH);
+ pages, vmas, NULL, false,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
@@ -1505,7 +1508,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
- ret = get_user_pages_unlocked(start, nr_pages - nr, write, 0, pages);
+ ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
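
As a usage illustration of the rule spelled out in the updated comment above (a FOLL_WRITE pin must be followed by set_page_dirty or set_page_dirty_lock before put_page), here is a minimal sketch of a caller on the new signature. The function and variable names are invented for the example, and it assumes addr .. addr + len stays within one page:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative only, not part of this patch. */
static int example_write_user_page(unsigned long addr, const void *src,
				   size_t len)
{
	struct page *page;
	void *kaddr;
	long ret;

	ret = get_user_pages(addr & PAGE_MASK, 1, FOLL_WRITE, &page, NULL);
	if (ret < 1)
		return ret < 0 ? (int)ret : -EFAULT;

	kaddr = kmap(page);
	memcpy(kaddr + (addr & ~PAGE_MASK), src, len);
	kunmap(page);

	set_page_dirty_lock(page);	/* required because FOLL_WRITE was used */
	put_page(page);
	return 0;
}
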
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 88af13c00d3c..70c009741aab 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -34,6 +34,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
+#include <linux/bug.h>
#include "kasan.h"
#include "../slab.h"
@@ -62,7 +63,7 @@ void kasan_unpoison_shadow(const void *address, size_t size)
}
}
-static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
void *base = task_stack_page(task);
size_t size = sp - base;
@@ -77,9 +78,24 @@ void kasan_unpoison_task_stack(struct task_struct *task)
}
/* Unpoison the stack for the current task beyond a watermark sp value. */
-asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
- __kasan_unpoison_stack(current, sp);
+ __kasan_unpoison_stack(current, watermark);
+}
+
+/*
+ * Clear all poison for the region between the current SP and a provided
+ * watermark value, as is sometimes required prior to hand-crafted asm function
+ * returns in the middle of functions.
+ */
+void kasan_unpoison_stack_above_sp_to(const void *watermark)
+{
+ const void *sp = __builtin_frame_address(0);
+ size_t size = watermark - sp;
+
+ if (WARN_ON(sp > watermark))
+ return;
+ kasan_unpoison_shadow(sp, size);
}
/*
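
The new helper's comment describes its intended caller: code that captured a stack pointer earlier and is about to transfer control past the current frame from hand-written assembly, so the ordinary epilogue-based unpoisoning never runs for the abandoned frames. A rough sketch of that caller shape, with entirely hypothetical names and assuming the declaration is visible via linux/kasan.h:

#include <linux/kasan.h>

/* Watermark captured before entering the deep call chain (illustrative). */
static void *example_saved_sp;

/*
 * Hypothetical caller shape, not part of this patch: everything between the
 * current stack pointer and the saved watermark is about to be abandoned by
 * an asm-level return, so its shadow poison must be cleared by hand first.
 */
static void example_unwind_to_saved_frame(void)
{
	kasan_unpoison_stack_above_sp_to(example_saved_sp);
	/* ...asm-assisted transfer back to the frame that recorded the sp... */
}
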
diff --git a/mm/memory.c b/mm/memory.c
index fc1987dfd8cc..e18c57bdc75c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3869,10 +3869,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
void *old_buf = buf;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
@@ -3882,7 +3883,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct page *page = NULL;
ret = get_user_pages_remote(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+ gup_flags, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
@@ -3934,14 +3935,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
@@ -3950,7 +3951,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
* Do not walk the page table directly, use get_user_pages
*/
int access_process_vm(struct task_struct *tsk, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
struct mm_struct *mm;
int ret;
@@ -3959,7 +3960,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
if (!mm)
return 0;
- ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
+
mmput(mm);
return ret;
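
With access_process_vm() now taking gup_flags, the caller chooses the access mode explicitly. A hedged sketch of a debugger-style peek/poke pair on the new signature; the names are illustrative, and the flag choices follow the pattern used by the ptrace conversions elsewhere in this series (FOLL_FORCE for reads, FOLL_FORCE | FOLL_WRITE for writes):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative only, not part of this patch. */
static int example_peek(struct task_struct *child, unsigned long addr,
			unsigned long *val)
{
	int copied = access_process_vm(child, addr, val, sizeof(*val),
				       FOLL_FORCE);

	return copied == sizeof(*val) ? 0 : -EIO;
}

static int example_poke(struct task_struct *child, unsigned long addr,
			unsigned long val)
{
	int copied = access_process_vm(child, addr, &val, sizeof(val),
				       FOLL_FORCE | FOLL_WRITE);

	return copied == sizeof(val) ? 0 : -EIO;
}
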
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ad1c96ac313c..0b859af06b87 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -850,7 +850,7 @@ static int lookup_node(unsigned long addr)
struct page *p;
int err;
- err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+ err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bcdbe62f3e6d..11936526b08b 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -25,7 +25,6 @@
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
-#include <linux/pkeys.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 95daf81a4855..db5fd1795298 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -160,33 +160,25 @@ finish_or_fault:
* - don't permit access to VMAs that don't support it, such as I/O mappings
*/
long get_user_pages(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- int flags = 0;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(current, current->mm, start, nr_pages, flags,
- pages, vmas, NULL);
+ return __get_user_pages(current, current->mm, start, nr_pages,
+ gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
- return get_user_pages(start, nr_pages, write, force, pages, NULL);
+ return get_user_pages(start, nr_pages, gup_flags, pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
down_read(&mm->mmap_sem);
@@ -198,10 +190,10 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
- write, force, pages, 0);
+ pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -1817,9 +1809,10 @@ void filemap_map_pages(struct fault_env *fe,
EXPORT_SYMBOL(filemap_map_pages);
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
@@ -1854,21 +1847,22 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
* Access another process' address space.
* - source/target buffer must be kernel space
*/
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
+ unsigned int gup_flags)
{
struct mm_struct *mm;
@@ -1879,7 +1873,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (!mm)
return 0;
- len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ len = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
mmput(mm);
return len;
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 07514d41ebcc..be8dc8d1edb9 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *);
+ unsigned int flags = FOLL_REMOTE;
/* Work out address and page range required */
if (len == 0)
return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
+ if (vm_write)
+ flags |= FOLL_WRITE;
+
while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop);
size_t bytes;
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
* current/current->mm
*/
pages = __get_user_pages_unlocked(task, mm, pa, pages,
- vm_write, 0, process_pages,
- FOLL_REMOTE);
+ process_pages, flags);
if (pages <= 0)
return -EFAULT;
diff --git a/mm/util.c b/mm/util.c
index 662cddf914af..1a41553db866 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,8 +230,10 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
}
/* Check if the vma is being used as a stack by this task */
-int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
+int vma_is_stack_for_current(struct vm_area_struct *vma)
{
+ struct task_struct * __maybe_unused t = current;
+
return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
@@ -283,7 +285,8 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages)
{
- return get_user_pages_unlocked(start, nr_pages, write, 0, pages);
+ return get_user_pages_unlocked(start, nr_pages, pages,
+ write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
@@ -623,7 +626,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len > buflen)
len = buflen;
- res = access_process_vm(task, arg_start, buffer, len, 0);
+ res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);
/*
* If the nul at the end of args has been overwritten, then
@@ -638,7 +641,8 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len > buflen - res)
len = buflen - res;
res += access_process_vm(task, env_start,
- buffer+res, len, 0);
+ buffer+res, len,
+ FOLL_FORCE);
res = strnlen(buffer, res);
}
}
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 00d2601407c5..1a7c9a79a53c 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
while (got < num_pages) {
rc = get_user_pages_unlocked(
(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
- num_pages - got, write_page, 0, pages + got);
+ num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
if (rc < 0)
break;
BUG_ON(rc == 0);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 085057936287..09fd6108e421 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3557,7 +3557,7 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
} else if (!vma->vm_file &&
((vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) ||
- vma_is_stack_for_task(vma, current))) {
+ vma_is_stack_for_current(vma))) {
rc = current_has_perm(current, PROCESS__EXECSTACK);
} else if (vma->vm_file && vma->anon_vma) {
/*
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ade7c6cad172..682b73af7766 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* the execve().
*/
if (get_user_pages_remote(current, bprm->mm, pos, 1,
- 0, 1, &page, NULL) <= 0)
+ FOLL_FORCE, &page, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 1188bc849ee3..a39629206864 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -194,6 +194,8 @@
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index c0c0b265e88e..b63a31be1218 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -98,6 +98,15 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
*type = INSN_FP_SETUP;
break;
+ case 0x8d:
+	if (insn.rex_prefix.nbytes &&
+ insn.rex_prefix.bytes[0] == 0x48 &&
+ insn.modrm.nbytes && insn.modrm.bytes[0] == 0x2c &&
+ insn.sib.nbytes && insn.sib.bytes[0] == 0x24)
+	/* lea (%rsp), %rbp */
+ *type = INSN_FP_SETUP;
+ break;
+
case 0x90:
*type = INSN_NOP;
break;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 143b6cdd7f06..4490601a9235 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -97,6 +97,19 @@ static struct instruction *next_insn_same_sec(struct objtool_file *file,
return next;
}
+static bool gcov_enabled(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *sym;
+
+ list_for_each_entry(sec, &file->elf->sections, list)
+ list_for_each_entry(sym, &sec->symbol_list, list)
+ if (!strncmp(sym->name, "__gcov_.", 8))
+ return true;
+
+ return false;
+}
+
#define for_each_insn(file, insn) \
list_for_each_entry(insn, &file->insn_list, list)
@@ -713,6 +726,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
struct instruction *insn)
{
struct rela *text_rela, *rodata_rela;
+ struct instruction *orig_insn = insn;
text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
if (text_rela && text_rela->sym == file->rodata->sym) {
@@ -733,10 +747,16 @@ static struct rela *find_switch_table(struct objtool_file *file,
/* case 3 */
func_for_each_insn_continue_reverse(file, func, insn) {
- if (insn->type == INSN_JUMP_UNCONDITIONAL ||
- insn->type == INSN_JUMP_DYNAMIC)
+ if (insn->type == INSN_JUMP_DYNAMIC)
break;
+ /* allow small jumps within the range */
+ if (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest &&
+ (insn->jump_dest->offset <= insn->offset ||
+ insn->jump_dest->offset >= orig_insn->offset))
+ break;
+
text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
insn->len);
if (text_rela && text_rela->sym == file->rodata->sym)
@@ -1034,34 +1054,6 @@ static int validate_branch(struct objtool_file *file,
return 0;
}
-static bool is_gcov_insn(struct instruction *insn)
-{
- struct rela *rela;
- struct section *sec;
- struct symbol *sym;
- unsigned long offset;
-
- rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len);
- if (!rela)
- return false;
-
- if (rela->sym->type != STT_SECTION)
- return false;
-
- sec = rela->sym->sec;
- offset = rela->addend + insn->offset + insn->len - rela->offset;
-
- list_for_each_entry(sym, &sec->symbol_list, list) {
- if (sym->type != STT_OBJECT)
- continue;
-
- if (offset >= sym->offset && offset < sym->offset + sym->len)
- return (!memcmp(sym->name, "__gcov0.", 8));
- }
-
- return false;
-}
-
static bool is_kasan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
@@ -1083,9 +1075,6 @@ static bool ignore_unreachable_insn(struct symbol *func,
if (insn->type == INSN_NOP)
return true;
- if (is_gcov_insn(insn))
- return true;
-
/*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
@@ -1146,6 +1135,19 @@ static int validate_functions(struct objtool_file *file)
ignore_unreachable_insn(func, insn))
continue;
+ /*
+ * gcov produces a lot of unreachable
+ * instructions. If we get an unreachable
+ * warning and the file has gcov enabled, just
+ * ignore it, and all other such warnings for
+ * the file.
+ */
+ if (!file->ignore_unreachables &&
+ gcov_enabled(file)) {
+ file->ignore_unreachables = true;
+ continue;
+ }
+
WARN_FUNC("function has unreachable instruction", insn->sec, insn->offset);
warnings++;
}
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
index 5ce61a1bda9c..df14e6b67b63 100644
--- a/tools/perf/jvmti/Makefile
+++ b/tools/perf/jvmti/Makefile
@@ -36,7 +36,7 @@ SOLIBEXT=so
# The following works at least on fedora 23, you may need the next
# line for other distros.
ifneq (,$(wildcard /usr/sbin/update-java-alternatives))
-JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fb8e42c7507a..4ffff7be9299 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -601,7 +601,8 @@ int hist_browser__run(struct hist_browser *browser, const char *help)
u64 nr_entries;
hbt->timer(hbt->arg);
- if (hist_browser__has_filter(browser))
+ if (hist_browser__has_filter(browser) ||
+ symbol_conf.report_hierarchy)
hist_browser__update_nr_entries(browser);
nr_entries = hist_browser__nr_entries(browser);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 85dd0db0a127..2f3eded54b0c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1895,7 +1895,6 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
if (ph->needs_swap)
nr = bswap_32(nr);
- ph->env.nr_numa_nodes = nr;
nodes = zalloc(sizeof(*nodes) * nr);
if (!nodes)
return -ENOMEM;
@@ -1932,6 +1931,7 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
free(str);
}
+ ph->env.nr_numa_nodes = nr;
ph->env.numa_nodes = nodes;
return 0;
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 9f43fda2570f..660fca05bc93 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -136,8 +136,8 @@ do { \
group [^,{}/]*[{][^}]*[}][^,{}/]*
event_pmu [^,{}/]+[/][^/]*[/][^,{}/]*
event [^,{}/]+
-bpf_object .*\.(o|bpf)
-bpf_source .*\.c
+bpf_object [^,{}]+\.(o|bpf)
+bpf_source [^,{}]+\.c
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]+
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index db9668869f6f..8035cc1eb955 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
* mm and might be done in another context, so we must
* use FOLL_REMOTE.
*/
- __get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
+ __get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
+ FOLL_WRITE | FOLL_REMOTE);
kvm_async_page_present_sync(vcpu, apf);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 81dfc73d3df3..28510e72618a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
down_read(&current->mm->mmap_sem);
npages = get_user_page_nowait(addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else
+ } else {
+ unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+ if (write_fault)
+ flags |= FOLL_WRITE;
+
npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
- write_fault, 0, page,
- FOLL_TOUCH|FOLL_HWPOISON);
+ page, flags);
+ }
if (npages != 1)
return npages;