path: root/arch
author		Linus Torvalds	2015-02-11 17:42:32 -0800
committer	Linus Torvalds	2015-02-11 17:42:32 -0800
commit		b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (patch)
tree		cc049e7ec9edd9f5a76f286e04d8db9a1caa516a /arch
parent		07f80d41cf24b7e6e76cd97d420167932c9a7f82 (diff)
parent		6a039eab53c01a58bfff95c78fc800ca7de27c77 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:

 - The remaining patches for the z13 machine support: kernel build
   option for z13, the cache synonym avoidance, SMT support,
   compare-and-delay for spinloops and the CEX5S crypto adapter.

 - The ftrace support for function tracing with the gcc hotpatch
   option. This touches common code Makefiles; Steven is ok with the
   changes.

 - The hypfs file system gets an extension to access diagnose 0x0c
   data in user space for performance analysis for Linux running
   under z/VM.

 - The iucv hvc console gets wildcard support for the user id
   filtering.

 - The cacheinfo code is converted to use the generic infrastructure.

 - Cleanup and bug fixes.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (42 commits)
  s390/process: free vx save area when releasing tasks
  s390/hypfs: Eliminate hypfs interval
  s390/hypfs: Add diagnose 0c support
  s390/cacheinfo: don't use smp_processor_id() in preemptible context
  s390/zcrypt: fixed domain scanning problem (again)
  s390/smp: increase maximum value of NR_CPUS to 512
  s390/jump label: use different nop instruction
  s390/jump label: add sanity checks
  s390/mm: correct missing space when reporting user process faults
  s390/dasd: cleanup profiling
  s390/dasd: add locking for global_profile access
  s390/ftrace: hotpatch support for function tracing
  ftrace: let notrace function attribute disable hotpatching if necessary
  ftrace: allow architectures to specify ftrace compile options
  s390: reintroduce diag 44 calls for cpu_relax()
  s390/zcrypt: Add support for new crypto express (CEX5S) adapter.
  s390/zcrypt: Number of supported ap domains is not retrievable.
  s390/spinlock: add compare-and-delay to lock wait loops
  s390/tape: remove redundant if statement
  s390/hvc_iucv: add simple wildcard matches to the iucv allow filter
  ...
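The gcc hotpatch support mentioned above deserves a concrete illustration. A minimal sketch, assuming a gcc with the s390 -mhotpatch option (the flag -mhotpatch=0,3 is wired up in arch/s390/Makefile below) and treating the per-function hotpatch attribute as available: with 0 halfwords of padding before the entry and 3 halfwords at the entry, every function starts with a six-byte "brcl 0,0" nop that ftrace can later overwrite with "brasl %r0,ftrace_caller".

    /*
     * Sketch only: compile with gcc -mhotpatch=0,3 on s390.  Every
     * function entry then carries a six-byte "brcl 0,0" nop that the
     * kernel can patch at runtime to enable tracing.
     */
    void traced_function(void)
    {
            /* entry begins with the patchable nop */
    }

    /* Assumed attribute form: opt one function out of hotpatching. */
    __attribute__((hotpatch(0, 0)))
    void never_patched(void)
    {
    }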
Diffstat (limited to 'arch')
 arch/s390/Kconfig                          |  42
 arch/s390/Makefile                         |  12
 arch/s390/boot/compressed/misc.c           |   3
 arch/s390/configs/default_defconfig        |   2
 arch/s390/configs/gcov_defconfig           |   1
 arch/s390/configs/performance_defconfig    |   1
 arch/s390/configs/zfcpdump_defconfig       |   1
 arch/s390/crypto/aes_s390.c                |   4
 arch/s390/defconfig                        |   7
 arch/s390/hypfs/Makefile                   |   1
 arch/s390/hypfs/hypfs.h                    |   7
 arch/s390/hypfs/hypfs_dbfs.c               |  49
 arch/s390/hypfs/hypfs_diag0c.c             | 139
 arch/s390/hypfs/inode.c                    |   9
 arch/s390/include/asm/cpu_mf.h             |  14
 arch/s390/include/asm/elf.h                |   8
 arch/s390/include/asm/ftrace.h             |  15
 arch/s390/include/asm/jump_label.h         |   7
 arch/s390/include/asm/pgtable.h            |   4
 arch/s390/include/asm/processor.h          |   5
 arch/s390/include/asm/reset.h              |   3
 arch/s390/include/asm/sclp.h               |   7
 arch/s390/include/asm/setup.h              |   3
 arch/s390/include/asm/sigp.h               |   1
 arch/s390/include/asm/smp.h                |   4
 arch/s390/include/asm/sysinfo.h            |  20
 arch/s390/include/asm/topology.h           |   4
 arch/s390/include/uapi/asm/hypfs.h         |  35
 arch/s390/kernel/Makefile                  |   4
 arch/s390/kernel/base.S                    |   3
 arch/s390/kernel/cache.c                   | 391
 arch/s390/kernel/dis.c                     |   9
 arch/s390/kernel/early.c                   |  18
 arch/s390/kernel/entry.h                   |   4
 arch/s390/kernel/ftrace.c                  | 108
 arch/s390/kernel/head.S                    |   4
 arch/s390/kernel/ipl.c                     |  11
 arch/s390/kernel/jump_label.c              |  63
 arch/s390/kernel/kprobes.c                 |   3
 arch/s390/kernel/machine_kexec.c           |  19
 arch/s390/kernel/mcount.S                  |   2
 arch/s390/kernel/process.c                 |  18
 arch/s390/kernel/processor.c               |  10
 arch/s390/kernel/sclp.S                    |   3
 arch/s390/kernel/setup.c                   |   3
 arch/s390/kernel/smp.c                     | 261
 arch/s390/kernel/sysinfo.c                 |   8
 arch/s390/kernel/topology.c                |  63
 arch/s390/kernel/vtime.c                   |  58
 arch/s390/lib/spinlock.c                   |  52
 arch/s390/mm/fault.c                       |   4
 arch/s390/mm/init.c                        |   9
 arch/s390/mm/mmap.c                        | 142
 arch/s390/mm/pgtable.c                     |   6
 arch/s390/pci/pci_mmio.c                   |   4
 55 files changed, 1120 insertions, 568 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 68b68d755fdf..373cd5badf1c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -66,6 +66,7 @@ config S390
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_SG_CHAIN
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK
select ARCH_INLINE_READ_LOCK_BH
@@ -116,7 +117,6 @@ config S390
select HAVE_BPF_JIT if 64BIT && PACK_STACK
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
- select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DYNAMIC_FTRACE if 64BIT
select HAVE_DYNAMIC_FTRACE_WITH_REGS if 64BIT
@@ -151,7 +151,6 @@ config S390
select TTY
select VIRT_CPU_ACCOUNTING
select VIRT_TO_BUS
- select ARCH_HAS_SG_CHAIN
config SCHED_OMIT_FRAME_POINTER
def_bool y
@@ -185,6 +184,10 @@ config HAVE_MARCH_ZEC12_FEATURES
def_bool n
select HAVE_MARCH_Z196_FEATURES
+config HAVE_MARCH_Z13_FEATURES
+ def_bool n
+ select HAVE_MARCH_ZEC12_FEATURES
+
choice
prompt "Processor type"
default MARCH_G5
@@ -244,6 +247,14 @@ config MARCH_ZEC12
2827 series). The kernel will be slightly faster but will not work on
older machines.
+config MARCH_Z13
+ bool "IBM z13"
+ select HAVE_MARCH_Z13_FEATURES if 64BIT
+ help
+ Select this to enable optimizations for IBM z13 (2964 series).
+ The kernel will be slightly faster but will not work on older
+ machines.
+
endchoice
config MARCH_G5_TUNE
@@ -267,6 +278,9 @@ config MARCH_Z196_TUNE
config MARCH_ZEC12_TUNE
def_bool TUNE_ZEC12 || MARCH_ZEC12 && TUNE_DEFAULT
+config MARCH_Z13_TUNE
+ def_bool TUNE_Z13 || MARCH_Z13 && TUNE_DEFAULT
+
choice
prompt "Tune code generation"
default TUNE_DEFAULT
@@ -305,6 +319,9 @@ config TUNE_Z196
config TUNE_ZEC12
bool "IBM zBC12 and zEC12"
+config TUNE_Z13
+ bool "IBM z13"
+
endchoice
config 64BIT
@@ -356,14 +373,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-256)"
- range 2 256
+ int "Maximum number of CPUs (2-512)"
+ range 2 512
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 256 and the
+ kernel will support. The maximum supported value is 512 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
@@ -378,17 +395,26 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config SCHED_SMT
+ def_bool n
+
config SCHED_MC
def_bool n
config SCHED_BOOK
+ def_bool n
+
+config SCHED_TOPOLOGY
def_bool y
- prompt "Book scheduler support"
+ prompt "Topology scheduler support"
depends on SMP
+ select SCHED_SMT
select SCHED_MC
+ select SCHED_BOOK
help
- Book scheduler support improves the CPU scheduler's decision making
- when dealing with machines that have several books.
+ Topology scheduler support improves the CPU scheduler's decision
+ making when dealing with machines that have multi-threading,
+ multiple cores or multiple books.
source kernel/Kconfig.preempt
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 878e67973151..acb6859c6a95 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_Z9_109) := -march=z9-109
mflags-$(CONFIG_MARCH_Z10) := -march=z10
mflags-$(CONFIG_MARCH_Z196) := -march=z196
mflags-$(CONFIG_MARCH_ZEC12) := -march=zEC12
+mflags-$(CONFIG_MARCH_Z13) := -march=z13
aflags-y += $(mflags-y)
cflags-y += $(mflags-y)
@@ -53,6 +54,7 @@ cflags-$(CONFIG_MARCH_Z9_109_TUNE) += -mtune=z9-109
cflags-$(CONFIG_MARCH_Z10_TUNE) += -mtune=z10
cflags-$(CONFIG_MARCH_Z196_TUNE) += -mtune=z196
cflags-$(CONFIG_MARCH_ZEC12_TUNE) += -mtune=zEC12
+cflags-$(CONFIG_MARCH_Z13_TUNE) += -mtune=z13
#KBUILD_IMAGE is necessary for make rpm
KBUILD_IMAGE :=arch/s390/boot/image
@@ -85,6 +87,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_FUNCTION_TRACER
+# make use of hotpatch feature if the compiler supports it
+cc_hotpatch := -mhotpatch=0,3
+ifeq ($(call cc-option-yn,$(cc_hotpatch)),y)
+CC_FLAGS_FTRACE := $(cc_hotpatch)
+KBUILD_AFLAGS += -DCC_USING_HOTPATCH
+KBUILD_CFLAGS += -DCC_USING_HOTPATCH
+endif
+endif
+
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
KBUILD_AFLAGS += $(aflags-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 57cbaff1f397..42506b371b74 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -8,6 +8,7 @@
#include <asm/uaccess.h>
#include <asm/page.h>
+#include <asm/sclp.h>
#include <asm/ipl.h>
#include "sizes.h"
@@ -63,8 +64,6 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_unxz.c"
#endif
-extern _sclp_print_early(const char *);
-
static int puts(const char *s)
{
_sclp_print_early(s);
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9432d0f202ef..64707750c780 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -555,7 +555,6 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
-CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_RB=y
@@ -563,6 +562,7 @@ CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 219dca6ea926..5c3097272cd8 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -540,6 +540,7 @@ CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 822c2f2e0c25..bda70f1ffd2c 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -537,6 +537,7 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 9d63051ebec4..1b0184a0f7f2 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -71,6 +71,7 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
+CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 1f272b24fc0b..5566ce80abdb 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -134,7 +134,7 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
@@ -159,7 +159,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
- const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
+ struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
if (unlikely(need_fallback(sctx->key_len))) {
crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 785c5f24d6f9..83ef702d2403 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -14,7 +14,6 @@ CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
@@ -22,12 +21,8 @@ CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_RD_XZ=y
-CONFIG_RD_LZO=y
-CONFIG_RD_LZ4=y
CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
diff --git a/arch/s390/hypfs/Makefile b/arch/s390/hypfs/Makefile
index 06f8d95a16cd..2ee25ba252d6 100644
--- a/arch/s390/hypfs/Makefile
+++ b/arch/s390/hypfs/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_S390_HYPFS_FS) += s390_hypfs.o
s390_hypfs-objs := inode.o hypfs_diag.o hypfs_vm.o hypfs_dbfs.o hypfs_sprp.o
+s390_hypfs-objs += hypfs_diag0c.o
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index b34b5ab90a31..eecde500ed49 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -37,6 +37,10 @@ extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
extern int hypfs_vm_create_files(struct dentry *root);
+/* VM diagnose 0c */
+int hypfs_diag0c_init(void);
+void hypfs_diag0c_exit(void);
+
/* Set Partition-Resource Parameter */
int hypfs_sprp_init(void);
void hypfs_sprp_exit(void);
@@ -49,7 +53,6 @@ struct hypfs_dbfs_data {
void *buf_free_ptr;
size_t size;
struct hypfs_dbfs_file *dbfs_file;
- struct kref kref;
};
struct hypfs_dbfs_file {
@@ -61,8 +64,6 @@ struct hypfs_dbfs_file {
unsigned long);
/* Private data for hypfs_dbfs.c */
- struct hypfs_dbfs_data *data;
- struct delayed_work data_free_work;
struct mutex lock;
struct dentry *dentry;
};
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index 47fe1055c714..752f6df3e697 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -17,33 +17,16 @@ static struct hypfs_dbfs_data *hypfs_dbfs_data_alloc(struct hypfs_dbfs_file *f)
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
- kref_init(&data->kref);
data->dbfs_file = f;
return data;
}
-static void hypfs_dbfs_data_free(struct kref *kref)
+static void hypfs_dbfs_data_free(struct hypfs_dbfs_data *data)
{
- struct hypfs_dbfs_data *data;
-
- data = container_of(kref, struct hypfs_dbfs_data, kref);
data->dbfs_file->data_free(data->buf_free_ptr);
kfree(data);
}
-static void data_free_delayed(struct work_struct *work)
-{
- struct hypfs_dbfs_data *data;
- struct hypfs_dbfs_file *df;
-
- df = container_of(work, struct hypfs_dbfs_file, data_free_work.work);
- mutex_lock(&df->lock);
- data = df->data;
- df->data = NULL;
- mutex_unlock(&df->lock);
- kref_put(&data->kref, hypfs_dbfs_data_free);
-}
-
static ssize_t dbfs_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
@@ -56,28 +39,21 @@ static ssize_t dbfs_read(struct file *file, char __user *buf,
df = file_inode(file)->i_private;
mutex_lock(&df->lock);
- if (!df->data) {
- data = hypfs_dbfs_data_alloc(df);
- if (!data) {
- mutex_unlock(&df->lock);
- return -ENOMEM;
- }
- rc = df->data_create(&data->buf, &data->buf_free_ptr,
- &data->size);
- if (rc) {
- mutex_unlock(&df->lock);
- kfree(data);
- return rc;
- }
- df->data = data;
- schedule_delayed_work(&df->data_free_work, HZ);
+ data = hypfs_dbfs_data_alloc(df);
+ if (!data) {
+ mutex_unlock(&df->lock);
+ return -ENOMEM;
+ }
+ rc = df->data_create(&data->buf, &data->buf_free_ptr, &data->size);
+ if (rc) {
+ mutex_unlock(&df->lock);
+ kfree(data);
+ return rc;
}
- data = df->data;
- kref_get(&data->kref);
mutex_unlock(&df->lock);
rc = simple_read_from_buffer(buf, size, ppos, data->buf, data->size);
- kref_put(&data->kref, hypfs_dbfs_data_free);
+ hypfs_dbfs_data_free(data);
return rc;
}
@@ -108,7 +84,6 @@ int hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
if (IS_ERR(df->dentry))
return PTR_ERR(df->dentry);
mutex_init(&df->lock);
- INIT_DELAYED_WORK(&df->data_free_work, data_free_delayed);
return 0;
}
diff --git a/arch/s390/hypfs/hypfs_diag0c.c b/arch/s390/hypfs/hypfs_diag0c.c
new file mode 100644
index 000000000000..d4c0d3717543
--- /dev/null
+++ b/arch/s390/hypfs/hypfs_diag0c.c
@@ -0,0 +1,139 @@
+/*
+ * Hypervisor filesystem for Linux on s390
+ *
+ * Diag 0C implementation
+ *
+ * Copyright IBM Corp. 2014
+ */
+
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/hypfs.h>
+#include "hypfs.h"
+
+#define DBFS_D0C_HDR_VERSION 0
+
+/*
+ * Execute diagnose 0c in 31 bit mode
+ */
+static void diag0c(struct hypfs_diag0c_entry *entry)
+{
+ asm volatile (
+#ifdef CONFIG_64BIT
+ " sam31\n"
+ " diag %0,%0,0x0c\n"
+ " sam64\n"
+#else
+ " diag %0,%0,0x0c\n"
+#endif
+ : /* no output register */
+ : "a" (entry)
+ : "memory");
+}
+
+/*
+ * Get hypfs_diag0c_entry from CPU vector and store diag0c data
+ */
+static void diag0c_fn(void *data)
+{
+ diag0c(((void **) data)[smp_processor_id()]);
+}
+
+/*
+ * Allocate buffer and store diag 0c data
+ */
+static void *diag0c_store(unsigned int *count)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int cpu_count, cpu, i;
+ void **cpu_vec;
+
+ get_online_cpus();
+ cpu_count = num_online_cpus();
+ cpu_vec = kmalloc(sizeof(*cpu_vec) * num_possible_cpus(), GFP_KERNEL);
+ if (!cpu_vec)
+ goto fail_put_online_cpus;
+ /* Note: Diag 0c needs 8 byte alignment and real storage */
+ diag0c_data = kzalloc(sizeof(struct hypfs_diag0c_hdr) +
+ cpu_count * sizeof(struct hypfs_diag0c_entry),
+ GFP_KERNEL | GFP_DMA);
+ if (!diag0c_data)
+ goto fail_kfree_cpu_vec;
+ i = 0;
+ /* Fill CPU vector for each online CPU */
+ for_each_online_cpu(cpu) {
+ diag0c_data->entry[i].cpu = cpu;
+ cpu_vec[cpu] = &diag0c_data->entry[i++];
+ }
+ /* Collect data on all CPUs */
+ on_each_cpu(diag0c_fn, cpu_vec, 1);
+ *count = cpu_count;
+ kfree(cpu_vec);
+ put_online_cpus();
+ return diag0c_data;
+
+fail_kfree_cpu_vec:
+ kfree(cpu_vec);
+fail_put_online_cpus:
+ put_online_cpus();
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Hypfs DBFS callback: Free diag 0c data
+ */
+static void dbfs_diag0c_free(const void *data)
+{
+ kfree(data);
+}
+
+/*
+ * Hypfs DBFS callback: Create diag 0c data
+ */
+static int dbfs_diag0c_create(void **data, void **data_free_ptr, size_t *size)
+{
+ struct hypfs_diag0c_data *diag0c_data;
+ unsigned int count;
+
+ diag0c_data = diag0c_store(&count);
+ if (IS_ERR(diag0c_data))
+ return PTR_ERR(diag0c_data);
+ memset(&diag0c_data->hdr, 0, sizeof(diag0c_data->hdr));
+ get_tod_clock_ext(diag0c_data->hdr.tod_ext);
+ diag0c_data->hdr.len = count * sizeof(struct hypfs_diag0c_entry);
+ diag0c_data->hdr.version = DBFS_D0C_HDR_VERSION;
+ diag0c_data->hdr.count = count;
+ *data = diag0c_data;
+ *data_free_ptr = diag0c_data;
+ *size = diag0c_data->hdr.len + sizeof(struct hypfs_diag0c_hdr);
+ return 0;
+}
+
+/*
+ * Hypfs DBFS file structure
+ */
+static struct hypfs_dbfs_file dbfs_file_0c = {
+ .name = "diag_0c",
+ .data_create = dbfs_diag0c_create,
+ .data_free = dbfs_diag0c_free,
+};
+
+/*
+ * Initialize diag 0c interface for z/VM
+ */
+int __init hypfs_diag0c_init(void)
+{
+ if (!MACHINE_IS_VM)
+ return 0;
+ return hypfs_dbfs_create_file(&dbfs_file_0c);
+}
+
+/*
+ * Shutdown diag 0c interface for z/VM
+ */
+void hypfs_diag0c_exit(void)
+{
+ if (!MACHINE_IS_VM)
+ return;
+ hypfs_dbfs_remove_file(&dbfs_file_0c);
+}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index c952b981e4f2..4c8008dd938e 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -482,10 +482,14 @@ static int __init hypfs_init(void)
rc = -ENODATA;
goto fail_hypfs_vm_exit;
}
+ if (hypfs_diag0c_init()) {
+ rc = -ENODATA;
+ goto fail_hypfs_sprp_exit;
+ }
s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
if (!s390_kobj) {
rc = -ENOMEM;
- goto fail_hypfs_sprp_exit;
+ goto fail_hypfs_diag0c_exit;
}
rc = register_filesystem(&hypfs_type);
if (rc)
@@ -494,6 +498,8 @@ static int __init hypfs_init(void)
fail_filesystem:
kobject_put(s390_kobj);
+fail_hypfs_diag0c_exit:
+ hypfs_diag0c_exit();
fail_hypfs_sprp_exit:
hypfs_sprp_exit();
fail_hypfs_vm_exit:
@@ -510,6 +516,7 @@ static void __exit hypfs_exit(void)
{
unregister_filesystem(&hypfs_type);
kobject_put(s390_kobj);
+ hypfs_diag0c_exit();
hypfs_sprp_exit();
hypfs_vm_exit();
hypfs_diag_exit();
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index cb700d54bd83..5243a8679a1d 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -189,6 +189,20 @@ static inline int ecctr(u64 ctr, u64 *val)
return cc;
}
+/* Store CPU counter multiple for the MT utilization counter set */
+static inline int stcctm5(u64 num, u64 *val)
+{
+ typedef struct { u64 _[num]; } addrtype;
+ int cc;
+
+ asm volatile (
+ " .insn rsy,0xeb0000000017,%2,5,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
+ return cc;
+}
+
/* Query sampling information */
static inline int qsi(struct hws_qsi_info_block *info)
{
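A hedged usage sketch for the new stcctm5() helper (the caller below is hypothetical, not part of the patch): the helper stores "num" 64-bit counters from counter set 5, the MT utilization counter set, and returns the condition code of the underlying STCCTM instruction.

    /* Hypothetical caller: fetch the MT utilization counters. */
    static int read_mt_utilization(u64 *dest, u64 num)
    {
            /* cc != 0: counter set not available or not authorized */
            if (stcctm5(num, dest))
                    return -EOPNOTSUPP;
            return 0;
    }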
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index f6e43d39e3d8..c9df40b5c0ac 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -163,8 +163,8 @@ extern unsigned int vdso_enabled;
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
-extern unsigned long randomize_et_dyn(unsigned long base);
-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
+extern unsigned long randomize_et_dyn(void);
+#define ELF_ET_DYN_BASE randomize_et_dyn()
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
@@ -209,7 +209,9 @@ do { \
} while (0)
#endif /* CONFIG_COMPAT */
-#define STACK_RND_MASK 0x7ffUL
+extern unsigned long mmap_rnd_mask;
+
+#define STACK_RND_MASK (mmap_rnd_mask)
#define ARCH_DLINFO \
do { \
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index abb618f1ead2..836c56290499 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -3,8 +3,12 @@
#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifdef CC_USING_HOTPATCH
+#define MCOUNT_INSN_SIZE 6
+#else
#define MCOUNT_INSN_SIZE 24
#define MCOUNT_RETURN_FIXUP 18
+#endif
#ifndef __ASSEMBLY__
@@ -37,18 +41,29 @@ struct ftrace_insn {
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ /* brcl 0,0 */
+ insn->opc = 0xc004;
+ insn->disp = 0;
+#else
/* jg .+24 */
insn->opc = 0xc0f4;
insn->disp = MCOUNT_INSN_SIZE / 2;
#endif
+#endif
}
static inline int is_ftrace_nop(struct ftrace_insn *insn)
{
#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CC_USING_HOTPATCH
+ if (insn->disp == 0)
+ return 1;
+#else
if (insn->disp == MCOUNT_INSN_SIZE / 2)
return 1;
#endif
+#endif
return 0;
}
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 346b1c85ffb4..58642fd29c87 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#define JUMP_LABEL_NOP_SIZE 6
+#define JUMP_LABEL_NOP_OFFSET 2
#ifdef CONFIG_64BIT
#define ASM_PTR ".quad"
@@ -13,9 +14,13 @@
#define ASM_ALIGN ".balign 4"
#endif
+/*
+ * We use a brcl 0,2 instruction for jump labels at compile time so it
+ * can be easily distinguished from a hotpatch generated instruction.
+ */
static __always_inline bool arch_static_branch(struct static_key *key)
{
- asm_volatile_goto("0: brcl 0,0\n"
+ asm_volatile_goto("0: brcl 0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
".pushsection __jump_table, \"aw\"\n"
ASM_ALIGN "\n"
ASM_PTR " 0b, %l[label], %0\n"
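For context, a sketch of how this nop is consumed with the static-key API of this kernel generation (standard <linux/jump_label.h> usage, not part of the patch): the branch below compiles to the six-byte "brcl 0,2" and is rewritten to "brcl 15,<label>" once the key is enabled.

    #include <linux/jump_label.h>

    static struct static_key my_key = STATIC_KEY_INIT_FALSE;

    static void hot_path(void)
    {
            if (static_key_false(&my_key)) {        /* the patched branch */
                    /* slow path, reached only after static_key_slow_inc() */
            }
    }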
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index ffb1d8ce97ae..0441ec24ae87 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1758,6 +1758,10 @@ extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);
+/* s390 has a private copy of get unmapped area to deal with cache synonyms */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
/*
* No page table caches to initialise
*/
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bed05ea7ec27..e7cbbdcdee13 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -215,10 +215,7 @@ static inline unsigned short stap(void)
/*
* Give up the time slice of the virtual PU.
*/
-static inline void cpu_relax(void)
-{
- barrier();
-}
+void cpu_relax(void);
#define cpu_relax_lowlatency() barrier()
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 804578587a7a..72786067b300 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -15,5 +15,6 @@ struct reset_call {
extern void register_reset_call(struct reset_call *reset);
extern void unregister_reset_call(struct reset_call *reset);
-extern void s390_reset_system(void (*func)(void *), void *data);
+extern void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data);
#endif /* _ASM_S390_RESET_H */
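A hedged sketch of the new split interface (caller and helper names are hypothetical): fn_pre runs before the registered reset calls are executed, fn_post runs afterwards and receives the opaque data pointer, matching the kexec/kdump usage further down in this series.

    /* Hypothetical caller of the reworked s390_reset_system(). */
    static void my_pre_reset(void)
    {
            /* e.g. save CPU state while the old system is still intact */
    }

    static void my_post_reset(void *data)
    {
            /* e.g. branch into the new kernel image passed via data */
    }

    static void start_new_kernel(void *image)
    {
            s390_reset_system(my_pre_reset, my_post_reset, image);
    }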
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 1aba89b53cb9..edb453cfc2c6 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -27,7 +27,7 @@ struct sclp_ipl_info {
};
struct sclp_cpu_entry {
- u8 address;
+ u8 core_id;
u8 reserved0[2];
u8 : 3;
u8 siif : 1;
@@ -51,6 +51,9 @@ int sclp_cpu_deconfigure(u8 cpu);
unsigned long long sclp_get_rnmax(void);
unsigned long long sclp_get_rzm(void);
unsigned int sclp_get_max_cpu(void);
+unsigned int sclp_get_mtid(u8 cpu_type);
+unsigned int sclp_get_mtid_max(void);
+unsigned int sclp_get_mtid_prev(void);
int sclp_sdias_blk_count(void);
int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
@@ -68,4 +71,6 @@ void sclp_early_detect(void);
int sclp_has_siif(void);
unsigned int sclp_get_ibc(void);
+long _sclp_print_early(const char *);
+
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7736fdd72595..b8d1e54b4733 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -57,6 +57,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_FLAG_VX (1UL << 18)
+#define MACHINE_FLAG_CAD (1UL << 19)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -80,6 +81,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_TLB_LC (0)
#define MACHINE_HAS_VX (0)
+#define MACHINE_HAS_CAD (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -93,6 +95,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
+#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index fad4ae23ece0..ec60cf7fa0a2 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -16,6 +16,7 @@
#define SIGP_SET_ARCHITECTURE 18
#define SIGP_COND_EMERGENCY_SIGNAL 19
#define SIGP_SENSE_RUNNING 21
+#define SIGP_SET_MULTI_THREADING 22
#define SIGP_STORE_ADDITIONAL_STATUS 23
/* SIGP condition codes */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 762d4f88af5a..b3bd0282dd98 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -16,6 +16,8 @@
#define raw_smp_processor_id() (S390_lowcore.cpu_nr)
extern struct mutex smp_cpu_state_mutex;
+extern unsigned int smp_cpu_mt_shift;
+extern unsigned int smp_cpu_mtid;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
@@ -35,6 +37,8 @@ extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
+#define smp_cpu_mtid 0
+
static inline void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
func(data);
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index f92428e459f8..73f12d21af4d 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -90,7 +90,11 @@ struct sysinfo_2_2_2 {
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
- char reserved_2[16];
+ char reserved_2[8];
+ unsigned char mt_installed;
+ unsigned char mt_general;
+ unsigned char mt_psmtid;
+ char reserved_3[5];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
};
@@ -120,26 +124,28 @@ struct sysinfo_3_2_2 {
extern int topology_max_mnest;
-#define TOPOLOGY_CPU_BITS 64
+#define TOPOLOGY_CORE_BITS 64
#define TOPOLOGY_NR_MAG 6
-struct topology_cpu {
- unsigned char reserved0[4];
+struct topology_core {
+ unsigned char nl;
+ unsigned char reserved0[3];
unsigned char :6;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
- unsigned long mask[TOPOLOGY_CPU_BITS / BITS_PER_LONG];
+ unsigned long mask[TOPOLOGY_CORE_BITS / BITS_PER_LONG];
};
struct topology_container {
- unsigned char reserved[7];
+ unsigned char nl;
+ unsigned char reserved[6];
unsigned char id;
};
union topology_entry {
unsigned char nl;
- struct topology_cpu cpu;
+ struct topology_core cpu;
struct topology_container container;
};
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 56af53093d24..c4fbb9527c5c 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -9,9 +9,11 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
struct cpu_topology_s390 {
+ unsigned short thread_id;
unsigned short core_id;
unsigned short socket_id;
unsigned short book_id;
+ cpumask_t thread_mask;
cpumask_t core_mask;
cpumask_t book_mask;
};
@@ -19,6 +21,8 @@ struct cpu_topology_s390 {
extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
+#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
diff --git a/arch/s390/include/uapi/asm/hypfs.h b/arch/s390/include/uapi/asm/hypfs.h
index 37998b449531..b3fe12d8dd87 100644
--- a/arch/s390/include/uapi/asm/hypfs.h
+++ b/arch/s390/include/uapi/asm/hypfs.h
@@ -1,16 +1,19 @@
/*
- * IOCTL interface for hypfs
+ * Structures for hypfs interface
*
* Copyright IBM Corp. 2013
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#ifndef _ASM_HYPFS_CTL_H
-#define _ASM_HYPFS_CTL_H
+#ifndef _ASM_HYPFS_H
+#define _ASM_HYPFS_H
#include <linux/types.h>
+/*
+ * IOCTL for binary interface /sys/kernel/debug/diag_304
+ */
struct hypfs_diag304 {
__u32 args[2];
__u64 data;
@@ -22,4 +25,30 @@ struct hypfs_diag304 {
#define HYPFS_DIAG304 \
_IOWR(HYPFS_IOCTL_MAGIC, 0x20, struct hypfs_diag304)
+/*
+ * Structures for binary interface /sys/kernel/debug/diag_0c
+ */
+struct hypfs_diag0c_hdr {
+ __u64 len; /* Length of diag0c buffer without header */
+ __u16 version; /* Version of header */
+ char reserved1[6]; /* Reserved */
+ char tod_ext[16]; /* TOD clock for diag0c */
+ __u64 count; /* Number of entries (CPUs) in diag0c array */
+ char reserved2[24]; /* Reserved */
+};
+
+struct hypfs_diag0c_entry {
+ char date[8]; /* MM/DD/YY in EBCDIC */
+ char time[8]; /* HH:MM:SS in EBCDIC */
+ __u64 virtcpu; /* Virtual time consumed by the virt CPU (us) */
+ __u64 totalproc; /* Total of virtual and simulation time (us) */
+ __u32 cpu; /* Linux logical CPU number */
+ __u32 reserved; /* Align to 8 byte */
+};
+
+struct hypfs_diag0c_data {
+ struct hypfs_diag0c_hdr hdr; /* 64 byte header */
+ struct hypfs_diag0c_entry entry[]; /* diag0c entry array */
+};
+
#endif
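Since the structures above define a stable binary layout, a small user space reader can parse the blob directly. A hedged sketch, assuming the usual hypfs debugfs location /sys/kernel/debug/s390_hypfs/diag_0c (the exact path is an assumption, not spelled out in this header):

    /* Hypothetical user space reader for the diag_0c binary interface. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <asm/hypfs.h>

    int main(void)
    {
            struct hypfs_diag0c_hdr hdr;
            struct hypfs_diag0c_entry *entry;
            __u64 i;
            /* path assumed; depends on where debugfs is mounted */
            FILE *f = fopen("/sys/kernel/debug/s390_hypfs/diag_0c", "r");

            if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1)
                    return 1;
            entry = malloc(hdr.len);        /* hdr.len excludes the header */
            if (!entry || fread(entry, hdr.len, 1, f) != 1)
                    return 1;
            for (i = 0; i < hdr.count; i++)
                    printf("cpu %u: virtcpu %llu us, total %llu us\n",
                           entry[i].cpu,
                           (unsigned long long) entry[i].virtcpu,
                           (unsigned long long) entry[i].totalproc);
            free(entry);
            fclose(f);
            return 0;
    }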
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 204c43a4c245..31fab2676fe9 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,8 +4,8 @@
ifdef CONFIG_FUNCTION_TRACER
# Don't trace early setup code and tracing code
-CFLAGS_REMOVE_early.o = -pg
-CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
#
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 797a823a2275..f74a53d339b0 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -97,7 +97,8 @@ ENTRY(diag308_reset)
lg %r4,0(%r4) # Save PSW
sturg %r4,%r3 # Use sturg, because of large pages
lghi %r1,1
- diag %r1,%r1,0x308
+ lghi %r0,0
+ diag %r0,%r1,0x308
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index c0b03c28d157..632fa06ea162 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -5,37 +5,11 @@
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
-#include <linux/notifier.h>
#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/cacheinfo.h>
#include <asm/facility.h>
-struct cache {
- unsigned long size;
- unsigned int line_size;
- unsigned int associativity;
- unsigned int nr_sets;
- unsigned int level : 3;
- unsigned int type : 2;
- unsigned int private : 1;
- struct list_head list;
-};
-
-struct cache_dir {
- struct kobject *kobj;
- struct cache_index_dir *index;
-};
-
-struct cache_index_dir {
- struct kobject kobj;
- int cpu;
- struct cache *cache;
- struct cache_index_dir *next;
-};
-
enum {
CACHE_SCOPE_NOTEXISTS,
CACHE_SCOPE_PRIVATE,
@@ -44,10 +18,10 @@ enum {
};
enum {
- CACHE_TYPE_SEPARATE,
- CACHE_TYPE_DATA,
- CACHE_TYPE_INSTRUCTION,
- CACHE_TYPE_UNIFIED,
+ CTYPE_SEPARATE,
+ CTYPE_DATA,
+ CTYPE_INSTRUCTION,
+ CTYPE_UNIFIED,
};
enum {
@@ -70,37 +44,60 @@ struct cache_info {
};
#define CACHE_MAX_LEVEL 8
-
union cache_topology {
struct cache_info ci[CACHE_MAX_LEVEL];
unsigned long long raw;
};
static const char * const cache_type_string[] = {
- "Data",
+ "",
"Instruction",
+ "Data",
+ "",
"Unified",
};
-static struct cache_dir *cache_dir_cpu[NR_CPUS];
-static LIST_HEAD(cache_list);
+static const enum cache_type cache_type_map[] = {
+ [CTYPE_SEPARATE] = CACHE_TYPE_SEPARATE,
+ [CTYPE_DATA] = CACHE_TYPE_DATA,
+ [CTYPE_INSTRUCTION] = CACHE_TYPE_INST,
+ [CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
+};
void show_cacheinfo(struct seq_file *m)
{
- struct cache *cache;
- int index = 0;
+ struct cpu_cacheinfo *this_cpu_ci;
+ struct cacheinfo *cache;
+ int idx;
- list_for_each_entry(cache, &cache_list, list) {
- seq_printf(m, "cache%-11d: ", index);
+ get_online_cpus();
+ this_cpu_ci = get_cpu_cacheinfo(cpumask_any(cpu_online_mask));
+ for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
+ cache = this_cpu_ci->info_list + idx;
+ seq_printf(m, "cache%-11d: ", idx);
seq_printf(m, "level=%d ", cache->level);
seq_printf(m, "type=%s ", cache_type_string[cache->type]);
- seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
- seq_printf(m, "size=%luK ", cache->size >> 10);
- seq_printf(m, "line_size=%u ", cache->line_size);
- seq_printf(m, "associativity=%d", cache->associativity);
+ seq_printf(m, "scope=%s ",
+ cache->disable_sysfs ? "Shared" : "Private");
+ seq_printf(m, "size=%dK ", cache->size >> 10);
+ seq_printf(m, "line_size=%u ", cache->coherency_line_size);
+ seq_printf(m, "associativity=%d", cache->ways_of_associativity);
seq_puts(m, "\n");
- index++;
}
+ put_online_cpus();
+}
+
+static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
+{
+ if (level >= CACHE_MAX_LEVEL)
+ return CACHE_TYPE_NOCACHE;
+
+ ci += level;
+
+ if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
+ return CACHE_TYPE_NOCACHE;
+
+ return cache_type_map[ci->type];
}
static inline unsigned long ecag(int ai, int li, int ti)
@@ -113,277 +110,79 @@ static inline unsigned long ecag(int ai, int li, int ti)
return val;
}
-static int __init cache_add(int level, int private, int type)
+static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
+ enum cache_type type, unsigned int level)
{
- struct cache *cache;
- int ti;
+ int ti, num_sets;
+ int cpu = smp_processor_id();
- cache = kzalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache)
- return -ENOMEM;
- if (type == CACHE_TYPE_INSTRUCTION)
+ if (type == CACHE_TYPE_INST)
ti = CACHE_TI_INSTRUCTION;
else
ti = CACHE_TI_UNIFIED;
- cache->size = ecag(EXTRACT_SIZE, level, ti);
- cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
- cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
- cache->nr_sets = cache->size / cache->associativity;
- cache->nr_sets /= cache->line_size;
- cache->private = private;
- cache->level = level + 1;
- cache->type = type - 1;
- list_add_tail(&cache->list, &cache_list);
- return 0;
-}
-
-static void __init cache_build_info(void)
-{
- struct cache *cache, *next;
- union cache_topology ct;
- int level, private, rc;
-
- ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
- for (level = 0; level < CACHE_MAX_LEVEL; level++) {
- switch (ct.ci[level].scope) {
- case CACHE_SCOPE_SHARED:
- private = 0;
- break;
- case CACHE_SCOPE_PRIVATE:
- private = 1;
- break;
- default:
- return;
- }
- if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
- rc = cache_add(level, private, CACHE_TYPE_DATA);
- rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
- } else {
- rc = cache_add(level, private, ct.ci[level].type);
- }
- if (rc)
- goto error;
- }
- return;
-error:
- list_for_each_entry_safe(cache, next, &cache_list, list) {
- list_del(&cache->list);
- kfree(cache);
- }
-}
-
-static struct cache_dir *cache_create_cache_dir(int cpu)
-{
- struct cache_dir *cache_dir;
- struct kobject *kobj = NULL;
- struct device *dev;
-
- dev = get_cpu_device(cpu);
- if (!dev)
- goto out;
- kobj = kobject_create_and_add("cache", &dev->kobj);
- if (!kobj)
- goto out;
- cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
- if (!cache_dir)
- goto out;
- cache_dir->kobj = kobj;
- cache_dir_cpu[cpu] = cache_dir;
- return cache_dir;
-out:
- kobject_put(kobj);
- return NULL;
-}
-
-static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
-{
- return container_of(kobj, struct cache_index_dir, kobj);
-}
-
-static void cache_index_release(struct kobject *kobj)
-{
- struct cache_index_dir *index;
-
- index = kobj_to_cache_index_dir(kobj);
- kfree(index);
-}
-
-static ssize_t cache_index_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- struct kobj_attribute *kobj_attr;
-
- kobj_attr = container_of(attr, struct kobj_attribute, attr);
- return kobj_attr->show(kobj, kobj_attr, buf);
-}
-
-#define DEFINE_CACHE_ATTR(_name, _format, _value) \
-static ssize_t cache_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
- char *buf) \
-{ \
- struct cache_index_dir *index; \
- \
- index = kobj_to_cache_index_dir(kobj); \
- return sprintf(buf, _format, _value); \
-} \
-static struct kobj_attribute cache_##_name##_attr = \
- __ATTR(_name, 0444, cache_##_name##_show, NULL);
-DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
-DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
-DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
-DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
-DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
-DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);
+ this_leaf->level = level + 1;
+ this_leaf->type = type;
+ this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
+ this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
+ level, ti);
+ this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
-{
- struct cache_index_dir *index;
- int len;
-
- index = kobj_to_cache_index_dir(kobj);
- len = type ?
- cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
- cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
- len += sprintf(&buf[len], "\n");
- return len;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return shared_cpu_map_func(kobj, 0, buf);
+ num_sets = this_leaf->size / this_leaf->coherency_line_size;
+ num_sets /= this_leaf->ways_of_associativity;
+ this_leaf->number_of_sets = num_sets;
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ if (!private)
+ this_leaf->disable_sysfs = true;
}
-static struct kobj_attribute cache_shared_cpu_map_attr =
- __ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
-static ssize_t shared_cpu_list_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+int init_cache_level(unsigned int cpu)
{
- return shared_cpu_map_func(kobj, 1, buf);
-}
-static struct kobj_attribute cache_shared_cpu_list_attr =
- __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
-
-static struct attribute *cache_index_default_attrs[] = {
- &cache_type_attr.attr,
- &cache_size_attr.attr,
- &cache_number_of_sets_attr.attr,
- &cache_ways_of_associativity_attr.attr,
- &cache_level_attr.attr,
- &cache_coherency_line_size_attr.attr,
- &cache_shared_cpu_map_attr.attr,
- &cache_shared_cpu_list_attr.attr,
- NULL,
-};
-
-static const struct sysfs_ops cache_index_ops = {
- .show = cache_index_show,
-};
-
-static struct kobj_type cache_index_type = {
- .sysfs_ops = &cache_index_ops,
- .release = cache_index_release,
- .default_attrs = cache_index_default_attrs,
-};
-
-static int cache_create_index_dir(struct cache_dir *cache_dir,
- struct cache *cache, int index, int cpu)
-{
- struct cache_index_dir *index_dir;
- int rc;
-
- index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
- if (!index_dir)
- return -ENOMEM;
- index_dir->cache = cache;
- index_dir->cpu = cpu;
- rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
- cache_dir->kobj, "index%d", index);
- if (rc)
- goto out;
- index_dir->next = cache_dir->index;
- cache_dir->index = index_dir;
- return 0;
-out:
- kfree(index_dir);
- return rc;
-}
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ unsigned int level = 0, leaves = 0;
+ union cache_topology ct;
+ enum cache_type ctype;
-static int cache_add_cpu(int cpu)
-{
- struct cache_dir *cache_dir;
- struct cache *cache;
- int rc, index = 0;
+ if (!this_cpu_ci)
+ return -EINVAL;
- if (list_empty(&cache_list))
- return 0;
- cache_dir = cache_create_cache_dir(cpu);
- if (!cache_dir)
- return -ENOMEM;
- list_for_each_entry(cache, &cache_list, list) {
- if (!cache->private)
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+ do {
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_NOCACHE)
break;
- rc = cache_create_index_dir(cache_dir, cache, index, cpu);
- if (rc)
- return rc;
- index++;
- }
- return 0;
-}
+ /* Separate instruction and data caches */
+ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+ } while (++level < CACHE_MAX_LEVEL);
-static void cache_remove_cpu(int cpu)
-{
- struct cache_index_dir *index, *next;
- struct cache_dir *cache_dir;
+ this_cpu_ci->num_levels = level;
+ this_cpu_ci->num_leaves = leaves;
- cache_dir = cache_dir_cpu[cpu];
- if (!cache_dir)
- return;
- index = cache_dir->index;
- while (index) {
- next = index->next;
- kobject_put(&index->kobj);
- index = next;
- }
- kobject_put(cache_dir->kobj);
- kfree(cache_dir);
- cache_dir_cpu[cpu] = NULL;
+ return 0;
}
-static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+int populate_cache_leaves(unsigned int cpu)
{
- int cpu = (long)hcpu;
- int rc = 0;
+ unsigned int level, idx, pvt;
+ union cache_topology ct;
+ enum cache_type ctype;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
- rc = cache_add_cpu(cpu);
- if (rc)
- cache_remove_cpu(cpu);
- break;
- case CPU_DEAD:
- cache_remove_cpu(cpu);
- break;
+ ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+ for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
+ idx < this_cpu_ci->num_leaves; idx++, level++) {
+ if (!this_leaf)
+ return -EINVAL;
+
+ pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
+ ctype = get_cache_type(&ct.ci[0], level);
+ if (ctype == CACHE_TYPE_SEPARATE) {
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
+ ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+ } else {
+ ci_leaf_init(this_leaf++, pvt, ctype, level);
+ }
}
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static int __init cache_init(void)
-{
- int cpu;
-
- if (!test_facility(34))
- return 0;
- cache_build_info();
-
- cpu_notifier_register_begin();
- for_each_online_cpu(cpu)
- cache_add_cpu(cpu);
- __hotcpu_notifier(cache_hotplug, 0);
- cpu_notifier_register_done();
return 0;
}
-device_initcall(cache_init);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index f3762937dd82..533430307da8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -137,7 +137,7 @@ enum {
INSTR_RSI_RRP,
INSTR_RSL_LRDFU, INSTR_RSL_R0RD,
INSTR_RSY_AARD, INSTR_RSY_CCRD, INSTR_RSY_RRRD, INSTR_RSY_RURD,
- INSTR_RSY_RDRM,
+ INSTR_RSY_RDRM, INSTR_RSY_RMRD,
INSTR_RS_AARD, INSTR_RS_CCRD, INSTR_RS_R0RD, INSTR_RS_RRRD,
INSTR_RS_RURD,
INSTR_RXE_FRRD, INSTR_RXE_RRRD, INSTR_RXE_RRRDM,
@@ -226,7 +226,6 @@ static const struct s390_operand operands[] =
[U16_32] = { 16, 32, 0 },
[J16_16] = { 16, 16, OPERAND_PCREL },
[J16_32] = { 16, 32, OPERAND_PCREL },
- [I16_32] = { 16, 32, OPERAND_SIGNED },
[I24_24] = { 24, 24, OPERAND_SIGNED },
[J32_16] = { 32, 16, OPERAND_PCREL },
[I32_16] = { 32, 16, OPERAND_SIGNED },
@@ -308,6 +307,7 @@ static const unsigned char formats[][7] = {
[INSTR_RSY_AARD] = { 0xff, A_8,A_12,D20_20,B_16,0,0 },
[INSTR_RSY_CCRD] = { 0xff, C_8,C_12,D20_20,B_16,0,0 },
[INSTR_RSY_RDRM] = { 0xff, R_8,D20_20,B_16,U4_12,0,0 },
+ [INSTR_RSY_RMRD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RSY_RRRD] = { 0xff, R_8,R_12,D20_20,B_16,0,0 },
[INSTR_RSY_RURD] = { 0xff, R_8,U4_12,D20_20,B_16,0,0 },
[INSTR_RS_AARD] = { 0xff, A_8,A_12,D_20,B_16,0,0 },
@@ -451,7 +451,8 @@ enum {
LONG_INSN_VERLLV,
LONG_INSN_VESRAV,
LONG_INSN_VESRLV,
- LONG_INSN_VSBCBI
+ LONG_INSN_VSBCBI,
+ LONG_INSN_STCCTM
};
static char *long_insn_name[] = {
@@ -531,6 +532,7 @@ static char *long_insn_name[] = {
[LONG_INSN_VESRAV] = "vesrav",
[LONG_INSN_VESRLV] = "vesrlv",
[LONG_INSN_VSBCBI] = "vsbcbi",
+ [LONG_INSN_STCCTM] = "stcctm",
};
static struct s390_insn opcode[] = {
@@ -1656,6 +1658,7 @@ static struct s390_insn opcode_eb[] = {
{ "lric", 0x60, INSTR_RSY_RDRM },
{ "stric", 0x61, INSTR_RSY_RDRM },
{ "mric", 0x62, INSTR_RSY_RDRM },
+ { { 0, LONG_INSN_STCCTM }, 0x17, INSTR_RSY_RMRD },
#endif
{ "rll", 0x1d, INSTR_RSY_RRRD },
{ "mvclu", 0x8e, INSTR_RSY_RRRD },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 302ac1f7f8e7..70a329450901 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,9 +393,27 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129))
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
+ if (test_facility(128))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
#endif
}
+static int __init nocad_setup(char *str)
+{
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+ return 0;
+}
+early_param("nocad", nocad_setup);
+
+static int __init cad_init(void)
+{
+ if (MACHINE_HAS_CAD)
+ /* Enable problem state CAD. */
+ __ctl_set_bit(2, 3);
+ return 0;
+}
+early_initcall(cad_init);
+
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 8e61393c8275..834df047d35f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -71,9 +71,11 @@ struct s390_mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;
+long sys_rt_sigreturn(void);
+long sys_sigreturn(void);
+
long sys_s390_personality(unsigned int personality);
long sys_s390_runtime_instr(int command, int signum);
-
long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index b86bb8823f15..82c19899574f 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -46,6 +46,13 @@
* lg %r14,8(%r15) # offset 18
* The jg instruction branches to offset 24 to skip as many instructions
* as possible.
+ * In case we use gcc's hotpatch feature the original and also the disabled
+ * function prologue contains only a single six byte instruction and looks
+ * like this:
+ * > brcl 0,0 # offset 0
+ * To enable ftrace the code gets patched like above and afterwards looks
+ * like this:
+ * > brasl %r0,ftrace_caller # offset 0
*/
unsigned long ftrace_plt;
@@ -59,62 +66,71 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_nop_insn(&insn);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a nop, if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_NOP;
+ if (addr == MCOUNT_ADDR) {
+ /* Initial code replacement */
+#ifdef CC_USING_HOTPATCH
+ /* We expect to see brcl 0,0 */
+ ftrace_generate_nop_insn(&orig);
+#else
+ /* We expect to see stg r14,8(r15) */
+ orig.opc = 0xe3e0;
+ orig.disp = 0xf0080024;
+#endif
+ ftrace_generate_nop_insn(&new);
+ } else if (old.opc == BREAKPOINT_INSTRUCTION) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_NOP into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a nop, if it reaches this breakpoint.
+ */
+ new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+ orig.disp = KPROBE_ON_FTRACE_CALL;
+ new.disp = KPROBE_ON_FTRACE_NOP;
+ } else {
+ /* Replace ftrace call with a nop. */
+ ftrace_generate_call_insn(&orig, rec->ip);
+ ftrace_generate_nop_insn(&new);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ftrace_insn insn;
- unsigned short op;
- void *from, *to;
- size_t size;
-
- ftrace_generate_call_insn(&insn, rec->ip);
- size = sizeof(insn);
- from = &insn;
- to = (void *) rec->ip;
- if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
+ struct ftrace_insn orig, new, old;
+
+ if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- /*
- * If we find a breakpoint instruction, a kprobe has been placed
- * at the beginning of the function. We write the constant
- * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
- * instruction so that the kprobes handler can execute a brasl if it
- * reaches this breakpoint.
- */
- if (op == BREAKPOINT_INSTRUCTION) {
- size -= 2;
- from += 2;
- to += 2;
- insn.disp = KPROBE_ON_FTRACE_CALL;
+ if (old.opc == BREAKPOINT_INSTRUCTION) {
+ /*
+ * If we find a breakpoint instruction, a kprobe has been
+ * placed at the beginning of the function. We write the
+ * constant KPROBE_ON_FTRACE_CALL into the remaining four
+ * bytes of the original instruction so that the kprobes
+ * handler can execute a brasl if it reaches this breakpoint.
+ */
+ new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
+ orig.disp = KPROBE_ON_FTRACE_NOP;
+ new.disp = KPROBE_ON_FTRACE_CALL;
+ } else {
+ /* Replace nop with an ftrace call. */
+ ftrace_generate_nop_insn(&orig);
+ ftrace_generate_call_insn(&new, rec->ip);
}
- if (probe_kernel_write(to, from, size))
+ /* Verify that the code to be replaced matches what we expect. */
+ if (memcmp(&orig, &old, sizeof(old)))
+ return -EINVAL;
+ if (probe_kernel_write((void *) rec->ip, &new, sizeof(new)))
return -EPERM;
return 0;
}
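Both rewritten functions above now follow the same verify-before-patch pattern; reduced to a sketch with illustrative names (not kernel API): compute the expected old instruction and the replacement, refuse to patch when the live text differs from the expectation, and only then write the new instruction.

    /* Illustrative helper, not part of the patch. */
    static int patch_insn(void *ip, struct ftrace_insn *expected,
                          struct ftrace_insn *replacement)
    {
            struct ftrace_insn old;

            if (probe_kernel_read(&old, ip, sizeof(old)))
                    return -EFAULT; /* text not readable */
            if (memcmp(expected, &old, sizeof(old)))
                    return -EINVAL; /* unexpected code, bail out */
            if (probe_kernel_write(ip, replacement, sizeof(*replacement)))
                    return -EPERM;  /* text not writable */
            return 0;
    }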
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index d62eee11f0b5..132f4c9ade60 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -436,7 +436,9 @@ ENTRY(startup_kdump)
# followed by the facility words.
#if defined(CONFIG_64BIT)
-#if defined(CONFIG_MARCH_ZEC12)
+#if defined(CONFIG_MARCH_Z13)
+ .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
+#elif defined(CONFIG_MARCH_ZEC12)
.long 3, 0xc100eff2, 0xf46ce800, 0x00400000
#elif defined(CONFIG_MARCH_Z196)
.long 2, 0xc100eff2, 0xf46c0000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 39badb9ca0b3..5c8651f36509 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2074,7 +2074,8 @@ static void do_reset_calls(void)
u32 dump_prefix_page;
-void s390_reset_system(void (*func)(void *), void *data)
+void s390_reset_system(void (*fn_pre)(void),
+ void (*fn_post)(void *), void *data)
{
struct _lowcore *lc;
@@ -2112,7 +2113,11 @@ void s390_reset_system(void (*func)(void *), void *data)
/* Store status at absolute zero */
store_status();
+ /* Call function before reset */
+ if (fn_pre)
+ fn_pre();
do_reset_calls();
- if (func)
- func(data);
+ /* Call function after reset */
+ if (fn_post)
+ fn_post(data);
}
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index b987ab2c1541..cb2d51e779df 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -22,31 +22,66 @@ struct insn_args {
enum jump_label_type type;
};
+static void jump_label_make_nop(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 0,0 */
+ insn->opcode = 0xc004;
+ insn->offset = 0;
+}
+
+static void jump_label_make_branch(struct jump_entry *entry, struct insn *insn)
+{
+ /* brcl 15,offset */
+ insn->opcode = 0xc0f4;
+ insn->offset = (entry->target - entry->code) >> 1;
+}
+
+static void jump_label_bug(struct jump_entry *entry, struct insn *insn)
+{
+ unsigned char *ipc = (unsigned char *)entry->code;
+ unsigned char *ipe = (unsigned char *)insn;
+
+ pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
+ pr_emerg("Found: %02x %02x %02x %02x %02x %02x\n",
+ ipc[0], ipc[1], ipc[2], ipc[3], ipc[4], ipc[5]);
+ pr_emerg("Expected: %02x %02x %02x %02x %02x %02x\n",
+ ipe[0], ipe[1], ipe[2], ipe[3], ipe[4], ipe[5]);
+ panic("Corrupted kernel text");
+}
+
+static struct insn orignop = {
+ .opcode = 0xc004,
+ .offset = JUMP_LABEL_NOP_OFFSET >> 1,
+};
+
static void __jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+ enum jump_label_type type,
+ int init)
{
- struct insn insn;
- int rc;
+ struct insn old, new;
if (type == JUMP_LABEL_ENABLE) {
- /* brcl 15,offset */
- insn.opcode = 0xc0f4;
- insn.offset = (entry->target - entry->code) >> 1;
+ jump_label_make_nop(entry, &old);
+ jump_label_make_branch(entry, &new);
} else {
- /* brcl 0,0 */
- insn.opcode = 0xc004;
- insn.offset = 0;
+ jump_label_make_branch(entry, &old);
+ jump_label_make_nop(entry, &new);
}
-
- rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
- WARN_ON_ONCE(rc < 0);
+ if (init) {
+ if (memcmp((void *)entry->code, &orignop, sizeof(orignop)))
+ jump_label_bug(entry, &old);
+ } else {
+ if (memcmp((void *)entry->code, &old, sizeof(old)))
+ jump_label_bug(entry, &old);
+ }
+ probe_kernel_write((void *)entry->code, &new, sizeof(new));
}
static int __sm_arch_jump_label_transform(void *data)
{
struct insn_args *args = data;
- __jump_label_transform(args->entry, args->type);
+ __jump_label_transform(args->entry, args->type, 0);
return 0;
}
@@ -64,7 +99,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- __jump_label_transform(entry, type);
+ __jump_label_transform(entry, type, 1);
}
#endif
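For clarity, a compact userspace sketch of the new sanity check, assuming the 6-byte brcl layout used above (2-byte opcode, 4-byte offset in halfwords); transform() is a hypothetical stand-in for __jump_label_transform(), with memcpy() in place of probe_kernel_write() and an error return instead of the panic:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct insn {
        uint16_t opcode;
        int32_t  offset;
} __attribute__((packed));

/* Patch only if the text still holds the expected old instruction. */
static int transform(struct insn *code, const struct insn *old,
                     const struct insn *new)
{
        if (memcmp(code, old, sizeof(*old)))
                return -1;               /* mismatch: the kernel would panic */
        memcpy(code, new, sizeof(*new)); /* probe_kernel_write() stand-in */
        return 0;
}

int main(void)
{
        struct insn text = { 0xc004, 0 };        /* brcl 0,0 (nop) */
        struct insn nop  = { 0xc004, 0 };
        struct insn jump = { 0xc0f4, 42 >> 1 };  /* brcl 15,offset */

        printf("enable: %d\n", transform(&text, &nop, &jump));       /* 0 */
        printf("enable twice: %d\n", transform(&text, &nop, &jump)); /* -1 */
        return 0;
}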
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 1e4c710dfb92..f516edc1fbe3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -69,7 +69,8 @@ static void copy_instruction(struct kprobe *p)
/*
* If kprobes patches the instruction that is morphed by
* ftrace make sure that kprobes always sees the branch
- * "jg .+24" that skips the mcount block
+ * "jg .+24" that skips the mcount block or the "brcl 0,0"
+ * in case of hotpatch.
*/
ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
p->ainsn.is_ftrace_insn = 1;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 4685337fa7c6..fb0901ec4306 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -103,21 +103,18 @@ static int __init machine_kdump_pm_init(void)
return 0;
}
arch_initcall(machine_kdump_pm_init);
-#endif
/*
* Start kdump: We expect here that a store status has been done on our CPU
*/
static void __do_machine_kdump(void *image)
{
-#ifdef CONFIG_CRASH_DUMP
int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
- setup_regs();
__load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
start_kdump(1);
-#endif
}
+#endif
/*
* Check if kdump checksums are valid: We call purgatory with parameter "0"
@@ -249,18 +246,18 @@ static void __do_machine_kexec(void *data)
*/
static void __machine_kexec(void *data)
{
- struct kimage *image = data;
-
__arch_local_irq_stosm(0x04); /* enable DAT */
pfault_fini();
tracing_off();
debug_locks_off();
- if (image->type == KEXEC_TYPE_CRASH) {
+#ifdef CONFIG_CRASH_DUMP
+ if (((struct kimage *) data)->type == KEXEC_TYPE_CRASH) {
lgr_info_log();
- s390_reset_system(__do_machine_kdump, data);
- } else {
- s390_reset_system(__do_machine_kexec, data);
- }
+ s390_reset_system(setup_regs, __do_machine_kdump, data);
+ } else
+#endif
+ s390_reset_system(NULL, __do_machine_kexec, data);
disabled_wait((unsigned long) __builtin_return_address(0));
}
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index b6dfc5bfcb89..e499370fbccb 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,7 +27,9 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
lgr %r1,%r15
+#ifndef CC_USING_HOTPATCH
aghi %r0,MCOUNT_RETURN_FIXUP
+#endif
aghi %r15,-STACK_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index aa7a83948c7b..13fc0978ca7e 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -79,6 +79,14 @@ void release_thread(struct task_struct *dead_task)
{
}
+#ifdef CONFIG_64BIT
+void arch_release_task_struct(struct task_struct *tsk)
+{
+ if (tsk->thread.vxrs)
+ kfree(tsk->thread.vxrs);
+}
+#endif
+
int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
unsigned long arg, struct task_struct *p)
{
@@ -243,13 +251,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
ret = PAGE_ALIGN(mm->brk + brk_rnd());
return (ret > mm->brk) ? ret : mm->brk;
}
-
-unsigned long randomize_et_dyn(unsigned long base)
-{
- unsigned long ret;
-
- if (!(current->flags & PF_RANDOMIZE))
- return base;
- ret = PAGE_ALIGN(base + brk_rnd());
- return (ret > base) ? ret : base;
-}
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index dbdd33ee0102..26108232fcaa 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -8,16 +8,24 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
+#include <asm/smp.h>
static DEFINE_PER_CPU(struct cpuid, cpu_id);
+void cpu_relax(void)
+{
+ if (!smp_cpu_mtid && MACHINE_HAS_DIAG44)
+ asm volatile("diag 0,0,0x44");
+ barrier();
+}
+EXPORT_SYMBOL(cpu_relax);
+
/*
* cpu_init - initializes state that is per-CPU.
*/
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index a41f2c99dcc8..7e77e03378f3 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -294,7 +294,8 @@ ENTRY(_sclp_print_early)
#ifdef CONFIG_64BIT
tm LC_AR_MODE_ID,1
jno .Lesa3
- lmh %r6,%r15,96(%r15) # store upper register halves
+ lgfr %r2,%r2 # sign extend return value
+ lmh %r6,%r15,96(%r15) # restore upper register halves
ahi %r15,80
.Lesa3:
#endif
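The added lgfr sign-extends the 32-bit value in %r2 to 64 bits before the upper register halves are restored, so a negative return code survives the switch back to 64-bit mode. In C terms this is roughly (long)(int)r2; a tiny demo with an assumed register value:

#include <stdio.h>

int main(void)
{
        unsigned long r2 = 0x00000000fffffffcUL; /* 32-bit -4, upper half 0 */
        long rc = (long)(int)r2;                 /* what lgfr %r2,%r2 does */

        printf("%ld\n", rc);                     /* -4 */
        return 0;
}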
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4e532c67832f..bfac77ada4f2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -810,6 +810,9 @@ static void __init setup_hwcaps(void)
case 0x2828:
strcpy(elf_platform, "zEC12");
break;
+ case 0x2964:
+ strcpy(elf_platform, "z13");
+ break;
}
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 0b499f5cbe19..a668993ff577 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -71,9 +71,30 @@ struct pcpu {
};
static u8 boot_cpu_type;
-static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];
+unsigned int smp_cpu_mt_shift;
+EXPORT_SYMBOL(smp_cpu_mt_shift);
+
+unsigned int smp_cpu_mtid;
+EXPORT_SYMBOL(smp_cpu_mtid);
+
+static unsigned int smp_max_threads __initdata = -1U;
+
+static int __init early_nosmt(char *s)
+{
+ smp_max_threads = 1;
+ return 0;
+}
+early_param("nosmt", early_nosmt);
+
+static int __init early_smt(char *s)
+{
+ get_option(&s, &smp_max_threads);
+ return 0;
+}
+early_param("smt", early_smt);
+
/*
* The smp_cpu_state_mutex must be held when changing the state or polarization
* member of a pcpu data structure within the pcpu_devices array.
@@ -132,7 +153,7 @@ static inline int pcpu_running(struct pcpu *pcpu)
/*
* Find struct pcpu by cpu address.
*/
-static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
+static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
int cpu;
@@ -299,6 +320,32 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
}
/*
+ * Enable additional logical cpus for multi-threading.
+ */
+static int pcpu_set_smt(unsigned int mtid)
+{
+ register unsigned long reg1 asm ("1") = (unsigned long) mtid;
+ int cc;
+
+ if (smp_cpu_mtid == mtid)
+ return 0;
+ asm volatile(
+ " sigp %1,0,%2 # sigp set multi-threading\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
+ : "cc");
+ if (cc == 0) {
+ smp_cpu_mtid = mtid;
+ smp_cpu_mt_shift = 0;
+ while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
+ smp_cpu_mt_shift++;
+ pcpu_devices[0].address = stap();
+ }
+ return cc;
+}
+
+/*
* Call function on an online CPU.
*/
void smp_call_online_cpu(void (*func)(void *), void *data)
@@ -512,22 +559,17 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP
-static void __init smp_get_save_area(int cpu, u16 address)
+static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
{
void *lc = pcpu_devices[0].lowcore;
struct save_area_ext *sa_ext;
unsigned long vx_sa;
- if (is_kdump_kernel())
- return;
- if (!OLDMEM_BASE && (address == boot_cpu_address ||
- ipl_info.type != IPL_TYPE_FCP_DUMP))
- return;
sa_ext = dump_save_area_create(cpu);
if (!sa_ext)
panic("could not allocate memory for save area\n");
- if (address == boot_cpu_address) {
- /* Copy the registers of the boot cpu. */
+ if (is_boot_cpu) {
+ /* Copy the registers of the boot CPU. */
copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
SAVE_AREA_BASE - PAGE_SIZE, 0);
if (MACHINE_HAS_VX)
@@ -548,6 +590,64 @@ static void __init smp_get_save_area(int cpu, u16 address)
free_page(vx_sa);
}
+/*
+ * Collect CPU state of the previous, crashed system.
+ * There are four cases:
+ * 1) standard zfcp dump
+ * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The boot CPU state is located in
+ * the absolute lowcore of the memory stored in the HSA. The zcore code
+ * will allocate the save area and copy the boot CPU state from the HSA.
+ * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
+ * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The firmware or the boot-loader
+ * stored the registers of the boot CPU in the absolute lowcore in the
+ * memory of the old system.
+ * 3) kdump and the old kernel did not store the CPU state,
+ * or stand-alone kdump for DASD
+ * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
+ * The state for all CPUs except the boot CPU needs to be collected
+ * with sigp stop-and-store-status. The kexec code or the boot-loader
+ * stored the registers of the boot CPU in the memory of the old system.
+ * 4) kdump and the old kernel stored the CPU state
+ * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
+ * The state of all CPUs is stored in ELF sections in the memory of the
+ * old system. The ELF sections are picked up by the crash_dump code
+ * via elfcorehdr_addr.
+ */
+static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
+{
+ unsigned int cpu, address, i, j;
+ int is_boot_cpu;
+
+ if (is_kdump_kernel())
+ /* Previous system stored the CPU states. Nothing to do. */
+ return;
+ if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
+ /* No previous system present, normal boot. */
+ return;
+ /* Set multi-threading state to the previous system. */
+ pcpu_set_smt(sclp_get_mtid_prev());
+ /* Collect CPU states. */
+ cpu = 0;
+ for (i = 0; i < info->configured; i++) {
+ /* Skip CPUs with different CPU type. */
+ if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
+ continue;
+ for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
+ address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
+ is_boot_cpu = (address == pcpu_devices[0].address);
+ if (is_boot_cpu && !OLDMEM_BASE)
+ /* Skip boot CPU for standard zfcp dump. */
+ continue;
+ /* Get state for this CPU. */
+ __smp_store_cpu_state(cpu, address, is_boot_cpu);
+ }
+ }
+}
+
int smp_store_status(int cpu)
{
unsigned long vx_sa;
@@ -565,10 +665,6 @@ int smp_store_status(int cpu)
return 0;
}
-#else /* CONFIG_CRASH_DUMP */
-
-static inline void smp_get_save_area(int cpu, u16 address) { }
-
#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
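The four cases in the comment above reduce to a small decision tree. A sketch of that dispatch with hypothetical booleans in place of the kernel globals (OLDMEM_BASE, ipl_info.type, is_kdump_kernel()):

#include <stdio.h>

static const char *dump_case(int oldmem_base, int fcp_dump, int kdump_kernel)
{
        if (kdump_kernel)
                return "4: ELF sections via elfcorehdr_addr, nothing to do";
        if (!oldmem_base && !fcp_dump)
                return "normal boot: no previous system, nothing to collect";
        if (!oldmem_base)
                return "1: standard zfcp dump, boot CPU state in the HSA";
        if (fcp_dump)
                return "2: stand-alone kdump for SCSI";
        return "3: kdump without stored state, or stand-alone kdump for DASD";
}

int main(void)
{
        printf("%s\n", dump_case(0, 1, 0));     /* case 1 */
        printf("%s\n", dump_case(1, 0, 1));     /* case 4 */
        return 0;
}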
@@ -590,11 +686,13 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
use_sigp_detection = 1;
- for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
+ for (address = 0; address <= MAX_CPU_ADDRESS;
+ address += (1U << smp_cpu_mt_shift)) {
if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
SIGP_CC_NOT_OPERATIONAL)
continue;
- info->cpu[info->configured].address = address;
+ info->cpu[info->configured].core_id =
+ address >> smp_cpu_mt_shift;
info->configured++;
}
info->combined = info->configured;
@@ -608,7 +706,8 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
{
struct pcpu *pcpu;
cpumask_t avail;
- int cpu, nr, i;
+ int cpu, nr, i, j;
+ u16 address;
nr = 0;
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
@@ -616,51 +715,76 @@ static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
continue;
- if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
- continue;
- pcpu = pcpu_devices + cpu;
- pcpu->address = info->cpu[i].address;
- pcpu->state = (i >= info->configured) ?
- CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
- set_cpu_present(cpu, true);
- if (sysfs_add && smp_add_present_cpu(cpu) != 0)
- set_cpu_present(cpu, false);
- else
- nr++;
- cpu = cpumask_next(cpu, &avail);
+ address = info->cpu[i].core_id << smp_cpu_mt_shift;
+ for (j = 0; j <= smp_cpu_mtid; j++) {
+ if (pcpu_find_address(cpu_present_mask, address + j))
+ continue;
+ pcpu = pcpu_devices + cpu;
+ pcpu->address = address + j;
+ pcpu->state =
+ (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
+ CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ set_cpu_present(cpu, true);
+ if (sysfs_add && smp_add_present_cpu(cpu) != 0)
+ set_cpu_present(cpu, false);
+ else
+ nr++;
+ cpu = cpumask_next(cpu, &avail);
+ if (cpu >= nr_cpu_ids)
+ break;
+ }
}
return nr;
}
static void __init smp_detect_cpus(void)
{
- unsigned int cpu, c_cpus, s_cpus;
+ unsigned int cpu, mtid, c_cpus, s_cpus;
struct sclp_cpu_info *info;
+ u16 address;
+ /* Get CPU information */
info = smp_get_cpu_info();
if (!info)
panic("smp_detect_cpus failed to allocate memory\n");
+
+ /* Find boot CPU type */
if (info->has_cpu_type) {
- for (cpu = 0; cpu < info->combined; cpu++) {
- if (info->cpu[cpu].address != boot_cpu_address)
- continue;
- /* The boot cpu dictates the cpu type. */
- boot_cpu_type = info->cpu[cpu].type;
- break;
- }
+ address = stap();
+ for (cpu = 0; cpu < info->combined; cpu++)
+ if (info->cpu[cpu].core_id == address) {
+ /* The boot cpu dictates the cpu type. */
+ boot_cpu_type = info->cpu[cpu].type;
+ break;
+ }
+ if (cpu >= info->combined)
+ panic("Could not find boot CPU type");
}
+
+#ifdef CONFIG_CRASH_DUMP
+ /* Collect CPU state of previous system */
+ smp_store_cpu_states(info);
+#endif
+
+ /* Set multi-threading state for the current system */
+ mtid = sclp_get_mtid(boot_cpu_type);
+ mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
+ pcpu_set_smt(mtid);
+
+ /* Print number of CPUs */
c_cpus = s_cpus = 0;
for (cpu = 0; cpu < info->combined; cpu++) {
if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
continue;
- if (cpu < info->configured) {
- smp_get_save_area(c_cpus, info->cpu[cpu].address);
- c_cpus++;
- } else
- s_cpus++;
+ if (cpu < info->configured)
+ c_cpus += smp_cpu_mtid + 1;
+ else
+ s_cpus += smp_cpu_mtid + 1;
}
pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
+
+ /* Add CPUs present at boot */
get_online_cpus();
__smp_rescan_cpus(info, 0);
put_online_cpus();
@@ -696,12 +820,23 @@ static void smp_start_secondary(void *cpuvoid)
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
struct pcpu *pcpu;
- int rc;
+ int base, i, rc;
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+ base = cpu - (cpu % (smp_cpu_mtid + 1));
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (base + i < nr_cpu_ids)
+ if (cpu_online(base + i))
+ break;
+ }
+ /*
+ * If this is the first CPU of the core to get online
+ * do an initial CPU reset.
+ */
+ if (i > smp_cpu_mtid &&
+ pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
@@ -774,7 +909,8 @@ void __init smp_fill_possible_mask(void)
{
unsigned int possible, sclp, cpu;
- sclp = sclp_get_max_cpu() ?: nr_cpu_ids;
+ sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
+ sclp = sclp_get_max_cpu()*sclp ?: nr_cpu_ids;
possible = setup_possible_cpus ?: nr_cpu_ids;
possible = min(possible, sclp);
for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
@@ -796,9 +932,8 @@ void __init smp_prepare_boot_cpu(void)
{
struct pcpu *pcpu = pcpu_devices;
- boot_cpu_address = stap();
pcpu->state = CPU_STATE_CONFIGURED;
- pcpu->address = boot_cpu_address;
+ pcpu->address = stap();
pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
@@ -848,7 +983,7 @@ static ssize_t cpu_configure_store(struct device *dev,
const char *buf, size_t count)
{
struct pcpu *pcpu;
- int cpu, val, rc;
+ int cpu, val, rc, i;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
@@ -860,29 +995,43 @@ static ssize_t cpu_configure_store(struct device *dev,
rc = -EBUSY;
/* disallow configuration changes of online cpus and cpu 0 */
cpu = dev->id;
- if (cpu_online(cpu) || cpu == 0)
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ if (cpu == 0)
goto out;
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_online(cpu + i))
+ goto out;
pcpu = pcpu_devices + cpu;
rc = 0;
switch (val) {
case 0:
if (pcpu->state != CPU_STATE_CONFIGURED)
break;
- rc = sclp_cpu_deconfigure(pcpu->address);
+ rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_STANDBY;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_STANDBY;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
case 1:
if (pcpu->state != CPU_STATE_STANDBY)
break;
- rc = sclp_cpu_configure(pcpu->address);
+ rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
if (rc)
break;
- pcpu->state = CPU_STATE_CONFIGURED;
- smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
+ continue;
+ pcpu[i].state = CPU_STATE_CONFIGURED;
+ smp_cpu_set_polarization(cpu + i,
+ POLARIZATION_UNKNOWN);
+ }
topology_expect_change();
break;
default:
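Throughout the SMT rework above, a logical CPU address is (core_id << smp_cpu_mt_shift) + thread, where the shift is the smallest power of two covering mtid + 1 threads per core. A small standalone demo of that arithmetic (mtid = 1 assumes two threads per core, as on z13):

#include <stdio.h>

/* Same computation as pcpu_set_smt(): smallest shift with mtid < 2^shift. */
static unsigned int mt_shift(unsigned int mtid)
{
        unsigned int shift = 0;

        while (mtid >= (1U << shift))
                shift++;
        return shift;
}

int main(void)
{
        unsigned int mtid = 1;          /* assumed: 2 threads per core */
        unsigned int shift = mt_shift(mtid);
        unsigned int core, thread;

        for (core = 0; core < 3; core++)
                for (thread = 0; thread <= mtid; thread++)
                        printf("core %u thread %u -> address %u\n",
                               core, thread, (core << shift) + thread);
        return 0;
}

This is why smp_get_cpu_info() now steps addresses by 1U << smp_cpu_mt_shift and stores core_id = address >> smp_cpu_mt_shift.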
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 811f542b8ed4..85565f1ff474 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -194,6 +194,14 @@ static void stsi_2_2_2(struct seq_file *m, struct sysinfo_2_2_2 *info)
seq_printf(m, "LPAR CPUs Reserved: %d\n", info->cpus_reserved);
seq_printf(m, "LPAR CPUs Dedicated: %d\n", info->cpus_dedicated);
seq_printf(m, "LPAR CPUs Shared: %d\n", info->cpus_shared);
+ if (info->mt_installed & 0x80) {
+ seq_printf(m, "LPAR CPUs G-MTID: %d\n",
+ info->mt_general & 0x1f);
+ seq_printf(m, "LPAR CPUs S-MTID: %d\n",
+ info->mt_installed & 0x1f);
+ seq_printf(m, "LPAR CPUs PS-MTID: %d\n",
+ info->mt_psmtid & 0x1f);
+ }
}
static void stsi_3_2_2(struct seq_file *m, struct sysinfo_3_2_2 *info)
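As the code above reads them, bit 0x80 of mt_installed flags that multi-threading is installed, and the low five bits of each field carry the respective maximum thread id. A tiny decode demo with assumed sample byte values:

#include <stdio.h>

int main(void)
{
        /* hypothetical STSI 2.2.2 bytes: MT installed, thread ids = 1 */
        unsigned char mt_installed = 0x81, mt_general = 0x01, mt_psmtid = 0x01;

        if (mt_installed & 0x80) {
                printf("G-MTID: %d\n", mt_general & 0x1f);
                printf("S-MTID: %d\n", mt_installed & 0x1f);
                printf("PS-MTID: %d\n", mt_psmtid & 0x1f);
        }
        return 0;
}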
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index b93bed76ea94..24ee33f1af24 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -59,32 +59,50 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
return mask;
}
-static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
+static cpumask_t cpu_thread_map(unsigned int cpu)
+{
+ cpumask_t mask;
+ int i;
+
+ cpumask_copy(&mask, cpumask_of(cpu));
+ if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
+ return mask;
+ cpu -= cpu % (smp_cpu_mtid + 1);
+ for (i = 0; i <= smp_cpu_mtid; i++)
+ if (cpu_present(cpu + i))
+ cpumask_set_cpu(cpu + i, &mask);
+ return mask;
+}
+
+static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
struct mask_info *book,
struct mask_info *socket,
int one_socket_per_cpu)
{
- unsigned int cpu;
+ unsigned int core;
- for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
- unsigned int rcpu;
- int lcpu;
+ for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
+ unsigned int rcore;
+ int lcpu, i;
- rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
- lcpu = smp_find_processor_id(rcpu);
+ rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
+ lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
if (lcpu < 0)
continue;
- cpumask_set_cpu(lcpu, &book->mask);
- cpu_topology[lcpu].book_id = book->id;
- cpumask_set_cpu(lcpu, &socket->mask);
- cpu_topology[lcpu].core_id = rcpu;
- if (one_socket_per_cpu) {
- cpu_topology[lcpu].socket_id = rcpu;
- socket = socket->next;
- } else {
- cpu_topology[lcpu].socket_id = socket->id;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ cpu_topology[lcpu + i].book_id = book->id;
+ cpu_topology[lcpu + i].core_id = rcore;
+ cpu_topology[lcpu + i].thread_id = lcpu + i;
+ cpumask_set_cpu(lcpu + i, &book->mask);
+ cpumask_set_cpu(lcpu + i, &socket->mask);
+ if (one_socket_per_cpu)
+ cpu_topology[lcpu + i].socket_id = rcore;
+ else
+ cpu_topology[lcpu + i].socket_id = socket->id;
+ smp_cpu_set_polarization(lcpu + i, tl_core->pp);
}
- smp_cpu_set_polarization(lcpu, tl_cpu->pp);
+ if (one_socket_per_cpu)
+ socket = socket->next;
}
return socket;
}
@@ -108,7 +126,7 @@ static void clear_masks(void)
static union topology_entry *next_tle(union topology_entry *tle)
{
if (!tle->nl)
- return (union topology_entry *)((struct topology_cpu *)tle + 1);
+ return (union topology_entry *)((struct topology_core *)tle + 1);
return (union topology_entry *)((struct topology_container *)tle + 1);
}
@@ -231,9 +249,11 @@ static void update_cpu_masks(void)
spin_lock_irqsave(&topology_lock, flags);
for_each_possible_cpu(cpu) {
+ cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
if (!MACHINE_HAS_TOPOLOGY) {
+ cpu_topology[cpu].thread_id = cpu;
cpu_topology[cpu].core_id = cpu;
cpu_topology[cpu].socket_id = cpu;
cpu_topology[cpu].book_id = cpu;
@@ -445,6 +465,12 @@ int topology_cpu_init(struct cpu *cpu)
return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}
+const struct cpumask *cpu_thread_mask(int cpu)
+{
+ return &cpu_topology[cpu].thread_mask;
+}
+
const struct cpumask *cpu_coregroup_mask(int cpu)
{
return &cpu_topology[cpu].core_mask;
@@ -456,6 +482,7 @@ static const struct cpumask *cpu_book_mask(int cpu)
}
static struct sched_domain_topology_level s390_topology[] = {
+ { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
{ cpu_book_mask, SD_INIT_NAME(BOOK) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
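cpu_thread_map() above rounds the CPU number down to the first thread of its core and then collects all mtid + 1 present siblings. The same grouping in a plain C sketch, with an int bitmask standing in for cpumask_t:

#include <stdio.h>

int main(void)
{
        unsigned int mtid = 1;          /* assumed: 2 threads per core */
        unsigned int cpu;

        for (cpu = 0; cpu < 6; cpu++) {
                unsigned int base = cpu - (cpu % (mtid + 1));
                unsigned int mask = 0, i;

                for (i = 0; i <= mtid; i++)
                        mask |= 1U << (base + i);
                printf("cpu %u -> thread mask 0x%x\n", cpu, mask);
        }
        return 0;
}

Siblings 0/1, 2/3 and 4/5 each share a mask, which is what the new SMT level in s390_topology[] hands to the scheduler.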
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e34122e539a1..e53d3595a7c8 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -15,6 +15,8 @@
#include <asm/cputime.h>
#include <asm/vtimer.h>
#include <asm/vtime.h>
+#include <asm/cpu_mf.h>
+#include <asm/smp.h>
static void virt_timer_expire(void);
@@ -23,6 +25,10 @@ static DEFINE_SPINLOCK(virt_timer_lock);
static atomic64_t virt_timer_current;
static atomic64_t virt_timer_elapsed;
+static DEFINE_PER_CPU(u64, mt_cycles[32]);
+static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
+static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
+
static inline u64 get_vtimer(void)
{
u64 timer;
@@ -61,6 +67,8 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
u64 timer, clock, user, system, steal;
+ u64 user_scaled, system_scaled;
+ int i;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -76,15 +84,49 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+ /* Do MT utilization calculation */
+ if (smp_cpu_mtid) {
+ u64 cycles_new[32], *cycles_old;
+ u64 delta, mult, div;
+
+ cycles_old = this_cpu_ptr(mt_cycles);
+ if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
+ mult = div = 0;
+ for (i = 0; i <= smp_cpu_mtid; i++) {
+ delta = cycles_new[i] - cycles_old[i];
+ mult += delta;
+ div += (i + 1) * delta;
+ }
+ if (mult > 0) {
+ /* Update scaling factor */
+ __this_cpu_write(mt_scaling_mult, mult);
+ __this_cpu_write(mt_scaling_div, div);
+ memcpy(cycles_old, cycles_new,
+ sizeof(u64) * (smp_cpu_mtid + 1));
+ }
+ }
+ }
+
user = S390_lowcore.user_timer - ti->user_timer;
S390_lowcore.steal_timer -= user;
ti->user_timer = S390_lowcore.user_timer;
- account_user_time(tsk, user, user);
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, hardirq_offset, system, system);
+
+ user_scaled = user;
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ user_scaled = (user_scaled * mult) / div;
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_user_time(tsk, user, user_scaled);
+ account_system_time(tsk, hardirq_offset, system, system_scaled);
steal = S390_lowcore.steal_timer;
if ((s64) steal > 0) {
@@ -126,7 +168,7 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_account_irq_enter(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- u64 timer, system;
+ u64 timer, system, system_scaled;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -135,7 +177,15 @@ void vtime_account_irq_enter(struct task_struct *tsk)
system = S390_lowcore.system_timer - ti->system_timer;
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
- account_system_time(tsk, 0, system, system);
+ system_scaled = system;
+ /* Do MT utilization scaling */
+ if (smp_cpu_mtid) {
+ u64 mult = __this_cpu_read(mt_scaling_mult);
+ u64 div = __this_cpu_read(mt_scaling_div);
+
+ system_scaled = (system_scaled * mult) / div;
+ }
+ account_system_time(tsk, 0, system, system_scaled);
virt_timer_forward(system);
}
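The scaling factor above is mult = sum(delta[i]) and div = sum((i + 1) * delta[i]) over the per-thread cycle deltas, so scaled = raw * mult / div never exceeds raw. A worked example with assumed counter deltas:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        uint64_t delta[2] = { 1000, 200 };      /* assumed per-thread deltas */
        uint64_t mult = 0, div = 0, raw = 5000;
        int i;

        for (i = 0; i < 2; i++) {
                mult += delta[i];               /* 1200 */
                div  += (i + 1) * delta[i];     /* 1*1000 + 2*200 = 1400 */
        }
        printf("scaled = %" PRIu64 "\n", raw * mult / div); /* 4285 */
        return 0;
}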
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 034a35a3e9c1..d6c9991f7797 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -12,7 +12,15 @@
#include <linux/smp.h>
#include <asm/io.h>
-int spin_retry = 1000;
+int spin_retry = -1;
+
+static int __init spin_retry_init(void)
+{
+ if (spin_retry < 0)
+ spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
+ return 0;
+}
+early_initcall(spin_retry_init);
/**
* spin_retry= parameter
@@ -24,6 +32,11 @@ static int __init spin_retry_setup(char *str)
}
__setup("spin_retry=", spin_retry_setup);
+static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+{
+ asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
+}
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
unsigned int cpu = SPINLOCK_LOCKVAL;
@@ -46,6 +59,8 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -84,6 +99,8 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
/* Loop for a while on the lock value. */
count = spin_retry;
do {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
@@ -100,11 +117,19 @@ EXPORT_SYMBOL(arch_spin_lock_wait_flags);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
+ unsigned int cpu = SPINLOCK_LOCKVAL;
+ unsigned int owner;
int count;
- for (count = spin_retry; count > 0; count--)
- if (arch_spin_trylock_once(lp))
- return 1;
+ for (count = spin_retry; count > 0; count--) {
+ owner = ACCESS_ONCE(lp->lock);
+ /* Try to get the lock if it is free. */
+ if (!owner) {
+ if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ return 1;
+ } else if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&lp->lock, owner);
+ }
return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
@@ -126,8 +151,11 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return;
}
@@ -141,8 +169,11 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0)
+ if ((int) old < 0) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, old, old + 1))
return 1;
}
@@ -173,6 +204,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
}
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -201,6 +234,8 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
smp_rmb();
if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
break;
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
@@ -214,8 +249,11 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if (old)
+ if (old) {
+ if (MACHINE_HAS_CAD)
+ _raw_compare_and_delay(&rw->lock, old);
continue;
+ }
if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
return 1;
}
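All the loops above share one shape: poll the lock word up to spin_retry times and, while it is still held, issue compare-and-delay so the CPU can stall briefly instead of hammering the cache line. A portable sketch of that control flow; the CAD instruction is stubbed out so the demo compiles anywhere, and only the loop structure is the point:

#include <stdio.h>

static unsigned int lock_word;          /* 0 = free, else owner id */

/* Stand-in for the ".insn rsy,0xeb0000000022,..." compare-and-delay. */
static void compare_and_delay(unsigned int *lock, unsigned int old)
{
        (void)lock;
        (void)old;
}

static int trylock_retry(unsigned int *lock, unsigned int me, int retry)
{
        while (retry-- > 0) {
                unsigned int owner = *lock;

                if (!owner) {
                        if (__sync_bool_compare_and_swap(lock, 0, me))
                                return 1;
                } else {
                        compare_and_delay(lock, owner); /* hardware backoff */
                }
        }
        return 0;
}

int main(void)
{
        printf("%d\n", trylock_retry(&lock_word, 1, 10)); /* 1: acquired */
        printf("%d\n", trylock_retry(&lock_word, 2, 10)); /* 0: still held */
        return 0;
}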
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 9065d5aa3932..3ff86533f7db 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -171,7 +171,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
table = table + ((address >> 20) & 0x7ff);
if (bad_address(table))
goto bad;
- pr_cont(KERN_CONT "S:%016lx ", *table);
+ pr_cont("S:%016lx ", *table);
if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
goto out;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
@@ -261,7 +261,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
return;
if (!printk_ratelimit())
return;
- printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d",
+ printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
regs->int_code & 0xffff, regs->int_code >> 17);
print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
printk(KERN_CONT "\n");
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7235e01fd67..d35b15113b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -71,13 +71,16 @@ static void __init setup_zero_pages(void)
break;
case 0x2827: /* zEC12 */
case 0x2828: /* zEC12 */
- default:
order = 5;
break;
+ case 0x2964: /* z13 */
+ default:
+ order = 7;
+ break;
}
/* Limit number of empty zero pages for small memory sizes */
- if (order > 2 && totalram_pages <= 16384)
- order = 2;
+ while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+ order--;
empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!empty_zero_page)
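The new clamp shrinks the order while 2^order zero pages would exceed roughly totalram_pages / 1024, instead of the old hard cut at 16384 pages. A worked example of the loop with assumed memory sizes (4 KiB pages):

#include <stdio.h>

/* Same loop as above: shrink order until 2^order <= totalram_pages >> 10. */
static int clamp_order(int order, unsigned long totalram_pages)
{
        while (order > 2 && (totalram_pages >> 10) < (1UL << order))
                order--;
        return order;
}

int main(void)
{
        /* 1 GiB = 262144 pages: 262144 >> 10 = 256 >= 128, order 7 stays */
        printf("%d\n", clamp_order(7, 262144));
        /* 256 MiB = 65536 pages: 64 < 128 -> 6, then 64 >= 64 stops */
        printf("%d\n", clamp_order(7, 65536));
        return 0;
}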
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 9b436c21195e..d008f638b2cd 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -28,8 +28,12 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
+#include <linux/security.h>
#include <asm/pgalloc.h>
+unsigned long mmap_rnd_mask;
+unsigned long mmap_align_mask;
+
static unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
@@ -60,8 +64,10 @@ static unsigned long mmap_rnd(void)
{
if (!(current->flags & PF_RANDOMIZE))
return 0;
- /* 8MB randomization for mmap_base */
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
+ if (is_32bit_task())
+ return (get_random_int() & 0x7ff) << PAGE_SHIFT;
+ else
+ return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(void)
@@ -81,6 +87,106 @@ static inline unsigned long mmap_base(void)
return STACK_TOP - stack_maxrandom_size() - mmap_rnd() - gap;
}
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr = addr0;
+ struct vm_unmapped_area_info info;
+ int do_color_align;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ return addr;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = !is_32bit_task();
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (addr & ~PAGE_MASK) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+
+unsigned long randomize_et_dyn(void)
+{
+ unsigned long base;
+
+ base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+ return base + mmap_rnd();
+}
+
#ifndef CONFIG_64BIT
/*
@@ -177,4 +283,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
}
}
+static int __init setup_mmap_rnd(void)
+{
+ struct cpuid cpu_id;
+
+ get_cpu_id(&cpu_id);
+ switch (cpu_id.machine) {
+ case 0x9672:
+ case 0x2064:
+ case 0x2066:
+ case 0x2084:
+ case 0x2086:
+ case 0x2094:
+ case 0x2096:
+ case 0x2097:
+ case 0x2098:
+ case 0x2817:
+ case 0x2818:
+ case 0x2827:
+ case 0x2828:
+ mmap_rnd_mask = 0x7ffUL;
+ mmap_align_mask = 0UL;
+ break;
+ case 0x2964: /* z13 */
+ default:
+ mmap_rnd_mask = 0x3ff80UL;
+ mmap_align_mask = 0x7fUL;
+ break;
+ }
+ return 0;
+}
+early_initcall(setup_mmap_rnd);
+
#endif
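Some arithmetic behind the z13 masks above, assuming 4 KiB pages: mmap_align_mask = 0x7f aligns mappings to 128 pages (512 KiB), and mmap_rnd_mask = 0x3ff80 keeps the low seven bits clear so the random offset preserves that alignment while still spanning roughly 1 GiB. A demo with an assumed random value:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

int main(void)
{
        unsigned long align_mask = 0x7fUL, rnd_mask = 0x3ff80UL;
        unsigned long fake_random = 0xdeadbeefUL;       /* assumed value */
        unsigned long rnd = (fake_random & rnd_mask) << PAGE_SHIFT;

        printf("alignment: %lu KiB\n",
               ((align_mask + 1) << PAGE_SHIFT) >> 10); /* 512 */
        printf("max offset: %lu MiB\n",
               (rnd_mask << PAGE_SHIFT) >> 20);         /* 1023 */
        printf("offset 0x%lx aligned: %d\n",
               rnd, (rnd & (align_mask << PAGE_SHIFT)) == 0);
        return 0;
}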
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3cf8cc03fff6..b2c1542f2ba2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -527,7 +527,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 53) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
- gaddr & 0xffe0000000000000))
+ gaddr & 0xffe0000000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -535,7 +535,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 42) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
- gaddr & 0xfffffc0000000000))
+ gaddr & 0xfffffc0000000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
@@ -543,7 +543,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
table += (gaddr >> 31) & 0x7ff;
if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
- gaddr & 0xffffffff80000000))
+ gaddr & 0xffffffff80000000UL))
return -ENOMEM;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
index 62c5ea6d8682..8aa271b3d1ad 100644
--- a/arch/s390/pci/pci_mmio.c
+++ b/arch/s390/pci/pci_mmio.c
@@ -55,7 +55,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
@@ -96,7 +96,7 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
ret = get_pfn(mmio_addr, VM_READ, &pfn);
if (ret)
goto out;
- io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
ret = -EFAULT;
if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)