From 62733e5a5a480a893e56fa6133ae90904d857bc4 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:15 +0200 Subject: [S390] cio: move scsw helper functions to header file All scsw helper functions are very short and usage of them shouldn't result in function calls. Therefore we move them to a separate header file. Also saves a lot of EXPORT_SYMBOLs. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/chsc.h | 28 ++ arch/s390/include/asm/cio.h | 223 +--------- arch/s390/include/asm/scsw.h | 956 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 985 insertions(+), 222 deletions(-) create mode 100644 arch/s390/include/asm/scsw.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h index 807997f7414b..4943654ed7fd 100644 --- a/arch/s390/include/asm/chsc.h +++ b/arch/s390/include/asm/chsc.h @@ -125,4 +125,32 @@ struct chsc_cpd_info { #define CHSC_INFO_CPD _IOWR(CHSC_IOCTL_MAGIC, 0x87, struct chsc_cpd_info) #define CHSC_INFO_DCAL _IOWR(CHSC_IOCTL_MAGIC, 0x88, struct chsc_dcal) +#ifdef __KERNEL__ + +struct css_general_char { + u64 : 12; + u32 dynio : 1; /* bit 12 */ + u32 : 28; + u32 aif : 1; /* bit 41 */ + u32 : 3; + u32 mcss : 1; /* bit 45 */ + u32 fcs : 1; /* bit 46 */ + u32 : 1; + u32 ext_mb : 1; /* bit 48 */ + u32 : 7; + u32 aif_tdd : 1; /* bit 56 */ + u32 : 1; + u32 qebsm : 1; /* bit 58 */ + u32 : 8; + u32 aif_osa : 1; /* bit 67 */ + u32 : 14; + u32 cib : 1; /* bit 82 */ + u32 : 5; + u32 fcx : 1; /* bit 88 */ + u32 : 7; +}__attribute__((packed)); + +extern struct css_general_char css_general_characteristics; + +#endif /* __KERNEL__ */ #endif diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index 619bf94b11f1..e85679af54dd 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h @@ -15,228 +15,7 @@ #define LPM_ANYPATH 0xff #define __MAX_CSSID 0 -/** - * struct cmd_scsw - command-mode subchannel status word - * @key: subchannel key - * @sctl: suspend control - * @eswf: esw format - * @cc: deferred condition code - * @fmt: format - * @pfch: prefetch - * @isic: initial-status interruption control - * @alcc: address-limit checking control - * @ssi: suppress-suspended interruption - * @zcc: zero condition code - * @ectl: extended control - * @pno: path not operational - * @res: reserved - * @fctl: function control - * @actl: activity control - * @stctl: status control - * @cpa: channel program address - * @dstat: device status - * @cstat: subchannel status - * @count: residual count - */ -struct cmd_scsw { - __u32 key : 4; - __u32 sctl : 1; - __u32 eswf : 1; - __u32 cc : 2; - __u32 fmt : 1; - __u32 pfch : 1; - __u32 isic : 1; - __u32 alcc : 1; - __u32 ssi : 1; - __u32 zcc : 1; - __u32 ectl : 1; - __u32 pno : 1; - __u32 res : 1; - __u32 fctl : 3; - __u32 actl : 7; - __u32 stctl : 5; - __u32 cpa; - __u32 dstat : 8; - __u32 cstat : 8; - __u32 count : 16; -} __attribute__ ((packed)); - -/** - * struct tm_scsw - transport-mode subchannel status word - * @key: subchannel key - * @eswf: esw format - * @cc: deferred condition code - * @fmt: format - * @x: IRB-format control - * @q: interrogate-complete - * @ectl: extended control - * @pno: path not operational - * @fctl: function control - * @actl: activity control - * @stctl: status control - * @tcw: TCW address - * @dstat: device status - * @cstat: subchannel status - * @fcxs: FCX status - * @schxs: subchannel-extended status - */ -struct tm_scsw { - u32 key:4; - u32 :1; - u32 eswf:1; - u32 cc:2; - 
u32 fmt:3; - u32 x:1; - u32 q:1; - u32 :1; - u32 ectl:1; - u32 pno:1; - u32 :1; - u32 fctl:3; - u32 actl:7; - u32 stctl:5; - u32 tcw; - u32 dstat:8; - u32 cstat:8; - u32 fcxs:8; - u32 schxs:8; -} __attribute__ ((packed)); - -/** - * union scsw - subchannel status word - * @cmd: command-mode SCSW - * @tm: transport-mode SCSW - */ -union scsw { - struct cmd_scsw cmd; - struct tm_scsw tm; -} __attribute__ ((packed)); - -int scsw_is_tm(union scsw *scsw); -u32 scsw_key(union scsw *scsw); -u32 scsw_eswf(union scsw *scsw); -u32 scsw_cc(union scsw *scsw); -u32 scsw_ectl(union scsw *scsw); -u32 scsw_pno(union scsw *scsw); -u32 scsw_fctl(union scsw *scsw); -u32 scsw_actl(union scsw *scsw); -u32 scsw_stctl(union scsw *scsw); -u32 scsw_dstat(union scsw *scsw); -u32 scsw_cstat(union scsw *scsw); -int scsw_is_solicited(union scsw *scsw); -int scsw_is_valid_key(union scsw *scsw); -int scsw_is_valid_eswf(union scsw *scsw); -int scsw_is_valid_cc(union scsw *scsw); -int scsw_is_valid_ectl(union scsw *scsw); -int scsw_is_valid_pno(union scsw *scsw); -int scsw_is_valid_fctl(union scsw *scsw); -int scsw_is_valid_actl(union scsw *scsw); -int scsw_is_valid_stctl(union scsw *scsw); -int scsw_is_valid_dstat(union scsw *scsw); -int scsw_is_valid_cstat(union scsw *scsw); -int scsw_cmd_is_valid_key(union scsw *scsw); -int scsw_cmd_is_valid_sctl(union scsw *scsw); -int scsw_cmd_is_valid_eswf(union scsw *scsw); -int scsw_cmd_is_valid_cc(union scsw *scsw); -int scsw_cmd_is_valid_fmt(union scsw *scsw); -int scsw_cmd_is_valid_pfch(union scsw *scsw); -int scsw_cmd_is_valid_isic(union scsw *scsw); -int scsw_cmd_is_valid_alcc(union scsw *scsw); -int scsw_cmd_is_valid_ssi(union scsw *scsw); -int scsw_cmd_is_valid_zcc(union scsw *scsw); -int scsw_cmd_is_valid_ectl(union scsw *scsw); -int scsw_cmd_is_valid_pno(union scsw *scsw); -int scsw_cmd_is_valid_fctl(union scsw *scsw); -int scsw_cmd_is_valid_actl(union scsw *scsw); -int scsw_cmd_is_valid_stctl(union scsw *scsw); -int scsw_cmd_is_valid_dstat(union scsw *scsw); -int scsw_cmd_is_valid_cstat(union scsw *scsw); -int scsw_cmd_is_solicited(union scsw *scsw); -int scsw_tm_is_valid_key(union scsw *scsw); -int scsw_tm_is_valid_eswf(union scsw *scsw); -int scsw_tm_is_valid_cc(union scsw *scsw); -int scsw_tm_is_valid_fmt(union scsw *scsw); -int scsw_tm_is_valid_x(union scsw *scsw); -int scsw_tm_is_valid_q(union scsw *scsw); -int scsw_tm_is_valid_ectl(union scsw *scsw); -int scsw_tm_is_valid_pno(union scsw *scsw); -int scsw_tm_is_valid_fctl(union scsw *scsw); -int scsw_tm_is_valid_actl(union scsw *scsw); -int scsw_tm_is_valid_stctl(union scsw *scsw); -int scsw_tm_is_valid_dstat(union scsw *scsw); -int scsw_tm_is_valid_cstat(union scsw *scsw); -int scsw_tm_is_valid_fcxs(union scsw *scsw); -int scsw_tm_is_valid_schxs(union scsw *scsw); -int scsw_tm_is_solicited(union scsw *scsw); - -#define SCSW_FCTL_CLEAR_FUNC 0x1 -#define SCSW_FCTL_HALT_FUNC 0x2 -#define SCSW_FCTL_START_FUNC 0x4 - -#define SCSW_ACTL_SUSPENDED 0x1 -#define SCSW_ACTL_DEVACT 0x2 -#define SCSW_ACTL_SCHACT 0x4 -#define SCSW_ACTL_CLEAR_PEND 0x8 -#define SCSW_ACTL_HALT_PEND 0x10 -#define SCSW_ACTL_START_PEND 0x20 -#define SCSW_ACTL_RESUME_PEND 0x40 - -#define SCSW_STCTL_STATUS_PEND 0x1 -#define SCSW_STCTL_SEC_STATUS 0x2 -#define SCSW_STCTL_PRIM_STATUS 0x4 -#define SCSW_STCTL_INTER_STATUS 0x8 -#define SCSW_STCTL_ALERT_STATUS 0x10 - -#define DEV_STAT_ATTENTION 0x80 -#define DEV_STAT_STAT_MOD 0x40 -#define DEV_STAT_CU_END 0x20 -#define DEV_STAT_BUSY 0x10 -#define DEV_STAT_CHN_END 0x08 -#define DEV_STAT_DEV_END 0x04 -#define 
DEV_STAT_UNIT_CHECK 0x02 -#define DEV_STAT_UNIT_EXCEP 0x01 - -#define SCHN_STAT_PCI 0x80 -#define SCHN_STAT_INCORR_LEN 0x40 -#define SCHN_STAT_PROG_CHECK 0x20 -#define SCHN_STAT_PROT_CHECK 0x10 -#define SCHN_STAT_CHN_DATA_CHK 0x08 -#define SCHN_STAT_CHN_CTRL_CHK 0x04 -#define SCHN_STAT_INTF_CTRL_CHK 0x02 -#define SCHN_STAT_CHAIN_CHECK 0x01 - -/* - * architectured values for first sense byte - */ -#define SNS0_CMD_REJECT 0x80 -#define SNS_CMD_REJECT SNS0_CMD_REJEC -#define SNS0_INTERVENTION_REQ 0x40 -#define SNS0_BUS_OUT_CHECK 0x20 -#define SNS0_EQUIPMENT_CHECK 0x10 -#define SNS0_DATA_CHECK 0x08 -#define SNS0_OVERRUN 0x04 -#define SNS0_INCOMPL_DOMAIN 0x01 - -/* - * architectured values for second sense byte - */ -#define SNS1_PERM_ERR 0x80 -#define SNS1_INV_TRACK_FORMAT 0x40 -#define SNS1_EOC 0x20 -#define SNS1_MESSAGE_TO_OPER 0x10 -#define SNS1_NO_REC_FOUND 0x08 -#define SNS1_FILE_PROTECTED 0x04 -#define SNS1_WRITE_INHIBITED 0x02 -#define SNS1_INPRECISE_END 0x01 - -/* - * architectured values for third sense byte - */ -#define SNS2_REQ_INH_WRITE 0x80 -#define SNS2_CORRECTABLE 0x40 -#define SNS2_FIRST_LOG_ERR 0x20 -#define SNS2_ENV_DATA_PRESENT 0x10 -#define SNS2_INPRECISE_END 0x04 +#include /** * struct ccw1 - channel command word diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h new file mode 100644 index 000000000000..de389cb54d28 --- /dev/null +++ b/arch/s390/include/asm/scsw.h @@ -0,0 +1,956 @@ +/* + * Helper functions for scsw access. + * + * Copyright IBM Corp. 2008,2009 + * Author(s): Peter Oberparleiter + */ + +#ifndef _ASM_S390_SCSW_H_ +#define _ASM_S390_SCSW_H_ + +#include +#include +#include + +/** + * struct cmd_scsw - command-mode subchannel status word + * @key: subchannel key + * @sctl: suspend control + * @eswf: esw format + * @cc: deferred condition code + * @fmt: format + * @pfch: prefetch + * @isic: initial-status interruption control + * @alcc: address-limit checking control + * @ssi: suppress-suspended interruption + * @zcc: zero condition code + * @ectl: extended control + * @pno: path not operational + * @res: reserved + * @fctl: function control + * @actl: activity control + * @stctl: status control + * @cpa: channel program address + * @dstat: device status + * @cstat: subchannel status + * @count: residual count + */ +struct cmd_scsw { + __u32 key : 4; + __u32 sctl : 1; + __u32 eswf : 1; + __u32 cc : 2; + __u32 fmt : 1; + __u32 pfch : 1; + __u32 isic : 1; + __u32 alcc : 1; + __u32 ssi : 1; + __u32 zcc : 1; + __u32 ectl : 1; + __u32 pno : 1; + __u32 res : 1; + __u32 fctl : 3; + __u32 actl : 7; + __u32 stctl : 5; + __u32 cpa; + __u32 dstat : 8; + __u32 cstat : 8; + __u32 count : 16; +} __attribute__ ((packed)); + +/** + * struct tm_scsw - transport-mode subchannel status word + * @key: subchannel key + * @eswf: esw format + * @cc: deferred condition code + * @fmt: format + * @x: IRB-format control + * @q: interrogate-complete + * @ectl: extended control + * @pno: path not operational + * @fctl: function control + * @actl: activity control + * @stctl: status control + * @tcw: TCW address + * @dstat: device status + * @cstat: subchannel status + * @fcxs: FCX status + * @schxs: subchannel-extended status + */ +struct tm_scsw { + u32 key:4; + u32 :1; + u32 eswf:1; + u32 cc:2; + u32 fmt:3; + u32 x:1; + u32 q:1; + u32 :1; + u32 ectl:1; + u32 pno:1; + u32 :1; + u32 fctl:3; + u32 actl:7; + u32 stctl:5; + u32 tcw; + u32 dstat:8; + u32 cstat:8; + u32 fcxs:8; + u32 schxs:8; +} __attribute__ ((packed)); + +/** + * union scsw - subchannel status word + * 
@cmd: command-mode SCSW + * @tm: transport-mode SCSW + */ +union scsw { + struct cmd_scsw cmd; + struct tm_scsw tm; +} __attribute__ ((packed)); + +#define SCSW_FCTL_CLEAR_FUNC 0x1 +#define SCSW_FCTL_HALT_FUNC 0x2 +#define SCSW_FCTL_START_FUNC 0x4 + +#define SCSW_ACTL_SUSPENDED 0x1 +#define SCSW_ACTL_DEVACT 0x2 +#define SCSW_ACTL_SCHACT 0x4 +#define SCSW_ACTL_CLEAR_PEND 0x8 +#define SCSW_ACTL_HALT_PEND 0x10 +#define SCSW_ACTL_START_PEND 0x20 +#define SCSW_ACTL_RESUME_PEND 0x40 + +#define SCSW_STCTL_STATUS_PEND 0x1 +#define SCSW_STCTL_SEC_STATUS 0x2 +#define SCSW_STCTL_PRIM_STATUS 0x4 +#define SCSW_STCTL_INTER_STATUS 0x8 +#define SCSW_STCTL_ALERT_STATUS 0x10 + +#define DEV_STAT_ATTENTION 0x80 +#define DEV_STAT_STAT_MOD 0x40 +#define DEV_STAT_CU_END 0x20 +#define DEV_STAT_BUSY 0x10 +#define DEV_STAT_CHN_END 0x08 +#define DEV_STAT_DEV_END 0x04 +#define DEV_STAT_UNIT_CHECK 0x02 +#define DEV_STAT_UNIT_EXCEP 0x01 + +#define SCHN_STAT_PCI 0x80 +#define SCHN_STAT_INCORR_LEN 0x40 +#define SCHN_STAT_PROG_CHECK 0x20 +#define SCHN_STAT_PROT_CHECK 0x10 +#define SCHN_STAT_CHN_DATA_CHK 0x08 +#define SCHN_STAT_CHN_CTRL_CHK 0x04 +#define SCHN_STAT_INTF_CTRL_CHK 0x02 +#define SCHN_STAT_CHAIN_CHECK 0x01 + +/* + * architectured values for first sense byte + */ +#define SNS0_CMD_REJECT 0x80 +#define SNS_CMD_REJECT SNS0_CMD_REJEC +#define SNS0_INTERVENTION_REQ 0x40 +#define SNS0_BUS_OUT_CHECK 0x20 +#define SNS0_EQUIPMENT_CHECK 0x10 +#define SNS0_DATA_CHECK 0x08 +#define SNS0_OVERRUN 0x04 +#define SNS0_INCOMPL_DOMAIN 0x01 + +/* + * architectured values for second sense byte + */ +#define SNS1_PERM_ERR 0x80 +#define SNS1_INV_TRACK_FORMAT 0x40 +#define SNS1_EOC 0x20 +#define SNS1_MESSAGE_TO_OPER 0x10 +#define SNS1_NO_REC_FOUND 0x08 +#define SNS1_FILE_PROTECTED 0x04 +#define SNS1_WRITE_INHIBITED 0x02 +#define SNS1_INPRECISE_END 0x01 + +/* + * architectured values for third sense byte + */ +#define SNS2_REQ_INH_WRITE 0x80 +#define SNS2_CORRECTABLE 0x40 +#define SNS2_FIRST_LOG_ERR 0x20 +#define SNS2_ENV_DATA_PRESENT 0x10 +#define SNS2_INPRECISE_END 0x04 + +/** + * scsw_is_tm - check for transport mode scsw + * @scsw: pointer to scsw + * + * Return non-zero if the specified scsw is a transport mode scsw, zero + * otherwise. + */ +static inline int scsw_is_tm(union scsw *scsw) +{ + return css_general_characteristics.fcx && (scsw->tm.x == 1); +} + +/** + * scsw_key - return scsw key field + * @scsw: pointer to scsw + * + * Return the value of the key field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.key; + else + return scsw->cmd.key; +} + +/** + * scsw_eswf - return scsw eswf field + * @scsw: pointer to scsw + * + * Return the value of the eswf field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.eswf; + else + return scsw->cmd.eswf; +} + +/** + * scsw_cc - return scsw cc field + * @scsw: pointer to scsw + * + * Return the value of the cc field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. 
+ */ +static inline u32 scsw_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cc; + else + return scsw->cmd.cc; +} + +/** + * scsw_ectl - return scsw ectl field + * @scsw: pointer to scsw + * + * Return the value of the ectl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.ectl; + else + return scsw->cmd.ectl; +} + +/** + * scsw_pno - return scsw pno field + * @scsw: pointer to scsw + * + * Return the value of the pno field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.pno; + else + return scsw->cmd.pno; +} + +/** + * scsw_fctl - return scsw fctl field + * @scsw: pointer to scsw + * + * Return the value of the fctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.fctl; + else + return scsw->cmd.fctl; +} + +/** + * scsw_actl - return scsw actl field + * @scsw: pointer to scsw + * + * Return the value of the actl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.actl; + else + return scsw->cmd.actl; +} + +/** + * scsw_stctl - return scsw stctl field + * @scsw: pointer to scsw + * + * Return the value of the stctl field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.stctl; + else + return scsw->cmd.stctl; +} + +/** + * scsw_dstat - return scsw dstat field + * @scsw: pointer to scsw + * + * Return the value of the dstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.dstat; + else + return scsw->cmd.dstat; +} + +/** + * scsw_cstat - return scsw cstat field + * @scsw: pointer to scsw + * + * Return the value of the cstat field of the specified scsw, regardless of + * whether it is a transport mode or command mode scsw. + */ +static inline u32 scsw_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw->tm.cstat; + else + return scsw->cmd.cstat; +} + +/** + * scsw_cmd_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_key(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_sctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_sctl(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified command mode scsw is + * valid, zero otherwise. 
+ */ +static inline int scsw_cmd_is_valid_eswf(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_cmd_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_cc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_cmd_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_fmt(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_pfch - check pfch field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pfch field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_pfch(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_isic - check isic field validity + * @scsw: pointer to scsw + * + * Return non-zero if the isic field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_isic(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_alcc - check alcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the alcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_alcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_ssi - check ssi field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ssi field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_ssi(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_cmd_is_valid_zcc - check zcc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the zcc field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_zcc(union scsw *scsw) +{ + return (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) && + (scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS); +} + +/** + * scsw_cmd_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_ectl(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.stctl & SCSW_STCTL_ALERT_STATUS); +} + +/** + * scsw_cmd_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_pno(union scsw *scsw) +{ + return (scsw->cmd.fctl != 0) && + (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->cmd.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->cmd.actl & SCSW_ACTL_SUSPENDED))); +} + +/** + * scsw_cmd_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified command mode scsw is + * valid, zero otherwise. 
+ */ +static inline int scsw_cmd_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_cmd_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_dstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} + +/** + * scsw_cmd_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified command mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_cmd_is_valid_cstat(union scsw *scsw) +{ + return (scsw->cmd.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->cmd.cc != 3); +} + +/** + * scsw_tm_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_key(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC); +} + +/** + * scsw_tm_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_eswf(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_tm_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_cc(union scsw *scsw) +{ + return (scsw->tm.fctl & SCSW_FCTL_START_FUNC) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND); +} + +/** + * scsw_tm_is_valid_fmt - check fmt field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fmt field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fmt(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_x - check x field validity + * @scsw: pointer to scsw + * + * Return non-zero if the x field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_x(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_q - check q field validity + * @scsw: pointer to scsw + * + * Return non-zero if the q field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_q(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified transport mode scsw is + * valid, zero otherwise. 
+ */ +static inline int scsw_tm_is_valid_ectl(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + !(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.stctl & SCSW_STCTL_ALERT_STATUS); +} + +/** + * scsw_tm_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_pno(union scsw *scsw) +{ + return (scsw->tm.fctl != 0) && + (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (!(scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) || + ((scsw->tm.stctl & SCSW_STCTL_INTER_STATUS) && + (scsw->tm.actl & SCSW_ACTL_SUSPENDED))); +} + +/** + * scsw_tm_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_actl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_stctl(union scsw *scsw) +{ + /* Only valid if pmcw.dnv == 1*/ + return 1; +} + +/** + * scsw_tm_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_dstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} + +/** + * scsw_tm_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_cstat(union scsw *scsw) +{ + return (scsw->tm.stctl & SCSW_STCTL_STATUS_PEND) && + (scsw->tm.cc != 3); +} + +/** + * scsw_tm_is_valid_fcxs - check fcxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fcxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_fcxs(union scsw *scsw) +{ + return 1; +} + +/** + * scsw_tm_is_valid_schxs - check schxs field validity + * @scsw: pointer to scsw + * + * Return non-zero if the schxs field of the specified transport mode scsw is + * valid, zero otherwise. + */ +static inline int scsw_tm_is_valid_schxs(union scsw *scsw) +{ + return (scsw->tm.cstat & (SCHN_STAT_PROG_CHECK | + SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_PROT_CHECK | + SCHN_STAT_CHN_DATA_CHK)); +} + +/** + * scsw_is_valid_actl - check actl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the actl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. 
+ */ +static inline int scsw_is_valid_actl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_actl(scsw); + else + return scsw_cmd_is_valid_actl(scsw); +} + +/** + * scsw_is_valid_cc - check cc field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cc field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_cc(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cc(scsw); + else + return scsw_cmd_is_valid_cc(scsw); +} + +/** + * scsw_is_valid_cstat - check cstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the cstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_cstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_cstat(scsw); + else + return scsw_cmd_is_valid_cstat(scsw); +} + +/** + * scsw_is_valid_dstat - check dstat field validity + * @scsw: pointer to scsw + * + * Return non-zero if the dstat field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_dstat(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_dstat(scsw); + else + return scsw_cmd_is_valid_dstat(scsw); +} + +/** + * scsw_is_valid_ectl - check ectl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the ectl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_ectl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_ectl(scsw); + else + return scsw_cmd_is_valid_ectl(scsw); +} + +/** + * scsw_is_valid_eswf - check eswf field validity + * @scsw: pointer to scsw + * + * Return non-zero if the eswf field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_eswf(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_eswf(scsw); + else + return scsw_cmd_is_valid_eswf(scsw); +} + +/** + * scsw_is_valid_fctl - check fctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the fctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_fctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_fctl(scsw); + else + return scsw_cmd_is_valid_fctl(scsw); +} + +/** + * scsw_is_valid_key - check key field validity + * @scsw: pointer to scsw + * + * Return non-zero if the key field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. 
+ */ +static inline int scsw_is_valid_key(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_key(scsw); + else + return scsw_cmd_is_valid_key(scsw); +} + +/** + * scsw_is_valid_pno - check pno field validity + * @scsw: pointer to scsw + * + * Return non-zero if the pno field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_pno(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_pno(scsw); + else + return scsw_cmd_is_valid_pno(scsw); +} + +/** + * scsw_is_valid_stctl - check stctl field validity + * @scsw: pointer to scsw + * + * Return non-zero if the stctl field of the specified scsw is valid, + * regardless of whether it is a transport mode or command mode scsw. + * Return zero if the field does not contain a valid value. + */ +static inline int scsw_is_valid_stctl(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_valid_stctl(scsw); + else + return scsw_cmd_is_valid_stctl(scsw); +} + +/** + * scsw_cmd_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the command mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +static inline int scsw_cmd_is_solicited(union scsw *scsw) +{ + return (scsw->cmd.cc != 0) || (scsw->cmd.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} + +/** + * scsw_tm_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport mode scsw indicates that the associated + * status condition is solicited, zero if it is unsolicited. + */ +static inline int scsw_tm_is_solicited(union scsw *scsw) +{ + return (scsw->tm.cc != 0) || (scsw->tm.stctl != + (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)); +} + +/** + * scsw_is_solicited - check for solicited scsw + * @scsw: pointer to scsw + * + * Return non-zero if the transport or command mode scsw indicates that the + * associated status condition is solicited, zero if it is unsolicited. + */ +static inline int scsw_is_solicited(union scsw *scsw) +{ + if (scsw_is_tm(scsw)) + return scsw_tm_is_solicited(scsw); + else + return scsw_cmd_is_solicited(scsw); +} + +#endif /* _ASM_S390_SCSW_H_ */ -- cgit v1.2.3 From 05e7ff7da78bad3edc1290ed86b4a37da72ced62 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:31 +0200 Subject: [S390] introduce get_clock_monotonic Introduce get_clock_monotonic() function which can be used to get a (fast) timestamp. Resolution is the same as for get_clock(). The only difference is that the timestamps are monotonic and don't jump backward or forward. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/timex.h | 14 ++++++++++++++ arch/s390/kernel/time.c | 3 ++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index cc21e3e20fd7..24aa1cda20ad 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -90,4 +90,18 @@ unsigned long long monotonic_clock(void); extern u64 sched_clock_base_cc; +/** + * get_clock_monotonic - returns current time in clock rate units + * + * The caller must ensure that preemption is disabled. + * The clock and sched_clock_base get changed via stop_machine. 
+ * Therefore preemption must be disabled when calling this + * function, otherwise the returned value is not guaranteed to + * be monotonic. + */ +static inline unsigned long long get_clock_monotonic(void) +{ + return get_clock_xt() - sched_clock_base_cc; +} + #endif diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index d4c8e9c47c81..54e327e9af04 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -60,6 +60,7 @@ #define TICK_SIZE tick u64 sched_clock_base_cc = -1; /* Force to data section. */ +EXPORT_SYMBOL_GPL(sched_clock_base_cc); static DEFINE_PER_CPU(struct clock_event_device, comparators); @@ -68,7 +69,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace sched_clock(void) { - return ((get_clock_xt() - sched_clock_base_cc) * 125) >> 9; + return (get_clock_monotonic() * 125) >> 9; } /* -- cgit v1.2.3 From 04efc3be767cfed0d348fd598eba8fe5f5bf5fe6 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:32 +0200 Subject: [S390] convert/optimize csum_fold() to C In the meantime gcc generates better code than the old inline assemblies do. Original inline assembly results in: lr %r1,%r2 sr %r3,%r3 lr %r2,%r1 srdl %r2,16 alr %r2,%r3 alr %r1,%r2 srl %r1,16 xilf %r1,65535 llghr %r2,%r1 br %r14 Out of the C code gcc generates this: rll %r1,%r2,16 ar %r1,%r2 srl %r1,16 xilf %r1,65535 llghr %r2,%r1 br %r14 In addition we don't have any static register allocations anymore and gcc is free to shuffle instructions around for better pipeline usage. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/checksum.h | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h index d5a8e7c1477c..6c00f6800a34 100644 --- a/arch/s390/include/asm/checksum.h +++ b/arch/s390/include/asm/checksum.h @@ -78,28 +78,11 @@ csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) */ static inline __sum16 csum_fold(__wsum sum) { -#ifndef __s390x__ - register_pair rp; + u32 csum = (__force u32) sum; - asm volatile( - " slr %N1,%N1\n" /* %0 = H L */ - " lr %1,%0\n" /* %0 = H L, %1 = H L 0 0 */ - " srdl %1,16\n" /* %0 = H L, %1 = 0 H L 0 */ - " alr %1,%N1\n" /* %0 = H L, %1 = L H L 0 */ - " alr %0,%1\n" /* %0 = H+L+C L+H */ - " srl %0,16\n" /* %0 = H+L+C */ - : "+&d" (sum), "=d" (rp) : : "cc"); -#else /* __s390x__ */ - asm volatile( - " sr 3,3\n" /* %0 = H*65536 + L */ - " lr 2,%0\n" /* %0 = H L, 2/3 = H L / 0 0 */ - " srdl 2,16\n" /* %0 = H L, 2/3 = 0 H / L 0 */ - " alr 2,3\n" /* %0 = H L, 2/3 = L H / L 0 */ - " alr %0,2\n" /* %0 = H+L+C L+H */ - " srl %0,16\n" /* %0 = H+L+C */ - : "+&d" (sum) : : "cc", "2", "3"); -#endif /* __s390x__ */ - return (__force __sum16) ~sum; + csum += (csum >> 16) + (csum << 16); + csum >>= 16; + return (__force __sum16) ~csum; } /* -- cgit v1.2.3 From 6ac2a4ddd10d6916785b4c566d521025c855f823 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 11 Sep 2009 10:28:33 +0200 Subject: [S390] improve mcount code Move the 64 bit mount code from mcount.S into mcount64.S and avoid code duplication. 
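A note on the csum_fold() conversion in the checksum patch above: the Internet checksum is a ones'-complement sum, so folding a 32-bit partial sum down to 16 bits means adding the high halfword to the low halfword and feeding any carry back in ("end-around carry"). Adding the halfword-swapped value to the original performs exactly that fold in the upper halfword, which is why the new C body is correct. The following is only an illustrative user-space sketch (plain C, not kernel code) that compares the textbook two-step fold with the shift trick used in the patch:

	#include <assert.h>
	#include <stdint.h>

	/* textbook fold: add high half to low half, then fold the carry back in */
	static uint16_t fold_textbook(uint32_t sum)
	{
		sum = (sum >> 16) + (sum & 0xffff);
		sum += sum >> 16;
		return (uint16_t)~sum;
	}

	/*
	 * the form used in the patch: after the add, the high halfword holds
	 * high + low + end-around carry, and the shift extracts it
	 */
	static uint16_t fold_shift_trick(uint32_t csum)
	{
		csum += (csum >> 16) + (csum << 16);
		csum >>= 16;
		return (uint16_t)~csum;
	}

	int main(void)
	{
		uint32_t samples[] = { 0, 1, 0xffff, 0x10000, 0x1234abcd, 0xffffffff };
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			assert(fold_textbook(samples[i]) == fold_shift_trick(samples[i]));
		return 0;
	}

On s390 the halfword swap is a single rotate (rll), which is why gcc's output shrinks to the short instruction sequence quoted in that commit message.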
Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/mcount.S | 147 +++----------------------------------------- arch/s390/kernel/mcount64.S | 78 +++++++++++++++++++++++ 3 files changed, 89 insertions(+), 138 deletions(-) create mode 100644 arch/s390/kernel/mcount64.S (limited to 'arch/s390') diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index c75ed43b1a18..794955305f38 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_FUNCTION_TRACER) += mcount.o +obj-$(CONFIG_FUNCTION_TRACER) += $(if $(CONFIG_64BIT),mcount64.o,mcount.o) obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 2a0a5e97ba8c..dfe015d7398c 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S @@ -11,111 +11,27 @@ ftrace_stub: br %r14 -#ifdef CONFIG_64BIT - -#ifdef CONFIG_DYNAMIC_FTRACE - .globl _mcount _mcount: - br %r14 - - .globl ftrace_caller -ftrace_caller: - larl %r1,function_trace_stop - icm %r1,0xf,0(%r1) - bnzr %r14 - stmg %r2,%r5,32(%r15) - stg %r14,112(%r15) - lgr %r1,%r15 - aghi %r15,-160 - stg %r1,__SF_BACKCHAIN(%r15) - lgr %r2,%r14 - lg %r3,168(%r15) - larl %r14,ftrace_dyn_func - lg %r14,0(%r14) - basr %r14,%r14 -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - .globl ftrace_graph_caller -ftrace_graph_caller: - # This unconditional branch gets runtime patched. Change only if - # you know what you are doing. See ftrace_enable_graph_caller(). - j 0f - lg %r2,272(%r15) - lg %r3,168(%r15) - brasl %r14,prepare_ftrace_return - stg %r2,168(%r15) -0: -#endif - aghi %r15,160 - lmg %r2,%r5,32(%r15) - lg %r14,112(%r15) +#ifdef CONFIG_DYNAMIC_FTRACE br %r14 .data .globl ftrace_dyn_func ftrace_dyn_func: - .quad ftrace_stub + .long ftrace_stub .previous -#else /* CONFIG_DYNAMIC_FTRACE */ - - .globl _mcount -_mcount: - larl %r1,function_trace_stop - icm %r1,0xf,0(%r1) - bnzr %r14 - stmg %r2,%r5,32(%r15) - stg %r14,112(%r15) - lgr %r1,%r15 - aghi %r15,-160 - stg %r1,__SF_BACKCHAIN(%r15) - lgr %r2,%r14 - lg %r3,168(%r15) - larl %r14,ftrace_trace_function - lg %r14,0(%r14) - basr %r14,%r14 -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - lg %r2,272(%r15) - lg %r3,168(%r15) - brasl %r14,prepare_ftrace_return - stg %r2,168(%r15) -#endif - aghi %r15,160 - lmg %r2,%r5,32(%r15) - lg %r14,112(%r15) - br %r14 - -#endif /* CONFIG_DYNAMIC_FTRACE */ - -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - - .globl return_to_handler -return_to_handler: - stmg %r2,%r5,32(%r15) - lgr %r1,%r15 - aghi %r15,-160 - stg %r1,__SF_BACKCHAIN(%r15) - brasl %r14,ftrace_return_to_handler - aghi %r15,160 - lgr %r14,%r2 - lmg %r2,%r5,32(%r15) - br %r14 - -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ - -#else /* CONFIG_64BIT */ - -#ifdef CONFIG_DYNAMIC_FTRACE - - .globl _mcount -_mcount: - br %r14 - .globl ftrace_caller ftrace_caller: +#endif stm %r2,%r5,16(%r15) bras %r1,2f +#ifdef CONFIG_DYNAMIC_FTRACE +0: .long ftrace_dyn_func +#else 0: .long ftrace_trace_function +#endif 1: .long function_trace_stop 2: l %r2,1b-0b(%r1) icm %r2,0xf,0(%r2) @@ -131,53 +47,13 @@ ftrace_caller: l %r14,0(%r14) basr %r14,%r14 #ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE .globl ftrace_graph_caller ftrace_graph_caller: # This unconditional branch gets runtime patched. Change only if # you know what you are doing. 
See ftrace_enable_graph_caller(). j 1f - bras %r1,0f - .long prepare_ftrace_return -0: l %r2,152(%r15) - l %r4,0(%r1) - l %r3,100(%r15) - basr %r14,%r4 - st %r2,100(%r15) -1: #endif - ahi %r15,96 - l %r14,56(%r15) -3: lm %r2,%r5,16(%r15) - br %r14 - - .data - .globl ftrace_dyn_func -ftrace_dyn_func: - .long ftrace_stub - .previous - -#else /* CONFIG_DYNAMIC_FTRACE */ - - .globl _mcount -_mcount: - stm %r2,%r5,16(%r15) - bras %r1,2f -0: .long ftrace_trace_function -1: .long function_trace_stop -2: l %r2,1b-0b(%r1) - icm %r2,0xf,0(%r2) - jnz 3f - st %r14,56(%r15) - lr %r0,%r15 - ahi %r15,-96 - l %r3,100(%r15) - la %r2,0(%r14) - st %r0,__SF_BACKCHAIN(%r15) - la %r3,0(%r3) - l %r14,0b-0b(%r1) - l %r14,0(%r14) - basr %r14,%r14 -#ifdef CONFIG_FUNCTION_GRAPH_TRACER bras %r1,0f .long prepare_ftrace_return 0: l %r2,152(%r15) @@ -185,14 +61,13 @@ _mcount: l %r3,100(%r15) basr %r14,%r4 st %r2,100(%r15) +1: #endif ahi %r15,96 l %r14,56(%r15) 3: lm %r2,%r5,16(%r15) br %r14 -#endif /* CONFIG_DYNAMIC_FTRACE */ - #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl return_to_handler @@ -211,6 +86,4 @@ return_to_handler: lm %r2,%r5,16(%r15) br %r14 -#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ - -#endif /* CONFIG_64BIT */ +#endif diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S new file mode 100644 index 000000000000..c37211c6092b --- /dev/null +++ b/arch/s390/kernel/mcount64.S @@ -0,0 +1,78 @@ +/* + * Copyright IBM Corp. 2008,2009 + * + * Author(s): Heiko Carstens , + * + */ + +#include + + .globl ftrace_stub +ftrace_stub: + br %r14 + + .globl _mcount +_mcount: +#ifdef CONFIG_DYNAMIC_FTRACE + br %r14 + + .data + .globl ftrace_dyn_func +ftrace_dyn_func: + .quad ftrace_stub + .previous + + .globl ftrace_caller +ftrace_caller: +#endif + larl %r1,function_trace_stop + icm %r1,0xf,0(%r1) + bnzr %r14 + stmg %r2,%r5,32(%r15) + stg %r14,112(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + lgr %r2,%r14 + lg %r3,168(%r15) +#ifdef CONFIG_DYNAMIC_FTRACE + larl %r14,ftrace_dyn_func +#else + larl %r14,ftrace_trace_function +#endif + lg %r14,0(%r14) + basr %r14,%r14 +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#ifdef CONFIG_DYNAMIC_FTRACE + .globl ftrace_graph_caller +ftrace_graph_caller: + # This unconditional branch gets runtime patched. Change only if + # you know what you are doing. See ftrace_enable_graph_caller(). + j 0f +#endif + lg %r2,272(%r15) + lg %r3,168(%r15) + brasl %r14,prepare_ftrace_return + stg %r2,168(%r15) +0: +#endif + aghi %r15,160 + lmg %r2,%r5,32(%r15) + lg %r14,112(%r15) + br %r14 + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + + .globl return_to_handler +return_to_handler: + stmg %r2,%r5,32(%r15) + lgr %r1,%r15 + aghi %r15,-160 + stg %r1,__SF_BACKCHAIN(%r15) + brasl %r14,ftrace_return_to_handler + aghi %r15,160 + lgr %r14,%r2 + lmg %r2,%r5,32(%r15) + br %r14 + +#endif -- cgit v1.2.3 From 12751058515860ed43c8f874ebcb2097b323736a Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:34 +0200 Subject: [S390] atomic ops: add effecient atomic64 support for 31 bit Use compare double and swap to implement efficient atomic64 ops for 31 bit. 
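The 31-bit implementation added below is built around a read/compute/compare-and-swap retry loop on the full 64-bit counter, with the CDS (compare double and swap) instruction providing the atomic 64-bit compare-and-swap. A hedged, user-space sketch of that pattern follows; GCC's __atomic builtins stand in for CDS here, and the function name is illustrative rather than the kernel API:

	#include <stdint.h>

	static int64_t atomic64_add_return_sketch(int64_t i, int64_t *counter)
	{
		int64_t old, new;

		do {
			old = __atomic_load_n(counter, __ATOMIC_RELAXED);
			new = old + i;
			/* retry if another CPU changed the counter in between */
		} while (!__atomic_compare_exchange_n(counter, &old, new, 0,
						      __ATOMIC_SEQ_CST,
						      __ATOMIC_RELAXED));
		return new;
	}

atomic64_add_return(), atomic64_sub_return() and the mask operations in the patch all follow this same loop, differing only in how the new value is computed.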
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 1 - arch/s390/include/asm/atomic.h | 164 +++++++++++++++++++++++++++++++---------- 2 files changed, 127 insertions(+), 38 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2ae5d72f47ed..47836b945d03 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -95,7 +95,6 @@ config S390 select HAVE_ARCH_TRACEHOOK select INIT_ALL_POSSIBLE select HAVE_PERF_COUNTERS - select GENERIC_ATOMIC64 if !64BIT config SCHED_OMIT_FRAME_POINTER bool diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index c7d0abfb0f00..b491d5e963cf 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -1,28 +1,20 @@ #ifndef __ARCH_S390_ATOMIC__ #define __ARCH_S390_ATOMIC__ -#include -#include - /* - * include/asm-s390/atomic.h + * Copyright 1999,2009 IBM Corp. + * Author(s): Martin Schwidefsky , + * Denis Joseph Barrow, + * Arnd Bergmann , * - * S390 version - * Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), - * Denis Joseph Barrow, - * Arnd Bergmann (arndb@de.ibm.com) - * - * Derived from "include/asm-i386/bitops.h" - * Copyright (C) 1992, Linus Torvalds + * Atomic operations that C can't guarantee us. + * Useful for resource counting etc. + * s390 uses 'Compare And Swap' for atomicity in SMP enviroment. * */ -/* - * Atomic operations that C can't guarantee us. Useful for - * resource counting etc.. - * S390 uses 'Compare And Swap' for atomicity in SMP enviroment - */ +#include +#include #define ATOMIC_INIT(i) { (i) } @@ -146,9 +138,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) #undef __CS_LOOP -#ifdef __s390x__ #define ATOMIC64_INIT(i) { (i) } +#ifdef CONFIG_64BIT + #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) #define __CSG_LOOP(ptr, op_val, op_string) ({ \ @@ -202,21 +195,11 @@ static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "agr"); } -#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) -#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) -#define atomic64_inc(_v) atomic64_add_return(1, _v) -#define atomic64_inc_return(_v) atomic64_add_return(1, _v) -#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) { return __CSG_LOOP(v, i, "sgr"); } -#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) -#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) -#define atomic64_dec(_v) atomic64_sub_return(1, _v) -#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) -#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) { @@ -249,6 +232,111 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v, return old; } +#undef __CSG_LOOP + +#else /* CONFIG_64BIT */ + +typedef struct { + long long counter; +} atomic64_t; + +static inline long long atomic64_read(const atomic64_t *v) +{ + register_pair rp; + + asm volatile( + " lm %0,%N0,0(%1)" + : "=&d" (rp) + : "a" (&v->counter), "m" (v->counter) + ); + return rp.pair; +} + +static inline void atomic64_set(atomic64_t *v, long long i) +{ + register_pair rp = {.pair = i}; + + asm volatile( + " stm %1,%N1,0(%2)" + : "=m" (v->counter) + : "d" (rp), "a" (&v->counter) + ); +} + +static inline long 
long atomic64_xchg(atomic64_t *v, long long new) +{ + register_pair rp_new = {.pair = new}; + register_pair rp_old; + + asm volatile( + " lm %0,%N0,0(%2)\n" + "0: cds %0,%3,0(%2)\n" + " jl 0b\n" + : "=&d" (rp_old), "+m" (v->counter) + : "a" (&v->counter), "d" (rp_new) + : "cc"); + return rp_old.pair; +} + +static inline long long atomic64_cmpxchg(atomic64_t *v, + long long old, long long new) +{ + register_pair rp_old = {.pair = old}; + register_pair rp_new = {.pair = new}; + + asm volatile( + " cds %0,%3,0(%2)" + : "+&d" (rp_old), "+m" (v->counter) + : "a" (&v->counter), "d" (rp_new) + : "cc"); + return rp_old.pair; +} + + +static inline long long atomic64_add_return(long long i, atomic64_t *v) +{ + long long old, new; + + do { + old = atomic64_read(v); + new = old + i; + } while (atomic64_cmpxchg(v, old, new) != old); + return new; +} + +static inline long long atomic64_sub_return(long long i, atomic64_t *v) +{ + long long old, new; + + do { + old = atomic64_read(v); + new = old - i; + } while (atomic64_cmpxchg(v, old, new) != old); + return new; +} + +static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v) +{ + long long old, new; + + do { + old = atomic64_read(v); + new = old | mask; + } while (atomic64_cmpxchg(v, old, new) != old); +} + +static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) +{ + long long old, new; + + do { + old = atomic64_read(v); + new = old & mask; + } while (atomic64_cmpxchg(v, old, new) != old); +} + +#endif /* CONFIG_64BIT */ + static __inline__ int atomic64_add_unless(atomic64_t *v, long long a, long long u) { @@ -265,15 +353,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, return c != u; } -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) - -#undef __CSG_LOOP - -#else /* __s390x__ */ - -#include - -#endif /* __s390x__ */ +#define atomic64_add(_i, _v) atomic64_add_return(_i, _v) +#define atomic64_add_negative(_i, _v) (atomic64_add_return(_i, _v) < 0) +#define atomic64_inc(_v) atomic64_add_return(1, _v) +#define atomic64_inc_return(_v) atomic64_add_return(1, _v) +#define atomic64_inc_and_test(_v) (atomic64_add_return(1, _v) == 0) +#define atomic64_sub(_i, _v) atomic64_sub_return(_i, _v) +#define atomic64_sub_and_test(_i, _v) (atomic64_sub_return(_i, _v) == 0) +#define atomic64_dec(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_return(_v) atomic64_sub_return(1, _v) +#define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0) +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() -- cgit v1.2.3 From bfe3349b516df011dcf6462b0fd748a6f5c2e8af Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:35 +0200 Subject: [S390] atomic ops: small cleanups Couple of coding style fixes, replace __inline__ with inline and remove #ifdef __KERNEL_- since the header file isn't exported. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/atomic.h | 41 +++++++++++++++++++---------------------- 1 file changed, 19 insertions(+), 22 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index b491d5e963cf..ae7c8f9f94a5 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h @@ -18,8 +18,6 @@ #define ATOMIC_INIT(i) { (i) } -#ifdef __KERNEL__ - #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) #define __CS_LOOP(ptr, op_val, op_string) ({ \ @@ -69,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i) barrier(); } -static __inline__ int atomic_add_return(int i, atomic_t * v) +static inline int atomic_add_return(int i, atomic_t *v) { return __CS_LOOP(v, i, "ar"); } @@ -79,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) #define atomic_inc_return(_v) atomic_add_return(1, _v) #define atomic_inc_and_test(_v) (atomic_add_return(1, _v) == 0) -static __inline__ int atomic_sub_return(int i, atomic_t * v) +static inline int atomic_sub_return(int i, atomic_t *v) { return __CS_LOOP(v, i, "sr"); } @@ -89,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) #define atomic_dec_return(_v) atomic_sub_return(1, _v) #define atomic_dec_and_test(_v) (atomic_sub_return(1, _v) == 0) -static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v) +static inline void atomic_clear_mask(unsigned long mask, atomic_t *v) { - __CS_LOOP(v, ~mask, "nr"); + __CS_LOOP(v, ~mask, "nr"); } -static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v) +static inline void atomic_set_mask(unsigned long mask, atomic_t *v) { - __CS_LOOP(v, mask, "or"); + __CS_LOOP(v, mask, "or"); } #define atomic_xchg(v, new) (xchg(&((v)->counter), new)) -static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) +static inline int atomic_cmpxchg(atomic_t *v, int old, int new) { #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) asm volatile( @@ -119,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new) return old; } -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) +static inline int atomic_add_unless(atomic_t *v, int a, int u) { int c, old; c = atomic_read(v); @@ -155,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) : "=&d" (old_val), "=&d" (new_val), \ "=Q" (((atomic_t *)(ptr))->counter) \ : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \ - : "cc", "memory" ); \ + : "cc", "memory"); \ new_val; \ }) @@ -173,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) "=m" (((atomic_t *)(ptr))->counter) \ : "a" (ptr), "d" (op_val), \ "m" (((atomic_t *)(ptr))->counter) \ - : "cc", "memory" ); \ + : "cc", "memory"); \ new_val; \ }) @@ -191,29 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i) barrier(); } -static __inline__ long long atomic64_add_return(long long i, atomic64_t * v) +static inline long long atomic64_add_return(long long i, atomic64_t *v) { return __CSG_LOOP(v, i, "agr"); } -static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v) +static inline long long atomic64_sub_return(long long i, atomic64_t *v) { return __CSG_LOOP(v, i, "sgr"); } -static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v) +static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v) { - __CSG_LOOP(v, ~mask, "ngr"); + __CSG_LOOP(v, ~mask, "ngr"); } -static __inline__ 
void atomic64_set_mask(unsigned long mask, atomic64_t * v) +static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v) { - __CSG_LOOP(v, mask, "ogr"); + __CSG_LOOP(v, mask, "ogr"); } #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -static __inline__ long long atomic64_cmpxchg(atomic64_t *v, +static inline long long atomic64_cmpxchg(atomic64_t *v, long long old, long long new) { #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2) @@ -337,8 +335,7 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v) #endif /* CONFIG_64BIT */ -static __inline__ int atomic64_add_unless(atomic64_t *v, - long long a, long long u) +static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) { long long c, old; c = atomic64_read(v); @@ -371,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, #define smp_mb__after_atomic_inc() smp_mb() #include -#endif /* __KERNEL__ */ + #endif /* __ARCH_S390_ATOMIC__ */ -- cgit v1.2.3 From f3d1263e81daf2be1353baf1ad3f6e25d135f2c5 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:36 +0200 Subject: [S390] hibernation: remove dead file There is no caller of do_after_copyback() anywhere. Remove it. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/power/Makefile | 1 - arch/s390/power/swsusp_64.c | 17 ----------------- 2 files changed, 18 deletions(-) delete mode 100644 arch/s390/power/swsusp_64.c (limited to 'arch/s390') diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile index 973bb45a8fec..ee2f279beff0 100644 --- a/arch/s390/power/Makefile +++ b/arch/s390/power/Makefile @@ -4,5 +4,4 @@ obj-$(CONFIG_HIBERNATION) += suspend.o obj-$(CONFIG_HIBERNATION) += swsusp.o -obj-$(CONFIG_HIBERNATION) += swsusp_64.o obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c deleted file mode 100644 index 9516a517d72f..000000000000 --- a/arch/s390/power/swsusp_64.c +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Support for suspend and resume on s390 - * - * Copyright IBM Corp. 2009 - * - * Author(s): Hans-Joachim Picht - * - */ - -#include -#include - -void do_after_copyback(void) -{ - mb(); -} - -- cgit v1.2.3 From c48ff644f2c86f34f69f382b68b16c6d30854783 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:37 +0200 Subject: [S390] hibernation: merge files and move to kernel/ Merge the nearly empty C files and move everything from power/ to kernel/. That way the files are easier to handle. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Makefile | 3 +- arch/s390/kernel/Makefile | 2 +- arch/s390/kernel/suspend.c | 73 ++++++++++++++++ arch/s390/kernel/swsusp_asm64.S | 184 ++++++++++++++++++++++++++++++++++++++++ arch/s390/power/Makefile | 7 -- arch/s390/power/suspend.c | 40 --------- arch/s390/power/swsusp.c | 42 --------- arch/s390/power/swsusp_asm64.S | 184 ---------------------------------------- 8 files changed, 259 insertions(+), 276 deletions(-) create mode 100644 arch/s390/kernel/suspend.c create mode 100644 arch/s390/kernel/swsusp_asm64.S delete mode 100644 arch/s390/power/Makefile delete mode 100644 arch/s390/power/suspend.c delete mode 100644 arch/s390/power/swsusp.c delete mode 100644 arch/s390/power/swsusp_asm64.S (limited to 'arch/s390') diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0ff387cebf88..fc8fb20e7fc0 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -88,8 +88,7 @@ LDFLAGS_vmlinux := -e start head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ - arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ - arch/s390/power/ + arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ libs-y += arch/s390/lib/ drivers-y += drivers/s390/ diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 794955305f38..c7be8e10b87e 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -32,7 +32,7 @@ extra-y += head.o init_task.o vmlinux.lds obj-$(CONFIG_MODULES) += s390_ksyms.o module.o obj-$(CONFIG_SMP) += smp.o topology.o - +obj-$(CONFIG_HIBERNATION) += suspend.o swsusp_asm64.o obj-$(CONFIG_AUDIT) += audit.o compat-obj-$(CONFIG_AUDIT) += compat_audit.o obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \ diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c new file mode 100644 index 000000000000..086bee970cae --- /dev/null +++ b/arch/s390/kernel/suspend.c @@ -0,0 +1,73 @@ +/* + * Suspend support specific for s390. + * + * Copyright IBM Corp. 2009 + * + * Author(s): Hans-Joachim Picht + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * References to section boundaries + */ +extern const void __nosave_begin, __nosave_end; + +/* + * check if given pfn is in the 'nosave' or in the read only NSS section + */ +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; + unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) + >> PAGE_SHIFT; + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); + + if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) + return 1; + if (pfn >= stext_pfn && pfn <= eshared_pfn) { + if (ipl_info.type == IPL_TYPE_NSS) + return 1; + } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) + return 1; + return 0; +} + +void save_processor_state(void) +{ + /* swsusp_arch_suspend() actually saves all cpu register contents. + * Machine checks must be disabled since swsusp_arch_suspend() stores + * register contents to their lowcore save areas. That's the same + * place where register contents on machine checks would be saved. + * To avoid register corruption disable machine checks. + * We must also disable machine checks in the new psw mask for + * program checks, since swsusp_arch_suspend() may generate program + * checks. Disabling machine checks for all other new psw masks is + * just paranoia. 
+ */ + local_mcck_disable(); + /* Disable lowcore protection */ + __ctl_clear_bit(0,28); + S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; + S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; +} + +void restore_processor_state(void) +{ + S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK; + S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK; + /* Enable lowcore protection */ + __ctl_set_bit(0,28); + local_mcck_enable(); +} diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S new file mode 100644 index 000000000000..7cd6b096f0d1 --- /dev/null +++ b/arch/s390/kernel/swsusp_asm64.S @@ -0,0 +1,184 @@ +/* + * S390 64-bit swsusp implementation + * + * Copyright IBM Corp. 2009 + * + * Author(s): Hans-Joachim Picht + * Michael Holzheu + */ + +#include +#include +#include + +/* + * Save register context in absolute 0 lowcore and call swsusp_save() to + * create in-memory kernel image. The context is saved in the designated + * "store status" memory locations (see POP). + * We return from this function twice. The first time during the suspend to + * disk process. The second time via the swsusp_arch_resume() function + * (see below) in the resume process. + * This function runs with disabled interrupts. + */ + .section .text + .align 4 + .globl swsusp_arch_suspend +swsusp_arch_suspend: + stmg %r6,%r15,__SF_GPRS(%r15) + lgr %r1,%r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r1,__SF_BACKCHAIN(%r15) + + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + + /* Store prefix register on stack */ + stpx __SF_EMPTY(%r15) + + /* Save prefix register contents for lowcore */ + llgf %r4,__SF_EMPTY(%r15) + + /* Get pointer to save area */ + lghi %r1,0x1000 + + /* Store registers */ + mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ + stfpc 0x31c(%r1) /* store fpu control */ + std 0,0x200(%r1) /* store f0 */ + std 1,0x208(%r1) /* store f1 */ + std 2,0x210(%r1) /* store f2 */ + std 3,0x218(%r1) /* store f3 */ + std 4,0x220(%r1) /* store f4 */ + std 5,0x228(%r1) /* store f5 */ + std 6,0x230(%r1) /* store f6 */ + std 7,0x238(%r1) /* store f7 */ + std 8,0x240(%r1) /* store f8 */ + std 9,0x248(%r1) /* store f9 */ + std 10,0x250(%r1) /* store f10 */ + std 11,0x258(%r1) /* store f11 */ + std 12,0x260(%r1) /* store f12 */ + std 13,0x268(%r1) /* store f13 */ + std 14,0x270(%r1) /* store f14 */ + std 15,0x278(%r1) /* store f15 */ + stam %a0,%a15,0x340(%r1) /* store access registers */ + stctg %c0,%c15,0x380(%r1) /* store control registers */ + stmg %r0,%r15,0x280(%r1) /* store general registers */ + + stpt 0x328(%r1) /* store timer */ + stckc 0x330(%r1) /* store clock comparator */ + + /* Activate DAT */ + stosm __SF_EMPTY(%r15),0x04 + + /* Set prefix page to zero */ + xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) + spx __SF_EMPTY(%r15) + + lghi %r2,0 + lghi %r3,2*PAGE_SIZE + lghi %r5,2*PAGE_SIZE +1: mvcle %r2,%r4,0 + jo 1b + + /* Save image */ + brasl %r14,swsusp_save + + /* Restore prefix register and return */ + lghi %r1,0x1000 + spx 0x318(%r1) + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) + lghi %r2,0 + br %r14 + +/* + * Restore saved memory image to correct place and restore register context. + * Then we return to the function that called swsusp_arch_suspend(). + * swsusp_arch_resume() runs with disabled interrupts. 
+ */ + .globl swsusp_arch_resume +swsusp_arch_resume: + stmg %r6,%r15,__SF_GPRS(%r15) + lgr %r1,%r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r1,__SF_BACKCHAIN(%r15) + +#ifdef CONFIG_SMP + /* Save boot cpu number */ + brasl %r14,smp_get_phys_cpu_id + lgr %r10,%r2 +#endif + /* Deactivate DAT */ + stnsm __SF_EMPTY(%r15),0xfb + + /* Set prefix page to zero */ + xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) + spx __SF_EMPTY(%r15) + + /* Restore saved image */ + larl %r1,restore_pblist + lg %r1,0(%r1) + ltgr %r1,%r1 + jz 2f +0: + lg %r2,8(%r1) + lg %r4,0(%r1) + lghi %r3,PAGE_SIZE + lghi %r5,PAGE_SIZE +1: + mvcle %r2,%r4,0 + jo 1b + lg %r1,16(%r1) + ltgr %r1,%r1 + jnz 0b +2: + ptlb /* flush tlb */ + + /* Restore registers */ + lghi %r13,0x1000 /* %r1 = pointer to save arae */ + + spt 0x328(%r13) /* reprogram timer */ + //sckc 0x330(%r13) /* set clock comparator */ + + lctlg %c0,%c15,0x380(%r13) /* load control registers */ + lam %a0,%a15,0x340(%r13) /* load access registers */ + + lfpc 0x31c(%r13) /* load fpu control */ + ld 0,0x200(%r13) /* load f0 */ + ld 1,0x208(%r13) /* load f1 */ + ld 2,0x210(%r13) /* load f2 */ + ld 3,0x218(%r13) /* load f3 */ + ld 4,0x220(%r13) /* load f4 */ + ld 5,0x228(%r13) /* load f5 */ + ld 6,0x230(%r13) /* load f6 */ + ld 7,0x238(%r13) /* load f7 */ + ld 8,0x240(%r13) /* load f8 */ + ld 9,0x248(%r13) /* load f9 */ + ld 10,0x250(%r13) /* load f10 */ + ld 11,0x258(%r13) /* load f11 */ + ld 12,0x260(%r13) /* load f12 */ + ld 13,0x268(%r13) /* load f13 */ + ld 14,0x270(%r13) /* load f14 */ + ld 15,0x278(%r13) /* load f15 */ + + /* Load old stack */ + lg %r15,0x2f8(%r13) + + /* Pointer to save area */ + lghi %r13,0x1000 + +#ifdef CONFIG_SMP + /* Switch CPUs */ + lgr %r2,%r10 /* get cpu id */ + llgf %r3,0x318(%r13) + brasl %r14,smp_switch_boot_cpu_in_resume +#endif + /* Restore prefix register */ + spx 0x318(%r13) + + /* Activate DAT */ + stosm __SF_EMPTY(%r15),0x04 + + /* Return 0 */ + lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) + lghi %r2,0 + br %r14 diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile deleted file mode 100644 index ee2f279beff0..000000000000 --- a/arch/s390/power/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# -# Makefile for s390 PM support -# - -obj-$(CONFIG_HIBERNATION) += suspend.o -obj-$(CONFIG_HIBERNATION) += swsusp.o -obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c deleted file mode 100644 index b3351eceebbe..000000000000 --- a/arch/s390/power/suspend.c +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Suspend support specific for s390. - * - * Copyright IBM Corp. 
2009 - * - * Author(s): Hans-Joachim Picht - */ - -#include -#include -#include -#include -#include -#include - -/* - * References to section boundaries - */ -extern const void __nosave_begin, __nosave_end; - -/* - * check if given pfn is in the 'nosave' or in the read only NSS section - */ -int pfn_is_nosave(unsigned long pfn) -{ - unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; - unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) - >> PAGE_SHIFT; - unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; - unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); - - if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) - return 1; - if (pfn >= stext_pfn && pfn <= eshared_pfn) { - if (ipl_info.type == IPL_TYPE_NSS) - return 1; - } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) - return 1; - return 0; -} diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c deleted file mode 100644 index bd1f5c6b0b8c..000000000000 --- a/arch/s390/power/swsusp.c +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Support for suspend and resume on s390 - * - * Copyright IBM Corp. 2009 - * - * Author(s): Hans-Joachim Picht - * - */ - -#include - -void save_processor_state(void) -{ - /* swsusp_arch_suspend() actually saves all cpu register contents. - * Machine checks must be disabled since swsusp_arch_suspend() stores - * register contents to their lowcore save areas. That's the same - * place where register contents on machine checks would be saved. - * To avoid register corruption disable machine checks. - * We must also disable machine checks in the new psw mask for - * program checks, since swsusp_arch_suspend() may generate program - * checks. Disabling machine checks for all other new psw masks is - * just paranoia. - */ - local_mcck_disable(); - /* Disable lowcore protection */ - __ctl_clear_bit(0,28); - S390_lowcore.external_new_psw.mask &= ~PSW_MASK_MCHECK; - S390_lowcore.svc_new_psw.mask &= ~PSW_MASK_MCHECK; - S390_lowcore.io_new_psw.mask &= ~PSW_MASK_MCHECK; - S390_lowcore.program_new_psw.mask &= ~PSW_MASK_MCHECK; -} - -void restore_processor_state(void) -{ - S390_lowcore.external_new_psw.mask |= PSW_MASK_MCHECK; - S390_lowcore.svc_new_psw.mask |= PSW_MASK_MCHECK; - S390_lowcore.io_new_psw.mask |= PSW_MASK_MCHECK; - S390_lowcore.program_new_psw.mask |= PSW_MASK_MCHECK; - /* Enable lowcore protection */ - __ctl_set_bit(0,28); - local_mcck_enable(); -} diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S deleted file mode 100644 index b26df5c5933e..000000000000 --- a/arch/s390/power/swsusp_asm64.S +++ /dev/null @@ -1,184 +0,0 @@ -/* - * S390 64-bit swsusp implementation - * - * Copyright IBM Corp. 2009 - * - * Author(s): Hans-Joachim Picht - * Michael Holzheu - */ - -#include -#include -#include - -/* - * Save register context in absolute 0 lowcore and call swsusp_save() to - * create in-memory kernel image. The context is saved in the designated - * "store status" memory locations (see POP). - * We return from this function twice. The first time during the suspend to - * disk process. The second time via the swsusp_arch_resume() function - * (see below) in the resume process. - * This function runs with disabled interrupts. 
- */ - .section .text - .align 2 - .globl swsusp_arch_suspend -swsusp_arch_suspend: - stmg %r6,%r15,__SF_GPRS(%r15) - lgr %r1,%r15 - aghi %r15,-STACK_FRAME_OVERHEAD - stg %r1,__SF_BACKCHAIN(%r15) - - /* Deactivate DAT */ - stnsm __SF_EMPTY(%r15),0xfb - - /* Store prefix register on stack */ - stpx __SF_EMPTY(%r15) - - /* Save prefix register contents for lowcore */ - llgf %r4,__SF_EMPTY(%r15) - - /* Get pointer to save area */ - lghi %r1,0x1000 - - /* Store registers */ - mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ - stfpc 0x31c(%r1) /* store fpu control */ - std 0,0x200(%r1) /* store f0 */ - std 1,0x208(%r1) /* store f1 */ - std 2,0x210(%r1) /* store f2 */ - std 3,0x218(%r1) /* store f3 */ - std 4,0x220(%r1) /* store f4 */ - std 5,0x228(%r1) /* store f5 */ - std 6,0x230(%r1) /* store f6 */ - std 7,0x238(%r1) /* store f7 */ - std 8,0x240(%r1) /* store f8 */ - std 9,0x248(%r1) /* store f9 */ - std 10,0x250(%r1) /* store f10 */ - std 11,0x258(%r1) /* store f11 */ - std 12,0x260(%r1) /* store f12 */ - std 13,0x268(%r1) /* store f13 */ - std 14,0x270(%r1) /* store f14 */ - std 15,0x278(%r1) /* store f15 */ - stam %a0,%a15,0x340(%r1) /* store access registers */ - stctg %c0,%c15,0x380(%r1) /* store control registers */ - stmg %r0,%r15,0x280(%r1) /* store general registers */ - - stpt 0x328(%r1) /* store timer */ - stckc 0x330(%r1) /* store clock comparator */ - - /* Activate DAT */ - stosm __SF_EMPTY(%r15),0x04 - - /* Set prefix page to zero */ - xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) - spx __SF_EMPTY(%r15) - - lghi %r2,0 - lghi %r3,2*PAGE_SIZE - lghi %r5,2*PAGE_SIZE -1: mvcle %r2,%r4,0 - jo 1b - - /* Save image */ - brasl %r14,swsusp_save - - /* Restore prefix register and return */ - lghi %r1,0x1000 - spx 0x318(%r1) - lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) - lghi %r2,0 - br %r14 - -/* - * Restore saved memory image to correct place and restore register context. - * Then we return to the function that called swsusp_arch_suspend(). - * swsusp_arch_resume() runs with disabled interrupts. 
- */ - .globl swsusp_arch_resume -swsusp_arch_resume: - stmg %r6,%r15,__SF_GPRS(%r15) - lgr %r1,%r15 - aghi %r15,-STACK_FRAME_OVERHEAD - stg %r1,__SF_BACKCHAIN(%r15) - -#ifdef CONFIG_SMP - /* Save boot cpu number */ - brasl %r14,smp_get_phys_cpu_id - lgr %r10,%r2 -#endif - /* Deactivate DAT */ - stnsm __SF_EMPTY(%r15),0xfb - - /* Set prefix page to zero */ - xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) - spx __SF_EMPTY(%r15) - - /* Restore saved image */ - larl %r1,restore_pblist - lg %r1,0(%r1) - ltgr %r1,%r1 - jz 2f -0: - lg %r2,8(%r1) - lg %r4,0(%r1) - lghi %r3,PAGE_SIZE - lghi %r5,PAGE_SIZE -1: - mvcle %r2,%r4,0 - jo 1b - lg %r1,16(%r1) - ltgr %r1,%r1 - jnz 0b -2: - ptlb /* flush tlb */ - - /* Restore registers */ - lghi %r13,0x1000 /* %r1 = pointer to save arae */ - - spt 0x328(%r13) /* reprogram timer */ - //sckc 0x330(%r13) /* set clock comparator */ - - lctlg %c0,%c15,0x380(%r13) /* load control registers */ - lam %a0,%a15,0x340(%r13) /* load access registers */ - - lfpc 0x31c(%r13) /* load fpu control */ - ld 0,0x200(%r13) /* load f0 */ - ld 1,0x208(%r13) /* load f1 */ - ld 2,0x210(%r13) /* load f2 */ - ld 3,0x218(%r13) /* load f3 */ - ld 4,0x220(%r13) /* load f4 */ - ld 5,0x228(%r13) /* load f5 */ - ld 6,0x230(%r13) /* load f6 */ - ld 7,0x238(%r13) /* load f7 */ - ld 8,0x240(%r13) /* load f8 */ - ld 9,0x248(%r13) /* load f9 */ - ld 10,0x250(%r13) /* load f10 */ - ld 11,0x258(%r13) /* load f11 */ - ld 12,0x260(%r13) /* load f12 */ - ld 13,0x268(%r13) /* load f13 */ - ld 14,0x270(%r13) /* load f14 */ - ld 15,0x278(%r13) /* load f15 */ - - /* Load old stack */ - lg %r15,0x2f8(%r13) - - /* Pointer to save area */ - lghi %r13,0x1000 - -#ifdef CONFIG_SMP - /* Switch CPUs */ - lgr %r2,%r10 /* get cpu id */ - llgf %r3,0x318(%r13) - brasl %r14,smp_switch_boot_cpu_in_resume -#endif - /* Restore prefix register */ - spx 0x318(%r13) - - /* Activate DAT */ - stosm __SF_EMPTY(%r15),0x04 - - /* Return 0 */ - lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) - lghi %r2,0 - br %r14 -- cgit v1.2.3 From 684d2fd48e718e70dad21ef7c528649578147e48 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Fri, 11 Sep 2009 10:28:40 +0200 Subject: [S390] kernel: Append scpdata to kernel boot command line Append scpdata to the kernel boot command line. If scpdata starts with the equal sign (=), the kernel boot command line is replaced. (For consistency with zIPL and IPL PARM parameters.) To use scpdata for the kernel boot command line, scpdata must consist of ascii characters only. If scpdata contains other characters, scpdata is not appended to the kernel boot command line. In addition, re-IPL is extended for setting scpdata for the next Linux reboot. 
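In outline, the append-versus-replace rule described above behaves like the following simplified sketch (the real implementation is append_to_cmdline() in early.c below; buffer-size checks and the exact '\0' handling are omitted here):

        #include <string.h>

        /* Simplified sketch only: apply SCP data to an existing command line.
         * Assumes both strings are NUL-terminated ASCII and cmdline is large
         * enough. */
        static void apply_scpdata(char *cmdline, const char *scpdata)
        {
                if (scpdata[0] == '=')                  /* leading '=' replaces the line */
                        strcpy(cmdline, scpdata + 1);
                else if (scpdata[0] != '\0') {          /* otherwise append, blank separated */
                        strcat(cmdline, " ");
                        strcat(cmdline, scpdata);
                }
        }
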
Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ipl.h | 5 +- arch/s390/include/asm/setup.h | 2 +- arch/s390/kernel/early.c | 35 +++++++--- arch/s390/kernel/ipl.c | 157 ++++++++++++++++++++++++++++++++++++++---- 4 files changed, 172 insertions(+), 27 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 1171e6d144a3..5e95d95450b3 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -57,6 +57,8 @@ struct ipl_block_fcp { } __attribute__((packed)); #define DIAG308_VMPARM_SIZE 64 +#define DIAG308_SCPDATA_SIZE (PAGE_SIZE - (sizeof(struct ipl_list_hdr) + \ + offsetof(struct ipl_block_fcp, scp_data))) struct ipl_block_ccw { u8 load_parm[8]; @@ -91,7 +93,8 @@ extern void do_halt(void); extern void do_poff(void); extern void ipl_save_parameters(void); extern void ipl_update_parameters(void); -extern void get_ipl_vmparm(char *); +extern size_t append_ipl_vmparm(char *, size_t); +extern size_t append_ipl_scpdata(char *, size_t); enum { IPL_DEVNO_VALID = 1, diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h index 38b0fc221ed7..e37478e87286 100644 --- a/arch/s390/include/asm/setup.h +++ b/arch/s390/include/asm/setup.h @@ -8,7 +8,7 @@ #ifndef _ASM_S390_SETUP_H #define _ASM_S390_SETUP_H -#define COMMAND_LINE_SIZE 1024 +#define COMMAND_LINE_SIZE 4096 #define ARCH_COMMAND_LINE_SIZE 896 diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index cae14c499511..21f3799fe27c 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -81,6 +81,8 @@ asm( " br 14\n" " .size savesys_ipl_nss, .-savesys_ipl_nss\n"); +static __initdata char upper_command_line[COMMAND_LINE_SIZE]; + static noinline __init void create_kernel_nss(void) { unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size; @@ -90,7 +92,6 @@ static noinline __init void create_kernel_nss(void) int response; size_t len; char *savesys_ptr; - char upper_command_line[COMMAND_LINE_SIZE]; char defsys_cmd[DEFSYS_CMD_SIZE]; char savesys_cmd[SAVESYS_CMD_SIZE]; @@ -367,21 +368,35 @@ static __init void rescue_initrd(void) } /* Set up boot command line */ -static void __init setup_boot_command_line(void) +static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t)) { - char *parm = NULL; + char *parm, *delim; + size_t rc, len; + + len = strlen(boot_command_line); + delim = boot_command_line + len; /* '\0' character position */ + parm = boot_command_line + len + 1; /* append right after '\0' */ + + rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1); + if (rc) { + if (*parm == '=') + memmove(boot_command_line, parm + 1, rc); + else + *delim = ' '; /* replace '\0' with space */ + } +} + +static void __init setup_boot_command_line(void) +{ /* copy arch command line */ strlcpy(boot_command_line, COMMAND_LINE, ARCH_COMMAND_LINE_SIZE); /* append IPL PARM data to the boot command line */ - if (MACHINE_IS_VM) { - parm = boot_command_line + strlen(boot_command_line); - *parm++ = ' '; - get_ipl_vmparm(parm); - if (parm[0] == '=') - memmove(boot_command_line, parm + 1, strlen(parm)); - } + if (MACHINE_IS_VM) + append_to_cmdline(append_ipl_vmparm); + + append_to_cmdline(append_ipl_scpdata); } diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 371a2d88f4ac..04451a5e3c15 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -272,17 +272,18 @@ static ssize_t ipl_type_show(struct kobject *kobj, struct kobj_attribute *attr, static struct kobj_attribute 
sys_ipl_type_attr = __ATTR_RO(ipl_type); /* VM IPL PARM routines */ -static void reipl_get_ascii_vmparm(char *dest, +size_t reipl_get_ascii_vmparm(char *dest, size_t size, const struct ipl_parameter_block *ipb) { int i; - int len = 0; + size_t len; char has_lowercase = 0; + len = 0; if ((ipb->ipl_info.ccw.vm_flags & DIAG308_VM_FLAGS_VP_VALID) && (ipb->ipl_info.ccw.vm_parm_len > 0)) { - len = ipb->ipl_info.ccw.vm_parm_len; + len = min_t(size_t, size - 1, ipb->ipl_info.ccw.vm_parm_len); memcpy(dest, ipb->ipl_info.ccw.vm_parm, len); /* If at least one character is lowercase, we assume mixed * case; otherwise we convert everything to lowercase. @@ -299,14 +300,20 @@ static void reipl_get_ascii_vmparm(char *dest, EBCASC(dest, len); } dest[len] = 0; + + return len; } -void get_ipl_vmparm(char *dest) +size_t append_ipl_vmparm(char *dest, size_t size) { + size_t rc; + + rc = 0; if (diag308_set_works && (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_CCW)) - reipl_get_ascii_vmparm(dest, &ipl_block); + rc = reipl_get_ascii_vmparm(dest, size, &ipl_block); else dest[0] = 0; + return rc; } static ssize_t ipl_vm_parm_show(struct kobject *kobj, @@ -314,10 +321,56 @@ static ssize_t ipl_vm_parm_show(struct kobject *kobj, { char parm[DIAG308_VMPARM_SIZE + 1] = {}; - get_ipl_vmparm(parm); + append_ipl_vmparm(parm, sizeof(parm)); return sprintf(page, "%s\n", parm); } +static size_t scpdata_length(const char* buf, size_t count) +{ + while (count) { + if (buf[count - 1] != '\0' && buf[count - 1] != ' ') + break; + count--; + } + return count; +} + +size_t reipl_append_ascii_scpdata(char *dest, size_t size, + const struct ipl_parameter_block *ipb) +{ + size_t count; + size_t i; + + count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, + ipb->ipl_info.fcp.scp_data_len)); + if (!count) + goto out; + + for (i = 0; i < count; i++) + if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { + count = 0; + goto out; + } + + memcpy(dest, ipb->ipl_info.fcp.scp_data, count); +out: + dest[count] = '\0'; + return count; +} + +size_t append_ipl_scpdata(char *dest, size_t len) +{ + size_t rc; + + rc = 0; + if (ipl_block.hdr.pbt == DIAG308_IPL_TYPE_FCP) + rc = reipl_append_ascii_scpdata(dest, len, &ipl_block); + else + dest[0] = 0; + return rc; +} + + static struct kobj_attribute sys_ipl_vm_parm_attr = __ATTR(parm, S_IRUGO, ipl_vm_parm_show, NULL); @@ -553,7 +606,7 @@ static ssize_t reipl_generic_vmparm_show(struct ipl_parameter_block *ipb, { char vmparm[DIAG308_VMPARM_SIZE + 1] = {}; - reipl_get_ascii_vmparm(vmparm, ipb); + reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); return sprintf(page, "%s\n", vmparm); } @@ -626,6 +679,59 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr = /* FCP reipl device attributes */ +static ssize_t reipl_fcp_scpdata_read(struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + size_t size = reipl_block_fcp->ipl_info.fcp.scp_data_len; + void *scp_data = reipl_block_fcp->ipl_info.fcp.scp_data; + + return memory_read_from_buffer(buf, count, &off, scp_data, size); +} + +static ssize_t reipl_fcp_scpdata_write(struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + size_t padding; + size_t scpdata_len; + + if (off < 0) + return -EINVAL; + + if (off >= DIAG308_SCPDATA_SIZE) + return -ENOSPC; + + if (count > DIAG308_SCPDATA_SIZE - off) + count = DIAG308_SCPDATA_SIZE - off; + + memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count); + scpdata_len = off + count; + + if (scpdata_len % 8) { + padding = 8 - 
(scpdata_len % 8); + memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len, + 0, padding); + scpdata_len += padding; + } + + reipl_block_fcp->ipl_info.fcp.scp_data_len = scpdata_len; + reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN + scpdata_len; + reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN + scpdata_len; + + return count; +} + +static struct bin_attribute sys_reipl_fcp_scp_data_attr = { + .attr = { + .name = "scp_data", + .mode = S_IRUGO | S_IWUSR, + }, + .size = PAGE_SIZE, + .read = reipl_fcp_scpdata_read, + .write = reipl_fcp_scpdata_write, +}; + DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n", reipl_block_fcp->ipl_info.fcp.wwpn); DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n", @@ -647,7 +753,6 @@ static struct attribute *reipl_fcp_attrs[] = { }; static struct attribute_group reipl_fcp_attr_group = { - .name = IPL_FCP_STR, .attrs = reipl_fcp_attrs, }; @@ -895,6 +1000,7 @@ static struct kobj_attribute reipl_type_attr = __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store); static struct kset *reipl_kset; +static struct kset *reipl_fcp_kset; static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, const enum ipl_method m) @@ -906,7 +1012,7 @@ static void get_ipl_string(char *dst, struct ipl_parameter_block *ipb, reipl_get_ascii_loadparm(loadparm, ipb); reipl_get_ascii_nss_name(nss_name, ipb); - reipl_get_ascii_vmparm(vmparm, ipb); + reipl_get_ascii_vmparm(vmparm, sizeof(vmparm), ipb); switch (m) { case REIPL_METHOD_CCW_VM: @@ -1076,23 +1182,44 @@ static int __init reipl_fcp_init(void) int rc; if (!diag308_set_works) { - if (ipl_info.type == IPL_TYPE_FCP) + if (ipl_info.type == IPL_TYPE_FCP) { make_attrs_ro(reipl_fcp_attrs); - else + sys_reipl_fcp_scp_data_attr.attr.mode = S_IRUGO; + } else return 0; } reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL); if (!reipl_block_fcp) return -ENOMEM; - rc = sysfs_create_group(&reipl_kset->kobj, &reipl_fcp_attr_group); + + /* sysfs: create fcp kset for mixing attr group and bin attrs */ + reipl_fcp_kset = kset_create_and_add(IPL_FCP_STR, NULL, + &reipl_kset->kobj); + if (!reipl_kset) { + free_page((unsigned long) reipl_block_fcp); + return -ENOMEM; + } + + rc = sysfs_create_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); if (rc) { - free_page((unsigned long)reipl_block_fcp); + kset_unregister(reipl_fcp_kset); + free_page((unsigned long) reipl_block_fcp); return rc; } - if (ipl_info.type == IPL_TYPE_FCP) { + + rc = sysfs_create_bin_file(&reipl_fcp_kset->kobj, + &sys_reipl_fcp_scp_data_attr); + if (rc) { + sysfs_remove_group(&reipl_fcp_kset->kobj, &reipl_fcp_attr_group); + kset_unregister(reipl_fcp_kset); + free_page((unsigned long) reipl_block_fcp); + return rc; + } + + if (ipl_info.type == IPL_TYPE_FCP) memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); - } else { + else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; -- cgit v1.2.3 From 18d00acfe2f3fc5ee62f679eb2e397ae962fe69b Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Fri, 11 Sep 2009 10:28:41 +0200 Subject: [S390] kernel: Convert upper case scpdata to lower case If the CP SET LOADDEV on the 3215 console has been used to specify SCPdata, all data is converted to upper case letters. When scpdata contains upper case letters only, convert all letters to lower case. 
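The rule is the same one already applied to VM parameters: fold to lower case only when the data contains no lower-case character at all, since mixed case was evidently entered deliberately. As a standalone sketch (plain ASCII assumed; the in-kernel version is the has_lowercase loop added to reipl_append_ascii_scpdata() below):

        #include <ctype.h>
        #include <stddef.h>

        /* Fold buf to lower case only if it contains no lower-case character. */
        static void fold_case(char *buf, size_t len)
        {
                size_t i;

                for (i = 0; i < len; i++)
                        if (islower((unsigned char) buf[i]))
                                return;                 /* mixed case: keep as typed */
                for (i = 0; i < len; i++)
                        buf[i] = tolower((unsigned char) buf[i]);
        }
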
Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 04451a5e3c15..ee57a42e6e93 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -340,19 +340,28 @@ size_t reipl_append_ascii_scpdata(char *dest, size_t size, { size_t count; size_t i; + int has_lowercase; count = min(size - 1, scpdata_length(ipb->ipl_info.fcp.scp_data, ipb->ipl_info.fcp.scp_data_len)); if (!count) goto out; - for (i = 0; i < count; i++) + has_lowercase = 0; + for (i = 0; i < count; i++) { if (!isascii(ipb->ipl_info.fcp.scp_data[i])) { count = 0; goto out; } + if (!has_lowercase && islower(ipb->ipl_info.fcp.scp_data[i])) + has_lowercase = 1; + } - memcpy(dest, ipb->ipl_info.fcp.scp_data, count); + if (has_lowercase) + memcpy(dest, ipb->ipl_info.fcp.scp_data, count); + else + for (i = 0; i < count; i++) + dest[i] = tolower(ipb->ipl_info.fcp.scp_data[i]); out: dest[count] = '\0'; return count; -- cgit v1.2.3 From a8c3cb4955d1a051732ead9ecf8bcffc8e7c039d Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Fri, 11 Sep 2009 10:28:42 +0200 Subject: [S390] move (io|sysc)_restore_trace_psw into .data section The sysc_restore_trace_psw and io_restore_trace_psw storage locations are created in the .text section. When creating and IPLing from a named saved system (NSS), writing to these locations causes a protection exception (because the .text section is mapped as shared read-only in the NSS). To permit write access, move the storage locations into the .data section. The problem occurs only when CONFIG_TRACE_IRQFLAGS is set. The git commmit that has introduced these variables is: 411788ea7fca01ee803af8225ac35807b4d02050 Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 16 ++++++++++++++-- arch/s390/kernel/entry64.S | 4 ++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c4c80a22bc1f..f78580a74039 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -278,7 +278,8 @@ sysc_return: bnz BASED(sysc_work) # there is work to do (signals etc.) sysc_restore: #ifdef CONFIG_TRACE_IRQFLAGS - la %r1,BASED(sysc_restore_trace_psw) + la %r1,BASED(sysc_restore_trace_psw_addr) + l %r1,0(%r1) lpsw 0(%r1) sysc_restore_trace: TRACE_IRQS_CHECK @@ -289,10 +290,15 @@ sysc_leave: sysc_done: #ifdef CONFIG_TRACE_IRQFLAGS +sysc_restore_trace_psw_addr: + .long sysc_restore_trace_psw + + .section .data,"aw",@progbits .align 8 .globl sysc_restore_trace_psw sysc_restore_trace_psw: .long 0, sysc_restore_trace + 0x80000000 + .previous #endif # @@ -606,7 +612,8 @@ io_return: bnz BASED(io_work) # there is work to do (signals etc.) 
io_restore: #ifdef CONFIG_TRACE_IRQFLAGS - la %r1,BASED(io_restore_trace_psw) + la %r1,BASED(io_restore_trace_psw_addr) + l %r1,0(%r1) lpsw 0(%r1) io_restore_trace: TRACE_IRQS_CHECK @@ -617,10 +624,15 @@ io_leave: io_done: #ifdef CONFIG_TRACE_IRQFLAGS +io_restore_trace_psw_addr: + .long io_restore_trace_psw + + .section .data,"aw",@progbits .align 8 .globl io_restore_trace_psw io_restore_trace_psw: .long 0, io_restore_trace + 0x80000000 + .previous #endif # diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index f6618e9e15ef..009ca6175db9 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -284,10 +284,12 @@ sysc_leave: sysc_done: #ifdef CONFIG_TRACE_IRQFLAGS + .section .data,"aw",@progbits .align 8 .globl sysc_restore_trace_psw sysc_restore_trace_psw: .quad 0, sysc_restore_trace + .previous #endif # @@ -595,10 +597,12 @@ io_leave: io_done: #ifdef CONFIG_TRACE_IRQFLAGS + .section .data,"aw",@progbits .align 8 .globl io_restore_trace_psw io_restore_trace_psw: .quad 0, io_restore_trace + .previous #endif # -- cgit v1.2.3 From 26803144666bd2155a19392fa58a7d512d9c0962 Mon Sep 17 00:00:00 2001 From: Tim Abbott Date: Fri, 11 Sep 2009 10:28:43 +0200 Subject: [S390] Use macros for .data.page_aligned. .data.page_aligned should not need a separate output section, so as part of this cleanup I moved into the .data output section in the linker scripts in order to eliminate unnecessary references to the section name. Remove the reference to .data.idt, since nothing is put into the .data.idt section on the s390 architecture. It looks like Cyrill Gorcunov posted a patch to remove the .data.idt code on s390 previously: CCing him and the people who acked that patch in case there's a reason it wasn't applied. Signed-off-by: Tim Abbott Cc: Martin Schwidefsky Acked-by: Cyrill Gorcunov Cc: Sam Ravnborg Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vmlinux.lds.S | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index a53db23ee092..8e023c8c2461 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -60,6 +60,7 @@ SECTIONS } :data .data : { /* Data */ + PAGE_ALIGNED_DATA(PAGE_SIZE) DATA_DATA CONSTRUCTORS } @@ -72,11 +73,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __nosave_end = .; - . = ALIGN(PAGE_SIZE); - .data.page_aligned : { - *(.data.idt) - } - . = ALIGN(0x100); .data.cacheline_aligned : { *(.data.cacheline_aligned) -- cgit v1.2.3 From 04a95f6df9d7753098729ef1464e38552627af9d Mon Sep 17 00:00:00 2001 From: Nelson Elhage Date: Fri, 11 Sep 2009 10:28:44 +0200 Subject: [S390] clean up linker script using new linker script macros. Note that this patch moves .data.init_task inside _edata. In addition, the alignment of .init.ramfs changes: It is now PAGE_ALIGNED and __initramfs_end is arbitrarily aligned; Previously it was only aligned to a 0x100-byte boundary, and always ended on an even byte. This change results in fewer output sections and in some data being reordered, but should have no functional effect. 
Signed-off-by: Nelson Elhage Signed-off-by: Tim Abbott Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: linux-s390@vger.kernel.org Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/vmlinux.lds.S | 83 ++++-------------------------------------- 1 file changed, 7 insertions(+), 76 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 8e023c8c2461..7315f9e67e1d 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -52,51 +52,18 @@ SECTIONS . = ALIGN(PAGE_SIZE); _eshared = .; /* End of shareable data */ - . = ALIGN(16); /* Exception table */ - __ex_table : { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } :data - - .data : { /* Data */ - PAGE_ALIGNED_DATA(PAGE_SIZE) - DATA_DATA - CONSTRUCTORS - } - - . = ALIGN(PAGE_SIZE); - .data_nosave : { - __nosave_begin = .; - *(.data.nosave) - } - . = ALIGN(PAGE_SIZE); - __nosave_end = .; + EXCEPTION_TABLE(16) :data - . = ALIGN(0x100); - .data.cacheline_aligned : { - *(.data.cacheline_aligned) - } + RW_DATA_SECTION(0x100, PAGE_SIZE, THREAD_SIZE) - . = ALIGN(0x100); - .data.read_mostly : { - *(.data.read_mostly) - } _edata = .; /* End of data section */ - . = ALIGN(THREAD_SIZE); /* init_task */ - .data.init_task : { - *(.data.init_task) - } - /* will be freed after init */ . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; - .init.text : { - _sinittext = .; - INIT_TEXT - _einittext = .; - } + + INIT_TEXT_SECTION(PAGE_SIZE) + /* * .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table @@ -107,49 +74,13 @@ SECTIONS /* early.c uses stsi, which requires page aligned data. */ . = ALIGN(PAGE_SIZE); - .init.data : { - INIT_DATA - } - . = ALIGN(0x100); - .init.setup : { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - .initcall.init : { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } - - .con_initcall.init : { - __con_initcall_start = .; - *(.con_initcall.init) - __con_initcall_end = .; - } - SECURITY_INIT - -#ifdef CONFIG_BLK_DEV_INITRD - . = ALIGN(0x100); - .init.ramfs : { - __initramfs_start = .; - *(.init.ramfs) - . = ALIGN(2); - __initramfs_end = .; - } -#endif + INIT_DATA_SECTION(0x100) PERCPU(PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ - /* BSS */ - .bss : { - __bss_start = .; - *(.bss) - . = ALIGN(2); - __bss_stop = .; - } + BSS_SECTION(0, 2, 0) _end = . ; -- cgit v1.2.3 From d3135e0c40c9a13ad0c57783f2b9569c4c00bd26 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:45 +0200 Subject: [S390] kernel: always keep machine flags in lowcore Eleminate the local variable machine_flags and always change machine flags directly in the lowcore. This avoids confusion about when and why the two variables have to be synchronized. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/early.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 21f3799fe27c..734deeaa7361 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -35,8 +35,6 @@ char kernel_nss_name[NSS_NAME_SIZE + 1]; -static unsigned long machine_flags; - static void __init setup_boot_command_line(void); /* @@ -206,12 +204,9 @@ static noinline __init void detect_machine_type(void) /* Running under KVM? 
If not we assume z/VM */ if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3)) - machine_flags |= MACHINE_FLAG_KVM; + S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; else - machine_flags |= MACHINE_FLAG_VM; - - /* Store machine flags for setting up lowcore early */ - S390_lowcore.machine_flags = machine_flags; + S390_lowcore.machine_flags |= MACHINE_FLAG_VM; } static __init void early_pgm_check_handler(void) @@ -246,7 +241,7 @@ static noinline __init void setup_hpage(void) facilities = stfl(); if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29))) return; - machine_flags |= MACHINE_FLAG_HPAGE; + S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE; __ctl_set_bit(0, 23); #endif } @@ -264,7 +259,7 @@ static __init void detect_mvpg(void) EX_TABLE(0b,1b) : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0"); if (!rc) - machine_flags |= MACHINE_FLAG_MVPG; + S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG; #endif } @@ -280,7 +275,7 @@ static __init void detect_ieee(void) EX_TABLE(0b,1b) : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc"); if (!rc) - machine_flags |= MACHINE_FLAG_IEEE; + S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE; #endif } @@ -299,7 +294,7 @@ static __init void detect_csp(void) EX_TABLE(0b,1b) : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2"); if (!rc) - machine_flags |= MACHINE_FLAG_CSP; + S390_lowcore.machine_flags |= MACHINE_FLAG_CSP; #endif } @@ -316,7 +311,7 @@ static __init void detect_diag9c(void) EX_TABLE(0b,1b) : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc"); if (!rc) - machine_flags |= MACHINE_FLAG_DIAG9C; + S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C; } static __init void detect_diag44(void) @@ -331,7 +326,7 @@ static __init void detect_diag44(void) EX_TABLE(0b,1b) : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc"); if (!rc) - machine_flags |= MACHINE_FLAG_DIAG44; + S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44; #endif } @@ -342,11 +337,11 @@ static __init void detect_machine_facilities(void) facilities = stfl(); if (facilities & (1 << 28)) - machine_flags |= MACHINE_FLAG_IDTE; + S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE; if (facilities & (1 << 23)) - machine_flags |= MACHINE_FLAG_PFMF; + S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF; if (facilities & (1 << 4)) - machine_flags |= MACHINE_FLAG_MVCOS; + S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS; #endif } @@ -428,7 +423,6 @@ void __init startup_init(void) setup_hpage(); sclp_facilities_detect(); detect_memory_layout(memory_chunk); - S390_lowcore.machine_flags = machine_flags; #ifdef CONFIG_DYNAMIC_FTRACE S390_lowcore.ftrace_func = (unsigned long)ftrace_caller; #endif -- cgit v1.2.3 From 275c340941991a925969c03ec6b900fd135d09dd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 11 Sep 2009 10:28:46 +0200 Subject: [S390] remove unused irq_cpustat_t defintion No need to defined a irq_cpustat_t type if __ARCH_IRQ_STAT is defined. 
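For context (simplified, not verbatim kernel code): the generic convention keeps softirq state in a per-cpu irq_cpustat_t array and derives local_softirq_pending() from it, while s390 defines __ARCH_IRQ_STAT and maps the accessor straight onto the lowcore, so the typedef removed here was never referenced:

        /* Simplified sketch of the two conventions: */
        #ifndef __ARCH_IRQ_STAT
        extern irq_cpustat_t irq_stat[];                /* generic: per-cpu array */
        #define local_softirq_pending() \
                (irq_stat[smp_processor_id()].__softirq_pending)
        #else                                           /* s390: state in the lowcore */
        #define local_softirq_pending() (S390_lowcore.softirq_pending)
        #endif
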
Signed-off-by: Christoph Hellwig Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/hardirq.h | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 89ec7056da28..498bc3892385 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -18,13 +18,6 @@ #include #include -/* irq_cpustat_t is unused currently, but could be converted - * into a percpu variable instead of storing softirq_pending - * on the lowcore */ -typedef struct { - unsigned int __softirq_pending; -} irq_cpustat_t; - #define local_softirq_pending() (S390_lowcore.softirq_pending) #define __ARCH_IRQ_STAT -- cgit v1.2.3 From 2395ecd98f028b16a6200eb81108a0f67461d16b Mon Sep 17 00:00:00 2001 From: Vitaliy Gusev Date: Fri, 11 Sep 2009 10:28:48 +0200 Subject: [S390] hypfs: remove useless variable qname Local variable 'qname' in the function hypfs_create_file() really is not used for any purpose. Signed-off-by: Vitaliy Gusev Cc: Michael Holzheu Signed-off-by: Andrew Morton Signed-off-by: Martin Schwidefsky --- arch/s390/hypfs/inode.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 5a805df216bb..e760b5b01411 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -355,11 +355,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb, { struct dentry *dentry; struct inode *inode; - struct qstr qname; - qname.name = name; - qname.len = strlen(name); - qname.hash = full_name_hash(name, qname.len); mutex_lock(&parent->d_inode->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) { -- cgit v1.2.3 From 11af97e18ea8c310d73b59f1361e6f04444e05a8 Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Fri, 11 Sep 2009 10:28:53 +0200 Subject: [S390] kernel: Print an error message if kernel NSS cannot be defined If a named saved system (NSS) cannot be defined or saved, print out an error message with the return code of the underlying z/VM CP command. Signed-off-by: Hendrik Brueckner Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/early.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 734deeaa7361..bf8b4ae7ff2d 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -6,6 +6,9 @@ * Heiko Carstens */ +#define KMSG_COMPONENT "setup" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + #include #include #include @@ -16,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -140,6 +144,8 @@ static noinline __init void create_kernel_nss(void) __cpcmd(defsys_cmd, NULL, 0, &response); if (response != 0) { + pr_err("Defining the Linux kernel NSS failed with rc=%d\n", + response); kernel_nss_name[0] = '\0'; return; } @@ -152,8 +158,11 @@ static noinline __init void create_kernel_nss(void) * max SAVESYS_CMD_SIZE * On error: response contains the numeric portion of cp error message. 
* for SAVESYS it will be >= 263 + * for missing privilege class, it will be 1 */ - if (response > SAVESYS_CMD_SIZE) { + if (response > SAVESYS_CMD_SIZE || response == 1) { + pr_err("Saving the Linux kernel NSS failed with rc=%d\n", + response); kernel_nss_name[0] = '\0'; return; } -- cgit v1.2.3 From ad2a5d8e0b518f997af126dd737127bdada90a6f Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Fri, 11 Sep 2009 10:28:54 +0200 Subject: [S390] hypfs: Use "%u" instead of "%d" for unsigned ints in snprintf For printing unsigned integers hypfs uses "%d" in snprintf(). This is wrong. With this patch "%u" is used instead. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/hypfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index e760b5b01411..bd9914b89488 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -422,7 +422,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, char tmp[TMP_SIZE]; struct dentry *dentry; - snprintf(tmp, TMP_SIZE, "%lld\n", (unsigned long long int)value); + snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value); buffer = kstrdup(tmp, GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From c4de0c1a18237c2727dde8ad392e333539b0af3c Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Fri, 11 Sep 2009 10:28:56 +0200 Subject: [S390] kvm: use console_initcall() to initialize s390 virtio console Use a console_initcall() to initialize the s390 virtio console and clean up s390 console initialization in setup.c. Signed-off-by: Hendrik Brueckner Tested-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/kvm_virtio.h | 10 ---------- arch/s390/kernel/setup.c | 10 +++------- drivers/s390/kvm/kvm_virtio.c | 8 ++++++-- 3 files changed, 9 insertions(+), 19 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h index 0503936f101f..acdfdff26611 100644 --- a/arch/s390/include/asm/kvm_virtio.h +++ b/arch/s390/include/asm/kvm_virtio.h @@ -54,14 +54,4 @@ struct kvm_vqconfig { * This is pagesize for historical reasons. 
*/ #define KVM_S390_VIRTIO_RING_ALIGN 4096 -#ifdef __KERNEL__ -/* early virtio console setup */ -#ifdef CONFIG_S390_GUEST -extern void s390_virtio_console_init(void); -#else -static inline void s390_virtio_console_init(void) -{ -} -#endif /* CONFIG_VIRTIO_CONSOLE */ -#endif /* __KERNEL__ */ #endif diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index cbb897bc50bd..9ed13a1ed376 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -156,15 +156,11 @@ __setup("condev=", condev_setup); static void __init set_preferred_console(void) { - if (MACHINE_IS_KVM) { + if (MACHINE_IS_KVM) add_preferred_console("hvc", 0, NULL); - s390_virtio_console_init(); - return; - } - - if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) + else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP) add_preferred_console("ttyS", 0, NULL); - if (CONSOLE_IS_3270) + else if (CONSOLE_IS_3270) add_preferred_console("tty3270", 0, NULL); } diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index e38e5d306faf..2930fc763ac5 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -403,10 +403,14 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) return len; } -void __init s390_virtio_console_init(void) +static int __init s390_virtio_console_init(void) { - virtio_cons_early_init(early_put_chars); + if (!MACHINE_IS_KVM) + return -ENODEV; + return virtio_cons_early_init(early_put_chars); } +console_initcall(s390_virtio_console_init); + /* * We do this after core stuff, but before the drivers. -- cgit v1.2.3 From 50aa98bad056a17655864a4d71ebc32d95c629a7 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Fri, 11 Sep 2009 10:28:57 +0200 Subject: [S390] fix recursive locking on page_table_lock Suzuki Poulose reported the following recursive locking bug on s390: Here is the stack trace : (see Appendix I for more info) [<0000000000406ed6>] _spin_lock+0x52/0x94 [<0000000000103bde>] crst_table_free+0x14e/0x1a4 [<00000000001ba684>] __pmd_alloc+0x114/0x1ec [<00000000001be8d0>] handle_mm_fault+0x2cc/0xb80 [<0000000000407d62>] do_dat_exception+0x2b6/0x3a0 [<0000000000114f8c>] sysc_return+0x0/0x8 [<00000200001642b2>] 0x200001642b2 The page_table_lock is already acquired in __pmd_alloc (mm/memory.c) and it tries to populate the pud/pgd with a new pmd allocated. If another thread populates it before we get a chance, we free the pmd using pmd_free(). On s390x, pmd_free(even pud_free ) is #defined to crst_table_free(), which acquires the page_table_lock to protect the crst_table index updates. Hence this ends up in a recursive locking of the page_table_lock. The solution suggested by Dave Hansen is to use a new spin lock in the mmu context to protect the access to the crst_list and the pgtable_list. 
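Reduced to its essentials, the fix keeps list manipulation under a lock that is private to the mm context, so it can nest safely under page_table_lock (field names as in the diff below; shadow-table handling and error paths are omitted, and the helper name here is only illustrative):

        #include <linux/mm_types.h>
        #include <linux/spinlock.h>
        #include <linux/list.h>

        static void crst_list_del(struct mm_struct *mm, struct page *page)
        {
                spin_lock(&mm->context.list_lock);      /* new lock, not page_table_lock */
                list_del(&page->lru);
                spin_unlock(&mm->context.list_lock);
        }
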
Reported-by: Suzuki Poulose Cc: Dave Hansen Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/mmu.h | 1 + arch/s390/include/asm/pgalloc.h | 1 + arch/s390/mm/pgtable.c | 24 ++++++++++++------------ arch/s390/mm/vmem.c | 1 + 4 files changed, 15 insertions(+), 12 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index 3b59216e6284..03be99919d62 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h @@ -2,6 +2,7 @@ #define __MMU_H typedef struct { + spinlock_t list_lock; struct list_head crst_list; struct list_head pgtable_list; unsigned long asce_bits; diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index b2658b9220fe..ddad5903341c 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -140,6 +140,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { + spin_lock_init(&mm->context.list_lock); INIT_LIST_HEAD(&mm->context.crst_list); INIT_LIST_HEAD(&mm->context.pgtable_list); return (pgd_t *) crst_table_alloc(mm, s390_noexec); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 565667207985..c70215247071 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -78,9 +78,9 @@ unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec) } page->index = page_to_phys(shadow); } - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); list_add(&page->lru, &mm->context.crst_list); - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); return (unsigned long *) page_to_phys(page); } @@ -89,9 +89,9 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) unsigned long *shadow = get_shadow_table(table); struct page *page = virt_to_page(table); - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); list_del(&page->lru); - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); if (shadow) free_pages((unsigned long) shadow, ALLOC_ORDER); free_pages((unsigned long) table, ALLOC_ORDER); @@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) unsigned long bits; bits = (mm->context.noexec || mm->context.has_pgste) ? 3UL : 1UL; - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); page = NULL; if (!list_empty(&mm->context.pgtable_list)) { page = list_first_entry(&mm->context.pgtable_list, @@ -191,7 +191,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) page = NULL; } if (!page) { - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); page = alloc_page(GFP_KERNEL|__GFP_REPEAT); if (!page) return NULL; @@ -202,7 +202,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) clear_table_pgstes(table); else clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); list_add(&page->lru, &mm->context.pgtable_list); } table = (unsigned long *) page_to_phys(page); @@ -213,7 +213,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) page->flags |= bits; if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) list_move_tail(&page->lru, &mm->context.pgtable_list); - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); return table; } @@ -225,7 +225,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) bits = (mm->context.noexec || mm->context.has_pgste) ? 
3UL : 1UL; bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); page = pfn_to_page(__pa(table) >> PAGE_SHIFT); - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); page->flags ^= bits; if (page->flags & FRAG_MASK) { /* Page now has some free pgtable fragments. */ @@ -234,7 +234,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) } else /* All fragments of the 4K page have been freed. */ list_del(&page->lru); - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); if (page) { pgtable_page_dtor(page); __free_page(page); @@ -245,7 +245,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) { struct page *page; - spin_lock(&mm->page_table_lock); + spin_lock(&mm->context.list_lock); /* Free shadow region and segment tables. */ list_for_each_entry(page, &mm->context.crst_list, lru) if (page->index) { @@ -255,7 +255,7 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk) /* "Free" second halves of page tables. */ list_for_each_entry(page, &mm->context.pgtable_list, lru) page->flags &= ~SECOND_HALVES; - spin_unlock(&mm->page_table_lock); + spin_unlock(&mm->context.list_lock); mm->context.noexec = 0; update_mm(mm, tsk); } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index e4868bfc672f..5f91a38d7592 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -331,6 +331,7 @@ void __init vmem_map_init(void) unsigned long start, end; int i; + spin_lock_init(&init_mm.context.list_lock); INIT_LIST_HEAD(&init_mm.context.crst_list); INIT_LIST_HEAD(&init_mm.context.pgtable_list); init_mm.context.noexec = 0; -- cgit v1.2.3 From 0c88ee5b7523e76e290d558c28cd0be48ffad597 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:28:58 +0200 Subject: [S390] Initialize __LC_THREAD_INFO early. "lockdep: Fix backtraces" reveales a bug in early setup code: when lockdep tries to save a stack backtrace before setup_arch has been called the lowcore pointer for the current thread info pointer isn't initialized yet. However our save stack backtrace code relies on it. If the pointer isn't initialized the saved backtrace will have zero entries. lockdep however relies (correctly) on the fact that that cannot happen. A write access to some random memory region is the result. Fix this by initializing the thread info pointer early. 
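In outline (simplified, not the actual unwinder source): the backtrace code bounds its stack walk with the thread_info address cached in the lowcore, so a still-zero pointer makes every frame look out of range and the saved trace comes back empty, which lockdep does not expect:

        #include <asm/lowcore.h>
        #include <asm/thread_info.h>

        /* Simplified range check as used conceptually by the stack walker;
         * S390_lowcore.thread_info is the field the head*.S change now fills
         * in before setup_arch() runs. */
        static inline int on_task_stack(unsigned long sp)
        {
                unsigned long ti = S390_lowcore.thread_info;

                return sp > ti && sp <= ti + THREAD_SIZE;
        }
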
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/head31.S | 1 + arch/s390/kernel/head64.S | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/s390') diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S index 2ced846065b7..602b508cd4c4 100644 --- a/arch/s390/kernel/head31.S +++ b/arch/s390/kernel/head31.S @@ -24,6 +24,7 @@ startup_continue: # Setup stack # l %r15,.Linittu-.LPG1(%r13) + st %r15,__LC_THREAD_INFO # cache thread info in lowcore mvc __LC_CURRENT(4),__TI_task(%r15) ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE st %r15,__LC_KERNEL_STACK # set end of kernel stack diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index 65667b2e65ce..bdcb3f05bcd1 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -92,6 +92,7 @@ startup_continue: # Setup stack # larl %r15,init_thread_union + stg %r15,__LC_THREAD_INFO # cache thread info in lowcore lg %r14,__TI_task(%r15) # cache current in lowcore stg %r14,__LC_CURRENT aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE -- cgit v1.2.3 From f64d04c042193183c6dad98944ae2861b998e8b7 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Fri, 11 Sep 2009 10:28:59 +0200 Subject: [S390] s390dbf: Add description for usage of "%s" in sprintf events Using "%s" in sprintf event functions is dangerous. This patch adds a short description for this issue to the s390 debug feature documentation. Signed-off-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- Documentation/s390/s390dbf.txt | 7 +++++++ arch/s390/include/asm/debug.h | 9 ++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/s390') diff --git a/Documentation/s390/s390dbf.txt b/Documentation/s390/s390dbf.txt index 2d10053dd97e..ae66f9b90a25 100644 --- a/Documentation/s390/s390dbf.txt +++ b/Documentation/s390/s390dbf.txt @@ -495,6 +495,13 @@ and for each vararg a long value. So e.g. for a debug entry with a format string plus two varargs one would need to allocate a (3 * sizeof(long)) byte data area in the debug_register() function. +IMPORTANT: Using "%s" in sprintf event functions is dangerous. You can only +use "%s" in the sprintf event functions, if the memory for the passed string is +available as long as the debug feature exists. The reason behind this is that +due to performance considerations only a pointer to the string is stored in +the debug feature. If you log a string that is freed afterwards, you will get +an OOPS when inspecting the debug feature, because then the debug feature will +access the already freed memory. NOTE: If using the sprintf view do NOT use other event/exception functions than the sprintf-event and -exception functions. diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h index 31ed5686a968..18124b75a7ab 100644 --- a/arch/s390/include/asm/debug.h +++ b/arch/s390/include/asm/debug.h @@ -167,6 +167,10 @@ debug_text_event(debug_info_t* id, int level, const char* txt) return debug_event_common(id,level,txt,strlen(txt)); } +/* + * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are + * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! + */ extern debug_entry_t * debug_sprintf_event(debug_info_t* id,int level,char *string,...) 
__attribute__ ((format(printf, 3, 4))); @@ -206,7 +210,10 @@ debug_text_exception(debug_info_t* id, int level, const char* txt) return debug_exception_common(id,level,txt,strlen(txt)); } - +/* + * IMPORTANT: Use "%s" in sprintf format strings with care! Only pointers are + * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! + */ extern debug_entry_t * debug_sprintf_exception(debug_info_t* id,int level,char *string,...) __attribute__ ((format(printf, 3, 4))); -- cgit v1.2.3 From 5dd1d2ece0250125fec2dcccbfb8ab9bb2ac020c Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:00 +0200 Subject: [S390] use generic scatterlist.h Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/scatterlist.h | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/scatterlist.h b/arch/s390/include/asm/scatterlist.h index 29ec8e28c8df..35d786fe93ae 100644 --- a/arch/s390/include/asm/scatterlist.h +++ b/arch/s390/include/asm/scatterlist.h @@ -1,19 +1 @@ -#ifndef _ASMS390_SCATTERLIST_H -#define _ASMS390_SCATTERLIST_H - -struct scatterlist { -#ifdef CONFIG_DEBUG_SG - unsigned long sg_magic; -#endif - unsigned long page_link; - unsigned int offset; - unsigned int length; -}; - -#ifdef __s390x__ -#define ISA_DMA_THRESHOLD (0xffffffffffffffffUL) -#else -#define ISA_DMA_THRESHOLD (0xffffffffUL) -#endif - -#endif /* _ASMS390X_SCATTERLIST_H */ +#include -- cgit v1.2.3 From 2ddddf3e0a55a7fcd6f240a7416cfcb12dd38b7e Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:01 +0200 Subject: [S390] Enable guest page hinting by default. Get rid of the PAGE_STATES config option and enable guest page hinting by default. It can be disabled by specifying "cmma=off" at the command line. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/Kconfig | 7 ------- arch/s390/include/asm/page.h | 4 ---- arch/s390/include/asm/system.h | 4 ---- arch/s390/mm/Makefile | 4 ++-- arch/s390/mm/page-states.c | 6 ++---- 5 files changed, 4 insertions(+), 21 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 47836b945d03..e030e86ff6a3 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -480,13 +480,6 @@ config CMM_IUCV Select this option to enable the special message interface to the cooperative memory management. -config PAGE_STATES - bool "Unused page notification" - help - This enables the notification of unused pages to the - hypervisor. The ESSA instruction is used to do the states - changes between a page that has content and the unused state. 
- config APPLDATA_BASE bool "Linux - VM Monitor Stream, base infrastructure" depends on PROC_FS diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 3e3594d01f83..5e9daf5d7f22 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -125,8 +125,6 @@ page_get_storage_key(unsigned long addr) return skey; } -#ifdef CONFIG_PAGE_STATES - struct page; void arch_free_page(struct page *page, int order); void arch_alloc_page(struct page *page, int order); @@ -134,8 +132,6 @@ void arch_alloc_page(struct page *page, int order); #define HAVE_ARCH_FREE_PAGE #define HAVE_ARCH_ALLOC_PAGE -#endif - #endif /* !__ASSEMBLY__ */ #define __PAGE_OFFSET 0x0UL diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index 4fb83c1cdb77..379661d2f81a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -109,11 +109,7 @@ extern void pfault_fini(void); #define pfault_fini() do { } while (0) #endif /* CONFIG_PFAULT */ -#ifdef CONFIG_PAGE_STATES extern void cmma_init(void); -#else -static inline void cmma_init(void) { } -#endif #define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index db05661ac895..eec054484419 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile @@ -2,7 +2,7 @@ # Makefile for the linux s390-specific parts of the memory manager. # -obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o +obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \ + page-states.o obj-$(CONFIG_CMM) += cmm.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o -obj-$(CONFIG_PAGE_STATES) += page-states.o diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index fc0ad73ffd90..f92ec203ad92 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -1,6 +1,4 @@ /* - * arch/s390/mm/page-states.c - * * Copyright IBM Corp. 2008 * * Guest page hinting for unused pages. @@ -17,11 +15,12 @@ #define ESSA_SET_STABLE 1 #define ESSA_SET_UNUSED 2 -static int cmma_flag; +static int cmma_flag = 1; static int __init cmma(char *str) { char *parm; + parm = strstrip(str); if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) { cmma_flag = 1; @@ -32,7 +31,6 @@ static int __init cmma(char *str) return 1; return 0; } - __setup("cmma=", cmma); void __init cmma_init(void) -- cgit v1.2.3 From 4bb5e07b68565d7983108993aa23eccf5f1b35fe Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:03 +0200 Subject: [S390] Limit cpu detection to 256 physical cpus. Saves us more than 65k pointless IPIs. 
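The cmma= handling changed in the page-states.c hunk above is only visible in fragments, so here is a hedged sketch of the same pattern - a feature that is built in unconditionally, enabled by default and switched off via a kernel parameter. The flag and function names below are invented; only __setup() and strstrip() are the generic kernel facilities the patch relies on.

#include <linux/init.h>
#include <linux/string.h>

static int my_feature_flag = 1;		/* default on, like cmma_flag */

/* parse "my_feature=on" / "my_feature=off" from the command line */
static int __init my_feature_setup(char *str)
{
	char *parm = strstrip(str);

	if (strcmp(parm, "off") == 0 || strcmp(parm, "no") == 0)
		my_feature_flag = 0;
	else if (strcmp(parm, "on") == 0 || strcmp(parm, "yes") == 0)
		my_feature_flag = 1;
	else
		return 0;	/* value not recognized */
	return 1;
}
__setup("my_feature=", my_feature_setup);

void __init my_feature_init(void)
{
	if (!my_feature_flag)
		return;
	/* probe and enable the facility here, e.g. via ESSA on s390 */
}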
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpu.h | 6 ++++++ arch/s390/kernel/head.S | 1 + arch/s390/kernel/head64.S | 8 +++----- arch/s390/kernel/smp.c | 5 +++-- 4 files changed, 13 insertions(+), 7 deletions(-) create mode 100644 arch/s390/include/asm/cpu.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h new file mode 100644 index 000000000000..702ad46841cf --- /dev/null +++ b/arch/s390/include/asm/cpu.h @@ -0,0 +1,6 @@ +#ifndef _ASM_S390_CPU_H +#define _ASM_S390_CPU_H + +#define MAX_CPU_ADDRESS 255 + +#endif /* _ASM_S390_CPU_H */ diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index ec6882348520..c52b4f7742fa 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -27,6 +27,7 @@ #include #include #include +#include #ifdef CONFIG_64BIT #define ARCH_OFFSET 4 diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S index bdcb3f05bcd1..6a250808092b 100644 --- a/arch/s390/kernel/head64.S +++ b/arch/s390/kernel/head64.S @@ -62,9 +62,9 @@ startup_continue: clr %r11,%r12 je 5f # no more space in prefix array 4: - ahi %r8,1 # next cpu (r8 += 1) - cl %r8,.Llast_cpu-.LPG1(%r13) # is last possible cpu ? - jl 1b # jump if not last cpu + ahi %r8,1 # next cpu (r8 += 1) + chi %r8,MAX_CPU_ADDRESS # is last possible cpu ? + jle 1b # jump if not last cpu 5: lhi %r1,2 # mode 2 = esame (dump) j 6f @@ -130,8 +130,6 @@ startup_continue: #ifdef CONFIG_ZFCPDUMP .Lcurrent_cpu: .long 0x0 -.Llast_cpu: - .long 0x0000ffff .Lpref_arr_ptr: .long zfcpdump_prefix_array #endif /* CONFIG_ZFCPDUMP */ diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index be2cae083406..9261495ca2c3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "entry.h" static struct task_struct *current_set[NR_CPUS]; @@ -300,7 +301,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) logical_cpu = cpumask_first(&avail); if (logical_cpu >= nr_cpu_ids) return 0; - for (cpu_id = 0; cpu_id <= 65535; cpu_id++) { + for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) { if (cpu_known(cpu_id)) continue; __cpu_logical_map[logical_cpu] = cpu_id; @@ -379,7 +380,7 @@ static void __init smp_detect_cpus(void) /* Use sigp detection algorithm if sclp doesn't work. */ if (sclp_get_cpu_info(info)) { smp_use_sigp_detection = 1; - for (cpu = 0; cpu <= 65535; cpu++) { + for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) { if (cpu == boot_cpu_addr) continue; __cpu_logical_map[CPU_INIT_NO] = cpu; -- cgit v1.2.3 From e86a6ed63f46fe8fb555fda531084bca3ef62fd7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:04 +0200 Subject: [S390] Get rid of cpuid.h header file. Merge cpuid.h header file into cpu.h. While at it convert from typedef to struct declaration and also convert cio code to use proper lowcore structure instead of casts. 
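Illustration of the new struct cpuid (definition and diffs follow below): the CPU identification can either be fetched from the hardware with get_cpu_id(), which wraps the STIDP instruction, or read from the copy kept in the lowcore - the latter is what the cio code switches to instead of casting __LC_CPUID. The report_cpu_type() wrapper and its output format are illustrative only.

#include <linux/kernel.h>
#include <asm/cpu.h>
#include <asm/lowcore.h>
#include <asm/processor.h>

static void report_cpu_type(void)
{
	struct cpuid id;

	/* ask the hardware directly */
	get_cpu_id(&id);
	pr_info("stidp: version=%02x ident=%06x machine=%04x\n",
		id.version, id.ident, id.machine);

	/* or use the copy stored in the lowcore during startup, as
	 * css_generate_pgid() now does */
	pr_info("lowcore: ident=%06x machine=%04x\n",
		S390_lowcore.cpu_id.ident, S390_lowcore.cpu_id.machine);
}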
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cpu.h | 20 ++++++++++++++++++++ arch/s390/include/asm/cpuid.h | 25 ------------------------- arch/s390/include/asm/kvm_host.h | 6 +++--- arch/s390/include/asm/lowcore.h | 6 +++--- arch/s390/include/asm/processor.h | 4 ++-- drivers/s390/char/zcore.c | 2 +- drivers/s390/cio/css.c | 4 ++-- 7 files changed, 31 insertions(+), 36 deletions(-) delete mode 100644 arch/s390/include/asm/cpuid.h (limited to 'arch/s390') diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h index 702ad46841cf..471234b90574 100644 --- a/arch/s390/include/asm/cpu.h +++ b/arch/s390/include/asm/cpu.h @@ -1,6 +1,26 @@ +/* + * Copyright IBM Corp. 2000,2009 + * Author(s): Hartmut Penner , + * Martin Schwidefsky , + * Christian Ehrhardt , + */ + #ifndef _ASM_S390_CPU_H #define _ASM_S390_CPU_H #define MAX_CPU_ADDRESS 255 +#ifndef __ASSEMBLY__ + +#include + +struct cpuid +{ + unsigned int version : 8; + unsigned int ident : 24; + unsigned int machine : 16; + unsigned int unused : 16; +} __packed; + +#endif /* __ASSEMBLY__ */ #endif /* _ASM_S390_CPU_H */ diff --git a/arch/s390/include/asm/cpuid.h b/arch/s390/include/asm/cpuid.h deleted file mode 100644 index 07836a2e5222..000000000000 --- a/arch/s390/include/asm/cpuid.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright IBM Corp. 2000,2009 - * Author(s): Hartmut Penner , - * Martin Schwidefsky - * Christian Ehrhardt - */ - -#ifndef _ASM_S390_CPUID_H_ -#define _ASM_S390_CPUID_H_ - -/* - * CPU type and hardware bug flags. Kept separately for each CPU. - * Members of this structure are referenced in head.S, so think twice - * before touching them. [mj] - */ - -typedef struct -{ - unsigned int version : 8; - unsigned int ident : 24; - unsigned int machine : 16; - unsigned int unused : 16; -} __attribute__ ((packed)) cpuid_t; - -#endif /* _ASM_S390_CPUID_H_ */ diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 1cd02f6073a0..698988f69403 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -17,7 +17,7 @@ #include #include #include -#include +#include #define KVM_MAX_VCPUS 64 #define KVM_MEMORY_SLOTS 32 @@ -217,8 +217,8 @@ struct kvm_vcpu_arch { struct hrtimer ckc_timer; struct tasklet_struct tasklet; union { - cpuid_t cpu_id; - u64 stidp_data; + struct cpuid cpu_id; + u64 stidp_data; }; }; diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h index 5046ad6b7a63..6bc9426a6fbf 100644 --- a/arch/s390/include/asm/lowcore.h +++ b/arch/s390/include/asm/lowcore.h @@ -132,7 +132,7 @@ #ifndef __ASSEMBLY__ -#include +#include #include #include @@ -275,7 +275,7 @@ struct _lowcore __u32 user_exec_asce; /* 0x02ac */ /* SMP info area */ - cpuid_t cpu_id; /* 0x02b0 */ + struct cpuid cpu_id; /* 0x02b0 */ __u32 cpu_nr; /* 0x02b8 */ __u32 softirq_pending; /* 0x02bc */ __u32 percpu_offset; /* 0x02c0 */ @@ -380,7 +380,7 @@ struct _lowcore __u64 user_exec_asce; /* 0x0318 */ /* SMP info area */ - cpuid_t cpu_id; /* 0x0320 */ + struct cpuid cpu_id; /* 0x0320 */ __u32 cpu_nr; /* 0x0328 */ __u32 softirq_pending; /* 0x032c */ __u64 percpu_offset; /* 0x0330 */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index c139fa7b8e89..cf8eed3fa779 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -14,7 +14,7 @@ #define __ASM_S390_PROCESSOR_H #include -#include +#include #include #include #include @@ -26,7 +26,7 @@ */ #define current_text_addr() ({ 
void *pc; asm("basr %0,0" : "=a" (pc)); pc; }) -static inline void get_cpu_id(cpuid_t *ptr) +static inline void get_cpu_id(struct cpuid *ptr) { asm volatile("stidp 0(%1)" : "=m" (*ptr) : "a" (ptr)); } diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1bbae433fbd8..c431198bdbc4 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -275,7 +275,7 @@ struct zcore_header { u32 num_pages; u32 pad1; u64 tod; - cpuid_t cpu_id; + struct cpuid cpu_id; u32 arch_id; u32 volnr; u32 build_arch; diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 45858f3b7318..e995123fd805 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -657,8 +657,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high) css->global_pgid.pgid_high.cpu_addr = 0; #endif } - css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident; - css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine; + css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident; + css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine; css->global_pgid.tod_high = tod_high; } -- cgit v1.2.3 From 5c0b912e755caaad555eb6feefdb1124462d8f37 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:05 +0200 Subject: [S390] Remove smp_cpu_not_running. smp_cpu_not_running() and cpu_stopped() are doing the same. Remove one and also get rid of the last hard_smp_processor_id() leftover. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/smp.h | 32 +------------------------------- arch/s390/kernel/smp.c | 34 +++++++++++++++++++--------------- 2 files changed, 20 insertions(+), 46 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index 72137bc907ac..c991fe6473c9 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h @@ -51,32 +51,7 @@ extern void machine_power_off_smp(void); #define PROC_CHANGE_PENALTY 20 /* Schedule penalty */ #define raw_smp_processor_id() (S390_lowcore.cpu_nr) - -/* - * returns 1 if cpu is in stopped/check stopped state or not operational - * returns 0 otherwise - */ -static inline int -smp_cpu_not_running(int cpu) -{ - __u32 status; - - switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { - case sigp_order_code_accepted: - case sigp_status_stored: - /* Check for stopped and check stop state */ - if (status & 0x50) - return 1; - break; - case sigp_not_operational: - return 1; - default: - break; - } - return 0; -} - -#define cpu_logical_map(cpu) (cpu) +#define cpu_logical_map(cpu) (cpu) extern int __cpu_disable (void); extern void __cpu_die (unsigned int cpu); @@ -91,11 +66,6 @@ extern void arch_send_call_function_ipi(cpumask_t mask); #endif -#ifndef CONFIG_SMP -#define hard_smp_processor_id() 0 -#define smp_cpu_not_running(cpu) 1 -#endif - #ifdef CONFIG_HOTPLUG_CPU extern int smp_rescan_cpus(void); #else diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 9261495ca2c3..56c16876b919 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -71,6 +71,23 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void smp_ext_bitcall(int, ec_bit_sig); +static int cpu_stopped(int cpu) +{ + __u32 status; + + switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) { + case sigp_order_code_accepted: + case sigp_status_stored: + /* Check for stopped and check stop state */ + if (status & 0x50) + return 1; + break; + default: + break; + } + return 0; +} + void smp_send_stop(void) { int cpu, rc; @@ -87,7 +104,7 @@ 
void smp_send_stop(void) rc = signal_processor(cpu, sigp_stop); } while (rc == sigp_busy); - while (!smp_cpu_not_running(cpu)) + while (!cpu_stopped(cpu)) cpu_relax(); } } @@ -270,19 +287,6 @@ static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } #endif /* CONFIG_ZFCPDUMP */ -static int cpu_stopped(int cpu) -{ - __u32 status; - - /* Check for stopped state */ - if (signal_processor_ps(&status, 0, cpu, sigp_sense) == - sigp_status_stored) { - if (status & 0x40) - return 1; - } - return 0; -} - static int cpu_known(int cpu_id) { int cpu; @@ -636,7 +640,7 @@ int __cpu_disable(void) void __cpu_die(unsigned int cpu) { /* Wait until target cpu is down */ - while (!smp_cpu_not_running(cpu)) + while (!cpu_stopped(cpu)) cpu_relax(); smp_free_lowcore(cpu); pr_info("Processor %d stopped\n", cpu); -- cgit v1.2.3 From bde69af2ab696eebfac9583ea1e8a46b571e317f Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 11 Sep 2009 10:29:06 +0200 Subject: [S390] Wire up page fault events for software perf counters. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'arch/s390') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index e5e119fe03b2..1abbadd497e1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -10,6 +10,7 @@ * Copyright (C) 1995 Linus Torvalds */ +#include #include #include #include @@ -305,7 +306,7 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write) * interrupts again and then search the VMAs */ local_irq_enable(); - + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); down_read(&mm->mmap_sem); si_code = SEGV_MAPERR; @@ -363,11 +364,15 @@ good_area: } BUG(); } - if (fault & VM_FAULT_MAJOR) + if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; - else + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + regs, address); + } else { tsk->min_flt++; - + perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + regs, address); + } up_read(&mm->mmap_sem); /* * The instruction that caused the program check will -- cgit v1.2.3
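The 0x50 mask in the consolidated cpu_stopped() above combines two sigp sense status bits: "stopped" (0x40) and "check stop" (0x10). A commented restatement of the helper - the mask names are ours (the patch keeps the raw constant), and the sigp enums and signal_processor_ps() are assumed to come from asm/sigp.h as in the existing code:

#include <linux/types.h>
#include <asm/sigp.h>

#define SIGP_STATUS_CHECK_STOP	0x00000010UL
#define SIGP_STATUS_STOPPED	0x00000040UL

static int cpu_stopped_annotated(int cpu)
{
	__u32 status;

	switch (signal_processor_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_order_code_accepted:
	case sigp_status_stored:
		/* 0x50: CPU is stopped or check-stopped */
		if (status & (SIGP_STATUS_STOPPED | SIGP_STATUS_CHECK_STOP))
			return 1;
		break;
	default:
		/* busy, not operational, ...: treat as running */
		break;
	}
	return 0;
}

Note that sigp_not_operational now falls into the default branch and counts as "not stopped"; callers such as __cpu_die() and smp_send_stop() simply keep polling cpu_stopped() until the target CPU reports stopped or check-stopped state.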