From bb664f49f8be17d7b8bf9821144e8a53d7fcfe8a Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Wed, 17 Jun 2009 21:54:47 +0000 Subject: af_iucv: Change if condition in sendmsg() for more readability Change the if condition to exit sendmsg() if the socket in not connected. Signed-off-by: Hendrik Brueckner Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/iucv/af_iucv.c | 163 ++++++++++++++++++++++++++--------------------------- 1 file changed, 81 insertions(+), 82 deletions(-) (limited to 'net') diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a9b3a6f9ea95..42b7198a6883 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -747,108 +747,107 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto out; } - if (sk->sk_state == IUCV_CONNECTED) { - /* initialize defaults */ - cmsg_done = 0; /* check for duplicate headers */ - txmsg.class = 0; - - /* iterate over control messages */ - for (cmsg = CMSG_FIRSTHDR(msg); cmsg; - cmsg = CMSG_NXTHDR(msg, cmsg)) { - - if (!CMSG_OK(msg, cmsg)) { - err = -EINVAL; - goto out; - } + /* Return if the socket is not in connected state */ + if (sk->sk_state != IUCV_CONNECTED) { + err = -ENOTCONN; + goto out; + } - if (cmsg->cmsg_level != SOL_IUCV) - continue; + /* initialize defaults */ + cmsg_done = 0; /* check for duplicate headers */ + txmsg.class = 0; - if (cmsg->cmsg_type & cmsg_done) { - err = -EINVAL; - goto out; - } - cmsg_done |= cmsg->cmsg_type; + /* iterate over control messages */ + for (cmsg = CMSG_FIRSTHDR(msg); cmsg; + cmsg = CMSG_NXTHDR(msg, cmsg)) { - switch (cmsg->cmsg_type) { - case SCM_IUCV_TRGCLS: - if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { - err = -EINVAL; - goto out; - } + if (!CMSG_OK(msg, cmsg)) { + err = -EINVAL; + goto out; + } - /* set iucv message target class */ - memcpy(&txmsg.class, - (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); + if (cmsg->cmsg_level != SOL_IUCV) + continue; - break; + if (cmsg->cmsg_type & cmsg_done) { + err = -EINVAL; + goto out; + } + cmsg_done |= cmsg->cmsg_type; - default: + switch (cmsg->cmsg_type) { + case SCM_IUCV_TRGCLS: + if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { err = -EINVAL; goto out; - break; } - } - /* allocate one skb for each iucv message: - * this is fine for SOCK_SEQPACKET (unless we want to support - * segmented records using the MSG_EOR flag), but - * for SOCK_STREAM we might want to improve it in future */ - if (!(skb = sock_alloc_send_skb(sk, len, - msg->msg_flags & MSG_DONTWAIT, - &err))) - goto out; + /* set iucv message target class */ + memcpy(&txmsg.class, + (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); - if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { - err = -EFAULT; - goto fail; + break; + + default: + err = -EINVAL; + goto out; + break; } + } - /* increment and save iucv message tag for msg_completion cbk */ - txmsg.tag = iucv->send_tag++; - memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); - skb_queue_tail(&iucv->send_skb_q, skb); + /* allocate one skb for each iucv message: + * this is fine for SOCK_SEQPACKET (unless we want to support + * segmented records using the MSG_EOR flag), but + * for SOCK_STREAM we might want to improve it in future */ + skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, + &err); + if (!skb) + goto out; + if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { + err = -EFAULT; + goto fail; + } - if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) - && skb->len <= 7) { - err = iucv_send_iprm(iucv->path, &txmsg, skb); + /* increment and save iucv message tag for 
msg_completion cbk */ + txmsg.tag = iucv->send_tag++; + memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); + skb_queue_tail(&iucv->send_skb_q, skb); - /* on success: there is no message_complete callback - * for an IPRMDATA msg; remove skb from send queue */ - if (err == 0) { - skb_unlink(skb, &iucv->send_skb_q); - kfree_skb(skb); - } + if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) + && skb->len <= 7) { + err = iucv_send_iprm(iucv->path, &txmsg, skb); - /* this error should never happen since the - * IUCV_IPRMDATA path flag is set... sever path */ - if (err == 0x15) { - iucv_path_sever(iucv->path, NULL); - skb_unlink(skb, &iucv->send_skb_q); - err = -EPIPE; - goto fail; - } - } else - err = iucv_message_send(iucv->path, &txmsg, 0, 0, - (void *) skb->data, skb->len); - if (err) { - if (err == 3) { - user_id[8] = 0; - memcpy(user_id, iucv->dst_user_id, 8); - appl_id[8] = 0; - memcpy(appl_id, iucv->dst_name, 8); - pr_err("Application %s on z/VM guest %s" - " exceeds message limit\n", - user_id, appl_id); - } + /* on success: there is no message_complete callback + * for an IPRMDATA msg; remove skb from send queue */ + if (err == 0) { + skb_unlink(skb, &iucv->send_skb_q); + kfree_skb(skb); + } + + /* this error should never happen since the + * IUCV_IPRMDATA path flag is set... sever path */ + if (err == 0x15) { + iucv_path_sever(iucv->path, NULL); skb_unlink(skb, &iucv->send_skb_q); err = -EPIPE; goto fail; } - - } else { - err = -ENOTCONN; - goto out; + } else + err = iucv_message_send(iucv->path, &txmsg, 0, 0, + (void *) skb->data, skb->len); + if (err) { + if (err == 3) { + user_id[8] = 0; + memcpy(user_id, iucv->dst_user_id, 8); + appl_id[8] = 0; + memcpy(appl_id, iucv->dst_name, 8); + pr_err("Application %s on z/VM guest %s" + " exceeds message limit\n", + appl_id, user_id); + } + skb_unlink(skb, &iucv->send_skb_q); + err = -EPIPE; + goto fail; } release_sock(sk); -- cgit v1.2.3 From 0ea920d211e0a870871965418923b08da2025b4a Mon Sep 17 00:00:00 2001 From: Hendrik Brueckner Date: Wed, 17 Jun 2009 21:54:48 +0000 Subject: af_iucv: Return -EAGAIN if iucv msg limit is exceeded If the iucv message limit for a communication path is exceeded, sendmsg() returns -EAGAIN instead of -EPIPE. The calling application can then handle this error situtation, e.g. to try again after waiting some time. For blocking sockets, sendmsg() waits up to the socket timeout before returning -EAGAIN. For the new wait condition, a macro has been introduced and the iucv_sock_wait_state() has been refactored to this macro. Signed-off-by: Hendrik Brueckner Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- include/net/iucv/af_iucv.h | 2 - net/iucv/af_iucv.c | 144 ++++++++++++++++++++++++++++++++------------- 2 files changed, 103 insertions(+), 43 deletions(-) (limited to 'net') diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index 21ee49ffcbaf..f82a1e877372 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h @@ -94,8 +94,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait); void iucv_sock_link(struct iucv_sock_list *l, struct sock *s); void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s); -int iucv_sock_wait_state(struct sock *sk, int state, int state2, - unsigned long timeo); int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo); void iucv_accept_enqueue(struct sock *parent, struct sock *sk); void iucv_accept_unlink(struct sock *sk); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 42b7198a6883..abadb4a846cf 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -53,6 +53,38 @@ static const u8 iprm_shutdown[8] = #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ #define CB_TRGCLS_LEN (TRGCLS_SIZE) +#define __iucv_sock_wait(sk, condition, timeo, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + long __timeo = timeo; \ + ret = 0; \ + while (!(condition)) { \ + prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \ + if (!__timeo) { \ + ret = -EAGAIN; \ + break; \ + } \ + if (signal_pending(current)) { \ + ret = sock_intr_errno(__timeo); \ + break; \ + } \ + release_sock(sk); \ + __timeo = schedule_timeout(__timeo); \ + lock_sock(sk); \ + ret = sock_error(sk); \ + if (ret) \ + break; \ + } \ + finish_wait(sk->sk_sleep, &__wait); \ +} while (0) + +#define iucv_sock_wait(sk, condition, timeo) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __iucv_sock_wait(sk, condition, timeo, __ret); \ + __ret; \ +}) static void iucv_sock_kill(struct sock *sk); static void iucv_sock_close(struct sock *sk); @@ -121,6 +153,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg) return msg->length; } +/** + * iucv_sock_in_state() - check for specific states + * @sk: sock structure + * @state: first iucv sk state + * @state: second iucv sk state + * + * Returns true if the socket in either in the first or second state. + */ +static int iucv_sock_in_state(struct sock *sk, int state, int state2) +{ + return (sk->sk_state == state || sk->sk_state == state2); +} + +/** + * iucv_below_msglim() - function to check if messages can be sent + * @sk: sock structure + * + * Returns true if the send queue length is lower than the message limit. + * Always returns true if the socket is not connected (no iucv path for + * checking the message limit). 
+ */ +static inline int iucv_below_msglim(struct sock *sk) +{ + struct iucv_sock *iucv = iucv_sk(sk); + + if (sk->sk_state != IUCV_CONNECTED) + return 1; + return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); +} + +/** + * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit + */ +static void iucv_sock_wake_msglim(struct sock *sk) +{ + read_lock(&sk->sk_callback_lock); + if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) + wake_up_interruptible_all(sk->sk_sleep); + sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); + read_unlock(&sk->sk_callback_lock); +} + /* Timers */ static void iucv_sock_timeout(unsigned long arg) { @@ -212,7 +286,9 @@ static void iucv_sock_close(struct sock *sk) timeo = sk->sk_lingertime; else timeo = IUCV_DISCONN_TIMEOUT; - err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo); + err = iucv_sock_wait(sk, + iucv_sock_in_state(sk, IUCV_CLOSED, 0), + timeo); } case IUCV_CLOSING: /* fall through */ @@ -393,39 +469,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) return NULL; } -int iucv_sock_wait_state(struct sock *sk, int state, int state2, - unsigned long timeo) -{ - DECLARE_WAITQUEUE(wait, current); - int err = 0; - - add_wait_queue(sk->sk_sleep, &wait); - while (sk->sk_state != state && sk->sk_state != state2) { - set_current_state(TASK_INTERRUPTIBLE); - - if (!timeo) { - err = -EAGAIN; - break; - } - - if (signal_pending(current)) { - err = sock_intr_errno(timeo); - break; - } - - release_sock(sk); - timeo = schedule_timeout(timeo); - lock_sock(sk); - - err = sock_error(sk); - if (err) - break; - } - set_current_state(TASK_RUNNING); - remove_wait_queue(sk->sk_sleep, &wait); - return err; -} - /* Bind an unbound socket */ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) @@ -570,8 +613,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr, } if (sk->sk_state != IUCV_CONNECTED) { - err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN, - sock_sndtimeo(sk, flags & O_NONBLOCK)); + err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED, + IUCV_DISCONN), + sock_sndtimeo(sk, flags & O_NONBLOCK)); } if (sk->sk_state == IUCV_DISCONN) { @@ -725,9 +769,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct iucv_message txmsg; struct cmsghdr *cmsg; int cmsg_done; + long timeo; char user_id[9]; char appl_id[9]; int err; + int noblock = msg->msg_flags & MSG_DONTWAIT; err = sock_error(sk); if (err) @@ -799,8 +845,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, * this is fine for SOCK_SEQPACKET (unless we want to support * segmented records using the MSG_EOR flag), but * for SOCK_STREAM we might want to improve it in future */ - skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, - &err); + skb = sock_alloc_send_skb(sk, len, noblock, &err); if (!skb) goto out; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { @@ -808,6 +853,18 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto fail; } + /* wait if outstanding messages for iucv path has reached */ + timeo = sock_sndtimeo(sk, noblock); + err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); + if (err) + goto fail; + + /* return -ECONNRESET if the socket is no longer connected */ + if (sk->sk_state != IUCV_CONNECTED) { + err = -ECONNRESET; + goto fail; + } + /* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); @@ -844,9 
+901,10 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, pr_err("Application %s on z/VM guest %s" " exceeds message limit\n", appl_id, user_id); - } + err = -EAGAIN; + } else + err = -EPIPE; skb_unlink(skb, &iucv->send_skb_q); - err = -EPIPE; goto fail; } @@ -1463,7 +1521,11 @@ static void iucv_callback_txdone(struct iucv_path *path, spin_unlock_irqrestore(&list->lock, flags); - kfree_skb(this); + if (this) { + kfree_skb(this); + /* wake up any process waiting for sending */ + iucv_sock_wake_msglim(sk); + } } BUG_ON(!this); -- cgit v1.2.3 From 25502bda07b4cd4f1d9942cca2df446c4a0f167c Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Thu, 18 Jun 2009 04:16:46 +0000 Subject: ieee802154: use standard routine for printing dumps Use print_hex_dump_bytes instead of self-written dumping function for outputting packet dumps. Signed-off-by: Dmitry Eremin-Solenikov Signed-off-by: David S. Miller --- net/ieee802154/af_ieee802154.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'net') diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c index 882a927cefae..3bb6bdb1dac1 100644 --- a/net/ieee802154/af_ieee802154.c +++ b/net/ieee802154/af_ieee802154.c @@ -39,14 +39,6 @@ #include "af802154.h" -#define DBG_DUMP(data, len) { \ - int i; \ - pr_debug("function: %s: data: len %d:\n", __func__, len); \ - for (i = 0; i < len; i++) {\ - pr_debug("%02x: %02x\n", i, (data)[i]); \ - } \ -} - /* * Utility function for families */ @@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = { static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { - DBG_DUMP(skb->data, skb->len); if (!netif_running(dev)) return -ENODEV; pr_debug("got frame, type %d, dev %p\n", dev->type, dev); +#ifdef DEBUG + print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len); +#endif if (!net_eq(dev_net(dev), &init_net)) goto drop; -- cgit v1.2.3 From 7fa20a7f60df0afceafbb8197b5d110507f42c72 Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Tue, 16 Jun 2009 14:53:24 +0100 Subject: rfkill: rfkill_set_block() when suspended nitpick If we return after fiddling with the state, userspace will see the wrong state and rfkill_set_sw_state() won't work until the next call to rfkill_set_block(). At the moment rfkill_set_block() will always be called from rfkill_resume(), but this will change in future. Also, presumably the point of this test is to avoid bothering devices which may be suspended. If we don't want to call set_block(), we probably don't want to call query() either :-). Signed-off-by: Alan Jenkins Signed-off-by: John W. Linville --- net/rfkill/core.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 4e68ab439d5d..868d79f8ac1d 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -270,6 +270,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) unsigned long flags; int err; + if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) + return; + /* * Some platforms (...!) 
generate input events which affect the * _hard_ kill state -- whenever something tries to change the @@ -292,9 +295,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked) rfkill->state |= RFKILL_BLOCK_SW_SETCALL; spin_unlock_irqrestore(&rfkill->lock, flags); - if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) - return; - err = rfkill->ops->set_block(rfkill->data, blocked); spin_lock_irqsave(&rfkill->lock, flags); -- cgit v1.2.3 From 06d5caf47ef4fbd9efdceae33293c42778cb7b0c Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Tue, 16 Jun 2009 15:39:51 +0100 Subject: rfkill: don't restore software blocked state on persistent devices The setting of the "persistent" flag is also made more explicit using a new rfkill_init_sw_state() function, instead of special-casing rfkill_set_sw_state() when it is called before registration. Suspend is a bit of a corner case so we try to get away without adding another hack to rfkill-input - it's going to be removed soon. If the state does change over suspend, users will simply have to prod rfkill-input twice in order to toggle the state. Userspace policy agents will be able to implement a more consistent user experience. For example, they can avoid the above problem if they toggle devices individually. Then there would be no "global state" to get out of sync. Currently there are only two rfkill drivers with persistent soft-blocked state. thinkpad-acpi already checks the software state on resume. eeepc-laptop will require modification. Signed-off-by: Alan Jenkins CC: Marcel Holtmann Acked-by: Henrique de Moraes Holschuh Signed-off-by: John W. Linville --- drivers/platform/x86/eeepc-laptop.c | 8 ++++---- drivers/platform/x86/thinkpad_acpi.c | 14 ++++++------- include/linux/rfkill.h | 32 ++++++++++++++++++++++++----- net/rfkill/core.c | 40 ++++++++++++++++++++++-------------- 4 files changed, 63 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 03bf522bd7ab..01682eca4360 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -675,8 +675,8 @@ static int eeepc_hotk_add(struct acpi_device *device) if (!ehotk->eeepc_wlan_rfkill) goto wlan_fail; - rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, - get_acpi(CM_ASL_WLAN) != 1); + rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill, + get_acpi(CM_ASL_WLAN) != 1); result = rfkill_register(ehotk->eeepc_wlan_rfkill); if (result) goto wlan_fail; @@ -693,8 +693,8 @@ static int eeepc_hotk_add(struct acpi_device *device) if (!ehotk->eeepc_bluetooth_rfkill) goto bluetooth_fail; - rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill, - get_acpi(CM_ASL_BLUETOOTH) != 1); + rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill, + get_acpi(CM_ASL_BLUETOOTH) != 1); result = rfkill_register(ehotk->eeepc_bluetooth_rfkill); if (result) goto bluetooth_fail; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 86e958539f46..40d64c03278c 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1163,8 +1163,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id, { struct tpacpi_rfk *atp_rfk; int res; - bool initial_sw_state = false; - int initial_sw_status; + bool sw_state = false; + int sw_status; BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]); @@ -1185,17 +1185,17 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id, atp_rfk->id = id; atp_rfk->ops = tp_rfkops; - 
initial_sw_status = (tp_rfkops->get_status)(); - if (initial_sw_status < 0) { + sw_status = (tp_rfkops->get_status)(); + if (sw_status < 0) { printk(TPACPI_ERR "failed to read initial state for %s, error %d\n", - name, initial_sw_status); + name, sw_status); } else { - initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF); + sw_state = (sw_status == TPACPI_RFK_RADIO_OFF); if (set_default) { /* try to keep the initial state, since we ask the * firmware to preserve it across S5 in NVRAM */ - rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state); + rfkill_init_sw_state(atp_rfk->rfkill, sw_state); } } rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state()); diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 16e39c7a67fc..dcac724340d8 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -160,8 +160,9 @@ struct rfkill * __must_check rfkill_alloc(const char *name, * the rfkill structure. Before calling this function the driver needs * to be ready to service method calls from rfkill. * - * If the software blocked state is not set before registration, - * set_block will be called to initialize it to a default value. + * If rfkill_init_sw_state() is not called before registration, + * set_block() will be called to initialize the software blocked state + * to a default value. * * If the hardware blocked state is not set before registration, * it is assumed to be unblocked. @@ -234,9 +235,11 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); * rfkill drivers that get events when the soft-blocked state changes * (yes, some platforms directly act on input but allow changing again) * use this function to notify the rfkill core (and through that also - * userspace) of the current state. It is not necessary to notify on - * resume; since hibernation can always change the soft-blocked state, - * the rfkill core will unconditionally restore the previous state. + * userspace) of the current state. + * + * Drivers should also call this function after resume if the state has + * been changed by the user. This only makes sense for "persistent" + * devices (see rfkill_init_sw_state()). * * This function can be called in any context, even from within rfkill * callbacks. @@ -246,6 +249,21 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked); */ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); +/** + * rfkill_init_sw_state - Initialize persistent software block state + * @rfkill: pointer to the rfkill class to modify. + * @state: the current software block state to set + * + * rfkill drivers that preserve their software block state over power off + * use this function to notify the rfkill core (and through that also + * userspace) of their initial state. It should only be used before + * registration. + * + * In addition, it marks the device as "persistent". Persistent devices + * are expected to preserve preserve their own state when suspended. + */ +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); + /** * rfkill_set_states - Set the internal rfkill block states * @rfkill: pointer to the rfkill class to modify. 
@@ -307,6 +325,10 @@ static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) return blocked; } +static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ +} + static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) { } diff --git a/net/rfkill/core.c b/net/rfkill/core.c index 868d79f8ac1d..dcf8df7c573c 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -56,7 +56,6 @@ struct rfkill { u32 idx; bool registered; - bool suspended; bool persistent; const struct rfkill_ops *ops; @@ -224,7 +223,7 @@ static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) static void rfkill_event(struct rfkill *rfkill) { - if (!rfkill->registered || rfkill->suspended) + if (!rfkill->registered) return; kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); @@ -508,19 +507,32 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) blocked = blocked || hwblock; spin_unlock_irqrestore(&rfkill->lock, flags); - if (!rfkill->registered) { - rfkill->persistent = true; - } else { - if (prev != blocked && !hwblock) - schedule_work(&rfkill->uevent_work); + if (!rfkill->registered) + return blocked; - rfkill_led_trigger_event(rfkill); - } + if (prev != blocked && !hwblock) + schedule_work(&rfkill->uevent_work); + + rfkill_led_trigger_event(rfkill); return blocked; } EXPORT_SYMBOL(rfkill_set_sw_state); +void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) +{ + unsigned long flags; + + BUG_ON(!rfkill); + BUG_ON(rfkill->registered); + + spin_lock_irqsave(&rfkill->lock, flags); + __rfkill_set_sw_state(rfkill, blocked); + rfkill->persistent = true; + spin_unlock_irqrestore(&rfkill->lock, flags); +} +EXPORT_SYMBOL(rfkill_init_sw_state); + void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) { unsigned long flags; @@ -718,8 +730,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state) rfkill_pause_polling(rfkill); - rfkill->suspended = true; - return 0; } @@ -728,10 +738,10 @@ static int rfkill_resume(struct device *dev) struct rfkill *rfkill = to_rfkill(dev); bool cur; - cur = !!(rfkill->state & RFKILL_BLOCK_SW); - rfkill_set_block(rfkill, cur); - - rfkill->suspended = false; + if (!rfkill->persistent) { + cur = !!(rfkill->state & RFKILL_BLOCK_SW); + rfkill_set_block(rfkill, cur); + } rfkill_resume_polling(rfkill); -- cgit v1.2.3 From 464902e812025792c9e33e19e1555c343672d5cf Mon Sep 17 00:00:00 2001 From: Alan Jenkins Date: Tue, 16 Jun 2009 14:54:04 +0100 Subject: rfkill: export persistent attribute in sysfs This information allows userspace to implement a hybrid policy where it can store the rfkill soft-blocked state in platform non-volatile storage if available, and if not then file-based storage can be used. Some users prefer platform non-volatile storage because of the behaviour when dual-booting multiple versions of Linux, or if the rfkill setting is changed in the BIOS setting screens, or if the BIOS responds to wireless-toggle hotkeys itself before the relevant platform driver has been loaded. Signed-off-by: Alan Jenkins Acked-by: Henrique de Moraes Holschuh Signed-off-by: John W. 
Linville --- Documentation/rfkill.txt | 2 ++ include/linux/rfkill.h | 5 +++-- net/rfkill/core.c | 10 ++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt index c8acd8659e91..b4860509c319 100644 --- a/Documentation/rfkill.txt +++ b/Documentation/rfkill.txt @@ -111,6 +111,8 @@ following attributes: name: Name assigned by driver to this key (interface or driver name). type: Driver type string ("wlan", "bluetooth", etc). + persistent: Whether the soft blocked state is initialised from + non-volatile storage at startup. state: Current state of the transmitter 0: RFKILL_STATE_SOFT_BLOCKED transmitter is turned off by software diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index dcac724340d8..e73e2429a1b1 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -259,8 +259,9 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked); * userspace) of their initial state. It should only be used before * registration. * - * In addition, it marks the device as "persistent". Persistent devices - * are expected to preserve preserve their own state when suspended. + * In addition, it marks the device as "persistent", an attribute which + * can be read by userspace. Persistent devices are expected to preserve + * their own state when suspended. */ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked); diff --git a/net/rfkill/core.c b/net/rfkill/core.c index dcf8df7c573c..79693fe2001e 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -610,6 +610,15 @@ static ssize_t rfkill_idx_show(struct device *dev, return sprintf(buf, "%d\n", rfkill->idx); } +static ssize_t rfkill_persistent_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rfkill *rfkill = to_rfkill(dev); + + return sprintf(buf, "%d\n", rfkill->persistent); +} + static u8 user_state_from_blocked(unsigned long state) { if (state & RFKILL_BLOCK_HW) @@ -668,6 +677,7 @@ static struct device_attribute rfkill_dev_attrs[] = { __ATTR(name, S_IRUGO, rfkill_name_show, NULL), __ATTR(type, S_IRUGO, rfkill_type_show, NULL), __ATTR(index, S_IRUGO, rfkill_idx_show, NULL), + __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL), __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store), __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store), __ATTR_NULL -- cgit v1.2.3 From 155cc9e4b1d60161ee53ffaf2c15b9411f086fa7 Mon Sep 17 00:00:00 2001 From: Andrey Yurovsky Date: Tue, 16 Jun 2009 11:31:04 -0700 Subject: cfg80211: allow adding/deleting stations on mesh Commit b2a151a288 added a check that prevents adding or deleting stations on non-AP interfaces. Adding and deleting stations is supported for Mesh Point interfaces, so add Mesh Point to that check as well. Signed-off-by: Andrey Yurovsky Signed-off-by: John W. 
Linville --- net/wireless/nl80211.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 24168560ebae..2c55d25ed34d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1763,7 +1763,8 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) goto out_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EINVAL; goto out; } @@ -1812,7 +1813,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info) goto out_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EINVAL; goto out; } -- cgit v1.2.3 From 9a5e8bbc8fece7851a2a69a8676a6fd0507bc550 Mon Sep 17 00:00:00 2001 From: Andrey Yurovsky Date: Tue, 16 Jun 2009 16:09:37 -0700 Subject: cfg80211: allow setting station parameters in mesh Mesh Point interfaces can also set parameters, for example plink_open is used to manually establish peer links from user-space (currently via iw). Add Mesh Point to the check in nl80211_set_station. Signed-off-by: Andrey Yurovsky Signed-off-by: John W. Linville --- net/wireless/nl80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 2c55d25ed34d..304b3d568e07 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1688,7 +1688,8 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) goto out_rtnl; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) { + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && + dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { err = -EINVAL; goto out; } -- cgit v1.2.3 From a97f4424fb4cddecf9b13c9b0e3f79924b624a7f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 18 Jun 2009 17:23:43 +0200 Subject: cfg80211: validate station settings When I disallowed interfering with stations on non-AP interfaces, I not only forget mesh but also managed interfaces which need this for the authorized flag. Let's actually validate everything properly. This fixes an nl80211 regression introduced by the interfering, under which wpa_supplicant -Dnl80211 could not properly connect. Signed-off-by: Johannes Berg Signed-off-by: John W. 
Linville --- net/wireless/nl80211.c | 94 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 78 insertions(+), 16 deletions(-) (limited to 'net') diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 304b3d568e07..241bddd0b4f1 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1687,14 +1687,52 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) if (err) goto out_rtnl; - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { - err = -EINVAL; + err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); + if (err) goto out; + + /* validate settings */ + err = 0; + + switch (dev->ieee80211_ptr->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + /* disallow mesh-specific things */ + if (params.plink_action) + err = -EINVAL; + break; + case NL80211_IFTYPE_STATION: + /* disallow everything but AUTHORIZED flag */ + if (params.plink_action) + err = -EINVAL; + if (params.vlan) + err = -EINVAL; + if (params.supported_rates) + err = -EINVAL; + if (params.ht_capa) + err = -EINVAL; + if (params.listen_interval >= 0) + err = -EINVAL; + if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED)) + err = -EINVAL; + break; + case NL80211_IFTYPE_MESH_POINT: + /* disallow things mesh doesn't support */ + if (params.vlan) + err = -EINVAL; + if (params.ht_capa) + err = -EINVAL; + if (params.listen_interval >= 0) + err = -EINVAL; + if (params.supported_rates) + err = -EINVAL; + if (params.sta_flags_mask) + err = -EINVAL; + break; + default: + err = -EINVAL; } - err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); if (err) goto out; @@ -1729,9 +1767,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; - if (!info->attrs[NL80211_ATTR_STA_AID]) - return -EINVAL; - if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) return -EINVAL; @@ -1746,9 +1781,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) params.listen_interval = nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]); - params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); - if (!params.aid || params.aid > IEEE80211_MAX_AID) - return -EINVAL; + if (info->attrs[NL80211_ATTR_STA_AID]) { + params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]); + if (!params.aid || params.aid > IEEE80211_MAX_AID) + return -EINVAL; + } if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) params.ht_capa = @@ -1763,14 +1800,39 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info) if (err) goto out_rtnl; - if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && - dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) { - err = -EINVAL; + err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); + if (err) goto out; + + /* validate settings */ + err = 0; + + switch (dev->ieee80211_ptr->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_AP_VLAN: + /* all ok but must have AID */ + if (!params.aid) + err = -EINVAL; + break; + case NL80211_IFTYPE_MESH_POINT: + /* disallow things mesh doesn't support */ + if (params.vlan) + err = -EINVAL; + if (params.aid) + err = -EINVAL; + if (params.ht_capa) + err = -EINVAL; + if (params.listen_interval >= 0) + err = -EINVAL; + if (params.supported_rates) + err = -EINVAL; + if (params.sta_flags_mask) + 
err = -EINVAL; + break; + default: + err = -EINVAL; } - err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, ¶ms.vlan); if (err) goto out; -- cgit v1.2.3 From 73e42897e8e5619eacb787d2ce69be12f47cfc21 Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Sat, 20 Jun 2009 01:15:16 -0700 Subject: ipv4: fix NULL pointer + success return in route lookup path Don't drop route if we're not caching I recently got a report of an oops on a route lookup. Maxime was testing what would happen if route caching was turned off (doing so by setting making rt_caching always return 0), and found that it triggered an oops. I looked at it and found that the problem stemmed from the fact that the route lookup routines were returning success from their lookup paths (which is good), but never set the **rp pointer to anything (which is bad). This happens because in rt_intern_hash, if rt_caching returns false, we call rt_drop and return 0. This almost emulates slient success. What we should be doing is assigning *rp = rt and _not_ dropping the route. This way, during slow path lookups, when we create a new route cache entry, we don't immediately discard it, rather we just don't add it into the cache hash table, but we let this one lookup use it for the purpose of this route request. Maxime has tested and reports it prevents the oops. There is still a subsequent routing issue that I'm looking into further, but I'm confident that, even if its related to this same path, this patch makes sense to take. Signed-off-by: Neil Horman Signed-off-by: David S. Miller --- net/ipv4/route.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/ipv4/route.c b/net/ipv4/route.c index cd76b3cb7092..65b3a8b11a6c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1085,8 +1085,16 @@ restart: now = jiffies; if (!rt_caching(dev_net(rt->u.dst.dev))) { - rt_drop(rt); - return 0; + /* + * If we're not caching, just tell the caller we + * were successful and don't touch the route. The + * caller hold the sole reference to the cache entry, and + * it will be released when the caller is done with it. + * If we drop it here, the callers have no way to resolve routes + * when we're not caching. Instead, just point *rp at rt, so + * the caller gets a single use out of the route + */ + goto report_and_exit; } rthp = &rt_hash_table[hash].chain; @@ -1217,6 +1225,8 @@ restart: rcu_assign_pointer(rt_hash_table[hash].chain, rt); spin_unlock_bh(rt_hash_lock_addr(hash)); + +report_and_exit: if (rp) *rp = rt; else -- cgit v1.2.3
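
The NULL-pointer failure described in the last commit message above is a generic out-parameter pitfall: a lookup that reports success must hand the entry back to the caller on every return path. A minimal standalone sketch of the broken and fixed shapes (illustrative C only, not the kernel code; the struct and function names are made up for this example):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { int id; };

    /* Broken shape: the no-cache path returns 0 ("success") but never
     * writes *rp, so the caller dereferences whatever was there before. */
    int lookup_broken(int caching, struct entry *e, struct entry **rp)
    {
            if (!caching) {
                    free(e);        /* analogous to rt_drop(rt) */
                    return 0;       /* silent "success" with *rp unset */
            }
            *rp = e;
            return 0;
    }

    /* Fixed shape: skip only the cache insertion when not caching, and
     * still give the caller its single-use entry -- the same idea the
     * patch implements with the report_and_exit label. */
    int lookup_fixed(int caching, struct entry *e, struct entry **rp)
    {
            if (caching) {
                    /* insert e into the cache hash table here */
            }
            *rp = e;
            return 0;
    }

    int main(void)
    {
            struct entry *e = malloc(sizeof(*e));
            struct entry *rp = NULL;

            if (!e)
                    return 1;
            if (lookup_fixed(0, e, &rp) == 0 && rp)
                    printf("lookup succeeded and returned a usable entry\n");
            free(rp);
            return 0;
    }

As in the patch, the caller holds the sole reference to the uncached entry and is responsible for releasing it when done.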