53 files changed, 302 insertions(+), 240 deletions(-)
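The recurring mechanical change below converts boolean skb_queue_len() tests into skb_queue_empty() calls. As a minimal sketch of the resulting idiom (hypothetical struct example_dev and readq/read_wait fields, illustrative only; assumes <linux/skbuff.h> and <linux/poll.h>) — in this era skb_queue_empty() is a single pointer comparison of list->next against the head itself, so the test reads one word and never touches qlen:

	struct example_dev {			/* hypothetical driver state */
		wait_queue_head_t read_wait;
		struct sk_buff_head readq;
	};

	static unsigned int example_chr_poll(struct file *file, poll_table *wait)
	{
		struct example_dev *dev = file->private_data;

		poll_wait(file, &dev->read_wait, wait);

		/* was: if (skb_queue_len(&dev->readq)) */
		if (!skb_queue_empty(&dev->readq))
			return POLLIN | POLLRDNORM;
		return POLLOUT | POLLWRNORM;
	}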
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 3256192dcde8..f9b956fb2b8b 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -120,7 +120,7 @@ static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait) poll_wait(file, &hci_vhci->read_wait, wait); - if (skb_queue_len(&hci_vhci->readq)) + if (!skb_queue_empty(&hci_vhci->readq)) return POLLIN | POLLRDNORM; return POLLOUT | POLLWRNORM; diff --git a/drivers/isdn/hisax/isdnl1.c b/drivers/isdn/hisax/isdnl1.c index ac899503a74f..bab356886483 100644 --- a/drivers/isdn/hisax/isdnl1.c +++ b/drivers/isdn/hisax/isdnl1.c @@ -279,7 +279,8 @@ BChannel_proc_xmt(struct BCState *bcs) if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags)) st->l1.l1l2(st, PH_PULL | CONFIRM, NULL); if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) { - if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && (!skb_queue_len(&bcs->squeue))) { + if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && + skb_queue_empty(&bcs->squeue)) { st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL); } } diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 9022583fd6a0..1615c1a76ab8 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c @@ -108,7 +108,8 @@ static int l2addrsize(struct Layer2 *l2); static void set_peer_busy(struct Layer2 *l2) { test_and_set_bit(FLG_PEER_BUSY, &l2->flag); - if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue)) + if (!skb_queue_empty(&l2->i_queue) || + !skb_queue_empty(&l2->ui_queue)) test_and_set_bit(FLG_L2BLOCK, &l2->flag); } @@ -754,7 +755,7 @@ l2_restart_multi(struct FsmInst *fi, int event, void *arg) st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL); if ((ST_L2_7==state) || (ST_L2_8 == state)) - if (skb_queue_len(&st->l2.i_queue) && cansend(st)) + if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } @@ -810,7 +811,7 @@ l2_connected(struct FsmInst *fi, int event, void *arg) if (pr != -1) st->l2.l2l3(st, pr, NULL); - if (skb_queue_len(&st->l2.i_queue) && cansend(st)) + if (!skb_queue_empty(&st->l2.i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } @@ -1014,7 +1015,7 @@ l2_st7_got_super(struct FsmInst *fi, int event, void *arg) if(typ != RR) FsmDelTimer(&st->l2.t203, 9); restart_t200(st, 12); } - if (skb_queue_len(&st->l2.i_queue) && (typ == RR)) + if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); @@ -1120,7 +1121,7 @@ l2_got_iframe(struct FsmInst *fi, int event, void *arg) return; } - if (skb_queue_len(&st->l2.i_queue) && (fi->state == ST_L2_7)) + if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag)) enquiry_cr(st, RR, RSP, 0); @@ -1138,7 +1139,7 @@ l2_got_tei(struct FsmInst *fi, int event, void *arg) test_and_set_bit(FLG_L3_INIT, &st->l2.flag); } else FsmChangeState(fi, ST_L2_4); - if (skb_queue_len(&st->l2.ui_queue)) + if (!skb_queue_empty(&st->l2.ui_queue)) tx_ui(st); } @@ -1301,7 +1302,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) FsmDelTimer(&st->l2.t203, 13); FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11); } - if (skb_queue_len(&l2->i_queue) && cansend(st)) + if (!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } @@ -1347,7 +1348,7 @@ l2_st8_got_super(struct FsmInst *fi, int event, void *arg) } invoke_retransmission(st, nr); FsmChangeState(fi, ST_L2_7); - if 
(skb_queue_len(&l2->i_queue) && cansend(st)) + if (!skb_queue_empty(&l2->i_queue) && cansend(st)) st->l2.l2l1(st, PH_PULL | REQUEST, NULL); } else nrerrorrecovery(fi); diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c index abcc9530eb34..c9917cd2132b 100644 --- a/drivers/isdn/hisax/isdnl3.c +++ b/drivers/isdn/hisax/isdnl3.c @@ -302,7 +302,7 @@ release_l3_process(struct l3_process *p) !test_bit(FLG_PTP, &p->st->l2.flag)) { if (p->debug) l3_debug(p->st, "release_l3_process: last process"); - if (!skb_queue_len(&p->st->l3.squeue)) { + if (skb_queue_empty(&p->st->l3.squeue)) { if (p->debug) l3_debug(p->st, "release_l3_process: release link"); if (p->st->protocol != ISDN_PTYPE_NI1) diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index ad5aa38fb5a6..b37ef1f06b3d 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -1223,7 +1223,7 @@ isdn_tty_write(struct tty_struct *tty, const u_char * buf, int count) total += c; } atomic_dec(&info->xmit_lock); - if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) { + if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) { if (m->mdmreg[REG_DXMT] & BIT_DXMT) { isdn_tty_senddown(info); isdn_tty_tint(info); @@ -1284,7 +1284,7 @@ isdn_tty_flush_chars(struct tty_struct *tty) if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars")) return; - if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) + if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1); } diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 9fc0c1e03732..e0d1b01cc74c 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -304,12 +304,12 @@ icn_pollbchan_send(int channel, icn_card * card) isdn_ctrl cmd; if (!(card->sndcount[channel] || card->xskb[channel] || - skb_queue_len(&card->spqueue[channel]))) + !skb_queue_empty(&card->spqueue[channel]))) return; if (icn_trymaplock_channel(card, mch)) { while (sbfree && (card->sndcount[channel] || - skb_queue_len(&card->spqueue[channel]) || + !skb_queue_empty(&card->spqueue[channel]) || card->xskb[channel])) { spin_lock_irqsave(&card->lock, flags); if (card->xmit_lock[channel]) { diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index ece1b1a13186..c27e417f32bf 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -304,7 +304,7 @@ static inline void scc_discard_buffers(struct scc_channel *scc) scc->tx_buff = NULL; } - while (skb_queue_len(&scc->tx_queue)) + while (!skb_queue_empty(&scc->tx_queue)) dev_kfree_skb(skb_dequeue(&scc->tx_queue)); spin_unlock_irqrestore(&scc->lock, flags); @@ -1126,8 +1126,7 @@ static void t_dwait(unsigned long channel) if (scc->stat.tx_state == TXS_WAIT) /* maxkeyup or idle timeout */ { - if (skb_queue_len(&scc->tx_queue) == 0) /* nothing to send */ - { + if (skb_queue_empty(&scc->tx_queue)) { /* nothing to send */ scc->stat.tx_state = TXS_IDLE; netif_wake_queue(scc->dev); /* t_maxkeyup locked it. 
*/ return; diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index 5e48b9ab3045..59e8183c639e 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -364,7 +364,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, spin_lock_irqsave(&ap->recv_lock, flags); ppp_async_input(ap, buf, cflags, count); spin_unlock_irqrestore(&ap->recv_lock, flags); - if (skb_queue_len(&ap->rqueue)) + if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); ap_put(ap); if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index ab726ab43798..a32668e88e09 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -1237,8 +1237,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) pch = list_entry(list, struct channel, clist); navail += pch->avail = (pch->chan != NULL); if (pch->avail) { - if (skb_queue_len(&pch->file.xq) == 0 - || !pch->had_frag) { + if (skb_queue_empty(&pch->file.xq) || + !pch->had_frag) { pch->avail = 2; ++nfree; } @@ -1374,8 +1374,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) /* try to send it down the channel */ chan = pch->chan; - if (skb_queue_len(&pch->file.xq) - || !chan->ops->start_xmit(chan, frag)) + if (!skb_queue_empty(&pch->file.xq) || + !chan->ops->start_xmit(chan, frag)) skb_queue_tail(&pch->file.xq, frag); pch->had_frag = 1; p += flen; @@ -1412,7 +1412,7 @@ ppp_channel_push(struct channel *pch) spin_lock_bh(&pch->downl); if (pch->chan != 0) { - while (skb_queue_len(&pch->file.xq) > 0) { + while (!skb_queue_empty(&pch->file.xq)) { skb = skb_dequeue(&pch->file.xq); if (!pch->chan->ops->start_xmit(pch->chan, skb)) { /* put the packet back and try again later */ @@ -1426,7 +1426,7 @@ ppp_channel_push(struct channel *pch) } spin_unlock_bh(&pch->downl); /* see if there is anything from the attached unit to be sent */ - if (skb_queue_len(&pch->file.xq) == 0) { + if (skb_queue_empty(&pch->file.xq)) { read_lock_bh(&pch->upl); ppp = pch->ppp; if (ppp != 0) diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index fd9f50180355..4d51c0c8023d 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c @@ -406,7 +406,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, spin_lock_irqsave(&ap->recv_lock, flags); ppp_sync_input(ap, buf, cflags, count); spin_unlock_irqrestore(&ap->recv_lock, flags); - if (skb_queue_len(&ap->rqueue)) + if (!skb_queue_empty(&ap->rqueue)) tasklet_schedule(&ap->tsk); sp_put(ap); if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7bfee366297b..effab0b9adca 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -215,7 +215,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait) poll_wait(file, &tun->read_wait, wait); - if (skb_queue_len(&tun->readq)) + if (!skb_queue_empty(&tun->readq)) mask |= POLLIN | POLLRDNORM; return mask; diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index c12648d8192b..47f3c5d0203d 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -2374,7 +2374,7 @@ void stop_airo_card( struct net_device *dev, int freeres ) /* * Clean out tx queue */ - if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) { + if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) { struct sk_buff *skb = NULL; for (;(skb = skb_dequeue(&ai->txq));) dev_kfree_skb(skb); @@ -3287,7 +3287,7 @@ exitrx: if (status & 
EV_TXEXC) get_tx_error(apriv, -1); spin_lock_irqsave(&apriv->aux_lock, flags); - if (skb_queue_len (&apriv->txq)) { + if (!skb_queue_empty(&apriv->txq)) { spin_unlock_irqrestore(&apriv->aux_lock,flags); mpi_send_packet (dev); } else { diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 60440dbe3a27..24c0af49c25c 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c @@ -428,7 +428,7 @@ claw_pack_skb(struct claw_privbk *privptr) new_skb = NULL; /* assume no dice */ pkt_cnt = 0; CLAW_DBF_TEXT(4,trace,"PackSKBe"); - if (skb_queue_len(&p_ch->collect_queue) > 0) { + if (!skb_queue_empty(&p_ch->collect_queue)) { /* some data */ held_skb = skb_dequeue(&p_ch->collect_queue); if (p_env->packing != DO_PACKED) @@ -1254,7 +1254,7 @@ claw_write_next ( struct chbk * p_ch ) privptr = (struct claw_privbk *) dev->priv; claw_free_wrt_buf( dev ); if ((privptr->write_free_count > 0) && - (skb_queue_len(&p_ch->collect_queue) > 0)) { + !skb_queue_empty(&p_ch->collect_queue)) { pk_skb = claw_pack_skb(privptr); while (pk_skb != NULL) { rc = claw_hw_tx( pk_skb, dev,1); diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c index 3080393e823d..968f2c113efe 100644 --- a/drivers/s390/net/ctctty.c +++ b/drivers/s390/net/ctctty.c @@ -156,7 +156,7 @@ ctc_tty_readmodem(ctc_tty_info *info) skb_queue_head(&info->rx_queue, skb); else { kfree_skb(skb); - ret = skb_queue_len(&info->rx_queue); + ret = !skb_queue_empty(&info->rx_queue); } } } @@ -530,7 +530,7 @@ ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count) total += c; count -= c; } - if (skb_queue_len(&info->tx_queue)) { + if (!skb_queue_empty(&info->tx_queue)) { info->lsr &= ~UART_LSR_TEMT; tasklet_schedule(&info->tasklet); } @@ -594,7 +594,7 @@ ctc_tty_flush_chars(struct tty_struct *tty) return; if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars")) return; - if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue))) + if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue)) return; tasklet_schedule(&info->tasklet); } diff --git a/drivers/usb/net/usbnet.c b/drivers/usb/net/usbnet.c index 8a945f4f3693..576f3b852fce 100644 --- a/drivers/usb/net/usbnet.c +++ b/drivers/usb/net/usbnet.c @@ -3227,9 +3227,9 @@ static int usbnet_stop (struct net_device *net) temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq); // maybe wait for deletions to finish. 
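Several hunks above (scc_discard_buffers(), ppp_channel_push()) use !skb_queue_empty() to guard a drain loop. A sketch of an equivalent drain that instead leans on skb_dequeue() returning NULL on an empty queue — illustrative only:

	static void example_drain_queue(struct sk_buff_head *q)
	{
		struct sk_buff *skb;

		/* skb_dequeue() takes q->lock itself and returns NULL once empty */
		while ((skb = skb_dequeue(q)) != NULL)
			dev_kfree_skb(skb);
	}

Where no outer lock serializes the queue, testing emptiness and then dequeuing is racy; letting skb_dequeue() itself be the test closes that window. The scc hunk is equivalent because it runs under scc->lock.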
- while (skb_queue_len (&dev->rxq) - && skb_queue_len (&dev->txq) - && skb_queue_len (&dev->done)) { + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { msleep(UNLINK_TIMEOUT_MS); if (netif_msg_ifdown (dev)) devdbg (dev, "waited for %d urb completions", temp); diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 390e760a96d3..0c31ef0b5bad 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -148,7 +148,6 @@ struct ip_sf_socklist struct ip_mc_socklist { struct ip_mc_socklist *next; - int count; struct ip_mreqn multi; unsigned int sfmode; /* MCAST_{INCLUDE,EXCLUDE} */ struct ip_sf_socklist *sflist; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 14b950413495..5d4a990d5577 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -300,20 +300,26 @@ struct sk_buff { #include <asm/system.h> extern void __kfree_skb(struct sk_buff *skb); -extern struct sk_buff *alloc_skb(unsigned int size, int priority); +extern struct sk_buff *alloc_skb(unsigned int size, + unsigned int __nocast priority); extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, - unsigned int size, int priority); + unsigned int size, + unsigned int __nocast priority); extern void kfree_skbmem(struct sk_buff *skb); -extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority); -extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority); -extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask); +extern struct sk_buff *skb_clone(struct sk_buff *skb, + unsigned int __nocast priority); +extern struct sk_buff *skb_copy(const struct sk_buff *skb, + unsigned int __nocast priority); +extern struct sk_buff *pskb_copy(struct sk_buff *skb, + unsigned int __nocast gfp_mask); extern int pskb_expand_head(struct sk_buff *skb, - int nhead, int ntail, int gfp_mask); + int nhead, int ntail, + unsigned int __nocast gfp_mask); extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, - int priority); + unsigned int __nocast priority); extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); #define dev_kfree_skb(a) kfree_skb(a) extern void skb_over_panic(struct sk_buff *skb, int len, @@ -464,7 +470,8 @@ static inline int skb_shared(const struct sk_buff *skb) * * NULL is returned on a memory allocation failure. */ -static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri) +static inline struct sk_buff *skb_share_check(struct sk_buff *skb, + unsigned int __nocast pri) { might_sleep_if(pri & __GFP_WAIT); if (skb_shared(skb)) { @@ -1001,7 +1008,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list) * %NULL is returned in there is no free memory. */ static inline struct sk_buff *__dev_alloc_skb(unsigned int length, - int gfp_mask) + unsigned int __nocast gfp_mask) { struct sk_buff *skb = alloc_skb(length + 16, gfp_mask); if (likely(skb)) @@ -1114,8 +1121,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i, * If there is no free memory -ENOMEM is returned, otherwise zero * is returned and the old skb data released. 
*/ -extern int __skb_linearize(struct sk_buff *skb, int gfp); -static inline int skb_linearize(struct sk_buff *skb, int gfp) +extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp); +static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp) { return __skb_linearize(skb, gfp); } diff --git a/include/net/irda/irda_device.h b/include/net/irda/irda_device.h index 71d6af83b631..92c828029cd8 100644 --- a/include/net/irda/irda_device.h +++ b/include/net/irda/irda_device.h @@ -224,7 +224,7 @@ int irda_device_is_receiving(struct net_device *dev); /* Interface for internal use */ static inline int irda_device_txqueue_empty(const struct net_device *dev) { - return (skb_queue_len(&dev->qdisc->q) == 0); + return skb_queue_empty(&dev->qdisc->q); } int irda_device_set_raw_mode(struct net_device* self, int status); struct net_device *alloc_irdadev(int sizeof_priv); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 47727c7cc628..7435528a1747 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -582,7 +582,6 @@ void sctp_datamsg_track(struct sctp_chunk *); void sctp_chunk_fail(struct sctp_chunk *, int error); int sctp_chunk_abandoned(struct sctp_chunk *); - /* RFC2960 1.4 Key Terms * * o Chunk: A unit of information within an SCTP packet, consisting of @@ -592,13 +591,8 @@ int sctp_chunk_abandoned(struct sctp_chunk *); * each chunk as well as a few other header pointers... */ struct sctp_chunk { - /* These first three elements MUST PRECISELY match the first - * three elements of struct sk_buff. This allows us to reuse - * all the skb_* queue management functions. - */ - struct sctp_chunk *next; - struct sctp_chunk *prev; - struct sk_buff_head *list; + struct list_head list; + atomic_t refcnt; /* This is our link to the per-transport transmitted list. */ @@ -717,7 +711,7 @@ struct sctp_packet { __u32 vtag; /* This contains the payload chunks. */ - struct sk_buff_head chunks; + struct list_head chunk_list; /* This is the overhead of the sctp and ip headers. */ size_t overhead; @@ -974,7 +968,7 @@ struct sctp_inq { /* This is actually a queue of sctp_chunk each * containing a partially decoded packet. */ - struct sk_buff_head in; + struct list_head in_chunk_list; /* This is the packet which is currently off the in queue and is * being worked on through the inbound chunk processing. */ @@ -1017,7 +1011,7 @@ struct sctp_outq { struct sctp_association *asoc; /* Data pending that has never been transmitted. */ - struct sk_buff_head out; + struct list_head out_chunk_list; unsigned out_qlen; /* Total length of queued data chunks. */ @@ -1025,7 +1019,7 @@ struct sctp_outq { unsigned error; /* These are control chunks we want to send. */ - struct sk_buff_head control; + struct list_head control_chunk_list; /* These are chunks that have been sacked but are above the * CTSN, or cumulative tsn ack point. @@ -1672,7 +1666,7 @@ struct sctp_association { * which already resides in sctp_outq. Please move this * queue and its supporting logic down there. 
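The skbuff.h declarations above (and the matching definitions in net/core further below) annotate allocation-flag parameters with __nocast so sparse can warn when plain integers and gfp flags are mixed. A hedged sketch of the calling convention this enforces — example_alloc_queued() is hypothetical; assumes kernel context with <linux/skbuff.h> and <linux/compiler.h>:

	static struct sk_buff *example_alloc_queued(struct sk_buff_head *q,
						    unsigned int size,
						    unsigned int __nocast gfp)
	{
		/* thread the caller's mask through instead of hard-coding it */
		struct sk_buff *skb = alloc_skb(size, gfp);

		if (skb)
			skb_queue_tail(q, skb);
		return skb;
	}

Process-context callers pass GFP_KERNEL, softirq and timer callers GFP_ATOMIC; the gfp_any() helper in the net/sock.h hunk below selects between the two with in_softirq().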
--piggy] */ - struct sk_buff_head addip_chunks; + struct list_head addip_chunk_list; /* ADDIP Section 4.1 ASCONF Chunk Procedures * diff --git a/include/net/sock.h b/include/net/sock.h index 7b76f891ae2d..a1042d08becd 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -684,16 +684,17 @@ extern void FASTCALL(release_sock(struct sock *sk)); #define bh_lock_sock(__sk) spin_lock(&((__sk)->sk_lock.slock)) #define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock)) -extern struct sock *sk_alloc(int family, int priority, +extern struct sock *sk_alloc(int family, + unsigned int __nocast priority, struct proto *prot, int zero_it); extern void sk_free(struct sock *sk); extern struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, - int priority); + unsigned int __nocast priority); extern struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, - int priority); + unsigned int __nocast priority); extern void sock_wfree(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb); @@ -708,7 +709,8 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode); -extern void *sock_kmalloc(struct sock *sk, int size, int priority); +extern void *sock_kmalloc(struct sock *sk, int size, + unsigned int __nocast priority); extern void sock_kfree_s(struct sock *sk, void *mem, int size); extern void sk_send_sigurg(struct sock *sk); @@ -1132,7 +1134,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) } static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, - int size, int mem, int gfp) + int size, int mem, + unsigned int __nocast gfp) { struct sk_buff *skb; int hdr_len; @@ -1155,7 +1158,8 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk, } static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk, - int size, int gfp) + int size, + unsigned int __nocast gfp) { return sk_stream_alloc_pskb(sk, size, 0, gfp); } @@ -1188,7 +1192,7 @@ static inline int sock_writeable(const struct sock *sk) return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2); } -static inline int gfp_any(void) +static inline unsigned int __nocast gfp_any(void) { return in_softirq() ? 
GFP_ATOMIC : GFP_KERNEL; } diff --git a/include/net/tcp.h b/include/net/tcp.h index a166918ca56d..f4f9aba07ac2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -860,7 +860,8 @@ extern void tcp_send_probe0(struct sock *); extern void tcp_send_partial(struct sock *); extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); -extern void tcp_send_active_reset(struct sock *sk, int priority); +extern void tcp_send_active_reset(struct sock *sk, + unsigned int __nocast priority); extern int tcp_send_synack(struct sock *); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); @@ -991,7 +992,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp) static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) { - if (skb_queue_len(&tp->out_of_order_queue) == 0 && + if (skb_queue_empty(&tp->out_of_order_queue) && tp->rcv_wnd && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && !tp->urg_data) diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c index 2e341de3e763..901eff7ebe74 100644 --- a/net/bluetooth/cmtp/core.c +++ b/net/bluetooth/cmtp/core.c @@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in return kernel_sendmsg(sock, &msg, &iv, 1, len); } -static int cmtp_process_transmit(struct cmtp_session *session) +static void cmtp_process_transmit(struct cmtp_session *session) { struct sk_buff *skb, *nskb; unsigned char *hdr; @@ -223,7 +223,7 @@ static int cmtp_process_transmit(struct cmtp_session *session) if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) { BT_ERR("Can't allocate memory for new frame"); - return -ENOMEM; + return; } while ((skb = skb_dequeue(&session->transmit))) { @@ -275,8 +275,6 @@ static int cmtp_process_transmit(struct cmtp_session *session) cmtp_send_frame(session, nskb->data, nskb->len); kfree_skb(nskb); - - return skb_queue_len(&session->transmit); } static int cmtp_session(void *arg) diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c index affbc55462e8..de8af5f42394 100644 --- a/net/bluetooth/hidp/core.c +++ b/net/bluetooth/hidp/core.c @@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) return kernel_sendmsg(sock, &msg, &iv, 1, len); } -static int hidp_process_transmit(struct hidp_session *session) +static void hidp_process_transmit(struct hidp_session *session) { struct sk_buff *skb; @@ -453,9 +453,6 @@ static int hidp_process_transmit(struct hidp_session *session) hidp_set_timer(session); kfree_skb(skb); } - - return skb_queue_len(&session->ctrl_transmit) + - skb_queue_len(&session->intr_transmit); } static int hidp_session(void *arg) diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index f3f6355a2786..63a123c5c41b 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -590,8 +590,11 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo) for (;;) { set_current_state(TASK_INTERRUPTIBLE); - if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) || - signal_pending(current) || !timeo) + if (!skb_queue_empty(&sk->sk_receive_queue) || + sk->sk_err || + (sk->sk_shutdown & RCV_SHUTDOWN) || + signal_pending(current) || + !timeo) break; set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 6d689200bcf3..6304590fd36a 100644 --- a/net/bluetooth/rfcomm/tty.c +++ 
b/net/bluetooth/rfcomm/tty.c @@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) BT_DBG("tty %p dev %p", tty, dev); - if (skb_queue_len(&dlc->tx_queue)) + if (!skb_queue_empty(&dlc->tx_queue)) return dlc->mtu; return 0; diff --git a/net/core/dev.c b/net/core/dev.c index 7f5f62c65115..ff9dc029233a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) extern void skb_release_data(struct sk_buff *); /* Keep head the same: replace data */ -int __skb_linearize(struct sk_buff *skb, int gfp_mask) +int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) { unsigned int size; u8 *data; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 733deee24b9f..d9f7b06fe886 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ -struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) +struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask) { struct sk_buff *skb; u8 *data; @@ -182,7 +182,8 @@ nodata: * %GFP_ATOMIC. */ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, - unsigned int size, int gfp_mask) + unsigned int size, + unsigned int __nocast gfp_mask) { struct sk_buff *skb; u8 *data; @@ -322,7 +323,7 @@ void __kfree_skb(struct sk_buff *skb) * %GFP_ATOMIC. */ -struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) +struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) { struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); @@ -460,7 +461,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) * header is going to be modified. Use pskb_copy() instead. */ -struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask) +struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask) { int headerlen = skb->data - skb->head; /* @@ -499,7 +500,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask) * The returned buffer has a reference count of 1. */ -struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask) +struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask) { /* * Allocate the copy buffer @@ -557,7 +558,8 @@ out: * reloaded after call to this function. */ -int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask) +int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, + unsigned int __nocast gfp_mask) { int i; u8 *data; @@ -647,7 +649,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) * only by netfilter in the cases when checksum is recalculated? 
--ANK */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, - int newheadroom, int newtailroom, int gfp_mask) + int newheadroom, int newtailroom, + unsigned int __nocast gfp_mask) { /* * Allocate the copy buffer diff --git a/net/core/sock.c b/net/core/sock.c index a6ec3ada7f9e..8b35ccdc2b3b 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -622,7 +622,8 @@ lenout: * @prot: struct proto associated with this new sock instance * @zero_it: if we should zero the newly allocated sock */ -struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it) +struct sock *sk_alloc(int family, unsigned int __nocast priority, + struct proto *prot, int zero_it) { struct sock *sk = NULL; kmem_cache_t *slab = prot->slab; @@ -750,7 +751,8 @@ unsigned long sock_i_ino(struct sock *sk) /* * Allocate a skb from the socket's send buffer. */ -struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority) +struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, + unsigned int __nocast priority) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { struct sk_buff * skb = alloc_skb(size, priority); @@ -765,7 +767,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int /* * Allocate a skb from the socket's receive buffer. */ -struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority) +struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, + unsigned int __nocast priority) { if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { struct sk_buff *skb = alloc_skb(size, priority); @@ -780,7 +783,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int /* * Allocate a memory block from the socket's option memory buffer. */ -void *sock_kmalloc(struct sock *sk, int size, int priority) +void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority) { if ((unsigned)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 29bb3cd21965..96a02800cd28 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk) * we are double checking that we are not sending too * many of these keepalive frames. */ - if (skb_queue_len(&scp->other_xmit_queue) == 0) + if (skb_queue_empty(&scp->other_xmit_queue)) dn_nsp_send_link(sk, DN_NOCHANGE, 0); } @@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table struct dn_scp *scp = DN_SK(sk); int mask = datagram_poll(file, sock, wait); - if (skb_queue_len(&scp->other_receive_queue)) + if (!skb_queue_empty(&scp->other_receive_queue)) mask |= POLLRDBAND; return mask; @@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCATMARK: lock_sock(sk); - val = (skb_queue_len(&scp->other_receive_queue) != 0); + val = !skb_queue_empty(&scp->other_receive_queue); if (scp->state != DN_RUN) val = -ENOTCONN; release_sock(sk); @@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int int len = 0; if (flags & MSG_OOB) - return skb_queue_len(q) ? 1 : 0; + return !skb_queue_empty(q) ? 
1 : 0; while(skb != (struct sk_buff *)q) { struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock, if (sk->sk_err) goto out; - if (skb_queue_len(&scp->other_receive_queue)) { + if (!skb_queue_empty(&scp->other_receive_queue)) { if (!(flags & MSG_OOB)) { msg->msg_flags |= MSG_OOB; if (!scp->other_report) { diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 42abbf3f524f..8cce1fdbda90 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -342,7 +342,8 @@ int dn_nsp_xmit_timeout(struct sock *sk) dn_nsp_output(sk); - if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue)) + if (!skb_queue_empty(&scp->data_xmit_queue) || + !skb_queue_empty(&scp->other_xmit_queue)) scp->persist = dn_nsp_persist(sk); return 0; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index cb759484979d..279f57abfecb 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -970,7 +970,8 @@ int icmp_rcv(struct sk_buff *skb) * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently * discarded if to broadcast/multicast. */ - if (icmph->type == ICMP_ECHO && + if ((icmph->type == ICMP_ECHO || + icmph->type == ICMP_TIMESTAMP) && sysctl_icmp_echo_ignore_broadcasts) { goto error; } diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1f3183168a90..5088f90835ae 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1615,9 +1615,10 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) { int err; u32 addr = imr->imr_multiaddr.s_addr; - struct ip_mc_socklist *iml, *i; + struct ip_mc_socklist *iml=NULL, *i; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); + int ifindex; int count = 0; if (!MULTICAST(addr)) @@ -1633,37 +1634,30 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr) goto done; } - iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL); - err = -EADDRINUSE; + ifindex = imr->imr_ifindex; for (i = inet->mc_list; i; i = i->next) { - if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) { - /* New style additions are reference counted */ - if (imr->imr_address.s_addr == 0) { - i->count++; - err = 0; - } + if (i->multi.imr_multiaddr.s_addr == addr && + i->multi.imr_ifindex == ifindex) goto done; - } count++; } err = -ENOBUFS; - if (iml == NULL || count >= sysctl_igmp_max_memberships) + if (count >= sysctl_igmp_max_memberships) + goto done; + iml = (struct ip_mc_socklist *)sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL); + if (iml == NULL) goto done; + memcpy(&iml->multi, imr, sizeof(*imr)); iml->next = inet->mc_list; - iml->count = 1; iml->sflist = NULL; iml->sfmode = MCAST_EXCLUDE; inet->mc_list = iml; ip_mc_inc_group(in_dev, addr); - iml = NULL; err = 0; - done: rtnl_shunlock(); - if (iml) - sock_kfree_s(sk, iml, sizeof(*iml)); return err; } @@ -1693,30 +1687,25 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *iml, **imlp; + struct in_device *in_dev; + u32 group = imr->imr_multiaddr.s_addr; + u32 ifindex; rtnl_lock(); + in_dev = ip_mc_find_dev(imr); + if (!in_dev) { + rtnl_unlock(); + return -ENODEV; + } + ifindex = imr->imr_ifindex; for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) { - if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr && - iml->multi.imr_address.s_addr==imr->imr_address.s_addr && - (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) { - struct in_device *in_dev; - - in_dev = inetdev_by_index(iml->multi.imr_ifindex); - if (in_dev) 
- (void) ip_mc_leave_src(sk, iml, in_dev); - if (--iml->count) { - rtnl_unlock(); - if (in_dev) - in_dev_put(in_dev); - return 0; - } + if (iml->multi.imr_multiaddr.s_addr == group && + iml->multi.imr_ifindex == ifindex) { + (void) ip_mc_leave_src(sk, iml, in_dev); *imlp = iml->next; - if (in_dev) { - ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr); - in_dev_put(in_dev); - } + ip_mc_dec_group(in_dev, group); rtnl_unlock(); sock_kfree_s(sk, iml, sizeof(*iml)); return 0; @@ -1736,6 +1725,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct struct in_device *in_dev = NULL; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *psl; + int leavegroup = 0; int i, j, rv; if (!MULTICAST(addr)) @@ -1755,15 +1745,20 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct err = -EADDRNOTAVAIL; for (pmc=inet->mc_list; pmc; pmc=pmc->next) { - if (memcmp(&pmc->multi, mreqs, 2*sizeof(__u32)) == 0) + if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr + && pmc->multi.imr_ifindex == imr.imr_ifindex) break; } - if (!pmc) /* must have a prior join */ + if (!pmc) { /* must have a prior join */ + err = -EINVAL; goto done; + } /* if a source filter was set, must be the same mode as before */ if (pmc->sflist) { - if (pmc->sfmode != omode) + if (pmc->sfmode != omode) { + err = -EINVAL; goto done; + } } else if (pmc->sfmode != omode) { /* allow mode switches for empty-set filters */ ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0); @@ -1775,7 +1770,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct psl = pmc->sflist; if (!add) { if (!psl) - goto done; + goto done; /* err = -EADDRNOTAVAIL */ rv = !0; for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr, @@ -1784,7 +1779,13 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct break; } if (rv) /* source not found */ + goto done; /* err = -EADDRNOTAVAIL */ + + /* special case - (INCLUDE, empty) == LEAVE_GROUP */ + if (psl->sl_count == 1 && omode == MCAST_INCLUDE) { + leavegroup = 1; goto done; + } /* update the interface filter */ ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, @@ -1842,18 +1843,21 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct &mreqs->imr_sourceaddr, 1); done: rtnl_shunlock(); + if (leavegroup) + return ip_mc_leave_group(sk, &imr); return err; } int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) { - int err; + int err = 0; struct ip_mreqn imr; u32 addr = msf->imsf_multiaddr; struct ip_mc_socklist *pmc; struct in_device *in_dev; struct inet_sock *inet = inet_sk(sk); struct ip_sf_socklist *newpsl, *psl; + int leavegroup = 0; if (!MULTICAST(addr)) return -EINVAL; @@ -1872,15 +1876,22 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) err = -ENODEV; goto done; } - err = -EADDRNOTAVAIL; + + /* special case - (INCLUDE, empty) == LEAVE_GROUP */ + if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) { + leavegroup = 1; + goto done; + } for (pmc=inet->mc_list; pmc; pmc=pmc->next) { if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr && pmc->multi.imr_ifindex == imr.imr_ifindex) break; } - if (!pmc) /* must have a prior join */ + if (!pmc) { /* must have a prior join */ + err = -EINVAL; goto done; + } if (msf->imsf_numsrc) { newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL); @@ -1909,8 +1920,11 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex) 0, NULL, 0); pmc->sflist = newpsl; 
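With the special case added above, both ip_mc_source() and ip_mc_msfilter() treat an empty INCLUDE filter as a group leave (RFC 3376 semantics) instead of failing. Seen from userspace, the rule looks like the following sketch — the group address is hypothetical, and struct ip_msfilter/IP_MSFILTER_SIZE are assumed to be exposed by <netinet/in.h>, as on glibc:

	#include <string.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	/* Leave a multicast group by installing an empty INCLUDE source
	 * filter; with this patch the kernel maps the request to
	 * ip_mc_leave_group(). */
	static int leave_via_empty_include(int fd)
	{
		struct ip_msfilter msf;

		memset(&msf, 0, sizeof(msf));
		msf.imsf_multiaddr.s_addr = inet_addr("239.1.2.3"); /* hypothetical */
		msf.imsf_interface.s_addr = htonl(INADDR_ANY);
		msf.imsf_fmode  = MCAST_INCLUDE;
		msf.imsf_numsrc = 0;	/* empty INCLUDE set => leave */

		return setsockopt(fd, IPPROTO_IP, IP_MSFILTER,
				  &msf, IP_MSFILTER_SIZE(0));
	}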
pmc->sfmode = msf->imsf_fmode; + err = 0; done: rtnl_shunlock(); + if (leavegroup) + err = ip_mc_leave_group(sk, &imr); return err; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f8b172f89811..fc7c481d0d79 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -677,11 +677,11 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, mreq.imr_address.s_addr = mreqs.imr_interface; mreq.imr_ifindex = 0; err = ip_mc_join_group(sk, &mreq); - if (err) + if (err && err != -EADDRINUSE) break; omode = MCAST_INCLUDE; add = 1; - } else /*IP_DROP_SOURCE_MEMBERSHIP */ { + } else /* IP_DROP_SOURCE_MEMBERSHIP */ { omode = MCAST_INCLUDE; add = 0; } @@ -754,7 +754,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, mreq.imr_address.s_addr = 0; mreq.imr_ifindex = greqs.gsr_interface; err = ip_mc_join_group(sk, &mreq); - if (err) + if (err && err != -EADDRINUSE) break; greqs.gsr_interface = mreq.imr_ifindex; omode = MCAST_INCLUDE; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 29894c749163..ddb6ce4ecff2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk) struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); - NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue)); + NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED); /* RX process wants to run with disabled BHs, though it is not * necessary */ @@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, * is not empty. It is more elegant, but eats cycles, * unfortunately. */ - if (skb_queue_len(&tp->ucopy.prequeue)) + if (!skb_queue_empty(&tp->ucopy.prequeue)) goto do_prequeue; /* __ Set realtime policy in scheduler __ */ @@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } if (tp->rcv_nxt == tp->copied_seq && - skb_queue_len(&tp->ucopy.prequeue)) { + !skb_queue_empty(&tp->ucopy.prequeue)) { do_prequeue: tcp_prequeue_process(sk); @@ -1476,7 +1476,7 @@ skip_copy: } while (len > 0); if (user_recv) { - if (skb_queue_len(&tp->ucopy.prequeue)) { + if (!skb_queue_empty(&tp->ucopy.prequeue)) { int chunk; tp->ucopy.len = copied > 0 ? len : 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8de2f1071c2b..53a8a5399f1e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp) int this_sack; /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */ - if (skb_queue_len(&tp->out_of_order_queue) == 0) { + if (skb_queue_empty(&tp->out_of_order_queue)) { tp->rx_opt.num_sacks = 0; tp->rx_opt.eff_sacks = tp->rx_opt.dsack; return; @@ -2935,13 +2935,13 @@ queue_and_out: if(th->fin) tcp_fin(skb, sk, th); - if (skb_queue_len(&tp->out_of_order_queue)) { + if (!skb_queue_empty(&tp->out_of_order_queue)) { tcp_ofo_queue(sk); /* RFC2581. 4.2. SHOULD send immediate ACK, when * gap in queue is filled. */ - if (!skb_queue_len(&tp->out_of_order_queue)) + if (skb_queue_empty(&tp->out_of_order_queue)) tp->ack.pingpong = 0; } @@ -3249,9 +3249,8 @@ static int tcp_prune_queue(struct sock *sk) * This must not ever occur. */ /* First, purge the out_of_order queue. */ - if (skb_queue_len(&tp->out_of_order_queue)) { - NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED, - skb_queue_len(&tp->out_of_order_queue)); + if (!skb_queue_empty(&tp->out_of_order_queue)) { + NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED); __skb_queue_purge(&tp->out_of_order_queue); /* Reset SACK state. 
A conforming SACK implementation will diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e041d057ec86..e3f8ea1bfa9c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk) * was unread data in the receive queue. This behavior is recommended * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM */ -void tcp_send_active_reset(struct sock *sk, int priority) +void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b127b4498565..0084227438c2 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -231,11 +231,10 @@ static void tcp_delack_timer(unsigned long data) } tp->ack.pending &= ~TCP_ACK_TIMER; - if (skb_queue_len(&tp->ucopy.prequeue)) { + if (!skb_queue_empty(&tp->ucopy.prequeue)) { struct sk_buff *skb; - NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED, - skb_queue_len(&tp->ucopy.prequeue)); + NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED); while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk->sk_backlog_rcv(sk, skb); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 562fcd14fdea..29fed6e58d0a 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -281,7 +281,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, struct in6_addr *addr) } write_unlock_bh(&ipv6_sk_mc_lock); - return -ENOENT; + return -EADDRNOTAVAIL; } static struct inet6_dev *ip6_mc_find_dev(struct in6_addr *group, int ifindex) @@ -386,12 +386,16 @@ int ip6_mc_source(int add, int omode, struct sock *sk, if (ipv6_addr_equal(&pmc->addr, group)) break; } - if (!pmc) /* must have a prior join */ + if (!pmc) { /* must have a prior join */ + err = -EINVAL; goto done; + } /* if a source filter was set, must be the same mode as before */ if (pmc->sflist) { - if (pmc->sfmode != omode) + if (pmc->sfmode != omode) { + err = -EINVAL; goto done; + } } else if (pmc->sfmode != omode) { /* allow mode switches for empty-set filters */ ip6_mc_add_src(idev, group, omode, 0, NULL, 0); @@ -402,7 +406,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, psl = pmc->sflist; if (!add) { if (!psl) - goto done; + goto done; /* err = -EADDRNOTAVAIL */ rv = !0; for (i=0; i<psl->sl_count; i++) { rv = memcmp(&psl->sl_addr[i], source, @@ -411,7 +415,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk, break; } if (rv) /* source not found */ - goto done; + goto done; /* err = -EADDRNOTAVAIL */ /* special case - (INCLUDE, empty) == LEAVE_GROUP */ if (psl->sl_count == 1 && omode == MCAST_INCLUDE) { @@ -488,6 +492,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) struct inet6_dev *idev; struct ipv6_pinfo *inet6 = inet6_sk(sk); struct ip6_sf_socklist *newpsl, *psl; + int leavegroup = 0; int i, err; group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr; @@ -503,7 +508,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) if (!idev) return -ENODEV; dev = idev->dev; - err = -EADDRNOTAVAIL; + + err = 0; + if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { + leavegroup = 1; + goto done; + } for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { if (pmc->ifindex != gsf->gf_interface) @@ -511,8 +521,10 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) if (ipv6_addr_equal(&pmc->addr, group)) break; } - if (!pmc) /* must have a prior join */ + if (!pmc) { /* must have a prior join */ + err = -EINVAL; goto done; + } if (gsf->gf_numsrc) { newpsl = (struct 
ip6_sf_socklist *)sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC); @@ -544,10 +556,13 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sflist = newpsl; pmc->sfmode = gsf->gf_fmode; + err = 0; done: read_unlock_bh(&idev->lock); in6_dev_put(idev); dev_put(dev); + if (leavegroup) + err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); return err; } diff --git a/net/irda/irlap.c b/net/irda/irlap.c index 046ad0750e48..7029618f5719 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -445,9 +445,8 @@ void irlap_disconnect_request(struct irlap_cb *self) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Don't disconnect until all data frames are successfully sent */ - if (skb_queue_len(&self->txq) > 0) { + if (!skb_queue_empty(&self->txq)) { self->disconnect_pending = TRUE; - return; } diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index 1cd89f5f3b75..a505b5457608 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) * Send out the RR frames faster if our own transmit queue is empty, or * if the peer is busy. The effect is a much faster conversation */ - if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) { + if (skb_queue_empty(&self->txq) || self->remote_busy) { if (self->fast_RR == TRUE) { /* * Assert that the fast poll timer has not reached the @@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, skb_queue_len(&self->txq)); - if (skb_queue_len(&self->txq)) { + if (!skb_queue_empty(&self->txq)) { /* Prevent race conditions with irlap_data_request() */ self->local_busy = TRUE; @@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ - nextfit = (skb_queue_len(&self->txq) > 0); + nextfit = !skb_queue_empty(&self->txq); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with poll bit cleared only if window > 1 @@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ - nextfit = (skb_queue_len(&self->txq) > 0); + nextfit = !skb_queue_empty(&self->txq); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with final bit cleared only if window > 1 @@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_data_indication(self, skb, FALSE); /* Any pending data requests? */ - if ((skb_queue_len(&self->txq) > 0) && + if (!skb_queue_empty(&self->txq) && (self->window > 0)) { self->ack_required = TRUE; @@ -2038,7 +2038,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, /* * Any pending data requests? 
*/ - if ((skb_queue_len(&self->txq) > 0) && + if (!skb_queue_empty(&self->txq) && (self->window > 0) && !self->remote_busy) { irlap_data_indication(self, skb, TRUE); @@ -2069,7 +2069,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, */ nr_status = irlap_validate_nr_received(self, info->nr); if (nr_status == NR_EXPECTED) { - if ((skb_queue_len( &self->txq) > 0) && + if (!skb_queue_empty(&self->txq) && (self->window > 0)) { self->remote_busy = FALSE; diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index 040abe714aa3..6dafbb43b529 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -1018,11 +1018,10 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) /* * We can now fill the window with additional data frames */ - while (skb_queue_len( &self->txq) > 0) { + while (!skb_queue_empty(&self->txq)) { IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); - if ((skb_queue_len( &self->txq) > 0) && - (self->window > 0)) { + if (self->window > 0) { skb = skb_dequeue( &self->txq); IRDA_ASSERT(skb != NULL, return;); @@ -1031,8 +1030,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) * bit cleared */ if ((self->window > 1) && - skb_queue_len(&self->txq) > 0) - { + !skb_queue_empty(&self->txq)) { irlap_send_data_primary(self, skb); } else { irlap_send_data_primary_poll(self, skb); diff --git a/net/irda/irttp.c b/net/irda/irttp.c index d091ccf773b3..6602d901f8b1 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -1513,7 +1513,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, /* * Check if there is still data segments in the transmit queue */ - if (skb_queue_len(&self->tx_queue) > 0) { + if (!skb_queue_empty(&self->tx_queue)) { if (priority == P_HIGH) { /* * No need to send the queued data, if we are diff --git a/net/llc/llc_c_ev.c b/net/llc/llc_c_ev.c index cd130c3b72bc..d5bdb53a348f 100644 --- a/net/llc/llc_c_ev.c +++ b/net/llc/llc_c_ev.c @@ -84,7 +84,7 @@ static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr) if (llc->dev->flags & IFF_LOOPBACK) goto out; rc = 1; - if (!skb_queue_len(&llc->pdu_unack_q)) + if (skb_queue_empty(&llc->pdu_unack_q)) goto out; skb = skb_peek(&llc->pdu_unack_q); pdu = llc_pdu_sn_hdr(skb); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index fc456a7aaec3..3405fdf41b93 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -858,7 +858,7 @@ static inline void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); - if (!skb_queue_len(&sk->sk_receive_queue)) + if (skb_queue_empty(&sk->sk_receive_queue)) clear_bit(0, &nlk->state); if (!test_bit(0, &nlk->state)) wake_up_interruptible(&nlk->wait); diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 664d0e47374f..7845d045eec4 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256); q->qcount = -1; - if (skb_queue_len(&sch->q) == 0) + if (skb_queue_empty(&sch->q)) PSCHED_SET_PASTPERFECT(q->qidlestart); sch_tree_unlock(sch); return 0; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 7ae6aa772dab..4b47dd6f2485 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -203,7 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a */ asoc->addip_serial = asoc->c.initial_tsn; - skb_queue_head_init(&asoc->addip_chunks); + 
INIT_LIST_HEAD(&asoc->addip_chunk_list); /* Make an empty list of remote transport addresses. */ INIT_LIST_HEAD(&asoc->peer.transport_addr_list); diff --git a/net/sctp/input.c b/net/sctp/input.c index 339f7acfdb64..5e085e041a6e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -115,6 +115,17 @@ static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); } +struct sctp_input_cb { + union { + struct inet_skb_parm h4; +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) + struct inet6_skb_parm h6; +#endif + } header; + struct sctp_chunk *chunk; +}; +#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) + /* * This is the routine which IP calls when receiving an SCTP packet. */ @@ -243,6 +254,7 @@ int sctp_rcv(struct sk_buff *skb) ret = -ENOMEM; goto discard_release; } + SCTP_INPUT_CB(skb)->chunk = chunk; sctp_rcv_set_owner_r(skb,sk); @@ -265,9 +277,9 @@ int sctp_rcv(struct sk_buff *skb) sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) - sk_add_backlog(sk, (struct sk_buff *) chunk); + sk_add_backlog(sk, skb); else - sctp_backlog_rcv(sk, (struct sk_buff *) chunk); + sctp_backlog_rcv(sk, skb); /* Release the sock and any reference counts we took in the * lookup calls. @@ -302,14 +314,8 @@ discard_release: */ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { - struct sctp_chunk *chunk; - struct sctp_inq *inqueue; - - /* One day chunk will live inside the skb, but for - * now this works. - */ - chunk = (struct sctp_chunk *) skb; - inqueue = &chunk->rcvr->inqueue; + struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; + struct sctp_inq *inqueue = &chunk->rcvr->inqueue; sctp_inq_push(inqueue, chunk); return 0; diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cedf4351556c..2d33922c044b 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -50,7 +50,7 @@ /* Initialize an SCTP inqueue. */ void sctp_inq_init(struct sctp_inq *queue) { - skb_queue_head_init(&queue->in); + INIT_LIST_HEAD(&queue->in_chunk_list); queue->in_progress = NULL; /* Create a task for delivering data. */ @@ -62,11 +62,13 @@ void sctp_inq_init(struct sctp_inq *queue) /* Release the memory associated with an SCTP inqueue. */ void sctp_inq_free(struct sctp_inq *queue) { - struct sctp_chunk *chunk; + struct sctp_chunk *chunk, *tmp; /* Empty the queue. */ - while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL) + list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { + list_del_init(&chunk->list); sctp_chunk_free(chunk); + } /* If there is a packet which is currently being worked on, * free it as well. @@ -92,7 +94,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet) * Eventually, we should clean up inqueue to not rely * on the BH related data structures. */ - skb_queue_tail(&(q->in), (struct sk_buff *) packet); + list_add_tail(&packet->list, &q->in_chunk_list); q->immediate.func(q->immediate.data); } @@ -131,12 +133,16 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) /* Do we need to take the next packet out of the queue to process? */ if (!chunk) { + struct list_head *entry; + /* Is the queue empty? */ - if (skb_queue_empty(&queue->in)) + if (list_empty(&queue->in_chunk_list)) return NULL; + entry = queue->in_chunk_list.next; chunk = queue->in_progress = - (struct sctp_chunk *) skb_dequeue(&queue->in); + list_entry(entry, struct sctp_chunk, list); + list_del_init(entry); /* This is the first chunk in the packet. 
*/ chunk->singleton = 1; diff --git a/net/sctp/output.c b/net/sctp/output.c index 84b5b370b09d..931371633464 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -108,7 +108,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, packet->transport = transport; packet->source_port = sport; packet->destination_port = dport; - skb_queue_head_init(&packet->chunks); + INIT_LIST_HEAD(&packet->chunk_list); if (asoc) { struct sctp_sock *sp = sctp_sk(asoc->base.sk); overhead = sp->pf->af->net_header_len; @@ -129,12 +129,14 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, /* Free a packet. */ void sctp_packet_free(struct sctp_packet *packet) { - struct sctp_chunk *chunk; + struct sctp_chunk *chunk, *tmp; SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) + list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { + list_del_init(&chunk->list); sctp_chunk_free(chunk); + } if (packet->malloced) kfree(packet); @@ -276,7 +278,7 @@ append: packet->has_sack = 1; /* It is OK to send this chunk. */ - __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk); + list_add_tail(&chunk->list, &packet->chunk_list); packet->size += chunk_len; chunk->transport = packet->transport; finish: @@ -295,7 +297,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) struct sctphdr *sh; __u32 crc32; struct sk_buff *nskb; - struct sctp_chunk *chunk; + struct sctp_chunk *chunk, *tmp; struct sock *sk; int err = 0; int padding; /* How much padding do we need? */ @@ -305,11 +307,11 @@ int sctp_packet_transmit(struct sctp_packet *packet) SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); /* Do NOT generate a chunkless packet. */ - chunk = (struct sctp_chunk *)skb_peek(&packet->chunks); - if (unlikely(!chunk)) + if (list_empty(&packet->chunk_list)) return err; /* Set up convenience variables... */ + chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); sk = chunk->skb->sk; /* Allocate the new skb. */ @@ -370,7 +372,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) * [This whole comment explains WORD_ROUND() below.] */ SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { + list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { + list_del_init(&chunk->list); if (sctp_chunk_is_data(chunk)) { if (!chunk->has_tsn) { @@ -511,7 +514,8 @@ err: * will get resent or dropped later. */ - while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { + list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { + list_del_init(&chunk->list); if (!sctp_chunk_is_data(chunk)) sctp_chunk_free(chunk); } diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 4eb81a1407b7..efb72faba20c 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); static inline void sctp_outq_head_data(struct sctp_outq *q, struct sctp_chunk *ch) { - __skb_queue_head(&q->out, (struct sk_buff *)ch); + list_add(&ch->list, &q->out_chunk_list); q->out_qlen += ch->skb->len; return; } @@ -83,17 +83,22 @@ static inline void sctp_outq_head_data(struct sctp_outq *q, /* Take data from the front of the queue. 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 4eb81a1407b7..efb72faba20c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 static inline void sctp_outq_head_data(struct sctp_outq *q,
 					struct sctp_chunk *ch)
 {
-	__skb_queue_head(&q->out, (struct sk_buff *)ch);
+	list_add(&ch->list, &q->out_chunk_list);
 	q->out_qlen += ch->skb->len;
 	return;
 }
@@ -83,17 +83,22 @@
 /* Take data from the front of the queue.  */
 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
 {
-	struct sctp_chunk *ch;
-	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
-	if (ch)
+	struct sctp_chunk *ch = NULL;
+
+	if (!list_empty(&q->out_chunk_list)) {
+		struct list_head *entry = q->out_chunk_list.next;
+
+		ch = list_entry(entry, struct sctp_chunk, list);
+		list_del_init(entry);
 		q->out_qlen -= ch->skb->len;
+	}
 	return ch;
 }
 
 /* Add data chunk to the end of the queue.  */
 static inline void sctp_outq_tail_data(struct sctp_outq *q,
 				       struct sctp_chunk *ch)
 {
-	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
+	list_add_tail(&ch->list, &q->out_chunk_list);
 	q->out_qlen += ch->skb->len;
 	return;
 }
@@ -197,8 +202,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
 	q->asoc = asoc;
-	skb_queue_head_init(&q->out);
-	skb_queue_head_init(&q->control);
+	INIT_LIST_HEAD(&q->out_chunk_list);
+	INIT_LIST_HEAD(&q->control_chunk_list);
 	INIT_LIST_HEAD(&q->retransmit);
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
@@ -217,7 +222,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
 {
 	struct sctp_transport *transport;
 	struct list_head *lchunk, *pos, *temp;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk, *tmp;
 
 	/* Throw away unacknowledged chunks. */
 	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
@@ -269,8 +274,10 @@ void sctp_outq_teardown(struct sctp_outq *q)
 	q->error = 0;
 
 	/* Throw away any leftover control chunks. */
-	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
+	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+		list_del_init(&chunk->list);
 		sctp_chunk_free(chunk);
+	}
 }
 
 /* Free the outqueue structure and any related pending chunks. */
@@ -333,7 +340,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 			break;
 		};
 	} else {
-		__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
+		list_add_tail(&chunk->list, &q->control_chunk_list);
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 	}
 
@@ -650,10 +657,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 	__u16 sport = asoc->base.bind_addr.port;
 	__u16 dport = asoc->peer.port;
 	__u32 vtag = asoc->peer.i.init_tag;
-	struct sk_buff_head *queue;
 	struct sctp_transport *transport = NULL;
 	struct sctp_transport *new_transport;
-	struct sctp_chunk *chunk;
+	struct sctp_chunk *chunk, *tmp;
 	sctp_xmit_t status;
 	int error = 0;
 	int start_timer = 0;
@@ -675,8 +681,9 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 	 * ...
 	 */
 
-	queue = &q->control;
-	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
+	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
+		list_del_init(&chunk->list);
+
 		/* Pick the right transport to use. */
 		new_transport = chunk->transport;
@@ -814,8 +821,6 @@
 	/* Finally, transmit new packets.  */
 	start_timer = 0;
-	queue = &q->out;
-
 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
 		/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
 		 * stream identifier.
@@ -1149,8 +1154,9 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 	/* See if all chunks are acked.
 	 * Make sure the empty queue handler will get run later.
 	 */
-	q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
-			list_empty(&q->retransmit);
+	q->empty = (list_empty(&q->out_chunk_list) &&
+		    list_empty(&q->control_chunk_list) &&
+		    list_empty(&q->retransmit));
 	if (!q->empty)
 		goto finish;
@@ -1679,9 +1685,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 		if (TSN_lte(tsn, ctsn)) {
 			list_del_init(lchunk);
 			if (!chunk->tsn_gap_acked) {
-			chunk->transport->flight_size -=
-						sctp_data_size(chunk);
-			q->outstanding_bytes -= sctp_data_size(chunk);
+				chunk->transport->flight_size -=
+						sctp_data_size(chunk);
+				q->outstanding_bytes -= sctp_data_size(chunk);
 			}
 			sctp_chunk_free(chunk);
 		} else {
@@ -1729,7 +1735,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 				    nskips, &ftsn_skip_arr[0]);
 
 	if (ftsn_chunk) {
-		__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
+		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 	}
 }
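
The outqueue.c hunks repeat one small pattern: test list_empty(), convert head->next with list_entry(), and unlink with list_del_init() so the node ends up self-linked. A kernel-style sketch of that dequeue-from-head pattern follows, assuming <linux/list.h>; my_item and my_dequeue are illustrative names, not part of the patch.

#include <linux/list.h>

struct my_item {
	struct list_head list;
};

static struct my_item *my_dequeue(struct list_head *queue)
{
	struct my_item *item = NULL;

	if (!list_empty(queue)) {
		struct list_head *entry = queue->next;	/* oldest entry */

		item = list_entry(entry, struct my_item, list);
		list_del_init(entry);	/* node is self-linked afterwards */
	}
	return item;
}

Using list_del_init() rather than plain list_del() matters here: the sm_make_chunk.c hunk below makes sctp_chunk_free() assert list_empty(&chunk->list), and only a reinitialized node satisfies that check.
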
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 5baed9bb7de5..773cd93fa3d0 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1003,6 +1003,7 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 		SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
 	}
 
+	INIT_LIST_HEAD(&retval->list);
 	retval->skb		= skb;
 	retval->asoc		= (struct sctp_association *)asoc;
 	retval->resent		= 0;
@@ -1116,8 +1117,7 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
 /* Possibly, free the chunk.  */
 void sctp_chunk_free(struct sctp_chunk *chunk)
 {
-	/* Make sure that we are not on any list.  */
-	skb_unlink((struct sk_buff *) chunk);
+	BUG_ON(!list_empty(&chunk->list));
 	list_del_init(&chunk->transmitted_list);
 
 	/* Release our reference on the message tracker. */
@@ -2739,8 +2739,12 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
 	asoc->addip_last_asconf = NULL;
 
 	/* Send the next asconf chunk from the addip chunk queue. */
-	asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks);
-	if (asconf) {
+	if (!list_empty(&asoc->addip_chunk_list)) {
+		struct list_head *entry = asoc->addip_chunk_list.next;
+		asconf = list_entry(entry, struct sctp_chunk, list);
+
+		list_del_init(entry);
+
 		/* Hold the chunk until an ASCONF_ACK is received. */
 		sctp_chunk_hold(asconf);
 		if (sctp_primitive_ASCONF(asoc, asconf))
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index aad55dc3792b..091a66f06a35 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -406,7 +406,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 	 * transmission.
 	 */
 	if (asoc->addip_last_asconf) {
-		__skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk);
+		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
 		goto out;
 	}
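
The sm_make_chunk.c and socket.c hunks establish the lifetime rule that makes the BUG_ON() above sound: the list node is initialized the moment a chunk is created, so "not on any queue" is always observable as list_empty(). A kernel-style sketch of that rule, assuming <linux/list.h>, <linux/bug.h> and <linux/slab.h>; the my_chunk names are illustrative only.

#include <linux/bug.h>
#include <linux/list.h>
#include <linux/slab.h>

struct my_chunk {
	struct list_head list;
};

static struct my_chunk *my_chunk_new(gfp_t gfp)
{
	struct my_chunk *c = kmalloc(sizeof(*c), gfp);

	if (c)
		INIT_LIST_HEAD(&c->list);	/* self-linked == not queued */
	return c;
}

static void my_chunk_free(struct my_chunk *c)
{
	/* Caller must have unlinked the chunk (list_del_init) first. */
	BUG_ON(!list_empty(&c->list));
	kfree(c);
}
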
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c420eba4876b..d403e34088ad 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -302,7 +302,7 @@ static void unix_write_space(struct sock *sk)
  * may receive messages only from that peer.
  */
 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 {
-	if (skb_queue_len(&sk->sk_receive_queue)) {
+	if (!skb_queue_empty(&sk->sk_receive_queue)) {
 		skb_queue_purge(&sk->sk_receive_queue);
 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 
@@ -1619,7 +1619,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
 	for (;;) {
 		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
-		if (skb_queue_len(&sk->sk_receive_queue) ||
+		if (!skb_queue_empty(&sk->sk_receive_queue) ||
 		    sk->sk_err ||
 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current) ||
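
The af_unix.c hunks belong to the tree-wide half of this patch: wherever skb_queue_len() was only compared against zero, the direct skb_queue_empty() test expresses the intent better. A kernel-style sketch, assuming <linux/skbuff.h>; rx_ready is an illustrative helper, not part of the patch.

#include <linux/skbuff.h>

/* Illustrative helper: true when the receive queue holds data. */
static inline bool rx_ready(const struct sk_buff_head *queue)
{
	/* Preferred form: a direct emptiness test, rather than the
	 * older skb_queue_len(queue) != 0 comparison it replaces. */
	return !skb_queue_empty(queue);
}
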