Subject: [ 078/109] netem: add limitation to reordered packets
From: Greg KH <gregkh@linuxfoundation.org>

3.4-stable review patch.  If anyone has any objections, please let me know.

------------------


From: Eric Dumazet <edumazet@google.com>

[ Upstream commit 960fb66e520a405dde39ff883f17ff2669c13d85 ]

Fix two netem bugs:

1) When a frame was dropped by tfifo_enqueue(), the drop counter
was incremented twice.

2) When reordering is triggered, we enqueue a packet without
checking the queue limit. Repeated often enough, this can OOM
pretty fast: since the skbs are already orphaned at this point,
no socket limit can bound the memory use.
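
[Editor's illustration, not part of the upstream changelog.] A minimal
userspace C sketch of the fixed control flow: the queue limit is checked
once in the caller, before either the time-ordered tail insert or the
reordering head insert, so every path is bounded and a rejected packet
is counted exactly once. All names here (struct queue, tfifo_insert,
enqueue, push_head) are hypothetical stand-ins for the kernel's
sk_buff_head machinery, not the actual sch_netem.c code.

#include <stdio.h>
#include <stdlib.h>

struct pkt {
	long time_to_send;		/* like netem_skb_cb(skb)->time_to_send */
	struct pkt *prev, *next;
};

struct queue {
	struct pkt *head, *tail;
	unsigned int len, limit, drops;
};

/* Head insert, analogous to __skb_queue_head(). */
static void push_head(struct queue *q, struct pkt *p)
{
	p->next = q->head;
	if (q->head)
		q->head->prev = p;
	else
		q->tail = p;
	q->head = p;
	q->len++;
}

/*
 * Insert in ascending time_to_send, walking back from the tail,
 * mirroring the skb_queue_reverse_walk() in tfifo_enqueue().
 */
static void tfifo_insert(struct queue *q, struct pkt *p)
{
	struct pkt *cur = q->tail;

	while (cur && cur->time_to_send > p->time_to_send)
		cur = cur->prev;

	if (!cur) {			/* earliest packet so far: new head */
		push_head(q, p);
		return;
	}
	p->prev = cur;			/* insert after cur */
	p->next = cur->next;
	if (cur->next)
		cur->next->prev = p;
	else
		q->tail = p;
	cur->next = p;
	q->len++;
}

static int enqueue(struct queue *q, long t, int reorder)
{
	struct pkt *p;

	if (q->len >= q->limit) {	/* single check covers every path */
		q->drops++;		/* counted once, not twice */
		return -1;
	}
	p = calloc(1, sizeof(*p));
	p->time_to_send = t;
	if (reorder)
		push_head(q, p);	/* the formerly unbounded path */
	else
		tfifo_insert(q, p);
	return 0;
}

int main(void)
{
	struct queue q = { .limit = 2 };

	enqueue(&q, 30, 0);
	enqueue(&q, 10, 0);
	enqueue(&q, 20, 1);		/* reordered packet now hits the limit */
	printf("len=%u drops=%u\n", q.len, q.drops);	/* len=2 drops=1 */
	return 0;
}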

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Mark Gordon <msg@google.com>
Cc: Andreas Terzis <aterzis@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Hagen Paul Pfeifer <hagen@jauu.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 net/sched/sch_netem.c |   42 +++++++++++++++---------------------------
 1 file changed, 15 insertions(+), 27 deletions(-)

--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -329,29 +329,22 @@ static psched_time_t packet_len_2_sched_
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_peek_tail(list);
 
-	if (likely(skb_queue_len(list) < sch->limit)) {
-		skb = skb_peek_tail(list);
-		/* Optimize for add at tail */
-		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-			return qdisc_enqueue_tail(nskb, sch);
-
-		skb_queue_reverse_walk(list, skb) {
-			if (tnext >= netem_skb_cb(skb)->time_to_send)
-				break;
-		}
+	/* Optimize for add at tail */
+	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+		return __skb_queue_tail(list, nskb);
 
-		__skb_queue_after(list, skb, nskb);
-		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		return NET_XMIT_SUCCESS;
+	skb_queue_reverse_walk(list, skb) {
+		if (tnext >= netem_skb_cb(skb)->time_to_send)
+			break;
 	}
 
-	return qdisc_reshape_fail(nskb, sch);
+	__skb_queue_after(list, skb, nskb);
 }
 
 /*
@@ -366,7 +359,6 @@ static int netem_enqueue(struct sk_buff
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
-	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -414,6 +406,11 @@ static int netem_enqueue(struct sk_buff
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
+	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+		return qdisc_reshape_fail(skb, sch);
+
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -445,7 +442,7 @@ static int netem_enqueue(struct sk_buff
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		ret = tfifo_enqueue(skb, sch);
+		tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -455,16 +452,7 @@ static int netem_enqueue(struct sk_buff
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
-		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
-		ret = NET_XMIT_SUCCESS;
-	}
-
-	if (ret != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			return ret;
-		}
 	}
 
 	return NET_XMIT_SUCCESS;
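
[Editor's usage note, not from the patch; the device name and numbers
are examples.] The formerly unbounded path is netem's gap-based
reordering, configured with something along the lines of:

    tc qdisc add dev eth0 root netem limit 1000 delay 10ms reorder 100% gap 5

Here every 5th packet is queued at the head to be sent immediately
while the rest are delayed 10ms; with this fix the head-inserted
packets count against "limit 1000" like any other, so the queue can
no longer grow without bound.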


