Subject: Re: [RFC Optimizing veth xsk performance 10/10] veth: af_xdp tx batch support for ipv4 udp
On Thu, Aug 03, 2023 at 10:04:36PM +0800, huangjie.albert wrote:

...

> @@ -103,6 +104,18 @@ struct veth_xdp_tx_bq {
> unsigned int count;
> };
>
> +struct veth_gso_tuple {
> + __u8 protocol;
> + __be32 saddr;
> + __be32 daddr;
> + __be16 source;
> + __be16 dest;
> + __be16 gso_size;
> + __be16 gso_segs;
> + bool gso_enable;
> + bool gso_flush;
> +};
> +
> struct veth_seg_info {
> u32 segs;
> u64 desc[] ____cacheline_aligned_in_smp;

...

> +static inline bool gso_segment_match(struct veth_gso_tuple *gso_tuple, struct iphdr *iph, struct udphdr *udph)
> +{
> + if (gso_tuple->protocol == iph->protocol &&
> + gso_tuple->saddr == iph->saddr &&
> + gso_tuple->daddr == iph->daddr &&
> + gso_tuple->source == udph->source &&
> + gso_tuple->dest == udph->dest &&
> + gso_tuple->gso_size == ntohs(udph->len))

The type of the gso_size field is __be16, but here it is being
compared against a host byte order value (and assigned one below).
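
If the field is only ever tested for equality, one option (a sketch,
not tested) is to keep both sides in network byte order and drop the
conversion, since two __be16 values are equal iff their host-order
counterparts are:

	gso_tuple->gso_size == udph->len

with the assignment below storing udph->len unconverted to match.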

> + {
> + gso_tuple->gso_flush = false;
> + return true;
> + } else {
> + gso_tuple->gso_flush = true;
> + return false;
> + }
> +}
> +
> +static inline void gso_tuple_init(struct veth_gso_tuple *gso_tuple, struct iphdr *iph, struct udphdr *udph)
> +{
> + gso_tuple->protocol = iph->protocol;
> + gso_tuple->saddr = iph->saddr;
> + gso_tuple->daddr = iph->daddr;
> + gso_tuple->source = udph->source;
> + gso_tuple->dest = udph->dest;
> + gso_tuple->gso_flush = false;
> + gso_tuple->gso_size = ntohs(udph->len);

Likewise, here.

As flagged by Sparse.

.../veth.c:721:29: warning: incorrect type in assignment (different base types)
.../veth.c:721:29: expected restricted __be16 [usertype] gso_size
.../veth.c:721:29: got unsigned short [usertype]
.../veth.c:703:26: warning: restricted __be16 degrades to integer
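
FWIW, the warnings can be reproduced with:

	make C=2 drivers/net/veth.o

Alternatively, if a host byte order value is wanted, gso_size could
be declared as a plain u16 so the ntohs() calls stay as they are.
Either way, the assignment and the comparison need to agree on one
byte order.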

> + gso_tuple->gso_segs = 0;
> +}

...

> +static struct sk_buff *veth_build_skb_zerocopy_gso(struct net_device *dev, struct xsk_buff_pool *pool,
> + struct xdp_desc *desc, struct veth_gso_tuple *gso_tuple, struct sk_buff *prev_skb)

Please consider constraining line length to 80 columns.
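
For example, something like (untested, just illustrating one possible
wrapping, with the return type on its own line):

static struct sk_buff *
veth_build_skb_zerocopy_gso(struct net_device *dev,
			    struct xsk_buff_pool *pool,
			    struct xdp_desc *desc,
			    struct veth_gso_tuple *gso_tuple,
			    struct sk_buff *prev_skb)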

> +{
> + u32 hr, len, ts, index, iph_len, th_len, data_offset, data_len, tot_len;
> + struct veth_seg_info *seg_info;
> + void *buffer;
> + struct udphdr *udph;
> + struct iphdr *iph;
> + struct sk_buff *skb;
> + struct page *page;
> + int hh_len = 0;
> + u64 addr;
> +
> + addr = desc->addr;
> + len = desc->len;
> +
> + /* l2 reserved len */
> + hh_len = LL_RESERVED_SPACE(dev);
> + hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(hh_len));
> +
> + /* data points to eth header */
> + buffer = (unsigned char *)xsk_buff_raw_get_data(pool, addr);
> +
> + iph = (struct iphdr *)(buffer + ETH_HLEN);
> + iph_len = iph->ihl * 4;
> +
> + udph = (struct udphdr *)(buffer + ETH_HLEN + iph_len);
> + th_len = sizeof(struct udphdr);
> +
> + if (gso_tuple->gso_flush)
> + gso_tuple_init(gso_tuple, iph, udph);
> +
> + ts = pool->unaligned ? len : pool->chunk_size;
> +
> + data_offset = offset_in_page(buffer) + ETH_HLEN + iph_len + th_len;
> + data_len = len - (ETH_HLEN + iph_len + th_len);
> +
> + /* head is null or this is a new 5 tuple */
> + if (NULL == prev_skb || !gso_segment_match(gso_tuple, iph, udph)) {
> + tot_len = hr + iph_len + th_len;
> + skb = veth_build_gso_head_skb(dev, buffer, tot_len, hr, iph_len, th_len);
> + if (!skb) {
> + /* to do: handle here for skb */
> + return NULL;
> + }
> +
> + /* store information for gso */
> + seg_info = (struct veth_seg_info *)kmalloc(struct_size(seg_info, desc, MAX_SKB_FRAGS), GFP_KERNEL);

No need to cast the return value of kmalloc(); its type is void *.

seg_info = kmalloc(struct_size(seg_info, desc, MAX_SKB_FRAGS),
GFP_KERNEL);
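
FWIW, scripts/coccinelle/api/alloc/alloc_cast.cocci catches this
pattern; something like:

	make coccicheck MODE=report M=drivers/net/ \
		COCCI=scripts/coccinelle/api/alloc/alloc_cast.cocci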
> + if (!seg_info) {
> + /* to do */
> + kfree_skb(skb);
> + return NULL;
> + }

...
