| author | Maciej Fijalkowski <maciej.fijalkowski@intel.com> | 2023-07-19 15:24:08 +0200 | 
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2023-07-19 09:56:49 -0700 | 
| commit | 24ea50127ecf0efe819c1f6230add27abc6ca9d9 (patch) | |
| tree | ef1c4492c553100e68df539ec534eca8ac0f85c5 /net | |
| parent | 13ce2daa259a3bfbc9a5aeeee8b9a87058703731 (diff) | |
xsk: support mbuf on ZC RX
Given that skb_shared_info relies on skb_frag_t, introduce
xdp_buff_xsk::xskb_list_node and xsk_buff_pool::xskb_list in order to
support xskb chaining.
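
For orientation, a minimal sketch of the two new members is shown below; the actual declarations live in the xsk headers, which are outside the net/-limited diff on this page, so surrounding fields are elided:

```c
/* Minimal sketch, not the full definitions: only the members this patch
 * adds are shown; the real structs carry many more fields. */
#include <linux/list.h>

struct xdp_buff_xsk {
	/* ... existing members ... */
	struct list_head xskb_list_node;	/* links this xskb into its frame's frag chain */
};

struct xsk_buff_pool {
	/* ... existing members ... */
	struct list_head xskb_list;		/* per-pool head of chained frag xskbs */
};
```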
This is needed so that ZC drivers can add frags as xskb nodes, which makes
it possible to handle them both when producing AF_XDP Rx descriptors and
when freeing/recycling all the frags that a single frame carries.
Speaking of the latter, update xsk_buff_free() to take care of the list
nodes, as sketched below.
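
A rough sketch of how the updated xsk_buff_free() walks those list nodes; the real helper is a static inline in the xsk driver-facing header and is not part of the net/ diff below, so treat this as an approximation:

```c
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	/* Return every chained frag to the pool before freeing the head. */
	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
		list_del(&pos->xskb_list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}
```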
For the former (adding frags), introduce xsk_buff_add_frag() for ZC driver
use; it adds a frag to the pool's xskb list.
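
A sketch of what that helper boils down to; again an approximation of the header-side change, which is not shown in the net/ diff:

```c
static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);

	/* Queue the frag's xskb on the pool list so that Rx descriptor
	 * production and freeing can later find all frags of the frame. */
	list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
}
```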
xsk_buff_get_frag() will be used by XDP_TX and, in contrast, will return an
xdp_buff.
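
A corresponding sketch of xsk_buff_get_frag(), which pops the next chained xskb off the pool list and hands its xdp_buff back to the caller (approximate, header-side):

```c
static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, xskb_list_node);
	if (frag) {
		list_del(&frag->xskb_list_node);
		ret = &frag->xdp;
	}

	return ret;
}
```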
One of the previous patches added a wrapper for ZC Rx, so implement the
xskb list walk and the production of Rx descriptors there.
On the bind() path, bail out if the socket wants to use ZC multi-buffer but
the underlying netdev does not support it.
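
For context, a hypothetical, simplified driver-side sequence that would exercise these helpers on ZC Rx; the function name and arguments are made up for illustration, and a real driver would additionally fill the frame's skb_shared_info frag array and enforce the MAX_SKB_FRAGS limit:

```c
static void example_zc_rx_add_frag(struct xdp_buff *first, struct xdp_buff *frag,
				   u32 frag_len)
{
	if (!xdp_buff_has_frags(first))
		xdp_buff_set_frags_flag(first);	/* mark the head as multi-buffer */

	xsk_buff_set_size(frag, frag_len);	/* trim the frag to the received length */
	xsk_buff_add_frag(frag);		/* chain it onto pool->xskb_list */
}
```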
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Link: https://lore.kernel.org/r/20230719132421.584801-12-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'net')
| -rw-r--r-- | net/xdp/xsk.c | 26 |
| -rw-r--r-- | net/xdp/xsk_buff_pool.c | 7 |
2 files changed, 32 insertions, 1 deletion
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index ba755fed3750..4f1e0599146e 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -155,8 +155,32 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
 static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+	u32 frags = xdp_buff_has_frags(xdp);
+	struct xdp_buff_xsk *pos, *tmp;
+	struct list_head *xskb_list;
+	u32 contd = 0;
+	int err;
+
+	if (frags)
+		contd = XDP_PKT_CONTD;
 
-	return __xsk_rcv_zc(xs, xskb, len, 0);
+	err = __xsk_rcv_zc(xs, xskb, len, contd);
+	if (err || likely(!frags))
+		goto out;
+
+	xskb_list = &xskb->pool->xskb_list;
+	list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+		if (list_is_singular(xskb_list))
+			contd = 0;
+		len = pos->xdp.data_end - pos->xdp.data;
+		err = __xsk_rcv_zc(xs, pos, len, contd);
+		if (err)
+			return err;
+		list_del(&pos->xskb_list_node);
+	}
+
+out:
+	return err;
 }
 
 static void *xsk_copy_xdp_start(struct xdp_buff *from)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 26f6d304451e..b3f7b310811e 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -86,6 +86,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 	pool->umem = umem;
 	pool->addrs = umem->addrs;
 	INIT_LIST_HEAD(&pool->free_list);
+	INIT_LIST_HEAD(&pool->xskb_list);
 	INIT_LIST_HEAD(&pool->xsk_tx_list);
 	spin_lock_init(&pool->xsk_tx_list_lock);
 	spin_lock_init(&pool->cq_lock);
@@ -99,6 +100,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
 		xskb->pool = pool;
 		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
 		INIT_LIST_HEAD(&xskb->free_list_node);
+		INIT_LIST_HEAD(&xskb->xskb_list_node);
 		if (pool->unaligned)
 			pool->free_heads[i] = xskb;
 		else
@@ -187,6 +189,11 @@ int xp_assign_dev(struct xsk_buff_pool *pool,
 		goto err_unreg_pool;
 	}
 
+	if (netdev->xdp_zc_max_segs == 1 && (flags & XDP_USE_SG)) {
+		err = -EOPNOTSUPP;
+		goto err_unreg_pool;
+	}
+
 	bpf.command = XDP_SETUP_XSK_POOL;
 	bpf.xsk.pool = pool;
 	bpf.xsk.queue_id = queue_id;
