mirror of https://kernel.googlesource.com/pub/scm/linux/kernel/git/stable/linux-stable.git
synced 2025-09-25 16:49:33 +10:00
RDMA/rxe: Flush delayed SKBs while releasing RXE resources
commit 3c3e9a9f29 upstream.

When skb packets are sent out, they still depend on rxe resources such as the QP and the sk until the packets themselves are destroyed. If those rxe resources are released before the skb packets are freed, call traces appear.

To keep skb packets from lingering too long in some network devices, a timestamp is added when each skb packet is created. If an skb packet stays in a network device for too long, the device can free it, which in turn releases the rxe resources it depends on.

Reported-by: syzbot+8425ccfb599521edb153@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=8425ccfb599521edb153
Tested-by: syzbot+8425ccfb599521edb153@syzkaller.appspotmail.com
Fixes: 1a633bdc8f ("RDMA/rxe: Let destroy qp succeed with stuck packet")
Signed-off-by: Zhu Yanjun <yanjun.zhu@linux.dev>
Link: https://patch.msgid.link/20250726013104.463570-1-yanjun.zhu@linux.dev
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e7ea47001f
commit 8ead139a8e
drivers/infiniband/sw/rxe/rxe_net.c

@@ -345,33 +345,15 @@ int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
 
 static void rxe_skb_tx_dtor(struct sk_buff *skb)
 {
-	struct net_device *ndev = skb->dev;
-	struct rxe_dev *rxe;
-	unsigned int qp_index;
-	struct rxe_qp *qp;
+	struct rxe_qp *qp = skb->sk->sk_user_data;
 	int skb_out;
 
-	rxe = rxe_get_dev_from_net(ndev);
-	if (!rxe && is_vlan_dev(ndev))
-		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
-	if (WARN_ON(!rxe))
-		return;
-
-	qp_index = (int)(uintptr_t)skb->sk->sk_user_data;
-	if (!qp_index)
-		return;
-
-	qp = rxe_pool_get_index(&rxe->qp_pool, qp_index);
-	if (!qp)
-		goto put_dev;
-
 	skb_out = atomic_dec_return(&qp->skb_out);
-	if (qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)
+	if (unlikely(qp->need_req_skb &&
+		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
 		rxe_sched_task(&qp->send_task);
 
 	rxe_put(qp);
-put_dev:
-	ib_device_put(&rxe->ib_dev);
 	sock_put(skb->sk);
 }
 
@@ -383,6 +365,7 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 	sock_hold(sk);
 	skb->sk = sk;
 	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -405,6 +388,7 @@ static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 	sock_hold(sk);
 	skb->sk = sk;
 	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
 
 	if (skb->protocol == htons(ETH_P_IP))
@@ -497,6 +481,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 		goto out;
 	}
 
+	/* Add time stamp to skb. */
+	skb->tstamp = ktime_get();
+
 	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
 
 	/* FIXME: hold reference to this netdev until life of this skb. */

drivers/infiniband/sw/rxe/rxe_qp.c

@@ -244,7 +244,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
 	if (err < 0)
 		return err;
-	qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
+	qp->sk->sk->sk_user_data = qp;
 
 	/* pick a source UDP port number for this QP based on
 	 * the source QPN. this spreads traffic for different QPs