scsi: lpfc: Fix buffer free/clear order in deferred receive path

Fix a use-after-free window in the deferred receive path by correcting the
buffer release order. The old code freed the RQ buffer first and only
afterwards took ctxp->ctxlock to clear ctxp->rqb_buffer. Concurrent paths
(e.g., the ABTS and buffer-repost paths) inspect and release the same
pointer under that lock, so in the window between the free and the clear
they could still observe the stale pointer, resulting in a double free or
use-after-free.
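As a rough illustration of the race (simplified pseudocode, not the actual
driver source; the concurrent-path body is a hypothetical stand-in for the
ABTS/repost logic):

  /* Deferred receive path, old order: */
  nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);   /* buffer freed     */
          /* <-- window: ctxp->rqb_buffer still points at freed memory */
  spin_lock_irqsave(&ctxp->ctxlock, iflag);
  ctxp->rqb_buffer = NULL;                               /* cleared too late */
  spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

  /* Concurrent path (e.g. ABTS), hypothetical sketch of the hazard: */
  spin_lock_irqsave(&ctxp->ctxlock, iflag);
  if (ctxp->rqb_buffer) {          /* stale, already-freed buffer */
          nvmebuf = ctxp->rqb_buffer;
          ctxp->rqb_buffer = NULL;
          /* releasing it again -> double free / use-after-free */
  }
  spin_unlock_irqrestore(&ctxp->ctxlock, iflag);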

Note that the repost path already uses the correct pattern: detach the
pointer under the lock, then free the buffer after dropping the lock. Make
the deferred receive path do the same.
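In outline, the corrected sequence in lpfc_nvmet_defer_rcv() looks like this
(a simplified sketch, not a verbatim excerpt):

  spin_lock_irqsave(&ctxp->ctxlock, iflag);
  nvmebuf = ctxp->rqb_buffer;      /* detach under the lock           */
  ctxp->rqb_buffer = NULL;         /* concurrent paths now see NULL   */
  spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

  if (nvmebuf)
          /* free outside the lock; no other path can reach the buffer */
          nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);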

Fixes: 472e146d1c ("scsi: lpfc: Correct upcalling nvmet_fc transport during io done downcall")
Cc: stable@vger.kernel.org
Signed-off-by: John Evans <evans1210144@gmail.com>
Link: https://lore.kernel.org/r/20250828044008.743-1-evans1210144@gmail.com
Reviewed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

@@ -1243,7 +1243,7 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_async_xchg_ctx *ctxp =
 		container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
-	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
+	struct rqb_dmabuf *nvmebuf;
 	struct lpfc_hba *phba = ctxp->phba;
 	unsigned long iflag;
 
@@ -1251,13 +1251,18 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
 
+	spin_lock_irqsave(&ctxp->ctxlock, iflag);
+	nvmebuf = ctxp->rqb_buffer;
 	if (!nvmebuf) {
+		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
 				"6425 Defer rcv: no buffer oxid x%x: "
 				"flg %x ste %x\n",
 				ctxp->oxid, ctxp->flag, ctxp->state);
 		return;
 	}
+	ctxp->rqb_buffer = NULL;
+	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 
 	tgtp = phba->targetport->private;
 	if (tgtp)
@@ -1265,9 +1270,6 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
 
 	/* Free the nvmebuf since a new buffer already replaced it */
 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
-	spin_lock_irqsave(&ctxp->ctxlock, iflag);
-	ctxp->rqb_buffer = NULL;
-	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 }
 
 /**