block-6.14-20250313

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmfTYxsQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpjVKD/9qZjztmVq6Rk9RBjZwMYxcO9Nzj7qQQ6m9
 S15eXslAA1eLec3p1Mx4oVaWoFranY03BClqCgywBUAgpYstnT9cEqkz0P+n6xIE
 bNGjfxx4NInvrQYRETskc4wQqOnAdiRMd9i96EpHqW9Pi/pl8dSQxmxlaeo0BBIM
 XDvodLhr38aLJwcNBQ9NKCfhJ7RruACuSiXRAsPH3D641ZpccW4ADuhYxhJDehKa
 fMzuEFaa9/mBvmIrhE3QCbvWr7VYzSkadMLJWnxLsN1PU4FXZbh4Oy5Kp7DrA4Zq
 YkwezSivjNWNqNsiyvVa63mKbxfe9MSh5odqWuLrkWr4cOzEOcpHRbNV2El5RK/x
 BtGt/eCT2cRQAG4MzveoiE1yG9AAmUvUZL/RvxbERedqWO69IsgrsIsdnoiaLgw/
 267eCeGQlpHGhVUKga7ouShlTowTaCLCi+XgJwUTsVP/VPuzEFwgkzX0J45bSPGd
 h0laUzuHcThe8cRY2t5JWu+JJTqHj6ubsPeqiMAQzCns1C+IWYsjPXEohfqt7av+
 2yoIwG9DCBfJfh0ml0t3yHHMSJzjcwQcQAw1P7loLI+TIDvrpVP7AYOVt4SYeXl4
 RTEvNKQRmQGNZ8B3lGrqVKnbJ5ExBzvE6muQTOhockCTQsNK7WNaT2dMWRLyW6rW
 HcdUkADDVg==
 =MG/C
 -----END PGP SIGNATURE-----

Merge tag 'block-6.14-20250313' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Concurrent pci error and hotplug handling fix (Keith)
     - Endpoint function fixes (Damien)

 - Fix for a regression introduced in this cycle with error checking for
   batched request completions (Shin'ichiro)

* tag 'block-6.14-20250313' of git://git.kernel.dk/linux:
  block: change blk_mq_add_to_batch() third argument type to bool
  nvme: move error logging from nvme_end_req() to __nvme_end_req()
  nvmet: pci-epf: Do not add an IRQ vector if not needed
  nvmet: pci-epf: Set NVMET_PCI_EPF_Q_LIVE when a queue is fully created
  nvme-pci: fix stuck reset on concurrent DPC and HP
This commit is contained in:
Linus Torvalds 2025-03-14 11:22:05 -10:00
commit 580b203235
7 changed files with 54 additions and 32 deletions

View File

@ -1549,8 +1549,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
blk_mq_end_request_batch))
if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK,
blk_mq_end_request_batch))
blk_mq_end_request(req, cmd->error);
nr++;
}

View File

@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
struct request *req = blk_mq_rq_from_pdu(vbr);
u8 status = virtblk_vbr_status(vbr);
found++;
if (!blk_mq_complete_request_remote(req) &&
!blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr),
virtblk_complete_batch))
!blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK,
virtblk_complete_batch))
virtblk_request_done(req);
}

View File

@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
}
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
!blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
!blk_mq_add_to_batch(req, iob,
nvme_req(req)->status != NVME_SC_SUCCESS,
apple_nvme_complete_batch))
apple_nvme_complete_rq(req);
}

View File

@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req)
static inline void __nvme_end_req(struct request *req)
{
if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
if (blk_rq_is_passthrough(req))
nvme_log_err_passthru(req);
else
nvme_log_error(req);
}
nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
if (req->cmd_flags & REQ_NVME_MPATH)
@ -441,12 +447,6 @@ void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) {
if (blk_rq_is_passthrough(req))
nvme_log_err_passthru(req);
else
nvme_log_error(req);
}
__nvme_end_req(req);
blk_mq_end_request(req, status);
}

View File

@ -1130,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
!blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
nvme_pci_complete_batch))
!blk_mq_add_to_batch(req, iob,
nvme_req(req)->status != NVME_SC_SUCCESS,
nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@ -1411,9 +1412,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
struct nvme_dev *dev = nvmeq->dev;
struct request *abort_req;
struct nvme_command cmd = { };
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 csts = readl(dev->bar + NVME_REG_CSTS);
u8 opcode;
/*
* Shutdown the device immediately if we see it is disconnected. This
* unblocks PCIe error handling if the nvme driver is waiting in
* error_resume for a device that has been removed. We can't unbind the
* driver while the driver's error callback is waiting to complete, so
* we're relying on a timeout to break that deadlock if a removal
* occurs while reset work is running.
*/
if (pci_dev_is_disconnected(pdev))
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
if (nvme_state_terminal(&dev->ctrl))
goto disable;
@ -1421,7 +1433,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
* the recovery mechanism will surely fail.
*/
mb();
if (pci_channel_offline(to_pci_dev(dev->dev)))
if (pci_channel_offline(pdev))
return BLK_EH_RESET_TIMER;
/*

View File

@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid];
u16 status;
if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR;
if (flags & NVME_CQ_IRQ_ENABLED)
set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
cq->pci_addr = pci_addr;
cq->qid = cqid;
cq->depth = qsize + 1;
@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl,
cq->qes = ctrl->io_cqes;
cq->pci_size = cq->qes * cq->depth;
cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
if (!cq->iv) {
status = NVME_SC_INTERNAL | NVME_STATUS_DNR;
goto err;
if (flags & NVME_CQ_IRQ_ENABLED) {
cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector);
if (!cq->iv)
return NVME_SC_INTERNAL | NVME_STATUS_DNR;
set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
}
status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth);
if (status != NVME_SC_SUCCESS)
goto err;
set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n",
cqid, qsize, cq->qes, cq->vector);
return NVME_SC_SUCCESS;
err:
clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags);
clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags);
if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags))
nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector);
return status;
}
@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid];
u16 status;
if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags))
return NVME_SC_QID_INVALID | NVME_STATUS_DNR;
if (!(flags & NVME_QUEUE_PHYS_CONTIG))
@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth);
if (status != NVME_SC_SUCCESS)
goto out_clear_bit;
return status;
sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND,
min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid);
@ -1365,6 +1365,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
goto out_destroy_sq;
}
set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n",
sqid, qsize, sq->qes);
@ -1372,8 +1374,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl,
out_destroy_sq:
nvmet_sq_destroy(&sq->nvme_sq);
out_clear_bit:
clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags);
return status;
}

View File

@ -852,12 +852,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq)
return rq->rq_flags & RQF_RESV;
}
/*
/**
* blk_mq_add_to_batch() - add a request to the completion batch
* @req: The request to add to the batch
* @iob: The batch to add the request to
* @is_error: Specify true if the request failed with an error
* @complete: The completion handler for the request
*
* Batched completions only work when there is no I/O error and no special
* ->end_io handler.
*
* Return: true when the request was added to the batch, otherwise false
*/
static inline bool blk_mq_add_to_batch(struct request *req,
struct io_comp_batch *iob, int ioerror,
struct io_comp_batch *iob, bool is_error,
void (*complete)(struct io_comp_batch *))
{
/*
@ -865,7 +873,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
* 1) No batch container
* 2) Has scheduler data attached
* 3) Not a passthrough request and end_io set
* 4) Not a passthrough request and an ioerror
* 4) Not a passthrough request and failed with an error
*/
if (!iob)
return false;
@ -874,7 +882,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
if (!blk_rq_is_passthrough(req)) {
if (req->end_io)
return false;
if (ioerror < 0)
if (is_error)
return false;
}