@@ -43,6 +43,7 @@
 #include <asm/page.h>
 #include <linux/task_work.h>
 #include <linux/namei.h>
+#include <linux/kref.h>
 #include <uapi/linux/ublk_cmd.h>
 
 #define UBLK_MINORS		(1U << MINORBITS)
@@ -62,6 +63,8 @@
 
 struct ublk_rq_data {
 	struct llist_node node;
+
+	struct kref ref;
 };
 
 struct ublk_uring_cmd_pdu {
@@ -181,6 +184,9 @@ struct ublk_params_header {
 	__u32	types;
 };
 
+static inline void __ublk_complete_rq(struct request *req);
+static void ublk_complete_rq(struct kref *ref);
+
 static dev_t ublk_chr_devt;
 static struct class *ublk_chr_class;
 
@@ -289,6 +295,45 @@ static int ublk_apply_params(struct ublk_device *ub)
 	return 0;
 }
 
+static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
+{
+	return false;
+}
+
+static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_init(&data->ref);
+	}
+}
+
+static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		return kref_get_unless_zero(&data->ref);
+	}
+
+	return true;
+}
+
+static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
+		struct request *req)
+{
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_put(&data->ref, ublk_complete_rq);
+	} else {
+		__ublk_complete_rq(req);
+	}
+}
+
 static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
 {
 	return ubq->flags & UBLK_F_NEED_GET_DATA;
@@ -625,13 +670,19 @@ static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 }
 
 /* todo: handle partial completion */
-static void ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req)
 {
 	struct ublk_queue *ubq = req->mq_hctx->driver_data;
 	struct ublk_io *io = &ubq->ios[req->tag];
 	unsigned int unmapped_bytes;
 	blk_status_t res = BLK_STS_OK;
 
+	/* called from ublk_abort_queue() code path */
+	if (io->flags & UBLK_IO_FLAG_ABORTED) {
+		res = BLK_STS_IOERR;
+		goto exit;
+	}
+
 	/* failed read IO if nothing is read */
 	if (!io->res && req_op(req) == REQ_OP_READ)
 		io->res = -EIO;
@@ -671,6 +722,15 @@ static void ublk_complete_rq(struct request *req)
 	blk_mq_end_request(req, res);
 }
 
+static void ublk_complete_rq(struct kref *ref)
+{
+	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+			ref);
+	struct request *req = blk_mq_rq_from_pdu(data);
+
+	__ublk_complete_rq(req);
+}
+
 /*
  * Since __ublk_rq_task_work always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
@@ -689,7 +749,7 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 		if (ublk_queue_can_use_recovery_reissue(ubq))
 			blk_mq_requeue_request(req, false);
 		else
-			blk_mq_end_request(req, BLK_STS_IOERR);
+			ublk_put_req_ref(ubq, req);
 	}
 }
 
@@ -798,6 +858,7 @@ static inline void __ublk_rq_task_work(struct request *req,
 				mapped_bytes >> 9;
 	}
 
+	ublk_init_req_ref(ubq, req);
 	ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags);
 }
 
@@ -1002,7 +1063,7 @@ static void ublk_commit_completion(struct ublk_device *ub,
 	req = blk_mq_tag_to_rq(ub->tag_set.tags[qid], tag);
 
 	if (req && likely(!blk_should_fake_timeout(req->q)))
-		ublk_complete_rq(req);
+		ublk_put_req_ref(ubq, req);
 }
 
 /*
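
Note on the pattern: with this patch every completion funnels through ublk_put_req_ref(), so once ublk_need_req_ref() starts returning true (in follow-up changes) a request is completed only after its last reference is dropped, while kref_get_unless_zero() lets another user safely grab a request that has not yet begun completing. The snippet below is a minimal userspace sketch of that same "get-unless-zero / put-with-release" idea using C11 atomics instead of the kernel's kref; the names (fake_request, req_ref_*) are illustrative and are not part of the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct ublk_rq_data and its kref member. */
struct fake_request {
	atomic_int ref;
	int tag;
};

/* Analogue of ublk_init_req_ref(): one reference owned by the dispatcher. */
static void req_ref_init(struct fake_request *rq)
{
	atomic_store(&rq->ref, 1);
}

/* Analogue of kref_get_unless_zero(): fails once completion has started. */
static bool req_ref_get(struct fake_request *rq)
{
	int old = atomic_load(&rq->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&rq->ref, &old, old + 1))
			return true;
	}
	return false;
}

/* Analogue of ublk_put_req_ref(): the last put runs the release callback. */
static void req_ref_put(struct fake_request *rq,
			void (*release)(struct fake_request *))
{
	if (atomic_fetch_sub(&rq->ref, 1) == 1)
		release(rq);
}

static void complete_rq(struct fake_request *rq)
{
	printf("request %d completed\n", rq->tag);
}

int main(void)
{
	struct fake_request rq = { .tag = 0 };

	req_ref_init(&rq);
	if (req_ref_get(&rq))		/* e.g. an in-flight copy holds a reference */
		req_ref_put(&rq, complete_rq);
	req_ref_put(&rq, complete_rq);	/* last put completes the request */
	return 0;
}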