From 1ba64edef6051d2ec79bb2fbd3a0c8f0df00ab55 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block, sx8: kill blk_insert_request() The only user left for blk_insert_request() is sx8 and it can be trivially switched to use blk_execute_rq_nowait() - special requests aren't included in io stat and sx8 doesn't use block layer tagging. Switch sx8 and kill blk_insert_requeset(). This patch doesn't introduce any functional difference. Only compile tested. Signed-off-by: Tejun Heo Acked-by: Jeff Garzik Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c7a6d3b5bc7b..8a6b51b13a1c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -660,7 +660,6 @@ extern void __blk_put_request(struct request_queue *, struct request *); extern struct request *blk_get_request(struct request_queue *, int, gfp_t); extern struct request *blk_make_request(struct request_queue *, struct bio *, gfp_t); -extern void blk_insert_request(struct request_queue *, struct request *, int, void *); extern void blk_requeue_request(struct request_queue *, struct request *); extern void blk_add_request_payload(struct request *rq, struct page *page, unsigned int len); -- cgit v1.2.3 From 34f6055c80285e4efb3f602a9119db75239744dc Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block: add blk_queue_dead() There are a number of QUEUE_FLAG_DEAD tests. Add blk_queue_dead() macro and use it. This patch doesn't introduce any functional difference. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8a6b51b13a1c..783f97c14d0a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -481,6 +481,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) +#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) #define blk_queue_noxmerges(q) \ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) -- cgit v1.2.3 From a73f730d013ff2788389fd0c46ad3e5510f124e6 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block, cfq: move cfqd->cic_index to q->id cfq allocates per-queue id using ida and uses it to index cic radix tree from io_context. Move it to q->id and allocate on queue init and free on queue release. This simplifies cfq a bit and will allow for further improvements of io context life-cycle management. This patch doesn't introduce any functional difference. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 783f97c14d0a..8c8dbc4738ea 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -310,6 +310,12 @@ struct request_queue { */ unsigned long queue_flags; + /* + * ida allocated id for this queue. Used to index queues from + * ioctx. 
+ */ + int id; + /* * queue needs bounce pages for pages above this limit */ -- cgit v1.2.3 From 42ec57a8f68311bbbf4ff96a5d33c8a2e90b9d05 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:37 +0100 Subject: block: misc ioc cleanups * int return from put_io_context() wasn't used by anybody. Make it return void like other put functions and docbook-fy the function comment. * Reorder dummy declarations for !CONFIG_BLOCK case a bit. * Make alloc_io_context() use __GFP_ZERO allocation, take init out of the if block and drop 0'ing. * Docbook-fy current_io_context() comment. This patch doesn't introduce any functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 5037a0ad2312..8a6ecb66346f 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -76,20 +76,14 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) struct task_struct; #ifdef CONFIG_BLOCK -int put_io_context(struct io_context *ioc); +void put_io_context(struct io_context *ioc); void exit_io_context(struct task_struct *task); struct io_context *get_io_context(gfp_t gfp_flags, int node); struct io_context *alloc_io_context(gfp_t gfp_flags, int node); #else -static inline void exit_io_context(struct task_struct *task) -{ -} - struct io_context; -static inline int put_io_context(struct io_context *ioc) -{ - return 1; -} +static inline void put_io_context(struct io_context *ioc) { } +static inline void exit_io_context(struct task_struct *task) { } #endif #endif -- cgit v1.2.3 From 6e736be7f282fff705db7c34a15313281b372a76 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:38 +0100 Subject: block: make ioc get/put interface more conventional and fix race on allocation Ignoring copy_io() during fork, io_context can be allocated from two places - current_io_context() and set_task_ioprio(). The former is always called from the local task while the latter can be called from a different task. The synchronization between them is peculiar and dubious. * current_io_context() doesn't grab task_lock() and assumes that if it saw %NULL ->io_context, it would stay that way until allocation and assignment is complete. It has smp_wmb() between alloc/init and assignment. * set_task_ioprio() grabs task_lock() for assignment and does smp_read_barrier_depends() between "ioc = task->io_context" and "if (ioc)". Unfortunately, this doesn't achieve anything - the latter is not a dependent load of the former. i.e., if ioc itself were being dereferenced "ioc->xxx", it would mean something (not sure what, though) but as the code currently stands, the dependent read barrier is a noop. As only one of the two test-assignment sequences is task_lock() protected, the task_lock() can't do much about the race between the two. Nothing prevents current_io_context() and set_task_ioprio() from each allocating its own ioc for the same task and overwriting the other's. Also, set_task_ioprio() can race with an exiting task and create a new ioc after exit_io_context() is finished. ioc get/put doesn't have any reason to be complex. The only hot path is accessing the existing ioc of %current, which is simple to achieve given that ->io_context is never destroyed as long as the task is alive. All other paths can happily go through task_lock() like all other task sub-structures without impacting anything.
This patch updates ioc get/put so that it becomes more conventional. * alloc_io_context() is replaced with get_task_io_context(). This is the only interface which can acquire access to ioc of another task. On return, the caller has an explicit reference to the object which should be put using put_io_context() afterwards. * The functionality of current_io_context() remains the same but when creating a new ioc, it shares the code path with get_task_io_context() and always goes through task_lock(). * get_io_context() now means incrementing ref on an ioc which the caller already has access to (be that an explicit refcnt or implicit %current one). * PF_EXITING inhibits creation of new io_context and once exit_io_context() is finished, it's guaranteed that both ioc acquisition functions return %NULL. * All users are updated. Most are trivial but smp_read_barrier_depends() removal from cfq_get_io_context() needs a bit of explanation. I suppose the original intention was to ensure ioc->ioprio is visible when set_task_ioprio() allocates new io_context and installs it; however, this wouldn't have worked because set_task_ioprio() doesn't have wmb between init and install. There are other problems with this which will be fixed in another patch. * While at it, use NUMA_NO_NODE instead of -1 for wildcard node specification. -v2: Vivek spotted contamination from debug patch. Removed. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 8a6ecb66346f..28bb621ef5a2 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -78,8 +78,8 @@ struct task_struct; #ifdef CONFIG_BLOCK void put_io_context(struct io_context *ioc); void exit_io_context(struct task_struct *task); -struct io_context *get_io_context(gfp_t gfp_flags, int node); -struct io_context *alloc_io_context(gfp_t gfp_flags, int node); +struct io_context *get_task_io_context(struct task_struct *task, + gfp_t gfp_flags, int node); #else struct io_context; static inline void put_io_context(struct io_context *ioc) { } -- cgit v1.2.3 From 09ac46c429464c919d04bb737b27edd84d944f02 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:38 +0100 Subject: block: misc updates to blk_get_queue() * blk_get_queue() is peculiar in that it returns 0 on success and 1 on failure instead of 0 / -errno or boolean. Update it such that it returns %true on success and %false on failure. * Make sure the caller checks for the return value. * Separate out __blk_get_queue() which doesn't check whether @q is dead and put it in blk.h. This will be used later. This patch doesn't introduce any functional changes. 
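As a hedged illustration of the new convention, a caller might now look like this (example_hold_queue() is invented; blk_get_queue() and blk_put_queue() are the declarations visible in the hunks of this series):

#include <linux/blkdev.h>

/* Hypothetical caller: with blk_get_queue() returning bool and marked
 * __must_check, a reference is held only when the call reports success. */
static int example_hold_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))		/* queue is dead, no reference taken */
		return -ENXIO;

	/* ... safely use @q here ... */

	blk_put_queue(q);		/* drop the reference taken above */
	return 0;
}
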
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8c8dbc4738ea..d1b6f4ed1f96 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -865,7 +865,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatte extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); -int blk_get_queue(struct request_queue *); +bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); struct request_queue *blk_alloc_queue_node(gfp_t, int); extern void blk_put_queue(struct request_queue *); -- cgit v1.2.3 From 283287a52e3c3f7f8f9da747f4b8c5202740d776 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:38 +0100 Subject: block, cfq: misc updates to cfq_io_context Make the following changes to prepare for ioc/cic management cleanup. * Add cic->q so that ioc can determine the associated queue without querying cfq. This will eventually replace ->key. * Factor out cfq_release_cic() from cic_free_func(). This function assumes that the caller handled locking. * Rename __cfq_exit_single_io_context() to cfq_exit_cic() and make it take only @cic. * Restructure cfq_cic_link() for future updates. This patch doesn't introduce any functional changes. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 28bb621ef5a2..079aea8fd8a8 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -15,6 +15,7 @@ struct cfq_ttime { struct cfq_io_context { void *key; + struct request_queue *q; struct cfq_queue *cfqq[2]; -- cgit v1.2.3 From dc86900e0a8f665122de6faadd27fb4c6d2b3e4d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:38 +0100 Subject: block, cfq: move ioc ioprio/cgroup changed handling to cic ioprio/cgroup change was handled by marking the changed state in ioc and, on the following access to the ioc, performing RCU-protected iteration through all cic's grabbing the matching queue_lock. This patch moves the changed state to each cic. When ioprio or cgroup changes, the respective bit is set on all cic's of the ioc and when each of those cic (not ioc) is accessed, change is applied for that specific ioc-queue pair. This also fixes the following two race conditions between setting and clearing of changed states. * Missing barrier between assign/load of ioprio and ioprio_changed allowed applying old ioprio. * Change requests could happen between application of change and clearing of changed variables. 
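A minimal sketch of how the per-cic changed bits might be consumed (the example_* helpers are invented; CIC_IOPRIO_CHANGED, CIC_CGROUP_CHANGED and cic->changed come from the hunk below):

#include <linux/bitops.h>
#include <linux/iocontext.h>

/* hypothetical elevator-private handlers */
static void example_apply_ioprio(struct cfq_io_context *cic) { }
static void example_apply_cgroup(struct cfq_io_context *cic) { }

/* Change handling is applied per ioc-queue pair, when the cic is actually
 * used, instead of walking every cic of the ioc up front. */
static void example_check_changed(struct cfq_io_context *cic)
{
	if (test_and_clear_bit(CIC_IOPRIO_CHANGED, &cic->changed))
		example_apply_ioprio(cic);
	if (test_and_clear_bit(CIC_CGROUP_CHANGED, &cic->changed))
		example_apply_cgroup(cic);
}
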
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 079aea8fd8a8..2c2b6da96b3c 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -13,6 +13,11 @@ struct cfq_ttime { unsigned long ttime_mean; }; +enum { + CIC_IOPRIO_CHANGED, + CIC_CGROUP_CHANGED, +}; + struct cfq_io_context { void *key; struct request_queue *q; @@ -26,6 +31,8 @@ struct cfq_io_context { struct list_head queue_list; struct hlist_node cic_list; + unsigned long changed; + void (*dtor)(struct io_context *); /* destructor */ void (*exit)(struct io_context *); /* called on task exit */ @@ -44,11 +51,6 @@ struct io_context { spinlock_t lock; unsigned short ioprio; - unsigned short ioprio_changed; - -#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) - unsigned short cgroup_changed; -#endif /* * For request batching @@ -81,6 +83,8 @@ void put_io_context(struct io_context *ioc); void exit_io_context(struct task_struct *task); struct io_context *get_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node); +void ioc_ioprio_changed(struct io_context *ioc, int ioprio); +void ioc_cgroup_changed(struct io_context *ioc); #else struct io_context; static inline void put_io_context(struct io_context *ioc) { } -- cgit v1.2.3 From b2efa05265d62bc29f3a64400fad4b44340eedb8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:39 +0100 Subject: block, cfq: unlink cfq_io_context's immediately cic is association between io_context and request_queue. A cic is linked from both ioc and q and should be destroyed when either one goes away. As ioc and q both have their own locks, locking becomes a bit complex - both orders work for removal from one but not from the other. Currently, cfq tries to circumvent this locking order issue with RCU. ioc->lock nests inside queue_lock but the radix tree and cic's are also protected by RCU allowing either side to walk their lists without grabbing lock. This rather unconventional use of RCU quickly devolves into extremely fragile convolution. e.g. The following is from cfqd going away too soon after ioc and q exits raced. general protection fault: 0000 [#1] PREEMPT SMP CPU 2 Modules linked in: [ 88.503444] Pid: 599, comm: hexdump Not tainted 3.1.0-rc10-work+ #158 Bochs Bochs RIP: 0010:[] [] cfq_exit_single_io_context+0x58/0xf0 ... Call Trace: [] call_for_each_cic+0x5a/0x90 [] cfq_exit_io_context+0x15/0x20 [] exit_io_context+0x100/0x140 [] do_exit+0x579/0x850 [] do_group_exit+0x5b/0xd0 [] sys_exit_group+0x17/0x20 [] system_call_fastpath+0x16/0x1b The only real hot path here is cic lookup during request initialization and avoiding extra locking requires very confined use of RCU. This patch makes cic removal from both ioc and request_queue perform double-locking and unlink immediately. * From q side, the change is almost trivial as ioc->lock nests inside queue_lock. It just needs to grab each ioc->lock as it walks cic_list and unlink it. * From ioc side, it's a bit more difficult because of inversed lock order. ioc needs its lock to walk its cic_list but can't grab the matching queue_lock and needs to perform unlock-relock dancing. Unlinking is now wholly done from put_io_context() and fast path is optimized by using the queue_lock the caller already holds, which is by far the most common case. If the ioc accessed multiple devices, it tries with trylock. 
In unlikely cases of fast path failure, it falls back to full double-locking dance from workqueue. Double-locking isn't the prettiest thing in the world but it's *far* simpler and more understandable than RCU trick without adding any meaningful overhead. This still leaves a lot of now unnecessary RCU logics. Future patches will trim them. -v2: Vivek pointed out that cic->q was being dereferenced after cic->release() was called. Updated to use local variable @this_q instead. Signed-off-by: Tejun Heo Cc: Vivek Goyal Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 3 +++ include/linux/iocontext.h | 12 ++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d1b6f4ed1f96..65c2f8c70089 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -393,6 +393,9 @@ struct request_queue { /* Throttle data */ struct throtl_data *td; #endif +#ifdef CONFIG_LOCKDEP + int ioc_release_depth; +#endif }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 2c2b6da96b3c..01e863128780 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -3,6 +3,7 @@ #include #include +#include struct cfq_queue; struct cfq_ttime { @@ -33,8 +34,8 @@ struct cfq_io_context { unsigned long changed; - void (*dtor)(struct io_context *); /* destructor */ - void (*exit)(struct io_context *); /* called on task exit */ + void (*exit)(struct cfq_io_context *); + void (*release)(struct cfq_io_context *); struct rcu_head rcu_head; }; @@ -61,6 +62,8 @@ struct io_context { struct radix_tree_root radix_root; struct hlist_head cic_list; void __rcu *ioc_data; + + struct work_struct release_work; }; static inline struct io_context *ioc_task_link(struct io_context *ioc) @@ -79,7 +82,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) struct task_struct; #ifdef CONFIG_BLOCK -void put_io_context(struct io_context *ioc); +void put_io_context(struct io_context *ioc, struct request_queue *locked_q); void exit_io_context(struct task_struct *task); struct io_context *get_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node); @@ -87,7 +90,8 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio); void ioc_cgroup_changed(struct io_context *ioc); #else struct io_context; -static inline void put_io_context(struct io_context *ioc) { } +static inline void put_io_context(struct io_context *ioc, + struct request_queue *locked_q) { } static inline void exit_io_context(struct task_struct *task) { } #endif -- cgit v1.2.3 From b9a1920837bc53430d339380e393a6e4c372939f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:39 +0100 Subject: block, cfq: remove delayed unlink Now that all cic's are immediately unlinked from both ioc and queue, lazy dropping from lookup path and trimming on elevator unregister are unnecessary. Kill them and remove now unused elevator_ops->trim(). This also leaves call_for_each_cic() without any user. Removed. 
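For reference, the ioc-side fast path of the immediate-unlink scheme from the previous patch can be sketched roughly as follows; this is a simplified, hypothetical rendering rather than the actual blk-ioc code, though cic->q, cic->release and ioc->release_work are the real fields added above:

#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/workqueue.h>

/* Try to unlink one cic while holding ioc->lock: reuse the queue_lock the
 * caller already holds, trylock other queues, and punt the whole release to
 * ioc->release_work when the trylock fails (the workqueue can then take the
 * locks in the proper order). */
static bool example_try_release_cic(struct io_context *ioc,
				    struct cfq_io_context *cic,
				    struct request_queue *locked_q)
{
	struct request_queue *this_q = cic->q;

	if (this_q == locked_q) {
		cic->release(cic);			/* both locks held */
		return true;
	}
	if (spin_trylock(this_q->queue_lock)) {
		cic->release(cic);
		spin_unlock(this_q->queue_lock);
		return true;
	}
	schedule_work(&ioc->release_work);		/* slow path */
	return false;
}
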
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 1d0f7a2ff73b..581dd1bd3d3e 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -63,7 +63,6 @@ struct elevator_ops elevator_init_fn *elevator_init_fn; elevator_exit_fn *elevator_exit_fn; - void (*trim)(struct io_context *); }; #define ELV_NAME_MAX (16) -- cgit v1.2.3 From b50b636bce6293fa858cc7ff6c3ffe4920d90006 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:39 +0100 Subject: block, cfq: kill ioc_gone Now that cic's are immediately unlinked under both locks, there's no need to count and drain cic's before module unload. RCU callback completion is waited for with rcu_barrier(). While at it, remove residual RCU operations on cic_list. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 581dd1bd3d3e..02604c89ddde 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -196,22 +196,5 @@ enum { INIT_LIST_HEAD(&(rq)->csd.list); \ } while (0) -/* - * io context count accounting - */ -#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val) -#define elv_ioc_count_inc(name) this_cpu_inc(name) -#define elv_ioc_count_dec(name) this_cpu_dec(name) - -#define elv_ioc_count_read(name) \ -({ \ - unsigned long __val = 0; \ - int __cpu; \ - smp_wmb(); \ - for_each_possible_cpu(__cpu) \ - __val += per_cpu(name, __cpu); \ - __val; \ -}) - #endif /* CONFIG_BLOCK */ #endif -- cgit v1.2.3 From 1238033c79e92e5c315af12e45396f1a78c73dec Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:40 +0100 Subject: block, cfq: kill cic->key Now that lazy paths are removed, cfqd_dead_key() is meaningless and cic->q can be used wherever cic->key is used. Kill cic->key. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 01e863128780..b2b75a54f252 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -20,7 +20,6 @@ enum { }; struct cfq_io_context { - void *key; struct request_queue *q; struct cfq_queue *cfqq[2]; -- cgit v1.2.3 From 22f746e235a5cbee2a6ca9887b1be2aa7d31fe71 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:41 +0100 Subject: block: remove elevator_queue->ops elevator_queue->ops points to the same ops struct ->elevator_type.ops is pointing to. The only effect of caching it in elevator_queue is shorter notation - it doesn't save any indirect dereference. Relocate elevator_type->list, which is used only during module init/exit, to the end of the structure, rename elevator_queue->elevator_type to ->type, and replace elevator_queue->ops with elevator_queue->type.ops. This doesn't introduce any functional difference.
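A hedged before/after sketch of the relocation (the helper is invented, and the exit hook signature is assumed to take the elevator_queue as in mainline of this era):

#include <linux/elevator.h>

/* The ops table is now reached through elevator_queue->type instead of a
 * cached elevator_queue->ops pointer. */
static void example_exit_elevator(struct elevator_queue *e)
{
	/* old: e->ops->elevator_exit_fn(e);  (cached ops pointer) */
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
}
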
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 02604c89ddde..04958ef53e62 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -78,11 +78,11 @@ struct elv_fs_entry { */ struct elevator_type { - struct list_head list; struct elevator_ops ops; struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; + struct list_head list; }; /* @@ -90,10 +90,9 @@ struct elevator_type */ struct elevator_queue { - struct elevator_ops *ops; + struct elevator_type *type; void *elevator_data; struct kobject kobj; - struct elevator_type *elevator_type; struct mutex sysfs_lock; struct hlist_head *hash; unsigned int registered:1; -- cgit v1.2.3 From c58698073218f2c8f2fc5982fa3938c2d3803b9f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:41 +0100 Subject: block, cfq: reorganize cfq_io_context into generic and cfq specific parts Currently io_context and cfq logics are mixed without clear boundary. Most of io_context is independent from cfq but cfq_io_context handling logic is dispersed between generic ioc code and cfq. cfq_io_context represents association between an io_context and a request_queue, which is a concept useful outside of cfq, but it also contains fields which are useful only to cfq. This patch takes out generic part and put it into io_cq (io context-queue) and the rest into cfq_io_cq (cic moniker remains the same) which contains io_cq. The following changes are made together. * cfq_ttime and cfq_io_cq now live in cfq-iosched.c. * All related fields, functions and constants are renamed accordingly. * ioc->ioc_data is now "struct io_cq *" instead of "void *" and renamed to icq_hint. This prepares for io_context API cleanup. Documentation is currently sparse. It will be added later. Changes in this patch are mechanical and don't cause functional change. 
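A simplified sketch of the renamed lookup state in use (the helper is invented; icq_tree, icq_hint, icq->q and q->id are the fields introduced in this series, and the real lookup additionally expects the queue lock to be held):

#include <linux/blkdev.h>
#include <linux/iocontext.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/* Check the last-used hint first, then fall back to the radix tree keyed
 * by q->id.  Simplified: the hint is not updated here. */
static struct io_cq *example_lookup_icq(struct io_context *ioc,
					struct request_queue *q)
{
	struct io_cq *icq;

	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (!icq || icq->q != q)
		icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	rcu_read_unlock();

	return icq;
}
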
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/iocontext.h | 43 ++++++++++++++----------------------------- 1 file changed, 14 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index b2b75a54f252..d15ca6591f96 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -5,38 +5,23 @@ #include #include -struct cfq_queue; -struct cfq_ttime { - unsigned long last_end_request; - - unsigned long ttime_total; - unsigned long ttime_samples; - unsigned long ttime_mean; -}; - enum { - CIC_IOPRIO_CHANGED, - CIC_CGROUP_CHANGED, + ICQ_IOPRIO_CHANGED, + ICQ_CGROUP_CHANGED, }; -struct cfq_io_context { - struct request_queue *q; - - struct cfq_queue *cfqq[2]; - - struct io_context *ioc; - - struct cfq_ttime ttime; - - struct list_head queue_list; - struct hlist_node cic_list; +struct io_cq { + struct request_queue *q; + struct io_context *ioc; - unsigned long changed; + struct list_head q_node; + struct hlist_node ioc_node; - void (*exit)(struct cfq_io_context *); - void (*release)(struct cfq_io_context *); + unsigned long changed; + struct rcu_head rcu_head; - struct rcu_head rcu_head; + void (*exit)(struct io_cq *); + void (*release)(struct io_cq *); }; /* @@ -58,9 +43,9 @@ struct io_context { int nr_batch_requests; /* Number of requests left in the batch */ unsigned long last_waited; /* Time last woken after wait for request */ - struct radix_tree_root radix_root; - struct hlist_head cic_list; - void __rcu *ioc_data; + struct radix_tree_root icq_tree; + struct io_cq __rcu *icq_hint; + struct hlist_head icq_list; struct work_struct release_work; }; -- cgit v1.2.3 From a612fddf0d8090f2877305c9168b6c1a34fb5d90 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:41 +0100 Subject: block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq Most of icq management is about to be moved out of cfq into blk-ioc. This patch prepares for it. * Move cfqd->icq_list to request_queue->icq_list * Make request explicitly point to icq instead of through elevator private data. ->elevator_private[3] is replaced with sub struct elv which contains icq pointer and priv[2]. cfq is updated accordingly. * Meaningless clearing of ->elevator_private[0] removed from elv_set_request(). At that point in code, the field was guaranteed to be %NULL anyway. This patch doesn't introduce any functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 65c2f8c70089..8bca04873f53 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -111,10 +111,14 @@ struct request { * Three pointers are available for the IO schedulers, if they need * more they have to dynamically allocate it. Flush requests are * never put on the IO scheduler. So let the flush fields share - * space with the three elevator_private pointers. + * space with the elevator data. 
*/ union { - void *elevator_private[3]; + struct { + struct io_cq *icq; + void *priv[2]; + } elv; + struct { unsigned int seq; struct list_head list; @@ -357,6 +361,8 @@ struct request_queue { struct timer_list timeout; struct list_head timeout_list; + struct list_head icq_list; + struct queue_limits limits; /* -- cgit v1.2.3 From 3d3c2379feb177a5fd55bb0ed76776dc9d4f3243 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:42 +0100 Subject: block, cfq: move icq cache management to block core Let elevators set ->icq_size and ->icq_align in elevator_type and elv_register() and elv_unregister() respectively create and destroy kmem_cache for icq. * elv_register() now can return failure. All callers updated. * icq caches are automatically named "ELVNAME_io_cq". * cfq_slab_setup/kill() are collapsed into cfq_init/exit(). * While at it, minor indentation change for iosched_cfq.elevator_name for consistency. This will help moving icq management to block core. This doesn't introduce any functional change. Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 04958ef53e62..d3d3e28cbfd4 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -78,10 +78,19 @@ struct elv_fs_entry { */ struct elevator_type { + /* managed by elevator core */ + struct kmem_cache *icq_cache; + + /* fields provided by elevator implementation */ struct elevator_ops ops; + size_t icq_size; + size_t icq_align; struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; + + /* managed by elevator core */ + char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */ struct list_head list; }; @@ -127,7 +136,7 @@ extern void elv_drain_elevator(struct request_queue *); /* * io scheduler registration */ -extern void elv_register(struct elevator_type *); +extern int elv_register(struct elevator_type *); extern void elv_unregister(struct elevator_type *); /* -- cgit v1.2.3 From 7e5a8794492e43e9eebb68a98a23be055888ccd0 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:42 +0100 Subject: block, cfq: move io_cq exit/release to blk-ioc.c With kmem_cache managed by blk-ioc, io_cq exit/release can be moved to blk-ioc too. The odd ->io_cq->exit/release() callbacks are replaced with elevator_ops->elevator_exit_icq_fn() with unlinking from both ioc and q, and freeing automatically handled by blk-ioc. The elevator operation only need to perform exit operation specific to the elevator - in cfq's case, exiting the cfqq's. Also, clearing of io_cq's on q detach is moved to block core and automatically performed on elevator switch and q release. Because the q io_cq points to might be freed before RCU callback for the io_cq runs, blk-ioc code should remember to which cache the io_cq needs to be freed when the io_cq is released. New field io_cq->__rcu_icq_cache is added for this purpose. As both the new field and rcu_head are used only after io_cq is released and the q/ioc_node fields aren't, they are put into unions. 
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 5 +++++ include/linux/iocontext.h | 20 ++++++++++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index d3d3e28cbfd4..06e4dd568717 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -5,6 +5,8 @@ #ifdef CONFIG_BLOCK +struct io_cq; + typedef int (elevator_merge_fn) (struct request_queue *, struct request **, struct bio *); @@ -24,6 +26,7 @@ typedef struct request *(elevator_request_list_fn) (struct request_queue *, stru typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); typedef int (elevator_may_queue_fn) (struct request_queue *, int); +typedef void (elevator_exit_icq_fn) (struct io_cq *); typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t); typedef void (elevator_put_req_fn) (struct request *); typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *); @@ -56,6 +59,8 @@ struct elevator_ops elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; + elevator_exit_icq_fn *elevator_exit_icq_fn; + elevator_set_req_fn *elevator_set_req_fn; elevator_put_req_fn *elevator_put_req_fn; diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index d15ca6591f96..ac390a34c0e7 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -14,14 +14,22 @@ struct io_cq { struct request_queue *q; struct io_context *ioc; - struct list_head q_node; - struct hlist_node ioc_node; + /* + * q_node and ioc_node link io_cq through icq_list of q and ioc + * respectively. Both fields are unused once ioc_exit_icq() is + * called and shared with __rcu_icq_cache and __rcu_head which are + * used for RCU free of io_cq. + */ + union { + struct list_head q_node; + struct kmem_cache *__rcu_icq_cache; + }; + union { + struct hlist_node ioc_node; + struct rcu_head __rcu_head; + }; unsigned long changed; - struct rcu_head rcu_head; - - void (*exit)(struct io_cq *); - void (*release)(struct io_cq *); }; /* -- cgit v1.2.3 From 9b84cacd013996f244d85b3d873287c2a8f88658 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:42 +0100 Subject: block, cfq: restructure io_cq creation path for io_context interface cleanup Add elevator_ops->elevator_init_icq_fn() and restructure cfq_create_cic() and rename it to ioc_create_icq(). The new function expects its caller to pass in io_context, uses elevator_type->icq_cache, handles generic init, calls the new elevator operation for elevator specific initialization, and returns pointer to created or looked up icq. This leaves cfq_icq_pool variable without any user. Removed. This prepares for io_context interface cleanup and doesn't introduce any functional difference. 
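Putting the pieces together, a hypothetical elevator might now advertise its icq hooks and sizes roughly as below (all example_* names are invented; the elevator_type fields and the fallible elv_register() are the ones introduced above):

#include <linux/elevator.h>
#include <linux/init.h>
#include <linux/iocontext.h>
#include <linux/kernel.h>
#include <linux/module.h>

struct example_icq {
	struct io_cq	icq;		/* generic part comes first */
	int		score;		/* hypothetical private data */
};

static void example_init_icq(struct io_cq *icq)
{
	container_of(icq, struct example_icq, icq)->score = 0;
}

static void example_exit_icq(struct io_cq *icq)
{
	/* elevator-private teardown only; blk-ioc unlinks and frees */
}

static struct elevator_type example_elv_type = {
	.ops = {
		.elevator_init_icq_fn	= example_init_icq,
		.elevator_exit_icq_fn	= example_exit_icq,
	},
	.icq_size	= sizeof(struct example_icq),
	.icq_align	= __alignof__(struct example_icq),
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};

static int __init example_elv_init(void)
{
	return elv_register(&example_elv_type);	/* may now fail */
}
module_init(example_elv_init);

static void __exit example_elv_exit(void)
{
	elv_unregister(&example_elv_type);
}
module_exit(example_elv_exit);
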
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 06e4dd568717..c8f1e67a8ebe 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -26,6 +26,7 @@ typedef struct request *(elevator_request_list_fn) (struct request_queue *, stru typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); typedef int (elevator_may_queue_fn) (struct request_queue *, int); +typedef void (elevator_init_icq_fn) (struct io_cq *); typedef void (elevator_exit_icq_fn) (struct io_cq *); typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t); typedef void (elevator_put_req_fn) (struct request *); @@ -59,6 +60,7 @@ struct elevator_ops elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; + elevator_init_icq_fn *elevator_init_icq_fn; elevator_exit_icq_fn *elevator_exit_icq_fn; elevator_set_req_fn *elevator_set_req_fn; -- cgit v1.2.3 From f1f8cc94651738b418ba54c039df536303b91704 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 14 Dec 2011 00:33:42 +0100 Subject: block, cfq: move icq creation and rq->elv.icq association to block core Now block layer knows everything necessary to create and associate icq's with requests. Move ioc_create_icq() to blk-ioc.c and update get_request() such that, if elevator_type->icq_size is set, requests are automatically associated with their matching icq's before elv_set_request(). io_context reference is also managed by block core on request alloc/free. * Only ioprio/cgroup changed handling remains from cfq_get_cic(). Collapsed into cfq_set_request(). * This removes queue kicking on icq allocation failure (for now). As icq allocation failure is rare and the only effect of queue kicking achieved was possibily accelerating queue processing, this change shouldn't be noticeable. There is a larger underlying problem. Unlike request allocation, icq allocation is not guaranteed to succeed eventually after retries. The number of icq is unbound and thus mempool can't be the solution either. This effectively adds allocation dependency on memory free path and thus possibility of deadlock. This usually wouldn't happen because icq allocation is not a hot path and, even when the condition triggers, it's highly unlikely that none of the writeback workers already has icq. However, this is still possible especially if elevator is being switched under high memory pressure, so we better get it fixed. Probably the only solution is just bypassing elevator and appending to dispatch queue on any elevator allocation failure. * Comment added to explain how icq's are managed and synchronized. This completes cleanup of io_context interface. 
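A hedged sketch of an elevator set_req hook under the new arrangement (the function is invented; rq->elv.icq and rq->elv.priv[] are the fields added earlier in the series):

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/iocontext.h>

/* Block core has already looked up or created the icq, pointed rq->elv.icq
 * at it and taken an io_context reference before this hook runs, so the
 * elevator simply consumes it. */
static int example_set_request(struct request_queue *q, struct request *rq,
			       gfp_t gfp_mask)
{
	struct io_cq *icq = rq->elv.icq;

	if (!icq)			/* sketch: tolerate a missing icq */
		return 0;

	rq->elv.priv[0] = icq;		/* stash per-request state here */
	return 0;
}
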
Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- include/linux/elevator.h | 8 +++---- include/linux/iocontext.h | 59 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/elevator.h b/include/linux/elevator.h index c8f1e67a8ebe..c24f3d7fbf1e 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -60,8 +60,8 @@ struct elevator_ops elevator_request_list_fn *elevator_former_req_fn; elevator_request_list_fn *elevator_latter_req_fn; - elevator_init_icq_fn *elevator_init_icq_fn; - elevator_exit_icq_fn *elevator_exit_icq_fn; + elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */ + elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */ elevator_set_req_fn *elevator_set_req_fn; elevator_put_req_fn *elevator_put_req_fn; @@ -90,8 +90,8 @@ struct elevator_type /* fields provided by elevator implementation */ struct elevator_ops ops; - size_t icq_size; - size_t icq_align; + size_t icq_size; /* see iocontext.h */ + size_t icq_align; /* ditto */ struct elv_fs_entry *elevator_attrs; char elevator_name[ELV_NAME_MAX]; struct module *elevator_owner; diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index ac390a34c0e7..7e1371c4bccf 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h @@ -10,6 +10,65 @@ enum { ICQ_CGROUP_CHANGED, }; +/* + * An io_cq (icq) is association between an io_context (ioc) and a + * request_queue (q). This is used by elevators which need to track + * information per ioc - q pair. + * + * Elevator can request use of icq by setting elevator_type->icq_size and + * ->icq_align. Both size and align must be larger than that of struct + * io_cq and elevator can use the tail area for private information. The + * recommended way to do this is defining a struct which contains io_cq as + * the first member followed by private members and using its size and + * align. For example, + * + * struct snail_io_cq { + * struct io_cq icq; + * int poke_snail; + * int feed_snail; + * }; + * + * struct elevator_type snail_elv_type { + * .ops = { ... }, + * .icq_size = sizeof(struct snail_io_cq), + * .icq_align = __alignof__(struct snail_io_cq), + * ... + * }; + * + * If icq_size is set, block core will manage icq's. All requests will + * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn() + * is called and be holding a reference to the associated io_context. + * + * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is + * called and, on destruction, ->elevator_exit_icq_fn(). Both functions + * are called with both the associated io_context and queue locks held. + * + * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding + * queue lock but the returned icq is valid only until the queue lock is + * released. Elevators can not and should not try to create or destroy + * icq's. + * + * As icq's are linked from both ioc and q, the locking rules are a bit + * complex. + * + * - ioc lock nests inside q lock. + * + * - ioc->icq_list and icq->ioc_node are protected by ioc lock. + * q->icq_list and icq->q_node by q lock. + * + * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq + * itself is protected by q lock. However, both the indexes and icq + * itself are also RCU managed and lookup can be performed holding only + * the q lock. + * + * - icq's are not reference counted. They are destroyed when either the + * ioc or q goes away. 
Each request with icq set holds an extra + * reference to ioc to ensure it stays until the request is completed. + * + * - Linking and unlinking icq's are performed while holding both ioc and q + * locks. Due to the lock ordering, q exit is simple but ioc exit + * requires reverse-order double lock dance. + */ struct io_cq { struct request_queue *q; struct io_context *ioc; -- cgit v1.2.3 From b1bd055d397e09f99dcef9b138ed104ff1812fcb Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Wed, 11 Jan 2012 16:27:11 +0100 Subject: block: Introduce blk_set_stacking_limits function Stacking driver queue limits are typically bounded exclusively by the capabilities of the low level devices, not by the stacking driver itself. This patch introduces blk_set_stacking_limits() which has more liberal metrics than the default queue limits function. This allows us to inherit topology parameters from bottom devices without manually tweaking the default limits in each driver prior to calling the stacking function. Since there is now a clear distinction between stacking and low-level devices, blk_set_default_limits() has been modified to carry the more conservative values that we used to manually set in blk_queue_make_request(). Signed-off-by: Martin K. Petersen Acked-by: Mike Snitzer Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8bca04873f53..adc34133a56a 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -844,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); extern void blk_set_default_limits(struct queue_limits *lim); +extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, -- cgit v1.2.3 From ef00f59c95fe6e002e7c6e3663cdea65e253f4cc Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Wed, 11 Jan 2012 16:29:31 +0100 Subject: block: Add BLKROTATIONAL ioctl Introduce an ioctl which permits applications to query whether a block device is rotational. Signed-off-by: Martin K. 
Petersen Signed-off-by: Jens Axboe --- include/linux/fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index e0bc4ffb8e7f..95dd911506f1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -319,6 +319,7 @@ struct inodes_stat_t { #define BLKPBSZGET _IO(0x12,123) #define BLKDISCARDZEROES _IO(0x12,124) #define BLKSECDISCARD _IO(0x12,125) +#define BLKROTATIONAL _IO(0x12,126) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ #define FIBMAP _IO(0x00,1) /* bmap access */ -- cgit v1.2.3 From fd83240a60ecc59849420df3393e9e6d35c77683 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Thu, 12 Jan 2012 09:17:30 +0100 Subject: blockdev: convert some macros to static inlines We prefer to program in C rather than preprocessor and it fixes this warning when CONFIG_BLK_DEV_INTEGRITY is not set: drivers/md/dm-table.c: In function 'dm_table_set_integrity': drivers/md/dm-table.c:1285:3: warning: statement with no effect [-Wunused-value] Signed-off-by: Stephen Rothwell Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 77 +++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index adc34133a56a..5cfb9b22627f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1298,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q) #else /* CONFIG_BLK_DEV_INTEGRITY */ -#define blk_integrity_rq(rq) (0) -#define blk_rq_count_integrity_sg(a, b) (0) -#define blk_rq_map_integrity_sg(a, b, c) (0) -#define bdev_get_integrity(a) (0) -#define blk_get_integrity(a) (0) -#define blk_integrity_compare(a, b) (0) -#define blk_integrity_register(a, b) (0) -#define blk_integrity_unregister(a) do { } while (0) -#define blk_queue_max_integrity_segments(a, b) do { } while (0) -#define queue_max_integrity_segments(a) (0) -#define blk_integrity_merge_rq(a, b, c) (0) -#define blk_integrity_merge_bio(a, b, c) (0) -#define blk_integrity_is_initialized(a) (0) +struct bio; +struct block_device; +struct gendisk; +struct blk_integrity; + +static inline int blk_integrity_rq(struct request *rq) +{ + return 0; +} +static inline int blk_rq_count_integrity_sg(struct request_queue *q, + struct bio *b) +{ + return 0; +} +static inline int blk_rq_map_integrity_sg(struct request_queue *q, + struct bio *b, + struct scatterlist *s) +{ + return 0; +} +static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) +{ + return 0; +} +static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) +{ + return NULL; +} +static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) +{ + return 0; +} +static inline int blk_integrity_register(struct gendisk *d, + struct blk_integrity *b) +{ + return 0; +} +static inline void blk_integrity_unregister(struct gendisk *d) +{ +} +static inline void blk_queue_max_integrity_segments(struct request_queue *q, + unsigned int segs) +{ +} +static inline unsigned short queue_max_integrity_segments(struct request_queue *q) +{ + return 0; +} +static inline int blk_integrity_merge_rq(struct request_queue *rq, + struct request *r1, + struct request *r2) +{ + return 0; +} +static inline int blk_integrity_merge_bio(struct request_queue *rq, + struct request *r, + struct bio *b) +{ + return 0; +} +static inline bool blk_integrity_is_initialized(struct gendisk *g) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ -- cgit v1.2.3 
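As a usage note for the BLKROTATIONAL ioctl introduced above, a user-space query might look like the sketch below; that the result is an unsigned short written through the argument pointer is an assumption not visible in the header hunk:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	unsigned short rotational = 0;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, BLKROTATIONAL, &rotational) < 0) {
		perror("BLKROTATIONAL");
		return 1;
	}

	printf("%s: %s\n", argv[1], rotational ? "rotational" : "non-rotational");
	close(fd);
	return 0;
}
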
From 6898e3bd11cc9a931ef115eee9000ac9d8f8c3cf Mon Sep 17 00:00:00 2001 From: "Martin K. Petersen" Date: Fri, 13 Jan 2012 08:15:33 +0100 Subject: block: Stop using macro stubs for the bio data integrity calls Replace preprocessor macro stubs with real function declarations to prevent warnings when CONFIG_BLK_DEV_INTEGRITY is disabled. Signed-off-by: Martin K. Petersen Signed-off-by: Jens Axboe --- include/linux/bio.h | 66 ++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 53 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bio.h b/include/linux/bio.h index 847994aef0e9..129a9c097958 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -515,24 +515,64 @@ extern void bio_integrity_init(void); #else /* CONFIG_BLK_DEV_INTEGRITY */ -#define bio_integrity(a) (0) -#define bioset_integrity_create(a, b) (0) -#define bio_integrity_prep(a) (0) -#define bio_integrity_enabled(a) (0) +static inline int bio_integrity(struct bio *bio) +{ + return 0; +} + +static inline int bio_integrity_enabled(struct bio *bio) +{ + return 0; +} + +static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) +{ + return 0; +} + +static inline void bioset_integrity_free (struct bio_set *bs) +{ + return; +} + +static inline int bio_integrity_prep(struct bio *bio) +{ + return 0; +} + +static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs) +{ + return; +} + static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp_mask, struct bio_set *bs) { return 0; } -#define bioset_integrity_free(a) do { } while (0) -#define bio_integrity_free(a, b) do { } while (0) -#define bio_integrity_endio(a, b) do { } while (0) -#define bio_integrity_advance(a, b) do { } while (0) -#define bio_integrity_trim(a, b, c) do { } while (0) -#define bio_integrity_split(a, b, c) do { } while (0) -#define bio_integrity_set_tag(a, b, c) do { } while (0) -#define bio_integrity_get_tag(a, b, c) do { } while (0) -#define bio_integrity_init(a) do { } while (0) + +static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp, + int sectors) +{ + return; +} + +static inline void bio_integrity_advance(struct bio *bio, + unsigned int bytes_done) +{ + return; +} + +static inline void bio_integrity_trim(struct bio *bio, unsigned int offset, + unsigned int sectors) +{ + return; +} + +static inline void bio_integrity_init(void) +{ + return; +} #endif /* CONFIG_BLK_DEV_INTEGRITY */ -- cgit v1.2.3
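Finally, a hedged illustration of why typed stubs matter: the caller below compiles cleanly whether or not CONFIG_BLK_DEV_INTEGRITY is set, assuming this era's convention that bio_integrity_prep() returns 0 on success (consistent with the stubs above returning 0):

#include <linux/bio.h>

/* Hypothetical submission-path check: with integrity compiled out the
 * inline stubs return 0, so this collapses to "return 0" while full type
 * checking of the arguments is retained. */
static int example_prep_bio(struct bio *bio)
{
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
		return -EIO;	/* integrity payload allocation failed */
	return 0;
}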