			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
-		    !q->elevator->ops->elevator_dispatch_fn(q, 0))
+		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

-	if (e->ops->elevator_activate_req_fn)
-		e->ops->elevator_activate_req_fn(q, rq);
+	if (e->type->ops.elevator_activate_req_fn)
+		e->type->ops.elevator_activate_req_fn(q, rq);
}
static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

-	if (e->ops->elevator_deactivate_req_fn)
-		e->ops->elevator_deactivate_req_fn(q, rq);
+	if (e->type->ops.elevator_deactivate_req_fn)
+		e->type->ops.elevator_deactivate_req_fn(q, rq);
}
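
The hunks above all make the same mechanical substitution: elevator hooks are no longer reached through a per-queue ops pointer (e->ops->fn) but through an ops table embedded in the shared struct elevator_type (e->type->ops.fn). The following standalone sketch illustrates that ownership move; the field layouts are simplified stand-ins, not the kernel's actual definitions.

#include <stdio.h>

struct request_queue;
struct request;

struct elevator_ops {
	void (*elevator_activate_req_fn)(struct request_queue *,
					 struct request *);
};

/* One per registered scheduler; the ops table is embedded, not pointed to. */
struct elevator_type {
	struct elevator_ops ops;
	const char *elevator_name;
};

/* One per request queue; it now only points at the shared type. */
struct elevator_queue {
	struct elevator_type *type;
};

static void noop_activate(struct request_queue *q, struct request *rq)
{
	(void)q;
	(void)rq;
	puts("activate");
}

static struct elevator_type noop_type = {
	.ops = { .elevator_activate_req_fn = noop_activate },
	.elevator_name = "noop",
};

int main(void)
{
	struct elevator_queue e = { .type = &noop_type };

	/* old: e->ops->elevator_activate_req_fn(q, rq)      */
	/* new: e->type->ops.elevator_activate_req_fn(q, rq) */
	if (e.type->ops.elevator_activate_req_fn)
		e.type->ops.elevator_activate_req_fn(NULL, NULL);
	return 0;
}

The practical effect is that each elevator_queue no longer carries its own ops pointer; one table per registered scheduler serves every queue that uses it.
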
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
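
The two prototypes added above split merge handling in two steps: blk_rq_merge_ok() answers whether rq and bio may merge at all, while blk_try_merge() classifies which merge applies. Below is a rough standalone model of the classification step, assuming it reduces to sector adjacency; the ELEVATOR_* values and struct fields here are simplified stand-ins, not the kernel's.

#include <stdio.h>

enum {
	ELEVATOR_NO_MERGE,
	ELEVATOR_FRONT_MERGE,
	ELEVATOR_BACK_MERGE,
};

struct request { unsigned long long pos; unsigned int sectors; };
struct bio     { unsigned long long sector; unsigned int sectors; };

/* Classify how bio lines up against rq, by sector adjacency only. */
static int try_merge(const struct request *rq, const struct bio *bio)
{
	if (rq->pos + rq->sectors == bio->sector)
		return ELEVATOR_BACK_MERGE;	/* bio starts where rq ends */
	if (rq->pos - bio->sectors == bio->sector)
		return ELEVATOR_FRONT_MERGE;	/* bio ends where rq starts */
	return ELEVATOR_NO_MERGE;
}

int main(void)
{
	struct request rq    = { .pos = 100, .sectors = 8 };
	struct bio back      = { .sector = 108, .sectors = 8 };
	struct bio front     = { .sector = 92,  .sectors = 8 };
	struct bio unrelated = { .sector = 500, .sectors = 8 };

	printf("%d %d %d\n", try_merge(&rq, &back),
	       try_merge(&rq, &front), try_merge(&rq, &unrelated));
	return 0;
}
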
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
-static inline int blk_cpu_to_group(int cpu)
-{
-	int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
-	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
-	group = cpumask_first(topology_thread_cpumask(cpu));
-#else
-	return cpu;
-#endif
-	if (likely(group < NR_CPUS))
-		return group;
-	return cpu;
-}
-
/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
+struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
+struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
+void ioc_clear_queue(struct request_queue *q);
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
				int node);
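
ioc_lookup_icq() and ioc_create_icq() expose the two halves of a lookup-or-create pattern for the io_cq nodes tying an io_context to a request_queue, and ioc_clear_queue() tears those links down when a queue goes away. Here is a toy userspace model of just the lookup-or-create half; a linked list stands in for the kernel's real lookup structure, and all locking is omitted.

#include <stdio.h>
#include <stdlib.h>

struct request_queue { int id; };

struct io_cq {
	struct request_queue *q;
	struct io_cq *next;
};

struct io_context {
	struct io_cq *icq_list;	/* stand-in for the kernel's lookup structure */
};

/* Model of ioc_lookup_icq(): find the icq linking ioc to q, or NULL. */
static struct io_cq *lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	for (icq = ioc->icq_list; icq; icq = icq->next)
		if (icq->q == q)
			return icq;
	return NULL;
}

/* Model of ioc_create_icq(): allocate a new icq and link it in. */
static struct io_cq *create_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq = calloc(1, sizeof(*icq));

	if (!icq)
		return NULL;
	icq->q = q;
	icq->next = ioc->icq_list;
	ioc->icq_list = icq;
	return icq;
}

int main(void)
{
	struct request_queue q = { .id = 1 };
	struct io_context ioc = { .icq_list = NULL };
	struct io_cq *icq;

	icq = lookup_icq(&ioc, &q);		/* fast path: miss on first I/O */
	if (!icq)
		icq = create_icq(&ioc, &q);	/* slow path: allocate once */

	printf("%s\n", lookup_icq(&ioc, &q) == icq ? "hit" : "miss");
	free(icq);
	return 0;
}
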