bc1c116974
There's a race between shutting down one io scheduler and firing up the next, in which a new io could enter and cause the io scheduler to be invoked with bad or NULL data. To fix this, we need to maintain the queue lock for a bit longer. Unfortunately we cannot do that, since the elevator init must be run without the lock held. This isn't easily fixable without also changing the mempool API. So split the initialization into two parts: an alloc-init operation and an attach operation. Then we can preallocate the io scheduler and related structures, and run the attach inside the lock after we detach the old one. This patch has survived 30 minutes of 1 second io scheduler switching with a very busy io load. Signed-off-by: Jens Axboe <axboe@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
118 lines
2.5 KiB
C
118 lines
2.5 KiB
C
/*
 * elevator noop
 */
|
|
#include <linux/blkdev.h>
|
|
#include <linux/elevator.h>
|
|
#include <linux/bio.h>
|
|
#include <linux/module.h>
|
|
#include <linux/init.h>
|
|
|
|
/*
 * Per-queue private scheduler data: a single FIFO list of pending
 * requests, hung off q->elevator->elevator_data.
 */
struct noop_data {
	struct list_head queue;
};
|
|
|
|
/*
 * Called by the block layer after @next has been merged into @rq.
 * The merged request no longer exists on its own, so just unlink it
 * from our FIFO.
 */
static void noop_merged_requests(request_queue_t *q, struct request *rq,
				 struct request *next)
{
	list_del_init(&next->queuelist);
}
|
|
|
|
/*
 * Move the oldest queued request onto the block layer's dispatch queue.
 * @force is ignored: noop never holds requests back.
 *
 * Returns 1 if a request was dispatched, 0 if the FIFO was empty.
 */
static int noop_dispatch(request_queue_t *q, int force)
{
	struct noop_data *nd = q->elevator->elevator_data;
	struct request *rq;

	if (list_empty(&nd->queue))
		return 0;

	rq = list_entry(nd->queue.next, struct request, queuelist);
	list_del_init(&rq->queuelist);
	elv_dispatch_sort(q, rq);
	return 1;
}
|
|
|
|
/*
 * Accept a new request from the block layer: plain FIFO behaviour,
 * append at the tail of our list.
 */
static void noop_add_request(request_queue_t *q, struct request *rq)
{
	struct noop_data *nd = q->elevator->elevator_data;

	list_add_tail(&rq->queuelist, &nd->queue);
}
|
|
|
|
/*
 * Report whether the scheduler holds any pending requests.
 * Returns non-zero when the FIFO is empty.
 */
static int noop_queue_empty(request_queue_t *q)
{
	struct noop_data *nd = q->elevator->elevator_data;

	return list_empty(&nd->queue);
}
|
|
|
|
static struct request *
|
|
noop_former_request(request_queue_t *q, struct request *rq)
|
|
{
|
|
struct noop_data *nd = q->elevator->elevator_data;
|
|
|
|
if (rq->queuelist.prev == &nd->queue)
|
|
return NULL;
|
|
return list_entry(rq->queuelist.prev, struct request, queuelist);
|
|
}
|
|
|
|
static struct request *
|
|
noop_latter_request(request_queue_t *q, struct request *rq)
|
|
{
|
|
struct noop_data *nd = q->elevator->elevator_data;
|
|
|
|
if (rq->queuelist.next == &nd->queue)
|
|
return NULL;
|
|
return list_entry(rq->queuelist.next, struct request, queuelist);
|
|
}
|
|
|
|
/*
 * Allocate and initialize the per-queue scheduler data.  Called without
 * the queue lock held (part of the split alloc/attach elevator setup).
 *
 * Returns the new noop_data, or NULL on allocation failure.
 */
static void *noop_init_queue(request_queue_t *q, elevator_t *e)
{
	struct noop_data *nd = kmalloc(sizeof(*nd), GFP_KERNEL);

	if (nd)
		INIT_LIST_HEAD(&nd->queue);

	return nd;
}
|
|
|
|
/*
 * Tear down the per-queue scheduler data.  The queue must already be
 * drained when we get here; a non-empty FIFO indicates requests would
 * be leaked, so treat it as a bug.
 */
static void noop_exit_queue(elevator_t *e)
{
	struct noop_data *nd = e->elevator_data;

	BUG_ON(!list_empty(&nd->queue));
	kfree(nd);
}
|
|
|
|
/*
 * Elevator type descriptor registered with the block layer.  Only the
 * hooks the noop scheduler needs are filled in; unset ops fall back to
 * the elevator core's defaults.
 */
static struct elevator_type elevator_noop = {
	.ops = {
		.elevator_merge_req_fn = noop_merged_requests,
		.elevator_dispatch_fn = noop_dispatch,
		.elevator_add_req_fn = noop_add_request,
		.elevator_queue_empty_fn = noop_queue_empty,
		.elevator_former_req_fn = noop_former_request,
		.elevator_latter_req_fn = noop_latter_request,
		.elevator_init_fn = noop_init_queue,
		.elevator_exit_fn = noop_exit_queue,
	},
	.elevator_name = "noop",
	.elevator_owner = THIS_MODULE,
};
|
|
|
|
/*
 * Module entry point: register the noop elevator with the block layer.
 * Returns 0 on success or a negative errno from elv_register().
 */
static int __init noop_init(void)
{
	return elv_register(&elevator_noop);
}
|
|
|
|
/*
 * Module exit point: unregister the noop elevator.
 */
static void __exit noop_exit(void)
{
	elv_unregister(&elevator_noop);
}
|
|
|
|
/* Hook module load/unload to the register/unregister routines above. */
module_init(noop_init);
module_exit(noop_exit);


MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("No-op IO scheduler");
|