[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[dm-devel] example manual failover



ps-manual-failover-exp.patch - this is just an example of how you could use
the init function. Please do not apply it. It was written based on some other
patches, and I am trying to make it more generic so we can support other HW.
I am only sending it because I am not sure if the functions I am using to
send the failover command are correct and I am looking for feedback.


-- Mike Christie mikenc us ibm com
diff -Naurp linux-2.6.6/drivers/md/dm-ps-rdac.c linux-2.6.6-work/drivers/md/dm-ps-rdac.c
--- linux-2.6.6/drivers/md/dm-ps-rdac.c	1969-12-31 16:00:00.000000000 -0800
+++ linux-2.6.6-work/drivers/md/dm-ps-rdac.c	2004-05-24 17:02:19.555535341 -0700
@@ -0,0 +1,793 @@
+/*
+ * LSI RDAC Path-Selector for Device Mapper Multipath
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2004
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/rcupdate.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "dm.h"
+#include "dm-path-selector.h"
+
+#define RDAC_VERSION "0.1"
+
+/*
+ * Controller states.  Fix: the code references CTRLR_INITIALIZING
+ * (path_group_init, rdac_init) but the enum declared CTRLR_FAILING,
+ * which nothing used - renamed so the file compiles.
+ */
+enum {
+	CTRLR_ACTIVE,		/* controller owns the LUN and serves I/O */
+	CTRLR_PASSIVE,		/* standby; needs a failover before serving I/O */
+	CTRLR_FAILED,		/* no usable paths to this controller */
+	CTRLR_INITIALIZING,	/* failover command in flight */
+};
+
+/* human-readable names, indexed by the CTRLR_* values above */
+static char *rdac_states[] = {
+	"active",
+	"passive",
+	"failed",
+	"initializing",
+};
+
+/* one of the two physical controllers of an rdac device */
+struct controller {
+	/* set when every path on this controller has failed; read
+	 * locklessly in the fast path (rdac_select_path) */
+	atomic_t failed;
+	/* protects state and the lists below */
+	spinlock_t state_lock;
+	int state;			/* CTRLR_* value */
+	struct list_head path_groups;	/* groups attached to this controller */
+
+	/* groups waiting for an in-flight failover to complete */
+	struct list_head failover_notify_list;
+
+	/* fix: rdac_update_path() does call_rcu(&ctrlr->rcu, ...) but the
+	 * struct had no rcu_head member - added so it compiles */
+	struct rcu_head rcu;
+};
+
+#define RDAC_WWN_SZ 16
+
+/* one instance per physical RDAC array LUN, shared by the path groups
+ * (path selectors) that reach its two controllers */
+struct rdac_device {
+	struct controller controllers[2];
+	struct list_head list;		/* entry on the global rdac_devs list */
+	char wwn[RDAC_WWN_SZ];		/* world wide name; the lookup key */
+	atomic_t refcount;		/* one reference per attached path group */
+	unsigned use_subpage;		/* nonzero: use the 0x2C subpage mode select */
+};
+
+/* all known rdac devices, looked up by wwn */
+static struct list_head rdac_devs = LIST_HEAD_INIT(rdac_devs);
+/*
+ * device list manipulation and rdac device handle management
+ * should be done under this lock.
+ */
+static DECLARE_MUTEX(rdac_devs_lock);
+
+/* per-path bookkeeping; lives on a group's active or failed list */
+struct path_info {
+	struct list_head list;
+	struct path *path;
+
+	unsigned fail_count;	/* errors left before the path is retired */
+};
+
+/*
+ * Path-selector context: the set of paths that reach one controller of
+ * one rdac device.
+ */
+struct path_group {
+	spinlock_t path_lock;	/* protects the two path lists */
+
+	struct list_head active_paths;
+	struct list_head failed_paths;
+
+	struct rdac_device *rdac_dev;
+
+	unsigned ctrlr;			/* index (0/1) into rdac_dev->controllers */
+	struct list_head ctrlr_list;	/* entry on the controller's path_groups */
+	struct list_head notify;	/* entry on failover_notify_list */
+
+	struct path_selector *ps;	/* back pointer to the owning selector */
+};
+
+/* FIXME: get rid of this */
+#define MPATH_FAIL_COUNT 1
+
+/*
+ * rdac dev and controller functions
+ */
+/* put a controller into its initial state: active, with no groups or
+ * failover waiters attached */
+static void controller_init(struct controller *ctrlr)
+{
+	INIT_LIST_HEAD(&ctrlr->path_groups);
+	INIT_LIST_HEAD(&ctrlr->failover_notify_list);
+	spin_lock_init(&ctrlr->state_lock);
+	ctrlr->state = CTRLR_ACTIVE;
+}
+
+/* map a path group to the controller it reaches */
+static inline struct controller *to_controller(struct path_group *pg)
+{
+	return &pg->rdac_dev->controllers[pg->ctrlr];
+}
+
+/*
+ * Find the rdac device with the given wwn on the global list, or NULL.
+ * Must be called under rdac_devs_lock (see the lock's comment above).
+ *
+ * This is only done during device init and delete time so perf is not
+ * really a big deal.
+ */
+static struct rdac_device *__rdac_dev_lookup(char *wwn)
+{
+	struct rdac_device *dev;
+
+	list_for_each_entry (dev, &rdac_devs, list) {
+		if (!memcmp(dev->wwn, wwn, RDAC_WWN_SZ))
+			return dev;
+	}
+
+	return NULL;
+}
+
+/* take a reference on dev; callers hold rdac_devs_lock */
+static inline void __rdac_dev_get(struct rdac_device *dev)
+{
+	atomic_inc(&dev->refcount);
+}
+
+/* drop a reference; frees the device (and unlinks it from the global
+ * list, hence rdac_devs_lock must be held) when the last ref goes */
+static inline void __rdac_dev_put(struct rdac_device *dev)
+{
+	if (atomic_dec_and_test(&dev->refcount)) {
+		list_del(&dev->list);
+		kfree(dev);
+	}
+}
+
+/*
+ * Allocate a new rdac device with one reference held and link it onto
+ * the global list (so rdac_devs_lock must be held).  The wwn and
+ * use_subpage fields are filled in by the caller.  Returns NULL on
+ * allocation failure.
+ *
+ * NOTE(review): controllers[].failed (atomic_t) is never explicitly
+ * initialized here or in controller_init() - confirm it starts at 0.
+ */
+static struct rdac_device *rdac_dev_alloc(void)
+{
+	struct rdac_device *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+
+	if (dev) {
+		INIT_LIST_HEAD(&dev->list);
+		controller_init(&dev->controllers[0]);
+		controller_init(&dev->controllers[1]);
+		atomic_set(&dev->refcount, 1);
+		list_add(&dev->list, &rdac_devs);
+	}
+
+	return dev;
+}
+
+/*
+ * Look up the rdac device for argv[0] (wwn), or allocate a new one,
+ * taking a reference either way.  Returns NULL on allocation failure.
+ *
+ * Fix: the original called up() on entry and down() on exit, i.e. it
+ * released the semaphore before the critical section and acquired it
+ * afterwards.  Acquire with down(), release with up().
+ */
+static struct rdac_device *rdac_dev_init(char **argv, char **error)
+{
+	struct rdac_device *rdac_dev;
+
+	down(&rdac_devs_lock);
+
+	rdac_dev = __rdac_dev_lookup(argv[0]);
+	if (rdac_dev) {
+		__rdac_dev_get(rdac_dev);
+		goto done;
+	}
+
+	rdac_dev = rdac_dev_alloc();
+	if (!rdac_dev) {
+		*error = "rdac ps: could not allocate rdac dev";
+		goto done;
+	}
+
+	memcpy(rdac_dev->wwn, argv[0], RDAC_WWN_SZ);
+	sscanf(argv[1], "%u", &rdac_dev->use_subpage);
+done:
+	up(&rdac_devs_lock);
+
+	return rdac_dev;
+}
+
+/*
+ * path and path group functions
+ */
+
+/*
+ * Find the path_info wrapping p on the given list, or NULL.  Callers
+ * hold the group's path_lock.
+ *
+ * This is rarely executed so the loop is not as bad as it looks.
+ */
+static struct path_info *path_lookup(struct list_head *head, struct path *p)
+{
+	struct path_info *pi;
+
+	list_for_each_entry (pi, head, list)
+		if (pi->path == p)
+			return pi;
+
+	return NULL;
+}
+
+/* allocate an empty path group; returns NULL on allocation failure */
+static struct path_group *path_group_alloc(void)
+{
+	struct path_group *pg = kmalloc(sizeof(*pg), GFP_KERNEL);
+
+	if (pg) {
+		INIT_LIST_HEAD(&pg->active_paths);
+		INIT_LIST_HEAD(&pg->failed_paths);
+		INIT_LIST_HEAD(&pg->ctrlr_list);
+		INIT_LIST_HEAD(&pg->notify);
+		spin_lock_init(&pg->path_lock);
+	}
+
+	return pg;
+}
+
+/* release every path_info queued on the given list */
+static void free_paths(struct list_head *paths)
+{
+	while (!list_empty(paths)) {
+		struct path_info *pi;
+
+		pi = list_entry(paths->next, struct path_info, list);
+		list_del(&pi->list);
+		kfree(pi);
+	}
+}
+
+/* free a path group and every path attached to it */
+static void path_group_free(struct path_group *pg)
+{
+	free_paths(&pg->active_paths);
+	free_paths(&pg->failed_paths);
+	kfree(pg);
+}
+
+/*
+ * Parse the per-group constructor arguments (argv[2] = controller
+ * index, argv[3] = initial controller state), allocate the group, and
+ * link it onto its controller.  Sets *error and returns NULL on
+ * failure.
+ */
+static struct path_group *path_group_init(struct rdac_device *rdac_dev,
+					  char **argv, char **error)
+{
+	struct path_group *pg;
+	unsigned state;
+	struct controller *ctrlr;
+
+	pg = path_group_alloc();
+	if (!pg) {
+		*error = "rdac ps: could not allocate path group";
+		return NULL;
+	}
+	pg->rdac_dev = rdac_dev;
+
+	sscanf(argv[2], "%u", &pg->ctrlr);
+	if (pg->ctrlr > 1) {
+		*error = "rdac ps: invalid controller index";
+		kfree(pg);
+		return NULL;
+	}
+
+	/* CTRLR_INITIALIZING is the highest valid state value */
+	sscanf(argv[3], "%u", &state);
+	if (state > CTRLR_INITIALIZING) {
+		*error = "rdac ps: invalid controller state";
+		kfree(pg);
+		return NULL;
+	}
+
+	ctrlr = to_controller(pg);
+	spin_lock(&ctrlr->state_lock);
+	ctrlr->state = state;
+	list_add(&pg->ctrlr_list, &ctrlr->path_groups);
+	spin_unlock(&ctrlr->state_lock);
+
+	return pg;
+}
+
+/*
+ * Constructor.  argv = { wwn, use_subpage, ctrlr_index, ctrlr_state }.
+ * Attaches this selector to the (possibly shared) rdac device for the
+ * wwn and creates its path group.
+ *
+ * Fix: the error path released rdac_devs_lock with up() before the
+ * put and re-acquired it with down() afterwards - the pair was
+ * inverted.
+ */
+static int rdac_ctr(struct path_selector *ps, int argc, char **argv,
+		    char **error)
+{
+	struct path_group *pg;
+	struct rdac_device *rdac_dev;
+
+	if (argc != 4) {
+		*error = "rdac ps: incorrect number of arguments";
+		return -EINVAL;
+	}
+
+	rdac_dev = rdac_dev_init(argv, error);
+	if (!rdac_dev)
+		return -ENOMEM;
+
+	pg = path_group_init(rdac_dev, argv, error);
+	if (!pg) {
+		down(&rdac_devs_lock);
+		__rdac_dev_put(rdac_dev);
+		up(&rdac_devs_lock);
+
+		return -ENOMEM;
+	}
+
+	pg->ps = ps;
+	ps->context = pg;
+	return 0;
+}
+
+/*
+ * Destructor: drop our rdac device reference and free the group.
+ *
+ * Fix: acquire rdac_devs_lock with down() and release it with up() -
+ * the original had the pair inverted.
+ *
+ * NOTE(review): pg is freed without being unlinked from its
+ * controller's path_groups list (added in path_group_init) - confirm
+ * this cannot leave a dangling entry.
+ */
+static void rdac_dtr(struct path_selector *ps)
+{
+	struct path_group *pg = (struct path_group *) ps->context;
+
+	down(&rdac_devs_lock);
+	__rdac_dev_put(pg->rdac_dev);
+	up(&rdac_devs_lock);
+
+	path_group_free(pg);
+}
+
+/*
+ * Add a path to this group's active list.  Takes no per-path
+ * arguments.  Devices without a request queue are rejected because the
+ * failover command is submitted through it.  Returns 0 or a negative
+ * errno.
+ */
+static int rdac_add_path(struct path_selector *ps, struct path *path,
+			 int argc, char **argv, char **error)
+{
+	struct path_group *pg = (struct path_group *)ps->context;
+	struct path_info *pi;
+
+	if (argc != 0) {
+		*error = "rdac ps: incorrect number of arguments";
+		return -EINVAL;
+	}
+
+	if (!bdev_get_queue(dm_path_to_bdev(path))) {
+		*error = "rdac ps: invalid device (no request queue)";
+		return -EPERM;
+	}
+
+	pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+	if (!pi) {
+		*error = "rdac ps: error allocating path context";
+		return -ENOMEM;
+	}
+
+	pi->fail_count = MPATH_FAIL_COUNT;
+	pi->path = path;
+
+	spin_lock(&pg->path_lock);
+	list_add(&pi->list, &pg->active_paths);
+	spin_unlock(&pg->path_lock);
+
+	return 0;
+}
+
+/* RCU callback (queued by rdac_update_path) that marks a controller
+ * failed once readers have drained */
+static void set_controller_failed(void *arg)
+{
+	struct controller *ctrlr = (struct controller *)arg;
+
+	spin_lock(&ctrlr->state_lock);
+	ctrlr->state = CTRLR_FAILED;
+	spin_unlock(&ctrlr->state_lock);
+}
+
+/*
+ * Error callback for a path.  Decrements the path's fail count; at
+ * zero the path is retired to the failed list, and when that empties
+ * the active list the whole controller is marked failed.
+ *
+ * NOTE(review): state is set to CTRLR_FAILED directly here *and* again
+ * from the call_rcu() callback - the RCU call looks redundant; confirm
+ * which is intended.
+ */
+static void rdac_update_path(struct path_selector *ps, struct path *p,
+			     int error)
+{
+	unsigned long flags;
+	struct path_group *pg = (struct path_group *)ps->context;
+	struct path_info *pi;
+	struct controller *ctrlr = to_controller(pg);
+
+	spin_lock_irqsave(&pg->path_lock, flags);
+
+	pi = path_lookup(&pg->active_paths, p);
+	if (pi && !--pi->fail_count) {
+		list_move(&pi->list, &pg->failed_paths);
+		if (list_empty(&pg->active_paths)) {
+			spin_lock(&ctrlr->state_lock);
+			atomic_set(&ctrlr->failed, 1);
+			ctrlr->state = CTRLR_FAILED;
+			call_rcu(&ctrlr->rcu, set_controller_failed, ctrlr);
+			spin_unlock(&ctrlr->state_lock);
+		}
+	}
+
+	spin_unlock_irqrestore(&pg->path_lock, flags);
+}
+
+/*
+ * Pick the next path for I/O, round-robin over the active list, or
+ * NULL if the controller has failed or no active paths remain.
+ */
+static struct path *rdac_select_path(struct path_selector *ps)
+{
+	struct path_group *pg = (struct path_group *)ps->context;
+	struct controller *ctrlr = to_controller(pg);
+	struct path *path = NULL;
+	struct path_info *pi;
+	unsigned long flags;
+
+	/* lockless fast-path check: controller already gone */
+	if (atomic_read(&ctrlr->failed) == 1)
+		return NULL;
+
+	spin_lock_irqsave(&pg->path_lock, flags);
+	if (!list_empty(&pg->active_paths)) {
+		/* rotate the head to the tail for round-robin */
+		pi = list_entry(pg->active_paths.next, struct path_info, list);
+		list_move_tail(&pi->list, &pg->active_paths);
+		path = pi->path;
+	}
+	spin_unlock_irqrestore(&pg->path_lock, flags);
+
+	return path;
+}
+
+/*
+ * Per-path status for the STATUSTYPE_INFO query: "A"/"F" plus the
+ * remaining fail count.  Returns the number of bytes written.
+ *
+ * Fix: the original dereferenced pi unconditionally; if the path was on
+ * neither list, pi was NULL and this oopsed.
+ */
+static int rdac_path_status(struct path_selector *ps, struct path *path,
+			    status_type_t type, char *result,
+			    unsigned int maxlen)
+{
+	unsigned long flags;
+	struct path_info *pi;
+	int failed = 0;
+	struct path_group *pg = (struct path_group *)ps->context;
+	int sz = 0;
+
+	if (type == STATUSTYPE_INFO) {
+
+		spin_lock_irqsave(&pg->path_lock, flags);
+
+		/*
+		 * Is status called often for testing or something?
+		 * If so maybe a ps's info should be allocated w/ path
+		 * so a simple container_of can be used.
+		 */
+		pi = path_lookup(&pg->active_paths, path);
+		if (!pi) {
+			failed = 1;
+			pi = path_lookup(&pg->failed_paths, path);
+		}
+
+		if (pi)
+			sz = scnprintf(result, maxlen, "%s %u ",
+				       failed ? "F" : "A", pi->fail_count);
+
+		spin_unlock_irqrestore(&pg->path_lock, flags);
+	}
+
+	return sz;
+}
+
+/* path selector status: controller index and state name (INFO), or the
+ * full constructor argument set (TABLE) */
+static int rdac_ps_status(struct path_selector *ps, status_type_t type,
+			  char *result, unsigned int maxlen)
+{
+	unsigned long flags;
+	int sz = 0;
+	struct path_group *pg = (struct path_group *)ps->context;
+
+	spin_lock_irqsave(&pg->path_lock, flags);
+	
+	switch (type) {
+	case STATUSTYPE_INFO:
+		sz = scnprintf(result, maxlen, "%u %s ",
+			pg->ctrlr, rdac_states[to_controller(pg)->state]);
+		break;
+	case STATUSTYPE_TABLE:
+		sz = scnprintf(result, maxlen, "%s %u %u %u ",
+			pg->rdac_dev->wwn, pg->rdac_dev->use_subpage,
+			pg->ctrlr, to_controller(pg)->state);
+		break;
+	}
+
+	spin_unlock_irqrestore(&pg->path_lock, flags);
+	
+	return sz;
+}
+
+/*
+ * Wake every path group queued on this controller waiting for a
+ * failover to finish.  Called with the controller's state_lock held.
+ */
+static void notify_init_completion(struct path_group *pg)
+{
+	struct controller *ctrlr = to_controller(pg);
+	struct path_group *cur, *tmp;
+
+	list_for_each_entry_safe (cur, tmp, &ctrlr->failover_notify_list,
+				  notify) {
+		list_del_init(&cur->notify);
+		dm_ps_init_completion(cur->ps);
+	}
+}
+
+/* release a failover request plus the sense and data buffers that the
+ * lsi_page2c_*init() helpers hung off it */
+static inline void lsi_free_request(struct request *rq)
+{
+	kfree(rq->sense);
+	kfree(rq->data);
+	blk_put_request(rq);
+}
+
+static int failover_controller(struct path_group *pg);
+
+/*
+ * Completion handler for the failover mode select.  Success flips the
+ * controller to active and wakes waiting groups; on error the path
+ * that carried the command is retired and failover is retried on the
+ * next active path.
+ *
+ * NOTE(review): the error branch assumes active_paths is non-empty and
+ * that its head is the path the request went down - confirm both.
+ * NOTE(review): dropping and re-taking state_lock around
+ * failover_controller() opens a window in which state can change.
+ */
+static void rdac_end_request(struct request *rq)
+{
+	unsigned long flags;
+	struct path_group *pg = rq->special;
+	struct controller *ctrlr = to_controller(pg);
+	int error = rq->errors;
+
+	lsi_free_request(rq);
+
+	spin_lock_irqsave(&ctrlr->state_lock, flags);
+
+	/* this does not catch host errors */
+	if (!error) {
+		ctrlr->state = CTRLR_ACTIVE;
+		notify_init_completion(pg);
+	} else {
+		struct path_info *pi;
+
+		pi = list_entry(pg->active_paths.next, struct path_info, list);
+		list_move(&pi->list, &pg->failed_paths);
+		
+		spin_unlock_irqrestore(&ctrlr->state_lock, flags);
+		failover_controller(pg);
+		spin_lock_irqsave(&ctrlr->state_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&ctrlr->state_lock, flags);
+}
+
+/*
+ * LSI mode page stuff
+ *
+ * These struct definitions and the forming of the
+ * mode page were taken from the LSI RDAC 2.4 GPL'd
+ * driver, and then converted to Linux conventions.
+ */
+
+/*
+ * default value TODO - make configurable
+ */
+#define RDAC_FAILOVER_TIMEOUT (60 * HZ)
+
+/* fix: dropped the stray trailing ';', which would break any
+ * expression use of the macro */
+#define RDAC_QUIESCENCE_TIME 20
+/*
+ * Page Codes
+ */
+#define RDAC_PC_REDUNDANT_CONTROLLER 0x2c
+
+/*
+ * Controller modes definitions
+ */
+#define RDAC_ACTIVE_MODE 0x01
+
+/*
+ * RDAC Options field ("QUIESENCE" misspelling kept - renaming would
+ * touch every user)
+ */
+#define RDAC_FORCED_QUIESENCE 0x02
+
+/* MODE SELECT(10) parameter list header.
+ * NOTE(review): the multi-byte fields are plain u16 with no endianness
+ * conversion visible in this file - confirm big-endian handling. */
+struct mode_param_hdr {
+	u16	data_len;
+	u8	medium_type;
+	u8	device_params;
+	u16	reserved;
+	u16	block_desc_len;
+};
+
+/* RDAC redundant-controller mode page (0x2C), classic (non-subpage)
+ * layout; see lsi_page2c_init() for how it is filled in */
+struct mode_page2c {
+	struct mode_param_hdr hdr;
+	u8	page_code;
+	u8	page_len;
+	u8	controller_serial[16];
+	u8	alt_controller_serial[16];
+	u8	rdac_mode[2];
+	u8	alt_rdac_mode[2];
+	u8	quiescence_timeout;
+	u8	rdac_options;
+	u8	lun_table[64];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+/*
+ * Build a MODE SELECT(10) request carrying mode page 0x2C, asking the
+ * RDAC firmware to make this controller the active owner of the LUN.
+ * Returns NULL on allocation failure; on success the caller owns the
+ * request and its sense/data buffers (released by lsi_free_request()).
+ *
+ * Fixes over the original: kmalloc() takes (size, flags) - both calls
+ * had the arguments reversed - and the sense buffer allocation is now
+ * checked before use.
+ */
+static struct request *lsi_page2c_init(struct block_device *bdev)
+{
+	struct mode_page2c *page2c;
+	struct request *rq;
+	int data_len = sizeof(*page2c);
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	/* setup request */
+	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
+	if (!rq)
+		return NULL;
+
+	rq->timeout = RDAC_FAILOVER_TIMEOUT;
+	/* packet command: do not merge, fail fast on transport errors */
+	rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);
+	rq->rq_disk = bdev->bd_contains->bd_disk;
+
+	rq->sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
+	if (!rq->sense) {
+		blk_put_request(rq);
+		return NULL;
+	}
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->cmd[0] = MODE_SELECT_10;
+	rq->cmd[1] = 1;			/* SP: save pages */
+	rq->cmd[8] = data_len;		/* parameter list length */
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	/* setup data */
+	page2c = kmalloc(data_len, GFP_ATOMIC);
+	if (!page2c) {
+		kfree(rq->sense);
+		blk_put_request(rq);
+		return NULL;
+	}
+	memset(page2c, 0, data_len);
+
+	rq->data_len = data_len;
+	rq->data = page2c;
+
+	page2c->page_len = 0x68;
+	page2c->page_code = RDAC_PC_REDUNDANT_CONTROLLER;
+	/* switch to active/passive - this one active */
+	page2c->rdac_mode[1] = RDAC_ACTIVE_MODE;
+
+	page2c->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	/* forced quiesce */
+	page2c->rdac_options = RDAC_FORCED_QUIESENCE;
+
+	return rq;
+}
+
+/* RDAC redundant-controller mode page 0x2C, subpage 0x01 layout:
+ * 16-bit page length and the larger (256-entry) lun table */
+struct mode_page2c_subpage {
+	struct mode_param_hdr hdr;
+	u8	page_code;
+	u8	subpage_code;
+	u8	page_len[2];
+	u8	controller_serial[16];
+	u8	alt_controller_serial[16];
+	u8	rdac_mode[2];
+	u8	alt_rdac_mode[2];
+	u8	quiescence_timeout;
+	u8	rdac_options;
+	u8	lun_table[256];
+	u8	reserved3;
+	u8	reserved4;
+};
+
+/*
+ * Like lsi_page2c_init() but builds the subpage (0x2C/0x01) variant of
+ * the mode select for arrays using the extended lun table.
+ *
+ * Fixes over the original: kmalloc() argument order was reversed, the
+ * undefined "data_size" identifier is data_len, the sense allocation
+ * is now checked, and the SPF bit in the page code byte is 0x40 (bit
+ * 6), not 0x04.
+ */
+static struct request *lsi_page2c_subpage_init(struct block_device *bdev)
+{
+	struct mode_page2c_subpage *page2c;
+	struct request *rq;
+	int data_len = sizeof(*page2c);
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	/* setup request */
+	rq = blk_get_request(q, WRITE, GFP_ATOMIC);
+	if (!rq)
+		return NULL;
+
+	rq->timeout = RDAC_FAILOVER_TIMEOUT;
+	rq->flags |= (REQ_BLOCK_PC | REQ_FAILFAST | REQ_NOMERGE);
+	rq->rq_disk = bdev->bd_contains->bd_disk;
+
+	rq->sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_ATOMIC);
+	if (!rq->sense) {
+		blk_put_request(rq);
+		return NULL;
+	}
+	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
+	rq->sense_len = 0;
+
+	memset(&rq->cmd, 0, BLK_MAX_CDB);
+	rq->cmd[0] = MODE_SELECT_10;
+	rq->cmd[1] = 1;			/* SP: save pages */
+	rq->cmd[7] = data_len >> 8;	/* parameter list length (MSB) */
+	rq->cmd[8] = data_len & 0xff;	/* parameter list length (LSB) */
+	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
+
+	/* setup data */
+	page2c = kmalloc(data_len, GFP_ATOMIC);
+	if (!page2c) {
+		kfree(rq->sense);
+		blk_put_request(rq);
+		return NULL;
+	}
+	memset(page2c, 0, data_len);
+
+	rq->data_len = data_len;
+	rq->data = page2c;
+
+	page2c->page_len[0] = 0x01;
+	page2c->page_len[1] = 0x28;
+	/* Page 0x2C with the SPF bit set */
+	page2c->page_code = 0x2c | 0x40;
+	page2c->subpage_code = 0x01;
+	/* switch to active/passive - this one active */
+	page2c->rdac_mode[1] = RDAC_ACTIVE_MODE;
+
+	page2c->quiescence_timeout = RDAC_QUIESCENCE_TIME;
+	/* forced quiesce */
+	page2c->rdac_options = RDAC_FORCED_QUIESENCE;
+
+	return rq;
+}
+
+/*
+ * Issue the mode select that fails the device over to pg's controller
+ * down the given path.  Completion is handled asynchronously by
+ * rdac_end_request().  Returns 0 on submission, -ENOMEM otherwise.
+ *
+ * Fixes over the original: the page2c init helpers take only the bdev
+ * (they were called with a spurious pg argument that did not match
+ * their prototypes), and the missing semicolon after
+ * blk_insert_request() is restored.
+ */
+static int exec_failover_req(struct block_device *bdev, struct path_group *pg)
+{
+	struct request *rq;
+	struct request_queue *q = bdev_get_queue(bdev);
+
+	/* SCSI PC cmd - modeled after block/scsi_ioctl.c */
+	if (pg->rdac_dev->use_subpage)
+		rq = lsi_page2c_subpage_init(bdev);
+	else
+		rq = lsi_page2c_init(bdev);
+	if (!rq)
+		return -ENOMEM;
+
+	rq->end_request = rdac_end_request;
+	rq->ref_count++;
+
+	/* at-head insertion so it runs ahead of queued I/O */
+	blk_insert_request(q, rq, 1, pg, 0);
+
+	return 0;
+}
+
+/*
+ * Send the failover command down the first usable active path,
+ * retiring paths that cannot carry it.  Returns DM_PS_INITIALIZING
+ * once a command is in flight, or DM_PS_FAILED when no path worked,
+ * in which case the controller is marked failed and waiters woken.
+ */
+static int failover_controller(struct path_group *pg)
+{
+	struct controller *ctrlr = to_controller(pg);
+	unsigned long flags;
+
+	while (!list_empty(&pg->active_paths)) {
+		struct path_info *pi;
+
+		pi = list_entry(pg->active_paths.next, struct path_info, list);
+		if (!exec_failover_req(dm_path_to_bdev(pi->path), pg))
+			return DM_PS_INITIALIZING;
+
+		/* this path could not take the command - retire it */
+		list_move(&pi->list, &pg->failed_paths);
+	}
+	/*
+	 * At this point we could look at the rdac_devs's groups
+	 * incase a group had paths through other HBAs or fabrics.
+	 */
+	spin_lock_irqsave(&ctrlr->state_lock, flags);
+	ctrlr->state = CTRLR_FAILED;
+	notify_init_completion(pg);
+	spin_unlock_irqrestore(&ctrlr->state_lock, flags);
+
+	return DM_PS_FAILED;
+}
+
+/*
+ * Called before the group is used.  Starts a controller failover if
+ * this group's controller is passive, or queues the group for
+ * notification when a failover is already in flight.
+ */
+static int rdac_init(struct path_selector *ps)
+{
+	struct path_group *pg = (struct path_group *)ps->context;
+	struct controller *ctrlr = to_controller(pg);
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctrlr->state_lock, flags);
+	switch (ctrlr->state) {
+	case CTRLR_PASSIVE:
+		ctrlr->state = CTRLR_INITIALIZING;
+
+		/* lock dropped across failover_controller() - presumably to
+		 * avoid holding a spinlock across request submission */
+		spin_unlock_irqrestore(&ctrlr->state_lock, flags);
+		ret = failover_controller(pg);
+		spin_lock_irqsave(&ctrlr->state_lock, flags);
+		if (ret == DM_PS_FAILED)
+			break;
+		/* fall through: wait for the in-flight failover */
+	case CTRLR_INITIALIZING:
+		list_add(&pg->notify, &ctrlr->failover_notify_list);
+		ret = DM_PS_INITIALIZING;
+		break;
+	case CTRLR_FAILED:
+		ret = DM_PS_FAILED;
+		break;
+	default:
+		ret = DM_PS_SUCCESS;
+	}
+	spin_unlock_irqrestore(&ctrlr->state_lock, flags);
+
+	return ret;
+}
+
+/* registration table for the "rdac" path selector: 4 table args
+ * (wwn, use_subpage, ctrlr index, ctrlr state), 2 info args */
+static struct path_selector_type rdac_ps = {
+	.name = "rdac",
+	.module = THIS_MODULE,
+	.selector_tbl_args = 4,
+	.selector_info_args = 2,
+	.ctr = rdac_ctr,
+	.init = rdac_init,
+	.dtr = rdac_dtr,
+	.add_path = rdac_add_path,
+	.update_path = rdac_update_path,
+	.select_path = rdac_select_path,
+	.selector_status = rdac_ps_status,
+	.path_status = rdac_path_status,
+};
+
+/* register the "rdac" selector with device mapper on module load
+ * NOTE(review): init_rdac/exit_rdac could be static */
+int __init init_rdac(void)
+{
+	return dm_register_path_selector(&rdac_ps);
+}
+
+/* unregister on module unload */
+void __exit exit_rdac(void)
+{
+	dm_unregister_path_selector(&rdac_ps);
+}
+
+module_init(init_rdac);
+module_exit(exit_rdac);
+
+MODULE_DESCRIPTION("RDAC Path Selector");
+MODULE_AUTHOR("Mike Christie");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(RDAC_VERSION);

[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]