
[Cluster-devel] cluster/cmirror-kernel src/dm-clog-tfr.c src/d ...



CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	jbrassow sourceware org	2007-08-23 19:54:58

Modified files:
	cmirror-kernel/src: dm-clog-tfr.c dm-clog-tfr.h dm-clog.c 
Added files:
	cmirror-kernel : Makefile configure 
	cmirror-kernel/make: defines.mk.input release.mk.input 
	cmirror-kernel/scripts: uninstall.pl 
	cmirror-kernel/src: Makefile 

Log message:
	- RHEL5 cluster mirror module

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/Makefile.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/configure.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/make/defines.mk.input.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/make/release.mk.input.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/scripts/uninstall.pl.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/Makefile.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=NONE&r2=1.1.6.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/dm-clog-tfr.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/dm-clog-tfr.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.1&r2=1.1.2.1
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/cmirror-kernel/src/dm-clog.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.2&r2=1.2.2.1

--- cluster/cmirror-kernel/src/dm-clog-tfr.c	2006/06/21 01:41:43	1.1
+++ cluster/cmirror-kernel/src/dm-clog-tfr.c	2007/08/23 19:54:57	1.1.2.1
@@ -4,19 +4,106 @@
  * This file is released under the LGPL.
  */
 
-#include "dm-clog-tfr.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include <linux/workqueue.h>
+#include <linux/dm-clog-tfr.h>
+#include <linux/connector.h>
+#include "dm.h"
+
+static uint64_t seq = 0;
 
 /*
- * Pre-allocated nominal request area for speed
+ * Pre-allocated space for speed
  */
-#define DM_CLOG_NOMINAL_REQUEST_SIZE 512
-static char nominal_request[DM_CLOG_NOMINAL_REQUEST_SIZE];
+#define DM_CLOG_PREALLOCED_SIZE 512
+static struct cn_msg *prealloced_cn_msg = NULL;
+static struct clog_tfr *prealloced_clog_tfr = NULL;
+
+static struct cb_id cn_clog_id = { 0x4, 0x1 };
+static DEFINE_MUTEX(_lock);
+
+struct recieving_pkg {
+	struct list_head list;
+	struct completion complete;
+
+	uint64_t seq;
+
+	int error;
+	int *data_size;
+	char *data;
+};
+
+static spinlock_t recieving_list_lock = SPIN_LOCK_UNLOCKED;
+static struct list_head recieving_list;
 
-static DECLARE_MUTEX(consult_server_lock);
+static int dm_clog_sendto_server(struct clog_tfr *tfr)
+{
+	int r;
+	int size;
+	struct cn_msg *msg = prealloced_cn_msg;
+
+	if (tfr != prealloced_clog_tfr) {
+		size = sizeof(*msg) + sizeof(struct clog_tfr) + tfr->data_size;
+		msg = kmalloc(size, GFP_NOIO);
+		if (!msg)
+			return -ENOMEM;
+		memcpy((msg + 1), tfr, sizeof(struct clog_tfr) + tfr->data_size);
+	}
+
+	memset(msg, 0, sizeof(*msg));
+
+	msg->id.idx = cn_clog_id.idx;
+	msg->id.val = cn_clog_id.val;
+	msg->ack = 0;
+	msg->seq = 0;
+	msg->len = sizeof(struct clog_tfr) + tfr->data_size;
+
+	r = cn_netlink_send(msg, 0, gfp_any());
+	if (msg != prealloced_cn_msg)
+		kfree(msg);
+
+	return r;
+}
+
+static void cn_clog_callback(void *data)
+{
+	struct cn_msg *msg = (struct cn_msg *)data;
+	struct clog_tfr *tfr = (struct clog_tfr *)(msg + 1);
+	struct recieving_pkg *pkg;
+
+	spin_lock(&recieving_list_lock);
+	if (msg->len < sizeof(*tfr)) {
+		DMERR("Incomplete message recieved from cluster log server");
+		spin_unlock(&recieving_list_lock);
+		return;
+	}
+
+	list_for_each_entry(pkg, &recieving_list, list) {
+		if (tfr->seq == pkg->seq) {
+			if (tfr->data_size > *(pkg->data_size)) {
+				DMERR("Insufficient space to recieve package [%s]",
+				      RQ_TYPE(tfr->request_type));
+				*(pkg->data_size) = 0;
+				pkg->error = -ENOSPC;
+			} else {
+				pkg->error = tfr->error;
+				memcpy(pkg->data, tfr->data, tfr->data_size);
+				*(pkg->data_size) = tfr->data_size;
+			}
+
+			complete(&pkg->complete);
+			break;
+		}
+	}
+
+	spin_unlock(&recieving_list_lock);
+}
 
 /*
  * dm_clog_consult_server
- * @uuid: log's uuid (must be MAX_NAME_LEN in size)
+ * @uuid: log's uuid (must be DM_UUID_LEN in size)
  * @request_type:
  * @data: data to tx to the server
  * @data_size: size of data in bytes
@@ -24,60 +111,117 @@
  * @rdata_size: value-result (amount of space given/amount of space used)
  *
  * Only one process at a time can communicate with the server.
- * Possible error return values:
- *   +XXX:       Server-side error
- *   -XXX:       Client-side error
- *   -ENOSPC:    Not enough space in rdata
- *   -ENOMEM:    Unable to allocate memory to complete request
- *   -ESRCH:     Unable to contact server
- *   EIO:        Server unable to commit request
+ * rdata_size is undefined on failure.
  *
- * Returns: 0 on success, otherwise failure
+ * Returns: 0 on success, -EXXX on failure
  */
 int dm_clog_consult_server(const char *uuid, int request_type,
 			   char *data, int data_size,
 			   char *rdata, int *rdata_size)
 {
 	int r = 0;
-	struct clog_tfr *tfr = (struct clog_tfr *)nominal_request;
+	int dummy = 0;
+	int overhead_size = sizeof(struct clog_tfr *) + sizeof(struct cn_msg);
+	struct clog_tfr *tfr = prealloced_clog_tfr;
+	struct recieving_pkg pkg;
 
-	mutex_lock(&consult_server_lock);
-	if (data_size > (DM_CLOG_NOMINAL_REQUEST_SIZE - sizeof(*tfr)))
+	if (data_size > (DM_CLOG_PREALLOCED_SIZE - overhead_size)) {
+		DMINFO("Size of tfr exceeds preallocated size");
 		/* FIXME: is kmalloc sufficient if we need this much space? */
-		tfr = kmalloc(data_size + sizeof(*tfr), GFP_KERNEL);
+		tfr = kmalloc(data_size + sizeof(*tfr), GFP_NOIO);
+	}
 
 	if (!tfr)
 		return -ENOMEM;
 
-	memcpy(tfr->uuid, uuid, MAX_NAME_LEN);
+	if (!rdata_size)
+		rdata_size = &dummy;
+resend:
+	/*
+	 * We serialize the sending of requests so we can
+	 * use the preallocated space.
+	 */
+	mutex_lock(&_lock);
+
+	memset(tfr, 0, data_size + sizeof(*tfr));
+	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->seq = seq++;
 	tfr->request_type = request_type;
 	tfr->data_size = data_size;
+	if (data && data_size)
+		memcpy(tfr->data, data, data_size);
 
-	/*
-	 * FIXME: Send to server
-	 */
+	memset(&pkg, 0, sizeof(pkg));
+	init_completion(&pkg.complete);
+	pkg.seq = tfr->seq;
+	pkg.data_size = rdata_size;
+	pkg.data = rdata;
+	spin_lock(&recieving_list_lock);
+	list_add(&(pkg.list), &recieving_list);
+	spin_unlock(&recieving_list_lock);
+
+	r = dm_clog_sendto_server(tfr);
+
+	mutex_unlock(&_lock);
+
+	if (r) {
+		DMERR("Unable to send cluster log request [%s] to server: %d",
+		      RQ_TYPE(request_type), r);
+		goto out;
+	}
 
-	if (rdata) {
-		/* FIXME: receive from server */
-		if (tfr->error) {
-			r = tfr->error;
-		} else if (tfr->data_size > *rdata_size) {
-			r = -ENOSPC;
-		} else {
-			*rdata_size = tfr->data_size;
-			memcpy(rdata, tft->data, tfr->data_size);
-		}
-		/* FIXME:  If using netlink, we may wish to ack back */
-	} else {
-		/*
-		 * FIXME: If we are using netlink, we may want an
-		 * ack from the server to know that it got the
-		 * request.  (Ack is implicit if we are receiving
-		 * data.)
-		 */
+	r = wait_for_completion_timeout(&(pkg.complete), 5 * HZ);
+	if (!r) {
+		DMWARN("Timed out waiting for cluster log server [%s]",
+		       RQ_TYPE(request_type));
+		DMWARN("Retrying request [%s]", RQ_TYPE(request_type));
+		spin_lock(&recieving_list_lock);
+		list_del_init(&(pkg.list));
+		spin_unlock(&recieving_list_lock);
+
+		goto resend;
 	}
-	r = ENOSYS;
+	r = pkg.error;
+	if (r)
+		DMERR("Server error while processing request [%s]: %d",
+		      RQ_TYPE(request_type), r);
+
+out:
+	spin_lock(&recieving_list_lock);
+	list_del_init(&(pkg.list));
+	spin_unlock(&recieving_list_lock);
+
+	if (tfr != (struct clog_tfr *)prealloced_clog_tfr)
+		kfree(tfr);
 
-	mutex_unlock(&consult_server_lock);
 	return r;
 }
+
+int dm_clog_tfr_init(void)
+{
+	int r;
+	void *prealloced;
+
+	INIT_LIST_HEAD(&recieving_list);
+
+	prealloced = kmalloc(DM_CLOG_PREALLOCED_SIZE, GFP_KERNEL);
+	if (!prealloced)
+		return -ENOMEM;
+
+	prealloced_cn_msg = prealloced;
+	prealloced_clog_tfr = prealloced + sizeof(struct cn_msg);
+
+	r = cn_add_callback(&cn_clog_id, "clulog", cn_clog_callback);
+	if (r) {
+		cn_del_callback(&cn_clog_id);
+		return r;
+	}
+
+	return 0;
+}
+
+void dm_clog_tfr_exit(void)
+{
+	cn_del_callback(&cn_clog_id);
+	kfree(prealloced_cn_msg);
+}
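
The dm-clog-tfr.c change above replaces the old FIXME stubs with a real transport: each request carries a sequence number, the caller registers a package with a completion on a shared list, the message goes out over the netlink connector, and the connector callback matches the reply by sequence number, copies the payload back, and wakes the waiter (with a 5-second timeout and resend on the sending side). The following is a small user-space sketch of that same "match the reply by sequence number, wake the waiter" idea, using pthreads in place of the kernel's spinlock, list_head and wait_for_completion_timeout(); all names here are illustrative, not the driver's.

/*
 * Illustrative only: a pending request is registered before sending,
 * and a reply handler (here a second thread) finds it by sequence
 * number, fills in the result and signals the waiter.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pending {
	struct pending *next;
	uint64_t seq;
	int done;
	int error;
	char data[64];
	pthread_cond_t cond;
};

static struct pending *pending_list;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Reply side: find the waiter with a matching sequence number and wake it. */
static void deliver_reply(uint64_t seq, const char *payload, int error)
{
	struct pending *p;

	pthread_mutex_lock(&lock);
	for (p = pending_list; p; p = p->next) {
		if (p->seq != seq)
			continue;
		p->error = error;
		strncpy(p->data, payload, sizeof(p->data) - 1);
		p->done = 1;
		pthread_cond_signal(&p->cond);
		break;
	}
	pthread_mutex_unlock(&lock);
}

static void *replier(void *arg)
{
	(void)arg;
	deliver_reply(1, "resync work: region 7", 0);
	return NULL;
}

int main(void)
{
	struct pending req = { .seq = 1 };
	pthread_t tid;

	pthread_cond_init(&req.cond, NULL);

	/* Register the pending request before sending, as the patch does. */
	pthread_mutex_lock(&lock);
	req.next = pending_list;
	pending_list = &req;
	pthread_mutex_unlock(&lock);

	pthread_create(&tid, NULL, replier, NULL); /* stands in for the server */

	/* Wait for the matching reply (the kernel side adds timeout + resend). */
	pthread_mutex_lock(&lock);
	while (!req.done)
		pthread_cond_wait(&req.cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("seq %llu -> error=%d data=\"%s\"\n",
	       (unsigned long long)req.seq, req.error, req.data);
	pthread_join(tid, NULL);
	return 0;
}
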
--- cluster/cmirror-kernel/src/dm-clog-tfr.h	2006/06/21 01:41:43	1.1
+++ cluster/cmirror-kernel/src/dm-clog-tfr.h	2007/08/23 19:54:57	1.1.2.1
@@ -5,6 +5,11 @@
  */
 
 #ifndef __DM_CLOG_TFR_H__
+#define __DM_CLOG_TFR_H__
+
+#include <linux/dm-ioctl.h> /* For DM_UUID_LEN */
+
+#define DM_CLOG_TFR_SIZE 1024
 
 #define DM_CLOG_CTR                    1
 #define DM_CLOG_DTR                    2
@@ -13,28 +18,56 @@
 #define DM_CLOG_RESUME                 5
 #define DM_CLOG_GET_REGION_SIZE        6
 #define DM_CLOG_IS_CLEAN               7
-#define DM_CLOG_IS_REMOTE_RECOVERING   8
-#define DM_CLOG_IN_SYNC                9
-#define DM_CLOG_FLUSH                 10
-#define DM_CLOG_MARK_REGION           11
-#define DM_CLOG_CLEAR_REGION          12
-#define DM_CLOG_GET_RESYNC_WORK       13
-#define DM_CLOG_SET_REGION_SYNC       14
-#define DM_CLOG_GET_SYNC_COUNT        15
-#define DM_CLOG_STATUS                16
-#define DM_CLOG_GET_FAILURE_RESPONSE  17
+#define DM_CLOG_IN_SYNC                8
+#define DM_CLOG_FLUSH                  9
+#define DM_CLOG_MARK_REGION           10
+#define DM_CLOG_CLEAR_REGION          11
+#define DM_CLOG_GET_RESYNC_WORK       12
+#define DM_CLOG_SET_REGION_SYNC       13
+#define DM_CLOG_GET_SYNC_COUNT        14
+#define DM_CLOG_STATUS_INFO           15
+#define DM_CLOG_STATUS_TABLE          16
+
+#define RQ_TYPE(x) \
+	((x) == DM_CLOG_CTR) ? "DM_CLOG_CTR" : \
+	((x) == DM_CLOG_DTR) ? "DM_CLOG_DTR" : \
+	((x) == DM_CLOG_PRESUSPEND) ? "DM_CLOG_PRESUSPEND" : \
+	((x) == DM_CLOG_POSTSUSPEND) ? "DM_CLOG_POSTSUSPEND" : \
+	((x) == DM_CLOG_RESUME) ? "DM_CLOG_RESUME" : \
+	((x) == DM_CLOG_GET_REGION_SIZE) ? "DM_CLOG_GET_REGION_SIZE" : \
+	((x) == DM_CLOG_IS_CLEAN) ? "DM_CLOG_IS_CLEAN" : \
+	((x) == DM_CLOG_IN_SYNC) ? "DM_CLOG_IN_SYNC" : \
+	((x) == DM_CLOG_FLUSH) ? "DM_CLOG_FLUSH" : \
+	((x) == DM_CLOG_MARK_REGION) ? "DM_CLOG_MARK_REGION" : \
+	((x) == DM_CLOG_CLEAR_REGION) ? "DM_CLOG_CLEAR_REGION" : \
+	((x) == DM_CLOG_GET_RESYNC_WORK) ? "DM_CLOG_GET_RESYNC_WORK" : \
+	((x) == DM_CLOG_SET_REGION_SYNC) ? "DM_CLOG_SET_REGION_SYNC" : \
+	((x) == DM_CLOG_GET_SYNC_COUNT) ? "DM_CLOG_GET_SYNC_COUNT" : \
+	((x) == DM_CLOG_STATUS_INFO) ? "DM_CLOG_STATUS_INFO" : \
+	((x) == DM_CLOG_STATUS_TABLE) ? "DM_CLOG_STATUS_TABLE" : \
+	NULL
 
 struct clog_tfr {
-	char uuid[MAX_NAME_LEN];
-	int error;               /* Used by server to inform of errors */
-	int request_type;
-	int data_size;
+	uint64_t private[2];
+	char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */
+
+	int error;              /* Used by server to inform of errors */
+	uint32_t originator;    /* Cluster ID of this machine */
+
+	uint64_t seq;           /* Sequence number for request */
+	uint32_t request_type;  /* DM_CLOG_* */
+	uint32_t data_size;     /* How much data (not including this struct) */
 	char data[0];
 };
 
+#ifdef __KERNEL__
+#define DM_MSG_PREFIX "clulog"
 
+int dm_clog_tfr_init(void);
+void dm_clog_tfr_exit(void);
 int dm_clog_consult_server(const char *uuid, int request_type,
 			   char *data, int data_size,
 			   char *rdata, int *rdata_size);
+#endif
 
 #endif /* __DM_CLOG_TFR_H__ */
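
The new struct clog_tfr in dm-clog-tfr.h is a fixed header followed by a data[0] payload whose length is given by data_size; both the kernel and the userspace server size their buffers as sizeof(struct clog_tfr) + data_size. The snippet below is a trimmed user-space stand-in (not the real header) that shows only that allocation and packing idiom, using the C99 flexible-array spelling.

/*
 * Illustrative only: pack a variable-length message shaped like
 * clog_tfr (fixed header + data_size bytes of payload).
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg_hdr {
	uint64_t seq;
	uint32_t request_type;
	uint32_t data_size;
	char data[];		/* C99 flexible array member (data[0] in the patch) */
};

static struct msg_hdr *pack_msg(uint64_t seq, uint32_t type,
				const void *payload, uint32_t size)
{
	struct msg_hdr *m = malloc(sizeof(*m) + size);

	if (!m)
		return NULL;
	m->seq = seq;
	m->request_type = type;
	m->data_size = size;
	if (size)
		memcpy(m->data, payload, size);
	return m;
}

int main(void)
{
	uint64_t region = 42;	/* e.g. the region argument of a mark request */
	struct msg_hdr *m = pack_msg(1, 10, &region, sizeof(region));

	if (!m)
		return 1;
	printf("total message size: %zu bytes\n",
	       sizeof(*m) + (size_t)m->data_size);
	free(m);
	return 0;
}
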
--- cluster/cmirror-kernel/src/dm-clog.c	2006/06/26 20:10:51	1.2
+++ cluster/cmirror-kernel/src/dm-clog.c	2007/08/23 19:54:57	1.2.2.1
@@ -4,7 +4,9 @@
  * This file is released under the LGPL.
  */
 
-#include "dm-clog-tfr.h"
+#include "dm.h"
+#include "dm-log.h"
+#include <linux/dm-clog-tfr.h>
 
 struct flush_entry {
 	int type;
@@ -16,8 +18,7 @@
 	struct dm_target *ti;
 	uint32_t region_size;
 	region_t region_count;
-	int failure_response;
-	char uuid[MAX_NAME_LEN];
+	char uuid[DM_UUID_LEN];
 
 	spinlock_t flush_lock;
 	struct list_head flush_list;  /* only for clear and mark requests */
@@ -25,7 +26,7 @@
 
 static mempool_t *flush_entry_pool = NULL;
 
-static void *flush_entry_alloc(int gfp_mask, void *pool_data)
+static void *flush_entry_alloc(gfp_t gfp_mask, void *pool_data)
 {
 	return kmalloc(sizeof(struct flush_entry), gfp_mask);
 }
@@ -40,24 +41,17 @@
 {
 	int i;
 	int r = 0;
-	int failure_response = FR_NONBLOCK;
+	int str_size;
+	char *ctr_str = NULL;
 	struct log_c *lc = NULL;
 	uint32_t region_size;
 	region_t region_count;
 
 	/* Already checked argument count */
-
-	/* Check for block_on_error.  It must be present. */
-	for (i = 1; i < argc; i++) {
-		if (!strcmp(argv[i], "block_on_error"))
-			failure_response = FR_BLOCK;
-	}
-	if (failure_response != FR_BLOCK) {
-		DMWARN("Required \"block_on_error\" argument not supplied.");
+	if (disk_log != 0 && disk_log != 1)
 		return -EINVAL;
-	}
 
-	if (sscanf(argv[0], SECTOR_FORMAT, &region_size) != 1) {
+	if (sscanf(argv[disk_log], "%u", &region_size) != 1) {
 		DMWARN("Invalid region size string");
 		return -EINVAL;
 	}
@@ -69,17 +63,47 @@
 		DMWARN("Unable to allocate cluster log context.");
 		return -ENOMEM;
 	}
+
 	lc->ti = ti;
 	lc->region_size = region_size;
 	lc->region_count = region_count;
 
-	/* FIXME: Send table string to server */
+	/* FIXME: Need to check size of uuid arg */
+	memcpy(lc->uuid, argv[1 + disk_log], DM_UUID_LEN);
+	spin_lock_init(&lc->flush_lock);
+	INIT_LIST_HEAD(&lc->flush_list);
+
+	for (i = 0, str_size = 0; i < argc; i++)
+		str_size += strlen(argv[i]) + 1; /* +1 for space between args */
+
+	str_size += 20; /* Max number of chars in a printed u64 number */
+
+	ctr_str = kmalloc(str_size, GFP_KERNEL);
+	if (!ctr_str) {
+		DMWARN("Unable to allocate memory for constructor string");
+		kfree(lc);
+		return -ENOMEM;
+	}
 
-fail:
-	if (lc)
+	for (i = 0, str_size = 0; i < argc; i++)
+		str_size += sprintf(ctr_str + str_size, "%s ", argv[i]);
+	str_size += sprintf(ctr_str + str_size, "%llu", ti->len);
+
+	/* Send table string */
+	r = dm_clog_consult_server(lc->uuid, DM_CLOG_CTR,
+				   ctr_str, str_size, NULL, NULL);
+
+	if (r == -ESRCH)
+		DMERR(" Userspace cluster log server not found");
+
+	log->context = lc;
+
+	if (r && lc)
 		kfree(lc);
-	
-	return -ENOSYS;
+	if (ctr_str)
+		kfree(ctr_str);
+
+	return r;
 }
 
 /*
@@ -90,24 +114,26 @@
  * @argv
  *
  * argv contains:
- *   <region_size> <uuid> [[no]sync] "block_on_error"
+ *   <region_size> <uuid> [[no]sync] <block_on_error>
  *
  * Returns: 0 on success, -XXX on failure
  */
 static int cluster_core_ctr(struct dirty_log *log, struct dm_target *ti,
 			    unsigned int argc, char **argv)
 {
-	int i;
+	int i, r;
 	if ((argc < 3) || (argc > 4)) {
 		DMERR("Too %s arguments to clustered_core mirror log type.",
-		      (argc < 3) ? "few" : "many");
+		      (argc < 2) ? "few" : "many");
 		DMERR("  %d arguments supplied:", argc);
 		for (i = 0; i < argc; i++)
 			DMERR("    %s", argv[i]);
 		return -EINVAL;
 	}
 
-	return cluster_ctr(log, ti, argc, argv, 0);
+	r = cluster_ctr(log, ti, argc, argv, 0);
+
+	return r;
 }
 
 
@@ -119,7 +145,7 @@
  * @argv
  *
  * argv contains:
- *   <disk> <region_size> <uuid> [[no]sync] "block_on_error"
+ *   <disk> <region_size> <uuid> [[no]sync] <block_on_error>
  *
  * Returns: 0 on success, -XXX on failure
  */
@@ -129,7 +155,7 @@
 	int i;
 	if ((argc < 4) || (argc > 5)) {
 		DMERR("Too %s arguments to clustered_disk mirror log type.",
-		      (argc < 4) ? "few" : "many");
+		      (argc < 3) ? "few" : "many");
 		DMERR("  %d arguments supplied:", argc);
 		for (i = 0; i < argc; i++)
 			DMERR("    %s", argv[i]);
@@ -153,6 +179,7 @@
 				   NULL, NULL);
 
 	/* FIXME: What do we do on failure? */
+
 	kfree(lc);
 
 	return;
@@ -171,7 +198,7 @@
 				   NULL, 0,
 				   NULL, NULL);
 
-	return (r > 0) ? -r : r;
+	return r;
 }
 
 /*
@@ -187,7 +214,7 @@
 				   NULL, 0,
 				   NULL, NULL);
 
-	return (r > 0) ? -r : r;
+	return r;
 }
 
 /*
@@ -203,7 +230,7 @@
 				   NULL, 0,
 				   NULL, NULL);
 
-	return (r > 0) ? -r : r;
+	return r;
 }
 
 /*
@@ -247,32 +274,6 @@
 }
 
 /*
- * cluster_is_remote_recovering
- * @log
- * @region
- *
- * Check whether a region is being resync'ed on a remote node.
- * If there is any sort of failure when consulting the server,
- * we assume that the region is being remotely recovered.
- *
- * Returns: 1 if remote recovering, 0 otherwise
- */
-static int cluster_is_remote_recovering(struct dirty_log *log, region_t region)
-{
-	int r;
-	int is_recovering;
-	int rdata_size;
-	struct log_c *lc = (struct log_c *)log->context;
-
-	rdata_size = sizeof(is_recovering);
-	r = dm_clog_consult_server(lc->uuid, DM_CLOG_IS_REMOTE_RECOVERING,
-				   (char *)&region, sizeof(region),
-				   (char *)&is_recovering, &rdata_size);
-
-	return (r) ? 1 : is_recovering;
-}
-
-/*
  * cluster_in_sync
  * @log
  * @region
@@ -298,7 +299,6 @@
 	r = dm_clog_consult_server(lc->uuid, DM_CLOG_IN_SYNC,
 				   (char *)&region, sizeof(region),
 				   (char *)&in_sync, &rdata_size);
-
 	return (r) ? 0 : in_sync;
 }
 
@@ -324,15 +324,17 @@
 {
 	int r = 0;
 	int flags;
-	region_t region;
 	struct log_c *lc = (struct log_c *)log->context;
-	struct list_head flush_list;
+	LIST_HEAD(flush_list);
 	struct flush_entry *fe, *tmp_fe;
 
 	spin_lock_irqsave(&lc->flush_lock, flags);
-	flush_list = lc->flush_list;
+	list_splice_init(&lc->flush_list, &flush_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
 
+	if (list_empty(&flush_list))
+		return 0;
+
 	/*
 	 * FIXME: Count up requests, group request types,
 	 * allocate memory to stick all requests in and
@@ -345,25 +347,32 @@
 					   (char *)&fe->region,
 					   sizeof(fe->region),
 					   NULL, NULL);
-		if (r) {
-			r = (r > 0) ? -r : r;
+		if (r)
 			goto fail;
-		}
 	}
 
-	r = dm_clog_consult_server(lc->uuid, DM_CLOG_FLUSH,
-				   NULL, 0, NULL, NULL);
-	if (r)
-		r = (r > 0) ? -r : r;
+	do {
+		r = dm_clog_consult_server(lc->uuid, DM_CLOG_FLUSH,
+					   NULL, 0, NULL, NULL);
+		if (r != -EAGAIN)
+			break;
+
+		DMINFO("Flush conflicts with recovery [delaying]");
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ/4);
+	} while (1);
 
 fail:
+	/*
+	 * We can safely remove these entries, even if failure.
+	 * Calling code will recieve an error and will know that
+	 * the log facility has failed.
+	 */
 	list_for_each_entry_safe(fe, tmp_fe, &flush_list, list) {
 		list_del(&fe->list);
 		mempool_free(fe, flush_entry_pool);
 	}
 
-	r = -EIO;
-
 	return r;
 }
 
@@ -382,7 +391,7 @@
 	struct flush_entry *fe;
 
 	/* Wait for an allocation, but _never_ fail */
-	fe = mempool_alloc(flush_enrty_pool, GFP_KERNEL);
+	fe = mempool_alloc(flush_entry_pool, GFP_NOIO);
 	BUG_ON(!fe);
 
 	spin_lock_irqsave(&lc->flush_lock, flags);
@@ -390,7 +399,7 @@
 	fe->region = region;
 	list_add(&fe->list, &lc->flush_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
-		
+
 	return;
 }
 
@@ -412,17 +421,24 @@
 	struct log_c *lc = (struct log_c *)log->context;
 	struct flush_entry *fe;
 
-	fe = mempool_alloc(flush_enrty_pool, GFP_ATOMIC);
+	/*
+	 * If we fail to allocate, we skip the clearing of
+	 * the region.  This doesn't hurt us in any way, except
+	 * to cause the region to be resync'ed when the
+	 * device is activated next time.
+	 */
+	fe = mempool_alloc(flush_entry_pool, GFP_ATOMIC);
 	if (!fe) {
 		DMERR("Failed to allocate memory to clear region.");
 		return;
 	}
+
 	spin_lock_irqsave(&lc->flush_lock, flags);
 	fe->type = DM_CLOG_CLEAR_REGION;
 	fe->region = region;
 	list_add(&fe->list, &lc->flush_list);
 	spin_unlock_irqrestore(&lc->flush_lock, flags);
-	
+
 	return;
 }
 
@@ -446,12 +462,9 @@
 	rdata_size = sizeof(pkg);
 	r = dm_clog_consult_server(lc->uuid, DM_CLOG_GET_RESYNC_WORK,
 				   NULL, 0,
-				   &pkg, &rdata_size);
-
-	r = (r > 0) ? -r : r;
+				   (char *)&pkg, &rdata_size);
 
 	*region = pkg.r;
-
 	return (r) ? r : pkg.i;
 }
 
@@ -475,7 +488,7 @@
 	pkg.i = in_sync;
 
 	r = dm_clog_consult_server(lc->uuid, DM_CLOG_SET_REGION_SYNC,
-				   &pkg, sizeof(pkg),
+				   (char *)&pkg, sizeof(pkg),
 				   NULL, NULL);
 
 	/* FIXME: It would be nice to be able to report failures */
@@ -518,18 +531,22 @@
 static int cluster_status(struct dirty_log *log, status_type_t status_type,
 			  char *result, unsigned int maxlen)
 {
-	int r;
+	int r = 0;
 	unsigned int sz = maxlen;
 	struct log_c *lc = (struct log_c *)log->context;
 
-	switch(status) {
+	switch(status_type) {
 	case STATUSTYPE_INFO:
 		r = dm_clog_consult_server(lc->uuid, DM_CLOG_STATUS_INFO,
 					   NULL, 0,
 					   result, &sz);
+		/*
+		 * FIXME: If we fail to contact server, we should still
+		 * populate this with parsible results
+		 */
 		break;
 	case STATUSTYPE_TABLE:
-		r = dm_clog_consult_server(lc->uuid, DM_CLOG_STATUS_INFO,
+		r = dm_clog_consult_server(lc->uuid, DM_CLOG_STATUS_TABLE,
 					   NULL, 0,
 					   result, &sz);
 		break;
@@ -537,11 +554,10 @@
 	return (r) ? 0: sz;
 }
 
-status int cluster_get_failure_response(struct dirty_log *log)
+static int cluster_get_failure_response(struct dirty_log *log)
 {
-	struct log_c *lc = (struct log_c *)log->context;
-
-	return lc->failure_response;
+	/* This type of return is not an option */
+	return DMLOG_IOERR_BLOCK;
 }
 
 static struct dirty_log_type _clustered_core_type = {
@@ -554,7 +570,6 @@
 	.resume = cluster_resume,
 	.get_region_size = cluster_get_region_size,
 	.is_clean = cluster_is_clean,
-	.is_remote_recovering = cluster_is_remote_recovering,
 	.in_sync = cluster_in_sync,
 	.flush = cluster_flush,
 	.mark_region = cluster_mark_region,
@@ -576,7 +591,6 @@
 	.resume = cluster_resume,
 	.get_region_size = cluster_get_region_size,
 	.is_clean = cluster_is_clean,
-	.is_remote_recovering = cluster_is_remote_recovering,
 	.in_sync = cluster_in_sync,
 	.flush = cluster_flush,
 	.mark_region = cluster_mark_region,
@@ -596,13 +610,22 @@
 					  flush_entry_free, NULL);
 
 	if (!flush_entry_pool) {
-		DMERR("Unable to create flush_entry_pool:  No memory.");
+		DMWARN("Unable to create flush_entry_pool:  No memory.");
 		return -ENOMEM;
 	}
 
+	r = dm_clog_tfr_init();
+	if (r) {
+		DMWARN("Unable to initialize cluster log communications");
+		mempool_destroy(flush_entry_pool);
+		return r;
+	}
+
 	r = dm_register_dirty_log_type(&_clustered_core_type);
 	if (r) {
 		DMWARN("Couldn't register clustered_core dirty log type");
+		dm_clog_tfr_exit();
+		mempool_destroy(flush_entry_pool);
 		return r;
 	}
 
@@ -610,15 +633,28 @@
 	if (r) {
 		DMWARN("Couldn't register clustered_disk dirty log type");
 		dm_unregister_dirty_log_type(&_clustered_core_type);
+		dm_clog_tfr_exit();
+		mempool_destroy(flush_entry_pool);
 		return r;
 	}
 
-	return r;
+	DMINFO("dm-clulog (built %s %s) installed", __DATE__, __TIME__);
+	return 0;
 }
 
 static void __exit cluster_dirty_log_exit(void)
 {
 	dm_unregister_dirty_log_type(&_clustered_disk_type);
 	dm_unregister_dirty_log_type(&_clustered_core_type);
+	dm_clog_tfr_exit();
+	mempool_destroy(flush_entry_pool);
+	DMINFO("dm-clulog (built %s %s) removed", __DATE__, __TIME__);
 	return;
 }
+
+module_init(cluster_dirty_log_init);
+module_exit(cluster_dirty_log_exit);
+
+MODULE_DESCRIPTION(DM_NAME " mirror cluster-aware log");
+MODULE_AUTHOR("Jonathan Brassow");
+MODULE_LICENSE("GPL");
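
One structural change in dm-clog.c worth calling out: cluster_flush() now detaches the pending mark/clear entries onto a private list with list_splice_init() while holding the spinlock, then issues the server requests and frees the entries with the lock dropped, retrying the flush after a short delay if the server reports -EAGAIN. Below is a user-space sketch of that "splice under the lock, process outside it" idiom with a plain singly linked list and a mutex; the types and names are illustrative, not the driver's.

/*
 * Illustrative only: detach the shared pending list while holding the
 * lock, then walk and free it without the lock held.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_req {
	struct flush_req *next;
	unsigned long region;
};

static struct flush_req *pending;	/* shared, protected by pending_lock */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_region(unsigned long region)
{
	struct flush_req *fe = malloc(sizeof(*fe));

	if (!fe)
		return;		/* mirrors the "skip on allocation failure" clear path */
	fe->region = region;
	pthread_mutex_lock(&pending_lock);
	fe->next = pending;
	pending = fe;
	pthread_mutex_unlock(&pending_lock);
}

static void flush_pending(void)
{
	struct flush_req *local, *fe;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&pending_lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&pending_lock);

	/* ...then do the slow work (here just printing) lock-free. */
	while ((fe = local)) {
		local = fe->next;
		printf("flushing region %lu\n", fe->region);
		free(fe);
	}
}

int main(void)
{
	queue_region(3);
	queue_region(7);
	flush_pending();
	return 0;
}
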

