[Cluster-devel] cluster/group/gfs_controld cpg.c lock_dlm.h plock.c

teigland at sourceware.org
Mon Nov 20 21:07:20 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	cluster
Changes by:	teigland at sourceware.org	2006-11-20 21:07:19

Modified files:
	group/gfs_controld: cpg.c lock_dlm.h plock.c 

Log message:
	Fix a couple of problems that appear when openais enables flow control:
	- the poll loop spins because plocks are ready to process but are
	ignored while flow control is on; we need to remove the plock fd
	from the poll set when flow control is enabled (just as we do when
	the plock rate limiter is active); see the poll-set sketch below
	- we were not updating the flow control state from openais while
	flow control was enabled unless we received a cpg message; we need
	to update the state periodically while blocked, since we may not
	receive any cpg messages from other nodes that would refresh it
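
For context, here is a minimal sketch of the poll-set handling the first
item describes: parking the plock fd while flow control is on, so poll()
stops waking the daemon for requests it would ignore anyway. This is
illustrative C, not the committed code; plock_fd, set_plock_poll_entry
and the pollfd array layout are assumptions.

#include <poll.h>

extern int message_flow_control_on;  /* set by update_flow_control_status() */
extern int plock_fd;                 /* fd carrying kernel plock requests */

/* Hypothetical helper: a negative fd makes poll() skip the entry,
   which is the standard way to "remove" an fd from a poll set
   without reshuffling the array. */
static void set_plock_poll_entry(struct pollfd *pfd)
{
	if (message_flow_control_on) {
		pfd->fd = -1;
		pfd->events = 0;
	} else {
		pfd->fd = plock_fd;
		pfd->events = POLLIN;
	}
}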

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/cpg.c.diff?cvsroot=cluster&r1=1.9&r2=1.10
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/lock_dlm.h.diff?cvsroot=cluster&r1=1.21&r2=1.22
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/group/gfs_controld/plock.c.diff?cvsroot=cluster&r1=1.27&r2=1.28

--- cluster/group/gfs_controld/cpg.c	2006/10/13 20:00:02	1.9
+++ cluster/group/gfs_controld/cpg.c	2006/11/20 21:07:18	1.10
@@ -131,11 +131,31 @@
 	.cpg_confchg_fn = confchg_cb,
 };
 
-int process_cpg(void)
+void update_flow_control_status(void)
 {
 	cpg_flow_control_state_t flow_control_state;
 	cpg_error_t error;
 	
+	error = cpg_flow_control_state_get(daemon_handle, &flow_control_state);
+	if (error != CPG_OK) {
+		log_error("cpg_flow_control_state_get %d", error);
+		return;
+	}
+
+	if (flow_control_state == CPG_FLOW_CONTROL_ENABLED) {
+		message_flow_control_on = 1;
+		log_debug("flow control on");
+	} else {
+		if (message_flow_control_on)
+			log_debug("flow control off");
+		message_flow_control_on = 0;
+	}
+}
+
+int process_cpg(void)
+{
+	cpg_error_t error;
+
 	got_msg = 0;
 	saved_len = 0;
 	saved_nodeid = 0;
@@ -150,20 +170,7 @@
 	if (got_msg)
 		do_deliver(saved_nodeid, saved_data, saved_len);
 
-	error = cpg_flow_control_state_get(daemon_handle, &flow_control_state);
-	if (error != CPG_OK) {
-		log_error("cpg_flow_control_state_get %d", error);
-		return -1;
-	}
-
-	if (flow_control_state == CPG_FLOW_CONTROL_ENABLED) {
-		message_flow_control_on = 1;
-		log_debug("flow control on");
-	} else {
-		if (message_flow_control_on)
-			log_debug("flow control off");
-		message_flow_control_on = 0;
-	}
+	update_flow_control_status();
 
 	return 0;
 }
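
The refactoring above pulls the state query out of process_cpg() so the
flow control state can be refreshed even when no cpg message arrives. A
sketch of the kind of caller the second log item implies, assuming a
poll()-based main loop; the loop structure and the 500 ms interval are
assumptions, not taken from the patch:

#include <poll.h>

extern int message_flow_control_on;
void update_flow_control_status(void);

static void event_loop(struct pollfd *pollfds, nfds_t nfds)
{
	for (;;) {
		/* While blocked, wake up periodically instead of waiting
		   forever, since no cpg message may arrive to refresh the
		   state; 500 ms is an illustrative value. */
		int timeout = message_flow_control_on ? 500 : -1;

		if (poll(pollfds, nfds, timeout) < 0)
			continue;

		if (message_flow_control_on)
			update_flow_control_status();

		/* ... dispatch ready fds here ... */
	}
}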
--- cluster/group/gfs_controld/lock_dlm.h	2006/10/23 15:44:33	1.21
+++ cluster/group/gfs_controld/lock_dlm.h	2006/11/20 21:07:18	1.22
@@ -280,6 +280,7 @@
 int client_send(int ci, char *buf, int len);
 
 int send_group_message(struct mountgroup *mg, int len, char *buf);
+void update_flow_control_status(void);
 
 void store_plocks(struct mountgroup *mg, int nodeid);
 void retrieve_plocks(struct mountgroup *mg);
--- cluster/group/gfs_controld/plock.c	2006/11/20 18:10:00	1.27
+++ cluster/group/gfs_controld/plock.c	2006/11/20 21:07:18	1.28
@@ -336,8 +336,12 @@
 	int len, rv;
 
 	/* Don't send more messages while the cpg message queue is backed up */
-	if (message_flow_control_on)
-		return 0;
+
+	if (message_flow_control_on) {
+		update_flow_control_status();
+		if (message_flow_control_on)
+			return -EBUSY;
+	}
 
 	/* Every N ops we check how long it's taken to do those N ops.
 	   If it's less than 1000 ms, we don't take any more. */
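
With the plock.c change, the sender re-queries the flow control state
and returns -EBUSY if it is still on, instead of returning 0 as if the
message had been sent. A hypothetical caller can then requeue the op
rather than lose it; send_plock_message(), requeue_op() and struct
plock_op below are invented names for illustration:

#include <errno.h>

struct plock_op;                              /* opaque here */
int send_plock_message(struct plock_op *op);  /* invented sender */
void requeue_op(struct plock_op *op);         /* invented requeue */
void log_error(const char *fmt, ...);

static void try_send(struct plock_op *op)
{
	int rv = send_plock_message(op);

	if (rv == -EBUSY) {
		/* Flow control still on; retry once a later
		   update_flow_control_status() turns it off. */
		requeue_op(op);
		return;
	}
	if (rv < 0)
		log_error("plock send error %d", rv);
}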
