[Cluster-devel] cluster/rgmanager/src/resources fs.sh

lhh at sourceware.org lhh at sourceware.org
Wed Jul 26 15:31:22 UTC 2006


CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL4U4
Changes by:	lhh at sourceware.org	2006-07-26 15:31:20

Modified files:
	rgmanager/src/resources: fs.sh 

Log message:
	Change ordering in fs.sh to avoid the possibility of a lock being reclaimed on the fs we're trying to unmount

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/rgmanager/src/resources/fs.sh.diff?cvsroot=cluster&only_with_tag=RHEL4U4&r1=1.4.2.14&r2=1.4.2.14.2.1

--- cluster/rgmanager/src/resources/fs.sh	2006/06/16 19:57:52	1.4.2.14
+++ cluster/rgmanager/src/resources/fs.sh	2006/07/26 15:31:20	1.4.2.14.2.1
@@ -1061,7 +1061,8 @@
 	typeset -i ret_val=0
 	typeset -i try=1
 	typeset -i max_tries=3		# how many times to try umount
-	typeset -i sleep_time=2		# time between each umount failure
+	typeset -i sleep_time=5		# time between each umount failure
+	typeset -i nfslock_reclaim=0
 	typeset done=""
 	typeset umount_failed=""
 	typeset force_umount=""
@@ -1157,11 +1158,8 @@
 	          		    mkdir -p $mp/.clumanager/statd
 				    # Copy out the notify list; our 
 				    # IPs are already torn down
-				    if notify_list_store $mp/.clumanager/statd
-				    then
-				      notify_list_broadcast \
-				        $mp/.clumanager/statd
-				    fi
+				    notify_list_store $mp/.clumanager/statd
+				    nfslock_reclaim=1
 				  fi
 				fi
 			fi
@@ -1186,6 +1184,11 @@
 		fi
 	done # while 
 
+	if [ $nfslock_reclaim -eq 1 ]; then
+		# If we have this flag set, do a full reclaim broadcast
+		notify_list_broadcast $mp/.clumanager/statd
+	fi
+
 	if [ -n "$umount_failed" ]; then
 		ocf_log err "'umount $mp' failed, error=$ret_val"
 




More information about the Cluster-devel mailing list