[Cluster-devel] cluster/gfs-kernel/src/gfs glock.c incore.h io ...

wcheng at sourceware.org
Sun Jun 17 05:16:53 UTC 2007


CVSROOT:	/cvs/cluster
Module name:	cluster
Branch: 	RHEL5
Changes by:	wcheng at sourceware.org	2007-06-17 05:16:52

Modified files:
	gfs-kernel/src/gfs: glock.c incore.h ioctl.c super.c 

Log message:
	bugzilla 239729:
	
	Backport the RHEL4 glock trimming patch.

Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.29.2.1&r2=1.29.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/incore.h.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.30.2.1&r2=1.30.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ioctl.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.13.2.1&r2=1.13.2.2
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/super.c.diff?cvsroot=cluster&only_with_tag=RHEL5&r1=1.22&r2=1.22.2.1

--- cluster/gfs-kernel/src/gfs/glock.c	2006/12/21 20:55:24	1.29.2.1
+++ cluster/gfs-kernel/src/gfs/glock.c	2007/06/17 05:16:52	1.29.2.2
@@ -41,7 +41,7 @@
 	struct work_struct gr_work;
 };
 
-typedef void (*glock_examiner) (struct gfs_glock * gl);
+typedef void (*glock_examiner) (struct gfs_glock * gl, unsigned int *cnt);
 
 /**
  * relaxed_state_ok - is a requested lock compatible with the current lock mode?
@@ -2495,12 +2495,14 @@
 
 static int
 examine_bucket(glock_examiner examiner,
-	       struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket)
+		struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
+		unsigned int purge_nr)
 {
 	struct glock_plug plug;
 	struct list_head *tmp;
 	struct gfs_glock *gl;
 	int entries;
+	unsigned int p_cnt=purge_nr;
 
 	/* Add "plug" to end of bucket list, work back up list from there */
 	memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2541,10 +2543,45 @@
 
 		write_unlock(&bucket->hb_lock);
 
-		examiner(gl);
+		examiner(gl, &p_cnt);
 	}
 }
 
+static void
+try_purge_iopen(struct gfs_glock *gl, unsigned int *p_count)
+{
+	struct gfs_glock *i_gl;
+
+	if (*p_count == 0)
+		return;
+
+	/* find the associated inode glock */
+	i_gl = get_gl2gl(gl);
+	if (!i_gl) 
+		return;
+
+	/* 
+	 * If the associated inode glock has been in unlocked 
+	 * state, try to purge it.
+	 */
+	if (trylock_on_glock(i_gl)) {
+		if (i_gl->gl_state == LM_ST_UNLOCKED) {
+			*p_count = *p_count - 1;
+			unlock_on_glock(i_gl);
+			atomic_inc(&gl->gl_count);
+			gfs_iopen_go_callback(gl, LM_ST_UNLOCKED);
+			handle_callback(gl, LM_ST_UNLOCKED);
+			spin_lock(&gl->gl_spin);
+			run_queue(gl);
+			spin_unlock(&gl->gl_spin);
+			glock_put(gl);
+		} else 
+			unlock_on_glock(i_gl);
+	}
+
+	return;
+}
+
 /**
  * scan_glock - look at a glock and see if we can reclaim it
  * @gl: the glock to look at
@@ -2560,7 +2597,7 @@
  */
 
 static void
-scan_glock(struct gfs_glock *gl)
+scan_glock(struct gfs_glock *gl,  unsigned int *p_count)
 {
 	if (trylock_on_glock(gl)) {
 		if (queue_empty(gl, &gl->gl_holders)) {
@@ -2582,7 +2619,12 @@
 				goto out;
 			}
 		}
-
+		/* iopen always has holder(s) */
+		if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
+			unlock_on_glock(gl);
+			try_purge_iopen(gl, p_count);
+			goto out;
+		}
 		unlock_on_glock(gl);
 	}
 
@@ -2607,10 +2649,17 @@
 void
 gfs_scand_internal(struct gfs_sbd *sdp)
 {
-	unsigned int x;
+	unsigned int x, purge_nr;
+
+	if (!sdp->sd_tune.gt_glock_purge)
+		purge_nr = 0;
+	else
+		purge_nr = (atomic_read(&sdp->sd_glock_count) -
+			atomic_read(&sdp->sd_glock_held_count)) *
+			sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
 
 	for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
-		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
 		cond_resched();
 	}
 }
@@ -2630,7 +2679,7 @@
  */
 
 static void
-clear_glock(struct gfs_glock *gl)
+clear_glock(struct gfs_glock *gl, unsigned int *unused)
 {
 	struct gfs_sbd *sdp = gl->gl_sbd;
 	struct gfs_gl_hash_bucket *bucket = gl->gl_bucket;
@@ -2695,7 +2744,7 @@
 		cont = FALSE;
 
 		for (x = 0; x < GFS_GL_HASH_SIZE; x++)
-			if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+			if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x], 0))
 				cont = TRUE;
 
 		if (!wait || !cont)
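
A note on the quota arithmetic in the glock.c changes above:
gfs_scand_internal() turns the gt_glock_purge percentage into a
per-bucket purge count by taking the number of unheld glocks
(sd_glock_count minus sd_glock_held_count), scaling it by the
percentage, and dividing it evenly across the hash buckets. Below is a
minimal userspace sketch of that computation, using hypothetical
counter values and an assumed hash size of 8192 (the real
GFS_GL_HASH_SIZE constant lives in incore.h):

#include <stdio.h>

#define GL_HASH_SIZE 8192	/* stand-in for GFS_GL_HASH_SIZE */

int main(void)
{
	unsigned int glock_count = 800000;	/* hypothetical: all glocks */
	unsigned int glock_held_count = 300000;	/* hypothetical: glocks with holders */
	unsigned int glock_purge = 50;		/* tunable: purge 50% of unheld glocks */

	/* same expression as in gfs_scand_internal() */
	unsigned int purge_nr = (glock_count - glock_held_count) *
		glock_purge / 100 / GL_HASH_SIZE;

	/* (800000 - 300000) * 50 / 100 / 8192 = 30 */
	printf("purge up to %u iopen glocks per bucket per scan\n", purge_nr);
	return 0;
}

Each bucket scan then decrements this quota in try_purge_iopen() until
it is exhausted, so gt_glock_purge = 0 (the default, see the super.c
hunk below) disables trimming entirely.
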
--- cluster/gfs-kernel/src/gfs/incore.h	2006/12/21 20:55:24	1.30.2.1
+++ cluster/gfs-kernel/src/gfs/incore.h	2007/06/17 05:16:52	1.30.2.2
@@ -878,6 +878,7 @@
 	unsigned int gt_logd_secs; /* Update log tail as AIL flushes */
 	unsigned int gt_quotad_secs; /* Sync changes to quota file, clean*/
 	unsigned int gt_inoded_secs; /* Toss unused inodes */
+	unsigned int gt_glock_purge; /* Purge glock */
 
 	unsigned int gt_quota_simul_sync; /* Max # quotavals to sync at once */
 	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
--- cluster/gfs-kernel/src/gfs/ioctl.c	2006/11/13 19:28:36	1.13.2.1
+++ cluster/gfs-kernel/src/gfs/ioctl.c	2007/06/17 05:16:52	1.13.2.2
@@ -458,6 +458,7 @@
         gfs_printf("logd_secs %u\n", gt->gt_logd_secs);
         gfs_printf("quotad_secs %u\n", gt->gt_quotad_secs);
         gfs_printf("inoded_secs %u\n", gt->gt_inoded_secs);
+	gfs_printf("glock_purge %u\n", gt->gt_glock_purge);
         gfs_printf("quota_simul_sync %u\n", gt->gt_quota_simul_sync);
         gfs_printf("quota_warn_period %u\n", gt->gt_quota_warn_period);
         gfs_printf("atime_quantum %u\n", gt->gt_atime_quantum);
@@ -606,6 +607,11 @@
 		tune_set(gt_inoded_secs, x);
 		wake_up_process(sdp->sd_inoded_process);
 
+	} else if (strcmp(param, "glock_purge") == 0) {
+		if (sscanf(value, "%u", &x) != 1)
+			return -EINVAL;
+		tune_set(gt_glock_purge, x);
+
 	} else if (strcmp(param, "quota_simul_sync") == 0) {
 		if (sscanf(value, "%u", &x) != 1 || !x)
 			return -EINVAL;
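
The ioctl.c hunk above wires the new tunable into the gettune/settune
paths used by gfs_tool. A small userspace sketch of the settune-side
parsing (the names here are illustrative, not the kernel's API):

#include <stdio.h>
#include <string.h>

/* mimic the new "glock_purge" branch: accept any unsigned value */
static int parse_glock_purge(const char *param, const char *value,
			     unsigned int *out)
{
	unsigned int x;

	if (strcmp(param, "glock_purge") != 0)
		return -1;
	if (sscanf(value, "%u", &x) != 1)
		return -1;	/* the kernel returns -EINVAL here */
	*out = x;		/* tune_set(gt_glock_purge, x) in the kernel */
	return 0;
}

int main(void)
{
	unsigned int gt_glock_purge = 0;

	if (parse_glock_purge("glock_purge", "50", &gt_glock_purge) == 0)
		printf("glock_purge = %u%%\n", gt_glock_purge);
	return 0;
}

Note that, unlike quota_simul_sync just below it in gfs_set_tune's
chain, glock_purge accepts 0, since 0 is the "disabled" setting.
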
--- cluster/gfs-kernel/src/gfs/super.c	2006/10/13 19:57:07	1.22
+++ cluster/gfs-kernel/src/gfs/super.c	2007/06/17 05:16:52	1.22.2.1
@@ -60,6 +60,7 @@
 	gt->gt_logd_secs = 1;
 	gt->gt_quotad_secs = 5;
 	gt->gt_inoded_secs = 15;
+	gt->gt_glock_purge = 0;
 	gt->gt_quota_simul_sync = 64;
 	gt->gt_quota_warn_period = 10;
 	gt->gt_atime_quantum = 3600;
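
The super.c hunk sets the default to 0, i.e. no trimming happens unless
the administrator opts in. With this patch applied, something like

	gfs_tool settune <mountpoint> glock_purge 50

should ask scand to trim roughly 50% of the unused glocks on each pass
(command shown from memory; check gfs_tool(8) on the target system).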
