[Cluster-devel] cluster/gfs-kernel/src/gfs glock.c incore.h io ...
wcheng at sourceware.org
wcheng at sourceware.org
Mon Jan 22 07:43:53 UTC 2007
CVSROOT: /cvs/cluster
Module name: cluster
Branch: RHEL4
Changes by: wcheng at sourceware.org 2007-01-22 07:43:52
Modified files:
gfs-kernel/src/gfs: glock.c incore.h ioctl.c super.c
Log message:
Bugzilla 214239 - trimming glock
Add glock_purge into gfs_tool to trim glock(s). The patch walks through
the glock hash table to examine inode glock state. If the lock is in the
unlocked state, it is purged together with its iopen glock, based on the
requested purge count. GFS-only solution - no base kernel change.
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/glock.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.20.2.3&r2=1.20.2.4
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/incore.h.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.17.2.7&r2=1.17.2.8
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/ioctl.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.7.2.5&r2=1.7.2.6
http://sourceware.org/cgi-bin/cvsweb.cgi/cluster/gfs-kernel/src/gfs/super.c.diff?cvsroot=cluster&only_with_tag=RHEL4&r1=1.12.2.5&r2=1.12.2.6
--- cluster/gfs-kernel/src/gfs/glock.c 2006/05/17 15:26:03 1.20.2.3
+++ cluster/gfs-kernel/src/gfs/glock.c 2007/01/22 07:43:52 1.20.2.4
@@ -41,7 +41,7 @@
struct work_struct gr_work;
};
-typedef void (*glock_examiner) (struct gfs_glock * gl);
+typedef void (*glock_examiner) (struct gfs_glock * gl, unsigned int *p_cnt);
/**
* relaxed_state_ok - is a requested lock compatible with the current lock mode?
@@ -2483,12 +2483,14 @@
static int
examine_bucket(glock_examiner examiner,
- struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket)
+ struct gfs_sbd *sdp, struct gfs_gl_hash_bucket *bucket,
+ unsigned int purge_nr)
{
struct glock_plug plug;
struct list_head *tmp;
struct gfs_glock *gl;
int entries;
+ unsigned int p_cnt=purge_nr;
/* Add "plug" to end of bucket list, work back up list from there */
memset(&plug.gl_flags, 0, sizeof(unsigned long));
@@ -2529,10 +2531,43 @@
write_unlock(&bucket->hb_lock);
- examiner(gl);
+ examiner(gl, &p_cnt);
}
}
+static void
+try_purge_iopen(struct gfs_glock *gl, unsigned int *p_count)
+{
+ struct gfs_glock *i_gl;
+
+ if (*p_count == 0)
+ return;
+
+ /* find the associated inode glock */
+ i_gl = gl2gl(gl);
+ if (!i_gl)
+ return;
+
+ /*
+ * If the associated inode glock has been in unlocked
+ * state for a while, try to purge it.
+ */
+ if (trylock_on_glock(i_gl)) {
+ if (i_gl->gl_state == LM_ST_UNLOCKED) {
+ *p_count = *p_count - 1;
+ unlock_on_glock(i_gl);
+ gfs_iopen_go_callback(gl, LM_ST_UNLOCKED);
+ handle_callback(gl, LM_ST_UNLOCKED);
+ spin_lock(&gl->gl_spin);
+ run_queue(gl);
+ spin_unlock(&gl->gl_spin);
+ } else
+ unlock_on_glock(i_gl);
+ }
+
+ return;
+}
+
/**
* scan_glock - look at a glock and see if we can reclaim it
* @gl: the glock to look at
@@ -2548,7 +2583,7 @@
*/
static void
-scan_glock(struct gfs_glock *gl)
+scan_glock(struct gfs_glock *gl, unsigned int *p_count)
{
if (trylock_on_glock(gl)) {
if (queue_empty(gl, &gl->gl_holders)) {
@@ -2570,10 +2605,14 @@
goto out;
}
}
-
+ /* iopen always has holder(s) */
+ if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
+ unlock_on_glock(gl);
+ try_purge_iopen(gl, p_count);
+ goto out;
+ }
unlock_on_glock(gl);
}
-
out:
glock_put(gl); /* see examine_bucket() */
}
@@ -2595,10 +2634,17 @@
void
gfs_scand_internal(struct gfs_sbd *sdp)
{
- unsigned int x;
+ unsigned int x, purge_nr;
+
+ if (!sdp->sd_tune.gt_glock_purge)
+ purge_nr = 0;
+ else
+ purge_nr = (atomic_read(&sdp->sd_glock_count) -
+ atomic_read(&sdp->sd_glock_held_count)) *
+ sdp->sd_tune.gt_glock_purge / 100 / GFS_GL_HASH_SIZE;
for (x = 0; x < GFS_GL_HASH_SIZE; x++) {
- examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
+ examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x], purge_nr);
cond_resched();
}
}
@@ -2618,7 +2664,7 @@
*/
static void
-clear_glock(struct gfs_glock *gl)
+clear_glock(struct gfs_glock *gl, unsigned int *unused)
{
struct gfs_sbd *sdp = gl->gl_sbd;
struct gfs_gl_hash_bucket *bucket = gl->gl_bucket;
@@ -2683,7 +2729,8 @@
cont = FALSE;
for (x = 0; x < GFS_GL_HASH_SIZE; x++)
- if (examine_bucket(clear_glock, sdp, &sdp->sd_gl_hash[x]))
+ if (examine_bucket(clear_glock, sdp,
+ &sdp->sd_gl_hash[x], 0))
cont = TRUE;
if (!wait || !cont)
--- cluster/gfs-kernel/src/gfs/incore.h 2006/10/10 18:35:27 1.17.2.7
+++ cluster/gfs-kernel/src/gfs/incore.h 2007/01/22 07:43:52 1.17.2.8
@@ -867,6 +867,7 @@
unsigned int gt_logd_secs; /* Update log tail as AIL flushes */
unsigned int gt_quotad_secs; /* Sync changes to quota file, clean*/
unsigned int gt_inoded_secs; /* Toss unused inodes */
+ unsigned int gt_glock_purge; /* Purge glock */
unsigned int gt_quota_simul_sync; /* Max # quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
--- cluster/gfs-kernel/src/gfs/ioctl.c 2007/01/10 17:53:53 1.7.2.5
+++ cluster/gfs-kernel/src/gfs/ioctl.c 2007/01/22 07:43:52 1.7.2.6
@@ -458,6 +458,7 @@
gfs_printf("logd_secs %u\n", gt->gt_logd_secs);
gfs_printf("quotad_secs %u\n", gt->gt_quotad_secs);
gfs_printf("inoded_secs %u\n", gt->gt_inoded_secs);
+ gfs_printf("glock_purge %u\n", gt->gt_glock_purge);
gfs_printf("quota_simul_sync %u\n", gt->gt_quota_simul_sync);
gfs_printf("quota_warn_period %u\n", gt->gt_quota_warn_period);
gfs_printf("atime_quantum %u\n", gt->gt_atime_quantum);
@@ -606,6 +607,11 @@
tune_set(gt_inoded_secs, x);
wake_up_process(sdp->sd_inoded_process);
+ } else if (strcmp(param, "glock_purge") == 0) {
+ if (sscanf(value, "%u", &x) != 1)
+ return -EINVAL;
+ tune_set(gt_glock_purge, x);
+
} else if (strcmp(param, "quota_simul_sync") == 0) {
if (sscanf(value, "%u", &x) != 1 || !x)
return -EINVAL;
--- cluster/gfs-kernel/src/gfs/super.c 2006/09/20 04:45:48 1.12.2.5
+++ cluster/gfs-kernel/src/gfs/super.c 2007/01/22 07:43:52 1.12.2.6
@@ -59,6 +59,7 @@
gt->gt_logd_secs = 1;
gt->gt_quotad_secs = 5;
gt->gt_inoded_secs = 15;
+ gt->gt_glock_purge = 0;
gt->gt_quota_simul_sync = 64;
gt->gt_quota_warn_period = 10;
gt->gt_atime_quantum = 3600;
More information about the Cluster-devel
mailing list