
[Cluster-devel] [GFS2] Introduce two log thresholds



From 58e3f72430511ae57378bf96b062ba99ef6575ad Mon Sep 17 00:00:00 2001
From: Steven Whitehouse <swhiteho redhat com>
Date: Fri, 9 Nov 2007 11:16:23 +0000
Subject: [PATCH] [GFS2] Introduce two log thresholds

This patch introduces two new log thresholds:

 o thresh1 is the point at which we wake up gfs2_logd due to the pinned
   block count. It is initialised at mount time to 1/3rd of the size
   of the journal. Currently it does not change over the course of
   the mount, but the intention is to adjust it automatically based
   upon various conditions; that automatic adjustment will be the
   subject of later patches.

 o thresh2 is the point at which we wake up gfs2_logd due to the total
   of pinned blocks and AIL blocks. It is initialised at mount time
   to 2/3rds of the size of the journal. The reason for not making it
   equal to 100% of the journal size is to give gfs2_logd time to start
   up and do some work before the processes filling the journal end up
   stalling while waiting on journal flushes. Both thresholds, and the
   wake-up test based upon them, are sketched below.
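
As a point of reference, the intended behaviour can be sketched as
follows. This is only an illustration: the patch itself open-codes the
test in gfs2_log_commit() and gfs2_logd(), and the helper name used
here is hypothetical.

	/*
	 * At mount time (init_journal) the thresholds are fixed
	 * fractions of the journal size:
	 *
	 *	atomic_set(&sdp->sd_log_thresh1, sdp->sd_jdesc->jd_blocks/3);
	 *	atomic_set(&sdp->sd_log_thresh2, 2*sdp->sd_jdesc->jd_blocks/3);
	 */
	static inline int gfs2_logd_wake_reqd(struct gfs2_sbd *sdp)
	{
		/* used = journal size - free blocks, i.e. the total
		   of pinned blocks plus AIL blocks */
		unsigned int used = sdp->sd_jdesc->jd_blocks -
				    atomic_read(&sdp->sd_log_blks_free);

		return (atomic_read(&sdp->sd_log_pinned) >
			atomic_read(&sdp->sd_log_thresh1)) ||
		       (used > atomic_read(&sdp->sd_log_thresh2));
	}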

At the same time, the incore_log_blocks tunable is removed, since it
was both arbitrary (a fixed threshold set to a number plucked out of
the air) and compared against the wrong quantity (the amount of
metadata in the journal rather than the total number of used blocks).

Also, since the free block count is now an atomic variable, a number
of these comparisons no longer need locking, so the log lock has been
removed around some of the operations.
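
For example, returning freed blocks to the log no longer requires
taking gfs2_log_lock() around the update, since both the update and
the assertion access the counter atomically (this is the pattern now
used in gfs2_log_release() and log_pull_tail() below):

	atomic_add(blks, &sdp->sd_log_blks_free);
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);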

Signed-off-by: Steven Whitehouse <swhiteho redhat com>

diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 330f4c7..d2b52d7 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -409,7 +409,6 @@ struct gfs2_tune {
 	spinlock_t gt_spin;
 
 	unsigned int gt_demote_secs; /* Cache retention for unheld glock */
-	unsigned int gt_incore_log_blocks;
 	unsigned int gt_log_flush_secs;
 
 	unsigned int gt_recoverd_secs;
@@ -583,6 +582,7 @@ struct gfs2_sbd {
 	unsigned int sd_log_commited_databuf;
 	unsigned int sd_log_commited_revoke;
 
+	atomic_t sd_log_pinned;
 	unsigned int sd_log_num_buf;
 	unsigned int sd_log_num_revoke;
 	unsigned int sd_log_num_rg;
@@ -594,6 +594,8 @@ struct gfs2_sbd {
 	struct list_head sd_log_le_databuf;
 	struct list_head sd_log_le_ordered;
 
+	atomic_t sd_log_thresh1;
+	atomic_t sd_log_thresh2;
 	atomic_t sd_log_blks_free;
 	struct mutex sd_log_reserve_mutex;
 
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 4dcc7a8..7b2417c 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -331,11 +331,9 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
 {
 
-	gfs2_log_lock(sdp);
 	atomic_add(blks, &sdp->sd_log_blks_free);
-	gfs2_assert_withdraw(sdp,
-			     atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
-	gfs2_log_unlock(sdp);
+	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+				  sdp->sd_jdesc->jd_blocks);
 	up_read(&sdp->sd_log_flush_lock);
 }
 
@@ -560,10 +558,9 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
 
 	ail2_empty(sdp, new_tail);
 
-	gfs2_log_lock(sdp);
 	atomic_add(dist, &sdp->sd_log_blks_free);
-	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks);
-	gfs2_log_unlock(sdp);
+	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
+				  sdp->sd_jdesc->jd_blocks);
 
 	sdp->sd_log_tail = new_tail;
 }
@@ -793,6 +790,13 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
  * @sdp: the filesystem
  * @tr: the transaction
  *
+ * We wake up gfs2_logd if the number of pinned blocks exceeds thresh1
+ * or the total number of used blocks (pinned blocks plus AIL blocks)
+ * is greater than thresh2.
+ *
+ * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
+ * journal size.
+ *
  * Returns: errno
  */
 
@@ -804,10 +808,10 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
 	sdp->sd_vfs->s_dirt = 1;
 	up_read(&sdp->sd_log_flush_lock);
 
-	gfs2_log_lock(sdp);
-	if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
+	if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
+	    ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
+	    atomic_read(&sdp->sd_log_thresh2)))
 		wake_up_process(sdp->sd_logd_process);
-	gfs2_log_unlock(sdp);
 }
 
 /**
@@ -874,7 +878,6 @@ int gfs2_logd(void *data)
 {
 	struct gfs2_sbd *sdp = data;
 	unsigned long t;
-	int need_flush;
 
 	while (!kthread_should_stop()) {
 		/* Advance the log tail */
@@ -883,10 +886,9 @@ int gfs2_logd(void *data)
 		    gfs2_tune_get(sdp, gt_log_flush_secs) * HZ;
 
 		gfs2_ail1_empty(sdp, DIO_ALL);
-		gfs2_log_lock(sdp);
-		need_flush = sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks);
-		gfs2_log_unlock(sdp);
-		if (need_flush || time_after_eq(jiffies, t)) {
+		if ((atomic_read(&sdp->sd_log_pinned) >
+		     atomic_read(&sdp->sd_log_thresh1)) ||
+		    time_after_eq(jiffies, t)) {
 			gfs2_log_flush(sdp, NULL);
 			sdp->sd_log_flush_time = jiffies;
 		}
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index fae59d6..29a8689 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -52,6 +52,7 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
 	if (bd->bd_ail)
 		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
 	get_bh(bh);
+	atomic_inc(&sdp->sd_log_pinned);
 }
 
 /**
@@ -90,6 +91,7 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bh);
+	atomic_dec(&sdp->sd_log_pinned);
 }
 
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 1bba6ac..3a29340 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -76,7 +76,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	mutex_init(&sdp->sd_quota_mutex);
 
 	spin_lock_init(&sdp->sd_log_lock);
-
+	atomic_set(&sdp->sd_log_pinned, 0);
 	INIT_LIST_HEAD(&sdp->sd_log_le_buf);
 	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
 	INIT_LIST_HEAD(&sdp->sd_log_le_rg);
@@ -340,6 +340,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
 	if (sdp->sd_args.ar_spectator) {
 		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
 		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+		atomic_set(&sdp->sd_log_thresh1, sdp->sd_jdesc->jd_blocks/3);
+		atomic_set(&sdp->sd_log_thresh2, 2*sdp->sd_jdesc->jd_blocks/3);
 	} else {
 		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
 			fs_err(sdp, "can't mount journal #%u\n",
@@ -377,6 +379,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
 			goto fail_jinode_gh;
 		}
 		atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
+		atomic_set(&sdp->sd_log_thresh1, sdp->sd_jdesc->jd_blocks/3);
+		atomic_set(&sdp->sd_log_thresh2, 2*sdp->sd_jdesc->jd_blocks/3);
 	}
 
 	if (sdp->sd_lockstruct.ls_first) {
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2e74792..7a92c21 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -52,7 +52,6 @@ void gfs2_tune_init(struct gfs2_tune *gt)
 	spin_lock_init(&gt->gt_spin);
 
 	gt->gt_demote_secs = 300;
-	gt->gt_incore_log_blocks = 1024;
 	gt->gt_log_flush_secs = 60;
 	gt->gt_recoverd_secs = 60;
 	gt->gt_logd_secs = 1;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 7f828a2..76b7bef 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -426,7 +426,6 @@ static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
 TUNE_ATTR_2(name, name##_store)
 
 TUNE_ATTR(demote_secs, 0);
-TUNE_ATTR(incore_log_blocks, 0);
 TUNE_ATTR(log_flush_secs, 0);
 TUNE_ATTR(quota_warn_period, 0);
 TUNE_ATTR(quota_quantum, 0);
@@ -447,7 +446,6 @@ TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
 static struct attribute *tune_attrs[] = {
 	&tune_attr_demote_secs.attr,
-	&tune_attr_incore_log_blocks.attr,
 	&tune_attr_log_flush_secs.attr,
 	&tune_attr_quota_warn_period.attr,
 	&tune_attr_quota_quantum.attr,
-- 
1.5.1.2



