[Cluster-devel] [GFS2 PATCH] Don't output rgrps at every bit change during unlink

Bob Peterson rpeterso at redhat.com
Wed Mar 16 13:54:10 UTC 2011


Hi,

This is another performance enhancement to speed up clustered unlinks.
Functions __gfs2_free_data and __gfs2_free_meta were outputting the
rgrp and attaching it to the journal transaction on every call.  This
patch adds a new rlist function, gfs2_rlist_out, which outputs all
the rgrps in an rlist; do_strip calls it once, after all the bits
have been changed.  This is all done under the same transaction, so
there should be no risk of corruption.  The corresponding wrappers,
gfs2_free_data and gfs2_free_meta, now get the rgrp back from the
helper functions and output it themselves.
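
In case it helps to see the idea outside the patch, here is a small
stand-alone sketch (plain C, not GFS2 code; the toy_* types and helpers
are made up purely for illustration).  With the old pattern every freed
extent outputs its rgrp, so stripping a file whose extents all live in
one rgrp outputs that rgrp many times; with the new pattern each
distinct rgrp is output exactly once after all the bitmap changes are
done.

#include <stdio.h>

/* Toy stand-ins for a resource group and an rlist. */
struct toy_rgrp {
	int id;
	unsigned long rd_free;	/* free-block count */
	int outputs;		/* times this rgrp was "output" to the journal */
};

struct toy_rlist {
	struct toy_rgrp *rgrps[8];
	unsigned int nr;
};

/* Old pattern: every freed extent outputs its rgrp immediately. */
static void free_extent_old(struct toy_rgrp *rgd, unsigned int blen)
{
	rgd->rd_free += blen;
	rgd->outputs++;			/* one output per call */
}

/* New pattern: just adjust the counts and remember the rgrp. */
static void free_extent_new(struct toy_rlist *rlist, struct toy_rgrp *rgd,
			    unsigned int blen)
{
	unsigned int x;

	rgd->rd_free += blen;
	for (x = 0; x < rlist->nr; x++)
		if (rlist->rgrps[x] == rgd)
			return;		/* already queued */
	rlist->rgrps[rlist->nr++] = rgd;
}

/* Analogue of gfs2_rlist_out(): one output per distinct rgrp. */
static void rlist_out(struct toy_rlist *rlist)
{
	unsigned int x;

	for (x = 0; x < rlist->nr; x++)
		rlist->rgrps[x]->outputs++;
}

int main(void)
{
	struct toy_rgrp a = { .id = 1 }, b = { .id = 2 };
	struct toy_rlist rlist = { .nr = 0 };
	int i;

	for (i = 0; i < 100; i++)	/* old way: 100 outputs */
		free_extent_old(&a, 4);

	for (i = 0; i < 100; i++)	/* new way: queue only... */
		free_extent_new(&rlist, &b, 4);
	rlist_out(&rlist);		/* ...then one output */

	printf("old: free=%lu outputs=%d\n", a.rd_free, a.outputs);
	printf("new: free=%lu outputs=%d\n", b.rd_free, b.outputs);
	return 0;
}

In the patch itself the per-rgrp output work is the
gfs2_trans_add_bh/gfs2_rgrp_out/gfs2_trans_add_rg sequence, which for
the do_strip path now lives in gfs2_rlist_out.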

Regards,

Bob Peterson
Red Hat File Systems

Signed-off-by: Bob Peterson <rpeterso at redhat.com> 
---
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index ef3dc4b..5076896 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -876,6 +876,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 		btotal += blen;
 	}
 
+	gfs2_rlist_out(&rlist);
 	gfs2_statfs_change(sdp, 0, +btotal, 0);
 	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
 			  ip->i_inode.i_gid);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index cf930cd..3ab858f 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1602,21 +1602,17 @@ rgrp_error:
  *
  */
 
-void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
+struct gfs2_rgrpd *__gfs2_free_data(struct gfs2_inode *ip, u64 bstart,
+				    u32 blen)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *rgd;
 
 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
-	if (!rgd)
-		return;
 	trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
-	rgd->rd_free += blen;
-
-	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-
-	gfs2_trans_add_rg(rgd);
+	if (rgd)
+		rgd->rd_free += blen;
+	return rgd;
 }
 
 /**
@@ -1630,8 +1626,14 @@ void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
 void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct gfs2_rgrpd *rgd;
 
-	__gfs2_free_data(ip, bstart, blen);
+	rgd = __gfs2_free_data(ip, bstart, blen);
+	if (rgd) {
+		gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
+		gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+		gfs2_trans_add_rg(rgd);
+	}
 	gfs2_statfs_change(sdp, 0, +blen, 0);
 	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
 }
@@ -1644,22 +1646,19 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
  *
  */
 
-void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
+struct gfs2_rgrpd *__gfs2_free_meta(struct gfs2_inode *ip, u64 bstart,
+				    u32 blen)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct gfs2_rgrpd *rgd;
 
 	rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
-	if (!rgd)
-		return;
 	trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
-	rgd->rd_free += blen;
+	if (rgd)
+		rgd->rd_free += blen;
 
-	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
-	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-
-	gfs2_trans_add_rg(rgd);
 	gfs2_meta_wipe(ip, bstart, blen);
+	return rgd;
 }
 
 /**
@@ -1673,8 +1672,15 @@ void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct gfs2_rgrpd *rgd;
 
-	__gfs2_free_meta(ip, bstart, blen);
+	rgd = __gfs2_free_meta(ip, bstart, blen);
+	if (rgd) {
+		gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
+		gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+
+		gfs2_trans_add_rg(rgd);
+	}
 	gfs2_statfs_change(sdp, 0, +blen, 0);
 	gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
 }
@@ -1850,7 +1856,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 
@@ -1867,3 +1873,21 @@ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
 	}
 }
 
+/**
+ * gfs2_rlist_out - output a resource group list and add to transaction
+ * @rlist: the list of resource groups
+ *
+ */
+
+void gfs2_rlist_out(struct gfs2_rgrp_list *rlist)
+{
+	unsigned int x;
+	struct gfs2_rgrpd *rgd;
+
+	for (x = 0; x < rlist->rl_rgrps; x++) {
+		rgd = rlist->rl_ghs[x].gh_gl->gl_object;
+		gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
+		gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+		gfs2_trans_add_rg(rgd);
+	}
+}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index a80e303..a3819da 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -52,9 +52,11 @@ extern int gfs2_ri_update(struct gfs2_inode *ip);
 extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
 extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
 
-extern void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern struct gfs2_rgrpd *__gfs2_free_data(struct gfs2_inode *ip, u64 bstart,
+					   u32 blen);
 extern void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern struct gfs2_rgrpd *__gfs2_free_meta(struct gfs2_inode *ip, u64 bstart,
+					   u32 blen);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
 extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
 extern void gfs2_unlink_di(struct inode *inode);
@@ -72,6 +74,7 @@ extern void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
 			   u64 block);
 extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state);
 extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+extern void gfs2_rlist_out(struct gfs2_rgrp_list *rlist);
 extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
 extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
 



