[Cluster-devel] [GFS2 Patch] GFS2: speed up delete/unlink performance for large files

Bob Peterson rpeterso at redhat.com
Thu Sep 15 13:59:56 UTC 2011


Hi,

This patch improves the performance of delete/unlink
operations in a GFS2 file system where the files are large
by adding a layer of metadata read-ahead for indirect blocks.
Mileage will vary, but on my system, the time to delete an 8.6G file
dropped from 22 seconds to about 4.5 seconds.

Regards,

Bob Peterson
Red Hat File Systems

Signed-off-by: Bob Peterson <rpeterso at redhat.com> 
--
 fs/gfs2/bmap.c |   26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7878c47..7efa38c 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -691,7 +691,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 {
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	struct buffer_head *bh = NULL;
-	__be64 *top, *bottom;
+	__be64 *top, *bottom, *t2;
 	u64 bn;
 	int error;
 	int mh_size = sizeof(struct gfs2_meta_header);
@@ -719,7 +719,27 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 	if (error)
 		goto out;
 
-	if (height < ip->i_height - 1)
+	if (height < ip->i_height - 1) {
+		struct buffer_head *rabh;
+
+		for (t2 = top; t2 < bottom; t2++, first = 0) {
+			if (!*t2)
+				continue;
+
+			bn = be64_to_cpu(*t2);
+			rabh = gfs2_getbuf(ip->i_gl, bn, CREATE);
+			if (trylock_buffer(rabh)) {
+				if (buffer_uptodate(rabh)) {
+					unlock_buffer(rabh);
+					brelse(rabh);
+					continue;
+				}
+				rabh->b_end_io = end_buffer_read_sync;
+				submit_bh(READA | REQ_META, rabh);
+				continue;
+			}
+			brelse(rabh);
+		}
 		for (; top < bottom; top++, first = 0) {
 			if (!*top)
 				continue;
@@ -731,7 +751,7 @@ static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 			if (error)
 				break;
 		}
-
+	}
 out:
 	brelse(bh);
 	return error;




More information about the Cluster-devel mailing list