[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[Cluster-devel] [GFS2] Faster gfs2_bitfit algorithm



Hi,

This version of the gfs2_bitfit algorithm is up to four
times faster than its predecessor.

Regards,

Bob Peterson

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
--
 fs/gfs2/rgrp.c |   79 +++++++++++++++++++++++++++++++++----------------------
 1 file changed, 47 insertions(+), 32 deletions(-)

diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 4291375..4dee88b 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -33,6 +33,16 @@
 #define BFITNOENT ((u32)~0)
 #define NO_BLOCK ((u64)~0)
 
+#if BITS_PER_LONG == 32
+#define LBITMASK   (unsigned long)(0x55555555)
+#define LBITSKIP55 (unsigned long)(0x55555555)
+#define LBITSKIP00 (unsigned long)(0x00000000)
+#else
+#define LBITMASK   (unsigned long)(0x5555555555555555)
+#define LBITSKIP55 (unsigned long)(0x5555555555555555)
+#define LBITSKIP00 (unsigned long)(0x0000000000000000)
+#endif
+
 /*
  * These routines are used by the resource group routines (rgrp.c)
  * to keep track of block allocation.  Each block is represented by two
@@ -138,43 +148,48 @@ static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
 static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
 		       u8 old_state)
 {
-	const u8 *byte;
-	u32 blk = goal;
-	unsigned int bit, bitlong;
-	const unsigned long *plong;
-#if BITS_PER_LONG == 32
-	const unsigned long plong55 = 0x55555555;
-#else
-	const unsigned long plong55 = 0x5555555555555555;
-#endif
-
-	byte = buffer + (goal / GFS2_NBBY);
-	plong = (const unsigned long *)(buffer + (goal / GFS2_NBBY));
-	bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
-	bitlong = bit;
-
-	while (byte < buffer + buflen) {
-
-		if (bitlong == 0 && old_state == 0 && *plong == plong55) {
-			plong++;
-			byte += sizeof(unsigned long);
-			blk += sizeof(unsigned long) * GFS2_NBBY;
-			continue;
+	const u8 *byte, *start, *end;
+	int bit, startbit;
+	u32 g1, g2, misaligned;
+	unsigned long *plong, lskipval;
+	unsigned long lskipvals[4] = {LBITSKIP55, LBITSKIP00,
+				      LBITSKIP55, LBITSKIP00};
+
+	lskipval = lskipvals[old_state];
+	g1 = (goal / GFS2_NBBY);
+	start = buffer + g1;
+	byte = start;
+        end = buffer + buflen;
+	g2 = ALIGN(g1, sizeof(unsigned long));
+	plong = (unsigned long *)(buffer + g2);
+	startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
+	misaligned = g2 - g1;
+	while (byte < end) {
+
+		if (bit == 0 && !misaligned) {
+			if (((*plong) & LBITMASK) == lskipval) {
+				plong++;
+				byte += sizeof(unsigned long);
+				continue;
+			}
+		}
+		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
+			return goal +
+				(((byte - start) * GFS2_NBBY) +
+				 ((bit - startbit) >> 1));
 		}
-		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state)
-			return blk;
 		bit += GFS2_BIT_SIZE;
-		if (bit >= 8) {
+		if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
 			bit = 0;
 			byte++;
+			/* If we were misaligned, adjust the counter */
+			if (misaligned)
+				misaligned--;
+			else { /* If we were aligned, we're not anymore. */
+				misaligned += sizeof(unsigned long) - 1;
+				plong++;
+			}
 		}
-		bitlong += GFS2_BIT_SIZE;
-		if (bitlong >= sizeof(unsigned long) * 8) {
-			bitlong = 0;
-			plong++;
-		}
-
-		blk++;
 	}
 
 	return BFITNOENT;



[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]