[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]

[Cluster-devel] Cluster Project branch, STABLE2, updated. cluster-2.02.00-36-gbd99cec



This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "Cluster Project".

http://sources.redhat.com/git/gitweb.cgi?p=cluster.git;a=commitdiff;h=bd99cecbf93fb6105b2503d24d0da3b3b00ed38e

The branch, STABLE2 has been updated
       via  bd99cecbf93fb6105b2503d24d0da3b3b00ed38e (commit)
      from  f8e55ed986aa3fb37e943224689e21f451e79692 (commit)

Those revisions listed above that are new to this repository have
not appeared in any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
commit bd99cecbf93fb6105b2503d24d0da3b3b00ed38e
Author: Abhijith Das <adas redhat com>
Date:   Mon Apr 7 10:39:42 2008 -0500

    gfs-kernel: fix for bz 429343 gfs_glock_is_locked_by_me assertion
    
    This assertion shows up when gfs_readpage gets called without the inode glock
    being held through the madvise syscall when the kernel attempts to readahead.
    
    This patch unlocks the page, locks the inode glock and returns
    AOP_TRUNCATED_PAGE.
    
    I had to change gfs_glock_is_locked_by_me() to return the holder if glock is
    held or NULL otherwise (instead of the TRUE/FALSE integer value it used to
    return earlier).
    
    I also added a new GL_READPAGE flag. If we need to get an inode glock in
    gfs_readpage(), this flag is set on the holder. We must not unlock another
    holder that we might have had on the glock before we entered gfs_readpage;
    checking for this flag before unlocking ensures that.

-----------------------------------------------------------------------

Summary of changes:
 gfs-kernel/src/gfs/glock.h       |   15 +++++++--------
 gfs-kernel/src/gfs/ops_address.c |   29 +++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/gfs-kernel/src/gfs/glock.h b/gfs-kernel/src/gfs/glock.h
index 37630b0..fb1e210 100644
--- a/gfs-kernel/src/gfs/glock.h
+++ b/gfs-kernel/src/gfs/glock.h
@@ -33,16 +33,16 @@
 #define GL_NOCACHE        (0x00000400) /* Release glock when done, don't cache */
 #define GL_SYNC           (0x00000800) /* Sync to disk when no more holders */
 #define GL_NOCANCEL       (0x00001000) /* Don't ever cancel this request */
+#define GL_READPAGE       (0x00002000) /* gfs_readpage() issued this lock request */
 
 #define GLR_TRYFAILED     (13)
 #define GLR_CANCELED      (14)
 
-static __inline__ int
+static __inline__ struct gfs_holder*
 gfs_glock_is_locked_by_me(struct gfs_glock *gl)
 {
 	struct list_head *tmp, *head;
 	struct gfs_holder *gh;
-	int locked = FALSE;
 
 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
@@ -50,14 +50,13 @@ gfs_glock_is_locked_by_me(struct gfs_glock *gl)
 	     tmp != head;
 	     tmp = tmp->next) {
 		gh = list_entry(tmp, struct gfs_holder, gh_list);
-		if (gh->gh_owner == current) {
-			locked = TRUE;
-			break;
-		}
+		if (gh->gh_owner == current)
+			goto out;
 	}
+	gh = NULL;
+out:
 	spin_unlock(&gl->gl_spin);
-
-	return locked;
+	return gh;
 }
 static __inline__ int
 gfs_glock_is_held_excl(struct gfs_glock *gl)
diff --git a/gfs-kernel/src/gfs/ops_address.c b/gfs-kernel/src/gfs/ops_address.c
index cee591d..9360ebe 100644
--- a/gfs-kernel/src/gfs/ops_address.c
+++ b/gfs-kernel/src/gfs/ops_address.c
@@ -272,13 +272,33 @@ gfs_readpage(struct file *file, struct page *page)
 {
 	struct gfs_inode *ip = get_v2ip(page->mapping->host);
 	struct gfs_sbd *sdp = ip->i_sbd;
+	struct gfs_holder *gh;
 	int error;
 
 	atomic_inc(&sdp->sd_ops_address);
 
-	if (gfs_assert_warn(sdp, gfs_glock_is_locked_by_me(ip->i_gl))) {
+	/* When gfs_readpage is called from the sys_madvise code through the 
+	 * readahead code, the inode glock is not held. In this case, we hold 
+	 * the inode glock, unlock the page and return AOP_TRUNCATED_PAGE. The
+	 * caller will then reload the page and call gfs_readpage again. We 
+	 * also add the flag GL_READPAGE to denote that the glock was held in
+	 * this function and if so, we unlock it before leaving this function
+	 */
+	gh = gfs_glock_is_locked_by_me(ip->i_gl);
+	if (!gh) {
+		gh = kmalloc(sizeof(struct gfs_holder), GFP_NOFS);
+		if (!gh)
+			return -ENOBUFS;
+		gfs_holder_init(ip->i_gl, LM_ST_SHARED, 
+				GL_READPAGE | LM_FLAG_ANY, gh);
 		unlock_page(page);
-		return -ENOSYS;
+		error = gfs_glock_nq(gh);
+		if (error) {
+			gfs_holder_uninit(gh);
+			kfree(gh);
+			goto out;
+		}
+		return AOP_TRUNCATED_PAGE;
 	}
 
 	if (!gfs_is_jdata(ip)) {
@@ -293,6 +313,11 @@ gfs_readpage(struct file *file, struct page *page)
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = -EIO;
 
+	if (gh->gh_flags & GL_READPAGE) { /* If we grabbed the glock here */
+		gfs_glock_dq_uninit(gh);
+		kfree(gh);
+	}
+out:
 	return error;
 }
 


hooks/post-receive
--
Cluster Project


[Date Prev][Date Next]   [Thread Prev][Thread Next]   [Thread Index] [Date Index] [Author Index]