This is the mail archive of the cluster-cvs@sourceware.org mailing list for the cluster project.

RHEL52 - gfs-kernel: fix for bz 429343 gfs_glock_is_locked_by_me assertion


Gitweb:        http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=072cdf1f10c2ac16e2a20e2fdff75c77beb4c173
Commit:        072cdf1f10c2ac16e2a20e2fdff75c77beb4c173
Parent:        0f993f17b6bbfc8b5f41d9bf5dacc6a392d68081
Author:        Abhijith Das <adas@redhat.com>
AuthorDate:    Mon Apr 7 10:39:42 2008 -0500
Committer:     Abhijith Das <adas@redhat.com>
CommitterDate: Thu Aug 14 13:15:43 2008 -0500

gfs-kernel: fix for bz 429343 gfs_glock_is_locked_by_me assertion

This assertion fires when gfs_readpage() is called without the inode glock
held, which happens when the kernel performs readahead on behalf of the
madvise syscall.
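
For illustration, here is a minimal user-space sequence that can drive this
path (the mount point and file name are hypothetical, and this is an
assumption about how the path is reached, not a reproducer from the bug
report): madvise(MADV_WILLNEED) on a mapped file asks the kernel to read the
range ahead, and that readahead reaches ->readpage() without the glock held.

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	void *addr;
	int fd = open("/mnt/gfs/somefile", O_RDONLY);	/* hypothetical GFS file */

	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	addr = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;
	/* Hint the kernel to read the whole mapping ahead; this readahead
	 * can end up in gfs_readpage() without the inode glock held. */
	madvise(addr, st.st_size, MADV_WILLNEED);
	munmap(addr, st.st_size);
	close(fd);
	return 0;
}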

In that case, this patch unlocks the page, acquires the inode glock and
returns AOP_TRUNCATED_PAGE so that the caller reloads the page and calls
gfs_readpage() again.
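
A simplified sketch of the caller-side retry contract (an illustration only,
not the actual mm/filemap.c code): AOP_TRUNCATED_PAGE tells the caller that
->readpage() unlocked the page and that it should look the page up again and
retry, so gfs_readpage() is re-entered with the glock now held.

retry:
	page = find_get_page(mapping, index);
	/* ... lock the page and check it is still wanted ... */
	error = mapping->a_ops->readpage(filp, page);
	if (error == AOP_TRUNCATED_PAGE) {
		/* readpage dropped the page lock; release our ref and retry */
		page_cache_release(page);
		goto retry;
	}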

I had to change gfs_glock_is_locked_by_me() to return the holder if the glock
is held, or NULL otherwise (instead of the TRUE/FALSE integer value it
returned before).
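
A brief illustration of the new calling convention (adapted from the
gfs_readpage() hunk below, not a new interface):

	struct gfs_holder *gh = gfs_glock_is_locked_by_me(ip->i_gl);
	if (gh) {
		/* current task already holds the glock; gh is that holder */
	} else {
		/* not held by this task; the caller must acquire it itself */
	}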

I also added a new GL_READPAGE flag. If gfs_readpage() has to acquire the
inode glock itself, this flag is set on the holder. We must not unlock a
holder that was already on the glock before we entered gfs_readpage();
checking for this flag before unlocking ensures that.
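
Sketch of the rule the flag enforces (it mirrors the ops_address.c hunk
below): the holder is torn down on exit only if this call created it, which
is exactly when GL_READPAGE is set on it.

	if (gh->gh_flags & GL_READPAGE) {
		/* we took the glock in gfs_readpage(), so drop it here */
		gfs_glock_dq_uninit(gh);
		kfree(gh);
	}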
---
 gfs-kernel/src/gfs/glock.h       |   15 +++++++--------
 gfs-kernel/src/gfs/ops_address.c |   29 +++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/gfs-kernel/src/gfs/glock.h b/gfs-kernel/src/gfs/glock.h
index 37630b0..fb1e210 100644
--- a/gfs-kernel/src/gfs/glock.h
+++ b/gfs-kernel/src/gfs/glock.h
@@ -33,16 +33,16 @@
 #define GL_NOCACHE        (0x00000400) /* Release glock when done, don't cache */
 #define GL_SYNC           (0x00000800) /* Sync to disk when no more holders */
 #define GL_NOCANCEL       (0x00001000) /* Don't ever cancel this request */
+#define GL_READPAGE       (0x00002000) /* gfs_readpage() issued this lock request */
 
 #define GLR_TRYFAILED     (13)
 #define GLR_CANCELED      (14)
 
-static __inline__ int
+static __inline__ struct gfs_holder*
 gfs_glock_is_locked_by_me(struct gfs_glock *gl)
 {
 	struct list_head *tmp, *head;
 	struct gfs_holder *gh;
-	int locked = FALSE;
 
 	/* Look in glock's list of holders for one with current task as owner */
 	spin_lock(&gl->gl_spin);
@@ -50,14 +50,13 @@ gfs_glock_is_locked_by_me(struct gfs_glock *gl)
 	     tmp != head;
 	     tmp = tmp->next) {
 		gh = list_entry(tmp, struct gfs_holder, gh_list);
-		if (gh->gh_owner == current) {
-			locked = TRUE;
-			break;
-		}
+		if (gh->gh_owner == current)
+			goto out;
 	}
+	gh = NULL;
+out:
 	spin_unlock(&gl->gl_spin);
-
-	return locked;
+	return gh;
 }
 static __inline__ int
 gfs_glock_is_held_excl(struct gfs_glock *gl)
diff --git a/gfs-kernel/src/gfs/ops_address.c b/gfs-kernel/src/gfs/ops_address.c
index 05a550e..cc64122 100644
--- a/gfs-kernel/src/gfs/ops_address.c
+++ b/gfs-kernel/src/gfs/ops_address.c
@@ -272,13 +272,33 @@ gfs_readpage(struct file *file, struct page *page)
 {
 	struct gfs_inode *ip = get_v2ip(page->mapping->host);
 	struct gfs_sbd *sdp = ip->i_sbd;
+	struct gfs_holder *gh;
 	int error;
 
 	atomic_inc(&sdp->sd_ops_address);
 
-	if (gfs_assert_warn(sdp, gfs_glock_is_locked_by_me(ip->i_gl))) {
+	/* When gfs_readpage is called from the sys_madvise code through the 
+	 * readahead code, the inode glock is not held. In this case, we hold 
+	 * the inode glock, unlock the page and return AOP_TRUNCATED_PAGE. The
+	 * caller will then reload the page and call gfs_readpage again. We 
+	 * also add the flag GL_READPAGE to denote that the glock was held in
+	 * this function and if so, we unlock it before leaving this function
+	 */
+	gh = gfs_glock_is_locked_by_me(ip->i_gl);
+	if (!gh) {
+		gh = kmalloc(sizeof(struct gfs_holder), GFP_NOFS);
+		if (!gh)
+			return -ENOBUFS;
+		gfs_holder_init(ip->i_gl, LM_ST_SHARED, 
+				GL_READPAGE | LM_FLAG_ANY, gh);
 		unlock_page(page);
-		return -ENOSYS;
+		error = gfs_glock_nq(gh);
+		if (error) {
+			gfs_holder_uninit(gh);
+			kfree(gh);
+			goto out;
+		}
+		return AOP_TRUNCATED_PAGE;
 	}
 
 	if (!gfs_is_jdata(ip)) {
@@ -293,6 +313,11 @@ gfs_readpage(struct file *file, struct page *page)
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		error = -EIO;
 
+	if (gh->gh_flags & GL_READPAGE) { /* If we grabbed the glock here */
+		gfs_glock_dq_uninit(gh);
+		kfree(gh);
+	}
+out:
 	return error;
 }
 

