cluster: STABLE3 - gfs-kernel: Flock on GFS fs file will error with "Resource temporarily unavailable" for EWOULDBLOCK

Abhijith Das adas at fedoraproject.org
Thu Feb 11 16:53:09 UTC 2010


Gitweb:        http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=bc271e80a8221d264d269a74ccf6e46c4b6cce4d
Commit:        bc271e80a8221d264d269a74ccf6e46c4b6cce4d
Parent:        04f7da379c163876eb346f3828de0ff152e25f5a
Author:        Abhijith Das <adas at redhat.com>
AuthorDate:    Thu Feb 11 10:48:24 2010 -0600
Committer:     Abhijith Das <adas at redhat.com>
CommitterDate: Thu Feb 11 10:52:23 2010 -0600

gfs-kernel: Flock on GFS fs file will error with "Resource temporarily unavailable" for EWOULDBLOCK

This patch allows a process to queue multiple flocks
through multiple file descriptors, making the behavior
similar to that of GFS2. It also eliminates a race
condition within a single process where a holder from a
previous unlock that has not yet been fully dequeued
prevents a subsequent non-blocking flock from going
through, causing it to return EAGAIN. With this patch,
the non-blocking request instead blocks until the
aforementioned unlock holder has been cleared.

rhbz#515717
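
For illustration only, a minimal user-space sketch of the behavior described
above (not part of the patch, and not taken from rhbz#515717; the mount point
and exact call sequence are assumptions). Converting an flock on the same
descriptor implies an unlock followed by a non-blocking re-request inside GFS,
which is where the spurious EWOULDBLOCK could be seen before this patch:

/*
 * Hypothetical reproducer sketch: take a shared flock on a GFS file, then
 * convert it to an exclusive non-blocking flock.  Before this patch, the
 * holder left over from the implied unlock could still be queued on the
 * glock, so the LM_FLAG_TRY request from the same process could fail with
 * EWOULDBLOCK ("Resource temporarily unavailable") even with no other locker.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: any regular file on a GFS mount. */
	int fd = open("/mnt/gfs/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Take a shared flock first. */
	if (flock(fd, LOCK_SH) < 0)
		perror("flock LOCK_SH");

	/* Convert to an exclusive, non-blocking flock. */
	if (flock(fd, LOCK_EX | LOCK_NB) < 0)
		fprintf(stderr, "flock conversion: %s\n", strerror(errno));
	else
		printf("flock conversion succeeded\n");

	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}

With the patch applied, do_flock() dequeues the old holder with
gfs_glock_dq_wait() and waits on GLF_DEMOTE before re-queueing, so the
conversion above no longer races with its own previous unlock.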
---
 gfs-kernel/src/gfs/glock.c    |   74 +++++++++++++++++++++++++++++++----------
 gfs-kernel/src/gfs/glock.h    |    2 +
 gfs-kernel/src/gfs/incore.h   |    2 +
 gfs-kernel/src/gfs/ops_file.c |   15 ++++----
 4 files changed, 67 insertions(+), 26 deletions(-)

diff --git a/gfs-kernel/src/gfs/glock.c b/gfs-kernel/src/gfs/glock.c
index 3040c60..80f4466 100644
--- a/gfs-kernel/src/gfs/glock.c
+++ b/gfs-kernel/src/gfs/glock.c
@@ -986,6 +986,33 @@ state_change(struct gfs_glock *gl, unsigned int new_state)
 	gl->gl_state = new_state;
 }
 
+static int gfs_glock_demote_wait(void *word)
+{
+	schedule();
+	return 0;
+}
+
+static void gfs_wait_on_demote(struct gfs_glock *gl)
+{
+	might_sleep();
+	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs_glock_demote_wait, TASK_UNINTERRUPTIBLE);
+}
+
+static void gfs_demote_wake(struct gfs_glock *gl)
+{
+	clear_bit(GLF_DEMOTE, &gl->gl_flags);
+	smp_mb__after_clear_bit();
+	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+void gfs_glock_dq_wait(struct gfs_holder *gh)
+{
+	struct gfs_glock *gl = gh->gh_gl;
+	set_bit(GLF_DEMOTE, &gl->gl_flags);
+	gfs_glock_dq(gh);
+	gfs_wait_on_demote(gl);
+}
+
 /**
  * xmote_bh - Called after the lock module is done acquiring a lock
  * @gl: The glock in question
@@ -1091,6 +1118,8 @@ xmote_bh(struct gfs_glock *gl, unsigned int ret)
 		gl->gl_req_bh = NULL;
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		run_queue(gl);
+		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+			gfs_demote_wake(gl);
 		spin_unlock(&gl->gl_spin);
 	}
 
@@ -1200,8 +1229,9 @@ drop_bh(struct gfs_glock *gl, unsigned int ret)
 	gl->gl_req_bh = NULL;
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	run_queue(gl);
-	spin_unlock(&gl->gl_spin);
-
+	if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+		gfs_demote_wake(gl);
+	spin_unlock(&gl->gl_spin);	
 	glock_put(gl);
 
 	if (gh) {
@@ -1312,6 +1342,11 @@ glock_wait_internal(struct gfs_holder *gh)
 		if (gl->gl_req_gh != gh &&
 		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
 		    !list_empty(&gh->gh_list)) {
+			if (gh->gh_flags & GL_FLOCK &&
+			    list_empty(&gl->gl_holders)) {
+				spin_unlock(&gl->gl_spin);
+				goto skip_try_flag;
+			}
 			list_del_init(&gh->gh_list);
 			gh->gh_error = GLR_TRYFAILED;
 			if (test_bit(HIF_RECURSE, &gh->gh_iflags))
@@ -1323,6 +1358,7 @@ glock_wait_internal(struct gfs_holder *gh)
 		spin_unlock(&gl->gl_spin);
 	}
 
+skip_try_flag:
 	if ((gh->gh_flags & LM_FLAG_PRIORITY) &&
 	    !(gh->gh_flags & GL_NOCANCEL_OTHER))
 		do_cancels(gh);
@@ -1402,13 +1438,14 @@ add_to_queue(struct gfs_holder *gh)
 			if (tmp_gh->gh_owner == gh->gh_owner) {
 				/* Make sure pre-existing holder is compatible
 				   with this new one. */
-				if (gfs_assert_warn(sdp, (gh->gh_flags & LM_FLAG_ANY) ||
-						    !(tmp_gh->gh_flags & LM_FLAG_ANY)) ||
-				    gfs_assert_warn(sdp, (tmp_gh->gh_flags & GL_LOCAL_EXCL) ||
-						    !(gh->gh_flags & GL_LOCAL_EXCL)) ||
-				    gfs_assert_warn(sdp, relaxed_state_ok(gl->gl_state,
-									  gh->gh_state,
-									  gh->gh_flags)))
+				if (!(gh->gh_flags & GL_FLOCK) && 
+				    (gfs_assert_warn(sdp, (gh->gh_flags & LM_FLAG_ANY) ||
+						     !(tmp_gh->gh_flags & LM_FLAG_ANY)) ||
+				     gfs_assert_warn(sdp, (tmp_gh->gh_flags & GL_LOCAL_EXCL) ||
+						     !(gh->gh_flags & GL_LOCAL_EXCL)) ||
+				     gfs_assert_warn(sdp, relaxed_state_ok(gl->gl_state,
+									   gh->gh_state,
+									   gh->gh_flags))))
 					goto fail;
 
 				/* We're good!  Grant the hold. */
@@ -1430,15 +1467,16 @@ add_to_queue(struct gfs_holder *gh)
 			tmp_gh = list_entry(tmp, struct gfs_holder, gh_list);
 			if (tmp_gh->gh_owner == gh->gh_owner) {
 				/* Yes, make sure it is compatible with new */
-				if (gfs_assert_warn(sdp, test_bit(HIF_PROMOTE,
-								  &tmp_gh->gh_iflags)) ||
-				    gfs_assert_warn(sdp, (gh->gh_flags & LM_FLAG_ANY) ||
-						    !(tmp_gh->gh_flags & LM_FLAG_ANY)) ||
-				    gfs_assert_warn(sdp, (tmp_gh->gh_flags & GL_LOCAL_EXCL) ||
-						    !(gh->gh_flags & GL_LOCAL_EXCL)) ||
-				    gfs_assert_warn(sdp, relaxed_state_ok(tmp_gh->gh_state,
-									  gh->gh_state,
-									  gh->gh_flags)))
+				if (!(gh->gh_flags & GL_FLOCK) &&
+				    (gfs_assert_warn(sdp, test_bit(HIF_PROMOTE,
+								   &tmp_gh->gh_iflags)) ||
+				     gfs_assert_warn(sdp, (gh->gh_flags & LM_FLAG_ANY) ||
+						     !(tmp_gh->gh_flags & LM_FLAG_ANY)) ||
+				     gfs_assert_warn(sdp, (tmp_gh->gh_flags & GL_LOCAL_EXCL) ||
+						     !(gh->gh_flags & GL_LOCAL_EXCL)) ||
+				     gfs_assert_warn(sdp, relaxed_state_ok(tmp_gh->gh_state,
+									   gh->gh_state,
+									   gh->gh_flags))))
 					goto fail;
 
 				/* OK, make sure they're marked, so
diff --git a/gfs-kernel/src/gfs/glock.h b/gfs-kernel/src/gfs/glock.h
index a0342b1..9de5e94 100644
--- a/gfs-kernel/src/gfs/glock.h
+++ b/gfs-kernel/src/gfs/glock.h
@@ -21,6 +21,7 @@
 #define GL_SYNC           (0x00000800) /* Sync to disk when no more holders */
 #define GL_NOCANCEL       (0x00001000) /* Don't ever cancel this request */
 #define GL_NOCANCEL_OTHER (0x00004000) /* Don't cancel other locks for this */
+#define GL_FLOCK          (0x00008000) /* This is an flock */
 
 #define GLR_TRYFAILED     (13)
 #define GLR_CANCELED      (14)
@@ -96,6 +97,7 @@ int gfs_glock_nq(struct gfs_holder *gh);
 int gfs_glock_poll(struct gfs_holder *gh);
 int gfs_glock_wait(struct gfs_holder *gh);
 void gfs_glock_dq(struct gfs_holder *gh);
+void gfs_glock_dq_wait(struct gfs_holder *gh);
 
 void gfs_glock_prefetch(struct gfs_glock *gl, unsigned int state, int flags);
 void gfs_glock_force_drop(struct gfs_glock *gl);
diff --git a/gfs-kernel/src/gfs/incore.h b/gfs-kernel/src/gfs/incore.h
index 1e00fc2..921b79d 100644
--- a/gfs-kernel/src/gfs/incore.h
+++ b/gfs-kernel/src/gfs/incore.h
@@ -474,6 +474,8 @@ struct gfs_holder {
                                       *   (demote/greedy) holders */
 #define GLF_GREEDY              (7)  /* This lock is ignoring callbacks
                                       *   (requests from other nodes) for now */
+#define GLF_DEMOTE              (8)  /* This is used to check for unflock completion
+				      * different from HIF_DEMOTE */
 
 struct gfs_glock {
 	struct list_head gl_list;    /* Link to hb_list in one of superblock's
diff --git a/gfs-kernel/src/gfs/ops_file.c b/gfs-kernel/src/gfs/ops_file.c
index 6a64958..e675772 100644
--- a/gfs-kernel/src/gfs/ops_file.c
+++ b/gfs-kernel/src/gfs/ops_file.c
@@ -1732,7 +1732,8 @@ do_flock(struct file *file, int cmd, struct file_lock *fl)
 	int error = 0;
 
 	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+	flags = ((IS_SETLKW(cmd)) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
+		| GL_FLOCK;
 
 	down(&fp->f_fl_lock);
 
@@ -1740,21 +1741,19 @@ do_flock(struct file *file, int cmd, struct file_lock *fl)
 	if (gl) {
 		if (fl_gh->gh_state == state)
 			goto out;
-		gfs_glock_hold(gl);
 		flock_lock_file_wait(file,
-				     &(struct file_lock){.fl_type = F_UNLCK});		
-		gfs_glock_dq_uninit(fl_gh);
+				     &(struct file_lock){.fl_type = F_UNLCK});
+		gfs_glock_dq_wait(fl_gh);
+		gfs_holder_reinit(state, flags, fl_gh);
 	} else {
 		error = gfs_glock_get(ip->i_sbd,
 				      ip->i_num.no_formal_ino, &gfs_flock_glops,
 				      CREATE, &gl);
 		if (error)
 			goto out;
+		gfs_holder_init(gl, state, flags, fl_gh);
+		gfs_glock_put(gl);
 	}
-
-	gfs_holder_init(gl, state, flags, fl_gh);
-	gfs_glock_put(gl);
-
 	error = gfs_glock_nq(fl_gh);
 	if (error) {
 		gfs_holder_uninit(fl_gh);

