dlm: master - dlm_controld: clear waiting plocks for closed files

David Teigland teigland at fedoraproject.org
Wed Jun 1 17:16:12 UTC 2011


Gitweb:        http://git.fedorahosted.org/git/dlm.git?p=dlm.git;a=commitdiff;h=1e95a3d8c8fb8ef889b4e69fca9ac7d9c062517b
Commit:        1e95a3d8c8fb8ef889b4e69fca9ac7d9c062517b
Parent:        7675151a06bc021d32a8ceba0c62f270efda448c
Author:        David Teigland <teigland at redhat.com>
AuthorDate:    Wed Jun 1 12:14:56 2011 -0500
Committer:     David Teigland <teigland at redhat.com>
CommitterDate: Wed Jun 1 12:14:56 2011 -0500

dlm_controld: clear waiting plocks for closed files

The new CLOSE flag is set on unlock operations that the
vfs generates to remove locks a process did not unlock
itself, when the process closes the file or exits.

The kernel does not expect a reply to these unlock-close
operations, so no result is written back for them.

plock requests can now be interrupted in the kernel when the
process is killed.  So the unlock-close also needs to clear
any waiting plocks that were abandoned by the killed process.
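
As an illustration, here is a minimal reproduction sketch of
the scenario (the mount point is hypothetical; it assumes a
file on a dlm-backed cluster filesystem such as gfs2):

#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* hypothetical path on a dlm-backed cluster fs */
#define LOCKFILE "/mnt/gfs2/plock-test"

int main(void)
{
	struct flock fl = {
		.l_type   = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start  = 0,
		.l_len    = 1,
	};
	int fd;
	pid_t pid;

	fd = open(LOCKFILE, O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* parent holds the lock, so the child's request must wait */
	if (fcntl(fd, F_SETLK, &fl) < 0) {
		perror("F_SETLK");
		return 1;
	}

	pid = fork();
	if (pid == 0) {
		/* child blocks in F_SETLKW; dlm_controld queues the
		   request as a waiter on the resource */
		fcntl(fd, F_SETLKW, &fl);
		_exit(0);
	}

	sleep(1);		/* give the child time to block */

	/* the blocked request is interrupted in the kernel; exit
	   then closes the fd, and the resulting unlock-close must
	   also clear the child's abandoned waiter in dlm_controld */
	kill(pid, SIGKILL);
	waitpid(pid, NULL, 0);
	return 0;
}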

The corresponding kernel patch:
https://lkml.org/lkml/2011/5/23/237

bz 678585

Signed-off-by: David Teigland <teigland at redhat.com>
---
 group/dlm_controld/plock.c |   32 ++++++++++++++++++++++++++++++++
 1 files changed, 32 insertions(+), 0 deletions(-)

diff --git a/group/dlm_controld/plock.c b/group/dlm_controld/plock.c
index 66b20a1..967f3c0 100644
--- a/group/dlm_controld/plock.c
+++ b/group/dlm_controld/plock.c
@@ -3,6 +3,10 @@
 
 #include <linux/dlm_plock.h>
 
+/* FIXME: remove this once everyone is using the version of
+ * dlm_plock.h which defines it */
+#define DLM_PLOCK_FL_CLOSE 1
+
 static uint32_t plock_read_count;
 static uint32_t plock_recv_count;
 static uint32_t plock_rate_delays;
@@ -679,6 +683,27 @@ static int unlock_internal(struct lockspace *ls, struct resource *r,
 	return rv;
 }
 
+static void clear_waiters(struct lockspace *ls, struct resource *r,
+			  struct dlm_plock_info *in)
+{
+	struct lock_waiter *w, *safe;
+
+	list_for_each_entry_safe(w, safe, &r->waiters, list) {
+		if (w->info.nodeid != in->nodeid || w->info.owner != in->owner)
+			continue;
+
+		list_del(&w->list);
+
+		log_plock_error(ls, "clear waiter %llx %llx-%llx %d/%u/%llx",
+				(unsigned long long)in->number,
+				(unsigned long long)in->start,
+				(unsigned long long)in->end,
+				in->nodeid, in->pid,
+				(unsigned long long)in->owner);
+		free(w);
+	}
+}
+
 static int add_waiter(struct lockspace *ls, struct resource *r,
 		      struct dlm_plock_info *in)
 {
@@ -764,9 +789,16 @@ static void do_unlock(struct lockspace *ls, struct dlm_plock_info *in,
 
 	rv = unlock_internal(ls, r, in);
 
+	if (in->flags & DLM_PLOCK_FL_CLOSE) {
+		clear_waiters(ls, r, in);
+		/* no replies for unlock-close ops */
+		goto skip_result;
+	}
+
 	if (in->nodeid == our_nodeid)
 		write_result(ls, in, rv);
 
+ skip_result:
 	do_waiters(ls, r);
 	put_resource(ls, r);
 }
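
For readers without the tree handy, the removal pattern used
by clear_waiters() above (walk the waiter list with
list_for_each_entry_safe and unlink entries queued by the
closing node/owner) looks like this in self-contained form;
the structures and list macros are simplified stand-ins for
dlm_controld's own, not the real types:

/* builds with gcc; uses typeof, a GNU C extension, as the
 * kernel-style list macros do */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* _safe variant: keeps a pointer to the next entry so the
 * current one can be unlinked and freed mid-walk */
#define list_for_each_entry_safe(pos, n, head, member)		      \
	for (pos = list_entry((head)->next, typeof(*pos), member),    \
	     n = list_entry(pos->member.next, typeof(*pos), member);  \
	     &pos->member != (head);				      \
	     pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

struct waiter {
	int nodeid;
	uint64_t owner;
	struct list_head list;
};

/* remove every waiter queued by the given node/owner pair,
   mirroring the match in clear_waiters() */
static void clear_matching(struct list_head *head, int nodeid,
			   uint64_t owner)
{
	struct waiter *w, *safe;

	list_for_each_entry_safe(w, safe, head, list) {
		if (w->nodeid != nodeid || w->owner != owner)
			continue;
		list_del(&w->list);
		free(w);
	}
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct waiter *w, *safe;
	int i;

	for (i = 0; i < 4; i++) {
		w = malloc(sizeof(*w));
		w->nodeid = i % 2;	/* waiters from two nodes */
		w->owner = 0xabc;
		list_add_tail(&w->list, &head);
	}

	clear_matching(&head, 1, 0xabc);   /* drop node 1's waiters */

	list_for_each_entry_safe(w, safe, &head, list)
		printf("remaining waiter: nodeid %d\n", w->nodeid);
	return 0;
}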

