Gitweb:
http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=77602315f5d8803d9d00b2379ab1ceb9da16df93
Commit: 77602315f5d8803d9d00b2379ab1ceb9da16df93
Parent: e6ce10a475c1d9967a62e79546d23d5fedf936c1
Author: David Teigland <teigland@redhat.com>
AuthorDate: Thu Oct 28 14:06:43 2010 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Fri Dec 10 09:04:37 2010 -0600
dlm-kernel: close-purge option
When enabled, the dlm does not try to cancel/unlock any
locks that a process holds when it exits. Instead, it
uses a newly implemented "purge" routine that walks through
all resources/locks, forcibly deleting any locks held by
the given pid.
bz 645531
Signed-off-by: David Teigland <teigland@redhat.com>
---
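
A quick way to exercise the new option from userspace is to write 1 to
the close_purge proc entry before starting the locking process. This is
a minimal sketch, assuming the tunable is published alongside the other
config_proc_info entries under /proc/cluster/config/dlm/ (the exact
proc path is an assumption; check where dlm_dir is registered on your
tree):

/* toggle the (assumed) /proc/cluster/config/dlm/close_purge tunable */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/cluster/config/dlm/close_purge", "w");

	if (!f) {
		perror("fopen close_purge");
		return 1;
	}
	fputs("1\n", f);	/* writing 0 restores the old cancel/unlock path */
	fclose(f);
	return 0;
}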
dlm-kernel/src/config.c | 8 ++-
dlm-kernel/src/config.h | 1 +
dlm-kernel/src/device.c | 104 +++++++++++++++++++++++++++++-
dlm-kernel/src/dlm_internal.h | 1 +
dlm-kernel/src/lockqueue.c | 39 +++++++++++
dlm-kernel/src/lockqueue.h | 1 +
dlm-kernel/src/midcomms.c | 7 ++
dlm-kernel/src/rsb.c | 142 +++++++++++++++++++++++++++++++++++++++++
dlm-kernel/src/rsb.h | 1 +
9 files changed, 302 insertions(+), 2 deletions(-)
diff --git a/dlm-kernel/src/config.c b/dlm-kernel/src/config.c
index 3ce052c..a0554b1 100644
--- a/dlm-kernel/src/config.c
+++ b/dlm-kernel/src/config.c
@@ -30,6 +30,7 @@
#define DEFAULT_DEADLOCKTIME 10
#define DEFAULT_RECOVER_TIMER 5
#define DEFAULT_USER_GRANT_NOW 1
+#define DEFAULT_CLOSE_PURGE 0
struct config_info dlm_config = {
.tcp_port = DEFAULT_TCP_PORT,
@@ -42,7 +43,8 @@ struct config_info dlm_config = {
.conn_increment = DEFAULT_CONN_INCREMENT,
.deadlocktime = DEFAULT_DEADLOCKTIME,
.recover_timer = DEFAULT_RECOVER_TIMER,
- .user_grant_now = DEFAULT_USER_GRANT_NOW
+ .user_grant_now = DEFAULT_USER_GRANT_NOW,
+ .close_purge = DEFAULT_CLOSE_PURGE
};
@@ -93,6 +95,10 @@ static struct config_proc_info {
{
.name = "user_grant_now",
.value = &dlm_config.user_grant_now,
+ },
+ {
+ .name = "close_purge",
+ .value = &dlm_config.close_purge,
}
};
static struct proc_dir_entry *dlm_dir;
diff --git a/dlm-kernel/src/config.h b/dlm-kernel/src/config.h
index e0dab07..d8c92f0 100644
--- a/dlm-kernel/src/config.h
+++ b/dlm-kernel/src/config.h
@@ -26,6 +26,7 @@ struct config_info {
int deadlocktime;
int recover_timer;
int user_grant_now;
+ int close_purge;
};
extern struct config_info dlm_config;
diff --git a/dlm-kernel/src/device.c b/dlm-kernel/src/device.c
index 44e1379..018ca03 100644
--- a/dlm-kernel/src/device.c
+++ b/dlm-kernel/src/device.c
@@ -39,6 +39,9 @@
#include "device.h"
#include "config.h"
#include "lockspace.h"
+#include "rsb.h"
+#include "lockqueue.h"
+#include "nodes.h"
extern struct dlm_lkb *dlm_get_lkb(struct dlm_ls *, int);
static struct file_operations _dlm_fops;
@@ -120,6 +123,7 @@ struct file_info {
struct user_ls *fi_ls;
atomic_t fi_refcnt; /* Number of users */
unsigned long fi_flags; /* Bit 1 means the device is open */
+ int fi_pid;
};
@@ -430,6 +434,7 @@ static int dlm_open(struct inode *inode, struct file *file)
atomic_set(&f->fi_refcnt, 1);
f->fi_flags = 0;
set_bit(1, &f->fi_flags);
+ f->fi_pid = 0;
file->private_data = f;
@@ -455,8 +460,94 @@ static int check_version(struct dlm_write_request *req)
return 0;
}
+static int dlm_close_purge(struct inode *inode, struct file *file)
+{
+ struct file_info *f = file->private_data;
+ struct lock_info *old_li, *safe;
+ sigset_t tmpsig;
+ sigset_t allsigs;
+ struct user_ls *lsinfo;
+ struct dlm_ls *ls;
+ int lkb_count = 0, li_count = 0;
+ int pid = f->fi_pid;
+ int ournodeid = our_nodeid();
+
+ lsinfo = find_lockspace(iminor(inode));
+ if (!lsinfo)
+ return -ENOENT;
+
+ if (pid != current->pid) {
+ printk("close_purge by different pid %d first %d\n",
+ current->pid, pid);
+ }
+
+ /* Mark this closed so that ASTs will not be delivered any more */
+ clear_bit(1, &f->fi_flags);
+
+ /* Block signals while we are doing this */
+ sigfillset(&allsigs);
+ sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
+
+ list_for_each_entry_safe(old_li, safe, &f->fi_lkb_list, li_ownerqueue) {
+ struct dlm_lkb *lkb;
+
+ lkb = dlm_get_lkb(f->fi_ls->ls_lockspace, old_li->li_lksb.sb_lkid);
+ if (lkb) {
+ lkb->lkb_astparam = (long)NULL;
+ lkb_count++;
+ }
+
+ list_del(&old_li->li_ownerqueue);
+ kfree(old_li);
+ put_file_info(f);
+ li_count++;
+ }
+
+ printk("dlm_close_purge our_nodeid %d pid %d lkb_count %d li_count %d\n",
+ ournodeid, pid, lkb_count, li_count);
+
+ ls = find_lockspace_by_local_id(lsinfo->ls_lockspace);
+ if (!ls) {
+ printk("dlm_purge: no lockspace found\n");
+ goto out;
+ }
+
+ send_purge_all(ls, ournodeid, pid);
+ dlm_purge(ls, ournodeid, pid, -1);
+
+ /* must happen prior to dlm_release_lockspace() */
+ put_lockspace(ls);
+ out:
+ /*
+ * If this is the last reference to the lockspace
+ * then free the struct. If it's an AUTOFREE lockspace
+ * then free the whole thing.
+ */
+ down(&user_ls_lock);
+ if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
+
+ if (lsinfo->ls_lockspace) {
+ if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
+ unregister_lockspace(lsinfo, 1);
+ }
+ }
+ else {
+ kfree(lsinfo->ls_miscinfo.name);
+ kfree(lsinfo);
+ }
+ }
+ up(&user_ls_lock);
+ put_file_info(f);
+
+ /* Restore signals */
+ sigprocmask(SIG_SETMASK, &tmpsig, NULL);
+ recalc_sigpending();
+
+ return 0;
+}
+
/* Close on lockspace device */
-static int dlm_close(struct inode *inode, struct file *file)
+static int dlm_close_unlock(struct inode *inode, struct file *file)
{
struct file_info *f = file->private_data;
struct lock_info li;
@@ -608,6 +699,14 @@ static int dlm_close(struct inode *inode, struct file *file)
return 0;
}
+static int dlm_close(struct inode *inode, struct file *file)
+{
+ if (dlm_config.close_purge)
+ return dlm_close_purge(inode, file);
+ else
+ return dlm_close_unlock(inode, file);
+}
+
/*
* ioctls to create/remove lockspaces, and check how many
* outstanding ASTs there are against a particular LS.
@@ -933,6 +1032,9 @@ static int do_user_lock(struct file_info *fi, uint8_t cmd, struct dlm_lock_param
if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
return -EINVAL;
+ if (dlm_config.close_purge && !fi->fi_pid)
+ fi->fi_pid = current->pid;
+
/* For conversions, the lock will already have a lock_info
block squirelled away in astparam */
if (kparams->flags & DLM_LKF_CONVERT) {
diff --git a/dlm-kernel/src/dlm_internal.h b/dlm-kernel/src/dlm_internal.h
index ed54db3..3ed07ac 100644
--- a/dlm-kernel/src/dlm_internal.h
+++ b/dlm-kernel/src/dlm_internal.h
@@ -590,6 +590,7 @@ struct dlm_query_reply {
#define GDLM_REMCMD_SENDBAST 7
#define GDLM_REMCMD_SENDCAST 8
#define GDLM_REMCMD_REM_RESDATA 9
+#define GDLM_REMCMD_PURGE 10
#define GDLM_REMCMD_RECOVERMESSAGE 20
#define GDLM_REMCMD_RECOVERREPLY 21
#define GDLM_REMCMD_QUERY 30
diff --git a/dlm-kernel/src/lockqueue.c b/dlm-kernel/src/lockqueue.c
index d203e16..9e3db73 100644
--- a/dlm-kernel/src/lockqueue.c
+++ b/dlm-kernel/src/lockqueue.c
@@ -710,6 +710,41 @@ void remote_remove_direntry(struct dlm_ls *ls, int nodeid, char *name,
midcomms_send_buffer(&req->rr_header, e);
}
+static int send_purge(struct dlm_ls *ls, int to_nodeid, int pid)
+{
+ struct writequeue_entry *e;
+ struct dlm_request *req;
+
+ printk("dlm: send_purge to nodeid %d pid %d\n", to_nodeid, pid);
+
+ e = lowcomms_get_buffer(to_nodeid,
+ sizeof(struct dlm_request),
+ ls->ls_allocation,
+ (char **) &req);
+ if (!e) {
+ printk("send_purge get_buffer error\n");
+ return -1;
+ }
+ memset(req, 0, sizeof(struct dlm_request));
+
+ req->rr_header.rh_cmd = GDLM_REMCMD_PURGE;
+ req->rr_header.rh_length = sizeof(struct dlm_request);
+ req->rr_header.rh_lockspace = ls->ls_global_id;
+ req->rr_pid = pid;
+ midcomms_send_buffer(&req->rr_header, e);
+ return 0;
+}
+
+void send_purge_all(struct dlm_ls *ls, int ournodeid, int pid)
+{
+ struct dlm_csb *csb;
+
+ list_for_each_entry(csb, &ls->ls_nodes, list) {
+ if (csb->node->nodeid != ournodeid)
+ send_purge(ls, csb->node->nodeid, pid);
+ }
+}
+
/*
* Send remote cluster request to directory or master node before the request
* is put on the lock queue. Runs in the context of the locking caller.
@@ -912,6 +947,10 @@ int process_cluster_request(int nodeid, struct dlm_header *req, int recovery)
send_reply = 1;
break;
+ case GDLM_REMCMD_PURGE:
+ dlm_purge(lspace, our_nodeid(), freq->rr_pid, nodeid);
+ break;
+
case GDLM_REMCMD_REM_RESDATA:
namelen = freq->rr_header.rh_length - sizeof(*freq) + 1;
diff --git a/dlm-kernel/src/lockqueue.h b/dlm-kernel/src/lockqueue.h
index 26bfe19..91d8502 100644
--- a/dlm-kernel/src/lockqueue.h
+++ b/dlm-kernel/src/lockqueue.h
@@ -25,5 +25,6 @@ int reply_in_requestqueue(struct dlm_ls * ls, int lkid);
void remote_remove_direntry(struct dlm_ls * ls, int nodeid, char *name,
int namelen);
void allocate_and_copy_lvb(struct dlm_ls * ls, char **lvbptr, char *src);
+void send_purge_all(struct dlm_ls *ls, int ournodeid, int pid);
#endif /* __LOCKQUEUE_DOT_H__ */
diff --git a/dlm-kernel/src/midcomms.c b/dlm-kernel/src/midcomms.c
index aaeefee..992dc61 100644
--- a/dlm-kernel/src/midcomms.c
+++ b/dlm-kernel/src/midcomms.c
@@ -65,6 +65,9 @@ static void host_to_network(void *msg)
switch (req->rr_header.rh_cmd) {
+ case GDLM_REMCMD_PURGE:
+ req->rr_pid = cpu_to_le32(req->rr_pid);
+ break;
case GDLM_REMCMD_LOCKREQUEST:
case GDLM_REMCMD_CONVREQUEST:
req->rr_range_start = cpu_to_le64(req->rr_range_start);
@@ -149,6 +152,10 @@ static void network_to_host(void *msg)
switch (req->rr_header.rh_cmd) {
+ case GDLM_REMCMD_PURGE:
+ req->rr_pid = le32_to_cpu(req->rr_pid);
+ break;
+
case GDLM_REMCMD_LOCKREQUEST:
case GDLM_REMCMD_CONVREQUEST:
req->rr_range_start = le64_to_cpu(req->rr_range_start);
diff --git a/dlm-kernel/src/rsb.c b/dlm-kernel/src/rsb.c
index edcdc6e..68fbff7 100644
--- a/dlm-kernel/src/rsb.c
+++ b/dlm-kernel/src/rsb.c
@@ -19,6 +19,148 @@
#include "dir.h"
#include "util.h"
#include "rsb.h"
+#include "ast.h"
+#include "lkb.h"
+
+static int purge_queue(struct dlm_ls *ls, struct dlm_rsb *r,
+ struct list_head *queue, int pid,
+ int pid_is_local, int rsb_is_master)
+{
+ struct dlm_lkb *lkb, *lkb2;
+ int purge, total = 0;
+
+ list_for_each_entry_safe(lkb, lkb2, queue, lkb_statequeue) {
+
+ /* the one general rule can be factored out of checks below */
+
+ if (lkb->lkb_ownpid != pid)
+ continue;
+
+ purge = 0;
+
+ /* local copy lkb on locally mastered rsb */
+
+ if (pid_is_local && rsb_is_master && lkb->lkb_nodeid == 0)
+ purge = 1;
+
+ /* process copy lkb on remotely mastered rsb */
+
+ if (pid_is_local && !rsb_is_master && lkb->lkb_nodeid != 0)
+ purge = 1;
+
+ /* master copy lkb on locally mastered rsb */
+
+ if (!pid_is_local && rsb_is_master && lkb->lkb_nodeid > 0)
+ purge = 1;
+
+ if (!purge)
+ continue;
+
+ printk("dlm: purge lkid %x pid %d nodeid %d ast %x lqs %d sts %d gr %d rq %d pil
%d rim %d r_nodeid %d r_name %s\n",
+ lkb->lkb_id,
+ lkb->lkb_ownpid,
+ lkb->lkb_nodeid,
+ lkb->lkb_astflags,
+ lkb->lkb_lockqueue_state,
+ lkb->lkb_status,
+ lkb->lkb_grmode,
+ lkb->lkb_rqmode,
+ pid_is_local,
+ rsb_is_master,
+ r->res_nodeid,
+ r->res_name);
+
+ if (!lkb->lkb_status) {
+ printk("dlm: purge lkid %x zero status\n", lkb->lkb_id);
+ continue;
+ }
+
+ if (lkb->lkb_astflags)
+ remove_from_astqueue(lkb);
+
+ if (lkb->lkb_lockqueue_state)
+ remove_from_lockqueue(lkb);
+
+ if (lkb->lkb_duetime)
+ remove_from_deadlockqueue(lkb);
+
+ list_del(&lkb->lkb_statequeue);
+ lkb->lkb_status = 0;
+ release_lkb(ls, lkb);
+ release_rsb(r);
+ total++;
+ }
+
+ return total;
+}
+
+/*
+ * if from_nodeid is -1, the pid is local,
+ * - if an rsb has a remote master we need to send_purge to it so the
+ * master copy lkb can be removed, in addition to the process copy
+ * removed here directly.
+ * - if an rsb is mastered locally, we remove the local copy lkb's
+ * here directly
+ *
+ * if from_nodeid is > 0, the pid is remote, so we are looking for
+ * master copy lkb's with lkb_nodeid equal to from_nodeid and matching pid
+ */
+
+void dlm_purge(struct dlm_ls *ls, int ournodeid, int pid, int from_nodeid)
+{
+ struct dlm_rsb *r, *r2;
+ int pid_is_local, rsb_is_master;
+ int purged, total_purged = 0;
+
+ printk("dlm: purge our_nodeid %d pid %d from_nodeid %d\n",
+ ournodeid, pid, from_nodeid);
+
+ if (from_nodeid == -1) {
+ /*
+ * pid is local, we're looking for either:
+ * . local copy lkbs on locally mastered rsbs
+ * . process copy lkbs on remotely mastered rsbs
+ * (and send purge to the rsb master)
+ */
+ pid_is_local = 1;
+ } else {
+ /*
+ * pid is remote, we're just looking for master copy
+ * lkbs on locally mastered rsbs
+ */
+ pid_is_local = 0;
+ }
+
+ down_write(&ls->ls_root_lock);
+
+ list_for_each_entry_safe(r, r2, &ls->ls_rootres, res_rootlist) {
+ hold_rsb(r);
+ down_write(&r->res_lock);
+
+ rsb_is_master = (r->res_nodeid == 0) ? 1 : 0;
+
+ purged = 0;
+
+ purged += purge_queue(ls, r, &r->res_waitqueue, pid,
+ pid_is_local, rsb_is_master);
+ purged += purge_queue(ls, r, &r->res_convertqueue, pid,
+ pid_is_local, rsb_is_master);
+ purged += purge_queue(ls, r, &r->res_grantqueue, pid,
+ pid_is_local, rsb_is_master);
+
+ total_purged += purged;
+
+ if (rsb_is_master && purged)
+ grant_pending_locks(r);
+
+ up_write(&r->res_lock);
+ release_rsb_locked(r);
+ }
+
+ up_write(&ls->ls_root_lock);
+
+ printk("dlm: purged %d pid %d\n", total_purged, pid);
+}
static struct dlm_rsb *search_hashchain(struct list_head *head,
struct dlm_rsb *parent,
diff --git a/dlm-kernel/src/rsb.h b/dlm-kernel/src/rsb.h
index 87e1cd7..d1d1a2f 100644
--- a/dlm-kernel/src/rsb.h
+++ b/dlm-kernel/src/rsb.h
@@ -30,5 +30,6 @@ int lkb_dequeue(struct dlm_lkb *lkb);
int res_lkb_dequeue(struct dlm_lkb *lkb);
int lkb_swqueue(struct dlm_rsb *r, struct dlm_lkb *lkb, int type);
int res_lkb_swqueue(struct dlm_rsb *r, struct dlm_lkb *lkb, int type);
+void dlm_purge(struct dlm_ls *ls, int ournodeid, int pid, int from_nodeid);
#endif /* __RSB_DOT_H__ */
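
For reference, the three purge cases in purge_queue() reduce to a small
predicate on (pid locality, rsb mastership, lkb_nodeid). The standalone
sketch below restates that decision table outside the kernel so it can
be eyeballed; should_purge() and the driver loop are illustrative names
for this note, not part of the patch.

#include <stdio.h>

/* mirrors the three purge checks in purge_queue(): lkb_nodeid == 0 is
   a lock mastered here, lkb_nodeid > 0 points at another node */
static int should_purge(int pid_is_local, int rsb_is_master, int lkb_nodeid)
{
	/* local copy lkb on a locally mastered rsb */
	if (pid_is_local && rsb_is_master && lkb_nodeid == 0)
		return 1;

	/* process copy lkb on a remotely mastered rsb */
	if (pid_is_local && !rsb_is_master && lkb_nodeid != 0)
		return 1;

	/* master copy lkb on a locally mastered rsb */
	if (!pid_is_local && rsb_is_master && lkb_nodeid > 0)
		return 1;

	return 0;
}

int main(void)
{
	int pil, rim, nid;

	for (pil = 0; pil <= 1; pil++)
		for (rim = 0; rim <= 1; rim++)
			for (nid = 0; nid <= 1; nid++)
				printf("pid_is_local=%d rsb_is_master=%d lkb_nodeid=%d -> %s\n",
				       pil, rim, nid,
				       should_purge(pil, rim, nid) ? "purge" : "keep");
	return 0;
}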