cluster: STABLE3 - dlm_controld: add plock checkpoint signatures

David Teigland teigland at fedoraproject.org
Wed Mar 31 20:41:50 UTC 2010


Gitweb:        http://git.fedorahosted.org/git/cluster.git?p=cluster.git;a=commitdiff;h=e2ccbf90543cf1d163d1a067bf5a8ce049a9c134
Commit:        e2ccbf90543cf1d163d1a067bf5a8ce049a9c134
Parent:        71e0466770a402bfbc625169bedf4b872be7bf84
Author:        David Teigland <teigland at redhat.com>
AuthorDate:    Tue Mar 30 15:36:45 2010 -0500
Committer:     David Teigland <teigland at redhat.com>
CommitterDate: Wed Mar 31 15:41:18 2010 -0500

dlm_controld: add plock checkpoint signatures

A signature of the plock checkpoint data is computed by the writer and
included in the "checkpoint ready" message it sends.  The reader then
computes the signature of the data it reads from the checkpoint and
compares it against the writer's signature.  If they don't match, the
reader has retrieved incorrect plock state, so it disables plock
operations for the given lockspace on that node.

bz 578625

Signed-off-by: David Teigland <teigland at redhat.com>
---
 group/dlm_controld/cpg.c        |   56 ++++++++++-----
 group/dlm_controld/dlm_daemon.h |   14 +++-
 group/dlm_controld/plock.c      |  138 ++++++++++++++++++++++++++++-----------
 3 files changed, 147 insertions(+), 61 deletions(-)
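
For context, here is a minimal standalone sketch of the signature scheme
this patch adds (the values are made up for illustration; this is not the
daemon code itself).  store_plocks() and retrieve_plocks() fold the first
and last resource numbers together with the resource and lock counts into
a 32-bit value, and receive_plocks_stored() disables plocks when the value
the reader computes differs from the writer's value carried in msgdata2.

#include <stdint.h>
#include <stdio.h>

/* Same mix as store_plocks()/retrieve_plocks() in the diff below:
   fold the 64-bit resource numbers down to 32 bits and xor them with
   the resource and lock counts. */
static uint32_t plock_checkpoint_sig(uint64_t r_num_first, uint64_t r_num_last,
                                     uint32_t r_count, uint32_t p_count)
{
	return (uint32_t)(0xFFFFFFFF & r_num_first) ^
	       (uint32_t)(0xFFFFFFFF & r_num_last) ^
	       r_count ^ p_count;
}

int main(void)
{
	/* Writer side: computed after storing the checkpoint and sent
	   in msgdata2 of the plocks_stored message (illustrative values). */
	uint32_t writer_sig = plock_checkpoint_sig(0x1001, 0x10ff, 12, 37);

	/* Reader side: recomputed from what was actually read back.
	   A missing section changes the counts (and usually the last
	   resource number), so the two values disagree. */
	uint32_t reader_sig = plock_checkpoint_sig(0x1001, 0x10fe, 11, 35);

	if (writer_sig != reader_sig)
		/* corresponds to setting ls->disable_plock in
		   receive_plocks_stored() */
		printf("sig mismatch %x vs %x: disable plocks\n",
		       (unsigned)writer_sig, (unsigned)reader_sig);
	return 0;
}

On a mismatch the reader keeps need_plocks set and refuses plock
operations for that lockspace rather than operating on inconsistent
state, which is what the changes below implement.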

diff --git a/group/dlm_controld/cpg.c b/group/dlm_controld/cpg.c
index 20d59d5..af7ac40 100644
--- a/group/dlm_controld/cpg.c
+++ b/group/dlm_controld/cpg.c
@@ -263,6 +263,7 @@ void dlm_send_message(struct lockspace *ls, char *buf, int len)
 	hd->global_id   = cpu_to_le32(ls->global_id);
 	hd->flags       = cpu_to_le32(hd->flags);
 	hd->msgdata     = cpu_to_le32(hd->msgdata);
+	hd->msgdata2    = cpu_to_le32(hd->msgdata2);
 
 	_send_message(ls->cpg_handle, buf, len, type);
 }
@@ -1081,9 +1082,11 @@ static void receive_plocks_stored(struct lockspace *ls, struct dlm_header *hd,
 {
 	struct ls_info *li;
 	struct id_info *ids;
+	uint32_t sig;
 
-	log_group(ls, "receive_plocks_stored %d:%u need_plocks %d",
-		  hd->nodeid, hd->msgdata, ls->need_plocks);
+	log_group(ls, "receive_plocks_stored %d:%u flags %x sig %x "
+		  "need_plocks %d", hd->nodeid, hd->msgdata, hd->flags,
+		  hd->msgdata2, ls->need_plocks);
 
 	if (!ls->need_plocks)
 		return;
@@ -1109,14 +1112,25 @@ static void receive_plocks_stored(struct lockspace *ls, struct dlm_header *hd,
 		return;
 	}
 
-	retrieve_plocks(ls);
+	retrieve_plocks(ls, &sig);
+
+	if ((hd->flags & DLM_MFLG_PLOCK_SIG) && (sig != hd->msgdata2)) {
+		log_error("lockspace %s plock disabled our sig %x "
+			  "nodeid %d sig %x", ls->name, sig, hd->nodeid,
+			  hd->msgdata2);
+		ls->disable_plock = 1;
+		ls->need_plocks = 1; /* don't set HAVEPLOCK */
+		ls->save_plocks = 0;
+		return;
+	}
+
 	process_saved_plocks(ls);
 	ls->need_plocks = 0;
 	ls->save_plocks = 0;
 }
 
 static void send_info(struct lockspace *ls, struct change *cg, int type,
-		      uint32_t flags)
+		      uint32_t flags, uint32_t msgdata2)
 {
 	struct dlm_header *hd;
 	struct ls_info *li;
@@ -1146,6 +1160,7 @@ static void send_info(struct lockspace *ls, struct change *cg, int type,
 	hd->type = type;
 	hd->msgdata = cg->seq;
 	hd->flags = flags;
+	hd->msgdata2 = msgdata2;
 
 	if (ls->joining)
 		hd->flags |= DLM_MFLG_JOINING;
@@ -1170,10 +1185,11 @@ static void send_info(struct lockspace *ls, struct change *cg, int type,
 		id++;
 	}
 
-	log_group(ls, "send_%s cg %u flags %x counts %u %d %d %d %d",
+	log_group(ls, "send_%s cg %u flags %x data2 %x counts %u %d %d %d %d",
 		  type == DLM_MSG_START ? "start" : "plocks_stored",
-		  cg->seq, hd->flags, ls->started_count, cg->member_count,
-		  cg->joined_count, cg->remove_count, cg->failed_count);
+		  cg->seq, hd->flags, hd->msgdata2, ls->started_count,
+		  cg->member_count, cg->joined_count, cg->remove_count,
+		  cg->failed_count);
 
 	dlm_send_message(ls, buf, len);
 
@@ -1184,14 +1200,14 @@ static void send_start(struct lockspace *ls)
 {
 	struct change *cg = list_first_entry(&ls->changes, struct change, list);
 
-	send_info(ls, cg, DLM_MSG_START, 0);
+	send_info(ls, cg, DLM_MSG_START, 0, 0);
 }
 
-static void send_plocks_stored(struct lockspace *ls)
+static void send_plocks_stored(struct lockspace *ls, uint32_t sig)
 {
 	struct change *cg = list_first_entry(&ls->changes, struct change, list);
 
-	send_info(ls, cg, DLM_MSG_PLOCKS_STORED, 0);
+	send_info(ls, cg, DLM_MSG_PLOCKS_STORED, DLM_MFLG_PLOCK_SIG, sig);
 }
 
 static int same_members(struct change *cg1, struct change *cg2)
@@ -1218,7 +1234,7 @@ static void send_nacks(struct lockspace *ls, struct change *startcg)
 		    same_members(cg, startcg)) {
 			log_group(ls, "send nack old cg %u new cg %u",
 				   cg->seq, startcg->seq);
-			send_info(ls, cg, DLM_MSG_START, DLM_MFLG_NACK);
+			send_info(ls, cg, DLM_MSG_START, DLM_MFLG_NACK, 0);
 		}
 	}
 }
@@ -1238,8 +1254,9 @@ static void prepare_plocks(struct lockspace *ls)
 {
 	struct change *cg = list_first_entry(&ls->changes, struct change, list);
 	struct member *memb;
+	uint32_t sig;
 
-	if (!cfgd_enable_plock)
+	if (!cfgd_enable_plock || ls->disable_plock)
 		return;
 
 	/* if we're the only node in the lockspace, then we are the ckpt_node
@@ -1297,8 +1314,8 @@ static void prepare_plocks(struct lockspace *ls)
 	   previous ckpt_node upon receiving the stored message from us. */
 
 	if (nodes_added(ls))
-		store_plocks(ls);
-	send_plocks_stored(ls);
+		store_plocks(ls, &sig);
+	send_plocks_stored(ls, sig);
 }
 
 static void apply_changes(struct lockspace *ls)
@@ -1532,6 +1549,7 @@ static void dlm_header_in(struct dlm_header *hd)
 	hd->global_id   = le32_to_cpu(hd->global_id);
 	hd->flags       = le32_to_cpu(hd->flags);
 	hd->msgdata     = le32_to_cpu(hd->msgdata);
+	hd->msgdata2    = le32_to_cpu(hd->msgdata2);
 }
 
 static void deliver_cb(cpg_handle_t handle,
@@ -1579,7 +1597,7 @@ static void deliver_cb(cpg_handle_t handle,
 	case DLM_MSG_PLOCK:
 		if (cfgd_enable_plock)
 			receive_plock(ls, hd, len);
-		else
+		else if (!ls->disable_plock)
 			log_error("msg %d nodeid %d enable_plock %d",
 				  hd->type, nodeid, cfgd_enable_plock);
 		break;
@@ -1587,7 +1605,7 @@ static void deliver_cb(cpg_handle_t handle,
 	case DLM_MSG_PLOCK_OWN:
 		if (cfgd_enable_plock && cfgd_plock_ownership)
 			receive_own(ls, hd, len);
-		else
+		else if (!ls->disable_plock)
 			log_error("msg %d nodeid %d enable_plock %d owner %d",
 				  hd->type, nodeid, cfgd_enable_plock,
 				  cfgd_plock_ownership);
@@ -1596,7 +1614,7 @@ static void deliver_cb(cpg_handle_t handle,
 	case DLM_MSG_PLOCK_DROP:
 		if (cfgd_enable_plock && cfgd_plock_ownership)
 			receive_drop(ls, hd, len);
-		else
+		else if (!ls->disable_plock)
 			log_error("msg %d nodeid %d enable_plock %d owner %d",
 				  hd->type, nodeid, cfgd_enable_plock,
 				  cfgd_plock_ownership);
@@ -1606,7 +1624,7 @@ static void deliver_cb(cpg_handle_t handle,
 	case DLM_MSG_PLOCK_SYNC_WAITER:
 		if (cfgd_enable_plock && cfgd_plock_ownership)
 			receive_sync(ls, hd, len);
-		else
+		else if (!ls->disable_plock)
 			log_error("msg %d nodeid %d enable_plock %d owner %d",
 				  hd->type, nodeid, cfgd_enable_plock,
 				  cfgd_plock_ownership);
@@ -1615,7 +1633,7 @@ static void deliver_cb(cpg_handle_t handle,
 	case DLM_MSG_PLOCKS_STORED:
 		if (cfgd_enable_plock)
 			receive_plocks_stored(ls, hd, len);
-		else
+		else if (!ls->disable_plock)
 			log_error("msg %d nodeid %d enable_plock %d",
 				  hd->type, nodeid, cfgd_enable_plock);
 		break;
diff --git a/group/dlm_controld/dlm_daemon.h b/group/dlm_controld/dlm_daemon.h
index b61a636..c2423d2 100644
--- a/group/dlm_controld/dlm_daemon.h
+++ b/group/dlm_controld/dlm_daemon.h
@@ -168,6 +168,7 @@ enum {
 #define DLM_MFLG_HAVEPLOCK 2  /* accompanies start, we have plock state */
 #define DLM_MFLG_NACK      4  /* accompanies start, prevent wrong match when
 				 two outstanding changes are the same */
+#define DLM_MFLG_PLOCK_SIG 8  /* msgdata2 is a plock signature */
 
 struct dlm_header {
 	uint16_t version[3];
@@ -178,8 +179,8 @@ struct dlm_header {
 	uint32_t flags;		/* DLM_MFLG_ */
 	uint32_t msgdata;       /* in-header payload depends on MSG type; lkid
 				   for deadlock, seq for lockspace membership */
-	uint32_t pad1;
-	uint64_t pad2;
+	uint32_t msgdata2;	/* second MSG-specific data */
+	uint64_t pad;
 };
 
 struct lockspace {
@@ -207,6 +208,7 @@ struct lockspace {
 	int			plock_ckpt_node;
 	int			need_plocks;
 	int			save_plocks;
+	int			disable_plock;
 	uint32_t		associated_mg_id;
 	struct list_head	saved_messages;
 	struct list_head	plock_resources;
@@ -214,6 +216,10 @@ struct lockspace {
 	time_t			last_plock_time;
 	struct timeval		drop_resources_last;
 	uint64_t		plock_ckpt_handle;
+	uint64_t		checkpoint_r_num_first;
+	uint64_t		checkpoint_r_num_last;
+	uint32_t		checkpoint_r_count;
+	uint32_t		checkpoint_p_count;
 
 	/* save copy of groupd member callback data for queries */
 
@@ -333,8 +339,8 @@ void receive_sync(struct lockspace *ls, struct dlm_header *hd, int len);
 void receive_drop(struct lockspace *ls, struct dlm_header *hd, int len);
 void process_saved_plocks(struct lockspace *ls);
 void close_plock_checkpoint(struct lockspace *ls);
-void store_plocks(struct lockspace *ls);
-void retrieve_plocks(struct lockspace *ls);
+void store_plocks(struct lockspace *ls, uint32_t *sig);
+void retrieve_plocks(struct lockspace *ls, uint32_t *sig);
 void purge_plocks(struct lockspace *ls, int nodeid, int unmount);
 int fill_plock_dump_buf(struct lockspace *ls);
 
diff --git a/group/dlm_controld/plock.c b/group/dlm_controld/plock.c
index b11341d..1321af4 100644
--- a/group/dlm_controld/plock.c
+++ b/group/dlm_controld/plock.c
@@ -1489,6 +1489,11 @@ void process_plocks(int ci)
 		goto fail;
 	}
 
+	if (ls->disable_plock) {
+		rv = -ENOSYS;
+		goto fail;
+	}
+
 	log_plock(ls, "read plock %llx %s %s %llx-%llx %d/%u/%llx w %d",
 		  (unsigned long long)info.number,
 		  op_str(info.optype),
@@ -1621,7 +1626,8 @@ static void pack_section_buf(struct lockspace *ls, struct resource *r)
 	section_len = count * sizeof(struct pack_plock);
 }
 
-static int unpack_section_buf(struct lockspace *ls, char *numbuf, int buflen)
+static int unpack_section_buf(struct lockspace *ls, char *numbuf, int buflen,
+			      uint64_t *r_num, int *lock_count)
 {
 	struct pack_plock *pp;
 	struct posix_lock *po;
@@ -1648,6 +1654,8 @@ static int unpack_section_buf(struct lockspace *ls, char *numbuf, int buflen)
 	r->owner = owner;
 	r->last_access = now;
 
+	*r_num = num;
+
 	pp = (struct pack_plock *) &section_buf;
 
 	for (i = 0; i < count; i++) {
@@ -1676,6 +1684,7 @@ static int unpack_section_buf(struct lockspace *ls, char *numbuf, int buflen)
 	}
 
 	list_add_tail(&r->list, &ls->plock_resources);
+	*lock_count = count;
 	return 0;
 }
 
@@ -1798,7 +1807,7 @@ void close_plock_checkpoint(struct lockspace *ls)
    it.  The ckpt should then disappear and the new node can create a new ckpt
    for the next mounter. */
 
-void store_plocks(struct lockspace *ls)
+void store_plocks(struct lockspace *ls, uint32_t *sig)
 {
 	SaCkptCheckpointCreationAttributesT attr;
 	SaCkptCheckpointHandleT h;
@@ -1811,15 +1820,21 @@ void store_plocks(struct lockspace *ls)
 	struct resource *r;
 	struct posix_lock *po;
 	struct lock_waiter *w;
-	int r_count, lock_count, total_size, section_size, max_section_size;
+	int total_size, section_size, max_section_size;
 	int len, owner;
+	uint32_t r_count = 0, p_count = 0;
+	uint64_t r_num_first = 0, r_num_last = 0;
 
-	if (!cfgd_enable_plock)
+	if (!cfgd_enable_plock || ls->disable_plock)
 		return;
 
 	/* no change to plock state since we created the last checkpoint */
 	if (ls->last_checkpoint_time > ls->last_plock_time) {
-		log_group(ls, "store_plocks: saved ckpt uptodate");
+		log_group(ls, "store_plocks saved ckpt uptodate");
+		r_num_first = ls->checkpoint_r_num_first;
+		r_num_last = ls->checkpoint_r_num_last;
+		r_count = ls->checkpoint_r_count;
+		p_count = ls->checkpoint_p_count;
 		goto out;
 	}
 	ls->last_checkpoint_time = time(NULL);
@@ -1834,7 +1849,7 @@ void store_plocks(struct lockspace *ls)
 	   the attr fields */
 
 	r_count = 0;
-	lock_count = 0;
+	p_count = 0;
 	total_size = 0;
 	max_section_size = 0;
 
@@ -1846,22 +1861,23 @@ void store_plocks(struct lockspace *ls)
 		section_size = 0;
 		list_for_each_entry(po, &r->locks, list) {
 			section_size += sizeof(struct pack_plock);
-			lock_count++;
+			p_count++;
 		}
 		list_for_each_entry(w, &r->waiters, list) {
 			section_size += sizeof(struct pack_plock);
-			lock_count++;
+			p_count++;
 		}
 		total_size += section_size;
 		if (section_size > max_section_size)
 			max_section_size = section_size;
 	}
 
-	log_group(ls, "store_plocks: r_count %d, lock_count %d, pp %u bytes",
-		  r_count, lock_count, (unsigned int)sizeof(struct pack_plock));
-
-	log_group(ls, "store_plocks: total %d bytes, max_section %d bytes",
-		  total_size, max_section_size);
+	log_group(ls, "store_plocks r_count %u p_count %u "
+		  "total_size %d max_section_size %d",
+		  r_count, p_count, total_size, max_section_size);
+	log_plock(ls, "store_plocks r_count %u p_count %u "
+		  "total_size %d max_section_size %d",
+		  r_count, p_count, total_size, max_section_size);
 
 	attr.creationFlags = SA_CKPT_WR_ALL_REPLICAS;
 	attr.checkpointSize = total_size;
@@ -1877,20 +1893,20 @@ void store_plocks(struct lockspace *ls)
  open_retry:
 	rv = saCkptCheckpointOpen(system_ckpt_handle, &name,&attr,flags,0,&h);
 	if (rv == SA_AIS_ERR_TRY_AGAIN) {
-		log_group(ls, "store_plocks: ckpt open retry");
+		log_group(ls, "store_plocks ckpt open retry");
 		sleep(1);
 		goto open_retry;
 	}
 	if (rv == SA_AIS_ERR_EXIST) {
-		log_group(ls, "store_plocks: ckpt already exists");
+		log_group(ls, "store_plocks ckpt already exists");
 		return;
 	}
 	if (rv != SA_AIS_OK) {
-		log_error("store_plocks: ckpt open error %d %s", rv, ls->name);
+		log_error("store_plocks ckpt open error %d %s", rv, ls->name);
 		return;
 	}
 
-	log_group(ls, "store_plocks: open ckpt handle %llx",
+	log_group(ls, "store_plocks open ckpt handle %llx",
 		  (unsigned long long)h);
 	ls->plock_ckpt_handle = (uint64_t) h;
 
@@ -1935,20 +1951,24 @@ void store_plocks(struct lockspace *ls)
 
 		pack_section_buf(ls, r);
 
-		log_plock(ls, "store_plocks: section size %u id %u \"%s\"",
+		if (!r_num_first)
+			r_num_first = r->number;
+		r_num_last = r->number;
+
+		log_plock(ls, "store_plocks section size %u id %u \"%s\"",
 			  section_len, section_id.idLen, buf);
 
 	 create_retry:
 		rv = saCkptSectionCreate(h, &section_attr, &section_buf,
 					 section_len);
 		if (rv == SA_AIS_ERR_TRY_AGAIN) {
-			log_group(ls, "store_plocks: ckpt create retry");
+			log_group(ls, "store_plocks ckpt create retry");
 			sleep(1);
 			goto create_retry;
 		}
 		if (rv == SA_AIS_ERR_EXIST) {
 			/* this shouldn't happen in general */
-			log_group(ls, "store_plocks: clearing old ckpt");
+			log_group(ls, "store_plocks clearing old ckpt");
 			/* do we need this close or will the close in
 			   the unlink function be ok? */
 			saCkptCheckpointClose(h);
@@ -1956,19 +1976,36 @@ void store_plocks(struct lockspace *ls)
 			goto open_retry;
 		}
 		if (rv != SA_AIS_OK) {
-			log_error("store_plocks: ckpt section create err %d %s",
+			log_error("store_plocks ckpt section create err %d %s",
 				  rv, ls->name);
 			break;
 		}
 	}
  out:
-	return;
+	*sig = (0xFFFFFFFF & r_num_first) ^ (0xFFFFFFFF & r_num_last) ^
+	       r_count ^ p_count;
+
+	log_group(ls, "store_plocks first %llu last %llu r_count %u "
+		  "p_count %u sig %x",
+		  (unsigned long long)r_num_first,
+		  (unsigned long long)r_num_last,
+		  r_count, p_count, *sig);
+	log_plock(ls, "store_plocks first %llu last %llu r_count %u "
+		  "p_count %u sig %x",
+		  (unsigned long long)r_num_first,
+		  (unsigned long long)r_num_last,
+		  r_count, p_count, *sig);
+
+	ls->checkpoint_r_num_first = r_num_first;
+	ls->checkpoint_r_num_last = r_num_last;
+	ls->checkpoint_r_count = r_count;
+	ls->checkpoint_p_count = p_count;
 }
 
 /* called by a node that's just been added to the group to get existing plock
    state */
 
-void retrieve_plocks(struct lockspace *ls)
+void retrieve_plocks(struct lockspace *ls, uint32_t *sig)
 {
 	SaCkptCheckpointHandleT h;
 	SaCkptSectionIterationHandleT itr;
@@ -1977,9 +2014,11 @@ void retrieve_plocks(struct lockspace *ls)
 	SaNameT name;
 	SaAisErrorT rv;
 	char buf[SECTION_NAME_LEN];
-	int len;
+	int len, lock_count;
+	uint32_t r_count = 0, p_count = 0;
+	uint64_t r_num, r_num_first = 0, r_num_last = 0;
 
-	if (!cfgd_enable_plock)
+	if (!cfgd_enable_plock || ls->disable_plock)
 		return;
 
 	log_group(ls, "retrieve_plocks");
@@ -1992,12 +2031,12 @@ void retrieve_plocks(struct lockspace *ls)
 	rv = saCkptCheckpointOpen(system_ckpt_handle, &name, NULL,
 				  SA_CKPT_CHECKPOINT_READ, 0, &h);
 	if (rv == SA_AIS_ERR_TRY_AGAIN) {
-		log_group(ls, "retrieve_plocks: ckpt open retry");
+		log_group(ls, "retrieve_plocks ckpt open retry");
 		sleep(1);
 		goto open_retry;
 	}
 	if (rv != SA_AIS_OK) {
-		log_error("retrieve_plocks: ckpt open error %d %s",
+		log_error("retrieve_plocks ckpt open error %d %s",
 			  rv, ls->name);
 		return;
 	}
@@ -2005,12 +2044,12 @@ void retrieve_plocks(struct lockspace *ls)
  init_retry:
 	rv = saCkptSectionIterationInitialize(h, SA_CKPT_SECTIONS_ANY, 0, &itr);
 	if (rv == SA_AIS_ERR_TRY_AGAIN) {
-		log_group(ls, "retrieve_plocks: ckpt iterinit retry");
+		log_group(ls, "retrieve_plocks ckpt iterinit retry");
 		sleep(1);
 		goto init_retry;
 	}
 	if (rv != SA_AIS_OK) {
-		log_error("retrieve_plocks: ckpt iterinit error %d %s",
+		log_error("retrieve_plocks ckpt iterinit error %d %s",
 			  rv, ls->name);
 		goto out;
 	}
@@ -2021,12 +2060,12 @@ void retrieve_plocks(struct lockspace *ls)
 		if (rv == SA_AIS_ERR_NO_SECTIONS)
 			break;
 		if (rv == SA_AIS_ERR_TRY_AGAIN) {
-			log_group(ls, "retrieve_plocks: ckpt iternext retry");
+			log_group(ls, "retrieve_plocks ckpt iternext retry");
 			sleep(1);
 			goto next_retry;
 		}
 		if (rv != SA_AIS_OK) {
-			log_error("retrieve_plocks: ckpt iternext error %d %s",
+			log_error("retrieve_plocks ckpt iternext error %d %s",
 				  rv, ls->name);
 			goto out_it;
 		}
@@ -2043,19 +2082,19 @@ void retrieve_plocks(struct lockspace *ls)
 		memset(&buf, 0, sizeof(buf));
 		snprintf(buf, SECTION_NAME_LEN, "%s", desc.sectionId.id);
 
-		log_plock(ls, "retrieve_plocks: section size %llu id %u \"%s\"",
+		log_plock(ls, "retrieve_plocks section size %llu id %u \"%s\"",
 			  (unsigned long long)iov.dataSize, iov.sectionId.idLen,
 			  buf);
 
 	 read_retry:
 		rv = saCkptCheckpointRead(h, &iov, 1, NULL);
 		if (rv == SA_AIS_ERR_TRY_AGAIN) {
-			log_group(ls, "retrieve_plocks: ckpt read retry");
+			log_group(ls, "retrieve_plocks ckpt read retry");
 			sleep(1);
 			goto read_retry;
 		}
 		if (rv != SA_AIS_OK) {
-			log_error("retrieve_plocks: ckpt read error %d %s",
+			log_error("retrieve_plocks ckpt read error %d %s",
 				  rv, ls->name);
 			goto out_it;
 		}
@@ -2064,24 +2103,47 @@ void retrieve_plocks(struct lockspace *ls)
 		   no locks, which exist in ownership mode; the resource
 		   name and owner come from the section id */
 
-		log_plock(ls, "retrieve_plocks: ckpt read %llu bytes",
+		log_plock(ls, "retrieve_plocks ckpt read %llu bytes",
 			  (unsigned long long)iov.readSize);
 		section_len = iov.readSize;
 
 		if (section_len % sizeof(struct pack_plock)) {
-			log_error("retrieve_plocks: bad section len %d %s",
+			log_error("retrieve_plocks bad section len %d %s",
 				  section_len, ls->name);
 			continue;
 		}
 
+		r_num = 0;
+		lock_count = 0;
+
 		unpack_section_buf(ls, (char *)desc.sectionId.id,
-				   desc.sectionId.idLen);
+				   desc.sectionId.idLen, &r_num, &lock_count);
+		r_count++;
+		p_count += lock_count;
+
+		if (!r_num_first)
+			r_num_first = r_num;
+		r_num_last = r_num;
 	}
 
  out_it:
 	saCkptSectionIterationFinalize(itr);
  out:
 	saCkptCheckpointClose(h);
+
+	*sig = (0xFFFFFFFF & r_num_first) ^ (0xFFFFFFFF & r_num_last)
+	       ^ r_count ^ p_count;
+
+	log_group(ls, "retrieve_plocks first %llu last %llu r_count %u "
+		  "p_count %u sig %x",
+		  (unsigned long long)r_num_first,
+		  (unsigned long long)r_num_last,
+		  r_count, p_count, *sig);
+	log_plock(ls, "retrieve_plocks first %llu last %llu r_count %u "
+		  "p_count %u sig %x",
+		  (unsigned long long)r_num_first,
+		  (unsigned long long)r_num_last,
+		  r_count, p_count, *sig);
 }
 
 /* Called when a node has failed, or we're unmounting.  For a node failure, we
@@ -2095,7 +2157,7 @@ void purge_plocks(struct lockspace *ls, int nodeid, int unmount)
 	struct resource *r, *r2;
 	int purged = 0;
 
-	if (!cfgd_enable_plock)
+	if (!cfgd_enable_plock || ls->disable_plock)
 		return;
 
 	list_for_each_entry_safe(r, r2, &ls->plock_resources, list) {

