Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=8b6226997eecb355…
Commit: 8b6226997eecb35560fd32fcb571478971024bd1
Parent: f0b3e05addf7dfd3584904ff1f094b02664e0efd
Author: David Teigland <teigland@redhat.com>
AuthorDate: Fri Aug 28 14:40:28 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Fri Aug 28 14:43:58 2015 -0500
lvmlockd: also use vg name in set_vg_info
Include both the VG uuid and name in the lvmetad
set_vg_info message. This works around an obscure
problem where the VG uuid held by lvmlockd is stale:
one host removes a dlm VG, then creates a new VG
with the same name. If the dlm lockspace for the
original VG was never stopped on another host, that
other host keeps using the old uuid in its lvmetad
set_vg_info messages. (That can be corrected with a
larger change, but this is an effective workaround.)
---
daemons/lvmlockd/lvmlockd-core.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 14705b0..f2a01dc 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -1100,7 +1100,7 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
ls->name, r->name, r_version);
if (!ls->vg_uuid[0] || !strcmp(ls->vg_uuid, "none"))
- uuid = ls->name;
+ uuid = (char *)"none";
else
uuid = ls->vg_uuid;
@@ -1108,6 +1108,7 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
reply = daemon_send_simple(lvmetad_handle, "set_vg_info",
"token = %s", "skip",
"uuid = %s", uuid,
+ "name = %s", ls->vg_name,
"version = %d", (int)r_version,
NULL);
pthread_mutex_unlock(&lvmetad_mutex);
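For reference, the res_lock() notification path after both hunks are
applied looks roughly like this (a sketch consolidated from the diff
above; the lvmetad_mutex lock call is assumed from the matching
unlock in the hunk):

	if (!ls->vg_uuid[0] || !strcmp(ls->vg_uuid, "none"))
		uuid = (char *)"none";	/* uuid unknown: let the name identify the VG */
	else
		uuid = ls->vg_uuid;

	pthread_mutex_lock(&lvmetad_mutex);
	reply = daemon_send_simple(lvmetad_handle, "set_vg_info",
				   "token = %s", "skip",
				   "uuid = %s", uuid,
				   "name = %s", ls->vg_name,	/* new: disambiguates a reused VG name */
				   "version = %d", (int)r_version,
				   NULL);
	pthread_mutex_unlock(&lvmetad_mutex);

Before this patch, an unknown uuid fell back to the lockspace name in
the uuid field; now an unknown uuid is sent as "none" and the VG name
travels in its own field, so lvmetad can match by name when a VG name
has been reused.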
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=09b2649c5fe4259a…
Commit: 09b2649c5fe4259a38265b80f4038b7481d03db2
Parent: cc17210bce2cf08015e19caad3bc6a8307c841c8
Author: David Teigland <teigland@redhat.com>
AuthorDate: Fri Aug 28 11:37:55 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Fri Aug 28 11:38:26 2015 -0500
man lvmlockd: various improvements
---
man/lvmlockd.8.in | 177 +++++++++++++++++++++++++++++++++++------------------
1 files changed, 117 insertions(+), 60 deletions(-)
diff --git a/man/lvmlockd.8.in b/man/lvmlockd.8.in
index 1daea18..4e7883a 100644
--- a/man/lvmlockd.8.in
+++ b/man/lvmlockd.8.in
@@ -107,7 +107,6 @@ On all hosts running lvmlockd, configure lvm.conf:
.nf
locking_type = 1
use_lvmlockd = 1
-use_lvmetad = 1
.fi
.I sanlock
@@ -138,7 +137,7 @@ vgcreate \-\-shared <vgname> <devices>
The shared option sets the VG lock type to sanlock or dlm depending on
which lock manager is running. LVM commands will perform locking for the
-VG using lvmlockd.
+VG using lvmlockd. lvmlockd will use the chosen lock manager.
.SS 6. start VG on all hosts
@@ -222,9 +221,7 @@ If the lock manager for the lock type is not available (e.g. not started
or failed), lvmlockd is unable to acquire locks for LVM commands. LVM
commands that only read the VG will generally be allowed to continue
without locks in this case (with a warning). Commands to modify or
-activate the VG will fail without the necessary locks. Maintaining a
-properly running lock manager requires knowledge covered in separate
-documentation.
+activate the VG will fail without the necessary locks.
.I "local VG"
@@ -250,10 +247,10 @@ clvmd for clustering. See below for converting a clvm VG to a lockd VG.
.SS lockd VGs from hosts not using lvmlockd
Only hosts that use lockd VGs should be configured to run lvmlockd.
-However, devices with lockd VGs may be visible from hosts not using
-lvmlockd. From a host not using lvmlockd, visible lockd VGs are ignored
-in the same way as foreign VGs, i.e. those with a foreign system ID, see
-.BR lvmsystemid (7).
+However, shared devices used by lockd VGs may be visible from hosts not
+using lvmlockd. From a host not using lvmlockd, visible lockd VGs are
+ignored in the same way as foreign VGs (see
+.BR lvmsystemid (7).)
The \-\-shared option for reporting and display commands causes lockd VGs
to be displayed on a host not using lvmlockd, like the \-\-foreign option
@@ -303,13 +300,13 @@ LVM commands request locks from clvmd to use the VG.
.SS using lockd VGs
-There are some special considerations to be aware of when using lockd VGs.
+There are some special considerations when using lockd VGs.
-When use_lvmlockd is first enabled, and before the first lockd VG is
-created, no global lock will exist. In this initial state, LVM commands
-try and fail to acquire the global lock, producing a warning, and some
-commands are disallowed. Once the first lockd VG is created, the global
-lock will be available, and LVM will be fully operational.
+When use_lvmlockd is first enabled in lvm.conf, and before the first lockd
+VG is created, no global lock will exist. In this initial state, LVM
+commands try and fail to acquire the global lock, producing a warning, and
+some commands are disallowed. Once the first lockd VG is created, the
+global lock will be available, and LVM will be fully operational.
When a new lockd VG is created, its lockspace is automatically started on
the host that creates it. Other hosts need to run 'vgchange
@@ -328,8 +325,8 @@ See the following section for a full description of starting and stopping.
Starting a lockd VG (vgchange \-\-lock\-start) causes the lock manager to
start (join) the lockspace for the VG on the host where it is run. This
makes locks for the VG available to LVM commands on the host. Before a VG
-is started, only LVM commands that read/display the VG without locks are
-allowed.
+is started, only LVM commands that read/display the VG are allowed to
+continue without locks (and with a warning).
Stopping a lockd VG (vgchange \-\-lock\-stop) causes the lock manager to
stop (leave) the lockspace for the VG on the host where it is run. This
@@ -369,13 +366,11 @@ vgchange \-\-lock\-stop <vgname> ...
To make vgchange not wait for start to complete:
.br
-vgchange \-\-lock\-start \-\-lock\-opt nowait
-.br
-vgchange \-\-lock\-start \-\-lock\-opt nowait <vgname>
+vgchange \-\-lock\-start \-\-lock\-opt nowait ...
-To stop all lockspaces and wait for all to complete:
+lvmlockd can be asked directly to stop all lockspaces:
.br
-lvmlockctl \-\-stop\-lockspaces \-\-wait
+lvmlockctl \-\-stop\-lockspaces
To start only selected lockd VGs, use the lvm.conf
activation/lock_start_list. When defined, only VG names in this list are
@@ -455,8 +450,8 @@ acquired.
A VG lock is associated with each VG. The VG lock is acquired in shared
mode to read the VG and in exclusive mode to change the VG (modify the VG
-metadata). This lock serializes modifications to a VG with all other LVM
-commands accessing the VG from all hosts.
+metadata or activate LVs). This lock serializes access to a VG with all
+other LVM commands accessing the VG from all hosts.
The command 'vgs' will not only acquire the GL lock to read the list of
all VG names, but will acquire the VG lock for each VG prior to reading
@@ -537,12 +532,13 @@ There are some special cases related to using a sanlock VG.
vgremove of a sanlock VG will fail if other hosts have the VG started.
Run vgchange \-\-lock-stop <vgname> on all other hosts before vgremove.
-
(It may take several seconds before vgremove recognizes that all hosts
have stopped.)
A sanlock VG contains a hidden LV called "lvmlock" that holds the sanlock
-locks. vgreduce cannot yet remove the PV holding the lvmlockd LV.
+locks. vgreduce cannot yet remove the PV holding the lvmlockd LV. To
+remove this PV, change the VG lock type to "none", run vgreduce, then
+change the VG lock type back to "sanlock".
To place the lvmlock LV on a specific device, create the VG with only that
device, then use vgextend to add other devices.
@@ -570,9 +566,10 @@ deactivated, or activated exclusively to run lvextend.
.SS recover from lost PV holding sanlock locks
-A number of special manual steps must be performed to restore sanlock
-locks if the PV holding the locks is lost. Contact the LVM group for
-help with this process.
+The general approach is to change the VG lock type to "none", and then
+change the lock type back to "sanlock". This recreates the internal
+lvmlock LV and the necessary locks on it. Additional steps may be
+required to deal with the missing PV.
.SS locking system failures
@@ -594,54 +591,103 @@ the dlm/corosync recovery process is complete.
.B sanlock lease storage failure
If a host loses access to the device holding a VG's locks, sanlock cannot
-renew the VG's lockspace lease for those locks. After some time, the
-lease will expire, and locks held by the host can be acquired by other
-hosts.
+renew its lease for the VG's locks. After some time, the lease will
+expire, and locks held by the host can be acquired by other hosts.
If no LVs are active in the VG, the lockspace with an expiring lease will
be shut down, and errors will be reported when trying to use the VG. Use
the lvmlockctl \-\-drop command to clear the stale lockspace from
lvmlockd.
-If the VG has active LVs, the LVs must be quickly deactivated before the
-lockspace lease expires. After all LVs are deactivated, run lvmlockctl
-\-\-drop <vgname> to clear the expiring lockspace from lvmlockd. If all
-LVs in the VG are not deactivated within about 40 seconds, sanlock will
-reset the host using the local watchdog. The host reset is ultimately a
-severe form of "deactivating" LVs before they can be activated on other
-hosts. The reset is considered a better alternative than having LVs used
-by multiple hosts at once, which could easily damage or destroy their
-content. A future enhancement may automatically attempt to deactivate LVs
-before the lockspace lease expires.
+If the VG has active LVs when the lock storage is lost, the LVs must be
+quickly deactivated before the lockspace lease expires. After all LVs are
+deactivated, run lvmlockctl \-\-drop <vgname> to clear the expiring
+lockspace from lvmlockd. If all LVs in the VG are not deactivated within
+about 40 seconds, sanlock will reset the host using the local watchdog.
+The machine reset is effectively a severe form of "deactivating" LVs
+before they can be activated on other hosts. The reset is considered a
+better alternative than having LVs used by multiple hosts at once, which
+could easily damage or destroy their content. A future enhancement may
+automatically attempt to forcibly deactivate LVs before the lockspace
+lease expires.
.B sanlock daemon failure
If the sanlock daemon fails or exits while a lockspace is started, the
-local watchdog will reset the host.
+local watchdog will reset the host. This is necessary to protect any
+application resources that depend on sanlock leases which will be lost
+without sanlock running.
.SS changing dlm cluster name
-When a dlm VG is created, the cluster name is saved in the VG metadata for
-the new VG. To use the VG, a host must be in the named cluster. If the
-cluster name is changed, or the VG is moved to a different cluster, the
-cluster name for the dlm VG must be changed. To do this:
+When a dlm VG is created, the cluster name is saved in the VG metadata.
+To use the VG, a host must be in the named dlm cluster. If the dlm
+cluster name changes, or the VG is moved to a new cluster, the dlm cluster
+name saved in the VG must also be changed.
+
+To see the dlm cluster name saved in the VG, use the command:
+.br
+vgs -o+locktype,lockargs <vgname>
+
+To change the dlm cluster name in the VG when the VG is still used by the
+original cluster:
+
+.IP \[bu] 2
+Stop the VG on all hosts:
+.br
+vgchange --lock-stop <vgname>
+
+.IP \[bu] 2
+Change the VG lock type to none:
+.br
+vgchange \-\-lock\-type none <vgname>
+
+.IP \[bu] 2
+Change the dlm cluster name on the host or move the VG to the new cluster.
+The new dlm cluster must now be active on the host. Verify the new name
+by:
+.br
+cat /sys/kernel/config/dlm/cluster/cluster_name
+
+.IP \[bu] 2
+Change the VG lock type back to dlm which sets the new cluster name:
+.br
+vgchange \-\-lock\-type dlm <vgname>
+
+.IP \[bu] 2
+Start the VG on hosts to use it:
+.br
+vgchange --lock-start <vgname>
+
+.P
+
+To change the dlm cluster name in the VG when the dlm cluster name has
+already changed, or the VG has already moved to a different cluster:
-1. Ensure the VG is not being used by any hosts.
+.IP \[bu] 2
+Ensure the VG is not being used by any hosts.
-2. The new cluster must be active on the node making the change.
+.IP \[bu] 2
+The new dlm cluster must be active on the host making the change.
+The current dlm cluster name can be seen by:
.br
- The current dlm cluster name can be seen by:
+cat /sys/kernel/config/dlm/cluster/cluster_name
+
+.IP \[bu] 2
+Change the VG lock type to none:
.br
- cat /sys/kernel/config/dlm/cluster/cluster_name
+vgchange \-\-lock\-type none \-\-force <vgname>
-3. Change the VG lock type to none:
+.IP \[bu] 2
+Change the VG lock type back to dlm which sets the new cluster name:
.br
- vgchange \-\-lock\-type none \-\-force <vgname>
+vgchange \-\-lock\-type dlm <vgname>
-4. Change the VG lock type back to dlm which sets the new cluster name:
+.IP \[bu] 2
+Start the VG on hosts to use it:
.br
- vgchange \-\-lock\-type dlm <vgname>
+vgchange --lock-start <vgname>
.SS changing a local VG to a lockd VG
@@ -654,11 +700,21 @@ Change a local VG to a lockd VG with the command:
.br
vgchange \-\-lock\-type sanlock|dlm <vgname>
-Start the VG on any hosts that need to use it:
+Start the VG on hosts to use it:
.br
vgchange \-\-lock\-start <vgname>
+.SS changing a lockd VG to a local VG
+
+Stop the lockd VG on all hosts, then run:
+.br
+vgchange \-\-lock\-type none <vgname>
+
+To change a VG from one lockd type to another (i.e. between sanlock and
+dlm), first change it to a local VG, then to the new type.
+
+
.SS changing a clvm VG to a lockd VG
All LVs must be inactive to change the lock type.
@@ -669,8 +725,8 @@ change a clvm VG to a local VG with the command:
vgchange \-cn <vgname>
If the clvm cluster is no longer running on any nodes, then extra options
-can be used forcibly make the VG local. Caution: this is only safe if all
-nodes have stopped using the VG:
+can be used to forcibly make the VG local. Caution: this is only safe if
+all nodes have stopped using the VG:
vgchange \-\-config 'global/locking_type=0 global/use_lvmlockd=0'
.RS
@@ -683,8 +739,6 @@ to a lockd VG".
.SS limitations of lockd VGs
-lvmlockd currently requires using lvmetad and lvmpolld.
-
Things that do not yet work in lockd VGs:
.br
\[bu]
@@ -744,6 +798,9 @@ on a remote host. (The activation option 'l' is not used.)
lvmlockd works with thin and cache pools and LVs.
.IP \[bu] 2
+lvmlockd works with lvmetad.
+
+.IP \[bu] 2
lvmlockd saves the cluster name for a lockd VG using dlm. Only hosts in
the matching cluster can use the VG.
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=e5d99cb9e685ba16…
Commit: e5d99cb9e685ba167f10413694e5d9d34fe90bb0
Parent: 3c1924c9c0d45cbce67aa3c1dad70737a6956cc3
Author: David Teigland <teigland@redhat.com>
AuthorDate: Thu Aug 27 16:34:51 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 16:34:51 2015 -0500
lvmlockd: VG lock can be used when changing lock type
This bit was missed in commit de4db6a, which
added support for changing lock_type.
---
tools/vgchange.c | 9 ---------
1 files changed, 0 insertions(+), 9 deletions(-)
diff --git a/tools/vgchange.c b/tools/vgchange.c
index f57fa15..6d86051 100644
--- a/tools/vgchange.c
+++ b/tools/vgchange.c
@@ -1048,15 +1048,6 @@ static int _lockd_vgchange(struct cmd_context *cmd, int argc, char **argv)
cmd->lockd_vg_disable = 1;
/*
- * In most cases, lockd_vg does not apply when changing lock type.
- * (We don't generally allow changing *from* lockd type yet.)
- * lockd_vg could be called within _vgchange_locktype as needed.
- */
-
- if (arg_is_set(cmd, locktype_ARG))
- cmd->lockd_vg_disable = 1;
-
- /*
* Changing system_id or lock_type must only be done on explicitly
* named vgs.
*/
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=3c1924c9c0d45cbc…
Commit: 3c1924c9c0d45cbce67aa3c1dad70737a6956cc3
Parent: e4d5d05119f5df66121134621d564b8c468ed5f1
Author: David Teigland <teigland@redhat.com>
AuthorDate: Thu Aug 27 16:00:24 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 16:00:24 2015 -0500
lvmlockd: fix starting dlm global lockspace
If the dlm wasn't running, lvmlockd failed to
recognize that the global lockspace had failed to
start, so future attempts to start the dlm global
lockspace would do nothing, thinking it was
already running.
---
daemons/lvmlockd/lvmlockd-core.c | 13 ++++++++-----
daemons/lvmlockd/lvmlockd-dlm.c | 4 ----
daemons/lvmlockd/lvmlockd-internal.h | 2 +-
3 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index c2c3fef..14705b0 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -2403,6 +2403,9 @@ out_act:
ls->drop_vg = drop_vg;
pthread_mutex_unlock(&lockspaces_mutex);
+ if (gl_use_dlm && !strcmp(ls->name, gl_lsname_dlm))
+ dlm_gl_lockspace_running = 0;
+
/* worker_thread will join this thread, and free the ls */
pthread_mutex_lock(&worker_mutex);
worker_wake = 1;
@@ -2584,21 +2587,21 @@ static int add_dlm_global_lockspace(struct action *act)
{
int rv;
- if (gl_running_dlm)
+ if (dlm_gl_lockspace_running)
return -EEXIST;
- gl_running_dlm = 1;
+ dlm_gl_lockspace_running = 1;
/*
* There's a short period after which a previous gl lockspace thread
- * has set gl_running_dlm = 0, but before its ls struct has been
- * deleted, during which this add_lockspace_thread() can fail with
+ * has set dlm_gl_lockspace_running = 0, but before its ls struct has
+ * been deleted, during which this add_lockspace_thread() can fail with
* -EAGAIN.
*/
rv = add_lockspace_thread(gl_lsname_dlm, NULL, NULL, LD_LM_DLM, NULL, act);
if (rv < 0) {
log_error("add_dlm_global_lockspace add_lockspace_thread %d", rv);
- gl_running_dlm = 0;
+ dlm_gl_lockspace_running = 0;
}
return rv;
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index 676c944..6640821 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -260,10 +260,6 @@ int lm_rem_lockspace_dlm(struct lockspace *ls, int free_vg)
out:
free(lmd);
ls->lm_data = NULL;
-
- if (!strcmp(ls->name, gl_lsname_dlm))
- gl_running_dlm = 0;
-
return 0;
}
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index efb04f9..9d360ed 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -313,7 +313,7 @@ static inline int list_empty(const struct list_head *head)
* or when disable_gl matches.
*/
-EXTERN int gl_running_dlm;
+EXTERN int dlm_gl_lockspace_running;
EXTERN int gl_type_static;
EXTERN int gl_use_dlm;
EXTERN int gl_use_sanlock;
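In effect, the patch renames gl_running_dlm to
dlm_gl_lockspace_running and moves the point where the flag is
cleared from lm_rem_lockspace_dlm() into the lockspace thread's exit
path, so a lockspace that fails to start also clears it. A sketch of
the resulting lifecycle, condensed from the hunks above (the
log_error call is omitted):

	static int add_dlm_global_lockspace(struct action *act)
	{
		int rv;

		if (dlm_gl_lockspace_running)
			return -EEXIST;
		dlm_gl_lockspace_running = 1;

		rv = add_lockspace_thread(gl_lsname_dlm, NULL, NULL, LD_LM_DLM, NULL, act);
		if (rv < 0)
			dlm_gl_lockspace_running = 0;	/* start failed; allow a retry */
		return rv;
	}

	/* in the lockspace thread's exit path (out_act), whether the
	   lockspace stopped normally or failed to start: */
	if (gl_use_dlm && !strcmp(ls->name, gl_lsname_dlm))
		dlm_gl_lockspace_running = 0;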
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=e4d5d05119f5df66…
Commit: e4d5d05119f5df66121134621d564b8c468ed5f1
Parent: e3f1b1dccb4db799ccbb054ad15a825e3a3eccba
Author: David Teigland <teigland@redhat.com>
AuthorDate: Thu Aug 27 15:23:14 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 15:23:14 2015 -0500
lvmlockd: remove list of inactive lockspaces
This list was only used to return two flags
indicating specific reasons for a lock failure
(the lockspace had been stopped, or had failed to
start) so that the command could print a more
specific error message.
Remove the list given its limited usefulness, the
ease with which it could become inaccurate, and
the misleading error messages it was causing. The
error conditions it was meant to explain could be
reported differently.
---
daemons/lvmlockd/lvmlockd-core.c | 136 ++--------------------------------
daemons/lvmlockd/lvmlockd-internal.h | 3 -
lib/locking/lvmlockd.c | 78 -------------------
lib/locking/lvmlockd.h | 2 -
4 files changed, 8 insertions(+), 211 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index ac2d224..c2c3fef 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -185,18 +185,9 @@ static int restart_fds[2];
* Each lockspace has its own thread to do locking.
* The lockspace thread makes synchronous lock requests to dlm/sanlock.
* Every vg with a lockd type, i.e. "dlm", "sanlock", should be on this list.
- *
- * lockspaces_inactive holds old ls structs for vgs that have been
- * stopped, or for vgs that failed to start. The old ls structs
- * are removed from the inactive list and freed when a new ls with
- * the same name is started and added to the standard lockspaces list.
- * Keeping this bit of "history" for the ls allows us to return a
- * more informative error message if a vg lock request is made for
- * an ls that has been stopped or failed to start.
*/
static pthread_mutex_t lockspaces_mutex;
static struct list_head lockspaces;
-static struct list_head lockspaces_inactive;
/*
* Client thread reads client requests and writes client results.
@@ -264,7 +255,6 @@ static int alloc_new_structs; /* used for initializing in setup_structs */
static int add_lock_action(struct action *act);
static int str_to_lm(const char *str);
-static int clear_lockspace_inactive(char *name);
static int setup_dump_socket(void);
static void send_dump_buf(int fd, int dump_len);
static int dump_info(int *dump_len);
@@ -737,8 +727,6 @@ static const char *op_str(int x)
return "running_lm";
case LD_OP_FIND_FREE_LOCK:
return "find_free_lock";
- case LD_OP_FORGET_VG_NAME:
- return "forget_vg_name";
case LD_OP_KILL_VG:
return "kill_vg";
case LD_OP_DROP_VG:
@@ -2009,13 +1997,6 @@ static int other_sanlock_vgs_exist(struct lockspace *ls_rem)
{
struct lockspace *ls;
- list_for_each_entry(ls, &lockspaces_inactive, list) {
- if (ls->lm_type != LD_LM_SANLOCK)
- continue;
- log_debug("other sanlock vg exists inactive %s", ls->name);
- return 1;
- }
-
list_for_each_entry(ls, &lockspaces, list) {
if (ls->lm_type != LD_LM_SANLOCK)
continue;
@@ -2422,10 +2403,7 @@ out_act:
ls->drop_vg = drop_vg;
pthread_mutex_unlock(&lockspaces_mutex);
- /*
- * worker_thread will join this thread, and free the
- * ls or move it to lockspaces_inactive.
- */
+ /* worker_thread will join this thread, and free the ls */
pthread_mutex_lock(&worker_mutex);
worker_wake = 1;
pthread_cond_signal(&worker_cond);
@@ -2580,8 +2558,6 @@ static int add_lockspace_thread(const char *ls_name,
if (act)
list_add(&act->list, &ls->actions);
- clear_lockspace_inactive(ls->name);
-
list_add_tail(&ls->list, &lockspaces);
pthread_mutex_unlock(&lockspaces_mutex);
@@ -2826,65 +2802,6 @@ static int count_lockspace_starting(uint32_t client_id)
return count;
}
-/* lockspaces_mutex is held */
-static struct lockspace *find_lockspace_inactive(char *ls_name)
-{
- struct lockspace *ls;
-
- list_for_each_entry(ls, &lockspaces_inactive, list) {
- if (!strcmp(ls->name, ls_name))
- return ls;
- }
-
- return NULL;
-}
-
-/* lockspaces_mutex is held */
-static int clear_lockspace_inactive(char *ls_name)
-{
- struct lockspace *ls;
-
- ls = find_lockspace_inactive(ls_name);
- if (ls) {
- list_del(&ls->list);
- free(ls);
- return 1;
- }
-
- return 0;
-}
-
-static int forget_lockspace_inactive(char *vg_name)
-{
- char ls_name[MAX_NAME+1];
- int found;
-
- memset(ls_name, 0, sizeof(ls_name));
- vg_ls_name(vg_name, ls_name);
-
- log_debug("forget_lockspace_inactive %s", ls_name);
-
- pthread_mutex_lock(&lockspaces_mutex);
- found = clear_lockspace_inactive(ls_name);
- pthread_mutex_unlock(&lockspaces_mutex);
-
- if (found)
- return 0;
- return -ENOENT;
-}
-
-static void free_lockspaces_inactive(void)
-{
- struct lockspace *ls, *safe;
-
- pthread_mutex_lock(&lockspaces_mutex);
- list_for_each_entry_safe(ls, safe, &lockspaces_inactive, list) {
- list_del(&ls->list);
- free(ls);
- }
- pthread_mutex_unlock(&lockspaces_mutex);
-}
-
/*
* Loop through all lockspaces, and:
* - if do_stop is set, stop any that are not stopped
@@ -2958,15 +2875,14 @@ static int for_each_lockspace(int do_stop, int do_free, int do_force)
pthread_join(ls->thread, NULL);
list_del(&ls->list);
+ /* FIXME: will free_vg ever not be set? */
- /* In future we may need to free ls->actions here */
- free_ls_resources(ls);
-
- if (ls->free_vg)
+ if (ls->free_vg) {
+ /* In future we may need to free ls->actions here */
+ free_ls_resources(ls);
free(ls);
- else
- list_add(&ls->list, &lockspaces_inactive);
- free_count++;
+ free_count++;
+ }
} else {
need_free++;
}
@@ -3486,12 +3402,6 @@ static void client_send_result(struct client *cl, struct action *act)
if (act->flags & LD_AF_DUP_GL_LS)
strcat(result_flags, "DUP_GL_LS,");
- if (act->flags & LD_AF_INACTIVE_LS)
- strcat(result_flags, "INACTIVE_LS,");
-
- if (act->flags & LD_AF_ADD_LS_ERROR)
- strcat(result_flags, "ADD_LS_ERROR,");
-
if (act->flags & LD_AF_WARN_GL_REMOVED)
strcat(result_flags, "WARN_GL_REMOVED,");
@@ -3635,19 +3545,8 @@ static int add_lock_action(struct action *act)
pthread_mutex_lock(&lockspaces_mutex);
if (ls_name[0])
ls = find_lockspace_name(ls_name);
+ pthread_mutex_unlock(&lockspaces_mutex);
if (!ls) {
- int ls_inactive = 0;
- int ls_create_fail = 0;
-
- if (ls_name[0])
- ls = find_lockspace_inactive(ls_name);
- if (ls) {
- ls_inactive = 1;
- ls_create_fail = ls->create_fail;
- ls = NULL;
- }
- pthread_mutex_unlock(&lockspaces_mutex);
-
if (act->op == LD_OP_UPDATE && act->rt == LD_RT_VG) {
log_debug("lockspace not found ignored for vg update");
return -ENOLS;
@@ -3676,14 +3575,6 @@ static int add_lock_action(struct action *act)
log_debug("lockspace not found ignored for unlock");
return -ENOLS;
- } else if (act->op == LD_OP_LOCK && act->rt == LD_RT_VG && ls_inactive) {
- /* ls has been stopped or previously failed to start */
- log_debug("lockspace inactive create_fail %d %s",
- ls_create_fail, ls_name);
- act->flags |= LD_AF_INACTIVE_LS;
- if (ls_create_fail)
- act->flags |= LD_AF_ADD_LS_ERROR;
- return -ENOLS;
} else {
log_debug("lockspace not found %s", ls_name);
return -ENOLS;
@@ -3853,11 +3744,6 @@ static int str_to_op_rt(const char *req_name, int *op, int *rt)
*rt = LD_RT_VG;
return 0;
}
- if (!strcmp(req_name, "forget_vg_name")) {
- *op = LD_OP_FORGET_VG_NAME;
- *rt = LD_RT_VG;
- return 0;
- }
if (!strcmp(req_name, "kill_vg")) {
*op = LD_OP_KILL_VG;
*rt = LD_RT_VG;
@@ -4437,10 +4323,6 @@ static void client_recv_action(struct client *cl)
case LD_OP_DROP_VG:
rv = add_lock_action(act);
break;
- case LD_OP_FORGET_VG_NAME:
- act->result = forget_lockspace_inactive(act->vg_name);
- add_client_result(act);
- break;
default:
rv = -EINVAL;
};
@@ -5619,7 +5501,6 @@ static int main_loop(daemon_state *ds_arg)
strcpy(gl_lsname_dlm, S_NAME_GL_DLM);
INIT_LIST_HEAD(&lockspaces);
- INIT_LIST_HEAD(&lockspaces_inactive);
pthread_mutex_init(&lockspaces_mutex, NULL);
pthread_mutex_init(&pollfd_mutex, NULL);
pthread_mutex_init(&log_mutex, NULL);
@@ -5759,7 +5640,6 @@ static int main_loop(daemon_state *ds_arg)
}
for_each_lockspace_retry(DO_STOP, DO_FREE, DO_FORCE);
- free_lockspaces_inactive();
close_worker_thread();
close_client_thread();
closelog();
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 46ae67f..efb04f9 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -50,7 +50,6 @@ enum {
LD_OP_RENAME_FINAL,
LD_OP_RUNNING_LM,
LD_OP_FIND_FREE_LOCK,
- LD_OP_FORGET_VG_NAME,
LD_OP_KILL_VG,
LD_OP_DROP_VG,
};
@@ -101,8 +100,6 @@ struct client {
#define LD_AF_SEARCH_LS 0x00000200
#define LD_AF_WAIT_STARTING 0x00001000
#define LD_AF_DUP_GL_LS 0x00002000
-#define LD_AF_INACTIVE_LS 0x00004000
-#define LD_AF_ADD_LS_ERROR 0x00008000
#define LD_AF_ADOPT 0x00010000
#define LD_AF_WARN_GL_REMOVED 0x00020000
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 394657c..f3e2e2c 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -118,12 +118,6 @@ static void _flags_str_to_lockd_flags(const char *flags_str, uint32_t *lockd_fla
if (strstr(flags_str, "DUP_GL_LS"))
*lockd_flags |= LD_RF_DUP_GL_LS;
- if (strstr(flags_str, "INACTIVE_LS"))
- *lockd_flags |= LD_RF_INACTIVE_LS;
-
- if (strstr(flags_str, "ADD_LS_ERROR"))
- *lockd_flags |= LD_RF_ADD_LS_ERROR;
-
if (strstr(flags_str, "WARN_GL_REMOVED"))
*lockd_flags |= LD_RF_WARN_GL_REMOVED;
}
@@ -825,37 +819,6 @@ static int _free_vg_sanlock(struct cmd_context *cmd, struct volume_group *vg)
return ret;
}
-/*
- * Tell lvmlockd to forget about an old VG name.
- * lvmlockd remembers previous lockd VGs so that it can provide more
- * informative error messages (see INACTIVE_LS, ADD_LS_ERROR).
- *
- * If a new local VG is created with the same name as a previous lockd VG,
- * lvmlockd's memory of the previous lockd VG interferes (causes incorrect
- * lockd_vg failures).
- *
- * We could also remove the list of inactive (old) VG names from lvmlockd,
- * and then this function would not be needed, but this would also reduce
- * the ability to have helpful error messages.
- */
-
-static void _forget_vg_name(struct cmd_context *cmd, struct volume_group *vg)
-{
- daemon_reply reply;
-
- if (!_use_lvmlockd)
- return;
- if (!_lvmlockd_connected)
- return;
-
- reply = _lockd_send("forget_vg_name",
- "pid = %d", getpid(),
- "vg_name = %s", vg->name,
- NULL);
-
- daemon_reply_destroy(reply);
-}
-
/* vgcreate */
int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
@@ -863,7 +826,6 @@ int lockd_init_vg(struct cmd_context *cmd, struct volume_group *vg,
{
switch (get_lock_type_from_string(lock_type)) {
case LOCK_TYPE_NONE:
- _forget_vg_name(cmd, vg);
return 1;
case LOCK_TYPE_CLVM:
return 1;
@@ -1841,46 +1803,6 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
}
/*
- * An unused/previous lockspace for the VG was found.
- * This means it must be a lockd VG, not local. The
- * lockspace needs to be started to be used.
- */
- if ((result == -ENOLS) && (lockd_flags & LD_RF_INACTIVE_LS)) {
- if (!strcmp(mode, "un")) {
- ret = 1;
- goto out;
- } else if (!strcmp(mode, "sh")) {
- log_warn("VG %s lock skipped: lockspace is inactive", vg_name);
- ret = 1;
- goto out;
- } else {
- log_error("VG %s lock failed: lockspace is inactive", vg_name);
- ret = 0;
- goto out;
- }
- }
-
- /*
- * An unused lockspace for the VG was found. The previous
- * start of the lockspace failed, so we can print a more useful
- * error message.
- */
- if ((result == -ENOLS) && (lockd_flags & LD_RF_ADD_LS_ERROR)) {
- if (!strcmp(mode, "un")) {
- ret = 1;
- goto out;
- } else if (!strcmp(mode, "sh")) {
- log_warn("VG %s lock skipped: lockspace start error", vg_name);
- ret = 1;
- goto out;
- } else {
- log_error("VG %s lock failed: lockspace start error", vg_name);
- ret = 0;
- goto out;
- }
- }
-
- /*
* No lockspace for the VG was found. It may be a local
* VG that lvmlockd doesn't keep track of, or it may be
* a lockd VG that lvmlockd doesn't yet know about (it hasn't
diff --git a/lib/locking/lvmlockd.h b/lib/locking/lvmlockd.h
index 4bec782..51c2905 100644
--- a/lib/locking/lvmlockd.h
+++ b/lib/locking/lvmlockd.h
@@ -28,8 +28,6 @@
#define LD_RF_NO_GL_LS 0x00000002
#define LD_RF_WARN_GL_REMOVED 0x00000004
#define LD_RF_DUP_GL_LS 0x00000008
-#define LD_RF_INACTIVE_LS 0x00000010
-#define LD_RF_ADD_LS_ERROR 0x00000020
/* lockd_state flags */
#define LDST_EX 0x00000001
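With the inactive list gone, the -ENOLS path in add_lock_action()
reduces to roughly the following (a sketch condensed from the hunks
above; the other special cases are elided):

	pthread_mutex_lock(&lockspaces_mutex);
	if (ls_name[0])
		ls = find_lockspace_name(ls_name);
	pthread_mutex_unlock(&lockspaces_mutex);

	if (!ls) {
		if (act->op == LD_OP_UPDATE && act->rt == LD_RT_VG) {
			log_debug("lockspace not found ignored for vg update");
			return -ENOLS;
		}
		/* ... other cases that ignore a missing lockspace ... */
		log_debug("lockspace not found %s", ls_name);
		return -ENOLS;	/* no INACTIVE_LS/ADD_LS_ERROR detail any more */
	}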
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=58713d34dd71873a…
Commit: 58713d34dd71873ab97378e96d7ab0b8e9bda8ef
Parent: 32e22a00378192f226932258d9c5b55b073b4528
Author: David Teigland <teigland@redhat.com>
AuthorDate: Wed Aug 26 14:55:27 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 10:27:24 2015 -0500
lvmlockd: detect when dlm lvb is invalidated
The lvb content can be lost during dlm recovery,
so we need to detect when this happens and
revalidate.
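The detection relies on r_version only ever increasing during normal
operation: if the value read from the dlm lvb is lower than the
cached version, the lvb must have been invalidated (this happens
during recovery if the resource master leaves). A sketch of the
updated comparison in res_lock(), condensed from the diff below:

	/* lm_lock() has just read r_version from the dlm lvb */
	if ((r_version != r->version) || (!r->version && !r->version_zero_valid)) {
		if (r_version < r->version)
			log_debug("S %s R %s res_lock lvb invalid r_version %u prev %u",
				  ls->name, r->name, r_version, r->version);
		/* treat the cached state as stale and revalidate */
	}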
---
daemons/lvmlockd/lvmlockd-core.c | 13 ++++++++++++-
1 files changed, 12 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 3d41bd2..ac2d224 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -1057,7 +1057,18 @@ static int res_lock(struct lockspace *ls, struct resource *r, struct action *act
/* lm_lock() reads new r_version */
- if ((r_version > r->version) || (!r->version && !r->version_zero_valid)) {
+ if ((r_version != r->version) || (!r->version && !r->version_zero_valid)) {
+
+ /*
+ * r_version only increases, so if it goes down, it means the
+ * dlm lvb became invalid (happens during recovery if the
+ * resource master leaves).
+ */
+ if (r_version < r->version) {
+ log_debug("S %s R %s res_lock lvb invalid r_version %u prev %u",
+ ls->name, r->name, r_version, r->version);
+ }
+
/*
* New r_version of the lock: means that another
* host has changed data protected by this lock