master - lvextend: refresh shared LV with vgname as arg
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=27cfeb1d39b2515b9c4...
Commit: 27cfeb1d39b2515b9c4b81a1d217e05ae954a68b
Parent: 86b96ede2af32404d0b7b5f516a35dafc4da442e
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Fri Mar 22 15:01:29 2019 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Mar 22 15:01:29 2019 -0500
lvextend: refresh shared LV with vgname as arg
Update the previous commit to leave the vgname as an arg
instead of moving it into the select option (the compound
select option rule confuses the dlm arg processing).
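For orientation, a minimal sketch of the command construction as it stands
after this commit follows; the helper name build_refresh_command and the
1024 value are stand-ins for illustration only (the real constant comes from
libdlmcontrol.h), not the committed code:

  #include <stdio.h>

  #define DLMC_RUN_COMMAND_LEN 1024  /* placeholder; defined by libdlmcontrol */

  /* --select carries only the single lvname test, and the vgname stays a
   * plain positional argument, so dlm_controld never has to parse a quoted
   * compound select expression. */
  static void build_refresh_command(char *command, const char *lvname,
                                    const char *vgname)
  {
          snprintf(command, DLMC_RUN_COMMAND_LEN,
                   "lvm lvchange --refresh --partial --nolocking --select lvname=%s %s",
                   lvname, vgname);
  }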
---
daemons/lvmlockd/lvmlockd-dlm.c | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index 21aaa2d..ee39c3e 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -818,9 +818,10 @@ int lm_refresh_lv_start_dlm(struct action *act)
memset(run_uuid, 0, sizeof(run_uuid));
/* todo: add --readonly */
+ /* FIXME: move vgname into the select option once the dlm can handle compound select args */
snprintf(command, DLMC_RUN_COMMAND_LEN,
- "lvm lvchange --refresh --partial --nolocking --select 'lvname=%s && vgname=%s'",
+ "lvm lvchange --refresh --partial --nolocking --select lvname=%s %s",
lvname, vgname);
rv = dlmc_run_start(command, strlen(command), 0,
master - lvextend: refresh shared LV using select option
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=86b96ede2af32404d0b...
Commit: 86b96ede2af32404d0b7b5f516a35dafc4da442e
Parent: 85e68a8333722b7694d607652dd1f834fadfd8c4
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Fri Mar 22 14:28:02 2019 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Fri Mar 22 14:35:02 2019 -0500
lvextend: refresh shared LV using select option
Using --select 'lvname=LV && vgname=VG' avoids the problem
that the lvchange exit code does not distinguish an actual
error from the VG or LV simply not existing. (This matters in
case there is an odd dlm/gfs2 setup where some nodes are
running the dlm but do not have access to the VG.)
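Most of the hunk below splits the /dev/VG/LV path into its VG and LV parts so
both can be fed into the select expression. A standalone sketch of that
splitting, with a hypothetical helper name and simplified error handling (not
the committed code), might look like this:

  #include <limits.h>
  #include <stdio.h>
  #include <string.h>

  /* Split "/dev/vgname/lvname" into vgname and lvname (illustrative only). */
  static int split_lv_path(const char *lv_path, char *vgname, size_t vg_len,
                           char *lvname, size_t lv_len)
  {
          char buf[PATH_MAX];
          char *p, *vg;

          if (snprintf(buf, sizeof(buf), "%s", lv_path) >= (int) sizeof(buf))
                  return -1;

          if (!(p = strchr(buf + 1, '/')))        /* skip past "dev" */
                  return -1;
          while (*p == '/')                       /* skip past slashes */
                  p++;
          vg = p;                                 /* start of vgname */
          if (!(p = strchr(p, '/')))
                  return -1;
          *p++ = '\0';                            /* terminate vgname */
          while (*p == '/')                       /* skip past slashes */
                  p++;                            /* p is now the lvname */

          snprintf(vgname, vg_len, "%s", vg);
          snprintf(lvname, lv_len, "%s", p);
          return 0;
  }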
---
daemons/lvmlockd/lvmlockd-dlm.c | 34 +++++++++++++++++++++++++++++++++-
1 files changed, 33 insertions(+), 1 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index 385d533..21aaa2d 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -781,15 +781,47 @@ int lm_is_running_dlm(void)
int lm_refresh_lv_start_dlm(struct action *act)
{
+ char path[PATH_MAX];
char command[DLMC_RUN_COMMAND_LEN];
char run_uuid[DLMC_RUN_UUID_LEN];
+ char *p, *vgname, *lvname;
int rv;
+ /* split /dev/vgname/lvname into vgname and lvname strings */
+ strncpy(path, act->path, strlen(act->path));
+
+ /* skip past dev */
+ p = strchr(path + 1, '/');
+
+ /* skip past slashes */
+ while (*p == '/')
+ p++;
+
+ /* start of vgname */
+ vgname = p;
+
+ /* skip past vgname */
+ while (*p != '/')
+ p++;
+
+ /* terminate vgname */
+ *p = '\0';
+ p++;
+
+ /* skip past slashes */
+ while (*p == '/')
+ p++;
+
+ lvname = p;
+
memset(command, 0, sizeof(command));
memset(run_uuid, 0, sizeof(run_uuid));
+ /* todo: add --readonly */
+
snprintf(command, DLMC_RUN_COMMAND_LEN,
- "lvm lvchange --refresh --nolocking %s", act->path);
+ "lvm lvchange --refresh --partial --nolocking --select 'lvname=%s && vgname=%s'",
+ lvname, vgname);
rv = dlmc_run_start(command, strlen(command), 0,
DLMC_FLAG_RUN_START_NODE_NONE,
v2_02_184 annotated tag has been created
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=44fed6a37b24bb27891...
Commit: 44fed6a37b24bb278914e59909cb406b78217bb9
Parent: 0000000000000000000000000000000000000000
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: 2019-03-22 10:04 +0000
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: 2019-03-22 10:04 +0000
annotated tag: v2_02_184 has been created
at 44fed6a37b24bb278914e59909cb406b78217bb9 (tag)
tagging 7cbee7e9cf5445ee5c50898e20536734cbb0f4a6 (commit)
replaces v2_02_183
Release 2.02.184
Bug fix release.
Notable changes:
New scan_lvs option, defaulting to 0, which excludes logical volumes
from being scanned for PV headers.
Fix the cleaner policy not working with large cache volumes whose
migration threshold was smaller than the chunk size.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)
iQIcBAABAgAGBQJclLTzAAoJELkRJDHlCQOf3cYQAJ0c4pnE0WZuXzPv2AGbgPBi
qlEXe7IWA6BcffGb31IvyDTQZhheKhsf4B3ogYatg8naCFZw6grBZZaO8A2XWq9U
rd8TDomna2HBCeFrrYQNLnQTajlOVFrvQXYoQbpk4OkF9qRPVEzWH4LSbG3Qicrt
6mRQt8FiTnjBixw/fynarkDMWy+TDBtucBgTc6L62kuqCqbZMaeooaBBCRpKublZ
RM22U087Ll1ZMPuunj1AYWiQp+qEiHbSPzuaRjhTacWm3ObchrKgPy3rlGqbULte
14UYRorjsjGUJ0uQKCOq46iDtZkQzbBHYacfxdiiO25V8SmV/egJLjtj9Wv+oHz1
e8UPwr1xXmhoxrAeLdsQQlFwyOaxXNlD8fgWIe8/z+ReWRs204i4liMdqnrDa/8M
FhiaExWgD+e+guEefCHrD0DXYuNf6rn9UIdZxyeNvppYM7JxpKtnBr2HeDElR/4R
SvJaTj9/X8ObKhbLpRCNpikPgYvdhDVgPuDV2JE/1fTnoRr0LKtPg+pT3FITRSw0
lqkkXYnwl5te4m9WYLuDtnGTzgslrrl59dH9D39WQG0uayPgmoRHyn2PwEtD4E/v
X9YZplS6LB0VwLDjUftRHkcs+7Dn+/4Xw9eU7zCSCPFWu8Nv6iYfe3nt9BIb8TCA
IbACPxvMAMLov01Um4co
=VcrO
-----END PGP SIGNATURE-----
Alasdair G Kergon (1):
dmsetup: Fix multi-line concise table parsing
David Teigland (13):
lvmlockd: make lockstart wait for existing start
lvmlockd: fix make lockstart wait
apply obtain_device_list_from_udev to all libudev usage
filter: add config setting to skip scanning LVs
config: change scan_lvs default to 0
tests: set scan_lvs=1 in tests that stack PVs on LVs
WHATS_NEW: scan_lvs
tests: add scan_lvs.sh
config: add new setting io_memory_size
io: warn when metadata size approaches io memory size
io: increase the default io memory from 4 to 8 MiB
config: improve scan_lvs description
pvscan: lvmetad init should set updating before scanning
Heinz Mauelshagen (1):
raid: fix (de)activation of RaidLVs with visible SubLVs
Marcos Paulo de Souza (1):
pvscan.service.in: Move StartLimitInterval to Service section
Marian Csontos (5):
post-release
spec: Use python3 setuptools with python3
cov: dmstats check for failing malloc
build: make generate
pre_release
Ming-Hung Tsai (1):
lvmanip: uninitialized members in struct pv_list (#10)
Zdenek Kabelac (15):
cov: dm stats missed terminating null
cov: ensure vars are set
revert "cov: dm stats missed terminating null"
cov: shutdown warning
cov: split check for type assignment
cov: mark warning as expected one
cov: hide intentionaly ptr arithmetic report
cov: remove unused assigns
cleanup: move cast to det_t into MKDEV macro
libdm: add DM_DEVICE_ARM_POLL
libdm: print params only for ioctls using them
libdm: add memory barrier
stats: fix error path when region is NULL
stats: initilize regions to NULL
dm: migration_threshold for old linked tools
stable-2.02 - post-release
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=81d954df4ed844600c4...
Commit: 81d954df4ed844600c44b99ee68453e1e53fcf31
Parent: 7cbee7e9cf5445ee5c50898e20536734cbb0f4a6
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Fri Mar 22 11:13:08 2019 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Fri Mar 22 11:13:08 2019 +0100
post-release
---
VERSION | 2 +-
VERSION_DM | 2 +-
WHATS_NEW | 3 +++
WHATS_NEW_DM | 3 +++
4 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/VERSION b/VERSION
index 37f89d5..b6970e5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.02.184(2) (2019-03-22)
+2.02.185(2)-git (2019-03-22)
diff --git a/VERSION_DM b/VERSION_DM
index f874e31..57cbc8d 100644
--- a/VERSION_DM
+++ b/VERSION_DM
@@ -1 +1 @@
-1.02.156 (2019-03-22)
+1.02.158-git (2019-03-22)
diff --git a/WHATS_NEW b/WHATS_NEW
index ffdd5aa..1d9032d 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,3 +1,6 @@
+Version 2.02.185 -
+==================================
+
Version 2.02.184 - 22nd March 2019
==================================
Fix (de)activation of RaidLVs with visible SubLVs
diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
index 4e245fb..8258273 100644
--- a/WHATS_NEW_DM
+++ b/WHATS_NEW_DM
@@ -1,3 +1,6 @@
+Version 1.02.158 -
+==================================
+
Version 1.02.156 - 22nd March 2019
==================================
Ensure migration_threshold for cache is at least 8 chunks.
stable-2.02 - pre_release
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=7cbee7e9cf5445ee5c5...
Commit: 7cbee7e9cf5445ee5c50898e20536734cbb0f4a6
Parent: 717957ddc5bcca529a4fe830e272c678a935e2ee
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Fri Mar 22 11:04:15 2019 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Fri Mar 22 11:04:15 2019 +0100
pre_release
---
VERSION | 2 +-
VERSION_DM | 2 +-
WHATS_NEW | 4 ++--
WHATS_NEW_DM | 4 ++--
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/VERSION b/VERSION
index 9ae261d..37f89d5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.02.184(2)-git (2018-12-07)
+2.02.184(2) (2019-03-22)
diff --git a/VERSION_DM b/VERSION_DM
index 0ccad6e..f874e31 100644
--- a/VERSION_DM
+++ b/VERSION_DM
@@ -1 +1 @@
-1.02.156-git (2018-12-07)
+1.02.156 (2019-03-22)
diff --git a/WHATS_NEW b/WHATS_NEW
index 0488a50..ffdd5aa 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,5 @@
-Version 2.02.184 -
-=====================================
+Version 2.02.184 - 22nd March 2019
+==================================
Fix (de)activation of RaidLVs with visible SubLVs
Change scan_lvs default to 0 so LVs are not scanned for PVs.
Add scan_lvs config setting to control if lvm scans LVs for PVs.
diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
index 5d3c9db..4e245fb 100644
--- a/WHATS_NEW_DM
+++ b/WHATS_NEW_DM
@@ -1,5 +1,5 @@
-Version 1.02.156 -
-=====================================
+Version 1.02.156 - 22nd March 2019
+==================================
Ensure migration_threshold for cache is at least 8 chunks.
Enhance ioctl flattening and add parameters only when needed.
Add DM_DEVICE_ARM_POLL for API completness matching kernel.
stable-2.02 - build: make generate
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=717957ddc5bcca529a4...
Commit: 717957ddc5bcca529a4fe830e272c678a935e2ee
Parent: 9b04851fc574ce9cffd30a51d2b750955239f316
Author: Marian Csontos <mcsontos(a)redhat.com>
AuthorDate: Fri Mar 22 11:00:13 2019 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Fri Mar 22 11:01:04 2019 +0100
build: make generate
---
conf/example.conf.in | 24 ++++++++++++++++++++++++
1 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/conf/example.conf.in b/conf/example.conf.in
index e6f3462..38855e9 100644
--- a/conf/example.conf.in
+++ b/conf/example.conf.in
@@ -185,6 +185,20 @@ devices {
# present on the system. sysfs must be part of the kernel and mounted.)
sysfs_scan = 1
+ # Configuration option devices/scan_lvs.
+ # Scan LVM LVs for layered PVs, allowing LVs to be used as PVs.
+ # When 1, LVM will detect PVs layered on LVs, and caution must be
+ # taken to avoid a host accessing a layered VG that may not belong
+ # to it, e.g. from a guest image. This generally requires excluding
+ # the LVs with device filters. Also, when this setting is enabled,
+ # every LVM command will scan every active LV on the system (unless
+ # filtered), which can cause performance problems on systems with
+ # many active LVs. When this setting is 0, LVM will not detect or
+ # use PVs that exist on LVs, and will not allow a PV to be created on
+ # an LV. The LVs are ignored using a built in device filter that
+ # identifies and excludes LVs.
+ scan_lvs = 0
+
# Configuration option devices/multipath_component_detection.
# Ignore devices that are components of DM multipath devices.
multipath_component_detection = 1
@@ -1113,6 +1127,16 @@ global {
# When enabled, an LVM command that changes PVs, changes VG metadata,
# or changes the activation state of an LV will send a notification.
notify_dbus = 1
+
+ # Configuration option global/io_memory_size.
+ # The amount of memory in KiB that LVM allocates to perform disk io.
+ # LVM performance may benefit from more io memory when there are many
+ # disks or VG metadata is large. Increasing this size may be necessary
+ # when a single copy of VG metadata is larger than the current setting.
+ # This value should usually not be decreased from the default; setting
+ # it too low can result in lvm failing to read VGs.
+ # This configuration option has an automatic default value.
+ # io_memory_size = 8192
}
# Configuration section activation.
stable-2.02 - raid: fix (de)activation of RaidLVs with visible SubLVs
by Marian Csontos
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=9b04851fc574ce9cffd...
Commit: 9b04851fc574ce9cffd30a51d2b750955239f316
Parent: dcf8f3111aea1179be1083ace772f3108b06c508
Author: Heinz Mauelshagen <heinzm(a)redhat.com>
AuthorDate: Wed Oct 31 23:05:08 2018 +0100
Committer: Marian Csontos <mcsontos(a)redhat.com>
CommitterDate: Thu Mar 21 08:05:23 2019 +0100
raid: fix (de)activation of RaidLVs with visible SubLVs
There's a small window during creation of a new RaidLV when
rmeta SubLVs are made visible to wipe them in order to prevent
erroneous discovery of stale RAID metadata. In case a crash
prevents the SubLVs from being committed hidden after such
wiping, the RaidLV can still be activated with the SubLVs visible.
During deactivation though, a deadlock occurs because the visible
SubLVs are deactivated before the RaidLV.
The patch adds _check_raid_sublvs to the raid validation in merge.c
and an activation check to activate.c (paranoid, because the merge.c
check will already prevent activation in case of visible SubLVs), and
removes code duplication by moving the existing wiping function
_clear_lvs from raid_manip.c to lv_manip.c, renamed to
activate_and_wipe_lvlist so it can be shared. While at it, introduce
activate_and_wipe_lv to share with (lvconvert|lvchange).c.
Resolves: rhbz1633167
(cherry picked from commit dd5716ddf258c4a44819fa90d3356833ccf767b4)
Conflicts:
WHATS_NEW
lib/activate/activate.c
lib/metadata/lv_manip.c
lib/metadata/raid_manip.c
tools/lvchange.c
tools/lvconvert.c
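As a rough illustration of the consolidation, clearing a single metadata LV
now reduces to building a one-entry list and calling the shared helper. This
is only a sketch based on the helpers declared in metadata-exported.h below
(it assumes the lvm2 internal headers, and meta_lv is a hypothetical LV), not
a verbatim call site:

  struct dm_list lv_list;
  struct lv_list lvl;

  lvl.lv = meta_lv;                       /* hypothetical LV to clear */
  dm_list_init(&lv_list);
  dm_list_add(&lv_list, &lvl.list);

  /* Activates any LVs that are not already active, wipes known signatures
   * plus the first sector, then deactivates the ones it activated;
   * commit=1 also does vg_write/vg_commit first. */
  if (!activate_and_wipe_lvlist(&lv_list, 1))
          return_0;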
---
WHATS_NEW | 1 +
lib/activate/activate.c | 6 +
lib/metadata/lv_manip.c | 190 ++++++++++++++++++++++++++++----------
lib/metadata/merge.c | 30 ++++++
lib/metadata/metadata-exported.h | 8 ++
lib/metadata/raid_manip.c | 91 ++++--------------
tools/lvchange.c | 32 +------
tools/lvconvert.c | 22 +---
8 files changed, 214 insertions(+), 166 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index cbb5505..0488a50 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.184 -
=====================================
+ Fix (de)activation of RaidLVs with visible SubLVs
Change scan_lvs default to 0 so LVs are not scanned for PVs.
Add scan_lvs config setting to control if lvm scans LVs for PVs.
Fix missing proper initialization of pv_list struct when adding pv.
diff --git a/lib/activate/activate.c b/lib/activate/activate.c
index 2a85ef9..4c83231 100644
--- a/lib/activate/activate.c
+++ b/lib/activate/activate.c
@@ -2789,6 +2789,12 @@ static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
goto out;
}
+ if (lv_raid_has_visible_sublvs(lv)) {
+ log_error("Refusing activation of RAID LV %s with "
+ "visible SubLVs.", display_lvname(lv));
+ goto out;
+ }
+
if (test_mode()) {
_skip("Activating %s.", display_lvname(lv));
r = 1;
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index d1389e7..d039686 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -3958,6 +3958,25 @@ bad:
return 0;
}
+/* Add all rmeta SubLVs for @seg to @lvs and return allocated @lvl to free by caller. */
+static struct lv_list *_raid_list_metalvs(struct lv_segment *seg, struct dm_list *lvs)
+{
+ uint32_t s;
+ struct lv_list *lvl;
+
+ dm_list_init(lvs);
+
+ if (!(lvl = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*lvl) * seg->area_count)))
+ return_NULL;
+
+ for (s = 0; s < seg->area_count; s++) {
+ lvl[s].lv = seg_metalv(seg, s);
+ dm_list_add(lvs, &lvl[s].list);
+ }
+
+ return lvl;
+}
+
static int _lv_extend_layered_lv(struct alloc_handle *ah,
struct logical_volume *lv,
uint32_t extents, uint32_t first_area,
@@ -3969,7 +3988,6 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
uint32_t fa, s;
int clear_metadata = 0;
uint32_t area_multiple = 1;
- int fail;
if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
return_0;
@@ -4047,74 +4065,50 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
return_0;
if (clear_metadata) {
+ struct volume_group *vg = lv->vg;
+
/*
* We must clear the metadata areas upon creation.
*/
- /* FIXME VG is not in a fully-consistent state here and should not be committed! */
- if (!vg_write(lv->vg) || !vg_commit(lv->vg))
- return_0;
-
- if (test_mode())
- log_verbose("Test mode: Skipping wiping of metadata areas.");
- else {
- fail = 0;
- /* Activate all rmeta devices locally first (more efficient) */
- for (s = 0; !fail && s < seg->area_count; s++) {
- meta_lv = seg_metalv(seg, s);
-
- if (!activate_lv_local(meta_lv->vg->cmd, meta_lv)) {
- log_error("Failed to activate %s for clearing.",
- display_lvname(meta_lv));
- fail = 1;
- }
- }
-
- /* Clear all rmeta devices */
- for (s = 0; !fail && s < seg->area_count; s++) {
- meta_lv = seg_metalv(seg, s);
- log_verbose("Clearing metadata area of %s.",
- display_lvname(meta_lv));
- /*
- * Rather than wiping meta_lv->size, we can simply
- * wipe '1' to remove the superblock of any previous
- * RAID devices. It is much quicker.
- */
- if (!wipe_lv(meta_lv, (struct wipe_params)
- { .do_zero = 1, .zero_sectors = 1 })) {
- stack;
- fail = 1;
- }
- }
-
- /* Deactivate all rmeta devices */
- for (s = 0; s < seg->area_count; s++) {
- meta_lv = seg_metalv(seg, s);
+ /*
+ * Declare the new RaidLV as temporary to avoid visible SubLV
+ * failures on activation until after we wiped them so that
+ * we can avoid activating crashed, potentially partially
+ * wiped RaidLVs.
+ */
+ lv->status |= LV_ACTIVATION_SKIP;
- if (!deactivate_lv(meta_lv->vg->cmd, meta_lv)) {
- log_error("Failed to deactivate %s after clearing.",
- display_lvname(meta_lv));
- fail = 1;
- }
+ if (test_mode()) {
+ /* FIXME VG is not in a fully-consistent state here and should not be committed! */
+ if (!vg_write(vg) || !vg_commit(vg))
+ return_0;
- /* Wipe any temporary tags required for activation. */
- str_list_wipe(&meta_lv->tags);
- }
+ log_verbose("Test mode: Skipping wiping of metadata areas.");
+ } else {
+ struct dm_list meta_lvs;
+ struct lv_list *lvl;
- if (fail) {
- /* Fail, after trying to deactivate all we could */
- struct volume_group *vg = lv->vg;
+ if (!(lvl = _raid_list_metalvs(seg, &meta_lvs)))
+ return 0;
+ /* Wipe lv list committing metadata */
+ if (!activate_and_wipe_lvlist(&meta_lvs, 1)) {
+ /* If we failed clearing rmeta SubLVs, try removing the new RaidLV */
if (!lv_remove(lv))
log_error("Failed to remove LV");
else if (!vg_write(vg) || !vg_commit(vg))
log_error("Failed to commit VG %s", vg->name);
return_0;
}
+
+ dm_pool_free(vg->vgmem, lvl);
}
for (s = 0; s < seg->area_count; s++)
lv_set_hidden(seg_metalv(seg, s));
+
+ lv->status &= ~LV_ACTIVATION_SKIP;
}
return 1;
@@ -7200,6 +7194,100 @@ out:
return 1;
}
+/*
+ * Optionally makes on-disk metadata changes if @commit
+ *
+ * If LV is active:
+ * wipe any signatures and clear first sector of LVs listed on @lv_list
+ * otherwise:
+ * activate, wipe (as above), deactivate
+ *
+ * Returns: 1 on success, 0 on failure
+ */
+int activate_and_wipe_lvlist(struct dm_list *lv_list, int commit)
+{
+ struct lv_list *lvl;
+ struct volume_group *vg = NULL;
+ unsigned i = 0, sz = dm_list_size(lv_list);
+ char *was_active;
+ int r = 1;
+
+ if (!sz) {
+ log_debug_metadata(INTERNAL_ERROR "Empty list of LVs given for wiping.");
+ return 1;
+ }
+
+ dm_list_iterate_items(lvl, lv_list) {
+ if (!lv_is_visible(lvl->lv)) {
+ log_error(INTERNAL_ERROR
+ "LVs must be set visible before wiping.");
+ return 0;
+ }
+ vg = lvl->lv->vg;
+ }
+
+ if (test_mode())
+ return 1;
+
+ /*
+ * FIXME: only vg_[write|commit] if LVs are not already written
+ * as visible in the LVM metadata (which is never the case yet).
+ */
+ if (commit &&
+ (!vg || !vg_write(vg) || !vg_commit(vg)))
+ return_0;
+
+ was_active = alloca(sz);
+
+ dm_list_iterate_items(lvl, lv_list)
+ if (!(was_active[i++] = lv_is_active(lvl->lv))) {
+ lvl->lv->status |= LV_TEMPORARY;
+ if (!activate_lv(vg->cmd, lvl->lv)) {
+ log_error("Failed to activate localy %s for wiping.",
+ display_lvname(lvl->lv));
+ r = 0;
+ goto out;
+ }
+ lvl->lv->status &= ~LV_TEMPORARY;
+ }
+
+ dm_list_iterate_items(lvl, lv_list) {
+ log_verbose("Wiping metadata area %s.", display_lvname(lvl->lv));
+ /* Wipe any know signatures */
+ if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_wipe_signatures = 1, .do_zero = 1, .zero_sectors = 1 })) {
+ log_error("Failed to wipe %s.", display_lvname(lvl->lv));
+ r = 0;
+ goto out;
+ }
+ }
+out:
+ /* TODO: deactivation is only needed with clustered locking
+ * in normal case we should keep device active
+ */
+ sz = 0;
+ dm_list_iterate_items(lvl, lv_list)
+ if ((i > sz) && !was_active[sz++] &&
+ !deactivate_lv(vg->cmd, lvl->lv)) {
+ log_error("Failed to deactivate %s.", display_lvname(lvl->lv));
+ r = 0; /* Continue deactivating as many as possible. */
+ }
+
+ return r;
+}
+
+/* Wipe logical volume @lv, optionally with @commit of metadata */
+int activate_and_wipe_lv(struct logical_volume *lv, int commit)
+{
+ struct dm_list lv_list;
+ struct lv_list lvl;
+
+ lvl.lv = lv;
+ dm_list_init(&lv_list);
+ dm_list_add(&lv_list, &lvl.list);
+
+ return activate_and_wipe_lvlist(&lv_list, commit);
+}
+
static struct logical_volume *_create_virtual_origin(struct cmd_context *cmd,
struct volume_group *vg,
const char *lv_name,
diff --git a/lib/metadata/merge.c b/lib/metadata/merge.c
index 7299620..a024877 100644
--- a/lib/metadata/merge.c
+++ b/lib/metadata/merge.c
@@ -234,6 +234,30 @@ static void _check_non_raid_seg_members(struct lv_segment *seg, int *error_count
/* .... more members? */
}
+static void _check_raid_sublvs(struct lv_segment *seg, int *error_count)
+{
+ unsigned s;
+
+ for (s = 0; s < seg->area_count; s++) {
+ if (seg_type(seg, s) != AREA_LV)
+ raid_seg_error("no raid image SubLV");
+
+ if ((seg_lv(seg, s)->status & LVM_WRITE) &&
+ !(seg->lv->status & LV_ACTIVATION_SKIP) &&
+ lv_is_visible(seg_lv(seg, s)))
+ raid_seg_error("visible raid image LV");
+
+ if (!seg_is_raid_with_meta(seg) || !seg->meta_areas)
+ continue;
+
+ if (seg_metatype(seg, s) != AREA_LV)
+ raid_seg_error("no raid meta SubLV");
+ else if (!(seg->lv->status & LV_ACTIVATION_SKIP) &&
+ lv_is_visible(seg_metalv(seg, s)))
+ raid_seg_error("visible raid meta LV");
+ }
+}
+
/*
* Check RAID segment struct members of @seg for acceptable
* properties and increment @error_count for any bogus ones.
@@ -287,10 +311,14 @@ static void _check_raid_seg(struct lv_segment *seg, int *error_count)
/* Check for any MetaLV flaws like non-existing ones or size variations */
if (seg->meta_areas)
for (area_len = s = 0; s < seg->area_count; s++) {
+ if (seg_metatype(seg, s) == AREA_UNASSIGNED)
+ continue;
+
if (seg_metatype(seg, s) != AREA_LV) {
raid_seg_error("no MetaLV");
continue;
}
+
if (!lv_is_raid_metadata(seg_metalv(seg, s)))
raid_seg_error("MetaLV without RAID metadata flag");
if (area_len &&
@@ -314,6 +342,8 @@ static void _check_raid_seg(struct lv_segment *seg, int *error_count)
_check_raid45610_seg(seg, error_count);
else
raid_seg_error("bogus RAID segment type");
+
+ _check_raid_sublvs(seg, error_count);
}
/* END: RAID segment property checks. */
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index f4fb112..75caba1 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -784,6 +784,12 @@ struct wipe_params {
/* Zero out LV and/or wipe signatures */
int wipe_lv(struct logical_volume *lv, struct wipe_params params);
+/* Wipe any signatures and zero first sector on @lv */
+int activate_and_wipe_lv(struct logical_volume *lv, int commit);
+
+/* Wipe any signatures and zero first sector of LVs listed on @lv_list */
+int activate_and_wipe_lvlist(struct dm_list *lv_list, int commit);
+
int lv_change_tag(struct logical_volume *lv, const char *tag, int add_tag);
/* Reduce the size of an LV by extents */
@@ -1213,6 +1219,8 @@ int lv_raid_change_region_size(struct logical_volume *lv,
int lv_raid_in_sync(const struct logical_volume *lv);
uint32_t lv_raid_data_copies(const struct segment_type *segtype, uint32_t area_count);
int lv_raid_free_reshape_space(const struct logical_volume *lv);
+int lv_raid_clear_lv(struct logical_volume *lv, int commit);
+int lv_raid_has_visible_sublvs(const struct logical_volume *lv);
/* -- metadata/raid_manip.c */
/* ++ metadata/cache_manip.c */
diff --git a/lib/metadata/raid_manip.c b/lib/metadata/raid_manip.c
index cb7202a..bffae60 100644
--- a/lib/metadata/raid_manip.c
+++ b/lib/metadata/raid_manip.c
@@ -689,86 +689,33 @@ static int _lv_update_and_reload_list(struct logical_volume *lv, int origin_only
return r;
}
-/* Makes on-disk metadata changes
- * If LV is active:
- * clear first block of device
- * otherwise:
- * activate, clear, deactivate
- *
- * Returns: 1 on success, 0 on failure
- */
+/* Wipe all LVs listsed on @lv_list committing lvm metadata */
static int _clear_lvs(struct dm_list *lv_list)
{
- struct lv_list *lvl;
- struct volume_group *vg = NULL;
- unsigned i = 0, sz = dm_list_size(lv_list);
- char *was_active;
- int r = 1;
-
- if (!sz) {
- log_debug_metadata(INTERNAL_ERROR "Empty list of LVs given for clearing.");
- return 1;
- }
-
- dm_list_iterate_items(lvl, lv_list) {
- if (!lv_is_visible(lvl->lv)) {
- log_error(INTERNAL_ERROR
- "LVs must be set visible before clearing.");
- return 0;
- }
- vg = lvl->lv->vg;
- }
-
- if (test_mode())
- return 1;
+ return activate_and_wipe_lvlist(lv_list, 1);
+}
- /*
- * FIXME: only vg_[write|commit] if LVs are not already written
- * as visible in the LVM metadata (which is never the case yet).
- */
- if (!vg || !vg_write(vg) || !vg_commit(vg))
- return_0;
+/* External interface to clear logical volumes on @lv_list */
+int lv_raid_has_visible_sublvs(const struct logical_volume *lv)
+{
+ unsigned s;
+ struct lv_segment *seg = first_seg(lv);
- was_active = alloca(sz);
+ if (!lv_is_raid(lv) || (lv->status & LV_TEMPORARY) || !seg)
+ return 0;
- dm_list_iterate_items(lvl, lv_list)
- if (!(was_active[i++] = lv_is_active_locally(lvl->lv))) {
- lvl->lv->status |= LV_TEMPORARY;
- if (!activate_lv_excl_local(vg->cmd, lvl->lv)) {
- log_error("Failed to activate localy %s for clearing.",
- display_lvname(lvl->lv));
- r = 0;
- goto out;
- }
- lvl->lv->status &= ~LV_TEMPORARY;
- }
+ if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv))
+ return 0;
- dm_list_iterate_items(lvl, lv_list) {
- log_verbose("Clearing metadata area %s.", display_lvname(lvl->lv));
- /*
- * Rather than wiping lv->size, we can simply
- * wipe the first sector to remove the superblock of any previous
- * RAID devices. It is much quicker.
- */
- if (!wipe_lv(lvl->lv, (struct wipe_params) { .do_zero = 1, .zero_sectors = 1 })) {
- log_error("Failed to zero %s.", display_lvname(lvl->lv));
- r = 0;
- goto out;
- }
+ for (s = 0; s < seg->area_count; s++) {
+ if ((seg_lv(seg, s)->status & LVM_WRITE) && /* Split off track changes raid1 leg */
+ lv_is_visible(seg_lv(seg, s)))
+ return 1;
+ if (seg->meta_areas && lv_is_visible(seg_metalv(seg, s)))
+ return 1;
}
-out:
- /* TODO: deactivation is only needed with clustered locking
- * in normal case we should keep device active
- */
- sz = 0;
- dm_list_iterate_items(lvl, lv_list)
- if ((i > sz) && !was_active[sz++] &&
- !deactivate_lv(vg->cmd, lvl->lv)) {
- log_error("Failed to deactivate %s.", display_lvname(lvl->lv));
- r = 0; /* continue deactivating */
- }
- return r;
+ return 0;
}
/* raid0* <-> raid10_near area reorder helper: swap 2 LV segment areas @a1 and @a2 */
diff --git a/tools/lvchange.c b/tools/lvchange.c
index 6144852..4195a80 100644
--- a/tools/lvchange.c
+++ b/tools/lvchange.c
@@ -321,7 +321,6 @@ static int _lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
int monitored;
struct lv_segment *seg = first_seg(lv);
struct dm_list device_list;
- struct lv_list *lvl;
dm_list_init(&device_list);
@@ -405,6 +404,7 @@ static int _lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
* Now we handle mirrors with log devices
*/
lv->status &= ~LV_NOTSYNCED;
+ lv->status |= LV_ACTIVATION_SKIP;
/* Separate mirror log or metadata devices so we can clear them */
if (!_detach_metadata_devices(seg, &device_list)) {
@@ -423,32 +423,8 @@ static int _lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
/* No backup for intermediate metadata, so just unlock memory */
memlock_unlock(lv->vg->cmd);
- dm_list_iterate_items(lvl, &device_list) {
- if (!activate_lv_excl_local(cmd, lvl->lv)) {
- log_error("Unable to activate %s for %s clearing.",
- display_lvname(lvl->lv), (seg_is_raid(seg)) ?
- "metadata area" : "mirror log");
- return 0;
- }
-
- if (!wipe_lv(lvl->lv, (struct wipe_params)
- { .do_zero = 1, .zero_sectors = lvl->lv->size })) {
- log_error("Unable to reset sync status for %s.",
- display_lvname(lv));
- if (!deactivate_lv(cmd, lvl->lv))
- log_error("Failed to deactivate log LV after "
- "wiping failed");
- return 0;
- }
-
- if (!deactivate_lv(cmd, lvl->lv)) {
- log_error("Unable to deactivate %s LV %s "
- "after wiping for resync.",
- (seg_is_raid(seg)) ? "metadata" : "log",
- display_lvname(lvl->lv));
- return 0;
- }
- }
+ if (!activate_and_wipe_lvlist(&device_list, 0))
+ return 0;
/* Wait until devices are away */
if (!sync_local_dev_names(lv->vg->cmd)) {
@@ -464,6 +440,8 @@ static int _lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
return 0;
}
+ lv->status &= ~LV_ACTIVATION_SKIP;
+
if (!_vg_write_commit(lv, NULL))
return 0;
diff --git a/tools/lvconvert.c b/tools/lvconvert.c
index 028781f..2b3c3ea 100644
--- a/tools/lvconvert.c
+++ b/tools/lvconvert.c
@@ -2003,19 +2003,9 @@ static int _lvconvert_snapshot(struct cmd_context *cmd,
if (!zero || !(lv->status & LVM_WRITE))
log_warn("WARNING: %s not zeroed.", snap_name);
- else {
- lv->status |= LV_TEMPORARY;
- if (!activate_lv_excl_local(cmd, lv) ||
- !wipe_lv(lv, (struct wipe_params) { .do_zero = 1 })) {
- log_error("Aborting. Failed to wipe snapshot exception store.");
- return 0;
- }
- lv->status &= ~LV_TEMPORARY;
- /* Deactivates cleared metadata LV */
- if (!deactivate_lv(lv->vg->cmd, lv)) {
- log_error("Failed to deactivate zeroed snapshot exception store.");
- return 0;
- }
+ else if (!activate_and_wipe_lv(lv, 0)) {
+ log_error("Aborting. Failed to wipe snapshot exception store.");
+ return 0;
}
if (!archive(lv->vg))
@@ -3176,12 +3166,12 @@ static int _lvconvert_to_pool(struct cmd_context *cmd,
goto_bad;
if (zero_metadata) {
- metadata_lv->status |= LV_TEMPORARY;
- if (!activate_lv_excl_local(cmd, metadata_lv)) {
+ metadata_lv->status |= LV_ACTIVATION_SKIP;
+ if (!activate_lv(cmd, metadata_lv)) {
log_error("Aborting. Failed to activate metadata lv.");
goto bad;
}
- metadata_lv->status &= ~LV_TEMPORARY;
+ metadata_lv->status &= ~LV_ACTIVATION_SKIP;
if (!wipe_lv(metadata_lv, (struct wipe_params) { .do_zero = 1 })) {
log_error("Aborting. Failed to wipe metadata lv.");
master - lvextend: refresh shared LV remotely using dlm/corosync
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=85e68a8333722b7694d...
Commit: 85e68a8333722b7694d607652dd1f834fadfd8c4
Parent: d369de8399e14e82fb1ea45e7977d917411fbc21
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Wed Mar 20 13:20:26 2019 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Thu Mar 21 12:38:20 2019 -0500
lvextend: refresh shared LV remotely using dlm/corosync
When lvextend extends an LV that is active with a shared
lock, use this as a signal that other hosts may also have
the LV active, with gfs2 mounted, and should have the LV
refreshed to reflect the new size. Use the libdlmcontrol
run API, which uses dlm_controld/corosync to run an
lvchange --refresh command on other cluster nodes.
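The start/poll pattern lvmlockd ends up using through libdlmcontrol looks
roughly like the sketch below. The function names and flags are the ones used
in lvmlockd-dlm.c in this patch; refresh_lv_everywhere is a hypothetical
wrapper, and the sleep loop stands in for lvmlockd re-checking the action from
its worker thread's delayed list:

  #include <stdint.h>
  #include <string.h>
  #include <unistd.h>
  #include "libdlmcontrol.h"

  /* Sketch: run "lvm lvchange --refresh --nolocking <path>" on the other
   * cluster nodes via dlm_controld and wait for the result. */
  static int refresh_lv_everywhere(const char *command)
  {
          char run_uuid[DLMC_RUN_UUID_LEN];
          uint32_t status = 0;
          int rv;

          memset(run_uuid, 0, sizeof(run_uuid));

          rv = dlmc_run_start(command, strlen(command), 0,
                              DLMC_FLAG_RUN_START_NODE_NONE, run_uuid);
          if (rv < 0)
                  return rv;

          do {
                  sleep(1);       /* lvmlockd instead re-queues the action */
                  rv = dlmc_run_check(run_uuid, strlen(run_uuid), 0,
                                      DLMC_FLAG_RUN_CHECK_CLEAR, &status);
          } while (!rv && !(status & DLMC_RUN_STATUS_DONE));

          if (rv < 0)
                  return rv;

          return (status & DLMC_RUN_STATUS_FAILED) ? -1 : 0;
  }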
---
configure.ac | 20 ++++
daemons/lvmlockd/Makefile.in | 1 +
daemons/lvmlockd/lvmlockd-core.c | 39 ++++++++
daemons/lvmlockd/lvmlockd-dlm.c | 73 +++++++++++++++
daemons/lvmlockd/lvmlockd-internal.h | 14 +++
lib/commands/toolcontext.h | 1 +
lib/locking/lvmlockd.c | 165 +++++++++++++++++++++++++++++++---
lib/locking/lvmlockd.h | 17 ++++-
lib/metadata/lv_manip.c | 2 +-
lib/metadata/metadata-exported.h | 4 +
tools/lvresize.c | 4 +
11 files changed, 324 insertions(+), 16 deletions(-)
diff --git a/configure.ac b/configure.ac
index 2ece3f9..98a39f6 100644
--- a/configure.ac
+++ b/configure.ac
@@ -42,6 +42,7 @@ case "$host_os" in
BUILD_LVMPOLLD=no
LOCKDSANLOCK=no
LOCKDDLM=no
+ LOCKDDLM_CONTROL=no
ODIRECT=yes
DM_IOCTLS=yes
SELINUX=yes
@@ -917,6 +918,24 @@ if test "$BUILD_LOCKDDLM" = yes; then
fi
################################################################################
+dnl -- Build lvmlockddlmcontrol
+AC_MSG_CHECKING(whether to build lvmlockddlmcontrol)
+AC_ARG_ENABLE(lvmlockd-dlmcontrol,
+ AC_HELP_STRING([--enable-lvmlockd-dlmcontrol],
+ [enable lvmlockd remote refresh using libdlmcontrol]),
+ LOCKDDLM_CONTROL=$enableval)
+AC_MSG_RESULT($LOCKDDLM_CONTROL)
+
+BUILD_LOCKDDLM_CONTROL=$LOCKDDLM_CONTROL
+
+dnl -- Look for libdlmcontrol libraries
+if test "$BUILD_LOCKDDLM_CONTROL" = yes; then
+ PKG_CHECK_MODULES(LOCKD_DLM_CONTROL, libdlmcontrol >= 3.2, [HAVE_LOCKD_DLM_CONTROL=yes], $bailout)
+ AC_DEFINE([LOCKDDLM_CONTROL_SUPPORT], 1, [Define to 1 to include code that uses lvmlockd dlm control option.])
+ BUILD_LVMLOCKD=yes
+fi
+
+################################################################################
dnl -- Build lvmlockd
AC_MSG_CHECKING(whether to build lvmlockd)
AC_MSG_RESULT($BUILD_LVMLOCKD)
@@ -1642,6 +1661,7 @@ AC_SUBST(BUILD_LVMPOLLD)
AC_SUBST(BUILD_LVMLOCKD)
AC_SUBST(BUILD_LOCKDSANLOCK)
AC_SUBST(BUILD_LOCKDDLM)
+AC_SUBST(BUILD_LOCKDDLM_CONTROL)
AC_SUBST(BUILD_DMFILEMAPD)
AC_SUBST(CACHE)
AC_SUBST(CFLAGS)
diff --git a/daemons/lvmlockd/Makefile.in b/daemons/lvmlockd/Makefile.in
index 3ca4167..dca05b8 100644
--- a/daemons/lvmlockd/Makefile.in
+++ b/daemons/lvmlockd/Makefile.in
@@ -27,6 +27,7 @@ endif
ifeq ("@BUILD_LOCKDDLM@", "yes")
SOURCES += lvmlockd-dlm.c
LOCK_LIBS += -ldlm_lt
+ LOCK_LIBS += -ldlmcontrol
endif
SOURCES2 = lvmlockctl.c
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index da3de54..ab413fb 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -502,6 +502,10 @@ static struct lock *alloc_lock(void)
static void free_action(struct action *act)
{
+ if (act->path) {
+ free(act->path);
+ act->path = NULL;
+ }
pthread_mutex_lock(&unused_struct_mutex);
if (unused_action_count >= MAX_UNUSED_ACTION) {
free(act);
@@ -739,6 +743,8 @@ static const char *op_str(int x)
return "dump_info";
case LD_OP_BUSY:
return "busy";
+ case LD_OP_REFRESH_LV:
+ return "refresh_lv";
default:
return "op_unknown";
};
@@ -3421,6 +3427,15 @@ static void *worker_thread_main(void *arg_in)
else
list_add(&act->list, &delayed_list);
+ } else if (act->op == LD_OP_REFRESH_LV) {
+ log_debug("work refresh_lv %s %s", act->lv_uuid, act->path);
+ rv = lm_refresh_lv_start_dlm(act);
+ if (rv < 0) {
+ act->result = rv;
+ add_client_result(act);
+ } else
+ list_add(&act->list, &delayed_list);
+
} else {
log_error("work unknown op %d", act->op);
act->result = -EINVAL;
@@ -3456,6 +3471,19 @@ static void *worker_thread_main(void *arg_in)
act->result = 0;
add_client_result(act);
}
+
+ } else if (act->op == LD_OP_REFRESH_LV) {
+ log_debug("work delayed refresh_lv");
+ rv = lm_refresh_lv_check_dlm(act);
+ if (!rv) {
+ list_del(&act->list);
+ act->result = 0;
+ add_client_result(act);
+ } else if ((rv < 0) && (rv != -EAGAIN)) {
+ list_del(&act->list);
+ act->result = rv;
+ add_client_result(act);
+ }
}
}
@@ -4061,6 +4089,11 @@ static int str_to_op_rt(const char *req_name, int *op, int *rt)
*rt = LD_RT_VG;
return 0;
}
+ if (!strcmp(req_name, "refresh_lv")) {
+ *op = LD_OP_REFRESH_LV;
+ *rt = 0;
+ return 0;
+ }
out:
return -1;
}
@@ -4422,6 +4455,7 @@ static void client_recv_action(struct client *cl)
const char *vg_name;
const char *vg_uuid;
const char *vg_sysid;
+ const char *path;
const char *str;
int64_t val;
uint32_t opts = 0;
@@ -4508,6 +4542,7 @@ static void client_recv_action(struct client *cl)
opts = str_to_opts(str);
str = daemon_request_str(req, "vg_lock_type", NULL);
lm = str_to_lm(str);
+ path = daemon_request_str(req, "path", NULL);
if (cl_pid && cl_pid != cl->pid)
log_error("client recv bad message pid %d client %d", cl_pid, cl->pid);
@@ -4540,6 +4575,9 @@ static void client_recv_action(struct client *cl)
act->flags = opts;
act->lm_type = lm;
+ if (path)
+ act->path = strdup(path);
+
if (vg_name && strcmp(vg_name, "none"))
strncpy(act->vg_name, vg_name, MAX_NAME);
@@ -4616,6 +4654,7 @@ static void client_recv_action(struct client *cl)
case LD_OP_STOP_ALL:
case LD_OP_RENAME_FINAL:
case LD_OP_RUNNING_LM:
+ case LD_OP_REFRESH_LV:
add_work_action(act);
rv = 0;
break;
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index e73be51..385d533 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -24,6 +24,7 @@
* link with non-threaded version of library, libdlm_lt.
*/
#include "libdlm.h"
+#include "libdlmcontrol.h"
#include <stddef.h>
#include <poll.h>
@@ -776,3 +777,75 @@ int lm_is_running_dlm(void)
return 1;
}
+#ifdef LOCKDDLM_CONTROL_SUPPORT
+
+int lm_refresh_lv_start_dlm(struct action *act)
+{
+ char command[DLMC_RUN_COMMAND_LEN];
+ char run_uuid[DLMC_RUN_UUID_LEN];
+ int rv;
+
+ memset(command, 0, sizeof(command));
+ memset(run_uuid, 0, sizeof(run_uuid));
+
+ snprintf(command, DLMC_RUN_COMMAND_LEN,
+ "lvm lvchange --refresh --nolocking %s", act->path);
+
+ rv = dlmc_run_start(command, strlen(command), 0,
+ DLMC_FLAG_RUN_START_NODE_NONE,
+ run_uuid);
+ if (rv < 0) {
+ log_debug("refresh_lv run_start error %d", rv);
+ return rv;
+ }
+
+ log_debug("refresh_lv run_start %s", run_uuid);
+
+ /* Bit of a hack here, we don't need path once started,
+ but we do need to save the run_uuid somewhere, so just
+ replace the path with the uuid. */
+
+ free(act->path);
+ act->path = strdup(run_uuid);
+ return 0;
+}
+
+int lm_refresh_lv_check_dlm(struct action *act)
+{
+ uint32_t check_status = 0;
+ int rv;
+
+ /* NB act->path was replaced with run_uuid */
+
+ rv = dlmc_run_check(act->path, strlen(act->path), 0,
+ DLMC_FLAG_RUN_CHECK_CLEAR,
+ &check_status);
+ if (rv < 0) {
+ log_debug("refresh_lv check error %d", rv);
+ return rv;
+ }
+
+ log_debug("refresh_lv check %s status %x", act->path, check_status);
+
+ if (!(check_status & DLMC_RUN_STATUS_DONE))
+ return -EAGAIN;
+
+ if (check_status & DLMC_RUN_STATUS_FAILED)
+ return -1;
+
+ return 0;
+}
+
+#else /* LOCKDDLM_CONTROL_SUPPORT */
+
+int lm_refresh_lv_start_dlm(struct action *act)
+{
+ return 0;
+}
+
+int lm_refresh_lv_check_dlm(struct action *act)
+{
+ return 0;
+}
+
+#endif /* LOCKDDLM_CONTROL_SUPPORT */
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 50015f1..85e8caf 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -54,6 +54,7 @@ enum {
LD_OP_DROP_VG,
LD_OP_BUSY,
LD_OP_QUERY_LOCK,
+ LD_OP_REFRESH_LV,
};
/* resource types */
@@ -129,6 +130,7 @@ struct action {
int max_retries;
int result;
int lm_rv; /* return value from lm_ function */
+ char *path;
char vg_uuid[64];
char vg_name[MAX_NAME+1];
char lv_name[MAX_NAME+1];
@@ -391,6 +393,8 @@ int lm_get_lockspaces_dlm(struct list_head *ls_rejoin);
int lm_data_size_dlm(void);
int lm_is_running_dlm(void);
int lm_hosts_dlm(struct lockspace *ls, int notify);
+int lm_refresh_lv_start_dlm(struct action *act);
+int lm_refresh_lv_check_dlm(struct action *act);
static inline int lm_support_dlm(void)
{
@@ -467,6 +471,16 @@ static inline int lm_hosts_dlm(struct lockspace *ls, int notify)
return 0;
}
+static inline int lm_refresh_lv_start_dlm(struct action *act)
+{
+ return 0;
+}
+
+static inline int lm_refresh_lv_check_dlm(struct action *act)
+{
+ return 0;
+}
+
#endif /* dlm support */
#ifdef LOCKDSANLOCK_SUPPORT
diff --git a/lib/commands/toolcontext.h b/lib/commands/toolcontext.h
index 959c153..2ce243e 100644
--- a/lib/commands/toolcontext.h
+++ b/lib/commands/toolcontext.h
@@ -160,6 +160,7 @@ struct cmd_context {
unsigned lockd_vg_default_sh:1;
unsigned lockd_vg_enforce_sh:1;
unsigned lockd_lv_sh:1;
+ unsigned lockd_lv_sh_for_ex:1;
unsigned vg_notify:1;
unsigned lv_notify:1;
unsigned pv_notify:1;
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index bc6e66f..60e0617 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -17,6 +17,8 @@
#include "lib/cache/lvmcache.h"
#include "daemons/lvmlockd/lvmlockd-client.h"
+#include <mntent.h>
+
static daemon_handle _lvmlockd;
static const char *_lvmlockd_socket = NULL;
static int _use_lvmlockd = 0; /* is 1 if command is configured to use lvmlockd */
@@ -2120,22 +2122,17 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
* and using --lockopt skiplv to skip the incompat ex
* lock, then check if an existing sh lock exists.
*/
-
- if (!strcmp(cmd->name, "lvextend") ||
- !strcmp(cmd->name, "lvresize") ||
- !strcmp(cmd->name, "lvchange") ||
- !strcmp(cmd->name, "lvconvert")) {
+ if (!strcmp(cmd->name, "lvextend") || !strcmp(cmd->name, "lvresize") ||
+ !strcmp(cmd->name, "lvchange") || !strcmp(cmd->name, "lvconvert")) {
int ex = 0, sh = 0;
if (!_query_lock_lv(cmd, vg, lv_name, lv_uuid, lock_args, &ex, &sh))
return 1;
-
if (sh) {
log_warn("WARNING: shared LV may require refresh on other hosts where it is active.");
return 1;
}
}
-
return 1;
}
@@ -2209,15 +2206,10 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
* sh LV lock.
*/
- /*
- * Special case to allow lvextend under gfs2.
- *
- * FIXME: verify the LV actually holds gfs2/ocfs2 which we know
- * allow this (other users of the LV may not.)
- */
if (lockd_flags & LD_RF_SH_EXISTS) {
- if (flags & LDLV_EXTEND) {
+ if (flags & LDLV_SH_EXISTS_OK) {
log_warn("WARNING: extending LV with a shared lock, other hosts may require LV refresh.");
+ cmd->lockd_lv_sh_for_ex = 1;
return 1;
}
}
@@ -2399,6 +2391,110 @@ int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
lv->lock_args, def_mode, flags);
}
+/*
+ * Check if the LV being resized is used by gfs2/ocfs2 which we
+ * know allow resizing under a shared lock.
+ */
+static int _shared_fs_can_resize(struct logical_volume *lv)
+{
+ FILE *f = NULL;
+ struct mntent *m;
+ int ret = 0;
+
+ if (!(f = setmntent("/etc/mtab", "r")))
+ return 0;
+
+ while ((m = getmntent(f))) {
+ if (!strcmp(m->mnt_type, "gfs2") || !strcmp(m->mnt_type, "ocfs2")) {
+ /* FIXME: check if this mntent is for lv */
+ ret = 1;
+ break;
+ }
+ }
+ endmntent(f);
+ return ret;
+}
+
+/*
+ * A special lockd_lv function is used for lvresize so that details can
+ * be saved for doing cluster "refresh" at the end of the command.
+ */
+
+int lockd_lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
+ const char *def_mode, uint32_t flags,
+ struct lvresize_params *lp)
+{
+ char lv_uuid[64] __attribute__((aligned(8)));
+ char path[PATH_MAX];
+ int shupdate = (lp->lockopt && strstr(lp->lockopt, "shupdate"));
+ int norefresh = (lp->lockopt && strstr(lp->lockopt, "norefresh"));
+ int rv;
+
+ if (!vg_is_shared(lv->vg))
+ return 1;
+
+ if (!_use_lvmlockd) {
+ log_error("LV in VG %s with lock_type %s requires lvmlockd.",
+ lv->vg->name, lv->vg->lock_type);
+ return 0;
+ }
+
+ if (!_lvmlockd_connected)
+ return 0;
+
+ /*
+ * A special case for gfs2 where we want to allow lvextend
+ * of an LV that has an existing shared lock, which is normally
+ * incompatible with the ex lock required by lvextend.
+ *
+ * Check if gfs2 or ocfs2 is mounted on the LV, and enable this
+ * SH_EXISTS_OK flag if so. Other users of the LV may not want
+ * to allow this. --lockopt shupdate allows the shared lock in
+ * place of ex even we don't detect gfs2/ocfs2.
+ */
+ if (lp->resize == LV_EXTEND) {
+ if (shupdate || _shared_fs_can_resize(lv))
+ flags |= LDLV_SH_EXISTS_OK;
+ }
+
+ rv = lockd_lv(cmd, lv, def_mode, flags);
+
+ if (norefresh)
+ return rv;
+
+ /*
+ * If lockd_lv found an existing sh lock in lvmlockd and
+ * used that in place of the usual ex lock (we allowed this
+ * with SH_EXISTS_OK), then it sets this flag.
+ *
+ * We use this as a signal that we should try to refresh
+ * the LV on remote nodes through dlm/corosync at the end
+ * of the command.
+ *
+ * If lockd_lv sucessfully acquired the LV lock ex (did not
+ * need to make use of SH_EXISTS_OK), then we know the LV
+ * is active here only (or not active anywhere) and we
+ * don't need to do any remote refresh.
+ *
+ * lvresize --lockopt norefresh disables the remote refresh.
+ */
+ if (cmd->lockd_lv_sh_for_ex) {
+ if (!id_write_format(&lv->lvid.id[1], lv_uuid, sizeof(lv_uuid)))
+ return 0;
+ if (dm_snprintf(path, sizeof(path), "%s/%s/%s",
+ cmd->dev_dir, lv->vg->name, lv->name) < 0) {
+ log_error("LV path too long for lvmlockd refresh.");
+ return 0;
+ }
+
+ /* These will be used at the end of lvresize to do lockd_lv_refresh */
+ lp->lockd_lv_refresh_path = dm_pool_strdup(cmd->mem, path);
+ lp->lockd_lv_refresh_uuid = dm_pool_strdup(cmd->mem, lv_uuid);
+ }
+
+ return rv;
+}
+
static int _init_lv_sanlock(struct cmd_context *cmd, struct volume_group *vg,
const char *lv_name, struct id *lv_id,
const char **lock_args_ret)
@@ -2915,3 +3011,44 @@ int lockd_lv_uses_lock(struct logical_volume *lv)
return 1;
}
+
+/*
+ * send lvmlockd a request to use libdlmcontrol dlmc_run_start/dlmc_run_check
+ * to run a command on all nodes running dlm_controld:
+ * lvm lvchange --refresh --nolocking <path>
+ */
+
+int lockd_lv_refresh(struct cmd_context *cmd, struct lvresize_params *lp)
+{
+ daemon_reply reply;
+ char *lv_uuid = lp->lockd_lv_refresh_uuid;
+ char *path = lp->lockd_lv_refresh_path;
+ int result;
+
+ if (!lv_uuid || !path)
+ return 1;
+
+ log_warn("Refreshing LV %s on other hosts...", path);
+
+ reply = _lockd_send("refresh_lv",
+ "pid = " FMTd64, (int64_t) getpid(),
+ "opts = %s", "none",
+ "lv_uuid = %s", lv_uuid,
+ "path = %s", path,
+ NULL);
+
+ if (!_lockd_result(reply, &result, NULL)) {
+ /* No result from lvmlockd, it is probably not running. */
+ log_error("LV refresh failed for LV %s", path);
+ return 0;
+ }
+ daemon_reply_destroy(reply);
+
+ if (result < 0) {
+ log_error("Failed to refresh LV on all hosts.");
+ log_error("Manual lvchange --refresh required on all hosts for %s.", path);
+ return 0;
+ }
+ return 1;
+}
+
diff --git a/lib/locking/lvmlockd.h b/lib/locking/lvmlockd.h
index 53d077e..ecd39bb 100644
--- a/lib/locking/lvmlockd.h
+++ b/lib/locking/lvmlockd.h
@@ -22,7 +22,7 @@
/* lockd_lv flags */
#define LDLV_MODE_NO_SH 0x00000001
#define LDLV_PERSISTENT 0x00000002
-#define LDLV_EXTEND 0x00000004
+#define LDLV_SH_EXISTS_OK 0x00000004
/* lvmlockd result flags */
#define LD_RF_NO_LOCKSPACES 0x00000001
@@ -82,6 +82,8 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
const char *lock_args, const char *def_mode, uint32_t flags);
int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
const char *def_mode, uint32_t flags);
+int lockd_lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
+ const char *def_mode, uint32_t flags, struct lvresize_params *lp);
/* lvcreate/lvremove use init/free */
@@ -98,6 +100,8 @@ int handle_sanlock_lv(struct cmd_context *cmd, struct volume_group *vg);
int lockd_lv_uses_lock(struct logical_volume *lv);
+int lockd_lv_refresh(struct cmd_context *cmd, struct lvresize_params *lp);
+
#else /* LVMLOCKD_SUPPORT */
static inline void lvmlockd_set_socket(const char *sock)
@@ -208,6 +212,12 @@ static inline int lockd_lv(struct cmd_context *cmd, struct logical_volume *lv,
return 1;
}
+static inline int lockd_lv_resize(struct cmd_context *cmd, struct logical_volume *lv,
+ const char *def_mode, uint32_t flags, struct lvresize_params *lp)
+{
+ return 0;
+}
+
static inline int lockd_init_lv(struct cmd_context *cmd, struct volume_group *vg,
struct logical_volume *lv, struct lvcreate_params *lp)
{
@@ -242,6 +252,11 @@ static inline int lockd_lv_uses_lock(struct logical_volume *lv)
return 0;
}
+static inline int lockd_lv_refresh(struct cmd_context *cmd, struct lvresize_params *lp)
+{
+ return 0;
+}
+
#endif /* LVMLOCKD_SUPPORT */
#endif /* _LVMLOCKD_H */
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index c21a0f9..cc391b6 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -5762,7 +5762,7 @@ int lv_resize(struct logical_volume *lv,
* If the LV is locked from activation, this lock call is a no-op.
* Otherwise, this acquires a transient lock on the lv (not PERSISTENT).
*/
- if (!lockd_lv(cmd, lock_lv, "ex", (lp->resize == LV_EXTEND) ? LDLV_EXTEND : 0))
+ if (!lockd_lv_resize(cmd, lock_lv, "ex", 0, lp))
return_0;
if (!archive(vg))
diff --git a/lib/metadata/metadata-exported.h b/lib/metadata/metadata-exported.h
index df217db..0683d5f 100644
--- a/lib/metadata/metadata-exported.h
+++ b/lib/metadata/metadata-exported.h
@@ -667,6 +667,10 @@ struct lvresize_params {
int approx_alloc;
int extents_are_pes; /* Is 'extents' counting PEs or LEs? */
int size_changed; /* Was there actually a size change */
+
+ const char *lockopt;
+ char *lockd_lv_refresh_path; /* set during resize to use for refresh at the end */
+ char *lockd_lv_refresh_uuid; /* set during resize to use for refresh at the end */
};
void pvcreate_params_set_defaults(struct pvcreate_params *pp);
diff --git a/tools/lvresize.c b/tools/lvresize.c
index 9b061ac..0c72a81 100644
--- a/tools/lvresize.c
+++ b/tools/lvresize.c
@@ -152,6 +152,7 @@ static int _lvresize_params(struct cmd_context *cmd, int argc, char **argv,
lp->nofsck = arg_is_set(cmd, nofsck_ARG);
lp->nosync = arg_is_set(cmd, nosync_ARG);
lp->resizefs = arg_is_set(cmd, resizefs_ARG);
+ lp->lockopt = arg_str_value(cmd, lockopt_ARG, NULL);
return 1;
}
@@ -205,5 +206,8 @@ int lvresize(struct cmd_context *cmd, int argc, char **argv)
destroy_processing_handle(cmd, handle);
+ if (lp.lockd_lv_refresh_path && !lockd_lv_refresh(cmd, &lp))
+ ret = ECMD_FAILED;
+
return ret;
}
master - lvextend: allow on LV active with a shared lock
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=d369de8399e14e82fb1...
Commit: d369de8399e14e82fb1ea45e7977d917411fbc21
Parent: 9b4926aaff7f8644c8492cd68ab0b7079416ef3a
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Tue Mar 19 14:38:38 2019 -0500
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Thu Mar 21 12:38:20 2019 -0500
lvextend: allow on LV active with a shared lock
Detect when a shared lock exists, don't require the
normal exclusive lock, and allow the lvextend.
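In terms of the flags below: lvmlockd tags the failed ex request with
LD_AF_SH_EXISTS, the reply carries "SH_EXISTS", the client maps that to
LD_RF_SH_EXISTS, and an lvextend caller that passed LDLV_EXTEND is allowed to
proceed. A trivially compilable restatement of that final check (constants
copied from the lvmlockd.h hunk; allow_sh_for_ex is a hypothetical name):

  #include <stdint.h>

  #define LDLV_EXTEND     0x00000004      /* lockd_lv flag */
  #define LD_RF_SH_EXISTS 0x00000020      /* lvmlockd result flag */

  /* Allow the ex LV lock request to fall back to the existing sh lock only
   * for an extend; every other caller still gets the incompatible-mode
   * error. */
  static int allow_sh_for_ex(uint32_t lockd_flags, uint32_t flags)
  {
          return (lockd_flags & LD_RF_SH_EXISTS) && (flags & LDLV_EXTEND);
  }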
---
daemons/lvmlockd/lvmlockd-core.c | 5 ++++-
daemons/lvmlockd/lvmlockd-internal.h | 1 +
lib/locking/lvmlockd.c | 17 +++++++++++++++++
lib/locking/lvmlockd.h | 2 ++
lib/metadata/lv_manip.c | 2 +-
5 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 40a2f21..da3de54 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -1816,9 +1816,9 @@ static void res_process(struct lockspace *ls, struct resource *r,
add_client_result(act);
} else {
/* persistent lock is sh, transient request is ex */
- /* FIXME: can we remove this case? do a convert here? */
log_debug("res_process %s existing persistent lock new transient", r->name);
r->last_client_id = act->client_id;
+ act->flags |= LD_AF_SH_EXISTS;
act->result = -EEXIST;
list_del(&act->list);
add_client_result(act);
@@ -3661,6 +3661,9 @@ static int client_send_result(struct client *cl, struct action *act)
if ((act->flags & LD_AF_WARN_GL_REMOVED) || gl_vg_removed)
strcat(result_flags, "WARN_GL_REMOVED,");
+ if (act->flags & LD_AF_SH_EXISTS)
+ strcat(result_flags, "SH_EXISTS,");
+
if (act->op == LD_OP_INIT) {
/*
* init is a special case where lock args need
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index f0fa85f..50015f1 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -106,6 +106,7 @@ struct client {
#define LD_AF_WARN_GL_REMOVED 0x00020000
#define LD_AF_LV_LOCK 0x00040000
#define LD_AF_LV_UNLOCK 0x00080000
+#define LD_AF_SH_EXISTS 0x00100000
/*
* Number of times to repeat a lock request after
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index a143689..bc6e66f 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -118,6 +118,9 @@ static void _flags_str_to_lockd_flags(const char *flags_str, uint32_t *lockd_fla
if (strstr(flags_str, "WARN_GL_REMOVED"))
*lockd_flags |= LD_RF_WARN_GL_REMOVED;
+
+ if (strstr(flags_str, "SH_EXISTS"))
+ *lockd_flags |= LD_RF_SH_EXISTS;
}
/*
@@ -2205,6 +2208,20 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
* LV with an ex LV lock when the LV is already active with a
* sh LV lock.
*/
+
+ /*
+ * Special case to allow lvextend under gfs2.
+ *
+ * FIXME: verify the LV actually holds gfs2/ocfs2 which we know
+ * allow this (other users of the LV may not.)
+ */
+ if (lockd_flags & LD_RF_SH_EXISTS) {
+ if (flags & LDLV_EXTEND) {
+ log_warn("WARNING: extending LV with a shared lock, other hosts may require LV refresh.");
+ return 1;
+ }
+ }
+
log_error("LV is already locked with incompatible mode: %s/%s", vg->name, lv_name);
return 0;
}
diff --git a/lib/locking/lvmlockd.h b/lib/locking/lvmlockd.h
index e5ae331..53d077e 100644
--- a/lib/locking/lvmlockd.h
+++ b/lib/locking/lvmlockd.h
@@ -22,6 +22,7 @@
/* lockd_lv flags */
#define LDLV_MODE_NO_SH 0x00000001
#define LDLV_PERSISTENT 0x00000002
+#define LDLV_EXTEND 0x00000004
/* lvmlockd result flags */
#define LD_RF_NO_LOCKSPACES 0x00000001
@@ -29,6 +30,7 @@
#define LD_RF_WARN_GL_REMOVED 0x00000004
#define LD_RF_DUP_GL_LS 0x00000008
#define LD_RF_NO_LM 0x00000010
+#define LD_RF_SH_EXISTS 0x00000020
/* lockd_state flags */
#define LDST_EX 0x00000001
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index e128336..c21a0f9 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -5762,7 +5762,7 @@ int lv_resize(struct logical_volume *lv,
* If the LV is locked from activation, this lock call is a no-op.
* Otherwise, this acquires a transient lock on the lv (not PERSISTENT).
*/
- if (!lockd_lv(cmd, lock_lv, "ex", 0))
+ if (!lockd_lv(cmd, lock_lv, "ex", (lp->resize == LV_EXTEND) ? LDLV_EXTEND : 0))
return_0;
if (!archive(vg))
master - warn about changes to an active lv with shared lock
by David Teigland
Gitweb: https://sourceware.org/git/?p=lvm2.git;a=commitdiff;h=9b4926aaff7f8644c84...
Commit: 9b4926aaff7f8644c8492cd68ab0b7079416ef3a
Parent: 7f757ab6166074dd3286983c05a30623095845ff
Author: David Teigland <teigland(a)redhat.com>
AuthorDate: Thu Mar 7 11:20:41 2019 -0600
Committer: David Teigland <teigland(a)redhat.com>
CommitterDate: Thu Mar 21 12:38:20 2019 -0500
warn about changes to an active lv with shared lock
When an LV is active with a shared lock, a command can be
run to change the LV with --lockopt skiplv (to override the
exclusive lock the command ordinarily requires, which is not
compatible with the outstanding shared lock).
In this case, other hosts may have the LV active and may
need to refresh the LV, so print a warning stating this.
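The check itself (from the lvmlockd.c hunk below) boils down to: when
--lockopt skiplv is used on a dlm VG by one of the LV-modifying commands, ask
lvmlockd for the current LV lock mode and warn if a shared lock is found. A
condensed sketch of that path, assuming the lvm2 internals used in the hunk
and with error handling trimmed:

  int ex = 0, sh = 0;

  if (cmd->lockd_lv_disable && !strcmp(vg->lock_type, "dlm") &&
      (!strcmp(cmd->name, "lvextend") || !strcmp(cmd->name, "lvresize") ||
       !strcmp(cmd->name, "lvchange") || !strcmp(cmd->name, "lvconvert"))) {

          /* "query_lock_lv" request; lvmlockd replies with the held mode. */
          if (_query_lock_lv(cmd, vg, lv_name, lv_uuid, lock_args, &ex, &sh) && sh)
                  log_warn("WARNING: shared LV may require refresh on other "
                           "hosts where it is active.");
          return 1;       /* skiplv: proceed without taking the ex lock */
  }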
---
daemons/lvmlockd/lvmlockd-core.c | 41 ++++++++++++++++
daemons/lvmlockd/lvmlockd-internal.h | 1 +
lib/locking/lvmlockd.c | 84 ++++++++++++++++++++++++++++++++-
3 files changed, 123 insertions(+), 3 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index 60ae537..40a2f21 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -725,6 +725,8 @@ static const char *op_str(int x)
return "rename_final";
case LD_OP_RUNNING_LM:
return "running_lm";
+ case LD_OP_QUERY_LOCK:
+ return "query_lock";
case LD_OP_FIND_FREE_LOCK:
return "find_free_lock";
case LD_OP_KILL_VG:
@@ -2196,6 +2198,7 @@ static int process_op_during_kill(struct action *act)
case LD_OP_UPDATE:
case LD_OP_RENAME_BEFORE:
case LD_OP_RENAME_FINAL:
+ case LD_OP_QUERY_LOCK:
case LD_OP_FIND_FREE_LOCK:
return 0;
};
@@ -2420,6 +2423,19 @@ static void *lockspace_thread_main(void *arg_in)
break;
}
+ if (act->op == LD_OP_QUERY_LOCK) {
+ r = find_resource_act(ls, act, 0);
+ if (!r)
+ act->result = -ENOENT;
+ else {
+ act->result = 0;
+ act->mode = r->mode;
+ }
+ list_del(&act->list);
+ add_client_result(act);
+ continue;
+ }
+
if (act->op == LD_OP_FIND_FREE_LOCK && act->rt == LD_RT_VG) {
uint64_t free_offset = 0;
int sector_size = 0;
@@ -3673,6 +3689,20 @@ static int client_send_result(struct client *cl, struct action *act)
"result_flags = %s", result_flags[0] ? result_flags : "none",
NULL);
+ } else if (act->op == LD_OP_QUERY_LOCK) {
+
+ log_debug("send %s[%d] cl %u %s %s rv %d mode %d",
+ cl->name[0] ? cl->name : "client", cl->pid, cl->id,
+ op_str(act->op), rt_str(act->rt),
+ act->result, act->mode);
+
+ res = daemon_reply_simple("OK",
+ "op = " FMTd64, (int64_t)act->op,
+ "op_result = " FMTd64, (int64_t) act->result,
+ "lock_type = %s", lm_str(act->lm_type),
+ "mode = %s", mode_str(act->mode),
+ NULL);
+
} else if (act->op == LD_OP_DUMP_LOG || act->op == LD_OP_DUMP_INFO) {
/*
* lvmlockctl creates the unix socket then asks us to write to it.
@@ -4003,6 +4033,16 @@ static int str_to_op_rt(const char *req_name, int *op, int *rt)
*rt = 0;
return 0;
}
+ if (!strcmp(req_name, "query_lock_vg")) {
+ *op = LD_OP_QUERY_LOCK;
+ *rt = LD_RT_VG;
+ return 0;
+ }
+ if (!strcmp(req_name, "query_lock_lv")) {
+ *op = LD_OP_QUERY_LOCK;
+ *rt = LD_RT_LV;
+ return 0;
+ }
if (!strcmp(req_name, "find_free_lock")) {
*op = LD_OP_FIND_FREE_LOCK;
*rt = LD_RT_VG;
@@ -4582,6 +4622,7 @@ static void client_recv_action(struct client *cl)
case LD_OP_DISABLE:
case LD_OP_FREE:
case LD_OP_RENAME_BEFORE:
+ case LD_OP_QUERY_LOCK:
case LD_OP_FIND_FREE_LOCK:
case LD_OP_KILL_VG:
case LD_OP_DROP_VG:
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 04645fa..f0fa85f 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -53,6 +53,7 @@ enum {
LD_OP_KILL_VG,
LD_OP_DROP_VG,
LD_OP_BUSY,
+ LD_OP_QUERY_LOCK,
};
/* resource types */
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 5ecdc64..a143689 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -2031,6 +2031,59 @@ int lockd_vg_update(struct volume_group *vg)
return ret;
}
+static int _query_lock_lv(struct cmd_context *cmd, struct volume_group *vg,
+ const char *lv_name, char *lv_uuid,
+ const char *lock_args, int *ex, int *sh)
+{
+ daemon_reply reply;
+ const char *opts = NULL;
+ const char *reply_str;
+ int result;
+ int ret;
+
+ log_debug("lockd query LV %s/%s", vg->name, lv_name);
+
+ reply = _lockd_send("query_lock_lv",
+ "pid = " FMTd64, (int64_t) getpid(),
+ "opts = %s", opts ?: "none",
+ "vg_name = %s", vg->name,
+ "lv_name = %s", lv_name,
+ "lv_uuid = %s", lv_uuid,
+ "vg_lock_type = %s", vg->lock_type,
+ "vg_lock_args = %s", vg->lock_args,
+ "lv_lock_args = %s", lock_args ?: "none",
+ NULL);
+
+ if (!_lockd_result(reply, &result, NULL)) {
+ /* No result from lvmlockd, it is probably not running. */
+ log_error("Lock query failed for LV %s/%s", vg->name, lv_name);
+ return 0;
+ } else {
+ ret = (result < 0) ? 0 : 1;
+ }
+
+ if (!ret)
+ log_error("query_lock_lv lvmlockd result %d", result);
+
+ if (!(reply_str = daemon_reply_str(reply, "mode", NULL))) {
+ log_error("query_lock_lv mode not returned");
+ ret = 0;
+ }
+
+ if (reply_str && !strcmp(reply_str, "ex"))
+ *ex = 1;
+ else if (reply_str && !strcmp(reply_str, "sh"))
+ *sh = 1;
+
+ daemon_reply_destroy(reply);
+
+ /* The lv was not active/locked. */
+ if (result == -ENOENT)
+ return 1;
+
+ return 1;
+}
+
/*
* When this is called directly (as opposed to being called from
* lockd_lv), the caller knows that the LV has a lock.
@@ -2055,6 +2108,34 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
return 0;
}
+ if (!id_write_format(lv_id, lv_uuid, sizeof(lv_uuid)))
+ return_0;
+
+ if (cmd->lockd_lv_disable && !strcmp(vg->lock_type, "dlm")) {
+ /*
+ * If the command is updating an LV with a shared lock,
+ * and using --lockopt skiplv to skip the incompat ex
+ * lock, then check if an existing sh lock exists.
+ */
+
+ if (!strcmp(cmd->name, "lvextend") ||
+ !strcmp(cmd->name, "lvresize") ||
+ !strcmp(cmd->name, "lvchange") ||
+ !strcmp(cmd->name, "lvconvert")) {
+ int ex = 0, sh = 0;
+
+ if (!_query_lock_lv(cmd, vg, lv_name, lv_uuid, lock_args, &ex, &sh))
+ return 1;
+
+ if (sh) {
+ log_warn("WARNING: shared LV may require refresh on other hosts where it is active.");
+ return 1;
+ }
+ }
+
+ return 1;
+ }
+
if (cmd->lockd_lv_disable)
return 1;
@@ -2063,9 +2144,6 @@ int lockd_lv_name(struct cmd_context *cmd, struct volume_group *vg,
if (!_lvmlockd_connected)
return 0;
- if (!id_write_format(lv_id, lv_uuid, sizeof(lv_uuid)))
- return_0;
-
/*
* For lvchange/vgchange activation, def_mode is "sh" or "ex"
* according to the specific -a{e,s}y mode designation.