Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=32e22a00378192f2…
Commit: 32e22a00378192f226932258d9c5b55b073b4528
Parent: 231b7df6cc80ffb1a93db6f948670d9c37b169ea
Author: David Teigland <teigland@redhat.com>
AuthorDate: Wed Aug 26 14:06:39 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 10:27:24 2015 -0500
lvmlockd: rescan lockd VG in two new cases
Previously, a command would only rescan a lockd VG
when lvmetad returned the "vg_invalid" flag indicating
that the cached copy was invalid (which is done by
lvmlockd.) This is still the usual reason for
rescanning a lockd VG, but two new special cases are
added where we also do the rescan:
. When the --shared option is used on a host not running
lvmlockd to display lockd VGs. This is the same case
as using --foreign to display foreign VGs, but --shared
was missing the corresponding bits to rescan the VGs.
. When a lockd VG is allowed to be read for displaying
after failing to acquire the lock from lvmlockd. In
this case, the usual mechanism for validating the
cache is missed, so assume the cache would have been
invalidated. (This had been a previous todo item
that was lost during other cleanup.)
These were long-standing todos that were lost track of.
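For reference, here is a condensed sketch of the resulting rescan decision in lvmetad_vg_lookup() (lib/cache/lvmetad.c). It is illustrative only, not a verbatim excerpt; the helper name _want_rescan does not exist in the tree:

/* Illustrative only: the three cases that now force a rescan. */
static int _want_rescan(struct cmd_context *cmd, struct volume_group *vg,
                        struct dm_config_node *root)
{
	/* 1. --shared used on a host not running lvmlockd. */
	if (is_lockd_type(vg->lock_type) && cmd->include_shared_vgs)
		return 1;

	/* 2. The lockd VG lock could not be acquired from lvmlockd. */
	if (is_lockd_type(vg->lock_type) && cmd->lockd_vg_rescan)
		return 1;

	/* 3. lvmetad returned "vg_invalid" (set via lvmlockd). */
	if (dm_config_find_node(root, "vg_invalid"))
		return 1;

	return 0;
}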
---
lib/cache/lvmetad.c | 45 ++++++++++++++++++++++++++++++++++++++++++-
lib/commands/toolcontext.h | 1 +
lib/locking/lvmlockd.c | 35 ++++++++++++++++++++++++++++++++-
3 files changed, 77 insertions(+), 4 deletions(-)
diff --git a/lib/cache/lvmetad.c b/lib/cache/lvmetad.c
index 856b30f..b2e2f55 100644
--- a/lib/cache/lvmetad.c
+++ b/lib/cache/lvmetad.c
@@ -434,6 +434,7 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
struct format_type *fmt;
struct dm_config_node *pvcn;
struct pv_list *pvl;
+ int rescan = 0;
if (!lvmetad_active())
return NULL;
@@ -493,15 +494,55 @@ struct volume_group *lvmetad_vg_lookup(struct cmd_context *cmd, const char *vgna
goto_out;
/*
+ * Read the VG from disk, ignoring the lvmetad copy in these
+ * cases:
+ *
+ * 1. The host is not using lvmlockd, but is reading lockd VGs
+ * using the --shared option. The shared option is meant to
+ * let hosts not running lvmlockd look at lockd VGs, like the
+ * foreign option allows hosts to look at foreign VGs. When
+ * --foreign is used, the code forces a rescan since the local
+ * lvmetad cache of foreign VGs is likely stale. Similarly,
+ * for --shared, have the code reading the shared VGs below
+ * not use the cached copy from lvmetad but to rescan the VG.
+ *
+ * 2. The host failed to acquire the VG lock from lvmlockd for
+ * the lockd VG. In this case, the usual mechanisms for
+ * updating the lvmetad copy of the VG have been missed. Since
+ * we don't know if the cached copy is valid, assume it's not.
+ *
+ * 3. lvmetad has returned the "vg_invalid" flag, which is the
+ * usual mechanism used by lvmlockd/lvmetad to cause a host to
+ * reread a VG from disk that has been modified from another
+ * host.
+ */
+
+ if (is_lockd_type(vg->lock_type) && cmd->include_shared_vgs) {
+ log_debug_lvmetad("Rescan VG %s because including shared", vgname);
+ rescan = 1;
+ } else if (is_lockd_type(vg->lock_type) && cmd->lockd_vg_rescan) {
+ log_debug_lvmetad("Rescan VG %s because no lvmlockd lock is held", vgname);
+ rescan = 1;
+ } else if (dm_config_find_node(reply.cft->root, "vg_invalid")) {
+ log_debug_lvmetad("Rescan VG %s because lvmetad returned invalid", vgname);
+ rescan = 1;
+ }
+
+ /*
* locking may have detected a newer vg version and
* invalidated the cached vg.
*/
- if (dm_config_find_node(reply.cft->root, "vg_invalid")) {
+ if (rescan) {
log_debug_lvmetad("Update invalid lvmetad cache for VG %s", vgname);
vg2 = lvmetad_pvscan_vg(cmd, vg);
release_vg(vg);
vg = vg2;
- fid = vg->fid;
+ if (!vg) {
+ log_debug_lvmetad("VG %s from lvmetad not found during rescan.", vgname);
+ fid = NULL;
+ goto out;
+ } else
+ fid = vg->fid;
}
dm_list_iterate_items(pvl, &vg->pvs) {
diff --git a/lib/commands/toolcontext.h b/lib/commands/toolcontext.h
index 0440163..cd55348 100644
--- a/lib/commands/toolcontext.h
+++ b/lib/commands/toolcontext.h
@@ -134,6 +134,7 @@ struct cmd_context {
unsigned lockd_vg_disable:1;
unsigned lockd_lv_disable:1;
unsigned lockd_gl_removed:1;
+ unsigned lockd_vg_rescan:1;
unsigned lockd_vg_default_sh:1;
unsigned lockd_vg_enforce_sh:1;
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index e647c96..45159d2 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -1323,6 +1323,9 @@ int lockd_gl_create(struct cmd_context *cmd, const char *def_mode, const char *v
return 0;
}
+ /* --shared with vgcreate does not mean include_shared_vgs */
+ cmd->include_shared_vgs = 0;
+
lvmetad_validate_global_cache(cmd, 1);
return 1;
@@ -1631,8 +1634,31 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
int result;
int ret;
+ /*
+ * The result of the VG lock request is saved in lockd_state to be
+ * passed into vg_read where the lock result is needed once we
+ * know if this is a local VG or lockd VG.
+ */
*lockd_state = 0;
+ /*
+ * Use of lockd_vg_rescan.
+ *
+ * This is the VG equivalent of using lvmetad_validate_global_cache()
+ * for the global lock (after failing to acquire the global lock). If
+ * we fail to acquire the VG lock from lvmlockd, then the lvmlockd
+ * mechanism has been missed that would have updated the cached lvmetad
+ * copy of the VG. So, set lockd_vg_rescan to tell the VG reading code
+ * to treat the lvmetad copy as if the invalid flag had been returned.
+ * i.e. If a lockd VG is read without a lock, ignore the lvmetad copy
+ * and read it from disk since we don't know if the cache is stale.
+ *
+ * Because lvmlockd requests return an error for local VGs, this will
+ * be set for local VGs, but it ends up being ignored once the VG is
+ * read and found to be a local VG.
+ */
+ cmd->lockd_vg_rescan = 0;
+
if (!is_real_vg(vg_name))
return 1;
@@ -1703,6 +1729,7 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
*/
if (!_use_lvmlockd) {
*lockd_state |= LDST_FAIL_REQUEST;
+ cmd->lockd_vg_rescan = 1;
return 1;
}
@@ -1719,6 +1746,7 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
* this error for local VGs, but we do care for lockd VGs.
*/
*lockd_state |= LDST_FAIL_REQUEST;
+ cmd->lockd_vg_rescan = 1;
return 1;
}
@@ -1737,12 +1765,15 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
break;
case -ENOLS:
*lockd_state |= LDST_FAIL_NOLS;
+ cmd->lockd_vg_rescan = 1;
break;
case -ESTARTING:
*lockd_state |= LDST_FAIL_STARTING;
+ cmd->lockd_vg_rescan = 1;
break;
default:
*lockd_state |= LDST_FAIL_OTHER;
+ cmd->lockd_vg_rescan = 1;
}
/*
@@ -1758,8 +1789,8 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
* since a sanlock VG must be stopped everywhere before it's removed.
*/
if (result == -EREMOVED) {
- log_error("VG %s lock is removed", vg_name);
- ret = 0;
+ log_error("VG %s lock failed: removed", vg_name);
+ ret = 1;
goto out;
}
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=231b7df6cc80ffb1…
Commit: 231b7df6cc80ffb1a93db6f948670d9c37b169ea
Parent: 521136181b6c6cfde4c07d5aecf1dcc5e0ab9594
Author: David Teigland <teigland@redhat.com>
AuthorDate: Wed Aug 26 10:01:05 2015 -0500
Committer: David Teigland <teigland@redhat.com>
CommitterDate: Thu Aug 27 10:27:24 2015 -0500
lvmlockd: improve VG removal for lock_type dlm
This makes lvmlockd removal steps for dlm VGs closely match
sanlock VGs. Because dlm lockspaces are not required to be
stopped on all hosts before vgremove, there is an extra bit
for dlm lockspaces, where a flag is set in the VG lock lvb
indicating that the VG was removed. If other hosts happen
to use the VG lock, they will see this flag and stop their
lockspace.
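For reference, a condensed sketch of the lvb handshake added below, lifted almost verbatim from the diff (see lvmlockd-dlm.c and lvmlockd-core.c):

/* vgremove host: lm_unlock_dlm() marks the lvb while dropping the
 * ex VG lock when called with LMUF_FREE_VG. */
if ((lmu_flags & LMUF_FREE_VG) && (r->type == LD_RT_VG))
	rdd->vb->flags = cpu_to_le16(VBF_REMOVED);

/* Any other host: lm_lock_dlm() checks the lvb after acquiring the
 * VG lock and reports that the VG is gone. */
vb_flags = le16_to_cpu(vb.flags);
if (vb_flags & VBF_REMOVED)
	return -EREMOVED;	/* res_process() then sets ls->thread_stop */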
---
daemons/lvmlockd/lvmlockd-client.h | 1 +
daemons/lvmlockd/lvmlockd-core.c | 7 +++-
daemons/lvmlockd/lvmlockd-dlm.c | 25 ++++++++++++----
daemons/lvmlockd/lvmlockd-internal.h | 4 ++
lib/locking/lvmlockd.c | 51 ++++++++++++++++++++++++---------
5 files changed, 66 insertions(+), 22 deletions(-)
diff --git a/daemons/lvmlockd/lvmlockd-client.h b/daemons/lvmlockd/lvmlockd-client.h
index 0a1424f..67fcbe3 100644
--- a/daemons/lvmlockd/lvmlockd-client.h
+++ b/daemons/lvmlockd/lvmlockd-client.h
@@ -47,5 +47,6 @@ static inline void lvmlockd_close(daemon_handle h)
#define ELOCKD 216
#define EVGKILLED 217 /* sanlock lost access to leases and VG is killed. */
#define ELOCKIO 218 /* sanlock io errors during lock op, may be transient. */
+#define EREMOVED 219
#endif /* _LVM_LVMLOCKD_CLIENT_H */
diff --git a/daemons/lvmlockd/lvmlockd-core.c b/daemons/lvmlockd/lvmlockd-core.c
index b99cb0b..3d41bd2 100644
--- a/daemons/lvmlockd/lvmlockd-core.c
+++ b/daemons/lvmlockd/lvmlockd-core.c
@@ -1763,7 +1763,7 @@ static void res_process(struct lockspace *ls, struct resource *r,
list_del(&act->list);
add_client_result(act);
}
- if (rv == -EUNATCH)
+ if (rv == -EUNATCH || rv == -EREMOVED)
goto r_free;
}
}
@@ -1796,7 +1796,7 @@ static void res_process(struct lockspace *ls, struct resource *r,
list_del(&act->list);
add_client_result(act);
}
- if (rv == -EUNATCH)
+ if (rv == -EUNATCH || rv == -EREMOVED)
goto r_free;
break;
}
@@ -1817,6 +1817,9 @@ r_free:
lm_rem_resource(ls, r);
list_del(&r->list);
free_resource(r);
+
+ if (rv == -EREMOVED)
+ ls->thread_stop = 1;
}
#define LOCKS_EXIST_ANY 1
diff --git a/daemons/lvmlockd/lvmlockd-dlm.c b/daemons/lvmlockd/lvmlockd-dlm.c
index e242685..676c944 100644
--- a/daemons/lvmlockd/lvmlockd-dlm.c
+++ b/daemons/lvmlockd/lvmlockd-dlm.c
@@ -443,6 +443,7 @@ int lm_lock_dlm(struct lockspace *ls, struct resource *r, int ld_mode,
struct val_blk vb;
uint32_t flags = 0;
uint16_t vb_version;
+ uint16_t vb_flags;
int mode;
int rv;
@@ -522,6 +523,7 @@ lockrv:
memcpy(&vb, lksb->sb_lvbptr, sizeof(struct val_blk));
vb_version = le16_to_cpu(vb.version);
+ vb_flags = le16_to_cpu(vb.flags);
if (vb_version && ((vb_version & 0xFF00) > (VAL_BLK_VERSION & 0xFF00))) {
log_error("S %s R %s lock_dlm ignore vb_version %x",
@@ -536,8 +538,14 @@ lockrv:
*r_version = le32_to_cpu(vb.r_version);
memcpy(rdd->vb, &vb, sizeof(vb)); /* rdd->vb saved as le */
- log_debug("S %s R %s lock_dlm get r_version %u",
- ls->name, r->name, *r_version);
+ log_debug("S %s R %s lock_dlm get r_version %u flags %x",
+ ls->name, r->name, *r_version, vb_flags);
+
+ if (vb_flags & VBF_REMOVED) {
+ log_debug("S %s R %s lock_dlm VG has been removed",
+ ls->name, r->name);
+ return -EREMOVED;
+ }
}
out:
return 0;
@@ -593,7 +601,7 @@ int lm_convert_dlm(struct lockspace *ls, struct resource *r,
}
int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
- uint32_t r_version, uint32_t lmuf_flags)
+ uint32_t r_version, uint32_t lmu_flags)
{
struct lm_dlm *lmd = (struct lm_dlm *)ls->lm_data;
struct rd_dlm *rdd = (struct rd_dlm *)r->lm_data;
@@ -602,7 +610,7 @@ int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
int rv;
log_debug("S %s R %s unlock_dlm r_version %u flags %x",
- ls->name, r->name, r_version, lmuf_flags);
+ ls->name, r->name, r_version, lmu_flags);
/*
* Do not set PERSISTENT, because we don't need an orphan
@@ -611,12 +619,17 @@ int lm_unlock_dlm(struct lockspace *ls, struct resource *r,
flags |= LKF_CONVERT;
- if (rdd->vb && r_version && (r->mode == LD_LK_EX)) {
+ if (rdd->vb && (r->mode == LD_LK_EX)) {
if (!rdd->vb->version) {
/* first time vb has been written */
rdd->vb->version = cpu_to_le16(VAL_BLK_VERSION);
}
- rdd->vb->r_version = cpu_to_le32(r_version);
+ if (r_version)
+ rdd->vb->r_version = cpu_to_le32(r_version);
+
+ if ((lmu_flags & LMUF_FREE_VG) && (r->type == LD_RT_VG))
+ rdd->vb->flags = cpu_to_le16(VBF_REMOVED);
+
memcpy(lksb->sb_lvbptr, rdd->vb, sizeof(struct val_blk));
log_debug("S %s R %s unlock_dlm set r_version %u",
diff --git a/daemons/lvmlockd/lvmlockd-internal.h b/daemons/lvmlockd/lvmlockd-internal.h
index 8e0582b..46ae67f 100644
--- a/daemons/lvmlockd/lvmlockd-internal.h
+++ b/daemons/lvmlockd/lvmlockd-internal.h
@@ -194,8 +194,12 @@ struct lockspace {
struct list_head resources; /* resource/lock state for gl/vg/lv */
};
+/* val_blk version */
#define VAL_BLK_VERSION 0x0101
+/* val_blk flags */
+#define VBF_REMOVED 0x0001
+
struct val_blk {
uint16_t version;
uint16_t flags;
diff --git a/lib/locking/lvmlockd.c b/lib/locking/lvmlockd.c
index 3f73cf3..e647c96 100644
--- a/lib/locking/lvmlockd.c
+++ b/lib/locking/lvmlockd.c
@@ -694,7 +694,8 @@ out:
static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
{
- uint32_t lockd_flags;
+ daemon_reply reply;
+ uint32_t lockd_flags = 0;
int result;
int ret;
@@ -704,23 +705,31 @@ static int _free_vg_dlm(struct cmd_context *cmd, struct volume_group *vg)
return 0;
/*
- * Unlocking the vg lock here preempts the lvmlockd unlock in
- * toollib.c which happens too late since the lockspace is
- * left here.
+ * For the dlm, free_vg means unlock the ex VG lock,
+ * and include an indication in the lvb that the VG
+ * has been removed. Then, leave the lockspace.
+ * If another host tries to acquire the VG lock, it
+ * will see that the VG has been removed by looking
+ * at the lvb value.
*/
- /* Equivalent to a standard unlock. */
- ret = _lockd_request(cmd, "lock_vg",
- vg->name, NULL, NULL, NULL, NULL, NULL, "un", NULL,
- &result, &lockd_flags);
+ reply = _lockd_send("free_vg",
+ "pid = %d", getpid(),
+ "vg_name = %s", vg->name,
+ "vg_lock_type = %s", vg->lock_type,
+ "vg_lock_args = %s", vg->lock_args,
+ NULL);
- if (!ret || result < 0) {
- log_error("_free_vg_dlm lvmlockd result %d", result);
- return 0;
+ if (!_lockd_result(reply, &result, &lockd_flags)) {
+ ret = 0;
+ } else {
+ ret = (result < 0) ? 0 : 1;
}
- /* Leave the dlm lockspace. */
- lockd_stop_vg(cmd, vg);
+ if (!ret)
+ log_error("_free_vg_dlm lvmlockd result %d", result);
+
+ daemon_reply_destroy(reply);
return 1;
}
@@ -893,7 +902,11 @@ static int _lockd_all_lvs(struct cmd_context *cmd, struct volume_group *vg)
int lockd_free_vg_before(struct cmd_context *cmd, struct volume_group *vg,
int changing)
{
- /* Check that no LVs are active on other hosts. */
+ /*
+ * Check that no LVs are active on other hosts.
+ * When removing (not changing), each LV is locked
+ * when it is removed, they do not need checking here.
+ */
if (changing && !_lockd_all_lvs(cmd, vg)) {
log_error("Cannot change VG %s with active LVs", vg->name);
return 0;
@@ -1741,6 +1754,16 @@ int lockd_vg(struct cmd_context *cmd, const char *vg_name, const char *def_mode,
}
/*
+ * The VG has been removed. This will only happen with a dlm VG
+ * since a sanlock VG must be stopped everywhere before it's removed.
+ */
+ if (result == -EREMOVED) {
+ log_error("VG %s lock is removed", vg_name);
+ ret = 0;
+ goto out;
+ }
+
+ /*
* The lockspace for the VG is starting (the VG must not
* be local), and is not yet ready to do locking. Allow
* reading without a sh lock during this period.
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=a37fd93fbb51eed8…
Commit: a37fd93fbb51eed8b102b373f2338f6d2802ae5f
Parent: 8740b7cb77699dbb78e80ed4e8bd6c742626558e
Author: Alasdair G Kergon <agk@redhat.com>
AuthorDate: Wed Aug 26 23:11:13 2015 +0100
Committer: Alasdair G Kergon <agk@redhat.com>
CommitterDate: Wed Aug 26 23:11:13 2015 +0100
pre-release
---
VERSION | 2 +-
VERSION_DM | 2 +-
WHATS_NEW | 2 +-
WHATS_NEW_DM | 2 +-
conf/example.conf.in | 1661 +++++++++++++++++++++++++++----------------------
conf/lvmlocal.conf.in | 35 +-
6 files changed, 938 insertions(+), 766 deletions(-)
diff --git a/VERSION b/VERSION
index 256a299..6b45e2d 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.02.129(2)-git (2015-08-17)
+2.02.129(2)-git (2015-08-26)
diff --git a/VERSION_DM b/VERSION_DM
index 84f6bdb..d481fb6 100644
--- a/VERSION_DM
+++ b/VERSION_DM
@@ -1 +1 @@
-1.02.106-git (2015-08-17)
+1.02.106-git (2015-08-26)
diff --git a/WHATS_NEW b/WHATS_NEW
index cac567f..42179b9 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,4 +1,4 @@
-Version 2.02.129 -
+Version 2.02.129 - 26th August 2015
===================================
Drop error message when vgdisplay encounters an exported VG. (2.02.27)
Fix shared library generation to stop exporting internal functions.(2.02.120)
diff --git a/WHATS_NEW_DM b/WHATS_NEW_DM
index 28bc933..8dc5076 100644
--- a/WHATS_NEW_DM
+++ b/WHATS_NEW_DM
@@ -1,4 +1,4 @@
-Version 1.02.106 -
+Version 1.02.106 - 26th August 2015
===================================
Add 'precise' column to statistics reports.
Add --precise switch to 'dmstats create' to request nanosecond counters.
diff --git a/conf/example.conf.in b/conf/example.conf.in
index 014aa1a..c0afcb7 100644
--- a/conf/example.conf.in
+++ b/conf/example.conf.in
@@ -28,11 +28,11 @@ config {
# Configuration option config/checks.
# If enabled, any LVM configuration mismatch is reported.
- # This implies checking that the configuration key is understood
- # by LVM and that the value of the key is the proper type.
- # If disabled, any configuration mismatch is ignored and the default
- # value is used without any warning (a message about the
- # configuration key not being found is issued in verbose mode only).
+ # This implies checking that the configuration key is understood by
+ # LVM and that the value of the key is the proper type. If disabled,
+ # any configuration mismatch is ignored and the default value is used
+ # without any warning (a message about the configuration key not being
+ # found is issued in verbose mode only).
checks = 1
# Configuration option config/abort_on_errors.
@@ -61,102 +61,103 @@ devices {
# Configuration option devices/obtain_device_list_from_udev.
# Obtain the list of available devices from udev.
- # This avoids opening or using any inapplicable non-block
- # devices or subdirectories found in the udev directory.
- # Any device node or symlink not managed by udev in the udev
- # directory is ignored. This setting applies only to the
- # udev-managed device directory; other directories will be
- # scanned fully. LVM needs to be compiled with udev support
- # for this setting to apply.
+ # This avoids opening or using any inapplicable non-block devices or
+ # subdirectories found in the udev directory. Any device node or
+ # symlink not managed by udev in the udev directory is ignored. This
+ # setting applies only to the udev-managed device directory; other
+ # directories will be scanned fully. LVM needs to be compiled with
+ # udev support for this setting to apply.
obtain_device_list_from_udev = 1
# Configuration option devices/external_device_info_source.
# Select an external device information source.
- # Some information may already be available in the system and
- # LVM can use this information to determine the exact type
- # or use of devices it processes. Using an existing external
- # device information source can speed up device processing
- # as LVM does not need to run its own native routines to acquire
- # this information. For example, this information is used to
- # drive LVM filtering like MD component detection, multipath
+ # Some information may already be available in the system and LVM can
+ # use this information to determine the exact type or use of devices it
+ # processes. Using an existing external device information source can
+ # speed up device processing as LVM does not need to run its own native
+ # routines to acquire this information. For example, this information
+ # is used to drive LVM filtering like MD component detection, multipath
# component detection, partition detection and others.
- # Possible options are: none, udev.
- # none - No external device information source is used.
- # udev - Reuse existing udev database records. Applicable
- # only if LVM is compiled with udev support.
+ #
+ # Accepted values:
+ # none
+ # No external device information source is used.
+ # udev
+ # Reuse existing udev database records. Applicable only if LVM is
+ # compiled with udev support.
+ #
external_device_info_source = "none"
# Configuration option devices/preferred_names.
# Select which path name to display for a block device.
- # If multiple path names exist for a block device,
- # and LVM needs to display a name for the device,
- # the path names are matched against each item in
- # this list of regular expressions. The first match is used.
- # Try to avoid using undescriptive /dev/dm-N names, if present.
- # If no preferred name matches, or if preferred_names are not
- # defined, built-in rules are used until one produces a preference.
- # Rule 1 checks path prefixes and gives preference in this order:
- # /dev/mapper, /dev/disk, /dev/dm-*, /dev/block (/dev from devices/dev)
- # Rule 2 prefers the path with the least slashes.
- # Rule 3 prefers a symlink.
- # Rule 4 prefers the path with least value in lexicographical order.
- # Example:
+ # If multiple path names exist for a block device, and LVM needs to
+ # display a name for the device, the path names are matched against
+ # each item in this list of regular expressions. The first match is
+ # used. Try to avoid using undescriptive /dev/dm-N names, if present.
+ # If no preferred name matches, or if preferred_names are not defined,
+ # the following built-in preferences are applied in order until one
+ # produces a preferred name:
+ # Prefer names with path prefixes in the order of:
+ # /dev/mapper, /dev/disk, /dev/dm-*, /dev/block.
+ # Prefer the name with the least number of slashes.
+ # Prefer a name that is a symlink.
+ # Prefer the path with least value in lexicographical order.
+ #
+ # Example
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option devices/filter.
# Limit the block devices that are used by LVM commands.
- # This is a list of regular expressions used to accept or
- # reject block device path names. Each regex is delimited
- # by a vertical bar '|' (or any character) and is preceded
- # by 'a' to accept the path, or by 'r' to reject the path.
- # The first regex in the list to match the path is used,
- # producing the 'a' or 'r' result for the device.
- # When multiple path names exist for a block device, if any
- # path name matches an 'a' pattern before an 'r' pattern,
- # then the device is accepted. If all the path names match
- # an 'r' pattern first, then the device is rejected.
- # Unmatching path names do not affect the accept or reject
- # decision. If no path names for a device match a pattern,
- # then the device is accepted.
- # Be careful mixing 'a' and 'r' patterns, as the combination
- # might produce unexpected results (test any changes.)
+ # This is a list of regular expressions used to accept or reject block
+ # device path names. Each regex is delimited by a vertical bar '|'
+ # (or any character) and is preceded by 'a' to accept the path, or
+ # by 'r' to reject the path. The first regex in the list to match the
+ # path is used, producing the 'a' or 'r' result for the device.
+ # When multiple path names exist for a block device, if any path name
+ # matches an 'a' pattern before an 'r' pattern, then the device is
+ # accepted. If all the path names match an 'r' pattern first, then the
+ # device is rejected. Unmatching path names do not affect the accept
+ # or reject decision. If no path names for a device match a pattern,
+ # then the device is accepted. Be careful mixing 'a' and 'r' patterns,
+ # as the combination might produce unexpected results (test changes.)
# Run vgscan after changing the filter to regenerate the cache.
# See the use_lvmetad comment for a special case regarding filters.
- # Example:
- # Accept every block device.
+ #
+ # Example
+ # Accept every block device:
# filter = [ "a|.*/|" ]
- # Example:
- # Reject the cdrom drive.
+ # Reject the cdrom drive:
# filter = [ "r|/dev/cdrom|" ]
- # Example:
- # Work with just loopback devices, e.g. for testing.
+ # Work with just loopback devices, e.g. for testing:
# filter = [ "a|loop|", "r|.*|" ]
- # Example:
- # Accept all loop devices and ide drives except hdc.
+ # Accept all loop devices and ide drives except hdc:
# filter = [ "a|loop|", "r|/dev/hdc|", "a|/dev/ide|", "r|.*|" ]
- # Example:
- # Use anchors to be very specific.
+ # Use anchors to be very specific:
# filter = [ "a|^/dev/hda8$|", "r|.*/|" ]
+ #
+ # This configuration option has an automatic default value.
# filter = [ "a|.*/|" ]
# Configuration option devices/global_filter.
# Limit the block devices that are used by LVM system components.
- # Because devices/filter may be overridden from the command line,
- # it is not suitable for system-wide device filtering, e.g. udev
- # and lvmetad. Use global_filter to hide devices from these LVM
- # system components. The syntax is the same as devices/filter.
- # Devices rejected by global_filter are not opened by LVM.
+ # Because devices/filter may be overridden from the command line, it is
+ # not suitable for system-wide device filtering, e.g. udev and lvmetad.
+ # Use global_filter to hide devices from these LVM system components.
+ # The syntax is the same as devices/filter. Devices rejected by
+ # global_filter are not opened by LVM.
+ # This configuration option has an automatic default value.
# global_filter = [ "a|.*/|" ]
# Configuration option devices/cache_dir.
# Directory in which to store the device cache file.
- # The results of filtering are cached on disk to avoid
- # rescanning dud devices (which can take a very long time).
- # By default this cache is stored in a file named .cache.
- # It is safe to delete this file; the tools regenerate it.
- # If obtain_device_list_from_udev is enabled, the list of devices
- # is obtained from udev and any existing .cache file is removed.
+ # The results of filtering are cached on disk to avoid rescanning dud
+ # devices (which can take a very long time). By default this cache is
+ # stored in a file named .cache. It is safe to delete this file; the
+ # tools regenerate it. If obtain_device_list_from_udev is enabled, the
+ # list of devices is obtained from udev and any existing .cache file
+ # is removed.
cache_dir = "@DEFAULT_SYS_DIR@/@DEFAULT_CACHE_SUBDIR@"
# Configuration option devices/cache_file_prefix.
@@ -169,18 +170,19 @@ devices {
# Configuration option devices/types.
# List of additional acceptable block device types.
- # These are of device type names from /proc/devices,
- # followed by the maximum number of partitions.
- # Example:
+ # These are of device type names from /proc/devices, followed by the
+ # maximum number of partitions.
+ #
+ # Example
# types = [ "fd", 16 ]
+ #
# This configuration option is advanced.
# This configuration option does not have a default value defined.
# Configuration option devices/sysfs_scan.
# Restrict device scanning to block devices appearing in sysfs.
- # This is a quick way of filtering out block devices that are
- # not present on the system. sysfs must be part of the kernel
- # and mounted.)
+ # This is a quick way of filtering out block devices that are not
+ # present on the system. sysfs must be part of the kernel and mounted.)
sysfs_scan = 1
# Configuration option devices/multipath_component_detection.
@@ -193,8 +195,8 @@ devices {
# Configuration option devices/fw_raid_component_detection.
# Ignore devices that are components of firmware RAID devices.
- # LVM must use an external_device_info_source other than none
- # for this detection to execute.
+ # LVM must use an external_device_info_source other than none for this
+ # detection to execute.
fw_raid_component_detection = 0
# Configuration option devices/md_chunk_alignment.
@@ -206,16 +208,16 @@ devices {
# Default alignment of the start of a PV data area in MB.
# If set to 0, a value of 64KiB will be used.
# Set to 1 for 1MiB, 2 for 2MiB, etc.
+ # This configuration option has an automatic default value.
# default_data_alignment = 1
# Configuration option devices/data_alignment_detection.
# Detect PV data alignment based on sysfs device information.
- # The start of a PV data area will be a multiple of
- # minimum_io_size or optimal_io_size exposed in sysfs.
- # minimum_io_size is the smallest request the device can perform
- # without incurring a read-modify-write penalty, e.g. MD chunk size.
- # optimal_io_size is the device's preferred unit of receiving I/O,
- # e.g. MD stripe width.
+ # The start of a PV data area will be a multiple of minimum_io_size or
+ # optimal_io_size exposed in sysfs. minimum_io_size is the smallest
+ # request the device can perform without incurring a read-modify-write
+ # penalty, e.g. MD chunk size. optimal_io_size is the device's
+ # preferred unit of receiving I/O, e.g. MD stripe width.
# minimum_io_size is used if optimal_io_size is undefined (0).
# If md_chunk_alignment is enabled, that detects the optimal_io_size.
# This setting takes precedence over md_chunk_alignment.
@@ -223,21 +225,21 @@ devices {
# Configuration option devices/data_alignment.
# Alignment of the start of a PV data area in KiB.
- # If a PV is placed directly on an md device and
- # md_chunk_alignment or data_alignment_detection are enabled,
- # then this setting is ignored. Otherwise, md_chunk_alignment
- # and data_alignment_detection are disabled if this is set.
- # Set to 0 to use the default alignment or the page size, if larger.
+ # If a PV is placed directly on an md device and md_chunk_alignment or
+ # data_alignment_detection are enabled, then this setting is ignored.
+ # Otherwise, md_chunk_alignment and data_alignment_detection are
+ # disabled if this is set. Set to 0 to use the default alignment or the
+ # page size, if larger.
data_alignment = 0
# Configuration option devices/data_alignment_offset_detection.
# Detect PV data alignment offset based on sysfs device information.
# The start of a PV aligned data area will be shifted by the
- # alignment_offset exposed in sysfs. This offset is often 0, but
- # may be non-zero. Certain 4KiB sector drives that compensate for
- # windows partitioning will have an alignment_offset of 3584 bytes
- # (sector 7 is the lowest aligned logical block, the 4KiB sectors start
- # at LBA -1, and consequently sector 63 is aligned on a 4KiB boundary).
+ # alignment_offset exposed in sysfs. This offset is often 0, but may
+ # be non-zero. Certain 4KiB sector drives that compensate for windows
+ # partitioning will have an alignment_offset of 3584 bytes (sector 7
+ # is the lowest aligned logical block, the 4KiB sectors start at
+ # LBA -1, and consequently sector 63 is aligned on a 4KiB boundary).
# pvcreate --dataalignmentoffset will skip this detection.
data_alignment_offset_detection = 1
@@ -249,32 +251,29 @@ devices {
# Configuration option devices/ignore_lvm_mirrors.
# Do not scan 'mirror' LVs to avoid possible deadlocks.
- # This avoids possible deadlocks when using the 'mirror'
- # segment type. This setting determines whether logical volumes
- # using the 'mirror' segment type are scanned for LVM labels.
- # This affects the ability of mirrors to be used as physical volumes.
- # If this setting is enabled, it becomes impossible to create VGs
- # on top of mirror LVs, i.e. to stack VGs on mirror LVs.
- # If this setting is disabled, allowing mirror LVs to be scanned,
- # it may cause LVM processes and I/O to the mirror to become blocked.
- # This is due to the way that the mirror segment type handles failures.
- # In order for the hang to occur, an LVM command must be run just after
- # a failure and before the automatic LVM repair process takes place,
- # or there must be failures in multiple mirrors in the same VG at the
- # same time with write failures occurring moments before a scan of the
- # mirror's labels.
- # The 'mirror' scanning problems do not apply to LVM RAID types like
- # 'raid1' which handle failures in a different way, making them a
- # better choice for VG stacking.
+ # This avoids possible deadlocks when using the 'mirror' segment type.
+ # This setting determines whether LVs using the 'mirror' segment type
+ # are scanned for LVM labels. This affects the ability of mirrors to
+ # be used as physical volumes. If this setting is enabled, it is
+ # impossible to create VGs on top of mirror LVs, i.e. to stack VGs on
+ # mirror LVs. If this setting is disabled, allowing mirror LVs to be
+ # scanned, it may cause LVM processes and I/O to the mirror to become
+ # blocked. This is due to the way that the mirror segment type handles
+ # failures. In order for the hang to occur, an LVM command must be run
+ # just after a failure and before the automatic LVM repair process
+ # takes place, or there must be failures in multiple mirrors in the
+ # same VG at the same time with write failures occurring moments before
+ # a scan of the mirror's labels. The 'mirror' scanning problems do not
+ # apply to LVM RAID types like 'raid1' which handle failures in a
+ # different way, making them a better choice for VG stacking.
ignore_lvm_mirrors = 1
# Configuration option devices/disable_after_error_count.
# Number of I/O errors after which a device is skipped.
- # During each LVM operation, errors received from each device
- # are counted. If the counter of a device exceeds the limit set
- # here, no further I/O is sent to that device for the remainder
- # of the operation.
- # Setting this to 0 disables the counters altogether.
+ # During each LVM operation, errors received from each device are
+ # counted. If the counter of a device exceeds the limit set here,
+ # no further I/O is sent to that device for the remainder of the
+ # operation. Setting this to 0 disables the counters altogether.
disable_after_error_count = 0
# Configuration option devices/require_restorefile_with_uuid.
@@ -284,21 +283,21 @@ devices {
# Configuration option devices/pv_min_size.
# Minimum size in KiB of block devices which can be used as PVs.
# In a clustered environment all nodes must use the same value.
- # Any value smaller than 512KiB is ignored. The previous built-in
+ # Any value smaller than 512KiB is ignored. The previous built-in
# value was 512.
pv_min_size = 2048
# Configuration option devices/issue_discards.
# Issue discards to PVs that are no longer used by an LV.
- # Discards are sent to an LV's underlying physical volumes when
- # the LV is no longer using the physical volumes' space, e.g.
- # lvremove, lvreduce. Discards inform the storage that a region
- # is no longer used. Storage that supports discards advertise
- # the protocol-specific way discards should be issued by the
- # kernel (TRIM, UNMAP, or WRITE SAME with UNMAP bit set).
- # Not all storage will support or benefit from discards, but SSDs
- # and thinly provisioned LUNs generally do. If enabled, discards
- # will only be issued if both the storage and kernel provide support.
+ # Discards are sent to an LV's underlying physical volumes when the LV
+ # is no longer using the physical volumes' space, e.g. lvremove,
+ # lvreduce. Discards inform the storage that a region is no longer
+ # used. Storage that supports discards advertise the protocol-specific
+ # way discards should be issued by the kernel (TRIM, UNMAP, or
+ # WRITE SAME with UNMAP bit set). Not all storage will support or
+ # benefit from discards, but SSDs and thinly provisioned LUNs
+ # generally do. If enabled, discards will only be issued if both the
+ # storage and kernel provide support.
issue_discards = 0
}
@@ -308,61 +307,56 @@ allocation {
# Configuration option allocation/cling_tag_list.
# Advise LVM which PVs to use when searching for new space.
- # When searching for free space to extend an LV, the 'cling'
- # allocation policy will choose space on the same PVs as the last
- # segment of the existing LV. If there is insufficient space and a
- # list of tags is defined here, it will check whether any of them are
- # attached to the PVs concerned and then seek to match those PV tags
- # between existing extents and new extents.
- # Example:
- # Use the special tag "@*" as a wildcard to match any PV tag.
+ # When searching for free space to extend an LV, the 'cling' allocation
+ # policy will choose space on the same PVs as the last segment of the
+ # existing LV. If there is insufficient space and a list of tags is
+ # defined here, it will check whether any of them are attached to the
+ # PVs concerned and then seek to match those PV tags between existing
+ # extents and new extents.
+ #
+ # Example
+ # Use the special tag "@*" as a wildcard to match any PV tag:
# cling_tag_list = [ "@*" ]
- # Example:
- # LVs are mirrored between two sites within a single VG.
+ # LVs are mirrored between two sites within a single VG, and
# PVs are tagged with either @site1 or @site2 to indicate where
- # they are situated.
+ # they are situated:
# cling_tag_list = [ "@site1", "@site2" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option allocation/maximise_cling.
# Use a previous allocation algorithm.
# Changes made in version 2.02.85 extended the reach of the 'cling'
# policies to detect more situations where data can be grouped onto
- # the same disks. This setting can be used to disable the changes
+ # the same disks. This setting can be used to disable the changes
# and revert to the previous algorithm.
maximise_cling = 1
# Configuration option allocation/use_blkid_wiping.
# Use blkid to detect existing signatures on new PVs and LVs.
- # The blkid library can detect more signatures than the
- # native LVM detection code, but may take longer.
- # LVM needs to be compiled with blkid wiping support for
- # this setting to apply.
- # LVM native detection code is currently able to recognize:
- # MD device signatures, swap signature, and LUKS signatures.
- # To see the list of signatures recognized by blkid, check the
- # output of the 'blkid -k' command.
+ # The blkid library can detect more signatures than the native LVM
+ # detection code, but may take longer. LVM needs to be compiled with
+ # blkid wiping support for this setting to apply. LVM native detection
+ # code is currently able to recognize: MD device signatures,
+ # swap signature, and LUKS signatures. To see the list of signatures
+ # recognized by blkid, check the output of the 'blkid -k' command.
use_blkid_wiping = @DEFAULT_USE_BLKID_WIPING@
# Configuration option allocation/wipe_signatures_when_zeroing_new_lvs.
# Look for and erase any signatures while zeroing a new LV.
- # Zeroing is controlled by the -Z/--zero option, and if not
- # specified, zeroing is used by default if possible.
- # Zeroing simply overwrites the first 4KiB of a new LV
- # with zeroes and does no signature detection or wiping.
- # Signature wiping goes beyond zeroing and detects exact
- # types and positions of signatures within the whole LV.
- # It provides a cleaner LV after creation as all known
- # signatures are wiped. The LV is not claimed incorrectly
- # by other tools because of old signatures from previous use.
- # The number of signatures that LVM can detect depends on the
- # detection code that is selected (see use_blkid_wiping.)
- # Wiping each detected signature must be confirmed.
- # The command line option -W/--wipesignatures takes precedence
- # over this setting.
- # When this setting is disabled, signatures on new LVs are
- # not detected or erased unless the -W/--wipesignatures y
- # option is used directly.
+ # The --wipesignatures option overrides this setting.
+ # Zeroing is controlled by the -Z/--zero option, and if not specified,
+ # zeroing is used by default if possible. Zeroing simply overwrites the
+ # first 4KiB of a new LV with zeroes and does no signature detection or
+ # wiping. Signature wiping goes beyond zeroing and detects exact types
+ # and positions of signatures within the whole LV. It provides a
+ # cleaner LV after creation as all known signatures are wiped. The LV
+ # is not claimed incorrectly by other tools because of old signatures
+ # from previous use. The number of signatures that LVM can detect
+ # depends on the detection code that is selected (see
+ # use_blkid_wiping.) Wiping each detected signature must be confirmed.
+ # When this setting is disabled, signatures on new LVs are not detected
+ # or erased unless the --wipesignatures option is used directly.
wipe_signatures_when_zeroing_new_lvs = 1
# Configuration option allocation/mirror_logs_require_separate_pvs.
@@ -376,38 +370,41 @@ allocation {
# Configuration option allocation/cache_mode.
# The default cache mode used for new cache.
- # Possible options are: writethrough, writeback.
- # writethrough - Data blocks are immediately written from
- # the cache to disk.
- # writeback - Data blocks are written from the cache back
- # to disk after some delay to improve performance.
+ #
+ # Accepted values:
+ # writethrough
+ # Data blocks are immediately written from the cache to disk.
+ # writeback
+ # Data blocks are written from the cache back to disk after some
+ # delay to improve performance.
+ #
# This setting replaces allocation/cache_pool_cachemode.
+ # This configuration option has an automatic default value.
# cache_mode = "writethrough"
# Configuration option allocation/cache_policy.
# The default cache policy used for new cache volume.
- # For the kernel 4.2 and newer the default policy is smq
- # (Stochastic multique), otherwise the older mq (Multiqueue),
- # policy is selected.
+ # Since kernel 4.2 the default policy is smq (Stochastic multique),
+ # otherwise the older mq (Multiqueue) policy is selected.
# This configuration option does not have a default value defined.
# Configuration section allocation/cache_settings.
# Individual settings for policies.
# See the help for individual policies for more info.
+ # This configuration section has an automatic default value.
# cache_settings {
# }
# Configuration option allocation/cache_pool_chunk_size.
# The minimal chunk size in KiB for cache pool volumes.
- # Using a chunk_size that is too large can result in wasteful
- # use of the cache, where small reads and writes can cause
- # large sections of an LV to be mapped into the cache. However,
- # choosing a chunk_size that is too small can result in more
- # overhead trying to manage the numerous chunks that become mapped
- # into the cache. The former is more of a problem than the latter
- # in most cases, so we default to a value that is on the smaller
- # end of the spectrum. Supported values range from 32KiB to
- # 1GiB in multiples of 32.
+ # Using a chunk_size that is too large can result in wasteful use of
+ # the cache, where small reads and writes can cause large sections of
+ # an LV to be mapped into the cache. However, choosing a chunk_size
+ # that is too small can result in more overhead trying to manage the
+ # numerous chunks that become mapped into the cache. The former is
+ # more of a problem than the latter in most cases, so the default is
+ # on the smaller end of the spectrum. Supported values range from
+ # 32KiB to 1GiB in multiples of 32.
# This configuration option does not have a default value defined.
# Configuration option allocation/thin_pool_metadata_require_separate_pvs.
@@ -417,38 +414,50 @@ allocation {
# Configuration option allocation/thin_pool_zero.
# Thin pool data chunks are zeroed before they are first used.
# Zeroing with a larger thin pool chunk size reduces performance.
+ # This configuration option has an automatic default value.
# thin_pool_zero = 1
# Configuration option allocation/thin_pool_discards.
# The discards behaviour of thin pool volumes.
- # Possible options are: ignore, nopassdown, passdown.
+ #
+ # Accepted values:
+ # ignore
+ # nopassdown
+ # passdown
+ #
+ # This configuration option has an automatic default value.
# thin_pool_discards = "passdown"
# Configuration option allocation/thin_pool_chunk_size_policy.
# The chunk size calculation policy for thin pool volumes.
- # Possible options are: generic, performance.
- # generic - If thin_pool_chunk_size is defined, use it.
- # Otherwise, calculate the chunk size based on estimation and
- # device hints exposed in sysfs - the minimum_io_size.
- # The chunk size is always at least 64KiB.
- # performance - If thin_pool_chunk_size is defined, use it.
- # Otherwise, calculate the chunk size for performance based on
- # device hints exposed in sysfs - the optimal_io_size.
- # The chunk size is always at least 512KiB.
+ #
+ # Accepted values:
+ # generic
+ # If thin_pool_chunk_size is defined, use it. Otherwise, calculate
+ # the chunk size based on estimation and device hints exposed in
+ # sysfs - the minimum_io_size. The chunk size is always at least
+ # 64KiB.
+ # performance
+ # If thin_pool_chunk_size is defined, use it. Otherwise, calculate
+ # the chunk size for performance based on device hints exposed in
+ # sysfs - the optimal_io_size. The chunk size is always at least
+ # 512KiB.
+ #
+ # This configuration option has an automatic default value.
# thin_pool_chunk_size_policy = "generic"
# Configuration option allocation/thin_pool_chunk_size.
# The minimal chunk size in KiB for thin pool volumes.
- # Larger chunk sizes may improve performance for plain
- # thin volumes, however using them for snapshot volumes
- # is less efficient, as it consumes more space and takes
- # extra time for copying. When unset, lvm tries to estimate
- # chunk size starting from 64KiB. Supported values are in
- # the range 64KiB to 1GiB.
+ # Larger chunk sizes may improve performance for plain thin volumes,
+ # however using them for snapshot volumes is less efficient, as it
+ # consumes more space and takes extra time for copying. When unset,
+ # lvm tries to estimate chunk size starting from 64KiB. Supported
+ # values are in the range 64KiB to 1GiB.
# This configuration option does not have a default value defined.
# Configuration option allocation/physical_extent_size.
# Default physical extent size in KiB to use for new VGs.
+ # This configuration option has an automatic default value.
# physical_extent_size = 4096
}
@@ -462,14 +471,13 @@ log {
# Configuration option log/silent.
# Suppress all non-essential messages from stdout.
- # This has the same effect as -qq.
- # When enabled, the following commands still produce output:
- # dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
- # pvs, version, vgcfgrestore -l, vgdisplay, vgs.
+ # This has the same effect as -qq. When enabled, the following commands
+ # still produce output: dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck,
+ # pvdisplay, pvs, version, vgcfgrestore -l, vgdisplay, vgs.
# Non-essential messages are shifted from log level 4 to log level 5
# for syslog and lvm2_log_fn purposes.
- # Any 'yes' or 'no' questions not overridden by other arguments
- # are suppressed and default to 'no'.
+ # Any 'yes' or 'no' questions not overridden by other arguments are
+ # suppressed and default to 'no'.
silent = 0
# Configuration option log/syslog.
@@ -513,21 +521,18 @@ log {
# Configuration option log/debug_classes.
# Select log messages by class.
- # Some debugging messages are assigned to a class
- # and only appear in debug output if the class is
- # listed here. Classes currently available:
- # memory, devices, activation, allocation,
- # lvmetad, metadata, cache, locking, lvmpolld.
- # Use "all" to see everything.
+ # Some debugging messages are assigned to a class and only appear in
+ # debug output if the class is listed here. Classes currently
+ # available: memory, devices, activation, allocation, lvmetad,
+ # metadata, cache, locking, lvmpolld. Use "all" to see everything.
debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking", "lvmpolld" ]
}
# Configuration section backup.
# How LVM metadata is backed up and archived.
-# In LVM, a 'backup' is a copy of the metadata for the
-# current system, and an 'archive' contains old metadata
-# configurations. They are stored in a human readable
-# text format.
+# In LVM, a 'backup' is a copy of the metadata for the current system,
+# and an 'archive' contains old metadata configurations. They are
+# stored in a human readable text format.
backup {
# Configuration option backup/backup.
@@ -590,15 +595,14 @@ global {
# Distinguish between powers of 1024 and 1000 bytes.
# The LVM commands distinguish between powers of 1024 bytes,
# e.g. KiB, MiB, GiB, and powers of 1000 bytes, e.g. KB, MB, GB.
- # If scripts depend on the old behaviour, disable
- # this setting temporarily until they are updated.
+ # If scripts depend on the old behaviour, disable this setting
+ # temporarily until they are updated.
si_unit_consistency = 1
# Configuration option global/suffix.
# Display unit suffix for sizes.
- # This setting has no effect if the units are in human-readable
- # form (global/units = "h") in which case the suffix is always
- # displayed.
+ # This setting has no effect if the units are in human-readable form
+ # (global/units = "h") in which case the suffix is always displayed.
suffix = 1
# Configuration option global/activation.
@@ -611,17 +615,22 @@ global {
# Configuration option global/fallback_to_lvm1.
# Try running LVM1 tools if LVM cannot communicate with DM.
- # This option only applies to 2.4 kernels and is provided to
- # help switch between device-mapper kernels and LVM1 kernels.
- # The LVM1 tools need to be installed with .lvm1 suffices,
- # e.g. vgscan.lvm1. They will stop working once the lvm2
- # on-disk metadata format is used.
+ # This option only applies to 2.4 kernels and is provided to help
+ # switch between device-mapper kernels and LVM1 kernels. The LVM1
+ # tools need to be installed with .lvm1 suffices, e.g. vgscan.lvm1.
+ # They will stop working once the lvm2 on-disk metadata format is used.
+ # This configuration option has an automatic default value.
# fallback_to_lvm1 = @DEFAULT_FALLBACK_TO_LVM1@
# Configuration option global/format.
# The default metadata format that commands should use.
- # "lvm1" or "lvm2".
- # The command line override is -M1 or -M2.
+ # The -M 1|2 option overrides this setting.
+ #
+ # Accepted values:
+ # lvm1
+ # lvm2
+ #
+ # This configuration option has an automatic default value.
# format = "lvm2"
# Configuration option global/format_libraries.
@@ -644,24 +653,33 @@ global {
# Configuration option global/locking_type.
# Type of locking to use.
- # Type 0: turns off locking. Warning: this risks metadata
- # corruption if commands run concurrently.
- # Type 1: uses local file-based locking, the standard mode.
- # Type 2: uses the external shared library locking_library.
- # Type 3: uses built-in clustered locking with clvmd.
- # This is incompatible with lvmetad. If use_lvmetad is enabled,
- # lvm prints a warning and disables lvmetad use.
- # Type 4: uses read-only locking which forbids any operations
- # that might change metadata.
- # Type 5: offers dummy locking for tools that do not need any locks.
- # You should not need to set this directly; the tools will select
- # when to use it instead of the configured locking_type.
- # Do not use lvmetad or the kernel device-mapper driver with this
- # locking type. It is used by the --readonly option that offers
- # read-only access to Volume Group metadata that cannot be locked
- # safely because it belongs to an inaccessible domain and might be
- # in use, for example a virtual machine image or a disk that is
- # shared by a clustered machine.
+ #
+ # Accepted values:
+ # 0
+ # Turns off locking. Warning: this risks metadata corruption if
+ # commands run concurrently.
+ # 1
+ # LVM uses local file-based locking, the standard mode.
+ # 2
+ # LVM uses the external shared library locking_library.
+ # 3
+ # LVM uses built-in clustered locking with clvmd.
+ # This is incompatible with lvmetad. If use_lvmetad is enabled,
+ # LVM prints a warning and disables lvmetad use.
+ # 4
+ # LVM uses read-only locking which forbids any operations that
+ # might change metadata.
+ # 5
+ # Offers dummy locking for tools that do not need any locks.
+ # You should not need to set this directly; the tools will select
+ # when to use it instead of the configured locking_type.
+ # Do not use lvmetad or the kernel device-mapper driver with this
+ # locking type. It is used by the --readonly option that offers
+ # read-only access to Volume Group metadata that cannot be locked
+ # safely because it belongs to an inaccessible domain and might be
+ # in use, for example a virtual machine image or a disk that is
+ # shared by a clustered machine.
+ #
locking_type = 1
# Configuration option global/wait_for_locks.
@@ -670,39 +688,34 @@ global {
# Configuration option global/fallback_to_clustered_locking.
# Attempt to use built-in cluster locking if locking_type 2 fails.
- # If using external locking (type 2) and initialisation fails,
- # with this enabled, an attempt will be made to use the built-in
- # clustered locking.
- # If you are using a customised locking_library you should disable this.
+ # If using external locking (type 2) and initialisation fails, with
+ # this enabled, an attempt will be made to use the built-in clustered
+ # locking. Disable this if using a customised locking_library.
fallback_to_clustered_locking = 1
# Configuration option global/fallback_to_local_locking.
# Use locking_type 1 (local) if locking_type 2 or 3 fail.
- # If an attempt to initialise type 2 or type 3 locking failed,
- # perhaps because cluster components such as clvmd are not
- # running, with this enabled, an attempt will be made to use
- # local file-based locking (type 1). If this succeeds, only
- # commands against local volume groups will proceed.
- # Volume Groups marked as clustered will be ignored.
+ # If an attempt to initialise type 2 or type 3 locking failed, perhaps
+ # because cluster components such as clvmd are not running, with this
+ # enabled, an attempt will be made to use local file-based locking
+ # (type 1). If this succeeds, only commands against local VGs will
+ # proceed. VGs marked as clustered will be ignored.
fallback_to_local_locking = 1
# Configuration option global/locking_dir.
# Directory to use for LVM command file locks.
- # Local non-LV directory that holds file-based locks
- # while commands are in progress. A directory like
- # /tmp that may get wiped on reboot is OK.
+ # Local non-LV directory that holds file-based locks while commands are
+ # in progress. A directory like /tmp that may get wiped on reboot is OK.
locking_dir = "@DEFAULT_LOCK_DIR@"
# Configuration option global/prioritise_write_locks.
# Allow quicker VG write access during high volume read access.
- # When there are competing read-only and read-write access
- # requests for a volume group's metadata, instead of always
- # granting the read-only requests immediately, delay them to
- # allow the read-write requests to be serviced. Without this
- # setting, write access may be stalled by a high volume of
- # read-only requests.
- # This option only affects locking_type 1 viz.
- # local file-based locking.
+ # When there are competing read-only and read-write access requests for
+ # a volume group's metadata, instead of always granting the read-only
+ # requests immediately, delay them to allow the read-write requests to
+ # be serviced. Without this setting, write access may be stalled by a
+ # high volume of read-only requests. This option only affects
+ # locking_type 1 viz. local file-based locking.
prioritise_write_locks = 1
# Configuration option global/library_dir.
@@ -711,85 +724,93 @@ global {
# Configuration option global/locking_library.
# The external locking library to use for locking_type 2.
+ # This configuration option has an automatic default value.
# locking_library = "liblvm2clusterlock.so"
# Configuration option global/abort_on_internal_errors.
# Abort a command that encounters an internal error.
- # Treat any internal errors as fatal errors, aborting
- # the process that encountered the internal error.
- # Please only enable for debugging.
+ # Treat any internal errors as fatal errors, aborting the process that
+ # encountered the internal error. Please only enable for debugging.
abort_on_internal_errors = 0
# Configuration option global/detect_internal_vg_cache_corruption.
# Internal verification of VG structures.
- # Check if CRC matches when a parsed VG is
- # used multiple times. This is useful to catch
- # unexpected changes to cached VG structures.
+ # Check if CRC matches when a parsed VG is used multiple times. This
+ # is useful to catch unexpected changes to cached VG structures.
# Please only enable for debugging.
detect_internal_vg_cache_corruption = 0
# Configuration option global/metadata_read_only.
# No operations that change on-disk metadata are permitted.
- # Additionally, read-only commands that encounter metadata
- # in need of repair will still be allowed to proceed exactly
- # as if the repair had been performed (except for the unchanged
- # vg_seqno). Inappropriate use could mess up your system,
- # so seek advice first!
+ # Additionally, read-only commands that encounter metadata in need of
+ # repair will still be allowed to proceed exactly as if the repair had
+ # been performed (except for the unchanged vg_seqno). Inappropriate
+ # use could mess up your system, so seek advice first!
metadata_read_only = 0
# Configuration option global/mirror_segtype_default.
# The segment type used by the short mirroring option -m.
- # Possible options are: mirror, raid1.
- # mirror - the original RAID1 implementation from LVM/DM.
- # It is characterized by a flexible log solution (core,
- # disk, mirrored), and by the necessity to block I/O while
- # handling a failure.
- # There is an inherent race in the dmeventd failure
- # handling logic with snapshots of devices using this
- # type of RAID1 that in the worst case could cause a
- # deadlock. (Also see devices/ignore_lvm_mirrors.)
- # raid1 - a newer RAID1 implementation using the MD RAID1
- # personality through device-mapper. It is characterized
- # by a lack of log options. (A log is always allocated for
- # every device and they are placed on the same device as the
- # image - no separate devices are required.) This mirror
- # implementation does not require I/O to be blocked while
- # handling a failure. This mirror implementation is not
- # cluster-aware and cannot be used in a shared (active/active)
- # fashion in a cluster.
- # The '--type mirror|raid1' option overrides this setting.
+ # The --type mirror|raid1 option overrides this setting.
+ #
+ # Accepted values:
+ # mirror
+ # The original RAID1 implementation from LVM/DM. It is
+ # characterized by a flexible log solution (core, disk, mirrored),
+ # and by the necessity to block I/O while handling a failure.
+ # There is an inherent race in the dmeventd failure handling logic
+ # with snapshots of devices using this type of RAID1 that in the
+ # worst case could cause a deadlock. (Also see
+ # devices/ignore_lvm_mirrors.)
+ # raid1
+ # This is a newer RAID1 implementation using the MD RAID1
+ # personality through device-mapper. It is characterized by a
+ # lack of log options. (A log is always allocated for every
+ # device and they are placed on the same device as the image,
+ # so no separate devices are required.) This mirror
+ # implementation does not require I/O to be blocked while
+ # handling a failure. This mirror implementation is not
+ # cluster-aware and cannot be used in a shared (active/active)
+ # fashion in a cluster.
+ #
mirror_segtype_default = "@DEFAULT_MIRROR_SEGTYPE@"
# Configuration option global/raid10_segtype_default.
# The segment type used by the -i -m combination.
- # The --stripes/-i and --mirrors/-m options can both
- # be specified during the creation of a logical volume
- # to use both striping and mirroring for the LV.
- # There are two different implementations.
- # Possible options are: raid10, mirror.
- # raid10 - LVM uses MD's RAID10 personality through DM.
- # mirror - LVM layers the 'mirror' and 'stripe' segment types.
- # The layering is done by creating a mirror LV on top of
- # striped sub-LVs, effectively creating a RAID 0+1 array.
- # The layering is suboptimal in terms of providing redundancy
- # and performance. The 'raid10' option is perferred.
- # The '--type raid10|mirror' option overrides this setting.
+ # The --type raid10|mirror option overrides this setting.
+ # The --stripes/-i and --mirrors/-m options can both be specified
+ # during the creation of a logical volume to use both striping and
+ # mirroring for the LV. There are two different implementations.
+ #
+ # Accepted values:
+ # raid10
+ # LVM uses MD's RAID10 personality through DM. This is the
+ # preferred option.
+ # mirror
+ # LVM layers the 'mirror' and 'stripe' segment types. The layering
+ # is done by creating a mirror LV on top of striped sub-LVs,
+ # effectively creating a RAID 0+1 array. The layering is suboptimal
+ # in terms of providing redundancy and performance.
+ #
raid10_segtype_default = "@DEFAULT_RAID10_SEGTYPE@"
# Configuration option global/sparse_segtype_default.
# The segment type used by the -V -L combination.
- # The combination of -V and -L options creates a
- # sparse LV. There are two different implementations.
- # Possible options are: snapshot, thin.
- # snapshot - The original snapshot implementation from LVM/DM.
- # It uses an old snapshot that mixes data and metadata within
- # a single COW storage volume and performs poorly when the
- # size of stored data passes hundreds of MB.
- # thin - A newer implementation that uses thin provisioning.
- # It has a bigger minimal chunk size (64KiB) and uses a separate
- # volume for metadata. It has better performance, especially
- # when more data is used. It also supports full snapshots.
- # The '--type snapshot|thin' option overrides this setting.
+ # The --type snapshot|thin option overrides this setting.
+ # The combination of -V and -L options creates a sparse LV. There are
+ # two different implementations.
+ #
+ # Accepted values:
+ # snapshot
+ # The original snapshot implementation from LVM/DM. It uses an old
+ # snapshot that mixes data and metadata within a single COW
+ # storage volume and performs poorly when the size of stored data
+ # passes hundreds of MB.
+ # thin
+ # A newer implementation that uses thin provisioning. It has a
+ # bigger minimal chunk size (64KiB) and uses a separate volume for
+ # metadata. It has better performance, especially when more data
+ # is used. It also supports full snapshots.
+ #
sparse_segtype_default = "@DEFAULT_SPARSE_SEGTYPE@"
# Configuration option global/lvdisplay_shows_full_device_path.
@@ -798,176 +819,182 @@ global {
# in version 2.02.89 to show the LV name and path separately.
# Previously this was always shown as /dev/vgname/lvname even when that
# was never a valid path in the /dev filesystem.
+ # This configuration option has an automatic default value.
# lvdisplay_shows_full_device_path = 0
# Configuration option global/use_lvmetad.
# Use lvmetad to cache metadata and reduce disk scanning.
- # When enabled (and running), lvmetad provides LVM commands
- # with VG metadata and PV state. LVM commands then avoid
- # reading this information from disks which can be slow.
- # When disabled (or not running), LVM commands fall back to
- # scanning disks to obtain VG metadata.
- # lvmetad is kept updated via udev rules which must be set
- # up for LVM to work correctly. (The udev rules should be
- # installed by default.) Without a proper udev setup, changes
- # in the system's block device configuration will be unknown
- # to LVM, and ignored until a manual 'pvscan --cache' is run.
- # If lvmetad was running while use_lvmetad was disabled,
- # it must be stopped, use_lvmetad enabled, and then started.
- # When using lvmetad, LV activation is switched to an automatic,
- # event-based mode. In this mode, LVs are activated based on
- # incoming udev events that inform lvmetad when PVs appear on
- # the system. When a VG is complete (all PVs present), it is
- # auto-activated. The auto_activation_volume_list setting
+ # When enabled (and running), lvmetad provides LVM commands with VG
+ # metadata and PV state. LVM commands then avoid reading this
+ # information from disks which can be slow. When disabled (or not
+ # running), LVM commands fall back to scanning disks to obtain VG
+ # metadata. lvmetad is kept updated via udev rules which must be set
+ # up for LVM to work correctly. (The udev rules should be installed
+ # by default.) Without a proper udev setup, changes in the system's
+ # block device configuration will be unknown to LVM, and ignored
+ # until a manual 'pvscan --cache' is run. If lvmetad was running
+ # while use_lvmetad was disabled, it must be stopped, use_lvmetad
+ # enabled, and then started. When using lvmetad, LV activation is
+ # switched to an automatic, event-based mode. In this mode, LVs are
+ # activated based on incoming udev events that inform lvmetad when
+ # PVs appear on the system. When a VG is complete (all PVs present),
+ # it is auto-activated. The auto_activation_volume_list setting
# controls which LVs are auto-activated (all by default.)
- # When lvmetad is updated (automatically by udev events, or
- # directly by pvscan --cache), devices/filter is ignored and
- # all devices are scanned by default. lvmetad always keeps
- # unfiltered information which is provided to LVM commands.
- # Each LVM command then filters based on devices/filter.
- # This does not apply to other, non-regexp, filtering settings:
- # component filters such as multipath and MD are checked
- # during pvscan --cache.
- # To filter a device and prevent scanning from the LVM system
- # entirely, including lvmetad, use devices/global_filter.
- # lvmetad is not compatible with locking_type 3 (clustering).
- # LVM prints warnings and ignores lvmetad if this combination
- # is seen.
+ # When lvmetad is updated (automatically by udev events, or directly
+ # by pvscan --cache), devices/filter is ignored and all devices are
+ # scanned by default. lvmetad always keeps unfiltered information
+ # which is provided to LVM commands. Each LVM command then filters
+ # based on devices/filter. This does not apply to other, non-regexp,
+ # filtering settings: component filters such as multipath and MD
+ # are checked during pvscan --cache. To filter a device and prevent
+ # scanning from the LVM system entirely, including lvmetad, use
+ # devices/global_filter.
use_lvmetad = @DEFAULT_USE_LVMETAD@
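For illustration only (not part of this patch): the text above distinguishes devices/filter, which lvmetad ignores, from devices/global_filter, which keeps a device away from lvmetad and all LVM commands. A minimal sketch of such a fragment, using a hypothetical device /dev/sdb, might look like:
    # Illustrative lvm.conf fragment; /dev/sdb is a made-up device name.
    devices {
        # Reject the device before lvmetad or any LVM command scans it.
        global_filter = [ "r|^/dev/sdb$|" ]
    }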
# Configuration option global/use_lvmlockd.
# Use lvmlockd for locking among hosts using LVM on shared storage.
+ # See lvmlockd(8) for more information.
use_lvmlockd = 0
# Configuration option global/lvmlockd_lock_retries.
# Retry lvmlockd lock requests this many times.
+ # This configuration option has an automatic default value.
# lvmlockd_lock_retries = 3
# Configuration option global/sanlock_lv_extend.
# Size in MiB to extend the internal LV holding sanlock locks.
- # The internal LV holds locks for each LV in the VG, and after
- # enough LVs have been created, the internal LV needs to be extended.
- # lvcreate will automatically extend the internal LV when needed by
- # the amount specified here. Setting this to 0 disables the
- # automatic extension and can cause lvcreate to fail.
+ # The internal LV holds locks for each LV in the VG, and after enough
+ # LVs have been created, the internal LV needs to be extended. lvcreate
+ # will automatically extend the internal LV when needed by the amount
+ # specified here. Setting this to 0 disables the automatic extension
+ # and can cause lvcreate to fail.
+ # This configuration option has an automatic default value.
# sanlock_lv_extend = 256
# Configuration option global/thin_check_executable.
# The full path to the thin_check command.
- # LVM uses this command to check that a thin metadata
- # device is in a usable state.
- # When a thin pool is activated and after it is deactivated,
- # this command is run. Activation will only proceed if the
- # command has an exit status of 0.
- # Set to "" to skip this check. (Not recommended.)
- # Also see thin_check_options.
- # The thin tools are available from the package
- # device-mapper-persistent-data.
+ # LVM uses this command to check that a thin metadata device is in a
+ # usable state. When a thin pool is activated and after it is
+ # deactivated, this command is run. Activation will only proceed if
+ # the command has an exit status of 0. Set to "" to skip this check.
+ # (Not recommended.) Also see thin_check_options.
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# thin_check_executable = "@THIN_CHECK_CMD@"
# Configuration option global/thin_dump_executable.
# The full path to the thin_dump command.
# LVM uses this command to dump thin pool metadata.
- # (For thin tools, see thin_check_executable.)
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# thin_dump_executable = "@THIN_DUMP_CMD@"
# Configuration option global/thin_repair_executable.
# The full path to the thin_repair command.
- # LVM uses this command to repair a thin metadata device
- # if it is in an unusable state.
- # Also see thin_repair_options.
- # (For thin tools, see thin_check_executable.)
+ # LVM uses this command to repair a thin metadata device if it is in
+ # an unusable state. Also see thin_repair_options.
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# thin_repair_executable = "@THIN_REPAIR_CMD@"
# Configuration option global/thin_check_options.
# List of options passed to the thin_check command.
- # With thin_check version 2.1 or newer you can add
- # --ignore-non-fatal-errors to let it pass through
- # ignorable errors and fix them later.
- # With thin_check version 3.2 or newer you should add
- # --clear-needs-check-flag.
+ # With thin_check version 2.1 or newer you can add the option
+ # --ignore-non-fatal-errors to let it pass through ignorable errors
+ # and fix them later. With thin_check version 3.2 or newer you should
+ # include the option --clear-needs-check-flag.
+ # This configuration option has an automatic default value.
# thin_check_options = [ "-q", "--clear-needs-check-flag" ]
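As a quick illustrative sketch (not part of the patch), the options mentioned above could be combined for a sufficiently new thin_check:
    global {
        # Assumes thin_check >= 3.2; drop any flag your version lacks.
        thin_check_options = [ "-q", "--clear-needs-check-flag", "--ignore-non-fatal-errors" ]
    }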
# Configuration option global/thin_repair_options.
# List of options passed to the thin_repair command.
+ # This configuration option has an automatic default value.
# thin_repair_options = [ "" ]
# Configuration option global/thin_disabled_features.
# Features to not use in the thin driver.
- # This can be helpful for testing, or to avoid
- # using a feature that is causing problems.
- # Features: block_size, discards, discards_non_power_2,
- # external_origin, metadata_resize, external_origin_extend,
- # error_if_no_space.
- # Example:
+ # This can be helpful for testing, or to avoid using a feature that is
+ # causing problems. Features include: block_size, discards,
+ # discards_non_power_2, external_origin, metadata_resize,
+ # external_origin_extend, error_if_no_space.
+ #
+ # Example
# thin_disabled_features = [ "discards", "block_size" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option global/cache_disabled_features.
# Features to not use in the cache driver.
- # This can be helpful for testing, or to avoid
- # using a feature that is causing problems.
- # Features: policy_mq, policy_smq.
- # Example:
+ # This can be helpful for testing, or to avoid using a feature that is
+ # causing problems. Features include: policy_mq, policy_smq.
+ #
+ # Example
# cache_disabled_features = [ "policy_smq" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option global/cache_check_executable.
# The full path to the cache_check command.
- # LVM uses this command to check that a cache metadata
- # device is in a usable state.
- # When a cached LV is activated and after it is deactivated,
- # this command is run. Activation will only proceed if the
- # command has an exit status of 0.
- # Set to "" to skip this check. (Not recommended.)
- # Also see cache_check_options.
- # The cache tools are available from the package
- # device-mapper-persistent-data.
- # With cache_check version 5.0 or newer you should add
- # --clear-needs-check-flag.
+ # LVM uses this command to check that a cache metadata device is in a
+ # usable state. When a cached LV is activated and after it is
+ # deactivated, this command is run. Activation will only proceed if the
+ # command has an exit status of 0. Set to "" to skip this check.
+ # (Not recommended.) Also see cache_check_options.
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# cache_check_executable = "@CACHE_CHECK_CMD@"
# Configuration option global/cache_dump_executable.
# The full path to the cache_dump command.
# LVM uses this command to dump cache pool metadata.
- # (For cache tools, see cache_check_executable.)
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# cache_dump_executable = "@CACHE_DUMP_CMD@"
# Configuration option global/cache_repair_executable.
# The full path to the cache_repair command.
- # LVM uses this command to repair a cache metadata device
- # if it is in an unusable state.
- # Also see cache_repair_options.
- # (For cache tools, see cache_check_executable.)
+ # LVM uses this command to repair a cache metadata device if it is in
+ # an unusable state. Also see cache_repair_options.
+ # (See package device-mapper-persistent-data or thin-provisioning-tools)
+ # This configuration option has an automatic default value.
# cache_repair_executable = "@CACHE_REPAIR_CMD@"
# Configuration option global/cache_check_options.
# List of options passed to the cache_check command.
+ # With cache_check version 5.0 or newer you should include the option
+ # --clear-needs-check-flag.
+ # This configuration option has an automatic default value.
# cache_check_options = [ "-q", "--clear-needs-check-flag" ]
# Configuration option global/cache_repair_options.
# List of options passed to the cache_repair command.
+ # This configuration option has an automatic default value.
# cache_repair_options = [ "" ]
# Configuration option global/system_id_source.
# The method LVM uses to set the local system ID.
- # Volume Groups can also be given a system ID (by
- # vgcreate, vgchange, or vgimport.)
- # A VG on shared storage devices is accessible only
- # to the host with a matching system ID.
- # See 'man lvmsystemid' for information on limitations
- # and correct usage.
- # Possible options are: none, lvmlocal, uname, machineid, file.
- # none - The host has no system ID.
- # lvmlocal - Obtain the system ID from the system_id setting in the
- # 'local' section of an lvm configuration file, e.g. lvmlocal.conf.
- # uname - Set the system ID from the hostname (uname) of the system.
- # System IDs beginning localhost are not permitted.
- # machineid - Use the contents of the machine-id file to set the
- # system ID. Some systems create this file at installation time.
- # See 'man machine-id' and global/etc.
- # file - Use the contents of another file (system_id_file) to set
- # the system ID.
+ # Volume Groups can also be given a system ID (by vgcreate, vgchange,
+ # or vgimport.) A VG on shared storage devices is accessible only to
+ # the host with a matching system ID. See 'man lvmsystemid' for
+ # information on limitations and correct usage.
+ #
+ # Accepted values:
+ # none
+ # The host has no system ID.
+ # lvmlocal
+ # Obtain the system ID from the system_id setting in the 'local'
+ # section of an lvm configuration file, e.g. lvmlocal.conf.
+ # uname
+ # Set the system ID from the hostname (uname) of the system.
+ # System IDs beginning localhost are not permitted.
+ # machineid
+ # Use the contents of the machine-id file to set the system ID.
+ # Some systems create this file at installation time.
+ # See 'man machine-id' and global/etc.
+ # file
+ # Use the contents of another file (system_id_file) to set the
+ # system ID.
+ #
system_id_source = "none"
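Illustrative only: for the 'lvmlocal' method above, the ID lives in the 'local' section of a configuration file such as lvmlocal.conf. A hypothetical fragment, with a made-up ID, could be:
    # lvmlocal.conf (illustrative); "example-host-1" is a made-up system ID.
    local {
        system_id = "example-host-1"
    }
with system_id_source = "lvmlocal" set in the global section of lvm.conf.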
# Configuration option global/system_id_file.
@@ -979,14 +1006,14 @@ global {
# Configuration option global/use_lvmpolld.
# Use lvmpolld to supervise long running LVM commands.
# When enabled, control of long running LVM commands is transferred
- # from the original LVM command to the lvmpolld daemon. This allows
+ # from the original LVM command to the lvmpolld daemon. This allows
# the operation to continue independent of the original LVM command.
# After lvmpolld takes over, the LVM command displays the progress
- # of the ongoing operation. lvmpolld itself runs LVM commands to manage
- # the progress of ongoing operations. lvmpolld can be used as a native
- # systemd service, which allows it to be started on demand, and to use
- # its own control group. When this option is disabled, LVM commands will
- # supervise long running operations by forking themselves.
+ # of the ongoing operation. lvmpolld itself runs LVM commands to
+ # manage the progress of ongoing operations. lvmpolld can be used as
+ # a native systemd service, which allows it to be started on demand,
+ # and to use its own control group. When this option is disabled, LVM
+ # commands will supervise long running operations by forking themselves.
use_lvmpolld = @DEFAULT_USE_LVMPOLLD@
}
@@ -995,62 +1022,58 @@ activation {
# Configuration option activation/checks.
# Perform internal checks of libdevmapper operations.
- # Useful for debugging problems with activation.
- # Some of the checks may be expensive, so it's best to use
- # this only when there seems to be a problem.
+ # Useful for debugging problems with activation. Some of the checks may
+ # be expensive, so it's best to use this only when there seems to be a
+ # problem.
checks = 0
# Configuration option activation/udev_sync.
# Use udev notifications to synchronize udev and LVM.
- # When disabled, LVM commands will not wait for notifications
- # from udev, but continue irrespective of any possible udev
- # processing in the background. Only use this if udev is not
- # running or has rules that ignore the devices LVM creates.
- # If enabled when udev is not running, and LVM processes
- # are waiting for udev, run 'dmsetup udevcomplete_all' to
- # wake them up.
- # The '--nodevsync' option overrides this setting.
+ # The --nodevsync option overrides this setting.
+ # When disabled, LVM commands will not wait for notifications from
+ # udev, but continue irrespective of any possible udev processing in
+ # the background. Only use this if udev is not running or has rules
+ # that ignore the devices LVM creates. If enabled when udev is not
+ # running, and LVM processes are waiting for udev, run the command
+ # 'dmsetup udevcomplete_all' to wake them up.
udev_sync = 1
# Configuration option activation/udev_rules.
# Use udev rules to manage LV device nodes and symlinks.
- # When disabled, LVM will manage the device nodes and
- # symlinks for active LVs itself.
- # Manual intervention may be required if this setting is
- # changed while LVs are active.
+ # When disabled, LVM will manage the device nodes and symlinks for
+ # active LVs itself. Manual intervention may be required if this
+ # setting is changed while LVs are active.
udev_rules = 1
# Configuration option activation/verify_udev_operations.
# Use extra checks in LVM to verify udev operations.
- # This enables additional checks (and if necessary,
- # repairs) on entries in the device directory after
- # udev has completed processing its events.
- # Useful for diagnosing problems with LVM/udev interactions.
+ # This enables additional checks (and if necessary, repairs) on entries
+ # in the device directory after udev has completed processing its
+ # events. Useful for diagnosing problems with LVM/udev interactions.
verify_udev_operations = 0
# Configuration option activation/retry_deactivation.
# Retry failed LV deactivation.
- # If LV deactivation fails, LVM will retry for a few
- # seconds before failing. This may happen because a
- # process run from a quick udev rule temporarily opened
- # the device.
+ # If LV deactivation fails, LVM will retry for a few seconds before
+ # failing. This may happen because a process run from a quick udev rule
+ # temporarily opened the device.
retry_deactivation = 1
# Configuration option activation/missing_stripe_filler.
# Method to fill missing stripes when activating an incomplete LV.
- # Using 'error' will make inaccessible parts of the device return
- # I/O errors on access. You can instead use a device path, in which
- # case, that device will be used in place of missing stripes.
- # Using anything other than 'error' with mirrored or snapshotted
- # volumes is likely to result in data corruption.
+ # Using 'error' will make inaccessible parts of the device return I/O
+ # errors on access. You can instead use a device path, in which case,
+ # that device will be used in place of missing stripes. Using anything
+ # other than 'error' with mirrored or snapshotted volumes is likely to
+ # result in data corruption.
# This configuration option is advanced.
missing_stripe_filler = "error"
# Configuration option activation/use_linear_target.
# Use the linear target to optimize single stripe LVs.
- # When disabled, the striped target is used. The linear
- # target is an optimised version of the striped target
- # that only handles a single stripe.
+ # When disabled, the striped target is used. The linear target is an
+ # optimised version of the striped target that only handles a single
+ # stripe.
use_linear_target = 1
# Configuration option activation/reserved_stack.
@@ -1071,148 +1094,183 @@ activation {
# Configuration option activation/volume_list.
# Only LVs selected by this list are activated.
- # If this list is defined, an LV is only activated
- # if it matches an entry in this list.
- # If this list is undefined, it imposes no limits
+ # If this list is defined, an LV is only activated if it matches an
+ # entry in this list. If this list is undefined, it imposes no limits
# on LV activation (all are allowed).
- # Possible options are: vgname, vgname/lvname, @tag, @*
- # vgname is matched exactly and selects all LVs in the VG.
- # vgname/lvname is matched exactly and selects the LV.
- # @tag selects if tag matches a tag set on the LV or VG.
- # @* selects if a tag defined on the host is also set on
- # the LV or VG. See tags/hosttags.
- # If any host tags exist but volume_list is not defined,
- # a default single-entry list containing '@*' is assumed.
- # Example:
+ #
+ # Accepted values:
+ # vgname
+ # The VG name is matched exactly and selects all LVs in the VG.
+ # vgname/lvname
+ # The VG name and LV name are matched exactly and selects the LV.
+ # @tag
+ # Selects an LV if the specified tag matches a tag set on the LV
+ # or VG.
+ # @*
+ # Selects an LV if a tag defined on the host is also set on the LV
+ # or VG. See tags/hosttags. If any host tags exist but volume_list
+ # is not defined, a default single-entry list containing '@*' is
+ # assumed.
+ #
+ # Example
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option activation/auto_activation_volume_list.
# Only LVs selected by this list are auto-activated.
- # This list works like volume_list, but it is used
- # only by auto-activation commands. It does not apply
- # to direct activation commands.
- # If this list is defined, an LV is only auto-activated
- # if it matches an entry in this list.
- # If this list is undefined, it imposes no limits
- # on LV auto-activation (all are allowed.)
- # If this list is defined and empty, i.e. "[]",
- # then no LVs are selected for auto-activation.
- # An LV that is selected by this list for
- # auto-activation, must also be selected by
- # volume_list (if defined) before it is activated.
- # Auto-activation is an activation command that
- # includes the 'a' argument: --activate ay or -a ay,
- # e.g. vgchange -a ay, or lvchange -a ay vgname/lvname.
- # The 'a' (auto) argument for auto-activation is
- # meant to be used by activation commands that are
- # run automatically by the system, as opposed to
- # LVM commands run directly by a user. A user may
- # also use the 'a' flag directly to perform auto-
- # activation.
- # An example of a system-generated auto-activation
- # command is 'pvscan --cache -aay' which is generated
- # when udev and lvmetad detect a new VG has appeared
- # on the system, and want LVs in it to be auto-activated.
- # Possible options are: vgname, vgname/lvname, @tag, @*
- # See volume_list for how these options are matched to LVs.
+ # This list works like volume_list, but it is used only by
+ # auto-activation commands. It does not apply to direct activation
+ # commands. If this list is defined, an LV is only auto-activated
+ # if it matches an entry in this list. If this list is undefined, it
+ # imposes no limits on LV auto-activation (all are allowed.) If this
+ # list is defined and empty, i.e. "[]", then no LVs are selected for
+ # auto-activation. An LV that is selected by this list for
+ # auto-activation, must also be selected by volume_list (if defined)
+ # before it is activated. Auto-activation is an activation command that
+ # includes the 'a' argument: --activate ay or -a ay. The 'a' (auto)
+ # argument for auto-activation is meant to be used by activation
+ # commands that are run automatically by the system, as opposed to LVM
+ # commands run directly by a user. A user may also use the 'a' flag
+ # directly to perform auto-activation. Also see pvscan(8) for more
+ # information about auto-activation.
+ #
+ # Accepted values:
+ # vgname
+ # The VG name is matched exactly and selects all LVs in the VG.
+ # vgname/lvname
+ # The VG name and LV name are matched exactly and selects the LV.
+ # @tag
+ # Selects an LV if the specified tag matches a tag set on the LV
+ # or VG.
+ # @*
+ # Selects an LV if a tag defined on the host is also set on the LV
+ # or VG. See tags/hosttags. If any host tags exist but volume_list
+ # is not defined, a default single-entry list containing '@*' is
+ # assumed.
+ #
+ # Example
+ # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+ #
# This configuration option does not have a default value defined.
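For illustration (not part of this patch), the empty-list form described above, which disables auto-activation while leaving direct activation untouched, would be written as:
    activation {
        # No LVs are auto-activated (e.g. by 'pvscan --cache -aay');
        # direct vgchange/lvchange activation is unaffected.
        auto_activation_volume_list = []
    }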
# Configuration option activation/read_only_volume_list.
# LVs in this list are activated in read-only mode.
- # If this list is defined, each LV that is to be activated
- # is checked against this list, and if it matches, it is
- # activated in read-only mode.
- # This overrides the permission setting stored in the
- # metadata, e.g. from --permission rw.
- # Possible options are: vgname, vgname/lvname, @tag, @*
- # See volume_list for how these options are matched to LVs.
+ # If this list is defined, each LV that is to be activated is checked
+ # against this list, and if it matches, it is activated in read-only
+ # mode. This overrides the permission setting stored in the metadata,
+ # e.g. from --permission rw.
+ #
+ # Accepted values:
+ # vgname
+ # The VG name is matched exactly and selects all LVs in the VG.
+ # vgname/lvname
+ # The VG name and LV name are matched exactly and selects the LV.
+ # @tag
+ # Selects an LV if the specified tag matches a tag set on the LV
+ # or VG.
+ # @*
+ # Selects an LV if a tag defined on the host is also set on the LV
+ # or VG. See tags/hosttags. If any host tags exist but volume_list
+ # is not defined, a default single-entry list containing '@*' is
+ # assumed.
+ #
+ # Example
+ # volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+ #
# This configuration option does not have a default value defined.
# Configuration option activation/raid_region_size.
# Size in KiB of each raid or mirror synchronization region.
- # For raid or mirror segment types, this is the amount of
- # data that is copied at once when initializing, or moved
- # at once by pvmove.
+ # For raid or mirror segment types, this is the amount of data that is
+ # copied at once when initializing, or moved at once by pvmove.
raid_region_size = 512
# Configuration option activation/error_when_full.
# Return errors if a thin pool runs out of space.
- # When enabled, writes to thin LVs immediately return
- # an error if the thin pool is out of data space.
- # When disabled, writes to thin LVs are queued if the
- # thin pool is out of space, and processed when the
- # thin pool data space is extended.
- # New thin pools are assigned the behavior defined here.
- # The '--errorwhenfull y|n' option overrides this setting.
+ # The --errorwhenfull option overrides this setting.
+ # When enabled, writes to thin LVs immediately return an error if the
+ # thin pool is out of data space. When disabled, writes to thin LVs
+ # are queued if the thin pool is out of space, and processed when the
+ # thin pool data space is extended. New thin pools are assigned the
+ # behavior defined here.
+ # This configuration option has an automatic default value.
# error_when_full = 0
# Configuration option activation/readahead.
# Setting to use when there is no readahead setting in metadata.
- # Possible options are: none, auto.
- # none - Disable readahead.
- # auto - Use default value chosen by kernel.
+ #
+ # Accepted values:
+ # none
+ # Disable readahead.
+ # auto
+ # Use default value chosen by kernel.
+ #
readahead = "auto"
# Configuration option activation/raid_fault_policy.
# Defines how a device failure in a RAID LV is handled.
# This includes LVs that have the following segment types:
# raid1, raid4, raid5*, and raid6*.
- # If a device in the LV fails, the policy determines the
- # steps perfomed by dmeventd automatically, and the steps
- # perfomed by 'lvconvert --repair --use-policies' run manually.
+ # If a device in the LV fails, the policy determines the steps
+ # performed by dmeventd automatically, and the steps performed by the
+ # manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
- # Possible options are: warn, allocate.
- # warn - Use the system log to warn the user that a device
- # in the RAID LV has failed. It is left to the user to run
- # 'lvconvert --repair' manually to remove or replace the failed
- # device. As long as the number of failed devices does not
- # exceed the redundancy of the logical volume (1 device for
- # raid4/5, 2 for raid6, etc) the LV will remain usable.
- # allocate - Attempt to use any extra physical volumes in the
- # volume group as spares and replace faulty devices.
+ #
+ # Accepted values:
+ # warn
+ # Use the system log to warn the user that a device in the RAID LV
+ # has failed. It is left to the user to run lvconvert --repair
+ # manually to remove or replace the failed device. As long as the
+ # number of failed devices does not exceed the redundancy of the LV
+ # (1 device for raid4/5, 2 for raid6), the LV will remain usable.
+ # allocate
+ # Attempt to use any extra physical volumes in the VG as spares and
+ # replace faulty devices.
+ #
raid_fault_policy = "warn"
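A minimal sketch (not part of the patch) of switching to automatic replacement, assuming spare PVs exist in the VG and dmeventd is monitoring the LV:
    activation {
        # dmeventd replaces a failed device using extra PVs in the VG.
        raid_fault_policy = "allocate"
    }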
# Configuration option activation/mirror_image_fault_policy.
# Defines how a device failure in a 'mirror' LV is handled.
- # An LV with the 'mirror' segment type is composed of mirror
- # images (copies) and a mirror log.
- # A disk log ensures that a mirror LV does not need to be
- # re-synced (all copies made the same) every time a machine
- # reboots or crashes.
- # If a device in the LV fails, this policy determines the
- # steps perfomed by dmeventd automatically, and the steps
- # performed by 'lvconvert --repair --use-policies' run manually.
+ # An LV with the 'mirror' segment type is composed of mirror images
+ # (copies) and a mirror log. A disk log ensures that a mirror LV does
+ # not need to be re-synced (all copies made the same) every time a
+ # machine reboots or crashes. If a device in the LV fails, this policy
+ # determines the steps performed by dmeventd automatically, and the steps
+ # performed by the manual command lvconvert --repair --use-policies.
# Automatic handling requires dmeventd to be monitoring the LV.
- # Possible options are: remove, allocate, allocate_anywhere.
- # remove - Simply remove the faulty device and run without it.
- # If the log device fails, the mirror would convert to using
- # an in-memory log. This means the mirror will not
- # remember its sync status across crashes/reboots and
- # the entire mirror will be re-synced.
- # If a mirror image fails, the mirror will convert to a
- # non-mirrored device if there is only one remaining good copy.
- # allocate - Remove the faulty device and try to allocate space
- # on a new device to be a replacement for the failed device.
- # Using this policy for the log is fast and maintains the
- # ability to remember sync state through crashes/reboots.
- # Using this policy for a mirror device is slow, as it
- # requires the mirror to resynchronize the devices, but it
- # will preserve the mirror characteristic of the device.
- # This policy acts like 'remove' if no suitable device and
- # space can be allocated for the replacement.
- # allocate_anywhere - Not yet implemented. Useful to place
- # the log device temporarily on the same physical volume as
- # one of the mirror images. This policy is not recommended
- # for mirror devices since it would break the redundant nature
- # of the mirror. This policy acts like 'remove' if no suitable
- # device and space can be allocated for the replacement.
+ #
+ # Accepted values:
+ # remove
+ # Simply remove the faulty device and run without it. If the log
+ # device fails, the mirror would convert to using an in-memory log.
+ # This means the mirror will not remember its sync status across
+ # crashes/reboots and the entire mirror will be re-synced. If a
+ # mirror image fails, the mirror will convert to a non-mirrored
+ # device if there is only one remaining good copy.
+ # allocate
+ # Remove the faulty device and try to allocate space on a new
+ # device to be a replacement for the failed device. Using this
+ # policy for the log is fast and maintains the ability to remember
+ # sync state through crashes/reboots. Using this policy for a
+ # mirror device is slow, as it requires the mirror to resynchronize
+ # the devices, but it will preserve the mirror characteristic of
+ # the device. This policy acts like 'remove' if no suitable device
+ # and space can be allocated for the replacement.
+ # allocate_anywhere
+ # Not yet implemented. Useful to place the log device temporarily
+ # on the same physical volume as one of the mirror images. This
+ # policy is not recommended for mirror devices since it would break
+ # the redundant nature of the mirror. This policy acts like
+ # 'remove' if no suitable device and space can be allocated for the
+ # replacement.
+ #
mirror_image_fault_policy = "remove"
# Configuration option activation/mirror_log_fault_policy.
# Defines how a device failure in a 'mirror' log LV is handled.
- # The mirror_image_fault_policy description for mirrored LVs
- # also applies to mirrored log LVs.
+ # The mirror_image_fault_policy description for mirrored LVs also
+ # applies to mirrored log LVs.
mirror_log_fault_policy = "allocate"
# Configuration option activation/snapshot_autoextend_threshold.
@@ -1221,20 +1279,26 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see snapshot_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
- # Example:
- # With snapshot_autoextend_threshold 70 and
- # snapshot_autoextend_percent 20, whenever a snapshot
- # exceeds 70% usage, it will be extended by another 20%.
- # For a 1G snapshot, using 700M will trigger a resize to 1.2G.
- # When the usage exceeds 840M, the snapshot will be extended
- # to 1.44G, and so on.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+ # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
+ # 840M, it is extended to 1.44G:
+ # snapshot_autoextend_threshold = 70
+ #
snapshot_autoextend_threshold = 100
# Configuration option activation/snapshot_autoextend_percent.
# Auto-extending a snapshot adds this percent extra space.
# The amount of additional space added to a snapshot is this
# percent of its current size.
- # Also see snapshot_autoextend_threshold.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+ # snapshot exceeds 700M, it is extended to 1.2G, and when it exceeds
+ # 840M, it is extended to 1.44G:
+ # snapshot_autoextend_percent = 20
+ #
snapshot_autoextend_percent = 20
# Configuration option activation/thin_pool_autoextend_threshold.
@@ -1243,150 +1307,166 @@ activation {
# The minimum value is 50 (a smaller value is treated as 50.)
# Also see thin_pool_autoextend_percent.
# Automatic extension requires dmeventd to be monitoring the LV.
- # Example:
- # With thin_pool_autoextend_threshold 70 and
- # thin_pool_autoextend_percent 20, whenever a thin pool
- # exceeds 70% usage, it will be extended by another 20%.
- # For a 1G thin pool, using up 700M will trigger a resize to 1.2G.
- # When the usage exceeds 840M, the thin pool will be extended
- # to 1.44G, and so on.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+ # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
+ # 840M, it is extended to 1.44G:
+ # thin_pool_autoextend_threshold = 70
+ #
thin_pool_autoextend_threshold = 100
# Configuration option activation/thin_pool_autoextend_percent.
# Auto-extending a thin pool adds this percent extra space.
# The amount of additional space added to a thin pool is this
# percent of its current size.
+ #
+ # Example
+ # Using 70% autoextend threshold and 20% autoextend size, when a 1G
+ # thin pool exceeds 700M, it is extended to 1.2G, and when it exceeds
+ # 840M, it is extended to 1.44G:
+ # thin_pool_autoextend_percent = 20
+ #
thin_pool_autoextend_percent = 20
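For illustration only, the two thin pool autoextend settings from the example above written together, so a monitored 1G pool is extended to 1.2G once 700M is used:
    activation {
        thin_pool_autoextend_threshold = 70
        thin_pool_autoextend_percent = 20
    }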
# Configuration option activation/mlock_filter.
# Do not mlock these memory areas.
- # While activating devices, I/O to devices being
- # (re)configured is suspended. As a precaution against
- # deadlocks, LVM pins memory it is using so it is not
- # paged out, and will not require I/O to reread.
- # Groups of pages that are known not to be accessed during
- # activation do not need to be pinned into memory.
- # Each string listed in this setting is compared against
- # each line in /proc/self/maps, and the pages corresponding
- # to lines that match are not pinned. On some systems,
- # locale-archive was found to make up over 80% of the memory
+ # While activating devices, I/O to devices being (re)configured is
+ # suspended. As a precaution against deadlocks, LVM pins memory it is
+ # using so it is not paged out, and will not require I/O to reread.
+ # Groups of pages that are known not to be accessed during activation
+ # do not need to be pinned into memory. Each string listed in this
+ # setting is compared against each line in /proc/self/maps, and the
+ # pages corresponding to lines that match are not pinned. On some
+ # systems, locale-archive was found to make up over 80% of the memory
# used by the process.
- # Example:
+ #
+ # Example
# mlock_filter = [ "locale/locale-archive", "gconv/gconv-modules.cache" ]
+ #
# This configuration option is advanced.
# This configuration option does not have a default value defined.
# Configuration option activation/use_mlockall.
# Use the old behavior of mlockall to pin all memory.
- # Prior to version 2.02.62, LVM used mlockall() to pin
- # the whole process's memory while activating devices.
+ # Prior to version 2.02.62, LVM used mlockall() to pin the whole
+ # process's memory while activating devices.
use_mlockall = 0
# Configuration option activation/monitoring.
# Monitor LVs that are activated.
- # When enabled, LVM will ask dmeventd to monitor LVs
- # that are activated.
- # The '--ignoremonitoring' option overrides this setting.
+ # The --ignoremonitoring option overrides this setting.
+ # When enabled, LVM will ask dmeventd to monitor activated LVs.
monitoring = 1
# Configuration option activation/polling_interval.
# Check pvmove or lvconvert progress at this interval (seconds).
# When pvmove or lvconvert must wait for the kernel to finish
- # synchronising or merging data, they check and report progress
- # at intervals of this number of seconds.
- # If this is set to 0 and there is only one thing to wait for,
- # there are no progress reports, but the process is awoken
- # immediately once the operation is complete.
+ # synchronising or merging data, they check and report progress at
+ # intervals of this number of seconds. If this is set to 0 and there
+ # is only one thing to wait for, there are no progress reports, but
+ # the process is awoken immediately once the operation is complete.
polling_interval = 15
# Configuration option activation/auto_set_activation_skip.
# Set the activation skip flag on new thin snapshot LVs.
- # An LV can have a persistent 'activation skip' flag.
- # The flag causes the LV to be skipped during normal activation.
- # The lvchange/vgchange -K option is required to activate LVs
- # that have the activation skip flag set.
- # When this setting is enabled, the activation skip flag is
+ # The --setactivationskip option overrides this setting.
+ # An LV can have a persistent 'activation skip' flag. The flag causes
+ # the LV to be skipped during normal activation. The lvchange/vgchange
+ # -K option is required to activate LVs that have the activation skip
+ # flag set. When this setting is enabled, the activation skip flag is
# set on new thin snapshot LVs.
- # The '--setactivationskip y|n' option overrides this setting.
+ # This configuration option has an automatic default value.
# auto_set_activation_skip = 1
# Configuration option activation/activation_mode.
# How LVs with missing devices are activated.
- # Possible options are: complete, degraded, partial.
- # complete - Only allow activation of an LV if all of
- # the Physical Volumes it uses are present. Other PVs
- # in the Volume Group may be missing.
- # degraded - Like complete, but additionally RAID LVs of
- # segment type raid1, raid4, raid5, radid6 and raid10 will
- # be activated if there is no data loss, i.e. they have
- # sufficient redundancy to present the entire addressable
- # range of the Logical Volume.
- # partial - Allows the activation of any LV even if a
- # missing or failed PV could cause data loss with a
- # portion of the Logical Volume inaccessible.
- # This setting should not normally be used, but may
- # sometimes assist with data recovery.
- # The '--activationmode' option overrides this setting.
+ # The --activationmode option overrides this setting.
+ #
+ # Accepted values:
+ # complete
+ # Only allow activation of an LV if all of the Physical Volumes it
+ # uses are present. Other PVs in the Volume Group may be missing.
+ # degraded
+ # Like complete, but additionally RAID LVs of segment type raid1,
+ # raid4, raid5, raid6 and raid10 will be activated if there is no
+ # data loss, i.e. they have sufficient redundancy to present the
+ # entire addressable range of the Logical Volume.
+ # partial
+ # Allows the activation of any LV even if a missing or failed PV
+ # could cause data loss with a portion of the LV inaccessible.
+ # This setting should not normally be used, but may sometimes
+ # assist with data recovery.
+ #
activation_mode = "degraded"
# Configuration option activation/lock_start_list.
# Locking is started only for VGs selected by this list.
- # The rules are the same as those for LVs in volume_list.
+ # The rules are the same as those for volume_list.
# This configuration option does not have a default value defined.
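Illustrative only, with made-up VG and tag names; entries are matched using the same rules as volume_list:
    activation {
        # Start lockspaces only for vg1 and for VGs tagged "tag1".
        lock_start_list = [ "vg1", "@tag1" ]
    }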
# Configuration option activation/auto_lock_start_list.
# Locking is auto-started only for VGs selected by this list.
- # The rules are the same as those for LVs in auto_activation_volume_list.
+ # The rules are the same as those for auto_activation_volume_list.
# This configuration option does not have a default value defined.
}
# Configuration section metadata.
+# This configuration section has an automatic default value.
# metadata {
# Configuration option metadata/pvmetadatacopies.
# Number of copies of metadata to store on each PV.
- # Possible options are: 0, 1, 2.
- # If set to 2, two copies of the VG metadata are stored on
- # the PV, one at the front of the PV, and one at the end.
- # If set to 1, one copy is stored at the front of the PV.
- # If set to 0, no copies are stored on the PV. This may
- # be useful with VGs containing large numbers of PVs.
- # The '--pvmetadatacopies' option overrides this setting.
+ # The --pvmetadatacopies option overrides this setting.
+ #
+ # Accepted values:
+ # 2
+ # Two copies of the VG metadata are stored on the PV, one at the
+ # front of the PV, and one at the end.
+ # 1
+ # One copy of VG metadata is stored at the front of the PV.
+ # 0
+ # No copies of VG metadata are stored on the PV. This may be
+ # useful for VGs containing large numbers of PVs.
+ #
# This configuration option is advanced.
+ # This configuration option has an automatic default value.
# pvmetadatacopies = 1
# Configuration option metadata/vgmetadatacopies.
# Number of copies of metadata to maintain for each VG.
- # If set to a non-zero value, LVM automatically chooses which of
- # the available metadata areas to use to achieve the requested
- # number of copies of the VG metadata. If you set a value larger
- # than the the total number of metadata areas available, then
- # metadata is stored in them all.
- # The value 0 (unmanaged) disables this automatic management
- # and allows you to control which metadata areas are used at
- # the individual PV level using 'pvchange --metadataignore y|n'.
- # The '--vgmetadatacopies' option overrides this setting.
+ # The --vgmetadatacopies option overrides this setting.
+ # If set to a non-zero value, LVM automatically chooses which of the
+ # available metadata areas to use to achieve the requested number of
+ # copies of the VG metadata. If you set a value larger than the
+ # total number of metadata areas available, then metadata is stored in
+ # them all. The value 0 (unmanaged) disables this automatic management
+ # and allows you to control which metadata areas are used at the
+ # individual PV level using pvchange --metadataignore y|n.
+ # This configuration option has an automatic default value.
# vgmetadatacopies = 0
# Configuration option metadata/pvmetadatasize.
# Approximate number of sectors to use for each metadata copy.
- # VGs with large numbers of PVs or LVs, or VGs containing
- # complex LV structures, may need additional space for VG
- # metadata. The metadata areas are treated as circular buffers,
- # so unused space becomes filled with an archive of the most
- # recent previous versions of the metadata.
+ # VGs with large numbers of PVs or LVs, or VGs containing complex LV
+ # structures, may need additional space for VG metadata. The metadata
+ # areas are treated as circular buffers, so unused space becomes filled
+ # with an archive of the most recent previous versions of the metadata.
+ # This configuration option has an automatic default value.
# pvmetadatasize = 255
# Configuration option metadata/pvmetadataignore.
# Ignore metadata areas on a new PV.
- # If metadata areas on a PV are ignored, LVM will not store
- # metadata in them.
- # The '--metadataignore' option overrides this setting.
+ # The --metadataignore option overrides this setting.
+ # If metadata areas on a PV are ignored, LVM will not store metadata
+ # in them.
# This configuration option is advanced.
+ # This configuration option has an automatic default value.
# pvmetadataignore = 0
# Configuration option metadata/stripesize.
# This configuration option is advanced.
+ # This configuration option has an automatic default value.
# stripesize = 64
# Configuration option metadata/dirs.
@@ -1394,32 +1474,36 @@ activation {
# These directories must not be on logical volumes!
# It's possible to use LVM with a couple of directories here,
# preferably on different (non-LV) filesystems, and with no other
- # on-disk metadata (pvmetadatacopies = 0). Or this can be in
- # addition to on-disk metadata areas.
- # The feature was originally added to simplify testing and is not
- # supported under low memory situations - the machine could lock up.
- # Never edit any files in these directories by hand unless you
- # you are absolutely sure you know what you are doing! Use
- # the supplied toolset to make changes (e.g. vgcfgrestore).
- # Example:
+ # on-disk metadata (pvmetadatacopies = 0). Or this can be in addition
+ # to on-disk metadata areas. The feature was originally added to
+ # simplify testing and is not supported under low memory situations -
+ # the machine could lock up. Never edit any files in these directories
+ # by hand unless you are absolutely sure you know what you are doing!
+ # Use the supplied toolset to make changes (e.g. vgcfgrestore).
+ #
+ # Example
# dirs = [ "/etc/lvm/metadata", "/mnt/disk2/lvm/metadata2" ]
+ #
# This configuration option is advanced.
# This configuration option does not have a default value defined.
# }
# Configuration section report.
# LVM report command output formatting.
+# This configuration section has an automatic default value.
# report {
# Configuration option report/compact_output.
# Do not print empty report fields.
- # Fields that don't have a value set for any of the rows
- # reported are skipped and not printed. Compact output is
- # applicable only if report/buffered is enabled.
+ # Fields that don't have a value set for any of the rows reported are
+ # skipped and not printed. Compact output is applicable only if
+ # report/buffered is enabled.
+ # This configuration option has an automatic default value.
# compact_output = 0
# Configuration option report/aligned.
# Align columns in report output.
+ # This configuration option has an automatic default value.
# aligned = 1
# Configuration option report/buffered.
@@ -1429,31 +1513,38 @@ activation {
# is flushed to output which normally happens at the end of command
# execution. Otherwise, if buffering is not used, each object is
# reported as soon as its processing is finished.
+ # This configuration option has an automatic default value.
# buffered = 1
# Configuration option report/headings.
# Show headings for columns on report.
+ # This configuration option has an automatic default value.
# headings = 1
# Configuration option report/separator.
# A separator to use on report after each field.
+ # This configuration option has an automatic default value.
# separator = " "
# Configuration option report/list_item_separator.
# A separator to use for list items when reported.
+ # This configuration option has an automatic default value.
# list_item_separator = ","
# Configuration option report/prefixes.
# Use a field name prefix for each field reported.
+ # This configuration option has an automatic default value.
# prefixes = 0
# Configuration option report/quoted.
# Quote field values when using field name prefixes.
+ # This configuration option has an automatic default value.
# quoted = 1
# Configuration option report/colums_as_rows.
# Output each column as a row.
# If set, this also implies report/prefixes=1.
+ # This configuration option has an automatic default value.
# colums_as_rows = 0
# Configuration option report/binary_values_as_numeric.
@@ -1461,170 +1552,249 @@ activation {
# For columns that have exactly two valid values to report
# (not counting the 'unknown' value which denotes that the
# value could not be determined).
+ # This configuration option has an automatic default value.
# binary_values_as_numeric = 0
# Configuration option report/time_format.
# Set time format for fields reporting time values.
# Format specification is a string which may contain special character
- # sequences and ordinary character sequences. Ordinary character sequences
- # are copied verbatim. Each special character sequence is introduced by '%'
- # character and such sequence is then substituted with a value as described below:
- # %a The abbreviated name of the day of the week according to the
- # current locale.
- # %A The full name of the day of the week according to the current locale.
- # %b The abbreviated month name according to the current locale.
- # %B The full month name according to the current locale.
- # %c The preferred date and time representation for the current locale. (alt E)
- # %C The century number (year/100) as a 2-digit integer. (alt E)
- # %d The day of the month as a decimal number (range 01 to 31). (alt O)
- # %D Equivalent to %m/%d/%y. (For Americans only. Americans should
- # note that in other countries%d/%m/%y is rather common. This means
- # that in international context this format is ambiguous and should not
- # be used.
- # %e Like %d, the day of the month as a decimal number, but a leading zero
- # is replaced by a space. (alt O)
- # %E Modifier: use alternative local-dependent representation if available.
- # %F Equivalent to %Y-%m-%d (the ISO 8601 date format).
- # %G The ISO 8601 week-based year with century as adecimal number. The 4-digit
- # year corresponding to the ISO week number (see %V). This has the same
- # format and value as %Y, except that if the ISO week number belongs to
- # the previous or next year, that year is used instead.
- # %g Like %G, but without century, that is, with a 2-digit year (00-99).
- # %h Equivalent to %b.
- # %H The hour as a decimal number using a 24-hour clock (range 00 to 23). (alt O)
- # %I The hour as a decimal number using a 12-hour clock (range 01 to 12). (alt O)
- # %j The day of the year as a decimal number (range 001 to 366).
- # %k The hour (24-hour clock) as a decimal number (range 0 to 23);
- # single digits are preceded by a blank. (See also %H.)
- # %l The hour (12-hour clock) as a decimal number (range 1 to 12);
- # single digits are preceded by a blank. (See also %I.)
- # %m The month as a decimal number (range 01 to 12). (alt O)
- # %M The minute as a decimal number (range 00 to 59). (alt O)
- # %O Modifier: use alternative numeric symbols.
- # %p Either "AM" or "PM" according to the given time value,
- # or the corresponding strings for the current locale. Noon is
- # treated as "PM" and midnight as "AM".
- # %P Like %p but in lowercase: "am" or "pm" or a corresponding
- # string for the current locale.
- # %r The time in a.m. or p.m. notation. In the POSIX locale this is
- # equivalent to %I:%M:%S %p.
- # %R The time in 24-hour notation (%H:%M). For a version including
- # the seconds, see %T below.
- # %s The number of seconds since the Epoch, 1970-01-01 00:00:00 +0000 (UTC)
- # %S The second as a decimal number (range 00 to 60).
- # (The range is up to 60 to allow for occasional leap seconds.) (alt O)
- # %t A tab character.
- # %T The time in 24-hour notation (%H:%M:%S).
- # %u The day of the week as a decimal, range 1 to 7, Monday being 1.
- # See also %w. (alt O)
- # %U The week number of the current year as a decimal number,
- # range 00 to 53, starting with the first Sunday as the first
- # day of week 01. See also %V and %W. (alt O)
- # %V The ISO 8601 week number of the current year as a decimal number,
- # range 01 to 53, where week 1 is the first week that has at least 4 days
- # in the new year. See also %U and %W. (alt O)
- # %w The day of the week as a decimal, range 0 to 6, Sunday being 0.
- # See also %u. (alt O)
- # %W The week number of the current year as a decimal number, range 00 to 53,
- # starting with the first Monday as the first day of week 01. (alt O)
- # %x The preferred date representation for the current locale without the time. (alt E)
- # %X The preferred time representation for the current locale without the date. (alt E)
- # %y The year as a decimal number without a century (range 00 to 99). (alt E, alt O)
- # %Y The year as a decimal number including the century. (alt E)
- # %z The +hhmm or -hhmm numeric timezone (that is, the hour and minute
- # offset from UTC).
- # %Z The timezone name or abbreviation.
- # %% A literal '%' character.
+ # sequences and ordinary character sequences. Ordinary character
+ # sequences are copied verbatim. Each special character sequence is
+ # introduced by the '%' character and such sequence is then
+ # substituted with a value as described below.
+ #
+ # Accepted values:
+ # %a
+ # The abbreviated name of the day of the week according to the
+ # current locale.
+ # %A
+ # The full name of the day of the week according to the current
+ # locale.
+ # %b
+ # The abbreviated month name according to the current locale.
+ # %B
+ # The full month name according to the current locale.
+ # %c
+ # The preferred date and time representation for the current
+ # locale. (alt E)
+ # %C
+ # The century number (year/100) as a 2-digit integer. (alt E)
+ # %d
+ # The day of the month as a decimal number (range 01 to 31).
+ # (alt O)
+ # %D
+ # Equivalent to %m/%d/%y. (For Americans only. Americans should
+ # note that in other countries %d/%m/%y is rather common. This
+ # means that in international context this format is ambiguous and
+ # should not be used.)
+ # %e
+ # Like %d, the day of the month as a decimal number, but a leading
+ # zero is replaced by a space. (alt O)
+ # %E
+ # Modifier: use alternative locale-dependent representation if
+ # available.
+ # %F
+ # Equivalent to %Y-%m-%d (the ISO 8601 date format).
+ # %G
+ # The ISO 8601 week-based year with century as a decimal number.
+ # The 4-digit year corresponding to the ISO week number (see %V).
+ # This has the same format and value as %Y, except that if the
+ # ISO week number belongs to the previous or next year, that year
+ # is used instead.
+ # %g
+ # Like %G, but without century, that is, with a 2-digit year
+ # (00-99).
+ # %h
+ # Equivalent to %b.
+ # %H
+ # The hour as a decimal number using a 24-hour clock
+ # (range 00 to 23). (alt O)
+ # %I
+ # The hour as a decimal number using a 12-hour clock
+ # (range 01 to 12). (alt O)
+ # %j
+ # The day of the year as a decimal number (range 001 to 366).
+ # %k
+ # The hour (24-hour clock) as a decimal number (range 0 to 23);
+ # single digits are preceded by a blank. (See also %H.)
+ # %l
+ # The hour (12-hour clock) as a decimal number (range 1 to 12);
+ # single digits are preceded by a blank. (See also %I.)
+ # %m
+ # The month as a decimal number (range 01 to 12). (alt O)
+ # %M
+ # The minute as a decimal number (range 00 to 59). (alt O)
+ # %O
+ # Modifier: use alternative numeric symbols.
+ # %p
+ # Either "AM" or "PM" according to the given time value,
+ # or the corresponding strings for the current locale. Noon is
+ # treated as "PM" and midnight as "AM".
+ # %P
+ # Like %p but in lowercase: "am" or "pm" or a corresponding
+ # string for the current locale.
+ # %r
+ # The time in a.m. or p.m. notation. In the POSIX locale this is
+ # equivalent to %I:%M:%S %p.
+ # %R
+ # The time in 24-hour notation (%H:%M). For a version including
+ # the seconds, see %T below.
+ # %s
+ # The number of seconds since the Epoch,
+ # 1970-01-01 00:00:00 +0000 (UTC)
+ # %S
+ # The second as a decimal number (range 00 to 60). (The range is
+ # up to 60 to allow for occasional leap seconds.) (alt O)
+ # %t
+ # A tab character.
+ # %T
+ # The time in 24-hour notation (%H:%M:%S).
+ # %u
+ # The day of the week as a decimal, range 1 to 7, Monday being 1.
+ # See also %w. (alt O)
+ # %U
+ # The week number of the current year as a decimal number,
+ # range 00 to 53, starting with the first Sunday as the first
+ # day of week 01. See also %V and %W. (alt O)
+ # %V
+ # The ISO 8601 week number of the current year as a decimal number,
+ # range 01 to 53, where week 1 is the first week that has at least
+ # 4 days in the new year. See also %U and %W. (alt O)
+ # %w
+ # The day of the week as a decimal, range 0 to 6, Sunday being 0.
+ # See also %u. (alt O)
+ # %W
+ # The week number of the current year as a decimal number,
+ # range 00 to 53, starting with the first Monday as the first day
+ # of week 01. (alt O)
+ # %x
+ # The preferred date representation for the current locale without
+ # the time. (alt E)
+ # %X
+ # The preferred time representation for the current locale without
+ # the date. (alt E)
+ # %y
+ # The year as a decimal number without a century (range 00 to 99).
+ # (alt E, alt O)
+ # %Y
+ # The year as a decimal number including the century. (alt E)
+ # %z
+ # The +hhmm or -hhmm numeric timezone (that is, the hour and minute
+ # offset from UTC).
+ # %Z
+ # The timezone name or abbreviation.
+ # %%
+ # A literal '%' character.
+ #
+ # This configuration option has an automatic default value.
# time_format = "%Y-%m-%d %T %z"
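To illustrate the sequences above, a sketch of the automatic default, shown against an arbitrary invented timestamp:

    report {
        # "%Y-%m-%d %T %z" produces output such as:
        #   2015-08-27 12:34:56 -0500
        # "%s" alone would instead report the same instant as seconds
        # since the Epoch.
        time_format = "%Y-%m-%d %T %z"
    }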
# Configuration option report/devtypes_sort.
# List of columns to sort by when reporting 'lvm devtypes' command.
# See 'lvm devtypes -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# devtypes_sort = "devtype_name"
# Configuration option report/devtypes_cols.
# List of columns to report for 'lvm devtypes' command.
# See 'lvm devtypes -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# devtypes_cols = "devtype_name,devtype_max_partitions,devtype_description"
# Configuration option report/devtypes_cols_verbose.
# List of columns to report for 'lvm devtypes' command in verbose mode.
# See 'lvm devtypes -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# devtypes_cols_verbose = "devtype_name,devtype_max_partitions,devtype_description"
# Configuration option report/lvs_sort.
# List of columns to sort by when reporting 'lvs' command.
# See 'lvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# lvs_sort = "vg_name,lv_name"
# Configuration option report/lvs_cols.
# List of columns to report for 'lvs' command.
# See 'lvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# lvs_cols = "lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
# Configuration option report/lvs_cols_verbose.
# List of columns to report for 'lvs' command in verbose mode.
# See 'lvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# lvs_cols_verbose = "lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv,lv_uuid,lv_profile"
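These lists only set command defaults; an explicit '-o' field list on the command line (see 'lvs -o help') normally takes precedence. A minimal sketch of an override, with an assumed choice of fields:

    report {
        lvs_cols = "lv_name,vg_name,lv_size,lv_attr"
        lvs_sort = "lv_name"
    }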
# Configuration option report/vgs_sort.
# List of columns to sort by when reporting 'vgs' command.
# See 'vgs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# vgs_sort = "vg_name"
# Configuration option report/vgs_cols.
# List of columns to report for 'vgs' command.
# See 'vgs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# vgs_cols = "vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
# Configuration option report/vgs_cols_verbose.
# List of columns to report for 'vgs' command in verbose mode.
# See 'vgs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# vgs_cols_verbose = "vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"
# Configuration option report/pvs_sort.
# List of columns to sort by when reporting 'pvs' command.
# See 'pvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvs_sort = "pv_name"
# Configuration option report/pvs_cols.
# List of columns to report for 'pvs' command.
# See 'pvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"
# Configuration option report/pvs_cols_verbose.
# List of columns to report for 'pvs' command in verbose mode.
# See 'pvs -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"
# Configuration option report/segs_sort.
# List of columns to sort by when reporting 'lvs --segments' command.
# See 'lvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# segs_sort = "vg_name,lv_name,seg_start"
# Configuration option report/segs_cols.
# List of columns to report for 'lvs --segments' command.
- # See 'lvs --segments -o help' for the list of possible fields.
+ # See 'lvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# segs_cols = "lv_name,vg_name,lv_attr,stripes,segtype,seg_size"
# Configuration option report/segs_cols_verbose.
# List of columns to report for 'lvs --segments' command in verbose mode.
# See 'lvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# segs_cols_verbose = "lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"
# Configuration option report/pvsegs_sort.
# List of columns to sort by when reporting 'pvs --segments' command.
# See 'pvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvsegs_sort = "pv_name,pvseg_start"
# Configuration option report/pvsegs_cols.
# List of columns to report for 'pvs --segments' command.
# See 'pvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvsegs_cols = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"
# Configuration option report/pvsegs_cols_verbose.
# List of columns to report for 'pvs --segments' command in verbose mode.
# See 'pvs --segments -o help' for the list of possible fields.
+ # This configuration option has an automatic default value.
# pvsegs_cols_verbose = "pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
# }
@@ -1635,68 +1805,71 @@ dmeventd {
# Configuration option dmeventd/mirror_library.
# The library dmeventd uses when monitoring a mirror device.
# libdevmapper-event-lvm2mirror.so attempts to recover from
- # failures. It removes failed devices from a volume group and
+ # failures. It removes failed devices from a volume group and
# reconfigures a mirror as necessary. If no mirror library is
# provided, mirrors are not monitored through dmeventd.
mirror_library = "libdevmapper-event-lvm2mirror.so"
# Configuration option dmeventd/raid_library.
+ # This configuration option has an automatic default value.
# raid_library = "libdevmapper-event-lvm2raid.so"
# Configuration option dmeventd/snapshot_library.
# The library dmeventd uses when monitoring a snapshot device.
- # libdevmapper-event-lvm2snapshot.so monitors the filling of
- # snapshots and emits a warning through syslog when the usage
- # exceeds 80%. The warning is repeated when 85%, 90% and
- # 95% of the snapshot is filled.
+ # libdevmapper-event-lvm2snapshot.so monitors the filling of snapshots
+ # and emits a warning through syslog when the usage exceeds 80%. The
+ # warning is repeated when 85%, 90% and 95% of the snapshot is filled.
snapshot_library = "libdevmapper-event-lvm2snapshot.so"
# Configuration option dmeventd/thin_library.
# The library dmeventd uses when monitoring a thin device.
- # libdevmapper-event-lvm2thin.so monitors the filling of
- # a pool and emits a warning through syslog when the usage
- # exceeds 80%. The warning is repeated when 85%, 90% and
- # 95% of the pool is filled.
+ # libdevmapper-event-lvm2thin.so monitors the filling of a pool
+ # and emits a warning through syslog when the usage exceeds 80%. The
+ # warning is repeated when 85%, 90% and 95% of the pool is filled.
thin_library = "libdevmapper-event-lvm2thin.so"
# Configuration option dmeventd/executable.
# The full path to the dmeventd binary.
+ # This configuration option has an automatic default value.
# executable = "@DMEVENTD_PATH@"
}
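The libraries above only come into play while dmeventd monitoring is in effect; a sketch, assuming the standard activation/monitoring switch (monitoring can also be toggled per LV with 'lvchange --monitor y|n'):

    activation {
        # enable dmeventd monitoring of mirror, snapshot and thin devices
        monitoring = 1
    }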
# Configuration section tags.
# Host tag settings.
+# This configuration section has an automatic default value.
# tags {
# Configuration option tags/hosttags.
# Create a host tag using the machine name.
# The machine name is nodename returned by uname(2).
+ # This configuration option has an automatic default value.
# hosttags = 0
# Configuration section tags/<tag>.
# Replace this subsection name with a custom tag name.
- # Multiple subsections like this can be created.
- # The '@' prefix for tags is optional.
- # This subsection can contain host_list, which is a
- # list of machine names. If the name of the local
- # machine is found in host_list, then the name of
- # this subsection is used as a tag and is applied
- # to the local machine as a 'host tag'.
- # If this subsection is empty (has no host_list), then
- # the subsection name is always applied as a 'host tag'.
- # Example:
+ # Multiple subsections like this can be created. The '@' prefix for
+ # tags is optional. This subsection can contain host_list, which is a
+ # list of machine names. If the name of the local machine is found in
+ # host_list, then the name of this subsection is used as a tag and is
+ # applied to the local machine as a 'host tag'. If this subsection is
+ # empty (has no host_list), then the subsection name is always applied
+ # as a 'host tag'.
+ #
+ # Example
# The host tag foo is given to all hosts, and the host tag
# bar is given to the hosts named machine1 and machine2.
# tags { foo { } bar { host_list = [ "machine1", "machine2" ] } }
+ #
# This configuration section has variable name.
+ # This configuration section has an automatic default value.
# tag {
# Configuration option tags/<tag>/host_list.
# A list of machine names.
- # These machine names are compared to the nodename
- # returned by uname(2). If the local machine name
- # matches an entry in this list, the name of the
- # subsection is applied to the machine as a 'host tag'.
+ # These machine names are compared to the nodename returned
+ # by uname(2). If the local machine name matches an entry in
+ # this list, the name of the subsection is applied to the
+ # machine as a 'host tag'.
# This configuration option does not have a default value defined.
# }
# }
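Putting the pieces above together, a hypothetical tags section (machine names invented for illustration):

    tags {
        hosttags = 1
        # 'backup' is applied as a host tag only on machine1 and machine2
        backup { host_list = [ "machine1", "machine2" ] }
    }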
diff --git a/conf/lvmlocal.conf.in b/conf/lvmlocal.conf.in
index c3f6ac1..9fc50c8 100644
--- a/conf/lvmlocal.conf.in
+++ b/conf/lvmlocal.conf.in
@@ -24,34 +24,33 @@ local {
# Configuration option local/system_id.
# Defines the local system ID for lvmlocal mode.
- # This is used when global/system_id_source is set
- # to 'lvmlocal' in the main configuration file,
- # e.g. lvm.conf.
- # When used, it must be set to a unique value
- # among all hosts sharing access to the storage,
+ # This is used when global/system_id_source is set to 'lvmlocal' in the
+ # main configuration file, e.g. lvm.conf. When used, it must be set to
+ # a unique value among all hosts sharing access to the storage,
# e.g. a host name.
- # Example:
- # Set no system ID.
+ #
+ # Example
+ # Set no system ID:
# system_id = ""
- # Example:
- # Set the system_id to the string 'host1'.
+ # Set the system_id to a specific name:
# system_id = "host1"
+ #
+ # This configuration option has an automatic default value.
# system_id = ""
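A sketch of the pairing described above, with 'host1' as an assumed name; the value here only takes effect when lvm.conf selects the 'lvmlocal' source:

    # lvm.conf
    global {
        system_id_source = "lvmlocal"
    }

    # lvmlocal.conf
    local {
        system_id = "host1"
    }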
# Configuration option local/extra_system_ids.
# A list of extra VG system IDs the local host can access.
- # VGs with the system IDs listed here (in addition
- # to the host's own system ID) can be fully accessed
- # by the local host. (These are system IDs that the
- # host sees in VGs, not system IDs that identify the
- # local host, which is determined by system_id_source.)
- # Use this only after consulting 'man lvmsystemid'
- # to be certain of correct usage and possible dangers.
+ # VGs with the system IDs listed here (in addition to the host's own
+ # system ID) can be fully accessed by the local host. (These are
+ # system IDs that the host sees in VGs, not system IDs that identify
+ # the local host, which is determined by system_id_source.)
+ # Use this only after consulting 'man lvmsystemid' to be certain of
+ # correct usage and possible dangers.
# This configuration option does not have a default value defined.
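For example, with two invented host names, this grants the local host full access to VGs owned by those system IDs as well as its own:

    local {
        extra_system_ids = [ "host2", "host3" ]
    }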
# Configuration option local/host_id.
# The lvmlockd sanlock host_id.
- # This must be a unique among all hosts,
- # and must be between 1 and 2000.
+ # This must be unique among all hosts, and must be between 1 and 2000.
+ # This configuration option has an automatic default value.
# host_id = 0
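A sketch for one of several hosts sharing sanlock-protected VGs; each host picks a distinct value in the 1..2000 range (0, the automatic default, is assumed here to mean unset):

    local {
        host_id = 1
    }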
}
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=dd7efa7069096d5b…
Commit: dd7efa7069096d5b5f4aab5785088f647faa528a
Parent: 0000000000000000000000000000000000000000
Author: Alasdair G Kergon <agk(a)redhat.com>
AuthorDate: 2015-08-26 22:12 +0000
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: 2015-08-26 22:12 +0000
annotated tag: v2_02_129 has been created
at dd7efa7069096d5b5f4aab5785088f647faa528a (tag)
tagging a37fd93fbb51eed8b102b373f2338f6d2802ae5f (commit)
replaces v2_02_128
Release 2.02.129.
58 files changed, 2760 insertions(+), 1784 deletions(-)
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.14 (GNU/Linux)
iEYEABECAAYFAlXeOegACgkQIoGRwVZ+LBcO5ACg7SrYu/YLwnyurGHNF7gy51Td
eyAAoOB9Z5ZExcf7PzimVHyFcjAwrRtG
=rl8w
-----END PGP SIGNATURE-----
Alasdair G Kergon (5):
post-release
make.tmpl: Mark internal sharedlib symbols local.
libdm: Drop ignored duplicate export designation.
vgdisplay: Drop error message for exported VGs.
pre-release
Bryn M. Reeves (18):
libdm: do not read region before checking dms for NULL (Coverity)
dmsetup: make sure subcommand is initialised (Coverity)
dmstats: check for zero in _nr_areas_from_step() (Coverity)
libdm: check for zero in _nr_areas() (Coverity)
dmsetup: check timerfd reads for valid byte count (Coverity)
dmstats: fix type formatting
dmstats: reduce minimum field widths
man: update dmstats.8.in examples
configure: check for -lm and log10 function
libdm: add dm_message_supports_precise_timestamps()
man: fix program_id string in dmstats.8.in
macros: fix default symbol export control
libdm: add precise timestamps support to libdm-stats
libdm: add per region precise timestamps property methods
dmstats: add --precise switch to enable nanosecond counters.
dmstats: add 'precise' flag field to stats report
makefiles: fix ld version script generation for older make versions
makefiles: remove stray ')'
David Teigland (16):
config: explain automatic default values
config: recognize a blank comment line
config: add empty lines around examples
config: create lists of accepted values in descriptions
lockd: no error when unlock fails
config: improve description text layout
tests: fix lockd options in sanlock-prepare
lvmlockctl: fix debug output
lvmlockd: change log_error to log_debug for non error
lvmlockd: ignore cmd close if no locks were taken
lvmlockd: fix sending debug info to lvmlockctl
test: allow tests with lvmlockd
tests: create/remove improvements for lvmlockd testing
tests: add lib function to test hidden LVs with lvs -a
tests: add test for lvmlockd lock_args
tests: fix check for lvmlockd test
Ferenc Wágner (6):
cmirrord: move generic setup from daemonize() to init_all()
cmirrord: add --foreground and --help options.
cmirrord manual: add --foreground and --help options
cmirrord: fix stack smashing
cmirrord: avoid debugging buffer overflow in LOG_SPRINT
cmirrord: avoid resync buffer overflow in LOG_SPRINT
Heinz Mauelshagen (1):
WHATS_NEW: Update.
Zdenek Kabelac (26):
lvmcache: check for too long pvid
lockd: check for failing unlock
libdm: simplify dmstats formula.
cleanup: unused header files (Coverity)
cleanup: add cast
cleanup: add FMTssize_t
cleanup: typo fix and drop \
cleanup: check pthread result codes
cleanup: trace error from lvmcache_update_vgname_and_id
debug: vgdisplay trace failing result code
Revert "lvmcache: check for too long pvid"
cleanup: log_debug format matches args
cleanup: preserve constness of some pointers
cleanup: move var declaration
cleanup: compare fgets pointer
cache: enable setting cachepolicy in lvconvert
thin: metadata size cannot be reduced
man: fix sqm typo
cleanup: add .
tests: check cachepolicy with lvconvert
WHATS_NEW
cache: no report error for cpool without mode
cache: report cache pool attrs also for pools
cache: lvconvert accepts --cachemode for --cache
man: replace to with for
tests: update cache tests