Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=5ca54c4f0b27125b…
Commit: 5ca54c4f0b27125bc385d55ea0b44853fc7744dd
Parent: 7db95d0d40c3608b7298cb19380430613fa9e532
Author: Jonathan Brassow <jbrassow(a)redhat.com>
AuthorDate: Wed Jul 31 15:23:13 2013 -0500
Committer: Jonathan Brassow <jbrassow(a)redhat.com>
CommitterDate: Wed Jul 31 15:23:13 2013 -0500
dmeventd: Fix memory leak
The timeout thread created for snapshots is not tracked and thus
never joined, so its exit status is held indefinitely. Creating
this thread with PTHREAD_CREATE_DETACHED saves a bit of memory.
I've also added pthread_attr_init/pthread_attr_destroy calls to set
up and tear down the pthread_attr_t used when creating the thread.
Reported-by: NeilBrown <neilb(a)suse.de>
Signed-off-by: Jonathan Brassow <jbrassow(a)redhat.com>
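[Editor's note, not part of the commit] For reference, a minimal standalone sketch of the
detached-thread pattern the patch applies: when no caller will ever join a thread, mark it
detached at creation time so its exit status is released automatically. The names _worker and
_create_detached are illustrative only, not the dmeventd identifiers.

  #include <pthread.h>
  #include <stdio.h>
  #include <unistd.h>

  /* Illustrative worker; stands in for a thread nobody joins. */
  static void *_worker(void *arg)
  {
          printf("worker running\n");
          return NULL;
  }

  /*
   * Create a thread that nobody will join: mark it detached at creation
   * so its exit status is freed automatically instead of being held
   * until a pthread_join() that never comes.
   */
  static int _create_detached(void *(*fun)(void *), void *arg)
  {
          int r;
          pthread_t t;
          pthread_attr_t attr;

          if ((r = pthread_attr_init(&attr)) != 0)
                  return r;

          pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

          r = pthread_create(&t, &attr, fun, arg);

          pthread_attr_destroy(&attr);

          return r;       /* Error codes are positive, as with pthread_create(). */
  }

  int main(void)
  {
          if (_create_detached(_worker, NULL))
                  return 1;

          sleep(1);       /* Give the detached worker time to run before exiting. */
          return 0;
  }

An equivalent alternative is to create the thread normally and call pthread_detach() on it
immediately; setting the attribute up front avoids a window in which the thread exists joinable.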
---
WHATS_NEW | 1 +
daemons/dmeventd/dmeventd.c | 41 +++++++++++++++++++++++++++++++++--------
2 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index 69a8275..c76dc68 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.100 -
================================
+ Create dmeventd timeout threads as "detached" so exit status is freed.
Add initial support for thin pool lvconvert --repair.
Add --with-thin-repair and --with-thin-dump configure options.
Add lvm.conf thin_repair/dump_executable and thin_repair_options.
diff --git a/daemons/dmeventd/dmeventd.c b/daemons/dmeventd/dmeventd.c
index 92c93d9..9c7a6c6 100644
--- a/daemons/dmeventd/dmeventd.c
+++ b/daemons/dmeventd/dmeventd.c
@@ -264,16 +264,44 @@ static struct dso_data *_alloc_dso_data(struct message_data *data)
return ret;
}
-/* Create a device monitoring thread. */
+/*
+ * Create a device monitoring thread.
+ * N.B. Error codes returned are positive.
+ */
static int _pthread_create_smallstack(pthread_t *t, void *(*fun)(void *), void *arg)
{
+ int r;
+ pthread_t tmp;
pthread_attr_t attr;
pthread_attr_init(&attr);
+
+ /*
+ * From pthread_attr_init man page:
+ * POSIX.1-2001 documents an ENOMEM error for pthread_attr_init(); on
+ * Linux these functions always succeed (but portable and future-proof
+ * applications should nevertheless handle a possible error return).
+ */
+ if ((r = pthread_attr_init(&attr)) != 0)
+ return r;
+
/*
* We use a smaller stack since it gets preallocated in its entirety
*/
pthread_attr_setstacksize(&attr, THREAD_STACK_SIZE);
- return pthread_create(t, &attr, fun, arg);
+
+ /*
+ * If no-one will be waiting, we need to detach.
+ */
+ if (!t) {
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ t = &tmp;
+ }
+
+ r = pthread_create(t, &attr, fun, arg);
+
+ pthread_attr_destroy(&attr);
+
+ return r;
}
static void _free_dso_data(struct dso_data *data)
@@ -548,12 +576,9 @@ static int _register_for_timeout(struct thread_status *thread)
pthread_cond_signal(&_timeout_cond);
}
- if (!_timeout_running) {
- pthread_t timeout_id;
-
- if (!(ret = _pthread_create_smallstack(&timeout_id, _timeout_thread, NULL)))
- _timeout_running = 1;
- }
+ if (!_timeout_running &&
+ !(ret = _pthread_create_smallstack(NULL, _timeout_thread, NULL)))
+ _timeout_running = 1;
pthread_mutex_unlock(&_timeout_mutex);
Gitweb: http://git.fedorahosted.org/git/?p=lvm2.git;a=commitdiff;h=b6bfddcd0a830d0c…
Commit: b6bfddcd0a830d0c9312bc3ab906cb3d1b7a6dd9
Parent: 9b28255aac9386db6d51cd1b501a9f39e81778a5
Author: Alasdair G Kergon <agk(a)redhat.com>
AuthorDate: Mon Jul 29 19:35:45 2013 +0100
Committer: Alasdair G Kergon <agk(a)redhat.com>
CommitterDate: Mon Jul 29 19:35:45 2013 +0100
alloc: fix lvextend when stripe number varies
The PREFERRED allocation mechanism requires the number of areas in the
previous LV segment to match the number in the new segment being
allocated. If they do not match, the code may crash.
E.g. https://bugzilla.redhat.com/989347
Introduce A_AREA_COUNT_MATCHES and, when it is not set, avoid
referring to the previous segment with the contiguous and cling
policies.
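[Editor's note, not part of the commit] A compilable sketch of the guard this patch introduces,
using simplified stand-in structures and a hypothetical helper _init_flags rather than the real
lv_manip.c types: the contiguous/cling flags are only set when the previous segment's area count
matches the new allocation, so the allocator never indexes areas that do not exist.

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified stand-ins for the LVM2 structures; fields reduced to what
   * the check needs.  Illustrative only, not the real definitions. */
  struct lv_segment { uint32_t area_count; };

  #define A_CONTIGUOUS_TO_LVSEG   0x01
  #define A_CLING_TO_LVSEG        0x02
  #define A_AREA_COUNT_MATCHES    0x20  /* Existing lvseg has same number of areas */

  struct alloc_parms {
          const struct lv_segment *prev_lvseg;
          uint32_t flags;
  };

  /*
   * Mirror of the added guard: contiguous/cling may only refer back to the
   * previous segment when its area count matches the new allocation.
   */
  static void _init_flags(struct alloc_parms *parms, uint32_t new_area_count,
                          int contiguous)
  {
          if (parms->prev_lvseg &&
              new_area_count == parms->prev_lvseg->area_count)
                  parms->flags |= A_AREA_COUNT_MATCHES;

          if (parms->prev_lvseg && (parms->flags & A_AREA_COUNT_MATCHES))
                  parms->flags |= contiguous ? A_CONTIGUOUS_TO_LVSEG
                                             : A_CLING_TO_LVSEG;
  }

  int main(void)
  {
          struct lv_segment prev = { .area_count = 2 };   /* e.g. 2 stripes */
          struct alloc_parms parms = { .prev_lvseg = &prev, .flags = 0 };

          _init_flags(&parms, 3, 1);      /* extend to 3 stripes: counts differ */

          printf("contiguous-to-lvseg %s\n",
                 (parms.flags & A_CONTIGUOUS_TO_LVSEG) ? "allowed" : "skipped");
          return 0;
  }

With mismatched counts the flags stay clear and the allocator falls back to ordinary area
selection instead of dereferencing the previous segment, which is the crash the diff below avoids.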
---
WHATS_NEW | 1 +
lib/metadata/lv_manip.c | 21 ++++++++++++++++-----
2 files changed, 17 insertions(+), 5 deletions(-)
diff --git a/WHATS_NEW b/WHATS_NEW
index 3318454..21c1f6d 100644
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
Version 2.02.100 -
================================
+ Ignore previous LV seg with alloc contiguous & cling when num stripes varies.
Fix segfault if devices/global_filter is not specified correctly.
Version 2.02.99 - 24th July 2013
diff --git a/lib/metadata/lv_manip.c b/lib/metadata/lv_manip.c
index 088c05d..24c66ef 100644
--- a/lib/metadata/lv_manip.c
+++ b/lib/metadata/lv_manip.c
@@ -47,6 +47,7 @@ typedef enum {
#define A_CLING_BY_TAGS 0x08 /* Must match tags against existing segment */
#define A_CAN_SPLIT 0x10
+#define A_AREA_COUNT_MATCHES 0x20 /* Existing lvseg has same number of areas as new segment */
#define SNAPSHOT_MIN_CHUNKS 3 /* Minimum number of chunks in snapshot */
@@ -1118,8 +1119,12 @@ static void _init_alloc_parms(struct alloc_handle *ah, struct alloc_parms *alloc
alloc_parms->flags = 0;
alloc_parms->extents_still_needed = extents_still_needed;
+ /* Only attempt contiguous/cling allocation to previous segment areas if the number of areas matches. */
+ if (alloc_parms->prev_lvseg && (ah->area_count == prev_lvseg->area_count))
+ alloc_parms->flags |= A_AREA_COUNT_MATCHES;
+
/* Are there any preceding segments we must follow on from? */
- if (alloc_parms->prev_lvseg) {
+ if (alloc_parms->prev_lvseg && (alloc_parms->flags & A_AREA_COUNT_MATCHES)) {
if (alloc_parms->alloc == ALLOC_CONTIGUOUS)
alloc_parms->flags |= A_CONTIGUOUS_TO_LVSEG;
else if ((alloc_parms->alloc == ALLOC_CLING) || (alloc_parms->alloc == ALLOC_CLING_BY_TAGS))
@@ -1721,7 +1726,8 @@ static area_use_t _check_pva(struct alloc_handle *ah, struct pv_area *pva, uint3
/* If maximise_cling is set, perform several checks, otherwise perform exactly one. */
if (!iteration_count && !log_iteration_count && alloc_parms->flags & (A_CONTIGUOUS_TO_LVSEG | A_CLING_TO_LVSEG | A_CLING_TO_ALLOCED)) {
/* Contiguous? */
- if (((alloc_parms->flags & A_CONTIGUOUS_TO_LVSEG) || (ah->maximise_cling && alloc_parms->prev_lvseg)) &&
+ if (((alloc_parms->flags & A_CONTIGUOUS_TO_LVSEG) ||
+ (ah->maximise_cling && alloc_parms->prev_lvseg && (alloc_parms->flags & A_AREA_COUNT_MATCHES))) &&
_check_contiguous(ah->cmd, alloc_parms->prev_lvseg, pva, alloc_state))
return PREFERRED;
@@ -1730,7 +1736,8 @@ static area_use_t _check_pva(struct alloc_handle *ah, struct pv_area *pva, uint3
return NEXT_AREA;
/* Cling to prev_lvseg? */
- if (((alloc_parms->flags & A_CLING_TO_LVSEG) || (ah->maximise_cling && alloc_parms->prev_lvseg)) &&
+ if (((alloc_parms->flags & A_CLING_TO_LVSEG) ||
+ (ah->maximise_cling && alloc_parms->prev_lvseg && (alloc_parms->flags & A_AREA_COUNT_MATCHES))) &&
_check_cling(ah, NULL, alloc_parms->prev_lvseg, pva, alloc_state))
/* If this PV is suitable, use this first area */
return PREFERRED;
@@ -1744,7 +1751,7 @@ static area_use_t _check_pva(struct alloc_handle *ah, struct pv_area *pva, uint3
if (!(alloc_parms->flags & A_CLING_BY_TAGS) || !ah->cling_tag_list_cn)
return NEXT_PV;
- if (alloc_parms->prev_lvseg) {
+ if (alloc_parms->prev_lvseg && (alloc_parms->flags & A_AREA_COUNT_MATCHES)) {
if (_check_cling(ah, ah->cling_tag_list_cn, alloc_parms->prev_lvseg, pva, alloc_state))
return PREFERRED;
} else if (_check_cling_to_alloced(ah, ah->cling_tag_list_cn, pva, alloc_state))
@@ -1967,8 +1974,12 @@ static int _find_some_parallel_space(struct alloc_handle *ah, const struct alloc
/* First area in each list is the largest */
dm_list_iterate_items(pva, &pvm->areas) {
/*
- * There are two types of allocations, which can't be mixed at present.
+ * There are two types of allocations, which can't be mixed at present:
+ *
* PREFERRED are stored immediately in a specific parallel slot.
+ * This requires the number of slots to match, so if comparing with
+ * prev_lvseg then A_AREA_COUNT_MATCHES must be set.
+ *
* USE_AREA are stored for later, then sorted and chosen from.
*/
switch(_check_pva(ah, pva, max_to_allocate, alloc_parms,