src/cmd.c | 4 +-
src/main.c | 65 ++++++++++++++++++++++++++---------------
src/sanlock_internal.h | 2 -
src/timeouts.h | 7 ----
tests/clientn | 77 ++++++++++++++++++++++++++++++++++++++++++++++++-
tests/sanlk_client.c | 4 ++
wdmd/main.c | 40 +++++++++++++++++++------
7 files changed, 155 insertions(+), 44 deletions(-)
New commits:
commit e1548ab53b1f12c0825cb614238a630069a93c1b
Author: David Teigland <teigland@redhat.com>
Date: Mon Aug 13 16:49:18 2012 -0500
clientn: add tests
Signed-off-by: David Teigland <teigland@redhat.com>
diff --git a/tests/clientn b/tests/clientn
index e47c7d9..85b4181 100755
--- a/tests/clientn
+++ b/tests/clientn
@@ -29,23 +29,85 @@ elif [ "$cmd" == "start" ]; then
./sanlk_client test r$i $dev $off $killpath &
done
-elif [ "$cmd" == "error" ]; then
+elif [ "$cmd" == "delay" ]; then
+
+ sec=$3
+
+ pid=`cat /var/run/sanlock/sanlock.pid`
+
+ echo sync with daemon renewals
+ kill -s SIGSTOP $pid
+ sleep 20
+ kill -s SIGCONT $pid
+ sleep 1
+
+ echo sigstop sanlock pid $pid
+ kill -s SIGSTOP $pid
+
+ echo sleep $sec
+ sleep $sec
+
+ echo sigcont sanlock pid $pid
+ kill -s SIGCONT $pid
+
+elif [ "$cmd" == "iodelay" ]; then
+
+ sec=$4
+ pid=`cat /var/run/sanlock/sanlock.pid`
+
+ echo sync with daemon renewals
+ kill -s SIGSTOP $pid
+ sleep 20
+ kill -s SIGCONT $pid
+ sleep 2
+
+ echo save linear
rm -f /tmp/client-state.txt
+ rm -f /tmp/client-linear.txt
+ rm -f /tmp/client-error.txt
+ dmsetup table $dev > /tmp/client-linear.txt
+ sed "s/linear/error/" /tmp/client-linear.txt > /tmp/client-error.txt
+ echo load error
+ dmsetup suspend $dev
+ dmsetup load $dev /tmp/client-error.txt
+ dmsetup resume $dev
+
+ echo sleep $sec
+ sleep $sec
+
+ echo load linear
+ dmsetup suspend $dev
+ dmsetup load $dev /tmp/client-linear.txt
+ dmsetup resume $dev
+
+elif [ "$cmd" == "error" ]; then
+
+ echo save linear
+ rm -f /tmp/client-state.txt
rm -f /tmp/client-linear.txt
rm -f /tmp/client-error.txt
dmsetup table $dev > /tmp/client-linear.txt
sed "s/linear/error/" /tmp/client-linear.txt > /tmp/client-error.txt
+ echo load error
dmsetup suspend $dev
dmsetup load $dev /tmp/client-error.txt
dmsetup resume $dev
+elif [ "$cmd" == "linear" ]; then
+
+ echo load linear
+ dmsetup suspend $dev
+ dmsetup load $dev /tmp/client-linear.txt
+ dmsetup resume $dev
+
elif [ "$cmd" == "resume" ]; then
hostid=$4
+ echo load linear
dmsetup suspend $dev
dmsetup load $dev /tmp/client-linear.txt
dmsetup resume $dev
@@ -74,6 +136,19 @@ else
echo " sanlock client add_lockspace -s test:HOSTID:DEV:0"
echo " starts N ./sanlk_client processes"
echo ""
+ echo "clientn N delay SEC"
+ echo " sigstop sanlock daemon"
+ echo " sleep SEC"
+ echo " sigcont sanlock daemon"
+ echo ""
+ echo "clientn N iodelay DEV SEC"
+ echo " block i/o to DEV"
+ echo " sleep SEC"
+ echo " unblock i/o to DEV"
+ echo ""
+ echo "clientn N linear DEV"
+ echo " unblock i/o to DEV"
+ echo ""
echo "clientn N error DEV"
echo " blocks i/o to DEV"
echo " causes KILLPATH to run"
commit aa0092c7d0244c0461bc2a13f62c0250ba7e43a8
Author: David Teigland <teigland@redhat.com>
Date: Fri Aug 10 15:44:15 2012 -0500
sanlock: base kill sig on last renewal
Tracking progression through the grace time by
counting one retry per second doesn't work in
the case where sanlock doesn't run every second
(e.g. sigstop or delayed scheduling). This will
cause sanlock to attempt to use killpath even
when there's nearly no time left before reset.
So, the transition to sigkill should depend on
the time elapsed since the last lease renewal.
Signed-off-by: David Teigland <teigland@redhat.com>
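The heart of the change is the grace-window test added to kill_pids() in the
diff below. As a minimal, self-contained sketch of that arithmetic (the helper
name and parameter list here are illustrative, not sanlock API; the T40/T120/T160
figures assume the default timeline documented in src/timeouts.h):

  #include <stdint.h>

  /* Sketch only: the patch computes this inline in kill_pids().
   * With the default timeline (last renewal at T40,
   * id_renewal_fail_seconds 80, kill_grace_seconds 40), graceful kill
   * using killpath or SIGTERM is allowed until T160, then SIGKILL. */
  int in_grace_period(uint64_t now, uint64_t renewal_last_success,
                      int id_renewal_fail_seconds, int kill_grace_seconds)
  {
          return now < (renewal_last_success +
                        id_renewal_fail_seconds +
                        kill_grace_seconds);
  }

In the patch, sig is then chosen as SIGRUNPATH (killpath) or SIGTERM while the
grace window holds and SIGKILL afterwards, with the SANLK_RESTRICT_* flags
swapping SIGTERM/SIGKILL as before. Counting kill attempts only worked if
kill_pids() ran roughly once per second; measuring against the monotonic clock
and the last successful renewal keeps the SIGKILL transition anchored to the
watchdog deadline even when the daemon is stopped or starved of CPU.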
diff --git a/src/cmd.c b/src/cmd.c
index bc7d7da..c293275 100644
--- a/src/cmd.c
+++ b/src/cmd.c
@@ -1359,7 +1359,7 @@ static int print_state_daemon(char *str)
"id_renewal=%d "
"id_renewal_fail=%d "
"id_renewal_warn=%d "
- "kill_count_grace=%d "
+ "kill_grace_seconds=%d "
"helper_pid=%d "
"helper_kill_fd=%d "
"helper_full_count=%u "
@@ -1371,7 +1371,7 @@ static int print_state_daemon(char *str)
main_task.id_renewal_seconds,
main_task.id_renewal_fail_seconds,
main_task.id_renewal_warn_seconds,
- kill_count_grace,
+ kill_grace_seconds,
helper_pid,
helper_kill_fd,
helper_full_count,
diff --git a/src/main.c b/src/main.c
index e5f0885..3ecc1f5 100644
--- a/src/main.c
+++ b/src/main.c
@@ -124,6 +124,11 @@ static void send_helper_kill(struct space *sp, struct client *cl, int sig)
if ((cl->flags & CL_RUNPATH_SENT) && (sig == SIGRUNPATH))
return;
+ if (helper_kill_fd == -1) {
+ log_error("send_helper_kill pid %d no fd", cl->pid);
+ return;
+ }
+
memset(&hm, 0, sizeof(hm));
if (sig == SIGRUNPATH) {
@@ -535,9 +540,9 @@ static int client_using_space(struct client *cl, struct space *sp)
static void kill_pids(struct space *sp)
{
struct client *cl;
- uint64_t now;
+ uint64_t now, last_success;
int ci, fd, pid, sig;
- int do_kill;
+ int do_kill, in_grace;
/*
* all remaining pids using sp are stuck, we've made max attempts to
@@ -546,6 +551,17 @@ static void kill_pids(struct space *sp)
if (sp->killing_pids > 1)
return;
+ /*
+ * If we happen to renew our lease after we've started killing pids,
+ * the period we allow for graceful shutdown will be extended. This
+ * is an incidental effect, although it may be nice. The previous
+ * behavior would still be ok, where we only ever allow up to
+ * kill_grace_seconds for graceful shutdown before moving to sigkill.
+ */
+ pthread_mutex_lock(&sp->mutex);
+ last_success = sp->lease_status.renewal_last_success;
+ pthread_mutex_unlock(&sp->mutex);
+
now = monotime();
for (ci = 0; ci <= client_maxi; ci++) {
@@ -578,32 +594,33 @@ static void kill_pids(struct space *sp)
fd = cl->fd;
pid = cl->pid;
-
/*
- * from zero to kill_count_grace seconds, we try killing
- * the pid with either killpath or sigterm. killpath if
- * it's configured and and we've seen a helper status recently.
- * (sigkill will be used in place of sigterm if restricted.)
- *
- * after kill_count_grace seconds, we'll try killing the
- * pid with sigkill. (sigterm will be used in place of
- * sigkill if restricted.)
+ * the transition from using killpath/sigterm to sigkill
+ * is when now >=
+ * last successful lease renewal +
+ * id_renewal_fail_seconds +
+ * kill_grace_seconds
*/
- if (cl->killpath[0] &&
- (helper_kill_fd != -1) &&
- (kill_count_grace > 0) &&
- (cl->kill_count <= kill_count_grace) &&
- (now - helper_last_status < (HELPER_STATUS_INTERVAL * 2)))
+ in_grace = now < (last_success + main_task.id_renewal_fail_seconds + kill_grace_seconds);
+
+ if ((kill_grace_seconds > 0) && in_grace && cl->killpath[0]) {
sig = SIGRUNPATH;
- else if (cl->restrict & SANLK_RESTRICT_SIGKILL)
+ } else if (in_grace) {
sig = SIGTERM;
- else if (cl->restrict & SANLK_RESTRICT_SIGTERM)
+ } else {
sig = SIGKILL;
- else if ((kill_count_grace > 0) &&
- (cl->kill_count <= kill_count_grace))
+ }
+
+ /*
+ * sigterm will be used in place of sigkill if restricted
+ * sigkill will be used in place of sigterm if restricted
+ */
+
+ if ((sig == SIGKILL) && (cl->restrict & SANLK_RESTRICT_SIGKILL))
sig = SIGTERM;
- else
+
+ if ((sig == SIGTERM) && (cl->restrict & SANLK_RESTRICT_SIGTERM))
sig = SIGKILL;
do_kill = 1;
@@ -1966,7 +1983,7 @@ static int read_command_line(int argc, char *argv[])
if (com.type == COM_DAEMON) {
sec = atoi(optionarg);
if (sec <= 60 && sec >= 0)
- kill_count_grace = sec;
+ kill_grace_seconds = sec;
} else {
com.local_host_generation = atoll(optionarg);
}
@@ -2343,8 +2360,8 @@ int main(int argc, char *argv[])
/* initialize global EXTERN variables */
- kill_count_max = 60;
- kill_count_grace = DEFAULT_GRACE_SEC;
+ kill_count_max = 100;
+ kill_grace_seconds = DEFAULT_GRACE_SEC;
helper_ci = -1;
helper_pid = -1;
helper_kill_fd = -1;
diff --git a/src/sanlock_internal.h b/src/sanlock_internal.h
index 9950ebd..9a30763 100644
--- a/src/sanlock_internal.h
+++ b/src/sanlock_internal.h
@@ -316,7 +316,7 @@ EXTERN int external_shutdown;
EXTERN char our_host_name_global[SANLK_NAME_LEN+1];
EXTERN int kill_count_max;
-EXTERN int kill_count_grace;
+EXTERN int kill_grace_seconds;
EXTERN int helper_ci;
EXTERN int helper_pid;
EXTERN int helper_kill_fd;
diff --git a/src/timeouts.h b/src/timeouts.h
index f62bb6f..80b9fc9 100644
--- a/src/timeouts.h
+++ b/src/timeouts.h
@@ -226,7 +226,7 @@
*
* Working backward from the earlier watchdog firing at T170, leaving 10 seconds
* for SIGKILL to succeed, we need to begin SIGKILL at T160. This means we
- * have from T120 to T160 to allow graceful kill to complete. So, kill_count_grace
+ * have from T120 to T160 to allow graceful kill to complete. So, kill_grace_seconds
* should be set to 40 by default (T120 to T160).
*
* T40: last successful disk renewal
@@ -234,11 +234,6 @@
* T160 - T169: SIGKILL once per second (10 sec)
* T170 - T179: watchdog fires sometime (SIGKILL continues)
* T180: other hosts acquire our leases
- *
- * The interval between each kill count/attempt is approx 1 sec,
- * so kill_count/kill_count_grace/kill_count_max serve as both
- * the number/count of attempts and the number of seconds spent
- * using that kind of termination.
*/
commit 871d47a6b0d9b1e07600b773c2aa3c34d99e2af1
Author: David Teigland <teigland@redhat.com>
Date: Fri Aug 10 10:41:47 2012 -0500
wdmd: pet after reopen and use 1 sec interval after failure
We need to do a keepalive ioctl after reopening the device.
Also, delaying the final check until the last second may
not leave enough time to reactivate the
watchdog, so check every second after a test failure.
Signed-off-by: David Teigland <teigland@redhat.com>
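The keepalive in question is the standard Linux watchdog ioctl
(WDIOC_KEEPALIVE from linux/watchdog.h). A minimal sketch of the
reopen-then-pet sequence the patch enforces, with an illustrative helper name
and no error logging (the real code lives in open_dev()/pet_watchdog() in
wdmd/main.c):

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/watchdog.h>

  /* Illustrative helper: after close_watchdog_unclean() has dropped the fd,
   * reopen /dev/watchdog and pet it immediately so the hardware timer is
   * reset as soon as the tests pass again, instead of waiting for the next
   * scheduled keepalive. */
  int reopen_and_pet(void)
  {
          int fd = open("/dev/watchdog", O_WRONLY);
          if (fd < 0)
                  return -1;
          ioctl(fd, WDIOC_KEEPALIVE, 0);  /* reset the hardware timer now */
          return fd;
  }

Dropping to RECOVER_TEST_INTERVAL (1 second) after a failure serves the same
goal: a recovery is noticed almost immediately rather than possibly on the
last test before the fire timeout.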
diff --git a/tests/sanlk_client.c b/tests/sanlk_client.c
index f63356d..a0efc61 100644
--- a/tests/sanlk_client.c
+++ b/tests/sanlk_client.c
@@ -60,12 +60,16 @@ int main(int argc, char *argv[])
return -1;
}
+ if (!strcmp(path, "none"))
+ goto acquire;
+
rv = sanlock_killpath(sock, SANLK_KILLPATH_PID, path, args);
if (rv < 0) {
fprintf(stderr, "killpath error %d\n", rv);
return -1;
}
+ acquire:
rv = sanlock_acquire(sock, -1, 0, 1, &res, NULL);
if (rv < 0) {
fprintf(stderr, "acquire error %d\n", rv);
diff --git a/wdmd/main.c b/wdmd/main.c
index e289f44..2e41e91 100644
--- a/wdmd/main.c
+++ b/wdmd/main.c
@@ -44,6 +44,7 @@
#define RELEASE_VERSION "2.4"
#define DEFAULT_TEST_INTERVAL 10
+#define RECOVER_TEST_INTERVAL 1
#define DEFAULT_FIRE_TIMEOUT 60
#define DEFAULT_HIGH_PRIORITY 1
@@ -57,6 +58,7 @@ static int daemon_quit;
static int daemon_debug;
static int socket_gid;
static time_t last_keepalive;
+static time_t last_closeunclean;
static char lockfile_path[PATH_MAX];
static int dev_fd = -1;
static int shm_fd;
@@ -70,7 +72,8 @@ struct script_status {
is not very sophisticated, but it's simple. If we wait up to 2 seconds
for each script to exit, and have 5 scripts, that's up to 10 seconds we
spend in test_scripts, and it's simplest if the max time in test_scripts
- does not excede the test_interval (10). */
+ does not excede the test_interval (10). FIXME: this is not entirely
+ true since the test_interval was changed to 1 after a failure. */
#define SCRIPT_WAIT_SECONDS 2
#define MAX_SCRIPTS 4
@@ -387,6 +390,7 @@ static int setup_clients(void)
static int test_clients(void)
{
uint64_t t;
+ time_t last_ping;
int fail_count = 0;
int i;
@@ -398,14 +402,20 @@ static int test_clients(void)
if (!client[i].expire)
continue;
+ if (last_keepalive > last_closeunclean)
+ last_ping = last_keepalive;
+ else
+ last_ping = last_closeunclean;
+
if (t >= client[i].expire) {
- log_error("test failed ci %d pid %d now %llu keepalive %llu renewal %llu expire %llu %s",
- i, client[i].pid,
+ log_error("test failed rem %d now %llu ping %llu close %llu renewal %llu expire %llu client %d %s",
+ DEFAULT_FIRE_TIMEOUT - (int)(t - last_ping),
(unsigned long long)t,
(unsigned long long)last_keepalive,
+ (unsigned long long)last_closeunclean,
(unsigned long long)client[i].renewal,
(unsigned long long)client[i].expire,
- client[i].name);
+ client[i].pid, client[i].name);
fail_count++;
continue;
}
@@ -431,12 +441,13 @@ static int test_clients(void)
*/
if (t >= client[i].expire - DEFAULT_TEST_INTERVAL) {
- log_error("test warning pid %d now %llu keepalive %llu renewal %llu expire %llu",
- client[i].pid,
+ log_error("test warning now %llu ping %llu close %llu renewal %llu expire %llu client %d %s",
(unsigned long long)t,
(unsigned long long)last_keepalive,
+ (unsigned long long)last_closeunclean,
(unsigned long long)client[i].renewal,
- (unsigned long long)client[i].expire);
+ (unsigned long long)client[i].expire,
+ client[i].pid, client[i].name);
fail_count++;
continue;
}
@@ -718,6 +729,8 @@ static void close_watchdog_unclean(void)
log_error("/dev/watchdog closed unclean");
close(dev_fd);
dev_fd = -1;
+
+ last_closeunclean = monotime();
}
static void close_watchdog(void)
@@ -913,23 +926,30 @@ static int test_loop(void)
if (!fail_count) {
if (dev_fd == -1) {
- log_error("/dev/watchdog reopen");
open_dev();
+ pet_watchdog();
+ log_error("/dev/watchdog reopen");
} else {
pet_watchdog();
}
+
+ test_interval = DEFAULT_TEST_INTERVAL;
} else {
/* If we can patch the kernel so that close
does not generate a ping, then we can skip
this close, and just not pet the device in
this case. Also see test_client above. */
close_watchdog_unclean();
+
+ test_interval = RECOVER_TEST_INTERVAL;
}
}
sleep_seconds = test_time + test_interval - monotime();
- poll_timeout = (sleep_seconds > 0) ? sleep_seconds * 1000 : 1;
- log_debug("sleep_seconds %d", sleep_seconds);
+ poll_timeout = (sleep_seconds > 0) ? sleep_seconds * 1000 : 500;
+
+ log_debug("test_interval %d sleep_seconds %d poll_timeout %d",
+ test_interval, sleep_seconds, poll_timeout);
}
return 0;