Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
---------------------------------------------------------------
commit f5faeb173bf2de29b9167b10e255ff61f45b3b71
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Wed Mar 18 17:40:27 2015 +0100
[copr] don't allow acquiring VMs that were last checked before the server restart.
---------------------------------------------------------------
backend/backend/daemons/vm_master.py | 2 +-
backend/backend/vm_manage/__init__.py | 11 +++--
backend/backend/vm_manage/manager.py | 31 +++++++++----
backend/tests/deamons/test_vm_master.py | 3 +
backend/tests/vm_manager/test_manager.py | 73 +++++++++++++++++++++++++++---
5 files changed, 99 insertions(+), 21 deletions(-)
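
The gist of the change: a VM whose "last_health_check" timestamp predates
the backend's "server_start_timestamp" has not been health-checked since
the restart, so acquire_vm must refuse it. A minimal sketch of the gating
condition (plain Python with illustrative names, not the project API):

    def may_acquire(last_health_check, server_start_timestamp):
        # Redis returns hash fields as strings; a missing field is None.
        if last_health_check is None or server_start_timestamp is None:
            return False
        return float(last_health_check) > float(server_start_timestamp)

The real check lives inside the "acquire_vm" Lua script below, so the test
and the state change happen atomically in Redis.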
diff --git a/backend/backend/daemons/vm_master.py b/backend/backend/daemons/vm_master.py
index 3ea33f5..0e0029c 100644
--- a/backend/backend/daemons/vm_master.py
+++ b/backend/backend/daemons/vm_master.py
@@ -149,7 +149,7 @@ class VmMaster(Process):
             raise RuntimeError("provide Spawner and Terminator to run VmManager daemon")

         setproctitle("VM master")
-
+        self.vmm.mark_server_start()
         self.kill_received = False
         self.event_handler = EventHandler(self.vmm)
diff --git a/backend/backend/vm_manage/__init__.py b/backend/backend/vm_manage/__init__.py
index 8ea5338..556f8d1 100644
--- a/backend/backend/vm_manage/__init__.py
+++ b/backend/backend/vm_manage/__init__.py
@@ -25,17 +25,18 @@
 PUBSUB_INTERRUPT_BUILDER = "copr:backend:interrupt_build:pubsub::{}"
 PUBSUB_VM_TERMINATION = "copr:backend:vm_termination:pubsub::{vm_name}"
 # message should contain string "terminating"

-KEY_VM_GROUPS = "copr:backend:vm_groups:set::"
-# set of available groups,
-# TODO: remove it, use opts.build_groups_count
-
 KEY_VM_POOL = "copr:backend:vm_pool:set::{group}"
 # set of vm_names of vm available for `group`

 KEY_VM_POOL_INFO = "copr:backend:vm_pool_info:hset::{group}"
-# hashset with additional information for `group`, used fields:
+# hset with additional information for `group`, used fields:
 # - "last_vm_spawn_start": latest time when VM spawn was initiated for this `group`

+KEY_SERVER_INFO = "copr:backend:server_info:hset::"
+# common shared info about the server, not strictly related to VMM, maybe move it to helpers later
+# used fields:
+# "server_start_timestamp" -> unixtime string
+
 KEY_VM_INSTANCE = "copr:backend:vm_instance:hset::{vm_name}"
 # hset to store VmDescriptor
diff --git a/backend/backend/vm_manage/manager.py b/backend/backend/vm_manage/manager.py
index b4f059f..3f2f9bd 100644
--- a/backend/backend/vm_manage/manager.py
+++ b/backend/backend/vm_manage/manager.py
@@ -14,7 +14,7 @@ from backend.exceptions import VmError, NoVmAvailable
from backend.helpers import get_redis_connection
from .models import VmDescriptor
-from . import VmStates, KEY_VM_INSTANCE, KEY_VM_POOL, EventTopics, PUBSUB_MB
+from . import VmStates, KEY_VM_INSTANCE, KEY_VM_POOL, EventTopics, PUBSUB_MB, KEY_SERVER_INFO
# KEYS[1]: VMD key
# ARGV[1] current timestamp for `last_health_check`
@@ -43,10 +43,16 @@ local old_state = redis.call("HGET", KEYS[1], "state")
 if old_state ~= "ready" then
     return nil
 else
-    redis.call("HMSET", KEYS[1], "state", "in_use", "bound_to_user", ARGV[1],
-               "used_by_pid", ARGV[2], "in_use_since", ARGV[3],
-               "task_id", ARGV[4], "build_id", ARGV[5], "chroot", ARGV[6])
-    return "OK"
+    local last_health_check = tonumber(redis.call("HGET", KEYS[1], "last_health_check"))
+    local server_restart_time = tonumber(redis.call("HGET", KEYS[2], "server_start_timestamp"))
+    if last_health_check and server_restart_time and last_health_check > server_restart_time then
+        redis.call("HMSET", KEYS[1], "state", "in_use", "bound_to_user", ARGV[1],
+                   "used_by_pid", ARGV[2], "in_use_since", ARGV[3],
+                   "task_id", ARGV[4], "build_id", ARGV[5], "chroot", ARGV[6])
+        return "OK"
+    else
+        return nil
+    end
 end
"""
@@ -185,19 +191,25 @@ class VmManager(object):
         vm_key = KEY_VM_INSTANCE.format(vm_name=vm_name)
         self.lua_scripts["mark_vm_check_failed"](keys=[vm_key])

+    def mark_server_start(self):
+        self.rc.hset(KEY_SERVER_INFO, "server_start_timestamp", time.time())
+
     def acquire_vm(self, group, username, pid, task_id=None, build_id=None, chroot=None):
         """
         Try to acquire a VM from the pool
+
         :param group: builder group id, as defined in config
         :type group: int
         :param username: build owner username, VMM prefers to reuse an existing VM which was used by the same user
         :param pid: builder pid, used to release the VM after an unhandled death of the build process
+
+        :rtype: VmDescriptor
+        :raises: NoVmAvailable when the manager couldn't find a suitable VM for the given group and user
         """
-        # TODO: reject request if user acquired #machines > threshold_vm_per_user
         vmd_list = self.get_all_vm_in_group(group)
         vm_count_used_by_user = len([
             vmd for vmd in vmd_list if
-            vmd.bound_to_user==username and vmd.state==VmStates.IN_USE
+            vmd.bound_to_user == username and vmd.state == VmStates.IN_USE
         ])
         if vm_count_used_by_user >= self.opts.build_groups[group]["max_vm_per_user"]:
             raise NoVmAvailable("No VM are available, user `{}` already acquired #{} VMs"
@@ -213,8 +225,9 @@
         # TODO: record last_copr_backend_startup_time at startup
         for vmd in all_vms:
             vm_key = KEY_VM_INSTANCE.format(vm_name=vmd.vm_name)
-            if self.lua_scripts["acquire_vm"](keys=[vm_key], args=[username, pid, time.time(),
-                                              task_id, build_id, chroot]) == "OK":
+            if self.lua_scripts["acquire_vm"](keys=[vm_key, KEY_SERVER_INFO],
+                                              args=[username, pid, time.time(),
+                                                    task_id, build_id, chroot]) == "OK":
                 return vmd
         else:
             raise NoVmAvailable("No VM are available, please wait in queue. Group: {}".format(group))
diff --git a/backend/tests/deamons/test_vm_master.py b/backend/tests/deamons/test_vm_master.py
index 191ba1d..0a8d8e1 100644
--- a/backend/tests/deamons/test_vm_master.py
+++ b/backend/tests/deamons/test_vm_master.py
@@ -355,6 +355,7 @@ class TestVmMaster(object):
None
]
self.vm_master.do_cycle = types.MethodType(mc_do_cycle, self.vm_master)
+ self.vmm.mark_server_start = MagicMock()
self.stage = 0
def on_sleep(*args, **kwargs):
@@ -371,6 +372,8 @@ class TestVmMaster(object):
assert mc_event_handler.called
assert mc_event_handler.return_value.start.called
+ assert self.vmm.mark_server_start.called
+
err_log = self.queue.get(timeout=1)
assert err_log is not None
assert "Unhandled error:" in err_log["what"]
diff --git a/backend/tests/vm_manager/test_manager.py b/backend/tests/vm_manager/test_manager.py
index 0faff78..1769b21 100644
--- a/backend/tests/vm_manager/test_manager.py
+++ b/backend/tests/vm_manager/test_manager.py
@@ -1,5 +1,6 @@
# coding: utf-8
import json
+import random
import types
import time
@@ -10,7 +11,7 @@ import six
from backend import exceptions
from backend.exceptions import VmError, NoVmAvailable
-from backend.vm_manage import VmStates, KEY_VM_POOL, PUBSUB_VM_TERMINATION, PUBSUB_MB, EventTopics
+from backend.vm_manage import VmStates, KEY_VM_POOL, PUBSUB_VM_TERMINATION, PUBSUB_MB, EventTopics, KEY_SERVER_INFO
from backend.vm_manage.manager import VmManager
from backend.daemons.vm_master import VmMaster
from backend.helpers import get_redis_connection
@@ -191,10 +192,43 @@ class TestManager(object):
self.checker.run_check_health.reset_mock()
assert vmd.get_field(self.rc, "state") == state
- def test_acquire_vm_extra_kwargs(self):
+ def test_acquire_vm_no_vm_after_server_restart(self, mc_time):
vmd = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, self.group)
vmd.store_field(self.rc, "state", VmStates.READY)
+        # both last_health_check and server_start_timestamp are undefined
+ with pytest.raises(NoVmAvailable):
+ self.vmm.acquire_vm(0, self.username, 42)
+
+ # only server start timestamp is defined
+ mc_time.time.return_value = 1
+ self.vmm.mark_server_start()
+ with pytest.raises(NoVmAvailable):
+ self.vmm.acquire_vm(0, self.username, 42)
+
+ # only last_health_check defined
+ self.rc.delete(KEY_SERVER_INFO)
+ vmd.store_field(self.rc, "last_health_check", 0)
+ with pytest.raises(NoVmAvailable):
+ self.vmm.acquire_vm(0, self.username, 42)
+
+ # both defined but last_health_check < server_start_time
+ self.vmm.mark_server_start()
+ with pytest.raises(NoVmAvailable):
+ self.vmm.acquire_vm(0, self.username, 42)
+
+ # and finally last_health_check > server_start_time
+ vmd.store_field(self.rc, "last_health_check", 2)
+ vmd_res = self.vmm.acquire_vm(0, self.username, 42)
+ assert vmd.vm_name == vmd_res.vm_name
+
+ def test_acquire_vm_extra_kwargs(self, mc_time):
+ mc_time.time.return_value = 0
+ self.vmm.mark_server_start()
+ vmd = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, self.group)
+ vmd.store_field(self.rc, "state", VmStates.READY)
+ vmd.store_field(self.rc, "last_health_check", 2)
+
kwargs = {
"task_id": "20-fedora-20-x86_64",
"build_id": "20",
@@ -204,13 +238,18 @@ class TestManager(object):
for k, v in kwargs.items():
assert vmd_got.get_field(self.rc, k) == v
- def test_acquire_vm(self):
+ def test_acquire_vm(self, mc_time):
+ mc_time.time.return_value = 0
+ self.vmm.mark_server_start()
+
vmd_main = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, self.group)
        vmd_alt = self.vmm.add_vm_to_pool(self.vm_ip, "alternative", self.group)
vmd_main.store_field(self.rc, "state", VmStates.READY)
vmd_alt.store_field(self.rc, "state", VmStates.READY)
vmd_alt.store_field(self.vmm.rc, "bound_to_user", self.username)
+ vmd_main.store_field(self.rc, "last_health_check", 2)
+ vmd_alt.store_field(self.rc, "last_health_check", 2)
        vmd_got_first = self.vmm.acquire_vm(group=self.group, username=self.username, pid=self.pid)
assert vmd_got_first.vm_name == "alternative"
@@ -220,12 +259,16 @@ class TestManager(object):
with pytest.raises(NoVmAvailable):
self.vmm.acquire_vm(group=self.group, username=self.username, pid=self.pid)
- def test_acquire_vm_per_user_limit(self):
+ def test_acquire_vm_per_user_limit(self, mc_time):
+ mc_time.time.return_value = 0
+ self.vmm.mark_server_start()
+
max_vm_per_user = self.opts.build_groups[0]["max_vm_per_user"]
acquired_vmd_list = []
for idx in range(max_vm_per_user + 1):
vmd = self.vmm.add_vm_to_pool("127.0.{}.1".format(idx),
"vm_{}".format(idx), self.group)
vmd.store_field(self.rc, "state", VmStates.READY)
+ vmd.store_field(self.rc, "last_health_check", 2)
acquired_vmd_list.append(vmd)
for idx in range(max_vm_per_user):
@@ -237,8 +280,12 @@ class TestManager(object):
acquired_vmd_list[-1].store_field(self.rc, "state", VmStates.READY)
self.vmm.acquire_vm(0, self.username, 42)
- def test_acquire_only_ready_state(self):
+ def test_acquire_only_ready_state(self, mc_time):
+ mc_time.time.return_value = 0
+ self.vmm.mark_server_start()
+
vmd_main = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, self.group)
+ vmd_main.store_field(self.rc, "last_health_check", 2)
for state in [VmStates.IN_USE, VmStates.GOT_IP, VmStates.CHECK_HEALTH,
VmStates.TERMINATING, VmStates.CHECK_HEALTH_FAILED]:
@@ -246,13 +293,19 @@ class TestManager(object):
with pytest.raises(NoVmAvailable):
                self.vmm.acquire_vm(group=self.group, username=self.username, pid=self.pid)
- def test_acquire_and_release_vm(self):
+ def test_acquire_and_release_vm(self, mc_time):
+ mc_time.time.return_value = 0
+ self.vmm.mark_server_start()
+
+
vmd_main = self.vmm.add_vm_to_pool(self.vm_ip, self.vm_name, self.group)
        vmd_alt = self.vmm.add_vm_to_pool(self.vm_ip, "alternative", self.group)
vmd_main.store_field(self.rc, "state", VmStates.READY)
vmd_alt.store_field(self.rc, "state", VmStates.READY)
vmd_alt.store_field(self.vmm.rc, "bound_to_user", self.username)
+ vmd_main.store_field(self.rc, "last_health_check", 2)
+ vmd_alt.store_field(self.rc, "last_health_check", 2)
        vmd_got_first = self.vmm.acquire_vm(group=self.group, username=self.username, pid=self.pid)
assert vmd_got_first.vm_name == "alternative"
@@ -394,3 +447,11 @@ class TestManager(object):
r3 = sorted(r3, key=lambda vmd: vmd.vm_name)
assert r3[0].vm_name == "a1"
assert r3[1].vm_name == "a2"
+
+ def test_mark_server_start(self, mc_time):
+ assert self.rc.hget(KEY_SERVER_INFO, "server_start_timestamp") is None
+ for i in range(100):
+ val = 100 * i + 0.12345
+ mc_time.time.return_value = val
+ self.vmm.mark_server_start()
+            assert self.rc.hget(KEY_SERVER_INFO, "server_start_timestamp") == "{}".format(val)