Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
---------------------------------------------------------------
commit c7164c22053af16f6de446b124c74bcbb3bf87b9
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Thu Apr 16 20:17:49 2015 +0200
[backend] defer sending job to worker if job owner acquired too many VMs
---------------------------------------------------------------
backend/backend/daemons/job_grab.py | 24 ++++++++++++++++---
backend/backend/mockremote/__init__.py | 2 +-
backend/backend/vm_manage/manager.py | 39 ++++++++++++++++++++-----------
backend/tests/deamons/test_job_grab.py | 8 +++++-
4 files changed, 52 insertions(+), 21 deletions(-)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index e5cb3ef..31d694d 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -26,6 +26,9 @@ from ..frontend import FrontendClient
# TODO: Replace entire model with asynchronous queue, so that frontend push task,
# and workers listen for them
+from ..vm_manage.manager import VmManager
+
+
class CoprJobGrab(Process):
"""
@@ -46,6 +49,11 @@ class CoprJobGrab(Process):
super(CoprJobGrab, self).__init__(name="jobgrab")
self.opts = opts
+ self.arch_to_group_id_map = dict()
+ for group in self.opts.build_groups:
+ for arch in group["archs"]:
+ self.arch_to_group_id_map[arch] = group["id"]
+
self.task_queues_by_arch = {}
self.task_queues_by_group = {}
@@ -60,6 +68,8 @@ class CoprJobGrab(Process):
self.log = get_redis_logger(self.opts, "backend.job_grab",
"job_grab")
+ self.vm_manager = VmManager(opts=self.opts, logger=self.log)
+
def connect_queues(self):
"""
Connects to the retask queues. One queue per builders group.
@@ -100,14 +110,19 @@ class CoprJobGrab(Process):
count = 0
if "task_id" in task:
if task["task_id"] not in self.added_jobs:
-
- # TODO: produces memory leak!
- self.added_jobs.add(task["task_id"])
arch = task["chroot"].split("-")[2]
if arch not in self.task_queues_by_arch:
                    raise CoprJobGrabError("No builder group for architecture: {}, task: {}"
                                           .format(arch, task))
+ username = task["project_owner"]
+ group_id = int(self.arch_to_group_id_map[arch])
+ if not self.vm_manager.can_user_acquire_more_vm(username, group_id):
+                    self.log.debug("User cannot acquire more VMs, don't schedule more tasks")
+ return 0
+
+ self.added_jobs.add(task["task_id"])
+
task_obj = Task(task)
self.task_queues_by_arch[arch].enqueue(task_obj)
count += 1
@@ -172,7 +187,7 @@ class CoprJobGrab(Process):
return
try:
msg = json.loads(raw["data"])
-        # msg: {"action": ["remove"|"reschedule"], "task_id": ..., "build_id"..., "chroot": ...}
+        # msg: {"action": ("remove"|"reschedule"), "task_id": ..., "build_id"..., "chroot": ...}
        # Actions: "remove" simply remove `task_id` from self.added_job
        #   "reschedule" additionally call frontend and set pending state before removal
if "action" not in msg:
@@ -221,6 +236,7 @@ class CoprJobGrab(Process):
setproctitle("CoprJobGrab")
self.connect_queues()
self.listen_to_pubsub()
+ self.vm_manager.post_init()
self.log.info("JobGrub started.")
try:
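
Note on the job_grab change: route_build_task() now checks the owner's VM usage *before* adding the task_id to added_jobs, so a deferred task is simply seen again on the next grab cycle rather than being lost. A simplified sketch of the resulting control flow (not the verbatim method; error handling and the hunk's surrounding loop are elided):

    def route_build_task(self, task):
        # Already routed earlier -- nothing to do.
        if task["task_id"] in self.added_jobs:
            return 0
        arch = task["chroot"].split("-")[2]          # "fedora-20-x86_64" -> "x86_64"
        group_id = int(self.arch_to_group_id_map[arch])
        # Defer: leave the task out of added_jobs so the next pass retries it.
        if not self.vm_manager.can_user_acquire_more_vm(task["project_owner"], group_id):
            return 0
        self.added_jobs.add(task["task_id"])
        self.task_queues_by_arch[arch].enqueue(Task(task))
        return 1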
diff --git a/backend/backend/mockremote/__init__.py b/backend/backend/mockremote/__init__.py
index 42e8130..03df070 100755
--- a/backend/backend/mockremote/__init__.py
+++ b/backend/backend/mockremote/__init__.py
@@ -354,7 +354,7 @@ class MockRemote(object):
with open(info_file_path, 'w') as info_file:
info_file.writelines([
"build_id={}".format(self.job.build_id),
- "builder_ip={}".format(self.builder.hostname)])
+ "\nbuilder_ip={}".format(self.builder.hostname)])
except Exception as error:
            self.log.exception("Failed to mark build {} with build_id".format(error))
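
The mockremote hunk is a one-character fix: file.writelines() writes its items back to back without adding any separator, so the old code fused both keys onto a single line of the info file. A standalone illustration of the behavior, using io.StringIO in place of the real info file:

    import io

    old, new = io.StringIO(), io.StringIO()
    old.writelines(["build_id={}".format(42), "builder_ip={}".format("10.0.0.5")])
    new.writelines(["build_id={}".format(42), "\nbuilder_ip={}".format("10.0.0.5")])
    print(repr(old.getvalue()))   # 'build_id=42builder_ip=10.0.0.5'    -- keys fused
    print(repr(new.getvalue()))   # 'build_id=42\nbuilder_ip=10.0.0.5'  -- one key per line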
diff --git a/backend/backend/vm_manage/manager.py b/backend/backend/vm_manage/manager.py
index d519531..e9f92d7 100644
--- a/backend/backend/vm_manage/manager.py
+++ b/backend/backend/vm_manage/manager.py
@@ -222,17 +222,9 @@ class VmManager(object):
def mark_server_start(self):
self.rc.hset(KEY_SERVER_INFO, "server_start_timestamp", time.time())
-    def acquire_vm(self, group, username, pid, task_id=None, build_id=None, chroot=None):
+ def can_user_acquire_more_vm(self, username, group):
"""
- Try to acquire VM from pool
-
- :param group: builder group id, as defined in config
- :type group: int
-        :param username: build owner username, VMM prefer to reuse an existing VM which was used by the same user
- :param pid: builder pid to release VM after build process unhandled death
-
- :rtype: VmDescriptor
-        :raises: NoVmAvailable when manager couldn't find suitable VM for the given group and user
+        :return bool: True when the user is allowed to acquire more VMs
"""
vmd_list = self.get_all_vm_in_group(group)
vm_count_used_by_user = len([
@@ -242,13 +234,32 @@ class VmManager(object):
self.log.debug("# vm by user: {}, limit:{} ".format(
vm_count_used_by_user,
self.opts.build_groups[group]["max_vm_per_user"]
))
-
        if vm_count_used_by_user >= self.opts.build_groups[group]["max_vm_per_user"]:
            # TODO: this check isn't reliable, if two (or more) processes check VM list
- # at the +- same time, they could acquire more VMs
+ # at the +- same time, they could acquire more VMs
# proper solution: do this check inside lua script
-            raise NoVmAvailable("No VM are available, user `{}` already acquired #{} VMs"
-                                .format(username, vm_count_used_by_user))
+            self.log.debug("No VMs are available, user `{}` already acquired #{} VMs"
+                           .format(username, vm_count_used_by_user))
+ return False
+ else:
+ return True
+
+    def acquire_vm(self, group, username, pid, task_id=None, build_id=None, chroot=None):
+ """
+ Try to acquire VM from pool
+
+ :param group: builder group id, as defined in config
+ :type group: int
+        :param username: build owner username, VMM prefers to reuse an existing VM that was used by the same user
+ :param pid: builder pid to release VM after build process unhandled death
+
+ :rtype: VmDescriptor
+        :raises: NoVmAvailable when the manager couldn't find a suitable VM for the given group and user
+ """
+ vmd_list = self.get_all_vm_in_group(group)
+ if not self.can_user_acquire_more_vm(username, group):
+            raise NoVmAvailable("No VMs are available, user `{}` already acquired too many VMs"
+                                .format(username))
ready_vmd_list = [vmd for vmd in vmd_list if vmd.state == VmStates.READY]
# trying to find VM used by this user
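
The TODO kept in this hunk names the real fix: the read-then-decide check races, since two workers can both read the VM list, both see the user under the limit, and both acquire. Redis executes Lua scripts atomically, so moving the check server-side closes that window. A rough sketch of that direction with redis-py's register_script (the counter key layout here is hypothetical, not Copr's actual schema, which keeps per-VM hashes):

    import redis

    # Atomically acquire one slot if the user's count is under the limit.
    ACQUIRE_IF_UNDER_LIMIT = """
    local used = tonumber(redis.call('GET', KEYS[1]) or '0')
    if used >= tonumber(ARGV[1]) then
        return 0
    end
    redis.call('INCR', KEYS[1])
    return 1
    """

    rc = redis.StrictRedis()
    acquire_slot = rc.register_script(ACQUIRE_IF_UNDER_LIMIT)

    def try_acquire_slot(group, username, limit):
        key = "vm_used::group_{}::user_{}".format(group, username)  # hypothetical key name
        return bool(acquire_slot(keys=[key], args=[limit]))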
diff --git a/backend/tests/deamons/test_job_grab.py b/backend/tests/deamons/test_job_grab.py
index 9707488..c6e9735 100644
--- a/backend/tests/deamons/test_job_grab.py
+++ b/backend/tests/deamons/test_job_grab.py
@@ -106,14 +106,17 @@ class TestJobGrab(object):
self.task_dict_1 = dict(
task_id=12345,
chroot="fedora-20-x86_64",
+ project_owner="foobar",
)
self.task_dict_2 = dict(
task_id=12346,
chroot="fedora-20-armv7",
+ project_owner="foobar",
)
self.task_dict_bad_arch = dict(
task_id=12346,
chroot="fedora-20-s390x",
+ project_owner="foobar",
)
def teardown_method(self, method):
@@ -136,6 +139,7 @@ class TestJobGrab(object):
def init_jg(self, mc_retask_queue, mc_grc):
self.jg = CoprJobGrab(self.opts, self.frontend_client, self.lock)
self.jg.connect_queues()
+ self.jg.vm_manager = MagicMock()
def test_connect_queues(self, mc_retask_queue, mc_grc):
mc_rc = MagicMock()
@@ -179,13 +183,13 @@ class TestJobGrab(object):
for obj in self.jg.task_queues_by_arch.values():
assert not obj.enqueue.called
- def test_route_build_task_correct_group_1(self, init_jg):
+ def test_route_build_task_correct_group_1(self, init_jg,):
assert self.jg.route_build_task(self.task_dict_1) == 1
assert self.jg.task_queues_by_arch["x86_64"].enqueue.called
assert not self.jg.task_queues_by_arch["armv7"].enqueue.called
- def test_route_build_task_correct_group_2(self, init_jg):
+ def test_route_build_task_correct_group_2(self, init_jg, ):
assert self.jg.route_build_task(self.task_dict_2) == 1
assert not self.jg.task_queues_by_arch["x86_64"].enqueue.called
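
The test updates only thread project_owner through the task dicts and stub vm_manager with a MagicMock (whose can_user_acquire_more_vm returns a truthy mock by default, so the existing routing tests still pass). A companion test for the new deferral path -- not part of this commit, just a sketch -- could pin the behavior down directly:

    def test_route_build_task_deferred_when_owner_at_vm_limit(self, init_jg):
        # Stub the limit check to refuse; the task must be deferred:
        # not enqueued and not remembered in added_jobs.
        self.jg.vm_manager.can_user_acquire_more_vm.return_value = False
        assert self.jg.route_build_task(self.task_dict_1) == 0
        assert not self.jg.task_queues_by_arch["x86_64"].enqueue.called
        assert self.task_dict_1["task_id"] not in self.jg.added_jobs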