[copr] master: [frontend] more specific error message in UrlListValidator (40c4f20)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 40c4f20efdbebaacbb2515da802d124b83b9a05d
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Mon Apr 20 13:05:50 2015 +0200
[frontend] more specific error message in UrlListValidator
>---------------------------------------------------------------
frontend/coprs_frontend/coprs/forms.py | 4 ++--
.../coprs/views/backend_ns/backend_general.py | 1 +
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/frontend/coprs_frontend/coprs/forms.py b/frontend/coprs_frontend/coprs/forms.py
index b91600b..f191944 100644
--- a/frontend/coprs_frontend/coprs/forms.py
+++ b/frontend/coprs_frontend/coprs/forms.py
@@ -16,8 +16,8 @@ class UrlListValidator(object):
def __init__(self, message=None):
if not message:
- message = "A list of URLs separated by whitespace characters"
- " is needed ('{0}' doesn't seem to be a URL)."
+ message = "A list of http[s] URLs separated by whitespace characters"
+ " is needed ('{0}' doesn't seem to be a valid URL)."
self.message = message
def __call__(self, form, field):
diff --git a/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py b/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
index 1d252b8..c2ebdc5 100644
--- a/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
+++ b/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
@@ -14,6 +14,7 @@ from whoosh.index import LockError
import logging
log = logging.getLogger(__name__)
+
@backend_ns.route("/waiting/")
@misc.backend_authenticated
def waiting():
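For context, here is a minimal, self-contained sketch of a WTForms-style whitespace-separated URL validator like the one whose message is changed above. The class body is an assumption for illustration, not the actual coprs/forms.py code; note that the two message literals are wrapped in parentheses here so they concatenate into a single string.

    from wtforms.validators import ValidationError

    class UrlListValidatorSketch(object):
        def __init__(self, message=None):
            if not message:
                message = ("A list of http[s] URLs separated by whitespace characters"
                           " is needed ('{0}' doesn't seem to be a valid URL).")
            self.message = message

        def __call__(self, form, field):
            # field.data is the raw textarea content; every whitespace-separated
            # token has to look like an http(s) URL, otherwise fail with the
            # offending token substituted into the message
            for url in (field.data or "").split():
                if not url.startswith(("http://", "https://")):
                    raise ValidationError(self.message.format(url))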
[copr] master: [frontend] /backend/waiting: filter out cancelled builds (85403cf)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 85403cf78f47479f8b2a099b144934aaebc11c16
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Mon Apr 20 13:00:24 2015 +0200
[frontend] /backend/waiting: filter out cancelled builds
>---------------------------------------------------------------
.../coprs_frontend/coprs/logic/builds_logic.py | 29 ++++++++++---------
.../coprs/views/backend_ns/backend_general.py | 11 +++++---
2 files changed, 22 insertions(+), 18 deletions(-)
diff --git a/frontend/coprs_frontend/coprs/logic/builds_logic.py b/frontend/coprs_frontend/coprs/logic/builds_logic.py
index b088ef9..9d3dc3e 100644
--- a/frontend/coprs_frontend/coprs/logic/builds_logic.py
+++ b/frontend/coprs_frontend/coprs/logic/builds_logic.py
@@ -5,6 +5,7 @@ import pprint
import time
from sqlalchemy import or_
from sqlalchemy import and_
+from sqlalchemy.sql import false
from coprs import app
from coprs import db
@@ -21,14 +22,13 @@ log = app.logger
class BuildsLogic(object):
-
@classmethod
def get(cls, build_id):
return models.Build.query.filter(models.Build.id == build_id)
@classmethod
def get_build_tasks(cls, status):
- return models.BuildChroot.query.filter(models.BuildChroot.status == status)\
+ return models.BuildChroot.query.filter(models.BuildChroot.status == status) \
.order_by(models.BuildChroot.build_id.desc())
@classmethod
@@ -55,15 +55,17 @@ class BuildsLogic(object):
Returns BuildChroots which are - waiting to be built or
- older than 2 hours and unfinished
"""
- query = models.BuildChroot.query.join(models.Build).filter(or_(
- models.BuildChroot.status == helpers.StatusEnum("pending"),
- models.BuildChroot.status == helpers.StatusEnum("starting"),
- and_(
- models.BuildChroot.status.in_([
- helpers.StatusEnum("running"), helpers.StatusEnum("failed")]),
- models.Build.started_on < int(time.time() - 1.1 * MAX_BUILD_TIMEOUT),
- models.Build.ended_on.is_(None)
- )
+ query = (models.BuildChroot.query.join(models.Build)
+ .filter(models.Build.canceled == false())
+ .filter(or_(
+ models.BuildChroot.status == helpers.StatusEnum("pending"),
+ models.BuildChroot.status == helpers.StatusEnum("starting"),
+ and_(
+ models.BuildChroot.status.in_([
+ helpers.StatusEnum("running"), helpers.StatusEnum("failed")]),
+ models.Build.started_on < int(time.time() - 1.1 * MAX_BUILD_TIMEOUT),
+ models.Build.ended_on.is_(None)
+ ))
))
query = query.order_by(models.BuildChroot.build_id.asc())
return query
@@ -192,7 +194,7 @@ class BuildsLogic(object):
# update ended_on when everything really ends
# update results when there is repo initialized for every chroot
if (attr == "ended_on" and build.has_unfinished_chroot) or \
- (attr == "results" and build.has_pending_chroot):
+ (attr == "results" and build.has_pending_chroot):
continue
if attr == "ended_on":
@@ -228,7 +230,7 @@ class BuildsLogic(object):
"Unfinished build")
# Only failed, finished, succeeded get here.
- if build.state not in ["cancelled"]: # has nothing in backend to delete
+ if build.state not in ["cancelled"]: # has nothing in backend to delete
# don't delete skipped chroots
chroots_to_delete = [
chroot.name for chroot in build.build_chroots
@@ -289,7 +291,6 @@ class BuildsLogic(object):
class BuildsMonitorLogic(object):
-
@classmethod
def get_monitor_data(cls, copr):
# builds are sorted by build id descending
diff --git a/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py b/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
index 35607c1..1d252b8 100644
--- a/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
+++ b/frontend/coprs_frontend/coprs/views/backend_ns/backend_general.py
@@ -11,6 +11,8 @@ from coprs.views import misc
from coprs.views.backend_ns import backend_ns
from whoosh.index import LockError
+import logging
+log = logging.getLogger(__name__)
@backend_ns.route("/waiting/")
@misc.backend_authenticated
@@ -45,8 +47,8 @@ def waiting():
}
for task in builds_logic.BuildsLogic.get_build_task_queue().limit(200)
]
-
- return flask.jsonify({"actions": actions_list, "builds": builds_list})
+ response_dict = {"actions": actions_list, "builds": builds_list}
+ return flask.jsonify(response_dict)
@backend_ns.route("/update/", methods=["POST", "PUT"])
@@ -54,14 +56,15 @@ def waiting():
def update():
result = {}
+ request_data = flask.request.json
for typ, logic_cls in [("actions", actions_logic.ActionsLogic),
("builds", builds_logic.BuildsLogic)]:
- if typ not in flask.request.json:
+ if typ not in request_data:
continue
to_update = {}
- for obj in flask.request.json[typ]:
+ for obj in request_data[typ]:
to_update[obj["id"]] = obj
existing = {}
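A tiny, self-contained illustration of the sqlalchemy.sql.false() idiom used in the new filter above; the Build model, engine and data below are made up for the example, not the copr models.

    from sqlalchemy import Column, Integer, Boolean, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.sql import false

    Base = declarative_base()

    class Build(Base):
        __tablename__ = "build"
        id = Column(Integer, primary_key=True)
        canceled = Column(Boolean, default=False)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    session.add_all([Build(canceled=False), Build(canceled=True)])
    session.commit()

    # false() renders as a SQL-level FALSE, so cancelled builds are filtered
    # out inside the database, just like .filter(models.Build.canceled == false())
    active = session.query(Build).filter(Build.canceled == false()).all()
    print(len(active))  # -> 1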
[copr] master: [backend] notify job_grab to remove job from added when start_job failed (c6cd80a)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit c6cd80a7e0b774c07bec797593119e0060ba3acc
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Mon Apr 20 12:43:32 2015 +0200
[backend] notify job_grab to remove job from added when start_job failed
>---------------------------------------------------------------
backend/backend/daemons/dispatcher.py | 91 +++++++++++++++++-------------
backend/tests/deamons/test_dispatcher.py | 57 ++++++++++---------
2 files changed, 81 insertions(+), 67 deletions(-)
diff --git a/backend/backend/daemons/dispatcher.py b/backend/backend/daemons/dispatcher.py
index 08566ca..8897aa8 100644
--- a/backend/backend/daemons/dispatcher.py
+++ b/backend/backend/daemons/dispatcher.py
@@ -235,13 +235,30 @@ class Worker(multiprocessing.Process):
self.notify_job_grab_about_task_end(job)
self._announce_end(job)
+ def can_start_job(self, job):
+ """
+ Checks if we can and/or should start job
+ :type job: BuildJob
+ :rtype: bool
+ """
+ # Checking whether the build is not cancelled
+ if not self.starting_build(job):
+ self.log.info("Couldn't start job: {}".format(job))
+ return False
+
+ # Checking whether to build or skip
+ if self.pkg_built_before(job.pkg, job.chroot, job.destdir):
+ self.on_pkg_skip(job)
+ return False
+
+ return True
+
def obtain_job(self):
"""
Retrieves new build task from queue.
Checks if the new job can be started and not skipped.
"""
- self.update_process_title(suffix="No task")
-
+ # ToDo: remove retask, use redis lua fsm logic similiar to VMM
# this sometimes caused TypeError in random worker
# when another one picekd up a task to build
# why?
@@ -256,20 +273,6 @@ class Worker(multiprocessing.Process):
self.update_process_title(suffix="Task: {} chroot: {}, obtained at {}"
.format(job.build_id, job.chroot, str(datetime.now())))
- # Checking whether the build is not cancelled
- if not self.starting_build(job):
- self.log.info("Couldn't start job: {}".format(job))
- return
-
- # Checking whether to build or skip
- if self.pkg_built_before(job.pkg, job.chroot, job.destdir):
- self.on_pkg_skip(job)
- return
-
- # FIXME
- # this is our best place to sanity check the job before starting
- # up any longer process
-
return job
def do_job(self, job):
@@ -382,15 +385,7 @@ class Worker(multiprocessing.Process):
self.rc.publish(JOB_GRAB_TASK_END_PUBSUB, json.dumps(request))
- def run_cycle(self):
- self.update_process_title(suffix="trying to acquire job")
-
- job = self.obtain_job()
- if not job:
- self.update_process_title(suffix="trying to acquire job")
- time.sleep(self.opts.sleeptime)
- return
-
+ def acquire_vm_for_job(self, job):
self.log.info("got job: {}, acquiring VM for build".format(str(job)))
start_vm_wait_time = time.time()
vmd = None
@@ -408,27 +403,43 @@ class Worker(multiprocessing.Process):
self.log.exception("Unhandled exception during VM acquire :{}".format(error))
self.notify_job_grab_about_task_end(job, do_reschedule=True)
time.sleep(self.opts.sleeptime)
- return
+ break
+ return vmd
- try:
+ def run_cycle(self):
+ self.update_process_title(suffix="trying to acquire job")
+
+ time.sleep(self.opts.sleeptime)
+ job = self.obtain_job()
+ if not job:
+ return
+
+ if not self.can_start_job(job):
+ self.notify_job_grab_about_task_end(job)
+ return
+
+ vmd = self.acquire_vm_for_job(job)
+
+ if vmd is not None:
self.log.info("acquired VM: {} ip: {} for build {}".format(vmd.vm_name, vmd.vm_ip, job.task_id))
# TODO: store self.vmd = vmd and use it
self.vm_name = vmd.vm_name
self.vm_ip = vmd.vm_ip
- self.do_job(job)
- self.notify_job_grab_about_task_end(job)
- except VmError as error:
- self.log.exception("Builder error, re-scheduling task: {}".format(error))
- self.notify_job_grab_about_task_end(job, do_reschedule=True)
- except Exception as error:
- self.log.exception("Unhandled build error: {}".format(error))
- self.notify_job_grab_about_task_end(job, do_reschedule=True)
- finally:
- # clean up the instance
- self.vmm.release_vm(vmd.vm_name)
- self.vm_ip = None
- self.vm_name = None
+ try:
+ self.do_job(job)
+ self.notify_job_grab_about_task_end(job)
+ except VmError as error:
+ self.log.exception("Builder error, re-scheduling task: {}".format(error))
+ self.notify_job_grab_about_task_end(job, do_reschedule=True)
+ except Exception as error:
+ self.log.exception("Unhandled build error: {}".format(error))
+ self.notify_job_grab_about_task_end(job, do_reschedule=True)
+ finally:
+ # clean up the instance
+ self.vmm.release_vm(vmd.vm_name)
+ self.vm_ip = None
+ self.vm_name = None
def run(self):
self.log.info("Starting worker")
diff --git a/backend/tests/deamons/test_dispatcher.py b/backend/tests/deamons/test_dispatcher.py
index 70a2e91..143043e 100644
--- a/backend/tests/deamons/test_dispatcher.py
+++ b/backend/tests/deamons/test_dispatcher.py
@@ -338,64 +338,57 @@ class TestDispatcher(object):
mc_tq = MagicMock()
self.worker.task_queue = mc_tq
self.worker.starting_build = MagicMock()
- self.worker.pkg_built_before = MagicMock()
- self.worker.pkg_built_before.return_value = False
mc_tq.dequeue.return_value = MagicMock(data=self.task)
obtained_job = self.worker.obtain_job()
assert obtained_job.__dict__ == self.job.__dict__
- assert self.worker.pkg_built_before.called
- def test_obtain_job_skip_pkg(self, init_worker):
- mc_tq = MagicMock()
- self.worker.task_queue = mc_tq
+ def test_can_start_job_skip_pkg(self, init_worker):
self.worker.starting_build = MagicMock()
+ self.worker.starting_build.return_value = False
self.worker.pkg_built_before = MagicMock()
self.worker.pkg_built_before.return_value = True
- self.worker.mark_started = MagicMock()
- self.worker.return_results = MagicMock()
- self.worker.notify_job_grab_about_task_end = MagicMock()
+ assert self.worker.can_start_job(self.job) is False
- mc_tq.dequeue.return_value = MagicMock(data=self.task)
- assert self.worker.obtain_job() is None
- assert self.worker.pkg_built_before.called
- assert self.worker.notify_job_grab_about_task_end.called
+ def test_can_start_job_not_starting(self, init_worker):
+ self.worker.starting_build = MagicMock()
+ self.worker.starting_build.return_value = False
+ self.worker.pkg_built_before = MagicMock()
+ self.worker.pkg_built_before.return_value = False
- def test_obtain_job_dequeue_type_error(self, init_worker):
- mc_tq = MagicMock()
- self.worker.task_queue = mc_tq
+ assert self.worker.can_start_job(self.job) is False
+
+ def test_can_start_job(self, init_worker):
self.worker.starting_build = MagicMock()
+ self.worker.starting_build.return_value = True
self.worker.pkg_built_before = MagicMock()
self.worker.pkg_built_before.return_value = False
- mc_tq.dequeue.side_effect = TypeError()
- assert self.worker.obtain_job() is None
- assert not self.worker.starting_build.called
- assert not self.worker.pkg_built_before.called
+ assert self.worker.can_start_job(self.job) is True
- def test_obtain_job_dequeue_none_result(self, init_worker):
+ def test_obtain_job_dequeue_type_error(self, init_worker):
mc_tq = MagicMock()
self.worker.task_queue = mc_tq
self.worker.starting_build = MagicMock()
self.worker.pkg_built_before = MagicMock()
self.worker.pkg_built_before.return_value = False
- mc_tq.dequeue.return_value = None
+ mc_tq.dequeue.side_effect = TypeError()
assert self.worker.obtain_job() is None
assert not self.worker.starting_build.called
assert not self.worker.pkg_built_before.called
- def test_obtain_job_on_starting_build(self, init_worker):
+ def test_obtain_job_dequeue_none_result(self, init_worker):
mc_tq = MagicMock()
self.worker.task_queue = mc_tq
self.worker.starting_build = MagicMock()
- self.worker.starting_build.return_value = False
self.worker.pkg_built_before = MagicMock()
self.worker.pkg_built_before.return_value = False
- mc_tq.dequeue.return_value = MagicMock(data=self.task)
+ mc_tq.dequeue.return_value = None
assert self.worker.obtain_job() is None
+ assert not self.worker.starting_build.called
assert not self.worker.pkg_built_before.called
def test_dummy_run(self, init_worker, mc_time, mc_grc):
@@ -423,7 +416,6 @@ class TestDispatcher(object):
self.worker.update_process_title()
base_title = 'worker-{} {} '.format(self.group_id, self.worker_num)
assert mc_setproctitle.call_args[0][0] == base_title
-
#mc_setproctitle.reset_mock()
self.worker.vm_ip = self.vm_ip
self.worker.update_process_title()
@@ -513,7 +505,6 @@ class TestDispatcher(object):
assert self.worker.notify_job_grab_about_task_end.call_args[1]["do_reschedule"]
assert self.worker.vmm.release_vm.called
-
### handle other errors
self.worker.notify_job_grab_about_task_end.reset_mock()
self.worker.vmm.release_vm.reset_mock()
@@ -522,3 +513,15 @@ class TestDispatcher(object):
assert self.worker.notify_job_grab_about_task_end.call_args[1]["do_reschedule"]
assert self.worker.vmm.release_vm.called
+
+ def test_run_cycle_halt_on_can_start_job_false(self, init_worker):
+ self.worker.notify_job_grab_about_task_end = MagicMock()
+ self.worker.obtain_job = MagicMock()
+ self.worker.obtain_job.return_value = self.job
+ self.worker.can_start_job = MagicMock()
+ self.worker.can_start_job.return_value = False
+ self.worker.acquire_vm_for_job = MagicMock()
+
+ self.worker.run_cycle()
+ assert self.worker.can_start_job.called
+ assert not self.worker.acquire_vm_for_job.called
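The control flow after this refactoring is spread over several hunks, so here is a condensed, runnable sketch of the new worker cycle. Method names follow the patch (obtain_job / can_start_job / acquire_vm_for_job / notify_job_grab_about_task_end); the dummy job, VM and event log are invented for illustration and are not the real copr backend implementation.

    class WorkerCycleSketch(object):
        def __init__(self):
            self.events = []

        def obtain_job(self):
            # pretend the queue returned a build task that was cancelled meanwhile
            return {"task_id": "12345-fedora-20-x86_64", "canceled": True}

        def can_start_job(self, job):
            return not job["canceled"]

        def acquire_vm_for_job(self, job):
            return {"vm_name": "builder-01"}

        def notify_job_grab_about_task_end(self, job, do_reschedule=False):
            self.events.append(("task_end", job["task_id"], do_reschedule))

        def do_job(self, job):
            self.events.append(("built", job["task_id"]))

        def release_vm(self, vm_name):
            self.events.append(("released", vm_name))

        def run_cycle(self):
            job = self.obtain_job()
            if not job:
                return
            if not self.can_start_job(job):
                # the new behaviour: job_grab is told to drop the task from its
                # added jobs, otherwise the task would never be retried
                self.notify_job_grab_about_task_end(job)
                return
            vmd = self.acquire_vm_for_job(job)
            if vmd is None:
                return
            try:
                self.do_job(job)
                self.notify_job_grab_about_task_end(job)
            except Exception:
                self.notify_job_grab_about_task_end(job, do_reschedule=True)
            finally:
                self.release_vm(vmd["vm_name"])

    w = WorkerCycleSketch()
    w.run_cycle()
    print(w.events)  # [('task_end', '12345-fedora-20-x86_64', False)] -- dropped, no VM touched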
[copr] master: [backend] [vmm] terminate VM with state IN_USE only when builder process is missing (be7d2f1)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit be7d2f1ab808d2b1531f560510336e8f375a9ca3
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Apr 17 19:26:05 2015 +0200
[backend] [vmm] terminate VM with state IN_USE only when builder process is missing
>---------------------------------------------------------------
backend/backend/daemons/dispatcher.py | 2 +
backend/backend/daemons/vm_master.py | 35 +++++++++++++------------
backend/backend/helpers.py | 2 +-
backend/backend/vm_manage/event_handle.py | 2 +-
backend/backend/vm_manage/manager.py | 4 +++
backend/tests/deamons/test_vm_master.py | 6 +++-
backend/tests/vm_manager/test_event_handle.py | 5 +--
7 files changed, 32 insertions(+), 24 deletions(-)
diff --git a/backend/backend/daemons/dispatcher.py b/backend/backend/daemons/dispatcher.py
index 2931cd2..08566ca 100644
--- a/backend/backend/daemons/dispatcher.py
+++ b/backend/backend/daemons/dispatcher.py
@@ -258,6 +258,7 @@ class Worker(multiprocessing.Process):
# Checking whether the build is not cancelled
if not self.starting_build(job):
+ self.log.info("Couldn't start job: {}".format(job))
return
# Checking whether to build or skip
@@ -386,6 +387,7 @@ class Worker(multiprocessing.Process):
job = self.obtain_job()
if not job:
+ self.update_process_title(suffix="trying to acquire job")
time.sleep(self.opts.sleeptime)
return
diff --git a/backend/backend/daemons/vm_master.py b/backend/backend/daemons/vm_master.py
index bc4080f..db32ef7 100644
--- a/backend/backend/daemons/vm_master.py
+++ b/backend/backend/daemons/vm_master.py
@@ -54,8 +54,22 @@ class VmMaster(Process):
.format(vmd.vm_name, not_re_acquired_in))
self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.READY)
- def check_one_vm_for_dead_builder(self, vmd):
+ def request_build_reschedule(self, vmd):
+ self.log.info("trying to publish reschedule")
+ vmd_dict = vmd.to_dict()
+ if all(x in vmd_dict for x in ["build_id", "task_id", "chroot"]):
+ request = {
+ "action": "reschedule",
+ "build_id": vmd.build_id,
+ "task_id": vmd.task_id,
+ "chroot": vmd.chroot,
+ }
+ self.log.info("trying to publish reschedule: {}".format(request))
+ self.vmm.rc.publish(JOB_GRAB_TASK_END_PUBSUB, json.dumps(request))
+ # else:
+ # self.log.info("Failed to release VM: {}".format(vmd.vm_name))
+ def check_one_vm_for_dead_builder(self, vmd):
in_use_since = vmd.get_field(self.vmm.rc, "in_use_since")
pid = vmd.get_field(self.vmm.rc, "used_by_pid")
@@ -71,22 +85,9 @@ class VmMaster(Process):
if psutil.pid_exists(pid) and vmd.vm_name in psutil.Process(pid).cmdline[0]:
return
- self.log.info("Process `{}` not exists anymore, releasing VM: {} ".format(pid, vmd.vm_name))
- # from celery.contrib import rdb; rdb.set_trace()
- if self.vmm.release_vm(vmd.vm_name):
- self.log.info("trying to publish reschedule")
- vmd_dict = vmd.to_dict()
- if all(x in vmd_dict for x in ["build_id", "task_id", "chroot"]):
- request = {
- "action": "reschedule",
- "build_id": vmd.build_id,
- "task_id": vmd.task_id,
- "chroot": vmd.chroot,
- }
- self.log.info("trying to publish reschedule: {}".format(request))
- self.vmm.rc.publish(JOB_GRAB_TASK_END_PUBSUB, json.dumps(request))
- else:
- self.log.info("Failed to release VM: {}".format(vmd.vm_name))
+ self.log.info("Process `{}` not exists anymore, terminating VM: {} ".format(pid, vmd.vm_name))
+ self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.IN_USE)
+ self.request_build_reschedule(vmd)
def remove_vm_with_dead_builder(self):
# TODO: rewrite build manage at backend and move functionality there
diff --git a/backend/backend/helpers.py b/backend/backend/helpers.py
index e6571d6..94eeab6 100644
--- a/backend/backend/helpers.py
+++ b/backend/backend/helpers.py
@@ -151,7 +151,7 @@ class BackendConfigReader(object):
cp, "backend", "group{}_max_vm_total".format(group_id),
# default=16, mode="int"),
default=8, mode="int"),
- "max_vm_per_user": _get_conf(
+ "max_vm_per_user": _get_conf(
cp, "backend", "group{}_max_vm_per_user".format(group_id),
default=4, mode="int"),
"max_builds_per_vm": _get_conf(
diff --git a/backend/backend/vm_manage/event_handle.py b/backend/backend/vm_manage/event_handle.py
index 88c3e69..c12076f 100644
--- a/backend/backend/vm_manage/event_handle.py
+++ b/backend/backend/vm_manage/event_handle.py
@@ -104,7 +104,7 @@ class EventHandler(Process):
self.log.debug("recording check fail: {}".format(msg))
self.lua_scripts["record_failure"](keys=[vmd.vm_key])
too_much_fails = int(vmd.get_field(self.vmm.rc, "check_fails") or 0) > Thresholds.max_check_fails
- if too_much_fails:
+ if too_much_fails and vmd.state != VmStates.IN_USE:
self.log.info("check fail threshold reached: {}, terminating: {}"
.format(check_fails_count, msg))
self.vmm.start_vm_termination(vmd.vm_name)
diff --git a/backend/backend/vm_manage/manager.py b/backend/backend/vm_manage/manager.py
index 546b360..ab954bd 100644
--- a/backend/backend/vm_manage/manager.py
+++ b/backend/backend/vm_manage/manager.py
@@ -84,6 +84,10 @@ end
terminate_vm_lua = """
local old_state = redis.call("HGET", KEYS[1], "state")
+if old_state == "in_use" and ARGV[1] ~= "in_use" then
+ return "Termination of VM in in_use state are forbidden"
+end
+
if ARGV[1] and ARGV[1] ~= "None" and old_state ~= ARGV[1] then
return "Old state != `allowed_pre_state`"
elseif old_state == "terminating" and ARGV[1] ~= "terminating" then
diff --git a/backend/tests/deamons/test_vm_master.py b/backend/tests/deamons/test_vm_master.py
index b31e397..be8c5f8 100644
--- a/backend/tests/deamons/test_vm_master.py
+++ b/backend/tests/deamons/test_vm_master.py
@@ -200,8 +200,10 @@ class TestVmMaster(object):
def test_remove_vm_with_dead_builder(self, mc_time, add_vmd, mc_psutil):
mc_time.time.return_value = time.time()
+ self.vm_master.log = MagicMock()
- self.vmm.release_vm = types.MethodType(MagicMock(), self.vmm)
+ self.vmm.start_vm_termination = MagicMock()
+ self.vmm.start_vm_termination.return_value = "OK"
for idx, vmd in enumerate([self.vmd_a1, self.vmd_a2,
self.vmd_b1, self.vmd_b2, self.vmd_b3]):
@@ -245,7 +247,7 @@ class TestVmMaster(object):
self.vm_master.remove_vm_with_dead_builder()
msg_list = self.rcv_from_ps_message_bus()
-
+ print(self.vm_master.log.call_args_list)
assert set(["2", "4"]) == set([json.loads(m["data"])["build_id"] for m in msg_list])
def test_check_vms_health(self, mc_time, add_vmd):
diff --git a/backend/tests/vm_manager/test_event_handle.py b/backend/tests/vm_manager/test_event_handle.py
index fe0ef1c..b5bb82f 100644
--- a/backend/tests/vm_manager/test_event_handle.py
+++ b/backend/tests/vm_manager/test_event_handle.py
@@ -243,11 +243,10 @@ class TestEventHandle(object):
assert self.vmd.get_field(self.rc, "state") == VmStates.IN_USE
assert int(self.vmd.get_field(self.rc, "check_fails")) == 2
- # when threshold exceeded request termination
+ # when threshold exceeded request termination do NOT terminate it
self.eh.on_health_check_result(msg)
assert self.vmd.get_field(self.rc, "state") == VmStates.IN_USE
-
- assert self.vmm.start_vm_termination.called
+ assert not self.vmm.start_vm_termination.called
def test_health_check_result_on_wrong_states(self):
self.vmd = VmDescriptor(self.vm_ip, self.vm_name, self.group, VmStates.GOT_IP)
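For reference, publishing a reschedule request like request_build_reschedule() does is plain Redis pub/sub. A rough redis-py sketch follows; the channel name, connection parameters and field values are assumptions for illustration, not the real copr constants.

    import json
    import redis

    JOB_GRAB_TASK_END_PUBSUB = "copr:backend:job_grab:task_end"  # assumed channel name

    rc = redis.StrictRedis(host="127.0.0.1", port=6379)
    request = {
        "action": "reschedule",
        "build_id": 42,
        "task_id": "42-fedora-20-x86_64",
        "chroot": "fedora-20-x86_64",
    }
    # every subscriber of the channel (here: the job_grab daemon) receives the JSON payload
    rc.publish(JOB_GRAB_TASK_END_PUBSUB, json.dumps(request))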
[copr] master: [backend] bugfix VMM.get_all_vm_in_group: VM could be removed before load occurs (6dd9543)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 6dd9543f5f3dfe4732765d77a3fdff779cb7a038
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Apr 17 18:11:07 2015 +0200
[backend] bugfix VMM.get_all_vm_in_group: VM could be removed before load occurs
>---------------------------------------------------------------
backend/backend/vm_manage/manager.py | 20 +++++++++++++++-----
backend/backend/vm_manage/models.py | 7 +++++++
backend/tests/vm_manager/test_manager.py | 6 +++---
3 files changed, 25 insertions(+), 8 deletions(-)
diff --git a/backend/backend/vm_manage/manager.py b/backend/backend/vm_manage/manager.py
index e9f92d7..546b360 100644
--- a/backend/backend/vm_manage/manager.py
+++ b/backend/backend/vm_manage/manager.py
@@ -11,7 +11,7 @@ import time
import weakref
from cStringIO import StringIO
import datetime
-from backend.exceptions import VmError, NoVmAvailable
+from backend.exceptions import VmError, NoVmAvailable, VmDescriptorNotFound
from backend.helpers import get_redis_connection
from .models import VmDescriptor
@@ -331,21 +331,31 @@ class VmManager(object):
pipe.execute()
self.log.info("removed vm `{}` from pool".format(vm_name))
+ def _load_multi_safe(self, vm_name_list):
+ result = []
+ for vm_name in vm_name_list:
+ try:
+ result.append(VmDescriptor.load(self.rc, vm_name))
+ except VmDescriptorNotFound:
+ self.log.debug("Failed to load VMD: {}".format(vm_name))
+ pass
+ return result
+
def get_all_vm_in_group(self, group):
"""
:rtype: list of VmDescriptor
"""
vm_name_list = self.rc.smembers(KEY_VM_POOL.format(group=group))
- return [VmDescriptor.load(self.rc, vm_name) for vm_name in vm_name_list]
+ return self._load_multi_safe(vm_name_list)
def get_all_vm(self):
"""
:rtype: list of VmDescriptor
"""
- vm_name_list = []
+ vmd_list = []
for group in self.vm_groups:
- vm_name_list.extend(self.rc.smembers(KEY_VM_POOL.format(group=group)))
- return [VmDescriptor.load(self.rc, vm_name) for vm_name in vm_name_list]
+ vmd_list.extend(self.get_all_vm_in_group(group))
+ return vmd_list
def get_vm_by_name(self, vm_name):
"""
diff --git a/backend/backend/vm_manage/models.py b/backend/backend/vm_manage/models.py
index 17b4a0f..6f6b26e 100644
--- a/backend/backend/vm_manage/models.py
+++ b/backend/backend/vm_manage/models.py
@@ -40,6 +40,13 @@ class VmDescriptor(object):
@classmethod
def load(cls, rc, vm_name):
+ """
+
+ :param rc:
+ :param vm_name:
+ :rtype: VmDescriptor
+ :raises VmDescriptorNotFound:
+ """
raw = rc.hgetall(KEY_VM_INSTANCE.format(vm_name=vm_name))
if not raw:
raise VmDescriptorNotFound("VmDescriptor for `{}` not found".format(vm_name))
diff --git a/backend/tests/vm_manager/test_manager.py b/backend/tests/vm_manager/test_manager.py
index b77ca02..270d087 100644
--- a/backend/tests/vm_manager/test_manager.py
+++ b/backend/tests/vm_manager/test_manager.py
@@ -428,19 +428,19 @@ class TestManager(object):
self.vmm.info()
def test_look_up_vms_by_ip(self, f_second_group, capsys):
- vmd_1 = self.vmm.add_vm_to_pool(self.vm_ip, "a1", self.group)
+ self.vmm.add_vm_to_pool(self.vm_ip, "a1", self.group)
r1 = self.vmm.lookup_vms_by_ip(self.vm_ip)
assert len(r1) == 1
assert r1[0].vm_name == "a1"
- vmd_2 = self.vmm.add_vm_to_pool(self.vm_ip, "a2", self.group)
+ self.vmm.add_vm_to_pool(self.vm_ip, "a2", self.group)
r2 = self.vmm.lookup_vms_by_ip(self.vm_ip)
assert len(r2) == 2
r2 = sorted(r2, key=lambda vmd: vmd.vm_name)
assert r2[0].vm_name == "a1"
assert r2[1].vm_name == "a2"
- vmd_3 = self.vmm.add_vm_to_pool("127.1.1.111", "b1", 1)
+ self.vmm.add_vm_to_pool("127.1.1.111", "b1", 1)
r3 = self.vmm.lookup_vms_by_ip(self.vm_ip)
assert len(r3) == 2
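The race being fixed is between SMEMBERS on the pool set and the per-VM HGETALL. Below is a stripped-down redis-py sketch of the same guard, using plain hashes instead of the copr VmDescriptor model; the key names are invented for the example.

    import redis

    rc = redis.StrictRedis(host="127.0.0.1", port=6379, decode_responses=True)

    def load_all_vm_hashes(pool_key):
        result = []
        for vm_name in rc.smembers(pool_key):
            raw = rc.hgetall("vm_instance:{0}".format(vm_name))
            if not raw:
                # the VM was dropped between SMEMBERS and HGETALL -- skip it
                # instead of blowing up, like _load_multi_safe() above
                continue
            result.append(raw)
        return result

    print(load_all_vm_hashes("copr:vm_pool:group:0"))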
[copr] master: [backend] job_grab: postpone build if we are already serving more builds than the `max_vm_per_user` option allows (1fecb6d)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 1fecb6d004b4a38f6bef280356b13b7e49754e35
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Apr 17 17:51:17 2015 +0200
[backend] job_grab: postpone build if we are already serving more builds than the `max_vm_per_user` option allows
>---------------------------------------------------------------
backend/backend/daemons/job_grab.py | 27 ++++++++++++++-------------
backend/backend/mockremote/builder.py | 2 +-
backend/copr-backend.spec | 8 ++++----
backend/tests/deamons/test_job_grab.py | 22 +++++++++++++++++-----
backend/tests/mockremote/test_builder.py | 2 +-
5 files changed, 37 insertions(+), 24 deletions(-)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index 31d694d..246e3e0 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -57,7 +57,7 @@ class CoprJobGrab(Process):
self.task_queues_by_arch = {}
self.task_queues_by_group = {}
- self.added_jobs = set()
+ self.added_jobs_dict = dict() # task_id -> task dict
self.lock = lock
self.frontend_client = frontend_client
@@ -68,8 +68,6 @@ class CoprJobGrab(Process):
self.log = get_redis_logger(self.opts, "backend.job_grab", "job_grab")
- self.vm_manager = VmManager(opts=self.opts, logger=self.log)
-
def connect_queues(self):
"""
Connects to the retask queues. One queue per builders group.
@@ -109,7 +107,7 @@ class CoprJobGrab(Process):
"""
count = 0
if "task_id" in task:
- if task["task_id"] not in self.added_jobs:
+ if task["task_id"] not in self.added_jobs_dict:
arch = task["chroot"].split("-")[2]
if arch not in self.task_queues_by_arch:
raise CoprJobGrabError("No builder group for architecture: {}, task: {}"
@@ -117,11 +115,15 @@ class CoprJobGrab(Process):
username = task["project_owner"]
group_id = int(self.arch_to_group_id_map[arch])
- if not self.vm_manager.can_user_acquire_more_vm(username, group_id):
- self.log.debug("User can not acquire more VM, don't schedule more tasks")
+ active_jobs_count = len([t for t_id, t in self.added_jobs_dict.items()
+ if t["project_owner"] == username])
+
+ if active_jobs_count > self.opts.build_groups[group_id]["max_vm_per_user"]:
+ self.log.debug("User can not acquire more VM (active builds #{}), "
+ "don't schedule more tasks".format(active_jobs_count))
return 0
- self.added_jobs.add(task["task_id"])
+ self.added_jobs_dict[task["task_id"]] = task
task_obj = Task(task)
self.task_queues_by_arch[arch].enqueue(task_obj)
@@ -208,12 +210,12 @@ class CoprJobGrab(Process):
self.log.info("Rescheduling task `{}`".format(task_id))
self.frontend_client.reschedule_build(msg["build_id"], msg["chroot"])
- if task_id not in self.added_jobs:
+ if task_id not in self.added_jobs_dict:
self.log.debug("Task `{}` not present in added jobs, msg ignored: {}".format(task_id, msg))
return
if action in ["remove", "reschedule"]:
- self.added_jobs.remove(task_id)
+ self.added_jobs_dict.pop(task_id)
self.log.info("Removed task `{}` from added_jobs".format(task_id))
except Exception as err:
@@ -221,9 +223,9 @@ class CoprJobGrab(Process):
.format(raw, err))
def log_queue_info(self):
- if self.added_jobs:
- self.log.debug("Added jobs after remove and load: {}".format(self.added_jobs))
- self.log.debug("# of executed jobs: {}".format(len(self.added_jobs)))
+ if self.added_jobs_dict:
+ self.log.debug("Added jobs after remove and load: {}".format(self.added_jobs_dict))
+ self.log.debug("# of executed jobs: {}".format(len(self.added_jobs_dict)))
for group, queue in self.task_queues_by_group.items():
if queue.length > 0:
@@ -236,7 +238,6 @@ class CoprJobGrab(Process):
setproctitle("CoprJobGrab")
self.connect_queues()
self.listen_to_pubsub()
- self.vm_manager.post_init()
self.log.info("JobGrub started.")
try:
diff --git a/backend/backend/mockremote/builder.py b/backend/backend/mockremote/builder.py
index 6fd9b62..8016fdf 100644
--- a/backend/backend/mockremote/builder.py
+++ b/backend/backend/mockremote/builder.py
@@ -331,7 +331,7 @@ class Builder(object):
remote_src = "{0}@{1}:{2}".format(self.opts.build_user, self.hostname, rpd)
ssh_opts = "'ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no'"
- rsync_log_filepath = os.path.join(destdir, "build-{}.rsync.log".format(self.job.build_id))
+ rsync_log_filepath = os.path.join(destdir, "build-{:08d}.rsync.log".format(self.job.build_id))
command = "{} -avH -e {} {} {}/ &> {}".format(
rsync, ssh_opts, remote_src, destdir,
rsync_log_filepath)
diff --git a/backend/copr-backend.spec b/backend/copr-backend.spec
index 9d04321..708a983 100644
--- a/backend/copr-backend.spec
+++ b/backend/copr-backend.spec
@@ -177,12 +177,12 @@ cp -a conf/logstash/copr_backend.conf %{buildroot}%{_pkgdocdir}/examples/%{_sysc
%check
-# redis-server --port 7777 &> /dev/null &
+redis-server --port 7777 &> /dev/null &
-#PYTHONPATH=backend:run:$PYTHONPATH python -B -m pytest \
-# -s -v --cov-report term-missing --cov ./backend --cov ./run ./tests/
+PYTHONPATH=backend:run:$PYTHONPATH python -B -m pytest \
+ -s -v --cov-report term-missing --cov ./backend --cov ./run ./tests/
-# kill %1
+kill %1
%pre
getent group copr >/dev/null || groupadd -r copr
diff --git a/backend/tests/deamons/test_job_grab.py b/backend/tests/deamons/test_job_grab.py
index c6e9735..a5736fe 100644
--- a/backend/tests/deamons/test_job_grab.py
+++ b/backend/tests/deamons/test_job_grab.py
@@ -88,8 +88,11 @@ class TestJobGrab(object):
logfile=self.log_file,
verbose=False,
build_groups=[
- {"id": 0, "name": "x86", "archs": ["i386", "i686", "x86_64"]},
- {"id": 1, "name": "arm", "archs": ["armv7"]},
+ {"id": 0, "name": "x86",
+ "archs": ["i386", "i686", "x86_64"],
+ "max_vm_per_user": 5},
+ {"id": 1, "name": "arm", "archs": ["armv7"],
+ "max_vm_per_user": 5},
],
destdir="/dev/null",
frontend_base_url="http://example.com/",
@@ -165,19 +168,28 @@ class TestJobGrab(object):
assert not mc_rc.pubsub.called
self.jg.listen_to_pubsub()
-
assert mc_rc.pubsub.called
assert mc_rc.pubsub.return_value.subscribe.called
def test_route_build_task_skip_added(self, init_jg):
- self.jg.added_jobs.add(12345)
- self.jg.added_jobs.add(12346)
+ for d in [self.task_dict_1, self.task_dict_2]:
+ self.jg.added_jobs_dict[d["task_id"]] = d
assert self.jg.route_build_task(self.task_dict_1) == 0
assert self.jg.route_build_task(self.task_dict_2) == 0
for obj in self.jg.task_queues_by_arch.values():
assert not obj.enqueue.called
+ def test_route_build_task_skip_too_much_added(self, init_jg):
+ for i in range(10):
+ task = dict(self.task_dict_1)
+ task["task_id"] = 1000 + i
+ self.jg.added_jobs_dict[task["task_id"]] = task
+
+ assert self.jg.route_build_task(self.task_dict_1) == 0
+ for obj in self.jg.task_queues_by_arch.values():
+ assert not obj.enqueue.called
+
def test_route_build_task_missing_task_ud(self, init_jg):
assert self.jg.route_build_task({"task": "wrong_key"}) == 0
for obj in self.jg.task_queues_by_arch.values():
diff --git a/backend/tests/mockremote/test_builder.py b/backend/tests/mockremote/test_builder.py
index 1ce13af..9f42a26 100644
--- a/backend/tests/mockremote/test_builder.py
+++ b/backend/tests/mockremote/test_builder.py
@@ -702,7 +702,7 @@ class TestBuilder(object):
expected_arg = (
"/usr/bin/rsync -avH -e 'ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no'"
" copr_builder@example.com:/tmp/copr-backend-test-tmp/build/results/fedora-20-i386/foovar-2.41.f21 "
- "'/tmp/copr-backend-test'/ &> '/tmp/copr-backend-test'/build-12345.rsync.log")
+ "'/tmp/copr-backend-test'/ &> '/tmp/copr-backend-test'/build-00012345.rsync.log")
assert mc_popen.call_args[0][0] == expected_arg
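A minimal sketch of the new throttling check in route_build_task(): count the already-routed tasks per owner and postpone anything over max_vm_per_user. The task data and limit below are made up for illustration.

    added_jobs_dict = {
        101: {"project_owner": "alice"},
        102: {"project_owner": "alice"},
        103: {"project_owner": "alice"},
        104: {"project_owner": "bob"},
    }
    max_vm_per_user = 2

    def may_route(task):
        owner = task["project_owner"]
        active = len([t for t in added_jobs_dict.values()
                      if t["project_owner"] == owner])
        # same rule as the patch: postpone when the owner already has more
        # routed tasks than the per-group max_vm_per_user option
        return active <= max_vm_per_user

    print(may_route({"task_id": 105, "project_owner": "alice"}))  # False -> postponed
    print(may_route({"task_id": 106, "project_owner": "bob"}))    # True  -> enqueued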
[copr] master: [frontend] make option gpgcheck in copr.repo configurable (ff681dd)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit ff681dd61bd58b2b1bc0abbf2b508f3d02ef4b6c
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Apr 17 14:20:02 2015 +0200
[frontend] make option gpgcheck in copr.repo configurable
>---------------------------------------------------------------
frontend/coprs_frontend/config/copr_devel.conf | 2 ++
frontend/coprs_frontend/coprs/config.py | 2 ++
.../coprs_frontend/coprs/templates/coprs/copr.repo | 2 +-
3 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/frontend/coprs_frontend/config/copr_devel.conf b/frontend/coprs_frontend/config/copr_devel.conf
index c586406..7ded978 100644
--- a/frontend/coprs_frontend/config/copr_devel.conf
+++ b/frontend/coprs_frontend/config/copr_devel.conf
@@ -47,3 +47,5 @@ LOG_FILENAME="/tmp/copr_frontend.log"
# IP or subnet
INTRANET_IPS = ["127.0.0.1", "192.168.1.0/24"]
+
+REPO_GPGCHECK = 0
diff --git a/frontend/coprs_frontend/coprs/config.py b/frontend/coprs_frontend/coprs/config.py
index aac1e66..8775e81 100644
--- a/frontend/coprs_frontend/coprs/config.py
+++ b/frontend/coprs_frontend/coprs/config.py
@@ -46,6 +46,8 @@ class Config(object):
INTRANET_IPS = ["127.0.0.1"]
DEBUG = True
+ REPO_GPGCHECK = 1
+
class ProductionConfig(Config):
DEBUG = False
diff --git a/frontend/coprs_frontend/coprs/templates/coprs/copr.repo b/frontend/coprs_frontend/coprs/templates/coprs/copr.repo
index 07b2762..777a745 100644
--- a/frontend/coprs_frontend/coprs/templates/coprs/copr.repo
+++ b/frontend/coprs_frontend/coprs/templates/coprs/copr.repo
@@ -2,7 +2,7 @@
name=Copr repo for {{ copr.name }} owned by {{ copr.owner.name }}
baseurl={{ url | fix_url_https_backend }}
skip_if_unavailable=True
-gpgcheck=1
+gpgcheck={{ config.REPO_GPGCHECK | default("1")}}
gpgkey={{ pubkey_url | fix_url_https_backend }}
enabled=1
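The template change relies on Jinja2's default filter, sketched here with a bare Jinja2 Template instead of the Flask-rendered copr.repo: deployments that never set REPO_GPGCHECK keep gpgcheck=1.

    from jinja2 import Template

    tpl = Template(u'gpgcheck={{ config.REPO_GPGCHECK | default("1") }}')
    print(tpl.render(config={"REPO_GPGCHECK": 0}))  # gpgcheck=0
    print(tpl.render(config={}))                    # gpgcheck=1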
[copr] master: [backend] fix build logging (885a2cb)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 885a2cb596d073fa9b6e1b85174ae6e33c7f94a6
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Apr 17 12:49:51 2015 +0200
[backend] fix build logging
>---------------------------------------------------------------
backend/backend/daemons/dispatcher.py | 11 +++++++++--
1 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/backend/backend/daemons/dispatcher.py b/backend/backend/daemons/dispatcher.py
index 56fbf2f..2931cd2 100644
--- a/backend/backend/daemons/dispatcher.py
+++ b/backend/backend/daemons/dispatcher.py
@@ -315,11 +315,12 @@ class Worker(multiprocessing.Process):
chroot_logfile = "{}/build-{:08d}.log".format(chroot_destdir, job.build_id)
+ build_logger = create_file_logger("{}.builder.mr".format(self.logger_name),
+ chroot_logfile, fmt=build_log_format)
try:
mr = MockRemote(
builder_host=self.vm_ip, job=job,
- logger=create_file_logger("{}.builder.mr".format(self.logger_name),
- chroot_logfile, fmt=build_log_format),
+ logger=build_logger,
repos=chroot_repos,
opts=self.opts, lock=self.lock,
)
@@ -341,6 +342,12 @@ class Worker(multiprocessing.Process):
)
status = BuildStatus.FAILURE
register_build_result(self.opts, failed=True)
+ finally:
+ # TODO: kind of ugly solution
+ # we should remove handler from build loger, otherwise we would write
+ # to the previous project
+ for h in build_logger.handlers[:]:
+ build_logger.removeHandler(h)
self.log.info(
"Finished build: id={} builder={} timeout={} destdir={}"
[copr] master: [backend] defer sending job to worker if job owner acquired too many VMs (c7164c2)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit c7164c22053af16f6de446b124c74bcbb3bf87b9
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Thu Apr 16 20:17:49 2015 +0200
[backend] defer sending job to worker if job owner acquired too many VMs
>---------------------------------------------------------------
backend/backend/daemons/job_grab.py | 24 ++++++++++++++++---
backend/backend/mockremote/__init__.py | 2 +-
backend/backend/vm_manage/manager.py | 39 ++++++++++++++++++++-----------
backend/tests/deamons/test_job_grab.py | 8 +++++-
4 files changed, 52 insertions(+), 21 deletions(-)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index e5cb3ef..31d694d 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -26,6 +26,9 @@ from ..frontend import FrontendClient
# TODO: Replace entire model with asynchronous queue, so that frontend push task,
# and workers listen for them
+from ..vm_manage.manager import VmManager
+
+
class CoprJobGrab(Process):
"""
@@ -46,6 +49,11 @@ class CoprJobGrab(Process):
super(CoprJobGrab, self).__init__(name="jobgrab")
self.opts = opts
+ self.arch_to_group_id_map = dict()
+ for group in self.opts.build_groups:
+ for arch in group["archs"]:
+ self.arch_to_group_id_map[arch] = group["id"]
+
self.task_queues_by_arch = {}
self.task_queues_by_group = {}
@@ -60,6 +68,8 @@ class CoprJobGrab(Process):
self.log = get_redis_logger(self.opts, "backend.job_grab", "job_grab")
+ self.vm_manager = VmManager(opts=self.opts, logger=self.log)
+
def connect_queues(self):
"""
Connects to the retask queues. One queue per builders group.
@@ -100,14 +110,19 @@ class CoprJobGrab(Process):
count = 0
if "task_id" in task:
if task["task_id"] not in self.added_jobs:
-
- # TODO: produces memory leak!
- self.added_jobs.add(task["task_id"])
arch = task["chroot"].split("-")[2]
if arch not in self.task_queues_by_arch:
raise CoprJobGrabError("No builder group for architecture: {}, task: {}"
.format(arch, task))
+ username = task["project_owner"]
+ group_id = int(self.arch_to_group_id_map[arch])
+ if not self.vm_manager.can_user_acquire_more_vm(username, group_id):
+ self.log.debug("User can not acquire more VM, don't schedule more tasks")
+ return 0
+
+ self.added_jobs.add(task["task_id"])
+
task_obj = Task(task)
self.task_queues_by_arch[arch].enqueue(task_obj)
count += 1
@@ -172,7 +187,7 @@ class CoprJobGrab(Process):
return
try:
msg = json.loads(raw["data"])
- # msg: {"action": ["remove"|"reschedule"], "task_id": ..., "build_id"..., "chroot": ...}
+ # msg: {"action": ("remove"|"reschedule"), "task_id": ..., "build_id"..., "chroot": ...}
# Actions: "remove" simply remove `task_id` from self.added_job
# "reschedule" additionally call frontend and set pending state before removal
if "action" not in msg:
@@ -221,6 +236,7 @@ class CoprJobGrab(Process):
setproctitle("CoprJobGrab")
self.connect_queues()
self.listen_to_pubsub()
+ self.vm_manager.post_init()
self.log.info("JobGrub started.")
try:
diff --git a/backend/backend/mockremote/__init__.py b/backend/backend/mockremote/__init__.py
index 42e8130..03df070 100755
--- a/backend/backend/mockremote/__init__.py
+++ b/backend/backend/mockremote/__init__.py
@@ -354,7 +354,7 @@ class MockRemote(object):
with open(info_file_path, 'w') as info_file:
info_file.writelines([
"build_id={}".format(self.job.build_id),
- "builder_ip={}".format(self.builder.hostname)])
+ "\nbuilder_ip={}".format(self.builder.hostname)])
except Exception as error:
self.log.exception("Failed to mark build {} with build_id".format(error))
diff --git a/backend/backend/vm_manage/manager.py b/backend/backend/vm_manage/manager.py
index d519531..e9f92d7 100644
--- a/backend/backend/vm_manage/manager.py
+++ b/backend/backend/vm_manage/manager.py
@@ -222,17 +222,9 @@ class VmManager(object):
def mark_server_start(self):
self.rc.hset(KEY_SERVER_INFO, "server_start_timestamp", time.time())
- def acquire_vm(self, group, username, pid, task_id=None, build_id=None, chroot=None):
+ def can_user_acquire_more_vm(self, username, group):
"""
- Try to acquire VM from pool
-
- :param group: builder group id, as defined in config
- :type group: int
- :param username: build owner username, VMM prefer to reuse an existing VM which was used by the same user
- :param pid: builder pid to release VM after build process unhandled death
-
- :rtype: VmDescriptor
- :raises: NoVmAvailable when manager couldn't find suitable VM for the given group and user
+ :return bool: True when user are allowed to acquire more VM
"""
vmd_list = self.get_all_vm_in_group(group)
vm_count_used_by_user = len([
@@ -242,13 +234,32 @@ class VmManager(object):
self.log.debug("# vm by user: {}, limit:{} ".format(
vm_count_used_by_user, self.opts.build_groups[group]["max_vm_per_user"]
))
-
if vm_count_used_by_user >= self.opts.build_groups[group]["max_vm_per_user"]:
# TODO: this check isn't reliable, if two (or more) processes check VM list
- # at the +- same time, they could acquire more VMs
+ # at the +- same time, they could acquire more VMs
# proper solution: do this check inside lua script
- raise NoVmAvailable("No VM are available, user `{}` already acquired #{} VMs"
- .format(username, vm_count_used_by_user))
+ self.log.debug("No VM are available, user `{}` already acquired #{} VMs"
+ .format(username, vm_count_used_by_user))
+ return False
+ else:
+ return True
+
+ def acquire_vm(self, group, username, pid, task_id=None, build_id=None, chroot=None):
+ """
+ Try to acquire VM from pool
+
+ :param group: builder group id, as defined in config
+ :type group: int
+ :param username: build owner username, VMM prefer to reuse an existing VM which was used by the same user
+ :param pid: builder pid to release VM after build process unhandled death
+
+ :rtype: VmDescriptor
+ :raises: NoVmAvailable when manager couldn't find suitable VM for the given group and user
+ """
+ vmd_list = self.get_all_vm_in_group(group)
+ if not self.can_user_acquire_more_vm(username, group):
+ raise NoVmAvailable("No VM are available, user `{}` already acquired too much VMs"
+ .format(username))
ready_vmd_list = [vmd for vmd in vmd_list if vmd.state == VmStates.READY]
# trying to find VM used by this user
diff --git a/backend/tests/deamons/test_job_grab.py b/backend/tests/deamons/test_job_grab.py
index 9707488..c6e9735 100644
--- a/backend/tests/deamons/test_job_grab.py
+++ b/backend/tests/deamons/test_job_grab.py
@@ -106,14 +106,17 @@ class TestJobGrab(object):
self.task_dict_1 = dict(
task_id=12345,
chroot="fedora-20-x86_64",
+ project_owner="foobar",
)
self.task_dict_2 = dict(
task_id=12346,
chroot="fedora-20-armv7",
+ project_owner="foobar",
)
self.task_dict_bad_arch = dict(
task_id=12346,
chroot="fedora-20-s390x",
+ project_owner="foobar",
)
def teardown_method(self, method):
@@ -136,6 +139,7 @@ class TestJobGrab(object):
def init_jg(self, mc_retask_queue, mc_grc):
self.jg = CoprJobGrab(self.opts, self.frontend_client, self.lock)
self.jg.connect_queues()
+ self.jg.vm_manager = MagicMock()
def test_connect_queues(self, mc_retask_queue, mc_grc):
mc_rc = MagicMock()
@@ -179,13 +183,13 @@ class TestJobGrab(object):
for obj in self.jg.task_queues_by_arch.values():
assert not obj.enqueue.called
- def test_route_build_task_correct_group_1(self, init_jg):
+ def test_route_build_task_correct_group_1(self, init_jg,):
assert self.jg.route_build_task(self.task_dict_1) == 1
assert self.jg.task_queues_by_arch["x86_64"].enqueue.called
assert not self.jg.task_queues_by_arch["armv7"].enqueue.called
- def test_route_build_task_correct_group_2(self, init_jg):
+ def test_route_build_task_correct_group_2(self, init_jg, ):
assert self.jg.route_build_task(self.task_dict_2) == 1
assert not self.jg.task_queues_by_arch["x86_64"].enqueue.called
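A small aside on the writelines() hunk in mockremote/__init__.py above: writelines() adds no separators of its own, which is why the explicit "\n" matters. An illustrative snippet with made-up values:

    from io import StringIO

    info = StringIO()
    info.writelines([u"build_id={0}".format(42),
                     u"\nbuilder_ip={0}".format("203.0.113.5")])
    print(info.getvalue())
    # build_id=42
    # builder_ip=203.0.113.5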
[copr] master: [backend] replaced Thresholds.dirty_vm_terminating_timeout with config option vm_dirty_terminating_timeout (f801e58)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit f801e5809371feb7aad5a188830995bc2da164c0
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Thu Apr 16 16:05:50 2015 +0200
[backend] replaced Thresholds.dirty_vm_terminating_timeout with config option vm_dirty_terminating_timeout
>---------------------------------------------------------------
backend/backend/daemons/vm_master.py | 2 +-
backend/backend/helpers.py | 4 +++-
backend/backend/vm_manage/__init__.py | 1 -
backend/backend/vm_manage/models.py | 4 ++--
backend/tests/deamons/test_vm_master.py | 5 ++++-
5 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/backend/backend/daemons/vm_master.py b/backend/backend/daemons/vm_master.py
index 7f3bbe0..bc4080f 100644
--- a/backend/backend/daemons/vm_master.py
+++ b/backend/backend/daemons/vm_master.py
@@ -49,7 +49,7 @@ class VmMaster(Process):
if last_release is None:
continue
not_re_acquired_in = time.time() - float(last_release)
- if not_re_acquired_in > Thresholds.dirty_vm_terminating_timeout:
+ if not_re_acquired_in > self.opts.build_groups[vmd.group]["vm_dirty_terminating_timeout"]:
self.log.info("dirty VM `{}` not re-acquired in {}, terminating it"
.format(vmd.vm_name, not_re_acquired_in))
self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.READY)
diff --git a/backend/backend/helpers.py b/backend/backend/helpers.py
index dc97854..e6571d6 100644
--- a/backend/backend/helpers.py
+++ b/backend/backend/helpers.py
@@ -163,7 +163,9 @@ class BackendConfigReader(object):
"vm_spawn_min_interval": _get_conf(
cp, "backend", "group{}_vm_spawn_min_interval".format(group_id),
default=30, mode="int"),
-
+ "vm_dirty_terminating_timeout": _get_conf(
+ cp, "backend", "group{}_vm_dirty_terminating_timeout".format(group_id),
+ default=120, mode="int"),
}
opts.build_groups.append(group)
diff --git a/backend/backend/vm_manage/__init__.py b/backend/backend/vm_manage/__init__.py
index 2532853..d2f781a 100644
--- a/backend/backend/vm_manage/__init__.py
+++ b/backend/backend/vm_manage/__init__.py
@@ -47,5 +47,4 @@ class Thresholds(object):
health_check_max_time = 120 # [s.] if health check wasn't done for this time, mark check fail
max_check_fails = 2 # maximum number of fails before starting VM termination
terminating_timeout = 600 # [s.] time before we try to terminate VM again
- dirty_vm_terminating_timeout = 20 # [s.] how long we keep released vms
cycle_timeout = 10 # [s.] timeout for all periodical checks
diff --git a/backend/backend/vm_manage/models.py b/backend/backend/vm_manage/models.py
index de72b72..17b4a0f 100644
--- a/backend/backend/vm_manage/models.py
+++ b/backend/backend/vm_manage/models.py
@@ -10,7 +10,7 @@ class VmDescriptor(object):
self.vm_ip = vm_ip
self.vm_name = vm_name
self.state = state
- self.group = group
+ self.group = int(group)
self.check_fails = 0
@@ -34,7 +34,7 @@ class VmDescriptor(object):
@classmethod
def from_dict(cls, raw):
- vmd = cls(raw["vm_ip"], raw["vm_name"], raw["group"], raw["state"])
+ vmd = cls(raw.pop("vm_ip"), raw.pop("vm_name"), raw.pop("group"), raw.pop("state"))
vmd.__dict__.update(raw)
return vmd
diff --git a/backend/tests/deamons/test_vm_master.py b/backend/tests/deamons/test_vm_master.py
index 61edf8e..b31e397 100644
--- a/backend/tests/deamons/test_vm_master.py
+++ b/backend/tests/deamons/test_vm_master.py
@@ -91,11 +91,13 @@ class TestVmMaster(object):
"max_vm_total": 5,
"max_spawn_processes": 3,
"vm_spawn_min_interval": self.vm_spawn_min_interval,
+ "vm_dirty_terminating_timeout": 120,
},
1: {
"name": "arm",
"archs": ["armV7"],
"vm_spawn_min_interval": self.vm_spawn_min_interval,
+ "vm_dirty_terminating_timeout": 120,
}
},
@@ -187,7 +189,8 @@ class TestVmMaster(object):
self.vm_master.remove_old_dirty_vms()
assert not self.vmm.start_vm_termination.called
- mc_time.time.return_value = Thresholds.dirty_vm_terminating_timeout + 1
+ mc_time.time.return_value = self.opts.build_groups[0]["vm_dirty_terminating_timeout"] + 1
+
# only "a1" and "b1" should be terminated
self.vm_master.remove_old_dirty_vms()
assert self.vmm.start_vm_termination.called
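The new option follows the usual _get_conf() pattern of a per-group key with an integer default. Below is a rough sketch of that lookup using only the Python 3 stdlib configparser; the real helper in backend/helpers.py has a richer signature, so treat this as an approximation.

    from configparser import ConfigParser

    cp = ConfigParser()
    cp.read_string(u"[backend]\ngroup0_max_vm_total = 8\n")

    def get_int_option(cp, section, option, default):
        # same idea as _get_conf(..., default=..., mode="int")
        if cp.has_option(section, option):
            return cp.getint(section, option)
        return default

    print(get_int_option(cp, "backend", "group0_vm_dirty_terminating_timeout", 120))  # -> 120
    print(get_int_option(cp, "backend", "group0_max_vm_total", 8))                    # -> 8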