Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
commit 5c46b75de4667e6c0ea527ab9cbc97f50ea1c702
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date:   Fri Nov 28 14:04:52 2014 +0100
[backend] split copr-be.py into more files
 backend/backend/daemons/__init__.py |   5 +
 backend/backend/daemons/backend.py  | 182 +++++++++++++++++++
 backend/backend/daemons/job_grab.py |  97 ++++++++++
 backend/backend/daemons/log.py      |  68 +++++++
 backend/run/copr-be.py              | 335 +----------------------------------
 5 files changed, 354 insertions(+), 333 deletions(-)
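Before the diffs, a minimal sketch of the call path this commit leaves behind:
run/copr-be.py keeps only option parsing and delegates to the new
backend.daemons package. Here parse_args stands for the existing optparse
helper in run/copr-be.py (unchanged by this commit); the option names are the
ones used in the diffs below:

    import sys

    from backend.daemons import run_backend  # re-exported by daemons/__init__.py

    def main(args):
        opts = parse_args(args)  # optparse -> Bunch with config_file, pidfile, daemonize, ...
        run_backend(opts)        # daemonizes, then runs CoprBackend(opts.config_file).run()

    if __name__ == "__main__":
        main(sys.argv[1:])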
diff --git a/backend/backend/daemons/__init__.py b/backend/backend/daemons/__init__.py
new file mode 100644
index 0000000..d60155d
--- /dev/null
+++ b/backend/backend/daemons/__init__.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+
+from .job_grab import CoprJobGrab
+from .log import CoprLog
+from .backend import CoprBackend, run_backend
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
new file mode 100644
index 0000000..ff942c8
--- /dev/null
+++ b/backend/backend/daemons/backend.py
@@ -0,0 +1,182 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import grp
+import multiprocessing
+import os
+import pwd
+import signal
+import sys
+import time
+from collections import defaultdict
+
+import lockfile
+import daemon
+from retask.queue import Queue
+from retask import ConnectionError
+
+from ..exceptions import CoprBackendError
+from ..dispatcher import Worker
+from ..helpers import BackendConfigReader
+from . import CoprJobGrab, CoprLog
+
+
+class CoprBackend(object):
+
+    """
+    Core process - starts/stops/initializes workers
+    """
+
+    def __init__(self, config_file=None, ext_opts=None):
+        # read in config file
+        # put all the config items into a single self.opts bunch
+
+        if not config_file:
+            raise CoprBackendError("Must specify config_file")
+
+        self.config_file = config_file
+        self.ext_opts = ext_opts  # to stow our cli options for read_conf()
+        self.workers_by_group_id = defaultdict(list)
+        self.max_worker_num_by_group_id = defaultdict(int)
+
+        self.config_reader = BackendConfigReader(self.config_file, self.ext_opts)
+        self.opts = None
+        self.update_conf()
+
+        self.lock = multiprocessing.Lock()
+
+        self.task_queues = []
+        try:
+            for group in self.opts.build_groups:
+                group_id = group["id"]
+                self.task_queues.append(Queue("copr-be-{0}".format(group_id)))
+                self.task_queues[group_id].connect()
+        except ConnectionError:
+            raise CoprBackendError(
+                "Could not connect to a task queue. Is Redis running?")
+
+        # make sure there is nothing in our task queues
+        self.clean_task_queues()
+
+        self.events = multiprocessing.Queue()
+        # event format is a dict {when:time, who:[worker|logger|job|main],
+        # what:str}
+
+        # create logger
+        self._logger = CoprLog(self.opts, self.events)
+        self._logger.start()
+
+        self.event("Starting up Job Grabber")
+        # create job grabber
+        self._jobgrab = CoprJobGrab(self.opts, self.events, self.lock)
+        self._jobgrab.start()
+        self.abort = False
+
+        if not os.path.exists(self.opts.worker_logdir):
+            os.makedirs(self.opts.worker_logdir, mode=0o750)
+
+    def event(self, what):
+        self.events.put({"when": time.time(), "who": "main", "what": what})
+
+    def update_conf(self):
+        self.opts = self.config_reader.read()
+
+    def clean_task_queues(self):
+        try:
+            for queue in self.task_queues:
+                while queue.length:
+                    queue.dequeue()
+        except ConnectionError:
+            raise CoprBackendError(
+                "Could not connect to a task queue. Is Redis running?")
+
+    def run(self):
+        self.abort = False
+        while not self.abort:
+            # re-read config into opts
+            self.update_conf()
+
+            for group in self.opts.build_groups:
+                group_id = group["id"]
+                self.event(
+                    "# jobs in {0} queue: {1}"
+                    .format(group["name"], self.task_queues[group_id].length)
+                )
+                # this handles starting/growing the number of workers
+                if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
+                    self.event("Spinning up more workers")
+                    for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
+                        self.max_worker_num_by_group_id[group_id] += 1
+                        w = Worker(
+                            self.opts, self.events,
+                            self.max_worker_num_by_group_id[group_id],
+                            group_id, lock=self.lock
+                        )
+
+                        self.workers_by_group_id[group_id].append(w)
+                        w.start()
+                    self.event("Finished starting worker processes")
+                # FIXME - prune out workers
+                # if len(self.workers) > self.opts.num_workers:
+                #     killnum = len(self.workers) - self.opts.num_workers
+                #     for w in self.workers[:killnum]:
+                #         insert a poison pill? Kill after something? I dunno.
+                # FIXME - if a worker bombs out - we need to check them
+                # and startup a new one if it happens
+                # check for dead workers and abort
+                preserved_workers = []
+                for w in self.workers_by_group_id[group_id]:
+                    if not w.is_alive():
+                        self.event("Worker {0} died unexpectedly".format(w.worker_num))
+                        if self.opts.exit_on_worker:
+                            raise CoprBackendError(
+                                "Worker died unexpectedly, exiting")
+                        else:
+                            w.terminate()  # kill it with a fire
+                    else:
+                        preserved_workers.append(w)
+                self.workers_by_group_id[group_id] = preserved_workers
+
+            time.sleep(self.opts.sleeptime)
+
+    def terminate(self):
+        """
+        Cleanup backend processes (just workers for now)
+        And also clean all task queues as they would survive copr restart
+        """
+
+        self.abort = True
+        for group in self.opts.build_groups:
+            group_id = group["id"]
+            for w in self.workers_by_group_id[group_id]:
+                self.workers_by_group_id[group_id].remove(w)
+                w.terminate()
+        self.clean_task_queues()
+
+
+def run_backend(opts):
+    try:
+        context = daemon.DaemonContext(
+            pidfile=lockfile.FileLock(opts.pidfile),
+            gid=grp.getgrnam("copr").gr_gid,
+            uid=pwd.getpwnam("copr").pw_uid,
+            detach_process=opts.daemonize,
+            umask=0o22,
+            stderr=sys.stderr,
+            signal_map={
+                signal.SIGTERM: "terminate",
+                signal.SIGHUP: "terminate",
+            },
+        )
+        with context:
+            cbe = CoprBackend(opts.config_file, ext_opts=opts)
+            cbe.run()
+    except (Exception, KeyboardInterrupt):
+        sys.stderr.write("Killing/Dying\n")
+        if "cbe" in locals():
+            cbe.terminate()
+        raise
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
new file mode 100644
index 0000000..c9accd4
--- /dev/null
+++ b/backend/backend/daemons/job_grab.py
@@ -0,0 +1,97 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import multiprocessing
+import time
+import setproctitle
+
+import requests
+from retask.task import Task
+from retask.queue import Queue
+
+from backend.actions import Action
+from backend.frontend import FrontendClient
+
+
+class CoprJobGrab(multiprocessing.Process):
+
+    """
+    Fetch jobs from the Frontend
+    - submit them to the jobs queue for workers
+    """
+
+    def __init__(self, opts, events, lock):
+        # base class initialization
+        multiprocessing.Process.__init__(self, name="jobgrab")
+
+        self.opts = opts
+        self.events = events
+        self.task_queues = []
+        for group in self.opts.build_groups:
+            self.task_queues.append(Queue("copr-be-{0}".format(group["id"])))
+            self.task_queues[group["id"]].connect()
+        self.added_jobs = []
+        self.lock = lock
+
+    def event(self, what):
+        self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
+
+    def load_tasks(self):
+        try:
+            r = requests.get(
+                "{0}/waiting/".format(self.opts.frontend_url),
+                auth=("user", self.opts.frontend_auth))
+            r_json = r.json()
+
+        except requests.RequestException as e:
+            self.event("Error retrieving jobs from {0}: {1}".format(
+                self.opts.frontend_url, e))
+            return
+
+        except ValueError as e:
+            self.event("Error getting JSON build list from FE {0}"
+                       .format(e))
+            return
+
+        if "builds" in r_json and r_json["builds"]:
+            self.event("{0} jobs returned".format(len(r_json["builds"])))
+            count = 0
+            for task in r_json["builds"]:
+                if "task_id" in task and task["task_id"] not in self.added_jobs:
+                    # this will ignore and throw away unconfigured architectures
+                    # FIXME: don't do ^
+                    arch = task["chroot"].split("-")[2]
+                    for group in self.opts.build_groups:
+                        if arch in group["archs"]:
+                            self.added_jobs.append(task["task_id"])
+                            task_obj = Task(task)
+                            self.task_queues[group["id"]].enqueue(task_obj)
+                            count += 1
+                            break
+            if count:
+                self.event("New jobs: %s" % count)
+
+        if "actions" in r_json and r_json["actions"]:
+            self.event("{0} actions returned".format(
+                len(r_json["actions"])))
+
+            for action in r_json["actions"]:
+                ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
+                            frontend_callback=FrontendClient(self.opts, self.events),
+                            front_url=self.opts.frontend_base_url,
+                            results_root_url=self.opts.results_baseurl)
+                ao.run()
+
+    def run(self):
+        setproctitle.setproctitle("CoprJobGrab")
+        abort = False
+        try:
+            while not abort:
+                self.load_tasks()
+                time.sleep(self.opts.sleeptime)
+        except KeyboardInterrupt:
+            return
diff --git a/backend/backend/daemons/log.py b/backend/backend/daemons/log.py
new file mode 100644
index 0000000..f8a128c
--- /dev/null
+++ b/backend/backend/daemons/log.py
@@ -0,0 +1,68 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import os
+import sys
+import time
+import setproctitle
+
+
+class CoprLog(multiprocessing.Process):
+
+    """log mechanism where items from the events queue get recorded"""
+
+    def __init__(self, opts, events):
+
+        # base class initialization
+        multiprocessing.Process.__init__(self, name="logger")
+
+        self.opts = opts
+        self.events = events
+
+        logdir = os.path.dirname(self.opts.logfile)
+        if not os.path.exists(logdir):
+            os.makedirs(logdir, mode=0o750)
+
+    def setup_log_handler(self):
+        sys.stderr.write("Running setup handler {} \n".format(self.opts))
+        # setup a log file to write to
+        logging.basicConfig(filename=self.opts.logfile, level=logging.DEBUG)
+
+        self.log({"when": time.time(), "who": self.__class__.__name__, "what": "Logger initiated"})
+
+    def log(self, event):
+
+        when = time.strftime("%F %T", time.gmtime(event["when"]))
+        msg = "{0} : {1}: {2}".format(when,
+                                      event["who"],
+                                      event["what"].strip())
+        try:
+            if self.opts.verbose:
+                sys.stderr.write("{0}\n".format(msg))
+                sys.stderr.flush()
+            logging.debug(msg)
+
+        except (IOError, OSError) as e:
+
+            sys.stderr.write("Could not write to logfile {0} - {1}\n".format(
+                self.opts.logfile, e))
+
+    # event format is a dict {when:time, who:[worker|logger|job|main],
+    # what:str}
+    def run(self):
+        setproctitle.setproctitle("CoprLog")
+        self.setup_log_handler()
+        abort = False
+        try:
+            while not abort:
+                e = self.events.get()
+                if "when" in e and "who" in e and "what" in e:
+                    self.log(e)
+        except KeyboardInterrupt:
+            return
diff --git a/backend/run/copr-be.py b/backend/run/copr-be.py
index bdca6e8..61f62b8 100755
--- a/backend/run/copr-be.py
+++ b/backend/run/copr-be.py
@@ -5,323 +5,13 @@ from __future__ import unicode_literals
 from __future__ import division
 from __future__ import absolute_import
 
-import ConfigParser
-import grp
-import lockfile
-import logging
-import multiprocessing
 import optparse
 import os
-import pwd
-import signal
 import sys
-import time
-from collections import defaultdict
 
-import daemon
-import requests
-import setproctitle
 from bunch import Bunch
-from retask.task import Task
-from retask.queue import Queue
-from retask import ConnectionError
 
-from backend.exceptions import CoprBackendError
-from backend.dispatcher import Worker
-from backend.actions import Action
-from backend.frontend import FrontendClient
-from backend.helpers import BackendConfigReader
-
-
-def _get_conf(cp, section, option, default, mode=None):
-    """
-    To make returning items from config parser less irritating
-
-    :param mode: convert obtained value, possible modes:
-      - None (default): do nothing
-      - "bool" or "boolean"
-      - "int"
-      - "float"
-    """
-
-    if cp.has_section(section) and cp.has_option(section, option):
-        if mode is None:
-            return cp.get(section, option)
-        elif mode in ["bool", "boolean"]:
-            return cp.getboolean(section, option)
-        elif mode == "int":
-            return cp.getint(section, option)
-        elif mode == "float":
-            return cp.getfloat(section, option)
-    return default
-
-
-class CoprJobGrab(multiprocessing.Process):
-
-    """
-    Fetch jobs from the Frontend
-    - submit them to the jobs queue for workers
-    """
-
-    def __init__(self, opts, events, lock):
-        # base class initialization
-        multiprocessing.Process.__init__(self, name="jobgrab")
-
-        self.opts = opts
-        self.events = events
-        self.task_queues = []
-        for group in self.opts.build_groups:
-            self.task_queues.append(Queue("copr-be-{0}".format(group["id"])))
-            self.task_queues[group["id"]].connect()
-        self.added_jobs = []
-        self.lock = lock
-
-    def event(self, what):
-        self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
-
-    def load_tasks(self):
-        try:
-            r = requests.get(
-                "{0}/waiting/".format(self.opts.frontend_url),
-                auth=("user", self.opts.frontend_auth))
-            r_json = r.json()
-
-        except requests.RequestException as e:
-            self.event("Error retrieving jobs from {0}: {1}".format(
-                self.opts.frontend_url, e))
-            return
-
-        except ValueError as e:
-            self.event("Error getting JSON build list from FE {0}"
-                       .format(e))
-            return
-
-        if "builds" in r_json and r_json["builds"]:
-            self.event("{0} jobs returned".format(len(r_json["builds"])))
-            count = 0
-            for task in r_json["builds"]:
-                if "task_id" in task and task["task_id"] not in self.added_jobs:
-                    # this will ignore and throw away unconfigured architectures
-                    # FIXME: don't do ^
-                    arch = task["chroot"].split("-")[2]
-                    for group in self.opts.build_groups:
-                        if arch in group["archs"]:
-                            self.added_jobs.append(task["task_id"])
-                            task_obj = Task(task)
-                            self.task_queues[group["id"]].enqueue(task_obj)
-                            count += 1
-                            break
-            if count:
-                self.event("New jobs: %s" % count)
-
-        if "actions" in r_json and r_json["actions"]:
-            self.event("{0} actions returned".format(
-                len(r_json["actions"])))
-
-            for action in r_json["actions"]:
-                ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
-                            frontend_callback=FrontendClient(self.opts, self.events),
-                            front_url=self.opts.frontend_base_url,
-                            results_root_url=self.opts.results_baseurl)
-                ao.run()
-
-    def run(self):
-        setproctitle.setproctitle("CoprJobGrab")
-        abort = False
-        try:
-            while not abort:
-                self.load_tasks()
-                time.sleep(self.opts.sleeptime)
-        except KeyboardInterrupt:
-            return
-
-
-class CoprLog(multiprocessing.Process):
-
-    """log mechanism where items from the events queue get recorded"""
-
-    def __init__(self, opts, events):
-
-        # base class initialization
-        multiprocessing.Process.__init__(self, name="logger")
-
-        self.opts = opts
-        self.events = events
-
-        logdir = os.path.dirname(self.opts.logfile)
-        if not os.path.exists(logdir):
-            os.makedirs(logdir, mode=0o750)
-
-    def setup_log_handler(self):
-        sys.stderr.write("Running setup handler {} \n".format(self.opts))
-        # setup a log file to write to
-        logging.basicConfig(filename=self.opts.logfile, level=logging.DEBUG)
-
-        self.log({"when": time.time(), "who": self.__class__.__name__, "what": "Logger iniated"})
-
-    def log(self, event):
-
-        when = time.strftime("%F %T", time.gmtime(event["when"]))
-        msg = "{0} : {1}: {2}".format(when,
-                                      event["who"],
-                                      event["what"].strip())
-        try:
-            if self.opts.verbose:
-                sys.stderr.write("{0}\n".format(msg))
-                sys.stderr.flush()
-            logging.debug(msg)
-
-        except (IOError, OSError) as e:
-
-            sys.stderr.write("Could not write to logfile {0} - {1}\n".format(
-                self.logfile, e))
-
-    # event format is a dict {when:time, who:[worker|logger|job|main],
-    # what:str}
-    def run(self):
-        setproctitle.setproctitle("CoprLog")
-        self.setup_log_handler()
-        abort = False
-        try:
-            while not abort:
-                e = self.events.get()
-                if "when" in e and "who" in e and "what" in e:
-                    self.log(e)
-        except KeyboardInterrupt:
-            return
-
-
-class CoprBackend(object):
-
-    """
-    Core process - starts/stops/initializes workers
-    """
-
-    def __init__(self, config_file=None, ext_opts=None):
-        # read in config file
-        # put all the config items into a single self.opts bunch
-
-        if not config_file:
-            raise CoprBackendError("Must specify config_file")
-
-        self.config_file = config_file
-        self.ext_opts = ext_opts  # to stow our cli options for read_conf()
-        self.workers_by_group_id = defaultdict(list)
-        self.max_worker_num_by_group_id = defaultdict(int)
-
-        self.config_reader = BackendConfigReader(self.config_file, self.ext_opts)
-        self.opts = None
-        self.update_conf()
-
-        self.lock = multiprocessing.Lock()
-
-        self.task_queues = []
-        try:
-            for group in self.opts.build_groups:
-                group_id = group["id"]
-                self.task_queues.append(Queue("copr-be-{0}".format(group_id)))
-                self.task_queues[group_id].connect()
-        except ConnectionError:
-            raise CoprBackendError(
-                "Could not connect to a task queue. Is Redis running?")
-
-        # make sure there is nothing in our task queues
-        self.clean_task_queues()
-
-        self.events = multiprocessing.Queue()
-        # event format is a dict {when:time, who:[worker|logger|job|main],
-        # what:str}
-
-        # create logger
-        self._logger = CoprLog(self.opts, self.events)
-        self._logger.start()
-
-        self.event("Starting up Job Grabber")
-        # create job grabber
-        self._jobgrab = CoprJobGrab(self.opts, self.events, self.lock)
-        self._jobgrab.start()
-        self.abort = False
-
-        if not os.path.exists(self.opts.worker_logdir):
-            os.makedirs(self.opts.worker_logdir, mode=0o750)
-
-    def event(self, what):
-        self.events.put({"when": time.time(), "who": "main", "what": what})
-
-    def update_conf(self):
-        self.opts = self.config_reader.read()
-
-    def clean_task_queues(self):
-        try:
-            for queue in self.task_queues:
-                while queue.length:
-                    queue.dequeue()
-        except ConnectionError:
-            raise CoprBackendError(
-                "Could not connect to a task queue. Is Redis running?")
-
-    def run(self):
-        self.abort = False
-        while not self.abort:
-            # re-read config into opts
-            self.update_conf()
-
-            for group in self.opts.build_groups:
-                group_id = group["id"]
-                self.event(
-                    "# jobs in {0} queue: {1}"
-                    .format(group["name"], self.task_queues[group_id].length)
-                )
-                # this handles starting/growing the number of workers
-                if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
-                    self.event("Spinning up more workers")
-                    for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
-                        self.max_worker_num_by_group_id[group_id] += 1
-                        w = Worker(
-                            self.opts, self.events,
-                            self.max_worker_num_by_group_id[group_id],
-                            group_id, lock=self.lock
-                        )
-
-                        self.workers_by_group_id[group_id].append(w)
-                        w.start()
-                    self.event("Finished starting worker processes")
-                # FIXME - prune out workers
-                # if len(self.workers) > self.opts.num_workers:
-                #     killnum = len(self.workers) - self.opts.num_workers
-                #     for w in self.workers[:killnum]:
-                #         insert a poison pill? Kill after something? I dunno.
-                # FIXME - if a worker bombs out - we need to check them
-                # and startup a new one if it happens
-                # check for dead workers and abort
-                preserved_workers = []
-                for w in self.workers_by_group_id[group_id]:
-                    if not w.is_alive():
-                        self.event("Worker {0} died unexpectedly".format(w.worker_num))
-                        if self.opts.exit_on_worker:
-                            raise CoprBackendError(
-                                "Worker died unexpectedly, exiting")
-                        else:
-                            w.terminate()  # kill it with a fire
-                    else:
-                        preserved_workers.append(w)
-                self.workers_by_group_id[group_id] = preserved_workers
-
-            time.sleep(self.opts.sleeptime)
-
-    def terminate(self):
-        """
-        Cleanup backend processes (just workers for now)
-        And also clean all task queues as they would survive copr restart
-        """
-
-        self.abort = True
-        for group in self.opts.build_groups:
-            group_id = group["id"]
-            for w in self.workers_by_group_id[group_id]:
-                self.workers_by_group_id[group_id].remove(w)
-                w.terminate()
-        self.clean_task_queues()
+from backend.daemons import run_backend
 
 
 def parse_args(args):
@@ -356,28 +46,7 @@ def parse_args(args):
 def main(args):
     opts = parse_args(args)
-
-    try:
-        context = daemon.DaemonContext(
-            pidfile=lockfile.FileLock(opts.pidfile),
-            gid=grp.getgrnam("copr").gr_gid,
-            uid=pwd.getpwnam("copr").pw_uid,
-            detach_process=opts.daemonize,
-            umask=0o22,
-            stderr=sys.stderr,
-            signal_map={
-                signal.SIGTERM: "terminate",
-                signal.SIGHUP: "terminate",
-            },
-        )
-        with context:
-            cbe = CoprBackend(opts.config_file, ext_opts=opts)
-            cbe.run()
-    except (Exception, KeyboardInterrupt):
-        sys.stderr.write("Killing/Dying\n")
-        if "cbe" in locals():
-            cbe.terminate()
-        raise
+    run_backend(opts)
 
 
 if __name__ == "__main__":
     try:
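The queue protocol this split preserves: CoprJobGrab enqueues one retask Task
per frontend build into a per-group, Redis-backed queue, which the dispatcher
Workers consume. A minimal sketch of that round trip, assuming a local Redis
instance and the "copr-be-<group_id>" queue-name convention from the diffs
above (the task payload here is a made-up example, not a real frontend build):

    from retask.queue import Queue
    from retask.task import Task

    group_id = 0
    queue = Queue("copr-be-{0}".format(group_id))
    if not queue.connect():
        raise SystemExit("Could not connect to a task queue. Is Redis running?")

    # producer side (CoprJobGrab.load_tasks): one Task per build dict from the frontend
    queue.enqueue(Task({"task_id": "hypothetical-id", "chroot": "fedora-21-x86_64"}))

    # consumer side (a dispatcher Worker): drain whatever is pending
    while queue.length:
        task = queue.dequeue()
        print(task.data["task_id"])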