[copr] master: Merge branch 'master' of ssh://git.fedorahosted.org/git/copr (d6baf2b)

bkabrda at fedoraproject.org
Fri Jan 18 15:06:53 UTC 2013


Repository : http://git.fedorahosted.org/cgit/copr.git

On branch  : master

>---------------------------------------------------------------

commit d6baf2b77e6bdf2fa001b89e711d2e5dddb44389
Merge: 44e1d1d f6508e3
Author: Bohuslav Kabrda <bkabrda at redhat.com>
Date:   Fri Jan 18 16:05:45 2013 +0100

    Merge branch 'master' of ssh://git.fedorahosted.org/git/copr



>---------------------------------------------------------------

 backend/dispatcher.py |   42 +++++++++----
 backend/mockremote.py |   10 ++-
 copr-be.py            |  165 +++++++++++++++++++++++++++++++++----------------
 3 files changed, 147 insertions(+), 70 deletions(-)
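
The common thread through all three files is a new multiprocessing event queue: Worker (dispatcher.py), CoprJobGrab, and the main CoprBackend process all put event records on it, and a new CoprLog process drains it to the logfile. A minimal sketch of the record format, taken from the comment this commit adds in copr-be.py ("event format is a dict {when:time, who:[worker|logger|job|main], what:str}"); the snippet itself is illustrative, not code from the commit:

    import multiprocessing
    import time

    events = multiprocessing.Queue()
    # every producer in this diff emits the same three-key dict:
    events.put({'when': time.time(), 'who': 'main',
                'what': 'Starting up Job Grabber'})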

diff --git a/backend/dispatcher.py b/backend/dispatcher.py
index 92395ce..5fa6aa0 100644
--- a/backend/dispatcher.py
+++ b/backend/dispatcher.py
@@ -68,18 +68,16 @@ class WorkerCallback(object):
         self.logfile = logfile
     
     def log(self, msg):
-        if not self.logfile:
-            return
-            
-        now = time.strftime('%F %T')
-        try:
-            open(self.logfile, 'a').write(str(now) + ': ' + msg + '\n')
-        except (IOError, OSError), e:
-            print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))
+        if self.logfile:
+            now = time.strftime('%F %T')
+            try:
+                open(self.logfile, 'a').write(str(now) + ': ' + msg + '\n')
+            except (IOError, OSError), e:
+                print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))
             
         
 class Worker(multiprocessing.Process):
-    def __init__(self, opts, jobs, worker_num, ip=None, create=True, callback=None):
+    def __init__(self, opts, jobs, events, worker_num, ip=None, create=True, callback=None):
  
         # base class initialization
         multiprocessing.Process.__init__(self, name="worker-builder")
@@ -87,6 +85,7 @@ class Worker(multiprocessing.Process):
             
         # job management stuff
         self.jobs = jobs
+        self.events = events # event queue for communicating back to dispatcher
         self.worker_num = worker_num
         self.ip = ip
         self.opts = opts
@@ -99,11 +98,23 @@ class Worker(multiprocessing.Process):
         
         if ip:
             self.callback.log('creating worker: %s' % ip)
+            self.event('creating worker: %s' % ip)
         else:
             self.callback.log('creating worker: dynamic ip')
+            self.event('creating worker: dynamic ip')
+
+    def event(self, what):
+        if self.ip:
+            who = 'worker-%s-%s' % (self.worker_num, self.ip)
+        else:
+            who = 'worker-%s' % (self.worker_num)
+        
+        self.events.put({'when':time.time(), 'who':who, 'what':what})
 
     def spawn_instance(self):
         """call the spawn playbook to startup/provision a building instance"""
+        
+        
         self.callback.log('spawning instance begin')
         start = time.time()
         
@@ -143,7 +154,7 @@ class Worker(multiprocessing.Process):
         stats = callbacks.AggregateStats()
         playbook_cb = SilentPlaybookCallbacks(verbose=False)
         runner_cb = callbacks.DefaultRunnerCallbacks()
-        play = ansible.playbook.PlayBook(host_list=[ip], stats=stats, playbook=self.opts.terminate_playbook, 
+        play = ansible.playbook.PlayBook(host_list=ip +',', stats=stats, playbook=self.opts.terminate_playbook, 
                              callbacks=playbook_cb, runner_callbacks=runner_cb, 
                              remote_user='root')
 
@@ -167,6 +178,8 @@ class Worker(multiprocessing.Process):
         jobdata.repos.append(jobdata.results)
         jobdata.copr_id = build['copr']['id']
         jobdata.user_id = build['user_id']
+        jobdata.user_name = build['copr']['owner']['name']
+        jobdata.copr_name = build['copr']['name']
         return jobdata
 
     # maybe we move this to the callback?
@@ -195,6 +208,7 @@ class Worker(multiprocessing.Process):
     # maybe we move this to the callback?
     def mark_started(self, job):
         
+
         build = {'id':job.build_id,
                  'started_on': job.started_on,
                  'results': job.results,
@@ -206,8 +220,8 @@ class Worker(multiprocessing.Process):
     
     # maybe we move this to the callback?    
     def return_results(self, job):
-        self.callback.log('%s status %s. Took %s seconds' % (job.build_id, job.status, job.ended_on - job.started_on))
 
+        self.callback.log('%s status %s. Took %s seconds' % (job.build_id, job.status, job.ended_on - job.started_on))
         build = {'id':job.build_id,
                  'ended_on': job.ended_on,
                  'status': job.status,
@@ -256,8 +270,10 @@ class Worker(multiprocessing.Process):
             job.started_on = time.time()
             self.mark_started(job)
             
-            for chroot in job.chroots:
+            self.event('build start: user:%s copr:%s build:%s ip:%s  pid:%s' % (job.user_name, job.copr_name, job.build_id, ip, self.pid))            
                 
+            for chroot in job.chroots:
+                self.event('chroot start: chroot:%s user:%s copr:%s build:%s ip:%s  pid:%s' % (chroot, job.user_name, job.copr_name, job.build_id, ip, self.pid))            
                 chroot_destdir = job.destdir + '/' + chroot
                 # setup our target dir locally
                 if not os.path.exists(chroot_destdir):
@@ -296,12 +312,12 @@ class Worker(multiprocessing.Process):
                     if mr.failed: 
                         status = 0
                 self.callback.log('Finished build: id=%r builder=%r timeout=%r destdir=%r chroot=%r repos=%r' % (job.build_id, ip, job.timeout, job.destdir, chroot, str(job.repos)))
-            
             job.ended_on = time.time()
             
             job.status = status
             self.return_results(job)
             self.callback.log('worker finished build: %s' % ip)
+            self.event('build end: user:%s copr:%s build:%s ip:%s  pid:%s status:%s' % (job.user_name, job.copr_name, job.build_id, ip, self.pid, job.status))
             # clean up the instance
             if self.create:
                 self.terminate_instance(ip)
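
Worker.event() (added above) derives a sender id from the worker number plus the instance IP when one is known, then enqueues the record. A standalone sketch of that helper, assuming the events queue created in copr-be.py below; the names mirror the diff but the free function is hypothetical:

    import time

    def worker_event(events, worker_num, ip, what):
        # sender id mirrors Worker.event() above: include the ip when known
        if ip:
            who = 'worker-%s-%s' % (worker_num, ip)
        else:
            who = 'worker-%s' % worker_num
        events.put({'when': time.time(), 'who': who, 'what': what})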
diff --git a/backend/mockremote.py b/backend/mockremote.py
index 7017a51..2e1e19f 100755
--- a/backend/mockremote.py
+++ b/backend/mockremote.py
@@ -96,7 +96,7 @@ def get_ans_results(results, hostname):
 
 def _create_ans_conn(hostname, username, timeout):
     ans_conn = ansible.runner.Runner(remote_user=username,
-          host_list=[hostname], pattern=hostname, forks=1,
+          host_list=hostname + ',', pattern=hostname, forks=1,
           timeout=timeout)
     return ans_conn
     
@@ -203,7 +203,7 @@ class CliLogCallBack(DefaultCallBack):
             try:
                 open(self.logfn, 'a').write(str(now) + ':' + msg + '\n')
             except (IOError, OSError), e:
-                print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.lf, str(e))
+                print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfn, str(e))
         if not self.quiet:
             print msg
 
@@ -299,6 +299,7 @@ class Builder(object):
         #print '  Running %s on %s' % (buildcmd, hostname)
         # run the mockchain command async
         # this runs it sync - FIXME
+        self.mockremote.callback.log('executing: %r' % buildcmd)
         self.conn.module_name="shell"
         self.conn.module_args = str(buildcmd)
         results = self.conn.run()
@@ -362,7 +363,8 @@ class Builder(object):
             raise BuilderError('%s could not be resolved' % self.hostname)
             
         # connect as user 
-        ans = ansible.runner.Runner(host_list=[self.hostname], pattern='*', 
+        
+        ans = ansible.runner.Runner(host_list=self.hostname + ',', pattern='*', 
               remote_user=self.username, forks=1, timeout=20)
         ans.module_name = "shell"
         ans.module_args = str("/bin/rpm -q mock rsync")
@@ -486,7 +488,7 @@ class MockRemote(object):
                 if not os.path.exists(self.destdir + '/' + self.chroot):
                     os.makedirs(self.destdir + '/' + self.chroot)
                 r_log = open(self.destdir + '/' + self.chroot + '/mockchain.log', 'a')
-                r_log.write('%s\n' % pkg)
+                r_log.write('\n\n%s\n\n' % pkg)
                 r_log.write(b_out)
                 if b_err:
                     r_log.write('\nstderr\n')
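
Both host_list changes above (and the matching one in dispatcher.py) replace a one-element Python list with a comma-terminated string. In the Ansible releases of this era, a host_list string containing a comma is parsed as a literal list of hosts rather than as a path to an inventory file, the same trick as `ansible -i 'host,'` on the command line. A sketch of the resulting call, using only parameters that appear in the diff and a hypothetical hostname:

    import ansible.runner

    hostname = 'builder.example.org'  # hypothetical host
    conn = ansible.runner.Runner(
          remote_user='root',
          host_list=hostname + ',',  # trailing comma: inline host list,
                                     # not an inventory file path
          pattern=hostname, forks=1, timeout=20)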
diff --git a/copr-be.py b/copr-be.py
index 6631ac4..dfcd4c0 100644
--- a/copr-be.py
+++ b/copr-be.py
@@ -21,7 +21,102 @@ def _get_conf(cp, section, option, default):
     return default
         
 
+
+class CoprJobGrab(multiprocessing.Process):
+    """Fetch jobs from the Frontend - submit them to the jobs queue for workers"""
+
+    def __init__(self, opts, events, jobs):
+        # base class initialization
+        multiprocessing.Process.__init__(self, name="jobgrab")
+
+        self.opts = opts
+        self.events = events
+        self.jobs = jobs
+        self.added_jobs = []
+
+    def event(self, what):
+        self.events.put({'when':time.time(), 'who':'jobgrab', 'what':what})
+        
+    def fetch_jobs(self):
+        try:
+            r = requests.get('%s/waiting_builds/' % self.opts.frontend_url) # auth stuff here? maybe/maybenot
+        except requests.RequestException, e:
+            self.event('Error retrieving jobs from %s: %s' % (self.opts.frontend_url, e))
+        else:
+            try:
+                r_json = json.loads(r.content) # using old requests on el6 :(
+            except ValueError, e:
+                self.event('Error getting JSON build list from FE %s' % e)
+                return
+            
+            if 'builds' in r_json and r_json['builds']:
+                self.event('%s jobs returned' % len(r_json['builds']))
+                count = 0
+                for b in r_json['builds']:
+                    if 'id' in b:
+                        jobfile = self.opts.jobsdir + '/%s.json' % b['id']
+                        if not os.path.exists(jobfile) and b['id'] not in self.added_jobs:
+                            count += 1
+                            open(jobfile, 'w').write(json.dumps(b))
+                            self.event('Wrote job: %s' % b['id'])
+                if count:
+                    self.event('New jobs: %s' % count)
+
+    def run(self):
+        abort = False
+        while not abort:
+            self.fetch_jobs()
+            for f in sorted(glob.glob(self.opts.jobsdir + '/*.json')):
+                n = os.path.basename(f).replace('.json', '')
+                if n not in self.added_jobs:
+                    self.jobs.put(f)
+                    self.added_jobs.append(n)
+                    self.event('adding to work queue id %s' % n)
+            time.sleep(self.opts.sleeptime)
+
+
+class CoprLog(multiprocessing.Process):
+    """log mechanism where items from the events queue get recorded"""
+    def __init__(self, opts, events):
+
+        # base class initialization
+        multiprocessing.Process.__init__(self, name="logger")
+
+        self.opts = opts
+        self.events = events
+
+        logdir = os.path.dirname(self.opts.logfile)
+        if not os.path.exists(logdir):
+            os.makedirs(logdir, mode=0750)
+
+        if not os.path.exists(self.opts.destdir):
+            os.makedirs(self.opts.destdir, mode=0755)
+
+        # setup a log file to write to
+        self.logfile = self.opts.logfile
+    
+    def log(self, event):
+        
+        when =  time.strftime('%F %T', time.gmtime(event['when']))
+        msg = '%s : %s: %s' % (when, event['who'], event['what'].strip())
+            
+        try:
+            open(self.logfile, 'a').write(msg + '\n')
+        except (IOError, OSError), e:
+            print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))
+
+
+    # event format is a dict {when:time, who:[worker|logger|job|main], what:str}
+    def run(self):
+        abort = False
+        while not abort:
+            e = self.events.get()
+            if 'when' in e and 'who' in e and 'what' in e:
+                self.log(e)
+
 class CoprBackend(object):
+    """core process - starts/stops/initializes workers"""
+    
     def __init__(self, config_file=None, ext_opts=None):
         # read in config file
         # put all the config items into a single self.opts bunch
@@ -33,25 +128,28 @@ class CoprBackend(object):
         self.ext_opts = ext_opts # to stow our cli options for read_conf()
         self.opts = self.read_conf()
 
-        logdir = os.path.dirname(self.opts.logfile)
-        if not os.path.exists(logdir):
-            os.makedirs(logdir, mode=0750)
+        self.jobs = multiprocessing.Queue() # job is a path to a jobfile on the localfs
+        self.events = multiprocessing.Queue()
+        # event format is a dict {when:time, who:[worker|logger|job|main], what:str}
 
-        if not os.path.exists(self.opts.destdir):
-            os.makedirs(self.opts.destdir, mode=0755)
 
-        # setup a log file to write to
-        self.logfile = self.opts.logfile
-        self.log("Starting up new copr-be instance")
+        # create logger
+        self._logger = CoprLog(self.opts, self.events)
+        self._logger.start()
 
+        self.event('Starting up Job Grabber')
+        # create job grabber
+        self._jobgrab = CoprJobGrab(self.opts, self.events, self.jobs)
+        self._jobgrab.start()
         
         if not os.path.exists(self.opts.worker_logdir):
             os.makedirs(self.opts.worker_logdir, mode=0750)
             
-        self.jobs = multiprocessing.Queue()
         self.workers = []
         self.added_jobs = []
 
+    def event(self, what):
+        self.events.put({'when':time.time(), 'who':'main', 'what':what})
         
     def read_conf(self):
         "read in config file - return Bunch of config data"
@@ -90,64 +188,25 @@ class CoprBackend(object):
         return opts
         
         
-    def log(self, msg):
-        now =  time.strftime('%F %T')
-        output = str(now) + ': ' + msg
-        if not self.opts.daemonize:
-            print output
-            
-        try:
-            open(self.logfile, 'a').write(output + '\n')
-        except (IOError, OSError), e:
-            print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))
-
-
-    def fetch_jobs(self):
-        self.log('fetching jobs')
-        try:
-            r = requests.get('%s/waiting_builds/' % self.opts.frontend_url) # auth stuff here? maybe/maybenot
-        except requests.RequestException, e:
-            self.log('Error retrieving jobs from %s: %s' % (self.opts.frontend_url, e))
-        else:
-            r_json = json.loads(r.content) # using old requests on el6 :(
-            if 'builds' in r_json:
-                self.log('%s jobs returned' % len(r_json['builds']))
-                count = 0
-                for b in r_json['builds']:
-                    if 'id' in b:
-                        jobfile = self.opts.jobsdir + '/%s.json' % b['id']
-                        if not os.path.exists(jobfile) and b['id'] not in self.added_jobs:
-                            count += 1
-                            open(jobfile, 'w').write(json.dumps(b))
-                            self.log('Wrote job: %s' % b['id'])
-                self.log('New jobs: %s' % count)
     
     def run(self):
 
         abort = False
         while not abort:
-            self.fetch_jobs()
-            for f in sorted(glob.glob(self.opts.jobsdir + '/*.json')):
-                n = os.path.basename(f).replace('.json', '')
-                if n not in self.added_jobs:
-                    self.jobs.put(f)
-                    self.added_jobs.append(n)
-                    self.log('adding to work queue id %s' % n)
-
             # re-read config into opts
             self.opts = self.read_conf()
             
             if self.jobs.qsize():
-                self.log("# jobs in queue: %s" % self.jobs.qsize())
+                self.event("# jobs in queue: %s" % self.jobs.qsize())
                 # this handles starting/growing the number of workers
                 if len(self.workers) < self.opts.num_workers:
-                    self.log("Spinning up more workers for jobs")
+                    self.event("Spinning up more workers for jobs")
                     for i in range(self.opts.num_workers - len(self.workers)):
                         worker_num = len(self.workers) + 1
-                        w = Worker(self.opts, self.jobs, worker_num)
+                        w = Worker(self.opts, self.jobs, self.events, worker_num)
                         self.workers.append(w)
                         w.start()
-                    self.log("Finished starting worker processes")
+                    self.event("Finished starting worker processes")
                 # FIXME - prune out workers
                 #if len(self.workers) > self.opts.num_workers:
                 #    killnum = len(self.workers) - self.opts.num_workers
@@ -158,7 +217,7 @@ class CoprBackend(object):
             # check for dead workers and abort
             for w in self.workers:
                 if not w.is_alive():
-                    self.log('Worker %d died unexpectedly' % w.worker_num)
+                    self.event('Worker %d died unexpectedly' % w.worker_num)
                     if self.opts.exit_on_worker:
                         raise errors.CoprBackendError, "Worker died unexpectedly, exiting"
                     else:
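
Taken together, copr-be.py now runs three kinds of child processes wired by two queues: CoprLog drains the events queue, CoprJobGrab feeds the jobs queue, and each Worker consumes jobs while emitting events. A condensed sketch of that wiring, assuming the classes and an `opts` bunch from this diff (startup order and error handling simplified):

    import multiprocessing

    jobs = multiprocessing.Queue()    # each item: path to a job file on the local fs
    events = multiprocessing.Queue()  # event dicts, drained by CoprLog

    logger = CoprLog(opts, events)              # appends events to opts.logfile
    logger.start()
    jobgrab = CoprJobGrab(opts, events, jobs)   # polls the frontend, feeds jobs
    jobgrab.start()
    for worker_num in range(1, opts.num_workers + 1):
        w = Worker(opts, jobs, events, worker_num)
        w.start()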


