On 04/02/2013 06:18 PM, Don Zickus wrote:
> This is the bulk of the harness file for enabling beaker.
>
> The idea is to bootstrap it by downloading the remote xml file, parsing it,
> converting it into a control file and passing that to autotest.
>
> The rest of the harness has various hacks to deal with passing info
> from autotest to beaker.
>
> One of the biggest pieces is converting the xml recipe into a control
> file. This will still need tweaking.
>
> The other piece is figuring out when recipes, tasks and sub-tasks
> start and stop. Again, this probably needs tweaking.
>
> Overall, this patch has been tested with some stub recipes and responds
> well. Work still needs to be done on figuring out which files to
> upload, and on real workflow testing.
>
> This patch will need to be reviewed by autotest and beaker folks.
>
> Signed-off-by: Don Zickus <dzickus(a)redhat.com>
> ---
> client/harness_beaker.py | 463 ++++++++++++++++++++++++++++++++++++++++++++++
> 1 files changed, 463 insertions(+), 0 deletions(-)
> create mode 100644 client/harness_beaker.py
>
> diff --git a/client/harness_beaker.py b/client/harness_beaker.py
> new file mode 100644
> index 0000000..e901fab
> --- /dev/null
> +++ b/client/harness_beaker.py
> @@ -0,0 +1,463 @@
> +# harness_beaker.py
> +#
> +# Copyright (C) 2011 Jan Stancek <jstancek(a)redhat.com>
> +#
> +# This program is free software; you can redistribute it and/or modify
> +# it under the terms of the GNU General Public License as published by
> +# the Free Software Foundation; either version 2 of the License, or
> +# (at your option) any later version.
> +#
> +# This program is distributed in the hope that it will be useful,
> +# but WITHOUT ANY WARRANTY; without even the implied warranty of
> +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> +# GNU General Public License for more details.
> +#
> +# You should have received a copy of the GNU General Public License
> +# along with this program; if not, write to the Free Software
> +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
> +#
> +# started by Jan Stancek <jstancek(a)redhat.com> 2011
> +"""
> +The harness interface
> +The interface between the client and beaker lab controller.
> +"""
> +__author__ = """Don Zickus 2013"""
> +
> +import os
> +import optparse
> +import logging as log
> +import harness
> +import time
> +import re
> +import sys
> +from time import gmtime, strftime
> +from autotest.client.shared import utils, error
> +
> +from autotest.client.bkr_xml import BeakerXMLParser
> +from autotest.client.bkr_proxy import BkrProxy
> +
> +'''Use 5 minutes for console heartbeat'''
> +BEAKER_CONSOLE_HEARTBEAT = 60 * 5
> +
> +class harness_beaker(harness.harness):
> + def __init__(self, job, harness_args):
> + log.debug('harness_beaker __init__')
> + super(harness_beaker, self).__init__(job)
> +
> +        #temporary hack until BEAKER_RECIPE_ID and BEAKER_LAB_CONTROLLER_URL are set up in beaker
> + os.environ['BEAKER_RECIPE_ID'] = open('/root/RECIPE.TXT', 'r').read().strip()
> + os.environ['BEAKER_LAB_CONTROLLER_URL'] = re.sub("/bkr/", ":8000", os.environ['BEAKER'])
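(If I read the re.sub above right, it turns e.g. BEAKER=http://lab.example.com/bkr/
into http://lab.example.com:8000, i.e. it assumes the lab controller answers on
port 8000 of the same host, and it silently does nothing if BEAKER lacks the
trailing /bkr/ path. Probably fine for a temporary hack, but worth a comment.)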
> +
> +        #control whether bootstrap environment remotely connects or stays offline
> +        #cheap hack to support flexible debug environment
> +        #the bootstrap job object is just a stub and won't have the '_state' attribute
> +        is_bootstrap = not hasattr(job, '_state')
> +
> + self.state_file = os.path.join(os.path.dirname(__file__), 'harness_beaker.state')
> + self.recipe_id = os.environ.get('BEAKER_RECIPE_ID')
> + self.labc_url = os.environ.get('BEAKER_LAB_CONTROLLER_URL')
> + self.hostname = os.environ.get('HOSTNAME')
> + #self.common_results_uploaded_after = False
> + self.watchdog_pid = None
> + self.current_task_id = None
> + self.skip_upload = False #hack to work around reservesys
> + self.offline = False
> +
> + if harness_args:
> + log.info('harness_args: %s' % harness_args)
> + self.args = self.parse_args(harness_args, is_bootstrap)
> +
> +
> + log.debug('harness_beaker: state_file: <%s>', self.state_file)
> +
> + log.debug('harness_beaker: hostname: <%s>', self.hostname)
> + log.debug('harness_beaker: labc_url: <%s>', self.labc_url)
> +
> + if not self.hostname:
> + raise error.HarnessError('Need valid hostname')
> +
> + #hack for flexible debug environment
> + labc = not self.offline and self.labc_url or None
> +
> + self.bkr_proxy = BkrProxy(self.recipe_id, labc)
> +
> + #self.setupInitSymlink()
> + #self.setupAutotestConfFile()
> +
> + def parse_args(self, args, is_bootstrap):
> + if not args:
> + return
> +
> + for a in args.split(','):
> + if a == 'offline':
> + #use cached recipe and stay offline whole time
> + self.offline = True
> +
> + elif a[:5] == 'cache':
> + if len(a) > 5 and a[5] == '=':
> + #cache a different recipe instead
> + self.recipe_id = a[6:]
> +
> + #remotely retrieve recipe, but stay offline during run
> + if not is_bootstrap:
> + self.offline = True
> +
> + else:
> + raise error.HarnessError("Unknown beaker harness arg: %s" % a)
> +
> + def bootstrap(self, fetchdir):
> + '''How to kickstart autotest when you have no control file?
> + You download the beaker XML, convert it to a control file
> + and pass it back to autotest. Much like bootstrapping.. :-)
> + '''
> +
> + recipe = self.init_recipe_from_beaker()
> +
> + #sanity check
> + if self.recipe_id != recipe.id:
> + raise error.HarnessError('Recipe mismatch: machine %s.. != XML %s..' %
> + (self.recipe_id, recipe.id))
> +
> + #create unique name
> + control_file_name = recipe.job_id + '_' + recipe.id + '.control'
> + control_file_path = fetchdir + '/' + control_file_name
> +
> + log.debug('setting up control file - %s' % control_file_path)
> + control_file = open(control_file_path, 'w')
> + try:
> + #convert recipe xml into control file
> + for task in recipe.tasks:
> + self.convert_task_to_control(fetchdir, control_file, task)
> + control_file.close()
> + except Exception, ex:
> + os.remove(control_file_path)
> + raise error.HarnessError('beaker_harness: convert failed with -> %s' % ex)
> +
> + #autotest should find this under FETCHDIRTEST because it is unique
> + return control_file_path
> +
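For reviewers who haven't written autotest control files: given the two
control.write() calls in convert_task_to_control() below, a two-task recipe
boils down to a generated control file of just (ids and names made up):

    os.environ['BEAKER_TASK_ID']='101'
    job.run_test('rh_tests_kernel_foo')
    os.environ['BEAKER_TASK_ID']='102'
    job.run_test('rh_tests_distribution_reservesys', timeout=86400)

which autotest then executes top to bottom.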
> + def init_recipe_from_beaker(self):
> + log.debug('Contacting beaker to get task details')
> + bxp = BeakerXMLParser()
> + recipe_xml = self.get_recipe_from_LC()
> + recipes_dict = bxp.parse_xml(recipe_xml)
> +
> + return self.find_recipe(recipes_dict)
> +
> + def init_task_params(self, task):
> + log.debug('PrepareTaskParams')
> +        if task is None:
> + raise error.HarnessError('No valid task')
> +
> + for (name, value) in task.params.items():
> + log.debug('adding to os.environ: <%s=%s>', name, value)
> + os.environ[name] = value
> +
> + def get_recipe_from_LC(self):
> + log.debug('trying to get recipe from LC:')
> + try:
> + recipe = self.bkr_proxy.get_recipe()
> + except Exception, exc:
> + raise error.HarnessError('Failed to retrieve xml: %s' % exc)
> + return recipe
> +
> + def find_recipe(self, recipes_dict):
> + if self.hostname in recipes_dict:
> + return recipes_dict[self.hostname]
> + for h in recipes_dict:
> + if self.recipe_id == recipes_dict[h].id:
> + return recipes_dict[h]
> + raise error.HarnessError('No valid recipe for host %s' % self.hostname)
> +
> + def get_test_name(self, task):
> +        name = re.sub('-', '_', task.rpmName)
> +        return re.sub(r'\.', '_', name)
> +
> + def convert_task_to_control(self, fetchdir, control, task):
> + """Tasks are really just:
> + # yum install $TEST
> + # cd /mnt/tests/$TEST
> + # make run
> +
> + Convert that into a test module with a control file
> + """
> + timeout = ''
> + if task.timeout:
> + timeout = ", timeout=%s" % task.timeout
> +
> + #python doesn't like '-' in its class names
> + rpm_name = self.get_test_name(task)
> + rpm_dir = fetchdir + '/' + rpm_name
> + rpm_file = rpm_dir + '/' + rpm_name + '.py'
> +
> + if not self.offline and task.status == 'Completed':
> + log.debug("SKIP Completed test %s" % rpm_name)
> + return
> +
> + if task.status == 'Running' and not re.search('reservesys', task.rpmName):
> + log.warning("Found Running test %s that isn't reservesys" % task.rpmName)
> +
> + #append test name to control file
> + log.debug('adding test %s to control file' % rpm_name)
> +
> + #Trick to avoid downloading XML all the time
> + #statically update each TASK_ID
> + control.write("os.environ['BEAKER_TASK_ID']='%s'\n" % task.id)
> + control.write("job.run_test('%s'%s)\n" % (rpm_name, timeout))
> +
> + #TODO check for git commands in task.params
> +
> + #create the test itself
> + log.debug('setting up test %s' % (rpm_file))
> + if not os.path.exists(rpm_dir):
> + os.mkdir(rpm_dir)
> + test = open(rpm_file, 'w')
> + test.write("import os\n")
> + test.write("from autotest.client import test, utils\n\n")
> + test.write("class %s(test.test):\n" % rpm_name)
> + test.write(" version=1\n\n")
> + test.write(" def setup(self):\n")
> + test.write(" utils.system('yum install -y %s')\n" % task.rpmName)
> + for param in task.params:
> +            log.debug('param is %s' % param)
> + test.write(" os.environ['%s']='%s'\n" % (param, task.params[param]))
> + test.write(" def run_once(self):\n")
> + test.write(" os.chdir('%s')\n" % task.rpmPath)
> + #test.write(" raw_output = utils.system_output('make run', retain_output=True)\n")
> + test.write(" raw_output = utils.system_output('make help', retain_output=True)\n")
> + test.write(" self.results = raw_output\n")
> + if rpm_name == '/distribution/reservesys':
> + test.write(" self.job.harness.skip_upload=True\n")
> + test.close()
> +
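The wrapper generation is a bit hard to read as a series of write() calls, so
for the record this is what the generated module comes out as, for a
hypothetical task rpm 'rh-tests-kernel-foo' installed under
/mnt/tests/kernel/foo with one made-up param:

    import os
    from autotest.client import test, utils

    class rh_tests_kernel_foo(test.test):
        version=1

        def setup(self):
            utils.system('yum install -y rh-tests-kernel-foo')
            os.environ['RUNTESTS']='all'
        def run_once(self):
            os.chdir('/mnt/tests/kernel/foo')
            raw_output = utils.system_output('make help', retain_output=True)
            self.results = raw_output

Note run_once() currently runs 'make help', with the real 'make run' line
commented out right above it -- I assume that's a stub-testing leftover that
needs flipping before this merges.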
> + def run_start(self):
> + """A run within this job is starting"""
> + log.debug('run_start')
> + try:
> + self.start_watchdog(BEAKER_CONSOLE_HEARTBEAT)
> + except Exception, exc:
> +            log.critical('ERROR: Failed to start watchdog: %s' % exc)
> +
> + def run_pause(self):
> + """A run within this job is completing (expect continue)"""
> + log.debug('run_pause')
> +
> + def run_reboot(self):
> + """A run within this job is performing a reboot
> + (expect continue following reboot)
> + """
> + log.debug('run_reboot')
> +
> + def run_abort(self):
> + """A run within this job is aborting. It all went wrong"""
> + log.debug('run_abort')
> + self.bkr_proxy.recipe_abort()
> + self.tear_down()
> +
> + def run_complete(self):
> + """A run within this job is completing (all done)"""
> + log.debug('run_complete')
> + self.tear_down()
> +
> + def run_test_complete(self):
> + """A test run by this job is complete. Note that if multiple
> + tests are run in parallel, this will only be called when all
> + of the parallel runs complete."""
> + log.debug('run_test_complete')
> +
> + def test_status(self, status, tag):
> + """A test within this job is completing"""
> + log.debug('test_status ' + status + ' / ' + tag)
> +
> + def test_status_detail(self, code, subdir, operation, status, tag,
> + optional_fields):
> + """A test within this job is completing (detail)"""
> +
> + log.debug('test_status_detail %s / %s / %s / %s / %s / %s',
> + code, subdir, operation, status, tag, str(optional_fields))
> +
> + """The mapping between a beaker recipe/task to an autotest test is not
> + always clear because of various reasons. One is the first/last
> + START/END maps to a recipe. The next set of START/END maps to a task.
> + But the set after might map to a sub-test not in beaker thus needs to
> + be recorded as a sub-task. Or it could map to the next task in the XML.
> +
> + So we control this by using START/END as boundary markers and an 'id' stack
> +    to let us know whether we are starting/finishing a recipe, task or sub-task.
> +
> + Based on this info, we know which beaker 'status' API to use and where to
> + upload 'results'
> +
> + This is entirely hinged on the environment variable BEAKER_TASK_ID being
> + updated correctly in the control file. If it isn't being updated correctly,
> + then everything may look like a sub-task and beaker gets out of sync.
> +
> + If that is the case, running the harness 'sync' command should re-write the
> + control file based on where beaker thinks we should be and all should be
> + good again.
> + """
> + if code.startswith('START'):
> +            '''BEAKER_TASK_ID is not yet set when the first START arrives'''
> + if self.current_task_id:
> + if 'BEAKER_TASK_ID' not in os.environ:
> + raise error.HarnessError("No BEAKER_TASK_ID set")
> + task_id = os.environ['BEAKER_TASK_ID']
> +
> + if self.current_task_id is None:
> + '''First START'''
> + log.debug('START: Pushing recipe id (%s) onto stack' % self.recipe_id)
> + self.current_task_id = [self.recipe_id]
> +            elif self.current_task_id and self.current_task_id[-1] != task_id:
> + '''New beaker task START'''
> + log.debug('START: Pushing task id (%s) onto stack' % task_id)
> + self.current_task_id.append(task_id)
> + self.bkr_proxy.task_start(task_id)
> + else:
> + '''Sub task not known to beaker'''
> + log.debug('START: Pushing sub-task (0) onto stack')
> + self.current_task_id.append(0)
> +
> + return
> + elif code.startswith('END'):
> + task_id = self.current_task_id.pop()
> +
> + if task_id == self.recipe_id:
> + '''Last END'''
> + log.debug('END: Popping recipe id (%s) off stack' % task_id)
> + self.bkr_proxy.recipe_stop()
> + elif task_id:
> + '''Valid Beaker task id, cleanup'''
> + log.debug('END: Popping task id (%s) off stack' % task_id)
> + self.bkr_proxy.task_stop(task_id)
> + #self.upload_task_files()
> + #FIXME upload task log files
> + else:
> + '''Sub task not known to beaker, skip'''
> + log.debug('END: Popping sub-task (0) off stack')
> + #FIXME upload sub-task results
> + #FIXME report sub-task Status
> +
> + return
> + else:
> + '''Normal sub-task completions'''
> + bkr_status = get_beaker_code(code)
> +            task_id = self.current_task_id[-1]    #peek at the top of the stack
> + try:
> + resultid = self.bkr_proxy.task_result(task_id, bkr_status,
> + subdir, 1, '')
> + self.upload_test_results(task_id, resultid, subdir)
> + except Exception, exc:
> +                log.critical('ERROR: Failed to process test results: %s' % exc)
> +
> + def tear_down(self):
> + '''called from complete and abort. clean up and shutdown'''
> + #self.recipe_upload_files(self.recipe_xml.id)
> + self.kill_watchdog()
> +
> + def start_watchdog(self, heartbeat):
> + log.debug('harness: Starting watchdog process, heartbeat: %d' % heartbeat)
> + try:
> + pid = os.fork()
> + if pid == 0:
> + self.watchdog_loop(heartbeat)
> + else:
> + self.watchdog_pid = pid
> + log.debug('harness: Watchdog process started, pid: %d', self.watchdog_pid)
> + except OSError, e:
> + log.error('harness: fork in start_watchdog failed: %d (%s)\n' % (e.errno, e.strerror))
> +
> +    def kill_watchdog(self):
> +        if self.watchdog_pid is None:
> +            #watchdog was never started (fork failed), nothing to kill
> +            return
> +        log.debug('harness: Killing watchdog, pid: %d', self.watchdog_pid)
> +        utils.nuke_pid(self.watchdog_pid)
> +        self.watchdog_pid = None
> +
> + def watchdog_loop(self, heartbeat):
> + while True:
> + time.sleep(heartbeat)
> +            log.info('[-- MARK -- %s]' % time.asctime(time.localtime(time.time())))
> + sys.exit()
> +
> + def get_processed_tests(self):
> + subdirs = []
> + if os.path.isfile(self.state_file):
> + f = open(self.state_file, 'r')
> + lines = f.readlines()
> + f.close()
> + for line in lines:
> + subdirs.append(line.strip())
> + return subdirs
> +
> + def write_processed_tests(self, subdirs):
> + f = open(self.state_file, 'w')
> + for subdir in subdirs:
> + f.write(subdir + '\n')
> + f.close()
> +
> +    # filter out some of the dirs, because this one is called at start/stop of each test
> + def upload_common_results(self):
> + try:
> + log.debug('Uploading common test results')
> + filter_dirs = self.get_processed_tests()
> + filter_dirs.extend(['analysis', 'sysinfo'])
> +
> + #log.info('sleeping for 3 seconds')
> + #time.sleep(3)
> +
> + self.upload_task_files(filter_dirs)
> + except Exception, exc:
> +            log.critical('ERROR: Failed to upload task results: %s' % exc)
> +
> + def upload_task_files(self, except_dirs):
> + log.debug('Uploading results except dirs: ' + str(except_dirs))
> + task = self.recipe_xml.task[0]
> + path = self.job.resultdir + '/' + self.getTestName(task)
> +
> + for root, dirnames, files in sorted(os.walk(path)):
> + for name in files:
> + #strip full path
> + basedir = re.sub(path + "/", "", root)
> + result_file = root + '/' + name
> + self.bkr_proxy.task_upload_file(result_file,
> + task.id, name, basedir)
> +
> + '''do not upload previously uploaded results files'''
> + for edir in task.excluded_dir:
> + if edir in dirnames:
> + dirnames.remove(edir)
> +
> + '''add test name in future excluded directories for recipe'''
> + self.recipe_xml.excluded_dir.append(self.getTestName(task))
Here in the method above you forgot to update the calls to the
task_upload_file API, which changed when you rewrote bkr_proxy. I took
a shot at fixing the function, check it out:
     def upload_task_files(self, except_dirs):
-        log.debug('Uploading results except dirs: ' + str(except_dirs))
+        log.debug('Uploading results except dirs: %s', str(except_dirs))
         task = self.recipe_xml.task[0]
-        path = self.job.resultdir + '/' + self.getTestName(task)
+        path = os.path.join(self.job.resultdir, self.getTestName(task))

         for root, dirnames, files in sorted(os.walk(path)):
             for name in files:
-                #strip full path
+                # Strip the base dir (path) for the remote path
                 basedir = re.sub(path + "/", "", root)
-                result_file = root + '/' + name
-                self.bkr_proxy.task_upload_file(result_file,
-                                                task.id, name, basedir)
+                remotepath = os.path.join(basedir, name)
+                # The localfile has the full path
+                localfile = os.path.join(root, name)
+                # Upload the file
+                self.bkr_proxy.task_upload_file(task.id, localfile,
+                                                remotepath)
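While retyping that I spotted a few more things you may want to fold in:
sorted(os.walk(path)) consumes the whole walk up front, so pruning dirnames
afterwards never actually skips anything; the except_dirs argument is logged
but never used (the pruning reads task.excluded_dir instead, which I assume
was not the intent); and the file defines get_test_name() while this method
calls self.getTestName(). Putting it together, the whole method would look
something like this -- an untested sketch, still assuming the new
bkr_proxy.task_upload_file(task_id, localfile, remotepath) signature, so
please double-check me:

    def upload_task_files(self, except_dirs):
        log.debug('Uploading results except dirs: %s', str(except_dirs))
        task = self.recipe_xml.task[0]
        path = os.path.join(self.job.resultdir, self.get_test_name(task))

        #plain topdown walk (no sorted()) so pruning dirnames below really
        #does prevent descent into already-uploaded directories
        for root, dirnames, files in os.walk(path):
            #do not upload previously uploaded results files
            for edir in task.excluded_dir + except_dirs:
                if edir in dirnames:
                    dirnames.remove(edir)

            for name in sorted(files):
                # Strip the base dir (path) for the remote path
                basedir = re.sub(path + "/", "", root)
                remotepath = os.path.join(basedir, name)
                # The localfile has the full path
                localfile = os.path.join(root, name)
                self.bkr_proxy.task_upload_file(task.id, localfile,
                                                remotepath)

        #add test name to future excluded directories for the recipe
        self.recipe_xml.excluded_dir.append(self.get_test_name(task))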
I *think* this is right; it's just something that came up in the basic
checks I usually run patchsets through. Other than this small mistake, I
found some trailing whitespace and a few small indentation problems. Could
you please fix those, use utils/reindent.py to reindent both
client/harness_beaker.py and client/bkr_proxy.py, and send an updated
patchset? Thanks!
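P.S.: reindent.py can be run directly on the files, something like

    python utils/reindent.py client/harness_beaker.py client/bkr_proxy.py

(it rewrites the files in place, converting to 4-space indents and trimming
trailing whitespace, if I remember the tool's behavior right).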