def wait_for_port(self, port, timeout=60):
    '''Wait until port is open. Repeatedly tries to socket.connect on given
    port.

    :param port: port to check
    :param timeout: timeout in seconds
    :raises TestcloudInstanceError: when the timeout is reached
    '''
    s = socket.socket()
    start_time = time()
    log.debug('Waiting up to %d seconds for %s:%s to open.' %
              (timeout, self.ipaddr, port))

    while True:
        try:
            s.connect((self.ipaddr, port))
        except socket.error:
            pass
        else:
            s.close()
            break

        if (start_time + timeout) < time():
            raise TestcloudInstanceError(
                "Waiting for %s:%s to open timed out (%ds)" %
                (self.ipaddr, port, timeout))

        sleep(0.1)

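# A minimal usage sketch for wait_for_port() (illustrative, not part of the
# original module): block until SSH answers on a freshly booted machine. The
# two-minute timeout is an assumption.
def _example_wait_for_ssh(machine):
    # Raises TestcloudInstanceError if port 22 is still closed after 120 s.
    machine.wait_for_port(22, timeout=120)
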
def _spawn_vm(self, uuid, playbook_vars):
    '''Spawn a virtual machine using testcloud.

    :param str uuid: unicode string uuid for the task being executed
    :param dict playbook_vars: a vars dict created by
        :meth:`_create_playbook_vars`
    :returns: str ip address of spawned vm
    '''
    log.info('Spawning disposable client')

    env = image_utils.devise_environment(self.arg_data, playbook_vars)
    self.task_vm = vm.TestCloudMachine(uuid)

    retries = config.get_config().spawn_vm_retries

    while retries > 0:
        retries -= 1
        try:
            self.task_vm.prepare(**env)
            self.task_vm.wait_for_port(22)

            log.debug('Disposable client (%s %s) ready',
                      self.task_vm.instancename, self.task_vm.ipaddr)

            return self.task_vm.ipaddr
        except vm.TestcloudInstanceError as e:
            if retries <= 0:
                raise exc.CheckbMinionError(
                    'Disposable client failed to boot: %s' % e)
            else:
                log.warning('Disposable client failed to boot, retrying: %s',
                            e)
                self.task_vm.teardown()

def run_yumrepoinfo(self, arch, koji_tag):
    """Get yum repoinfo for given arch and koji tag"""
    output = {}

    if self.repoinfo is None:
        self.repoinfo = yumrepoinfo.get_yumrepoinfo(arch, self.filelist)

    if koji_tag.endswith('-pending'):
        koji_tag = koji_tag[:-len('-pending')]

    if koji_tag == 'rawhide':
        koji_tag = self.repoinfo.repo('rawhide')['tag']

    # remember the resolved tag; the loop below walks koji_tag up to its
    # (eventually empty) parent, so logging it afterwards would be wrong
    resolved_tag = koji_tag

    while koji_tag:
        repo = self.repoinfo.repo_by_tag(koji_tag)
        if repo is None:
            raise CheckbDirectiveError('Repo with tag %r not found.'
                                       % koji_tag)
        output[repo['name']] = repo['url']
        koji_tag = repo['parent']

    log.debug("Found %s repos for %s: %r" %
              (len(output), resolved_tag, output))
    return output

def _dump_playbook_vars(self, playbook_vars):
    '''Save playbook variables into json files, so that they can serve later
    for forwarding vars to task playbooks and for debugging.

    :param dict playbook_vars: vars dict created by
        :meth:`_create_playbook_vars`
    :return: tuple of ``(varsfile, allvarsfile)``, where ``varsfile`` is a
        path to json file with variables to be forwarded to task playbooks,
        and ``allvarsfile`` is a path to json file with whole
        ``playbook_vars`` content (useful for debugging)
    '''
    # save forwarded variables, so that task playbook can load them
    fwdvars = {}
    for fwdvar in self.FORWARDED_VARS:
        fwdvars[fwdvar] = playbook_vars[fwdvar]
    file_utils.makedirs(os.path.join(playbook_vars['artifacts'], 'checkb'))
    varsfile = os.path.join(playbook_vars['artifacts'], 'checkb',
                            playbook_vars['varsfile'])
    with open(varsfile, 'w') as vf:
        vars_str = json.dumps(fwdvars, indent=2, sort_keys=True)
        vf.write(vars_str)
        log.debug('Saved task vars file %s with contents:\n%s',
                  varsfile, vars_str)

    # save also all runner playbook variables, for debugging
    allvarsfile = os.path.join(playbook_vars['artifacts'], 'checkb',
                               'internal_vars.json')
    with open(allvarsfile, 'w') as vf:
        vars_str = json.dumps(playbook_vars, indent=2, sort_keys=True)
        vf.write(vars_str)
        log.debug('Saved internal ansible vars file %s with contents:\n%s',
                  allvarsfile, vars_str)

    return (varsfile, allvarsfile)

def get_latest(cls, distro, release, flavor, arch='x86_64', imagesdir=None):
    """Search for the most recent image available on the system.

    :param distro: distro of the image (e.g. ``fedora``)
    :param release: release of the image (e.g. ``23``)
    :param flavor: flavor of the image (e.g. ``minimal``)
    :param arch: architecture of the image
    :param imagesdir: absolute path to directory containing the images,
        path from config is used if None
    :return: file:// url of the latest image available
    :raises CheckbImageNotFoundError: if no such image for given release
        and flavor was found
    """
    if not imagesdir:
        imagesdir = config.get_config().imagesdir

    latest_metadata = cls.get_latest_metadata(distro, release, flavor, arch,
                                              imagesdir)
    if not latest_metadata:
        raise exc.CheckbImageNotFoundError(
            'No image for DISTRO: %s, RELEASE: %s, FLAVOR: %s, ARCH: %s in %s'
            % (distro, release, flavor, arch, imagesdir))
    else:
        url = "file://" + os.path.join(imagesdir,
                                       latest_metadata['filename'])
        log.debug("Found image: %s" % url)
        return url

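# Hypothetical call showing the inputs and output of get_latest() (the
# values are assumptions for illustration; relies on this module's imports):
def _example_find_image():
    # Returns a 'file://...' URL of the newest matching image on disk, or
    # raises CheckbImageNotFoundError when none exists.
    return ImageFinder.get_latest(distro='fedora', release='27',
                                  flavor='minimal', arch='x86_64')
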
def _prepare_instance(self, tc_image):
    '''Prepare an instance for booting and boot it with testcloud'''
    log.debug("preparing testcloud instance {}".format(self.instancename))
    tc_instance = instance.Instance(self.instancename, tc_image,
                                    hostname=self.hostname)
    tc_instance.prepare()

    log.debug("spawning testcloud instance {}".format(self.instancename))
    tc_instance.spawn_vm()
    tc_instance.start()

def _check_playbook_syntax(playbook):
    '''Run ansible-playbook --syntax-check on a playbook

    :param str playbook: path to an ansible playbook
    :raise CheckbPlaybookError: when the playbook is not syntactically
        correct
    '''
    try:
        subprocess.check_call(['ansible-playbook', '--syntax-check',
                               playbook])
        log.debug('Playbook is syntactically correct: %s', playbook)
    except subprocess.CalledProcessError as e:
        log.error('Syntax check failed for playbook %s: %s', playbook, e)
        raise exc.CheckbPlaybookError(e)

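# Sketch of calling the syntax-check guard directly (the playbook path is a
# placeholder):
def _example_syntax_check():
    # Raises CheckbPlaybookError if ansible-playbook rejects the file.
    _check_playbook_syntax('tests.yml')
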
def main():
    '''Main entry point executed by runtask script'''
    # Preliminary initialization of logging, so all messages before regular
    # initialization can be logged to stream.
    logger.init_prior_config()

    log.info('Execution started at: %s',
             datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
    log.debug('Using checkb %s', checkb.__version__)

    # parse cmdline
    parser = get_argparser()
    args = parser.parse_args()

    check_args(parser, vars(args))
    log.debug('Parsed arguments: %s', args)
    arg_data = process_args(vars(args))

    # create artifacts directory + subdirs
    try:
        artif_subdir = os.path.join(arg_data['artifactsdir'], 'checkb')
        file_utils.makedirs(artif_subdir)
        log.info("Task artifacts will be saved in: %s",
                 arg_data['artifactsdir'])
    except OSError:
        log.error("Can't create artifacts directory %s", artif_subdir)
        raise

    # initialize logging
    level_stream = logging.DEBUG if args.debug else None
    logger.init(level_stream=level_stream)
    logpath = os.path.join(artif_subdir, 'checkb.log')
    logger.add_filehandler(level_file=logging.DEBUG, filelog_path=logpath)
    logger.remove_mem_handler()

    # start execution
    executor = Executor(arg_data)
    success = executor.execute()

    # finalize
    log.info('Task artifacts were saved in: %s', arg_data['artifactsdir'])
    if config.get_config().profile == config.ProfileName.PRODUCTION:
        log.info('External URL for task artifacts: %s/%s',
                 config.get_config().artifacts_baseurl, arg_data['uuid'])
    log.info('Execution finished at: %s',
             datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
    if not success:
        log.error('Some playbooks failed. Exiting with non-zero exit code.')
    sys.exit(0 if success else 1)

def _get_instance():
    '''Do everything necessary to fully initialize Config instance, update
    it with external configuration, make all final touches.

    :return: :class:`.Config` instance ready to be used
    :raise CheckbConfigError: if file config parsing and handling failed
    '''
    config = _load()
    _check_sanity(config)
    log.debug('Using config profile: %s', config.profile)

    # make sure required directories exist
    _create_dirs(config)

    return config

def get_update(self, updateid):
    '''Get the last Bodhi update for the specified update ID.

    :param str updateid: update ID, e.g. 'FEDORA-2015-13787'
    :return: Bodhi update object with that ID, or ``None`` when no such
        update is found
    :rtype: :class:`munch.Munch`
    '''
    log.debug('Searching Bodhi updates for: %s', updateid)
    res = self.client.query(updateid=updateid)

    assert 0 <= len(res['updates']) <= 1

    if res['updates']:
        return res['updates'][0]
    else:
        return None

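# Sketch of querying Bodhi through this class (the update ID is taken from
# the docstring above; treat the surrounding setup as an assumption):
def _example_get_update():
    utils = BodhiUtils()
    update = utils.get_update('FEDORA-2015-13787')
    if update is None:
        log.debug('No such update found')
    return update
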
def __init__(self, client=None):
    '''Create a new BodhiUtils instance.

    :param client: custom :class:`Bodhi2Client` instance. If ``None``, a
        default Bodhi2Client instance is used.
    '''
    self.config = config.get_config()

    if not client:
        self.client = bodhi.client.bindings.BodhiClient(
            staging=self.config.bodhi_staging)
        log.debug('Created Bodhi client to: %s', self.client.base_url)
        # automatically retry failed requests (HTTP 5xx and similar)
        self.client.retries = 10
    else:
        self.client = client

def process(self, params, arg_data):
    repodir = params['repodir']

    log.info('running createrepo_c on %s', repodir)
    p = sub.Popen(['createrepo_c', repodir], stdout=sub.PIPE,
                  stderr=sub.PIPE)
    output, errors = p.communicate()

    if p.returncode:
        raise CheckbDirectiveError(errors)

    log.debug(output)
    return output

def _load():
    '''Load the configuration, internal (defaults) and external (config
    files)

    :return: :class:`.Config` instance that was updated by external config
        file contents
    :raise CheckbConfigError: if file config parsing and handling failed
    '''
    # first load the defaults and make sure we even want to load config files
    env_profile = os.getenv(PROFILE_VAR)
    config = _load_defaults(env_profile)
    if config.profile == ProfileName.TESTING:
        log.debug('Testing profile, not loading config files from disk')
        _customize_values(config)
        return config

    # load config files
    filename = _search_dirs(CONF_DIRS, CONF_FILE)
    if not filename:
        log.warning('No config file %s found in dirs: %s' %
                    (CONF_FILE, CONF_DIRS))
        return config

    log.debug('Using config file: %s', filename)
    file_config = _load_file(filename)
    file_profile = file_config.get('profile', None)

    # reload the config defaults if the profile was specified in the config
    # file (and not present in env variable)
    if file_profile and not env_profile:
        config = _load_defaults(file_profile)

    # merge the changes from the config file
    _merge_config(config, file_config)

    # do additional values customization
    _customize_values(config)

    # set config filename used, this is set after merging so it doesn't get
    # overridden
    config._config_filename = filename

    return config

def _prepare_image(self, distro, release, flavor, arch):
    '''Use testcloud to prepare an image for local booting

    :param str distro: Distro to use in image discovery
    :param str release: Distro's release to use in image discovery
    :param str flavor: base-image flavor to use in image discovery
    :param str arch: arch to use in image discovery
    :raises CheckbImageNotFoundError: when base image of the required type
        is not found
    :raises CheckbImageError: for errors in preparing the image with
        testcloud
    '''
    tc_image = None

    try:
        if config.get_config().force_imageurl:
            img_url = config.get_config().imageurl
        else:
            log.debug("Looking for image with DISTRO: %s, RELEASE: %s, "
                      "FLAVOR: %s, ARCH: %s"
                      % (distro, release, flavor, arch))
            img_url = ImageFinder.get_latest(distro=distro, release=release,
                                             flavor=flavor, arch=arch)
    except exc.CheckbImageNotFoundError as e:
        log.error(e)
        raise

    log.debug("Preparing image {} for task {}".format(img_url, self.uuid))

    try:
        tc_image = image.Image(img_url)
        # symlink the image instead of copying it to the testcloud dir,
        # because our user only expects image handling in checkb dirs, and
        # we remove all minion instances immediately after task execution
        # anyway
        tc_image.prepare(copy=False)
    except TestcloudImageError as e:
        log.exception(e)
        raise exc.CheckbImageError(
            "There was an error while preparing the testcloud image", e)

    return tc_image

def install(pkgs):
    '''Install packages from system repositories using DNF. Either root or
    sudo access required.

    :param pkgs: packages to be installed, e.g. ``['pidgin']``, or any
        other argument supported by ``dnf install`` command
    :type pkgs: list of str
    :raise CheckbPermissionError: if we don't have permissions to run DNF
        as admin
    :raise CheckbError: if DNF return code is not zero
    '''
    if not pkgs:
        return

    log.info('Installing %d packages...', len(pkgs))
    pkglist = ' '.join([pipes.quote(pkg) for pkg in pkgs])

    if not os_utils.is_root() and not os_utils.has_sudo():
        raise exc.CheckbPermissionError(
            "Can't install packages without root or sudo access. "
            'Packages requested: %s' % pkglist)

    cmd = ['dnf', '--assumeyes', 'install']
    cmd.extend(pkgs)

    if not os_utils.is_root():
        # we must have sudo at this point, don't test again needlessly
        cmd = ['sudo', '--non-interactive'] + cmd

    log.debug('Running: %s', ' '.join([pipes.quote(c) for c in cmd]))
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        log.error(u'✘ Package installation failed. We tried to install the '
                  'following packages:\n%s\nDNF returned exit code %d and '
                  'output:\n%s', pkglist, e.returncode, e.output.rstrip())
        raise exc.CheckbError("Unable to install packages: %s" % pkglist)
    else:
        log.debug(u'✔ Package installation completed successfully. DNF '
                  'output was:\n%s', output.rstrip())
        return

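# Example invocation of install() (the package names are placeholders).
# Without root or password-less sudo this raises CheckbPermissionError; a
# non-zero DNF exit code surfaces as CheckbError:
def _example_install_deps():
    install(['rpmlint', 'libabigail'])
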
def _switch_to_mirror(self):
    '''If the baseurl is a round-robin redirect (as in case of
    ``download.fedoraproject.org``), resolve the ``url`` in each section and
    save it back. This avoids issues with multiple network requests being
    directed each to a different mirror (possibly with different contents).
    This is supposed to be called only once during initialization.
    '''
    log.info('Resolving URLs in yumrepoinfo config, because the baseurl '
             'is a well-known redirect. Provide a custom mirror to skip '
             'this in the future.')

    for section in self.parser.sections():
        if section == 'DEFAULT':
            continue

        retries = 0
        url = self.parser.get(section, 'url')
        newurl = None

        while retries <= self.resolve_retry:
            try:
                log.debug('Resolving url: %s', url)
                response = urlopen(url, timeout=20)
                newurl = response.geturl()
                break
            except (URLError, socket.error) as e:
                retries += 1
                log.warning('Received %s when resolving %s . %s', e, url,
                            'Trying again...'
                            if retries <= self.resolve_retry
                            else 'Giving up.')
                if retries > self.resolve_retry:
                    raise exc.CheckbRemoteError(
                        'Failed to resolve %s : %s' % (url, e))

        self.parser.set(section, 'url', newurl)
        log.debug('Set %s section to use url: %s', section, newurl)

def process(self, params, arg_data):
    # keys 'result_worst' and 'result_last' must be mutually exclusive
    if ('result_worst' in params) == ('result_last' in params):
        raise CheckbDirectiveError(
            "The exitcode directive requires exactly one of keys "
            "'result_worst' or 'result_last'.")

    if 'result_worst' in params:
        details = check.import_YAML(params['result_worst'])
        code = SUCCESS
        for detail in details:
            if detail.outcome not in ['PASSED', 'INFO']:
                code = FAILURE
        log.debug("Returning exitcode %d" % code)
        return code

    elif 'result_last' in params:
        details = check.import_YAML(params['result_last'])
        if details and details[-1].outcome not in ['PASSED', 'INFO']:
            log.debug("Returning exitcode %d" % FAILURE)
            return FAILURE
        else:
            log.debug("Returning exitcode %d" % SUCCESS)
            return SUCCESS

    else:
        assert False, "This should never occur"

def load_namespaces_config():
    '''Load and parse namespaces config file into a dictionary.

    :return: dictionary constructed from the yaml document
    '''
    config = {'namespaces_safe': ['scratch'], 'namespaces_whitelist': {}}

    if get_config().profile == ProfileName.TESTING:
        log.debug('Testing profile, not loading config files from disk')
        return config

    filename = _search_dirs(CONF_DIRS, NS_CONF_FILE)
    if filename is None:
        log.warning('Could not find namespaces config in %s. Using '
                    'defaults.' % ', '.join(CONF_DIRS))
        return config

    data = parse_yaml_from_file(filename)
    config.update(data)

    return config

def _read(self):
    '''Read first available config file from the list of provided config
    files.'''
    log.debug('Looking for yumrepoinfo config files in: %s', self.filelist)

    for cfg in self.filelist:
        if self.parser.read(cfg):
            log.debug('Successfully loaded yumrepoinfo config file: %s',
                      cfg)
            break
        else:
            log.debug('Failed to load yumrepoinfo config file: %s', cfg)

def _get_client_ipaddr(self):
    '''Get an IP address of the machine the task is going to be executed
    on.

    :returns: str ip address of the machine or None if the machine is yet
        to be created
    '''
    # when running remotely, run directly over ssh, instead of using libvirt
    persistent = False

    runtask_mode = config.get_config().runtask_mode
    if runtask_mode == config.RuntaskModeName.LOCAL:
        self.run_remotely = False
    elif runtask_mode == config.RuntaskModeName.LIBVIRT:
        self.run_remotely = True
    else:
        assert False, 'This should never occur'

    if self.arg_data['local']:
        log.debug("Forcing local execution (option --local)")
        self.run_remotely = False
    elif self.arg_data['libvirt']:
        log.debug("Forcing remote execution (option --libvirt)")
        self.run_remotely = True
        persistent = False
    elif self.arg_data['ssh']:
        log.debug('Forcing remote execution (option --ssh)')
        self.run_remotely = True
        persistent = True

    log.debug('Execution mode: %s',
              'remote' if self.run_remotely else 'local')

    ipaddr = '127.0.0.1'
    if self.run_remotely:
        ipaddr = self.arg_data['machine'] if persistent else None

    return ipaddr

def check_namespace(self, checkname, arg_data):
    '''Determine if current task can submit results into its namespace.
    Return if it can, raise error if it can't.

    :param str checkname: full check name (including namespace prefix)
    :return: ``None``, it means task is allowed to post into the namespace
    :raise CheckbDirectiveError: if task is not allowed to post into the
        namespace
    '''
    conf_ns = config.load_namespaces_config()
    ns_repos = None

    # check if the namespace exists
    for ns in conf_ns['namespaces_safe']:
        if checkname.startswith(ns + '.'):
            log.debug('Namespace %s found in the safe namespaces.', ns)
            return

    for ns, repos in conf_ns['namespaces_whitelist'].items():
        if checkname.startswith(ns + '.'):
            log.debug('Namespace %s found in the namespace whitelist.', ns)
            ns_repos = repos
            break
    else:
        raise CheckbDirectiveError('No namespace for task %s exists.'
                                   % checkname)

    taskdir = os.path.dirname(os.path.abspath(arg_data['task']))
    task_repo_url = git_origin_url(taskdir)
    if not task_repo_url:
        raise CheckbDirectiveError(
            "Could not find task's git remote 'origin' url in %s"
            % os.path.join(taskdir, '.git/config'))

    try:
        if not [ns_repo for ns_repo in ns_repos
                if task_repo_url.strip().startswith(ns_repo)]:
            log.warning('No namespace whitelist seems to match the task '
                        'repo URL: %s', task_repo_url)
            raise CheckbDirectiveError

        log.debug('Repo %s found in the whitelist', task_repo_url)
    except CheckbDirectiveError:
        raise CheckbDirectiveError(
            "This repo is not allowed to post results into %s namespace. "
            "Not posting results." % checkname)

def has_sudo():
    '''Determine whether we currently have a password-less access to sudo.

    Note: It's not possible to say whether the access will stay
    password-less in the future (the credentials might be set to expire in
    time), just for this exact moment.

    :rtype: bool
    '''
    # Note: We could run "sudo --reset-timestamp" first to make sure any
    # cached password is invalidated and therefore be sure we always have a
    # password-less access. But this might be seen as an impolite thing to
    # do (esp. when we're not granted the sudo access, we would still reset
    # the cache with every run) and it might obstruct some use cases. Not
    # implemented ATM.
    cmd = ['sudo', '--validate', '--non-interactive']
    log.debug('Deciding whether we have a password-less sudo access. '
              'Running: %s', ' '.join(cmd))
    try:
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        log.debug(u"✘ Sudo access is not available. Received exit code %d "
                  "and output:\n%s", e.returncode, e.output.rstrip())
        return False
    else:
        log.debug(u'✔ Sudo access is available')
        return True

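# has_sudo() pairs with is_root() in privilege checks; a sketch of the
# typical guard, mirroring install() above (names assume the usual
# os_utils/exc imports):
def _example_require_admin():
    if not os_utils.is_root() and not os_utils.has_sudo():
        raise exc.CheckbPermissionError('Root or password-less sudo '
                                        'required.')
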
def devise_environment(arg_data, playbook_vars):
    '''Takes an input item and type, and returns a required run-environment,
    or a default one, if the task doesn't require anything specific.

    :param dict arg_data: parsed command-line arguments (item, type and
        arch are used in this method)
    :param dict playbook_vars: vars dict created by
        :meth:`executor._create_playbook_vars`
    :return: dict containing ``distro``, ``release``, ``flavor`` and
        ``arch``. Each either set, or ``None``.
    '''
    env = {'distro': None, 'release': None, 'flavor': None, 'arch': None}

    item = arg_data['item']
    item_type = arg_data['type']
    arch = arg_data['arch']

    if playbook_vars['checkb_match_host_distro']:
        if item_type == 'koji_build':
            # FIXME: find a way to make this not Fedora-specific
            # For `xchat-2.8.8-21.fc20` disttag is `fc20` for example
            try:
                distro = rpm_utils.get_dist_tag(item)[:2]
                env['distro'] = {'fc': 'fedora'}.get(distro)
            except exc.CheckbValueError:
                log.debug('Failed to parse distro from koji build %s, '
                          'using default', item)
        elif item_type == 'koji_tag':
            if re.match(r'^f[0-9]{2}-.*', item):
                env['distro'] = 'fedora'

    if playbook_vars['checkb_match_host_release']:
        if item_type == 'koji_build':
            # FIXME: find a way to make this not Fedora-specific
            # Last two characters in rpm's disttag are the Fedora release.
            # For `xchat-2.8.8-21.fc20` disttag is `fc20` for example
            try:
                env['release'] = rpm_utils.get_dist_tag(item)[-2:]
            except exc.CheckbValueError:
                log.debug('Failed to parse release from koji build %s, '
                          'using default', item)
        elif item_type == 'koji_tag':
            if re.match(r'^f[0-9]{2}-.*', item):
                env['release'] = item[1:3]

    if playbook_vars['checkb_match_host_arch']:
        if arch != 'noarch':
            env['arch'] = arch

    log.debug('Forced environment values: %s', env)

    env['distro'] = (env['distro'] or
                     config.get_config().default_disposable_distro)
    env['release'] = (env['release'] or
                      config.get_config().default_disposable_release)
    env['flavor'] = (env['flavor'] or
                     config.get_config().default_disposable_flavor)
    env['arch'] = env['arch'] or config.get_config().default_disposable_arch

    log.debug('Environment to be used: %s', env)

    return env

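# Worked example of the resolution above (hypothetical inputs): for a koji
# build with dist tag 'fc20', distro becomes 'fedora' and release '20';
# anything left unset falls back to the default_disposable_* config options.
def _example_devise_environment():
    arg_data = {'item': 'xchat-2.8.8-21.fc20', 'type': 'koji_build',
                'arch': 'x86_64'}
    playbook_vars = {'checkb_match_host_distro': True,
                     'checkb_match_host_release': True,
                     'checkb_match_host_arch': True}
    return devise_environment(arg_data, playbook_vars)
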
def download(url, dirname, filename=None, cachedir=None):
    '''Download a file.

    :param str url: file URL to download
    :param str dirname: directory path; if the directory does not exist, it
        gets created (and all its parent directories).
    :param str filename: name of downloaded file; if not provided, the
        basename is extracted from URL
    :param str cachedir: If set, the file will be downloaded to a cache
        directory specified by this parameter. If the file is already
        present and of the same length, download is skipped. The requested
        destination file (``dirname/filename``) will be a symlink to the
        cached file. This directory is automatically created if not present.
    :return: the path to the downloaded file
    :rtype: str
    :raise CheckbRemoteError: if download fails
    '''
    if not filename:
        filename = os.path.basename(url)

    dl_dest = dest = os.path.abspath(os.path.join(dirname, filename))
    dirs_to_create = [dirname]

    if cachedir:
        dl_dest = os.path.join(cachedir, filename)
        dirs_to_create.append(cachedir)

    for directory in dirs_to_create:
        makedirs(directory)

    # check file existence and validity
    download = True
    if os.path.exists(dl_dest):
        if _same_length(dl_dest, url):
            log.debug('Already downloaded: %s', dl_dest)
            download = False
        else:
            log.debug('Cached file %s differs from its online version. '
                      'Redownloading.', dl_dest)

    # download the file
    if download:
        log.debug('Downloading%s: %s', ' (cached)' if cachedir else '', url)
        try:
            _download(url, dl_dest)
        except requests.exceptions.RequestException as e:
            log.debug('Download failed: %s', e)
            # the file can be incomplete, remove
            if os.path.exists(dl_dest):
                try:
                    os.remove(dl_dest)
                except OSError:
                    log.warning('Could not delete incomplete file: %s',
                                dl_dest)
            raise CheckbRemoteError(e, errno=e.response.status_code)

    # create a symlink if the download was cached
    if cachedir:
        try:
            if os.path.exists(dest):
                # if there already is something at the destination, we need
                # to remove it first
                os.remove(dest)
            os.symlink(dl_dest, dest)
        except OSError:
            log.exception("Can't create symlink %s -> %s", dl_dest, dest)
            raise

    return dest

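# A short sketch of the cached-download flow documented above (URL and paths
# are placeholders): the first call fills the cache, a repeated call skips
# the download and only refreshes the symlink.
def _example_cached_download():
    path = download('https://example.org/some.rpm', dirname='workdir',
                    cachedir='/var/tmp/checkb-cache')
    # 'workdir/some.rpm' is now a symlink to the cached copy.
    return path
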
def process(self, params, arg_data):
    if 'file' in params and 'results' in params:
        raise CheckbDirectiveError(
            "Either `file` or `results` can be used, not both.")

    if 'artifactsdir' not in params:
        detected_args = ', '.join(params.keys())
        raise exc.CheckbDirectiveError(
            "The directive requires 'artifactsdir' as an argument. "
            "Detected arguments: %s" % detected_args)
    artifactsdir = params['artifactsdir']

    try:
        if params.get('file', None):
            with open(params['file']) as resultfile:
                params['results'] = resultfile.read()
        check_details = check.import_YAML(params['results'])
        log.debug("YAML output parsed OK.")
    except (CheckbValueError, IOError) as e:
        raise CheckbDirectiveError("Failed to load results: %s" % e)

    log.debug('Generating report of %s results...', len(check_details))

    results = []
    for detail in check_details:
        results.append({
            'checkname': detail.checkname,
            'outcome': detail.outcome,
            'note': detail.note or '---',
            'item': detail.item,
            'type': detail.report_type,
            'artifact': os.path.basename(detail.artifact)
                        if detail.artifact else None,
        })

    if 'path' in params:
        report_fname = os.path.join(artifactsdir, params['path'])
    else:
        report_fname = os.path.join(artifactsdir, 'report.html')

    if 'template' in params:
        template_fname = params['template']
    else:
        template_fname = os.path.join(config.get_config()._data_dir,
                                      'report_templates/html.j2')

    try:
        file_utils.makedirs(os.path.dirname(report_fname))
    except OSError as e:
        raise CheckbDirectiveError(e)

    with open(template_fname) as f_template:
        with open(report_fname, 'w') as f_report:
            template = jinja2.Template(f_template.read())
            report = template.render(results=results,
                                     artifactsdir=artifactsdir)
            f_report.write(report)

    log.info('Report generated in: %s', os.path.abspath(report_fname))

def process(self, params, arg_data):
    # checking if reporting is enabled is done after importing yaml which
    # serves as validation of input results
    if 'file' in params and 'results' in params:
        raise CheckbDirectiveError(
            "Either `file` or `results` can be used, not both.")

    try:
        if params.get('file', None):
            with open(params['file']) as resultfile:
                params['results'] = resultfile.read()
        check_details = check.import_YAML(params['results'])
        log.debug("YAML output parsed OK.")
    except (CheckbValueError, IOError) as e:
        raise CheckbDirectiveError("Failed to load results: %s" % e)

    for detail in check_details:
        if not (detail.item and detail.report_type and detail.checkname):
            raise CheckbDirectiveError(
                "The resultsdb directive requires 'item', 'type' and "
                "'checkname' to be present in the YAML data.")

    conf = config.get_config()
    if not conf.report_to_resultsdb:
        log.info("Reporting to ResultsDB is disabled. Once enabled, the "
                 "following would get reported:\n%s", params['results'])
        return check.export_YAML(check_details)

    artifactsdir_url = '%s/%s' % (self.artifacts_baseurl, arg_data['uuid'])

    # for now, we're creating the resultsdb group at reporting time
    group_data = self.create_resultsdb_group(uuid=arg_data['uuid'])

    log.info('Posting %s results to ResultsDB...' % len(check_details))
    for detail in check_details:
        checkname = detail.checkname

        # find out if the task is allowed to post results into the namespace
        if config.get_config().profile == config.ProfileName.PRODUCTION:
            self.check_namespace(checkname, arg_data)

        self.ensure_testcase_exists(checkname)

        result_log_url = artifactsdir_url
        if detail.artifact:
            artifact_path = self.get_artifact_path(
                arg_data['artifactsdir'], detail.artifact)
            if artifact_path:
                result_log_url = "%s/%s" % (artifactsdir_url, artifact_path)

        try:
            result = self.resultsdb.create_result(
                outcome=detail.outcome,
                testcase=checkname,
                groups=[group_data],
                note=detail.note or None,
                ref_url=result_log_url,
                item=detail.item,
                type=detail.report_type,
                **detail.keyvals)
            log.debug('Result saved in ResultsDB:\n%s',
                      pprint.pformat(result))
            detail._internal['resultsdb_result_id'] = result['id']
        except resultsdb_api.ResultsDBapiException as e:
            log.error(e)
            log.error("Failed to store to ResultsDB: `%s` `%s` `%s`",
                      detail.item, checkname, detail.outcome)

    return check.export_YAML(check_details)

def _run_playbook(self, test_playbook, ipaddr, playbook_vars):
    '''Run the ansible-playbook command to execute given playbook
    containing the task.

    :param str test_playbook: name of the playbook, relative to the task
        directory
    :param str ipaddr: IP address of the machine the task will be run on
    :param dict playbook_vars: vars dict created by
        :meth:`_create_playbook_vars`
    :return: stream output of the ansible-playbook command (stdout and
        stderr merged together)
    :rtype: str
    :raise CheckbPlaybookError: when the playbook is not syntactically
        correct
    '''
    ansible_dir = os.path.join(config.get_config()._data_dir, 'ansible')

    # dump variables for future use
    varsfile, allvarsfile = self._dump_playbook_vars(playbook_vars)

    # figure out the ansible-playbook command
    cmd = [
        'ansible-playbook', 'runner.yml',
        '--inventory=%s,' % ipaddr,  # the ending comma is important
        '--extra-vars=@%s' % allvarsfile,
    ]

    # for local execution, run as root unless instructed otherwise
    if not self.run_remotely and playbook_vars['become_root']:
        cmd.append('--become')

    if self.run_remotely:
        if self.arg_data['ssh_privkey']:
            cmd.append('--private-key=%s' % self.arg_data['ssh_privkey'])
    else:
        cmd.append('--connection=local')

    if self.arg_data['debug']:
        cmd.append('-vv')

    log.debug('Running ansible playbook %s', ' '.join(cmd))
    try:
        # during playbook execution, handle system signals asking us to quit
        signal.signal(signal.SIGINT, self._interrupt_handler)
        signal.signal(signal.SIGTERM, self._interrupt_handler)

        output, _ = os_utils.popen_rt(cmd, cwd=ansible_dir)
        return output
    except subprocess.CalledProcessError as e:
        log.error('ansible-playbook ended with %d return code',
                  e.returncode)
        log.debug(e.output)
        raise exc.CheckbError(e)
    except exc.CheckbInterruptError as e:
        log.error('System interrupt %s detected. Pulling logs and '
                  'stopping execution.', e)
        cmd_failsafe = cmd + ['--tags', 'failsafe']
        try:
            os_utils.popen_rt(cmd_failsafe, cwd=ansible_dir)
        except (subprocess.CalledProcessError,
                exc.CheckbInterruptError) as e2:
            log.error('Error during failsafe pulling logs, ignoring and '
                      'raising the original error. The current error is: '
                      '%s:%s', e2.__class__.__name__, e2)
        raise e
    finally:
        # reset signal handling to default behavior
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

def process(self, params, arg_data):
    if ('package' not in params and 'nvr' not in params) \
            or 'path' not in params or 'target_dir' not in params:
        detected_args = ', '.join(params.keys())
        raise exc.CheckbDirectiveError(
            "The distgit directive requires 'package' (or 'nvr'), 'path' "
            "and 'target_dir' arguments. Detected arguments: %s"
            % detected_args)

    package = None
    gitref = None
    namespace = None

    if 'nvr' in params:
        nvr = params['nvr']
        package = rpm_utils.rpmformat(nvr, fmt='n')
        gitref = rpm_utils.get_dist_tag(nvr).replace('c', '')
        rawhide_tag = yumrepoinfo.YumRepoInfo(resolve_baseurl=False).get(
            'rawhide', 'tag')
        if gitref == rawhide_tag:
            gitref = 'master'
        namespace = 'rpms'

    # assign defaults
    package = params.get('package', package)
    gitref = params.get('gitref', gitref or 'master')
    namespace = params.get('namespace', namespace or 'rpms')
    baseurl = params.get('baseurl', BASEURL)
    target_dir = params['target_dir']
    ignore_missing = params.get('ignore_missing', False)

    if not python_utils.iterable(params['path']):
        raise exc.CheckbValueError(
            "Incorrect value type of the 'path' argument: %s"
            % type(params['path']))

    target_path = params['path']
    output_data = {}

    if 'localpath' in params:
        if not python_utils.iterable(params['localpath']):
            raise exc.CheckbValueError(
                "Incorrect value type of the 'localpath' argument: %s"
                % type(params['localpath']))
        if not len(params['path']) == len(params['localpath']):
            raise exc.CheckbValueError(
                'path and localpath lists must be of the same length.')
        target_path = params['localpath']

    format_fields = {
        'package': package,
        'gitref': gitref,
        'namespace': namespace,
        'baseurl': baseurl,
    }

    output_data['downloaded_files'] = []
    for path, localpath in zip(params['path'], target_path):
        localpath = os.path.join(target_dir, localpath)
        file_utils.makedirs(os.path.dirname(localpath))
        url = URL_FMT.format(path=path, **format_fields)
        try:
            output_data['downloaded_files'].append(
                file_utils.download(url, '.', localpath))
        except exc.CheckbRemoteError as e:
            if e.errno == 404 and ignore_missing:
                log.debug('File not found, ignoring: %s', url)
            else:
                raise e

    return output_data