def _load_file(conf_file):
    '''Parse a configuration file and return it as a dictionary. The option
    values are checked for type correctness against a default Config object.

    :param conf_file: file path (string) or file handler to the configuration
        file in YAML syntax
    :return: dictionary parsed from the configuration file
    :raise CheckbConfigError: if any problem occurs during the parsing or
        some values have incorrect variable type
    '''
    # convert file path to file handle if needed; remember whether we opened
    # the handle here, so we can close it afterwards (the original code
    # leaked it). A caller-provided handle stays under the caller's control.
    opened_here = False
    if isinstance(conf_file, basestring):
        try:
            conf_file = open(conf_file)
            opened_here = True
        except IOError as e:
            log.exception('Could not open config file: %s', conf_file)
            raise exc.CheckbConfigError(e)

    filename = (conf_file.name if hasattr(conf_file, 'name')
                else '<unnamed file>')

    try:
        conf_obj = yaml.safe_load(conf_file)
    except yaml.YAMLError as e:
        log.exception('Could not parse config file: %s', filename)
        raise exc.CheckbConfigError(e)
    finally:
        if opened_here:
            conf_file.close()

    # config file might be empty (all commented out), returning None. For
    # further processing, let's replace it with empty dict
    if conf_obj is None:
        conf_obj = {}

    # check correct types
    # we should receive a single dictionary with keyvals
    if not isinstance(conf_obj, abc.Mapping):
        raise exc.CheckbConfigError(
            'The config file %s does not have '
            'a valid structure. Instead of a mapping, it is recognized as: %s'
            % (filename, type(conf_obj)))

    default_conf = Config()
    for option, value in conf_obj.items():
        # check for unknown options
        try:
            default_value = getattr(default_conf, option)
        except AttributeError:
            log.warning('Unknown option "%s" in the config file %s', option,
                        filename)
            continue

        # check for correct type
        assert default_value is not None, \
            "Default values must not be None: %s" % option
        if type(default_value) is not type(value):
            raise exc.CheckbConfigError(
                'Option "%s" in config file %s '
                'has an invalid type. Expected: %s, Found: %s' %
                (option, filename, type(default_value), type(value)))

    return conf_obj
def _spawn_vm(self, uuid, playbook_vars):
    '''Spawn a virtual machine using testcloud.

    :param str uuid: unicode string uuid for the task being executed
    :param dict playbook_vars: a vars dict created by
        :meth:`_create_playbook_vars`
    :returns: str ip address of spawned vm
    :raise CheckbMinionError: if the client fails to boot in all retries
    '''
    log.info('Spawning disposable client')

    env = image_utils.devise_environment(self.arg_data, playbook_vars)
    self.task_vm = vm.TestCloudMachine(uuid)

    retries = config.get_config().spawn_vm_retries

    while retries > 0:
        retries -= 1
        try:
            self.task_vm.prepare(**env)
            self.task_vm.wait_for_port(22)

            log.debug('Disposable client (%s %s) ready',
                      self.task_vm.instancename, self.task_vm.ipaddr)

            return self.task_vm.ipaddr
        except vm.TestcloudInstanceError as e:
            if retries <= 0:
                # BUG FIX: the original passed printf-style args to the
                # exception constructor ('...: %s', e), which never
                # interpolates; format the message explicitly instead
                raise exc.CheckbMinionError(
                    'Disposable client failed to boot: %s' % e)
            else:
                log.warning('Disposable client failed to boot, retrying: '
                            '%s', e)
                # tear down the broken instance before the next attempt
                self.task_vm.teardown()
def _interrupt_handler(self, signum, frame):
    '''Catch system signals (like SIGINT, SIGTERM) and re-raise them as a
    :class:`CheckbInterruptError`.

    :param int signum: number of the received signal
    :param frame: current stack frame (unused)
    :raise CheckbInterruptError: always, carrying the signal number and name
    '''
    # build a reverse lookup: signal number -> canonical 'SIG*' name
    names_by_number = {}
    for attr in dir(signal):
        if attr.startswith('SIG') and '_' not in attr:
            names_by_number[getattr(signal, attr)] = attr

    signame = names_by_number.get(signum, 'UNKNOWN')
    log.warning('Received system signal %d (%s). Raising exception.',
                signum, signame)
    raise exc.CheckbInterruptError(signum, signame)
def check_namespace(self, checkname, arg_data):
    '''Determine if current task can submit results into its namespace. Return
    if it can, raise error if it can't.

    :param str checkname: full check name (including namespace prefix)
    :param dict arg_data: task arguments; ``task`` is used to locate the
        task's git repository
    :return: ``None``, it means task is allowed to post into the namespace
    :raise CheckbDirectiveError: if task is not allowed to post into the
        namespace
    '''
    conf_ns = config.load_namespaces_config()
    ns_repos = None

    # safe namespaces need no repo whitelist check
    for ns in conf_ns['namespaces_safe']:
        if checkname.startswith(ns + '.'):
            log.debug('Namespace %s found in the safe namespaces.', ns)
            return

    # whitelisted namespaces are restricted to specific repos
    for ns, repos in conf_ns['namespaces_whitelist'].items():
        if checkname.startswith(ns + '.'):
            log.debug('Namespace %s found in the namespace whitelist.', ns)
            ns_repos = repos
            break
    else:
        raise CheckbDirectiveError('No namespace for task %s exists.' %
                                   checkname)

    taskdir = os.path.dirname(os.path.abspath(arg_data['task']))
    task_repo_url = git_origin_url(taskdir)
    if not task_repo_url:
        # BUG FIX: the original concatenated "...url" "in %s" without a
        # space, producing "urlin <path>" in the error message
        raise CheckbDirectiveError(
            "Could not find task's git remote 'origin' url "
            "in %s" % os.path.join(taskdir, '.git/config'))

    # BUG FIX (readability): the original raised CheckbDirectiveError inside
    # a try block only to catch it and re-raise with a message; raise the
    # final error directly instead. External behavior is unchanged.
    if not any(task_repo_url.strip().startswith(ns_repo)
               for ns_repo in ns_repos):
        log.warning('No namespace whitelist seems to match the task '
                    'repo URL: %s', task_repo_url)
        raise CheckbDirectiveError(
            "This repo is not allowed to post results into %s "
            "namespace. Not posting results." % checkname)

    log.debug('Repo %s found in the whitelist', task_repo_url)
def _load():
    '''Load the configuration, internal (defaults) and external (config files)

    :return: :class:`.Config` instance that was updated by external config
        file contents
    :raise CheckbConfigError: if file config parsing and handling failed
    '''
    # defaults come first; they also decide whether config files are read
    profile_from_env = os.getenv(PROFILE_VAR)
    cfg = _load_defaults(profile_from_env)

    if cfg.profile == ProfileName.TESTING:
        log.debug('Testing profile, not loading config files from disk')
        _customize_values(cfg)
        return cfg

    # locate the external config file, if any
    conf_path = _search_dirs(CONF_DIRS, CONF_FILE)
    if not conf_path:
        log.warning('No config file %s found in dirs: %s' % (CONF_FILE,
                    CONF_DIRS))
        return cfg

    log.debug('Using config file: %s', conf_path)
    external = _load_file(conf_path)
    profile_from_file = external.get('profile', None)

    # a profile named in the config file (when the env var did not set one)
    # forces re-loading the defaults for that profile
    if profile_from_file and not profile_from_env:
        cfg = _load_defaults(profile_from_file)

    # merge the changes from the config file
    _merge_config(cfg, external)

    # do additional values customization
    _customize_values(cfg)

    # remember which file was used; assigned after merging so the file
    # contents cannot override it
    cfg._config_filename = conf_path

    return cfg
def _switch_to_mirror(self):
    '''If the baseurl is a round-robin redirect (as in case of
    ``download.fedoraproject.org``), resolve the ``url`` in each section and
    save it back. This avoids issues with multiple network requests being
    directed each to a different mirror (possibly with different contents).
    This is supposed to be called only once during initialization.

    :raise CheckbRemoteError: if a URL cannot be resolved even after all
        retries
    '''
    log.info(
        'Resolving URLs in yumrepoinfo config, because the baseurl '
        'is a well-known redirect. Provide a custom mirror to skip this '
        'in the future.')

    for section in self.parser.sections():
        if section == 'DEFAULT':
            continue

        retries = 0
        url = self.parser.get(section, 'url')
        newurl = None

        while retries <= self.resolve_retry:
            try:
                log.debug('Resolving url: %s', url)
                response = urlopen(url, timeout=20)
                try:
                    # the final URL after following redirects
                    newurl = response.geturl()
                finally:
                    # BUG FIX: the response was never closed, leaking a
                    # socket per resolved section
                    response.close()
                break
            except (URLError, socket.error) as e:
                retries += 1
                log.warning(
                    'Received %s when resolving %s . %s', e, url,
                    'Trying again...' if retries <= self.resolve_retry
                    else 'Giving up.')
                if retries > self.resolve_retry:
                    raise exc.CheckbRemoteError(
                        'Failed to resolve %s : %s' % (url, e))

        self.parser.set(section, 'url', newurl)
        log.debug('Set %s section to use url: %s', section, newurl)
def makedirs(fullpath):
    '''This is the same as :meth:`os.makedirs`, but does not raise an
    exception when the destination directory already exists.

    :param str fullpath: directory path to create
    :raise OSError: if directory doesn't exist and can't be created
    '''
    # local import keeps this helper self-contained
    import errno

    try:
        os.makedirs(fullpath)
        # NOTE: the original also asserted isdir() here; that assert is
        # stripped under `python -O` and races with concurrent deletion,
        # so it was removed — makedirs() success already implies a dir
    except OSError as e:
        # BUG FIX (readability): compare against errno.EEXIST instead of
        # the magic number 17 ("[Errno 17] File exists")
        if e.errno == errno.EEXIST:
            # if it is a directory everything is ok
            if os.path.isdir(fullpath):
                return
            # otherwise it is a file/socket/etc and it is an error
            else:
                log.warning(
                    "Can't create directory, something else already exists: %s",
                    fullpath)
                raise
        else:
            log.warning("Can't create directory: %s", fullpath)
            raise
def get_artifact_path(self, artifactsdir, artifact):
    """Return the relative path of :attr str artifact: inside the
    :attr str artifactsdir:.

    :returns: relative path to the artifact file or None, if the file
              does not exist, or is outside the artifactsdir.
    """
    artifactsdir = os.path.realpath(artifactsdir)
    if os.path.isabs(artifact):
        artifact_path = artifact
    else:
        artifact_path = os.path.join(artifactsdir, artifact)

    # realpath also resolves symlinks, so '..' tricks cannot escape the dir
    artifact_path = os.path.realpath(artifact_path)

    if not os.path.exists(artifact_path):
        log.warning('Artifact %r does not exist, ignoring' % artifact_path)
        return None
    # BUG FIX: a plain prefix check ('/art'.startswith) also matched sibling
    # directories like '/art-evil'; require either an exact match or a full
    # path-component prefix (dir + separator)
    elif not (artifact_path == artifactsdir or
              artifact_path.startswith(artifactsdir + os.sep)):
        log.warning(
            'Artifact %r is placed outside of artifacts directory %r, ignoring',
            artifact_path, artifactsdir)
        return None

    return os.path.relpath(artifact_path, start=artifactsdir)
def load_namespaces_config():
    ''' Load and parse namespaces config file into a dictionary.

    :return: dictionary constructed from the yaml document
    '''
    # start from built-in defaults; file contents (if any) override them
    result = {'namespaces_safe': ['scratch'], 'namespaces_whitelist': {}}

    if get_config().profile == ProfileName.TESTING:
        log.debug('Testing profile, not loading config files from disk')
        return result

    filename = _search_dirs(CONF_DIRS, NS_CONF_FILE)
    if filename is None:
        log.warning('Could not find namespaces config in %s. Using defaults.'
                    % ', '.join(CONF_DIRS))
        return result

    result.update(parse_yaml_from_file(filename))
    return result
def download(url, dirname, filename=None, cachedir=None):
    '''Download a file.

    :param str url: file URL to download
    :param str dirname: directory path; if the directory does not exist, it
        gets created (and all its parent directories).
    :param str filename: name of downloaded file; if not provided, the
        basename is extracted from URL
    :param str cachedir: If set, the file will be downloaded to a cache
        directory specified by this parameter. If the file is already present
        and of the same length, download is skipped. The requested destination
        file (``dirname/filename``) will be a symlink to the cached file.
        This directory is automatically created if not present.
    :return: the path to the downloaded file
    :rtype: str
    :raise CheckbRemoteError: if download fails
    '''
    if not filename:
        filename = os.path.basename(url)

    dl_dest = dest = os.path.abspath(os.path.join(dirname, filename))
    dirs_to_create = [dirname]

    if cachedir:
        dl_dest = os.path.join(cachedir, filename)
        dirs_to_create.append(cachedir)

    for directory in dirs_to_create:
        makedirs(directory)

    # check file existence and validity
    download = True
    if os.path.exists(dl_dest):
        if _same_length(dl_dest, url):
            log.debug('Already downloaded: %s', dl_dest)
            download = False
        else:
            log.debug('Cached file %s differs from its online version. '
                      'Redownloading.', dl_dest)

    # download the file
    if download:
        log.debug('Downloading%s: %s', ' (cached)' if cachedir else '', url)
        try:
            _download(url, dl_dest)
        except requests.exceptions.RequestException as e:
            log.debug('Download failed: %s', e)
            # the file can be incomplete, remove
            if os.path.exists(dl_dest):
                try:
                    os.remove(dl_dest)
                except OSError:
                    log.warning('Could not delete incomplete file: %s',
                                dl_dest)
            # BUG FIX: e.response is None for connection-level failures
            # (no HTTP response received); guard before reading status_code
            status = (e.response.status_code if e.response is not None
                      else None)
            raise CheckbRemoteError(e, errno=status)

    # create a symlink if the download was cached
    if cachedir:
        try:
            if os.path.exists(dest):
                # if there already is something at the destination, we need to
                # remove it first
                os.remove(dest)
            os.symlink(dl_dest, dest)
        except OSError:
            log.exception("Can't create symlink %s -> %s", dl_dest, dest)
            raise

    return dest
def add(self, file_):
    # Track only files opened for writing; others are skipped with a warning.
    if not file_.mode.startswith('w'):
        label = getattr(file_, 'name', '<unnamed file>')
        log.warning('File %s not opened for writing. Not adding.', label)
        return
    self._files.append(file_)
def process(self, params, arg_data):
    '''Download RPMs (and optionally build logs) from Koji according to the
    requested ``action``.

    :param dict params: directive parameters. Required: ``action`` (one of
        ``download``, ``download_tag``, ``download_latest_stable``), ``arch``
        and ``target_dir``; plus ``koji_build`` or ``koji_tag`` depending on
        the action. Optional: ``arch_exclude``, ``debuginfo``, ``src``,
        ``build_log``.
    :param dict arg_data: task arguments (not used by this directive)
    :return: dict with key ``downloaded_rpms`` (and, when build logs are
        requested, ``downloaded_logs`` and ``log_errors``)
    :raise CheckbDirectiveError: if the action is invalid or required
        arguments are missing
    '''
    # process params
    valid_actions = ['download', 'download_tag', 'download_latest_stable']
    action = params['action']
    if action not in valid_actions:
        raise exc.CheckbDirectiveError('%s is not a valid action for koji '
                                       'directive' % action)

    if 'arch' not in params or 'target_dir' not in params:
        detected_args = ', '.join(params.keys())
        raise exc.CheckbDirectiveError(
            "The koji directive requires 'arch' and 'target_dir' as an "
            "argument. Detected arguments: %s" % detected_args)

    # convert str to list
    for param in ('arch', 'arch_exclude'):
        if param in params and isinstance(params[param], basestring):
            params[param] = [params[param]]

    # 'noarch' packages accompany every arch-specific build, so include them
    # unless the caller asked for everything or listed noarch already
    arches = list(params['arch'])
    if arches and ('all' not in arches) and ('noarch' not in arches):
        arches.append('noarch')

    arch_exclude = params.get('arch_exclude', [])
    debuginfo = params.get('debuginfo', False)
    src = params.get('src', False)
    build_log = params.get('build_log', False)

    target_dir = params['target_dir']
    file_utils.makedirs(target_dir)

    # download files
    output_data = {}

    if action == 'download':
        if 'koji_build' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The koji directive requires 'koji_build' for the 'download' "
                "action. Detected arguments: %s" % detected_args)
        # normalize the build identifier to name-version-release form
        nvr = rpm_utils.rpmformat(params['koji_build'], 'nvr')
        output_data['downloaded_rpms'] = self.koji.get_nvr_rpms(
            nvr, target_dir, arches=arches, arch_exclude=arch_exclude,
            debuginfo=debuginfo, src=src)

    elif action == 'download_tag':
        if 'koji_tag' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The koji directive requires 'koji_tag' for the 'download_tag' "
                "action. Detected arguments: %s" % detected_args)
        koji_tag = params['koji_tag']
        output_data['downloaded_rpms'] = self.koji.get_tagged_rpms(
            koji_tag, target_dir, arches=arches, arch_exclude=arch_exclude,
            debuginfo=debuginfo, src=src)

    elif action == 'download_latest_stable':
        if 'koji_build' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The koji directive requires 'koji_build' for the "
                "'download_latest_stable' "
                "action. Detected arguments: %s" % detected_args)
        name = rpm_utils.rpmformat(params['koji_build'], 'n')
        disttag = rpm_utils.get_dist_tag(params['koji_build'])
        # we need to do 'fc22' -> 'f22' conversion
        tag = disttag.replace('c', '')

        # first we need to check updates tag and if that fails, the latest
        # stable nvr is in the base repo
        tags = ['%s-updates' % tag, tag]
        nvr = self.koji.latest_by_tag(tags, name)

        if not nvr:
            # nothing stable exists yet; return the (empty) output dict
            log.info("There's no previous stable build for %s, skipping.",
                     params['koji_build'])
            assert output_data == {}
            return output_data

        output_data['downloaded_rpms'] = self.koji.get_nvr_rpms(
            nvr, target_dir, arch_exclude=arch_exclude,
            arches=arches, debuginfo=debuginfo, src=src)

    # download build.log if requested
    if build_log:
        if action in ('download', 'download_latest_stable'):
            # 'nvr' was set by the action branches above
            ret_log = self.koji.get_build_log(
                nvr, target_dir, arches=arches, arch_exclude=arch_exclude)
            output_data['downloaded_logs'] = ret_log['ok']
            output_data['log_errors'] = ret_log['error']
        else:
            log.warning("Downloading build logs is not supported for "
                        "action '%s', ignoring.", action)

    return output_data
def execute(self):
    '''Execute all the tasks in the taskdir

    :return: ``True`` if execution finished successfully for all playbooks
        present. ``False`` if some of them crashed, haven't produced any
        results or the execution was interrupted (e.g. a system signal).
    :rtype: bool
    :raise CheckbError: if no ``tests*.yml`` playbook exists in the taskdir
    '''
    test_playbooks = fnmatch.filter(os.listdir(self.arg_data['taskdir']),
                                    'tests*.yml')
    if not test_playbooks:
        raise exc.CheckbError('No tests*.yml found in dir %s' %
                              self.arg_data['taskdir'])

    failed = []
    for test_playbook in test_playbooks:
        playbook_vars = None
        try:
            # syntax check
            self._check_playbook_syntax(
                os.path.join(self.arg_data['taskdir'], test_playbook))

            # compute variables
            playbook_vars = self._create_playbook_vars(test_playbook)

            if not playbook_vars['checkb_generic_task']:
                raise exc.CheckbPlaybookError(
                    'This playbook is not '
                    'marked as a Checkb generic task. See '
                    'documentation how to write a task.')

            # spawn VM if needed
            ipaddr = self.ipaddr
            if ipaddr is None:
                ipaddr = self._spawn_vm(self.arg_data['uuid'],
                                        playbook_vars)

            # execute
            log.info('Running playbook %s on machine: %s', test_playbook,
                     ipaddr)
            self._run_playbook(test_playbook, ipaddr, playbook_vars)

            # report results
            self._report_results(test_playbook)
        except exc.CheckbInterruptError as e:
            # a system signal aborts the whole run, not just this playbook
            log.error(
                'Caught system interrupt during execution of '
                'playbook %s: %s. Not executing any other playbooks.',
                test_playbook, e)
            failed.append(test_playbook)
            break
        except exc.CheckbError as e:
            log.error('Error during execution of playbook %s: %s',
                      test_playbook, e)
            failed.append(test_playbook)
        finally:
            # best-effort removal of the secrets file (skipped in the
            # testing profile, where it is not created on disk)
            try:
                if playbook_vars and config.get_config(
                        ).profile != config.ProfileName.TESTING:
                    os.remove(playbook_vars['checkb_secrets_file'])
            except OSError as e:
                log.warning("Could not delete the secrets file at %r. %s",
                            playbook_vars['checkb_secrets_file'], e)

            if self.task_vm is not None:
                if self.arg_data['no_destroy']:
                    # NOTE: a `break` inside `finally` also discards any
                    # in-flight exception from the try block (Python
                    # semantics) — the loop simply stops here
                    log.info(
                        'Not destroying disposable client as '
                        'requested, access it at: %s . Skipping any '
                        'other playbooks.', ipaddr)
                    break
                else:
                    self.task_vm.teardown()

            log.info('Playbook execution finished: %s', test_playbook)

    if failed:
        log.error('Some playbooks failed during execution: %s',
                  ', '.join(failed))
    else:
        log.info('All playbooks finished successfully')
    return not failed