Example #1
    def _spawn_vm(self, uuid, playbook_vars):
        '''Spawn a virtual machine using testcloud.

        :param str uuid: unicode string uuid for the task being executed
        :param dict playbook_vars: a vars dict created by
            :meth:`_create_playbook_vars`
        :returns: str ip address of spawned vm
        '''
        log.info('Spawning disposable client')

        env = image_utils.devise_environment(self.arg_data, playbook_vars)
        self.task_vm = vm.TestCloudMachine(uuid)

        retries = config.get_config().spawn_vm_retries

        while retries > 0:
            retries -= 1
            try:
                self.task_vm.prepare(**env)
                self.task_vm.wait_for_port(22)

                log.debug('Disposable client (%s %s) ready',
                          self.task_vm.instancename, self.task_vm.ipaddr)

                return self.task_vm.ipaddr
            except vm.TestcloudInstanceError as e:
                if retries <= 0:
                    raise exc.CheckbMinionError(
                        'Disposable client failed to boot: %s' % e)
                else:
                    log.warning(
                        'Disposable client failed to boot, retrying: '
                        '%s', e)
                    self.task_vm.teardown()
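
A standalone sketch of the bounded-retry pattern this method relies on, with hypothetical spawn()/teardown() callables standing in for TestCloudMachine.prepare() and teardown():

import logging

log = logging.getLogger(__name__)

def spawn_with_retries(spawn, teardown, retries=3):
    '''Call spawn() up to `retries` times, cleaning up between attempts.'''
    while retries > 0:
        retries -= 1
        try:
            return spawn()
        except RuntimeError as e:
            if retries <= 0:
                raise  # attempts exhausted, propagate to the caller
            log.warning('spawn failed, retrying: %s', e)
            teardown()  # clean up the failed instance before the next try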
Example #2
    def _report_results(self, test_playbook):
        '''Report results from playbook's ``results.yml`` (stored in
        artifactsdir) into ResultsDB.

        :param str test_playbook: base name of the playbook file
        :raise CheckbDirectiveError: when there's a problem processing
            ``results.yml`` file
        '''
        results_file = os.path.join(self.arg_data['artifactsdir'],
                                    test_playbook, 'checkb', 'results.yml')
        log.info('Reporting results from: %s', results_file)

        if not os.path.exists(results_file):
            raise exc.CheckbDirectiveError(
                "Results file doesn't exist, "
                'assuming the task crashed. If you wish to report no results, '
                'the results file still needs to exist - consult '
                'documentation. Expected results file location: %s' %
                results_file)

        rdb = resultsdb_directive.ResultsdbDirective()
        rdb.process(params={"file": results_file}, arg_data=self.arg_data)

        # create file indicating that results were reported to resultsdb
        reported_file = os.path.join(self.arg_data['artifactsdir'],
                                     test_playbook, 'checkb',
                                     'results.yml.reported_ok')
        open(reported_file, 'a').close()
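
The final line uses the common "touch" idiom: opening in append mode creates a zero-byte file if missing and never truncates an existing one. The same idea as a hypothetical standalone helper:

import os

def mark_reported(artifactsdir, playbook):
    '''Hypothetical helper: drop a zero-byte marker next to results.yml.'''
    marker = os.path.join(artifactsdir, playbook, 'checkb',
                          'results.yml.reported_ok')
    open(marker, 'a').close()  # append mode, so an existing marker survives
    return marker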
Example #3
def main():
    '''Main entry point executed by runtask script'''
    # Preliminary initialization of logging, so all messages before regular
    # initialization can be logged to stream.
    logger.init_prior_config()

    log.info('Execution started at: %s',
             datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
    log.debug('Using checkb %s', checkb.__version__)

    # parse cmdline
    parser = get_argparser()
    args = parser.parse_args()

    check_args(parser, vars(args))
    log.debug('Parsed arguments: %s', args)
    arg_data = process_args(vars(args))

    # create artifacts directory + subdirs
    try:
        artif_subdir = os.path.join(arg_data['artifactsdir'], 'checkb')
        file_utils.makedirs(artif_subdir)
        log.info("Task artifacts will be saved in: %s",
                 arg_data['artifactsdir'])
    except OSError:
        log.error("Can't create artifacts directory %s", artif_subdir)
        raise

    # initialize logging
    level_stream = logging.DEBUG if args.debug else None
    logger.init(level_stream=level_stream)
    logpath = os.path.join(artif_subdir, 'checkb.log')
    logger.add_filehandler(level_file=logging.DEBUG, filelog_path=logpath)
    logger.remove_mem_handler()

    # start execution
    executor = Executor(arg_data)
    success = executor.execute()

    # finalize
    log.info('Task artifacts were saved in: %s', arg_data['artifactsdir'])
    if config.get_config().profile == config.ProfileName.PRODUCTION:
        log.info('External URL for task artifacts: %s/%s',
                 config.get_config().artifacts_baseurl, arg_data['uuid'])
    log.info('Execution finished at: %s',
             datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
    if not success:
        log.error('Some playbooks failed. Exiting with non-zero exit code.')
    sys.exit(0 if success else 1)
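
The two-phase logging setup in main() (log to stream first, then replay early messages into a file handler once the artifacts directory exists) can be approximated with the stdlib alone; a sketch, not checkb's actual logger module:

import logging
import logging.handlers

log = logging.getLogger('demo')
log.setLevel(logging.DEBUG)

# phase 1: buffer records in memory before the real handlers exist
mem = logging.handlers.MemoryHandler(capacity=1000,
                                     flushLevel=logging.CRITICAL)
log.addHandler(mem)
log.info('logged before full initialization')

# phase 2: once the log path is known, replay the buffer into the file
filehandler = logging.FileHandler('/tmp/demo.log')
mem.setTarget(filehandler)
mem.close()  # flushes buffered records into the target
log.removeHandler(mem)
log.addHandler(filehandler)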
Example #4
    def process(self, params, arg_data):
        repodir = params['repodir']

        log.info('running createrepo_c on %s', repodir)

        p = sub.Popen(['createrepo_c', repodir],
                      stdout=sub.PIPE,
                      stderr=sub.PIPE)

        output, errors = p.communicate()

        if p.returncode:
            raise CheckbDirectiveError(errors)

        log.debug(output)

        return output
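
Stripped of the directive plumbing, this is "run a command, capture output, raise on non-zero exit". On Python 3 the same contract reads naturally with subprocess.run (a sketch, assuming createrepo_c is on $PATH):

import subprocess as sub

def run_createrepo(repodir):
    proc = sub.run(['createrepo_c', repodir],
                   stdout=sub.PIPE, stderr=sub.PIPE)
    if proc.returncode:
        # same contract as above: stderr becomes the error message
        raise RuntimeError(proc.stderr.decode(errors='replace'))
    return proc.stdout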
Example #5
    def process(self, params, arg_data):
        output_data = {}
        valid_actions = ['download']
        action = params.get('action', None)

        if action not in valid_actions:
            raise CheckbDirectiveError(
                '%s is not a valid action for bodhi directive' % action)

        if 'arch' not in params or 'target_dir' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The bodhi directive requires 'arch' and 'target_dir' as an "
                "argument. Detected arguments: %s" % detected_args)

        # convert str to list
        if isinstance(params['arch'], basestring):
            params['arch'] = [params['arch']]

        if action == 'download':
            if 'update_id' not in params or 'arch' not in params:
                detected_args = ', '.join(params.keys())
                raise CheckbDirectiveError(
                    "The bodhi directive 'download' requires both 'update_id' and "
                    "'arch' arguments. Detected arguments: %s" % detected_args)

            target_dir = params['target_dir']
            updateid = params['update_id']
            if 'all' in params['arch']:
                arches = config.get_config().supported_arches + ['noarch']
            else:
                arches = params['arch']
            if arches and ('noarch' not in arches):
                arches.append('noarch')

            src = params.get('src', False)

            log.info("getting rpms for update %s (%s) and downloading to %s",
                     updateid, arches, target_dir)

            output_data['downloaded_rpms'] = self.action_download(
                updateid, arches, src, target_dir)

        return output_data
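
The scalar-to-list normalization is a recurring idiom in these directives; note that basestring above is Python 2 only. The Python 3 spelling in isolation:

def as_list(value):
    '''Wrap a single string in a list; pass other iterables through.'''
    if isinstance(value, str):
        return [value]
    return list(value)

assert as_list('x86_64') == ['x86_64']
assert as_list(['x86_64', 'noarch']) == ['x86_64', 'noarch']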
Example #6
def install(pkgs):
    '''Install packages from system repositories using DNF. Either root or sudo access required.

    :param pkgs: packages to be installed, e.g. ``['pidgin']``, or any other argument supported by
                 ``dnf install`` command
    :type pkgs: list of str
    :raise CheckbPermissionError: if we don't have permissions to run DNF as admin
    :raise CheckbError: if DNF return code is not zero
    '''
    if not pkgs:
        return

    log.info('Installing %d packages...', len(pkgs))
    pkglist = ' '.join([pipes.quote(pkg) for pkg in pkgs])

    if not os_utils.is_root() and not os_utils.has_sudo():
        raise exc.CheckbPermissionError(
            "Can't install packages without root or sudo access. "
            'Packages requested: %s' % pkglist)

    cmd = ['dnf', '--assumeyes', 'install']
    cmd.extend(pkgs)

    # we must have sudo at this point, don't test again needlessly
    if not os_utils.is_root():
        cmd = ['sudo', '--non-interactive'] + cmd

    log.debug('Running: %s', ' '.join([pipes.quote(c) for c in cmd]))
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        log.error(
            u'✘ Package installation failed. We tried to install following packages:\n%s'
            '\nDNF returned exit code %d and output:\n%s', pkglist,
            e.returncode, e.output.rstrip())
        raise exc.CheckbError("Unable to install packages: %s" % pkglist)
    else:
        log.debug(
            u'✔ Package installation completed successfully. DNF output was:\n%s',
            output.rstrip())
        return
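
The privilege handling generalizes beyond DNF: build the command once, then prepend a non-interactive sudo only when not already root. A sketch with a hypothetical have_root flag in place of os_utils.is_root():

import pipes
import subprocess

def run_privileged(cmd, have_root):
    if not have_root:
        # --non-interactive makes sudo fail fast instead of prompting
        cmd = ['sudo', '--non-interactive'] + cmd
    print('Running: %s' % ' '.join(pipes.quote(c) for c in cmd))
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT)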
Example #7
    def _switch_to_mirror(self):
        '''If the baseurl is a round-robin redirect (as in case of
        ``download.fedoraproject.org``), resolve the ``url`` in each section
        and save it back. This avoids issues with multiple network requests
        being directed each to a different mirror (possibly with different
        contents).

        This is supposed to be called only once during initialization.
        '''
        log.info(
            'Resolving URLs in yumrepoinfo config, because the baseurl '
            'is a well-known redirect. Provide a custom mirror to skip this '
            'in the future.')

        for section in self.parser.sections():
            if section == 'DEFAULT':
                continue

            retries = 0
            url = self.parser.get(section, 'url')
            newurl = None

            while retries <= self.resolve_retry:
                try:
                    log.debug('Resolving url: %s', url)
                    response = urlopen(url, timeout=20)
                    newurl = response.geturl()
                    break
                except (URLError, socket.error) as e:
                    retries += 1
                    log.warning(
                        'Received %s when resolving %s . %s', e, url,
                        'Trying again...'
                        if retries <= self.resolve_retry else 'Giving up.')
                    if retries > self.resolve_retry:
                        raise exc.CheckbRemoteError(
                            'Failed to resolve %s : %s' % (url, e))

            self.parser.set(section, 'url', newurl)
            log.debug('Set %s section to use url: %s', section, newurl)
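
Resolving one of these round-robin redirects takes a single request: urlopen() follows HTTP redirects and geturl() reports the final location. A minimal sketch in Python 3 spelling:

from urllib.request import urlopen

def resolve_redirect(url, timeout=20):
    response = urlopen(url, timeout=timeout)
    return response.geturl()  # the URL after all redirects were followed

# resolve_redirect('https://download.fedoraproject.org/pub/fedora/linux/')
# returns the concrete mirror chosen for this particular request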
Example #8
    def process(self, params, arg_data):

        if 'file' in params and 'results' in params:
            raise CheckbDirectiveError(
                "Either `file` or `results` can be used, not both.")

        if 'artifactsdir' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The directive requires 'artifactsdir' as an "
                "argument. Detected arguments: %s" % detected_args)
        artifactsdir = params['artifactsdir']

        try:
            if params.get('file', None):
                with open(params['file']) as resultfile:
                    params['results'] = resultfile.read()

            check_details = check.import_YAML(params['results'])
            log.debug("YAML output parsed OK.")
        except (CheckbValueError, IOError) as e:
            raise CheckbDirectiveError("Failed to load results: %s" % e)

        log.debug('Generating report of %s results...', len(check_details))

        results = []
        for detail in check_details:
            results.append({
                'checkname': detail.checkname,
                'outcome': detail.outcome,
                'note': detail.note or '---',
                'item': detail.item,
                'type': detail.report_type,
                'artifact': os.path.basename(detail.artifact)
                            if detail.artifact else None,
            })

        if 'path' in params:
            report_fname = os.path.join(artifactsdir, params['path'])
        else:
            report_fname = os.path.join(artifactsdir, 'report.html')

        if 'template' in params:
            template_fname = params['template']
        else:
            template_fname = os.path.join(config.get_config()._data_dir,
                                          'report_templates/html.j2')

        try:
            file_utils.makedirs(os.path.dirname(report_fname))
        except OSError as e:
            raise CheckbDirectiveError(e)

        with open(template_fname) as f_template:
            with open(report_fname, 'w') as f_report:
                template = jinja2.Template(f_template.read())
                report = template.render(results=results,
                                         artifactsdir=artifactsdir)
                f_report.write(report)

        log.info('Report generated in: %s', os.path.abspath(report_fname))
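
Without the checkb plumbing, the report step is a plain jinja2 render into a file. A self-contained sketch with a hypothetical inline template instead of report_templates/html.j2:

import jinja2

results = [{'checkname': 'demo.check', 'outcome': 'PASSED', 'note': '---'}]
template = jinja2.Template(
    '<ul>{% for r in results %}'
    '<li>{{ r.checkname }}: {{ r.outcome }} ({{ r.note }})</li>'
    '{% endfor %}</ul>')
with open('report.html', 'w') as f_report:
    f_report.write(template.render(results=results))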
Example #9
    def process(self, params, arg_data):
        # checking if reporting is enabled is done after importing yaml which
        # serves as validation of input results

        if 'file' in params and 'results' in params:
            raise CheckbDirectiveError(
                "Either `file` or `results` can be used, not both.")

        try:
            if params.get('file', None):
                with open(params['file']) as resultfile:
                    params['results'] = resultfile.read()

            check_details = check.import_YAML(params['results'])
            log.debug("YAML output parsed OK.")
        except (CheckbValueError, IOError) as e:
            raise CheckbDirectiveError("Failed to load results: %s" % e)

        for detail in check_details:
            if not (detail.item and detail.report_type and detail.checkname):
                raise CheckbDirectiveError(
                    "The resultsdb directive requires 'item', 'type' and "
                    "'checkname' to be present in the YAML data.")

        conf = config.get_config()
        if not conf.report_to_resultsdb:
            log.info(
                "Reporting to ResultsDB is disabled. Once enabled, the "
                "following would get reported:\n%s", params['results'])
            return check.export_YAML(check_details)

        artifactsdir_url = '%s/%s' % (self.artifacts_baseurl, arg_data['uuid'])

        # for now, we're creating the resultsdb group at reporting time
        group_data = self.create_resultsdb_group(uuid=arg_data['uuid'])

        log.info('Posting %d results to ResultsDB...', len(check_details))
        for detail in check_details:
            checkname = detail.checkname

            # find out if the task is allowed to post results into the namespace
            if config.get_config().profile == config.ProfileName.PRODUCTION:
                self.check_namespace(checkname, arg_data)

            self.ensure_testcase_exists(checkname)
            result_log_url = artifactsdir_url
            if detail.artifact:
                artifact_path = self.get_artifact_path(
                    arg_data['artifactsdir'], detail.artifact)
                if artifact_path:
                    result_log_url = "%s/%s" % (artifactsdir_url,
                                                artifact_path)
            try:
                result = self.resultsdb.create_result(outcome=detail.outcome,
                                                      testcase=checkname,
                                                      groups=[group_data],
                                                      note=detail.note or None,
                                                      ref_url=result_log_url,
                                                      item=detail.item,
                                                      type=detail.report_type,
                                                      **detail.keyvals)
                log.debug('Result saved in ResultsDB:\n%s',
                          pprint.pformat(result))
                detail._internal['resultsdb_result_id'] = result['id']

            except resultsdb_api.ResultsDBapiException as e:
                log.error(e)
                log.error("Failed to store to ResultsDB: `%s` `%s` `%s`",
                          detail.item, checkname, detail.outcome)

        return check.export_YAML(check_details)
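
One detail worth noting: failures are caught per result, so a single rejected record does not abort the rest of the batch. That loop shape in isolation, with a hypothetical post() callable standing in for resultsdb.create_result():

import logging

log = logging.getLogger(__name__)

def post_all(post, details):
    '''Post every result; log failures instead of aborting the batch.'''
    stored = []
    for detail in details:
        try:
            stored.append(post(detail))
        except Exception as e:  # narrowed to ResultsDBapiException above
            log.error('Failed to store %s: %s', detail.checkname, e)
    return stored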
Example #10
    def build2update(self, builds, strict=False):
        '''Find matching Bodhi updates for provided builds.

        :param builds: builds to search for in N(E)VR format (``foo-1.2-3.fc20``
                       or ``foo-4:1.2-3.fc20``)
        :type builds: iterable of str
        :param bool strict: if ``False``, incomplete Bodhi updates are allowed.
                            If ``True``, every Bodhi update will be compared
                            with the set of provided builds. If there is a
                            Bodhi update that contains builds not provided in
                            ``builds``, that update is marked as incomplete and
                            removed from the result - i.e. all builds from
                            ``builds`` that were part of this incomplete update
                            are placed in the second dictionary of the result
                            tuple.
        :return: a tuple of two dictionaries:

                 * The first dict provides mapping between ``builds`` and their
                   updates where no error occurred.

                   ``{build (string): Bodhi update (Munch)}``
                 * The second dict provides mapping between ``builds`` and their
                   updates where some error occurred. The value is ``None`` if
                   the matching Bodhi update could not be found (the only
                   possible cause of failure if ``strict=False``). Or the value
                   is a Bodhi update that was incomplete (happens only if
                   ``strict=True``).

                   ``{build (string): Bodhi update (Munch) or None}``
                 * The sets of keys in these two dictionaries together correspond
                   exactly to ``builds``. For every build provided in ``builds`` you'll
                   get an answer in either the first or the second dictionary,
                   and there will be no extra builds that you haven't specified.
        :raise CheckbValueError: if ``builds`` type is incorrect
        '''
        # validate input params
        if not python_utils.iterable(builds, basestring):
            raise exc.CheckbValueError(
                "Param 'builds' must be an iterable of strings, and yours was: %s"
                % type(builds))

        updates = []
        build2update = {}
        failures = {}
        # Bodhi works with NVR only, but we have to ensure we receive and return
        # even NEVR format. So we need to convert internally.
        builds_nvr = set(
            [rpm_utils.rpmformat(build, 'nvr') for build in builds])
        builds_queue = list(builds_nvr)

        log.info('Querying Bodhi to map %d builds to their updates...',
                 len(builds))

        # retrieve all update data
        while builds_queue:
            builds_chunk = builds_queue[:self._MULTICALL_REQUEST_SIZE]
            builds_chunk = ' '.join(builds_chunk)
            res = self.client.query(builds=builds_chunk)

            updates.extend(res['updates'])
            builds_queue = builds_queue[self._MULTICALL_REQUEST_SIZE:]

            # don't query for builds which were already found
            for update in res['updates']:
                for build in update['builds']:
                    if build['nvr'] in builds_queue:
                        builds_queue.remove(build['nvr'])

            log.info('Bodhi queries done: %d/%d',
                     len(builds_nvr) - len(builds_queue), len(builds_nvr))

        # separate builds into OK and failures
        for update in updates:
            # all builds listed in the update
            bodhi_builds = set([build['nvr'] for build in update['builds']])
            # builds *not* provided in @param builds but part of the update (NVRs)
            missing_builds = bodhi_builds.difference(builds_nvr)
            # builds provided in @param builds and part of the update
            matched_builds = [
                build for build in builds
                if rpm_utils.rpmformat(build, 'nvr') in bodhi_builds
            ]

            # reject incomplete updates when strict
            if missing_builds and strict:
                for build in matched_builds:
                    failures[build] = update
                continue

            # otherwise the update is complete or we don't care
            for build in matched_builds:
                build2update[build] = update

        # mark builds without any associated update as a failure
        for build in builds:
            if build not in build2update and build not in failures:
                failures[build] = None

        diff = set(builds).symmetric_difference(
            set(build2update.keys()).union(set(failures.keys())))
        assert not diff, "Returned NVRs different from input NVRs: %s" % diff

        return (build2update, failures)
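
Per the docstring's contract, every input build comes back in exactly one of the two dictionaries. Hypothetical usage, with bodhi being an instance of this class and made-up NEVRs:

builds = ['foo-1.2-3.fc20', 'bar-4:1.2-3.fc20']
build2update, failures = bodhi.build2update(builds, strict=True)

for build, update in build2update.items():
    print('%s -> %s' % (build, update['title']))  # Munch; 'title' field assumed
for build, update in failures.items():
    if update is None:
        print('%s: no Bodhi update found' % build)
    else:
        print('%s: part of an incomplete update' % build)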
Example #11
    def process(self, params, arg_data):
        # process params
        valid_actions = ['download', 'download_tag', 'download_latest_stable']
        action = params['action']
        if action not in valid_actions:
            raise exc.CheckbDirectiveError('%s is not a valid action for koji '
                                           'directive' % action)

        if 'arch' not in params or 'target_dir' not in params:
            detected_args = ', '.join(params.keys())
            raise exc.CheckbDirectiveError(
                "The koji directive requires 'arch' and 'target_dir' as an "
                "argument. Detected arguments: %s" % detected_args)

        # convert str to list
        for param in ('arch', 'arch_exclude'):
            if param in params and isinstance(params[param], basestring):
                params[param] = [params[param]]

        arches = list(params['arch'])
        if arches and ('all' not in arches) and ('noarch' not in arches):
            arches.append('noarch')

        arch_exclude = params.get('arch_exclude', [])
        debuginfo = params.get('debuginfo', False)
        src = params.get('src', False)
        build_log = params.get('build_log', False)

        target_dir = params['target_dir']
        file_utils.makedirs(target_dir)

        # download files
        output_data = {}

        if action == 'download':
            if 'koji_build' not in params:
                detected_args = ', '.join(params.keys())
                raise exc.CheckbDirectiveError(
                    "The koji directive requires 'koji_build' for the 'download' "
                    "action. Detected arguments: %s" % detected_args)

            nvr = rpm_utils.rpmformat(params['koji_build'], 'nvr')

            output_data['downloaded_rpms'] = self.koji.get_nvr_rpms(
                nvr, target_dir, arches=arches, arch_exclude=arch_exclude,
                debuginfo=debuginfo, src=src)

        elif action == 'download_tag':
            if 'koji_tag' not in params:
                detected_args = ', '.join(params.keys())
                raise exc.CheckbDirectiveError(
                    "The koji directive requires 'koji_tag' for the 'download_tag' "
                    "action. Detected arguments: %s" % detected_args)

            koji_tag = params['koji_tag']

            output_data['downloaded_rpms'] = self.koji.get_tagged_rpms(
                koji_tag, target_dir, arches=arches, arch_exclude=arch_exclude,
                debuginfo=debuginfo, src=src)

        elif action == 'download_latest_stable':
            if 'koji_build' not in params:
                detected_args = ', '.join(params.keys())
                raise exc.CheckbDirectiveError(
                    "The koji directive requires 'koji_build' for the 'download_latest_stable' "
                    "action. Detected arguments: %s" % detected_args)

            name = rpm_utils.rpmformat(params['koji_build'], 'n')
            disttag = rpm_utils.get_dist_tag(params['koji_build'])
            # we need to do 'fc22' -> 'f22' conversion
            tag = disttag.replace('c', '')

            # first we need to check updates tag and if that fails, the latest
            # stable nvr is in the base repo
            tags = ['%s-updates' % tag, tag]
            nvr = self.koji.latest_by_tag(tags, name)

            if not nvr:
                log.info("There's no previous stable build for %s, skipping.",
                         params['koji_build'])
                assert output_data == {}
                return output_data

            output_data['downloaded_rpms'] = self.koji.get_nvr_rpms(
                nvr, target_dir, arch_exclude=arch_exclude,
                arches=arches, debuginfo=debuginfo, src=src)

        # download build.log if requested
        if build_log:
            if action in ('download', 'download_latest_stable'):
                ret_log = self.koji.get_build_log(
                        nvr, target_dir, arches=arches, arch_exclude=arch_exclude)
                output_data['downloaded_logs'] = ret_log['ok']
                output_data['log_errors'] = ret_log['error']
            else:
                log.warning("Downloading build logs is not supported for action '%s', ignoring.",
                            action)

        return output_data
Example #12
    def execute(self):
        '''Execute all the tasks in the taskdir

        :return: ``True`` if execution finished successfully for all playbooks
            present. ``False`` if some of them crashed, haven't produced any
            results or the execution was interrupted (e.g. a system signal).
        :rtype: bool
        '''
        test_playbooks = fnmatch.filter(os.listdir(self.arg_data['taskdir']),
                                        'tests*.yml')
        if not test_playbooks:
            raise exc.CheckbError('No tests*.yml found in dir %s' %
                                  self.arg_data['taskdir'])

        failed = []
        for test_playbook in test_playbooks:
            playbook_vars = None
            try:
                # syntax check
                self._check_playbook_syntax(
                    os.path.join(self.arg_data['taskdir'], test_playbook))

                # compute variables
                playbook_vars = self._create_playbook_vars(test_playbook)

                if not playbook_vars['checkb_generic_task']:
                    raise exc.CheckbPlaybookError(
                        'This playbook is not marked as a Checkb generic '
                        'task. See the documentation for how to write a '
                        'task.')

                # spawn VM if needed
                ipaddr = self.ipaddr
                if ipaddr is None:
                    ipaddr = self._spawn_vm(self.arg_data['uuid'],
                                            playbook_vars)

                # execute
                log.info('Running playbook %s on machine: %s', test_playbook,
                         ipaddr)
                self._run_playbook(test_playbook, ipaddr, playbook_vars)

                # report results
                self._report_results(test_playbook)
            except exc.CheckbInterruptError as e:
                log.error(
                    'Caught system interrupt during execution of '
                    'playbook %s: %s. Not executing any other playbooks.',
                    test_playbook, e)
                failed.append(test_playbook)
                break
            except exc.CheckbError as e:
                log.error('Error during execution of playbook %s: %s',
                          test_playbook, e)
                failed.append(test_playbook)
            finally:
                try:
                    if (playbook_vars and config.get_config().profile !=
                            config.ProfileName.TESTING):
                        os.remove(playbook_vars['checkb_secrets_file'])
                except OSError as e:
                    log.warning("Could not delete the secrets file at %r. %s",
                                playbook_vars['checkb_secrets_file'], e)
                if self.task_vm is not None:
                    if self.arg_data['no_destroy']:
                        log.info(
                            'Not destroying disposable client as '
                            'requested, access it at: %s . Skipping any '
                            'other playbooks.', ipaddr)
                        break
                    else:
                        self.task_vm.teardown()
                log.info('Playbook execution finished: %s', test_playbook)

        if failed:
            log.error('Some playbooks failed during execution: %s',
                      ', '.join(failed))
        else:
            log.info('All playbooks finished successfully')

        return not failed