Beispiel #1
0
    def _check_playbook_syntax(playbook):
        '''Verify a playbook with ``ansible-playbook --syntax-check``.

        :param str playbook: path to an ansible playbook
        :raise CheckbPlaybookError: when the playbook is not syntactically
            correct
        '''
        cmd = ['ansible-playbook', '--syntax-check', playbook]
        try:
            subprocess.check_call(cmd)
        except subprocess.CalledProcessError as e:
            log.error('Syntax check failed for playbook %s: %s', playbook, e)
            raise exc.CheckbPlaybookError(e)
        else:
            log.debug('Playbook is syntactically correct: %s', playbook)
Beispiel #2
0
def main():
    '''Main entry point executed by runtask script'''
    # Preliminary initialization of logging, so all messages before regular
    # initialization can be logged to stream.
    logger.init_prior_config()

    time_fmt = '%Y-%m-%d %H:%M:%S UTC'
    log.info('Execution started at: %s',
             datetime.datetime.utcnow().strftime(time_fmt))
    log.debug('Using checkb %s', checkb.__version__)

    # parse cmdline
    parser = get_argparser()
    args = parser.parse_args()

    check_args(parser, vars(args))
    log.debug('Parsed arguments: %s', args)
    arg_data = process_args(vars(args))

    # create artifacts directory + subdirs
    artif_subdir = os.path.join(arg_data['artifactsdir'], 'checkb')
    try:
        file_utils.makedirs(artif_subdir)
        log.info("Task artifacts will be saved in: %s",
                 arg_data['artifactsdir'])
    except OSError:
        log.error("Can't create artifacts directory %s", artif_subdir)
        raise

    # initialize logging
    logger.init(level_stream=logging.DEBUG if args.debug else None)
    logpath = os.path.join(artif_subdir, 'checkb.log')
    logger.add_filehandler(level_file=logging.DEBUG, filelog_path=logpath)
    logger.remove_mem_handler()

    # start execution
    success = Executor(arg_data).execute()

    # finalize
    log.info('Task artifacts were saved in: %s', arg_data['artifactsdir'])
    cfg = config.get_config()
    if cfg.profile == config.ProfileName.PRODUCTION:
        log.info('External URL for task artifacts: %s/%s',
                 cfg.artifacts_baseurl, arg_data['uuid'])
    log.info('Execution finished at: %s',
             datetime.datetime.utcnow().strftime(time_fmt))

    if success:
        sys.exit(0)
    log.error('Some playbooks failed. Exiting with non-zero exit code.')
    sys.exit(1)
Beispiel #3
0
    def _get_vault_secrets(self, taskdir):
        '''Load secrets from the Vault server and store them in a file

        :param str taskdir: path to the directory with test suite (on overlord)
        :return: a filename with decrypted secrets (in the TESTING profile,
            the secrets dict itself is returned instead)
        '''
        cfg = config.get_config()
        secrets = {}
        if cfg.vault_enabled:
            task_repo_url = resultsdb_directive.git_origin_url(taskdir)
            if task_repo_url:
                try:
                    session = file_utils._get_session()
                    r = session.get(
                        "%s/buckets" % cfg.vault_server,
                        auth=(cfg.vault_username, cfg.vault_password),
                    )
                except requests.exceptions.RequestException as e:
                    log.error("Connection to Vault server failed. %s", e)
                    r = None

                if r and r.ok:
                    data = r.json()['data']
                    valid_buckets = []
                    # buckets opt in for a task repo by listing it inside a
                    # 'checkb_enable(...)' marker in their description
                    re_enabler = re.compile(r'checkb_enable\((.*?)\)')
                    for b in data:
                        desc = b['description']
                        if not desc:
                            continue
                        enabled_for = ', '.join(re_enabler.findall(desc))
                        # fixed: idiomatic 'not in' instead of 'not x in y'
                        if task_repo_url not in enabled_for:
                            continue
                        valid_buckets.append(b)
                    for b in valid_buckets:
                        secrets[b['uuid']] = b['secrets']
                elif r and not r.ok:
                    log.error("Could not get data from vault. %r, %r",
                              r.status_code, r.reason)

        if config.get_config().profile == config.ProfileName.TESTING:
            return secrets

        # persist secrets to a temp file; close the raw mkstemp descriptor
        # first, then re-open by name (the original re-used the name 'fd' for
        # the file object, shadowing the integer descriptor)
        fd, fname = tempfile.mkstemp(prefix='checkb_secrets')
        os.close(fd)
        with open(fname, 'w') as secrets_file:
            secrets_file.write(json.dumps(secrets, indent=2, sort_keys=True))
        return fname
Beispiel #4
0
    def _prepare_image(self, distro, release, flavor, arch):
        '''Use testcloud to prepare an image for local booting

        :param str distro: Distro to use in image discovery
        :param str release: Distro's release to use in image discovery
        :param str flavor: base-image flavor to use in image discovery
        :param str arch: arch to use in image discovery
        :return: the prepared testcloud image object
        :raises CheckbImageNotFoundError: when base image of the required type
            is not found
        :raises CheckbImageError: for errors in preparing the image with
            testcloud
        '''

        tc_image = None

        try:
            if config.get_config().force_imageurl:
                img_url = config.get_config().imageurl
            else:
                # fixed: lazy %-style logging args instead of eager '%'
                # formatting, so the string is only built when debug is on
                log.debug(
                    "Looking for image with DISTRO: %s, RELEASE: %s, "
                    "FLAVOR: %s, ARCH: %s", distro, release, flavor, arch)

                img_url = ImageFinder.get_latest(distro=distro,
                                                 release=release,
                                                 flavor=flavor,
                                                 arch=arch)
        except exc.CheckbImageNotFoundError as e:
            log.error(e)
            raise

        # fixed: lazy logging args instead of str.format for consistency
        log.debug("Preparing image %s for task %s", img_url, self.uuid)

        try:
            tc_image = image.Image(img_url)
            # symlink the image instead of copying it to the testcloud dir, because our user only
            # expects image handling in checkb dirs, and we remove all minion instances
            # immediately after task execution anyway
            tc_image.prepare(copy=False)
        except TestcloudImageError as e:
            log.exception(e)
            raise exc.CheckbImageError(
                "There was an error while preparing the "
                "testcloud image", e)

        return tc_image
Beispiel #5
0
def install(pkgs):
    '''Install packages from system repositories using DNF. Either root or sudo access required.

    :param pkgs: packages to be installed, e.g. ``['pidgin']``, or any other argument supported by
                 ``dnf install`` command
    :type pkgs: list of str
    :raise CheckbPermissionError: if we don't have permissions to run DNF as admin
    :raise CheckbError: if DNF return code is not zero
    '''
    # fixed: 'pipes.quote' is deprecated and removed in Python 3.13;
    # shlex.quote is its direct stdlib replacement (imported locally since
    # the module-level import block is not part of this change)
    import shlex

    # nothing to do for an empty package list
    if not pkgs:
        return

    log.info('Installing %d packages...', len(pkgs))
    pkglist = ' '.join([shlex.quote(pkg) for pkg in pkgs])

    if not os_utils.is_root() and not os_utils.has_sudo():
        raise exc.CheckbPermissionError(
            "Can't install packages without root or sudo access. "
            'Packages requested: %s' % pkglist)

    cmd = ['dnf', '--assumeyes', 'install']
    cmd.extend(pkgs)

    if not os_utils.is_root(
    ):  # we must have sudo at this point, don't test again needlessly
        cmd = ['sudo', '--non-interactive'] + cmd

    log.debug('Running: %s', ' '.join([shlex.quote(c) for c in cmd]))
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        log.error(
            u'✘ Package installation failed. We tried to install following packages:\n%s'
            '\nDNF returned exit code %d and output:\n%s', pkglist,
            e.returncode, e.output.rstrip())
        raise exc.CheckbError("Unable to install packages: %s" % pkglist)
    else:
        log.debug(
            u'✔ Package installation completed successfully. DNF output was:\n%s',
            output.rstrip())
        return
Beispiel #6
0
    def process(self, params, arg_data):
        '''Report check results to ResultsDB, or just return them when
        reporting is disabled.

        :param dict params: directive parameters; either ``file`` (path to a
            YAML results file) or ``results`` (YAML string) may be given,
            not both
        :param dict arg_data: runner argument data; the ``uuid`` and
            ``artifactsdir`` keys are read here
        :return: YAML export of the (possibly updated) check details
        :raise CheckbDirectiveError: when both ``file`` and ``results`` are
            supplied, when the results cannot be loaded, or when mandatory
            fields are missing from the YAML data
        '''
        # checking if reporting is enabled is done after importing yaml which
        # serves as validation of input results

        if 'file' in params and 'results' in params:
            raise CheckbDirectiveError(
                "Either `file` or `results` can be used, not both.")

        try:
            # when 'file' is given, read it into params['results'] so both
            # input modes share a single code path below
            if params.get('file', None):
                with open(params['file']) as resultfile:
                    params['results'] = resultfile.read()

            check_details = check.import_YAML(params['results'])
            log.debug("YAML output parsed OK.")
        except (CheckbValueError, IOError) as e:
            raise CheckbDirectiveError("Failed to load results: %s" % e)

        # every result must carry the three identifying fields before any
        # reporting is attempted
        for detail in check_details:
            if not (detail.item and detail.report_type and detail.checkname):
                raise CheckbDirectiveError(
                    "The resultsdb directive requires 'item', 'type' and "
                    "'checkname' to be present in the YAML data.")

        conf = config.get_config()
        if not conf.report_to_resultsdb:
            log.info(
                "Reporting to ResultsDB is disabled. Once enabled, the "
                "following would get reported:\n%s", params['results'])
            return check.export_YAML(check_details)

        artifactsdir_url = '%s/%s' % (self.artifacts_baseurl, arg_data['uuid'])

        # for now, we're creating the resultsdb group at reporting time
        group_data = self.create_resultsdb_group(uuid=arg_data['uuid'])

        log.info('Posting %s results to ResultsDB...' % len(check_details))
        for detail in check_details:
            checkname = detail.checkname

            # find out if the task is allowed to post results into the namespace
            if config.get_config().profile == config.ProfileName.PRODUCTION:
                self.check_namespace(checkname, arg_data)

            self.ensure_testcase_exists(checkname)
            # link the result to a specific artifact file when one resolves,
            # otherwise to the whole artifacts directory
            result_log_url = artifactsdir_url
            if detail.artifact:
                artifact_path = self.get_artifact_path(
                    arg_data['artifactsdir'], detail.artifact)
                if artifact_path:
                    result_log_url = "%s/%s" % (artifactsdir_url,
                                                artifact_path)
            try:
                result = self.resultsdb.create_result(outcome=detail.outcome,
                                                      testcase=checkname,
                                                      groups=[group_data],
                                                      note=detail.note or None,
                                                      ref_url=result_log_url,
                                                      item=detail.item,
                                                      type=detail.report_type,
                                                      **detail.keyvals)
                log.debug('Result saved in ResultsDB:\n%s',
                          pprint.pformat(result))
                detail._internal['resultsdb_result_id'] = result['id']

            except resultsdb_api.ResultsDBapiException as e:
                # a failed single result is logged but does not abort
                # reporting of the remaining details
                log.error(e)
                log.error("Failed to store to ResultsDB: `%s` `%s` `%s`",
                          detail.item, checkname, detail.outcome)

        return check.export_YAML(check_details)
Beispiel #7
0
    def execute(self):
        '''Execute all the tasks in the taskdir

        :return: ``True`` if execution finished successfully for all playbooks
            present. ``False`` if some of them crashed, haven't produced any
            results or the execution was interrupted (e.g. a system signal).
        :rtype: bool
        '''
        # every file matching tests*.yml in the task dir is a test playbook
        test_playbooks = fnmatch.filter(os.listdir(self.arg_data['taskdir']),
                                        'tests*.yml')
        if not test_playbooks:
            raise exc.CheckbError('No tests*.yml found in dir %s' %
                                  self.arg_data['taskdir'])

        failed = []
        for test_playbook in test_playbooks:
            playbook_vars = None
            try:
                # syntax check
                self._check_playbook_syntax(
                    os.path.join(self.arg_data['taskdir'], test_playbook))

                # compute variables
                playbook_vars = self._create_playbook_vars(test_playbook)

                if not playbook_vars['checkb_generic_task']:
                    raise exc.CheckbPlaybookError(
                        'This playbook is not '
                        'marked as a Checkb generic task. See '
                        'documentation how to write a task.')

                # spawn VM if needed
                ipaddr = self.ipaddr
                if ipaddr is None:
                    ipaddr = self._spawn_vm(self.arg_data['uuid'],
                                            playbook_vars)

                # execute
                log.info('Running playbook %s on machine: %s', test_playbook,
                         ipaddr)
                self._run_playbook(test_playbook, ipaddr, playbook_vars)

                # report results
                self._report_results(test_playbook)
            except exc.CheckbInterruptError as e:
                # a system signal aborts the whole run, not just this playbook
                log.error(
                    'Caught system interrupt during execution of '
                    'playbook %s: %s. Not executing any other playbooks.',
                    test_playbook, e)
                failed.append(test_playbook)
                break
            except exc.CheckbError as e:
                # other task errors only fail this playbook; continue with
                # the next one
                log.error('Error during execution of playbook %s: %s',
                          test_playbook, e)
                failed.append(test_playbook)
            finally:
                # best-effort removal of the secrets file dumped for this
                # playbook (kept in the TESTING profile)
                try:
                    if playbook_vars and config.get_config(
                    ).profile != config.ProfileName.TESTING:
                        os.remove(playbook_vars['checkb_secrets_file'])
                except OSError as e:
                    log.warning("Could not delete the secrets file at %r. %s",
                                playbook_vars['checkb_secrets_file'], e)
                # NOTE(review): 'ipaddr' may be unbound here if an exception
                # occurred before it was assigned (e.g. a syntax-check
                # failure on the first playbook) — confirm this path cannot
                # be reached with self.task_vm set, or the log call below
                # raises NameError. Also note teardown() can run again for
                # the same task_vm on a later loop iteration.
                if self.task_vm is not None:
                    if self.arg_data['no_destroy']:
                        log.info(
                            'Not destroying disposable client as '
                            'requested, access it at: %s . Skipping any '
                            'other playbooks.', ipaddr)
                        break
                    else:
                        self.task_vm.teardown()
                log.info('Playbook execution finished: %s', test_playbook)

        if failed:
            log.error('Some playbooks failed during execution: %s',
                      ', '.join(failed))
        else:
            log.info('All playbooks finished successfully')

        return not failed
Beispiel #8
0
    def _run_playbook(self, test_playbook, ipaddr, playbook_vars):
        '''Run the ansible-playbook command to execute given playbook
        containing the task.

        :param str test_playbook: name of the playbook, relative to the task
            directory
        :param str ipaddr: IP address of the machine the task will be run on
        :param dict playbook_vars: vars dict created by
            :meth:`_create_playbook_vars`
        :return: stream output of the ansible-playbook command (stdout and
            stderr merged together)
        :rtype: str
        :raise CheckbPlaybookError: when the playbook is not syntactically
            correct
        '''
        ansible_dir = os.path.join(config.get_config()._data_dir, 'ansible')

        # dump variables for future use
        varsfile, allvarsfile = self._dump_playbook_vars(playbook_vars)

        # figure out the ansible-playbook command
        cmd = [
            'ansible-playbook',
            'runner.yml',
            '--inventory=%s,' % ipaddr,  # the ending comma is important
            '--extra-vars=@%s' % allvarsfile,
        ]

        # for local execution, run as root unless instructed otherwise
        if not self.run_remotely and playbook_vars['become_root']:
            cmd.append('--become')

        if self.run_remotely:
            if self.arg_data['ssh_privkey']:
                cmd.append('--private-key=%s' % self.arg_data['ssh_privkey'])
        else:
            # local run talks to this very machine, no SSH involved
            cmd.append('--connection=local')

        if self.arg_data['debug']:
            cmd.append('-vv')

        log.debug('Running ansible playbook %s', ' '.join(cmd))
        try:
            # during playbook execution, handle system signals asking us to
            # quit
            signal.signal(signal.SIGINT, self._interrupt_handler)
            signal.signal(signal.SIGTERM, self._interrupt_handler)

            output, _ = os_utils.popen_rt(cmd, cwd=ansible_dir)
            return output
        except subprocess.CalledProcessError as e:
            log.error('ansible-playbook ended with %d return code',
                      e.returncode)
            log.debug(e.output)
            raise exc.CheckbError(e)
        except exc.CheckbInterruptError as e:
            log.error(
                'System interrupt %s detected. Pulling logs and '
                'stopping execution.', e)
            # re-run only the 'failsafe' tagged tasks to salvage logs from
            # the minion before aborting; errors here are logged and the
            # original interrupt is re-raised
            cmd_failsafe = cmd + ['--tags', 'failsafe']
            try:
                os_utils.popen_rt(cmd_failsafe, cwd=ansible_dir)
            except (subprocess.CalledProcessError,
                    exc.CheckbInterruptError) as e2:
                log.error(
                    'Error during failsafe pulling logs, ignoring and '
                    'raising the original error. The current error is: %s:%s',
                    e2.__class__.__name__, e2)
            raise e
        finally:
            # reset signal handling to default behavior
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)