Example 1
def create_job_archive(job_name, job_archive_path, archive_dir):
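    """
    Create the job's archive directory, creating the run's top-level
    archive directory first if it does not exist yet.
    """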
    log.info('Creating job\'s archive dir %s', job_archive_path)
    safe_archive = safepath.munge(job_name)
    run_archive = os.path.join(archive_dir, safe_archive)
    if not os.path.exists(run_archive):
        safepath.makedirs('/', run_archive)
    safepath.makedirs('/', job_archive_path)
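
The snippets in this section lean on teuthology's safepath helpers. The
calls above suggest that safepath.makedirs(root, path) creates every
component of path beneath root, tolerating components that already
exist. A minimal sketch under that assumption (the real helper may
differ, e.g. in how it validates the already-munged path):

import errno
import os


def makedirs(root, path):
    """Sketch: create each component of `path` underneath `root`."""
    cur = root
    for comp in path.split('/'):
        if not comp:
            # skip empty components from leading or doubled slashes
            continue
        cur = os.path.join(cur, comp)
        try:
            os.mkdir(cur)
        except OSError as e:
            # an already-existing directory is fine; anything else is not
            if e.errno != errno.EEXIST:
                raise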
Example 2
def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    r = remote.get_tar_stream(remotedir, sudo=True)
    tar = tarfile.open(mode='r|gz', fileobj=r.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            # XXX this means empty dirs are not transferred
            pass
        elif ti.isfile():
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            tar.makefile(ti, targetpath=os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
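
The 'r|gz' mode above opens the archive as a non-seekable stream, which
is what lets pull_directory read straight from the remote command's
stdout instead of staging a temporary file (compare Example 7 below,
which does stage one). A standalone sketch of the same pattern, using a
local tar process purely for illustration:

import subprocess
import tarfile

# Produce a gzipped tar on a pipe and list its entries without ever
# writing the archive to disk.
proc = subprocess.Popen(
    ['tar', 'czf', '-', '-C', '/etc', '.'],
    stdout=subprocess.PIPE,
)
tar = tarfile.open(mode='r|gz', fileobj=proc.stdout)
for ti in tar:
    print(ti.name)
proc.wait()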
Example 3
def prep_job(job_config, log_file_path, archive_dir):
    job_id = job_config['job_id']
    safe_archive = safepath.munge(job_config['name'])
    job_config['worker_log'] = log_file_path
    archive_path_full = os.path.join(archive_dir, safe_archive, str(job_id))
    job_config['archive_path'] = archive_path_full

    # If the teuthology branch was not specified, default to master and
    # store that value.
    teuthology_branch = job_config.get('teuthology_branch', 'master')
    job_config['teuthology_branch'] = teuthology_branch
    teuthology_sha1 = job_config.get('teuthology_sha1')
    if not teuthology_sha1:
        repo_url = build_git_url('teuthology', 'ceph')
        teuthology_sha1 = ls_remote(repo_url, teuthology_branch)
        if not teuthology_sha1:
            reason = "Teuthology branch {} not found; marking job as dead".format(
                teuthology_branch)
            log.error(reason)
            report.try_push_job_info(
                job_config, dict(status='dead', failure_reason=reason))
            raise SkipJob()
        log.info('Using teuthology sha1 %s', teuthology_sha1)

    try:
        if teuth_config.teuthology_path is not None:
            teuth_path = teuth_config.teuthology_path
        else:
            teuth_path = fetch_teuthology(branch=teuthology_branch,
                                          commit=teuthology_sha1)
        # For the teuthology tasks, we look for suite_branch, and if we
        # don't get that, we look for branch, and fall back to 'master'.
        # last-in-suite jobs don't have suite_branch or branch set.
        ceph_branch = job_config.get('branch', 'master')
        suite_branch = job_config.get('suite_branch', ceph_branch)
        suite_sha1 = job_config.get('suite_sha1')
        suite_repo = job_config.get('suite_repo')
        if suite_repo:
            teuth_config.ceph_qa_suite_git_url = suite_repo
        job_config['suite_path'] = os.path.normpath(
            os.path.join(
                fetch_qa_suite(suite_branch, suite_sha1),
                job_config.get('suite_relpath', ''),
            ))
    except (BranchNotFoundError, CommitNotFoundError) as exc:
        log.exception("Requested version not found; marking job as dead")
        report.try_push_job_info(job_config,
                                 dict(status='dead', failure_reason=str(exc)))
        raise SkipJob()
    except MaxWhileTries as exc:
        log.exception("Failed to fetch or bootstrap; marking job as dead")
        report.try_push_job_info(job_config,
                                 dict(status='dead', failure_reason=str(exc)))
        raise SkipJob()

    teuth_bin_path = os.path.join(teuth_path, 'virtualenv', 'bin')
    if not os.path.isdir(teuth_bin_path):
        raise RuntimeError("teuthology branch %s at %s not bootstrapped!" %
                           (teuthology_branch, teuth_bin_path))
    return job_config, teuth_bin_path
Example 4
def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.
    """
    log.debug('Transferring archived files from %s:%s to %s', remote.shortname,
              remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    proc = remote.run(
        args=[
            'sudo',
            'tar',
            'c',
            '-f',
            '-',
            '-C',
            remotedir,
            '--',
            '.',
        ],
        stdout=run.PIPE,
        wait=False,
    )
    tar = tarfile.open(mode='r|', fileobj=proc.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            pass
        elif ti.isfile():
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            tar.makefile(ti, targetpath=os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
    proc.exitstatus.get()
Example 5
def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    proc = remote.run(
        args=[
            'sudo',
            'tar',
            'c',
            '-f', '-',
            '-C', remotedir,
            '--',
            '.',
            ],
        stdout=run.PIPE,
        wait=False,
        )
    tar = tarfile.open(mode='r|', fileobj=proc.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            pass
        elif ti.isfile():
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            tar.makefile(ti, targetpath=os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
    proc.exitstatus.get()
Example 6
def pull_directory(remote, remotedir, localdir, write_to=copy_fileobj):
    """
    Copy a remote directory to a local directory.

    :param remote: the remote object representing the remote host from where
                   the specified directory is pulled
    :param remotedir: the source directory on remote host
    :param localdir: the destination directory on localhost
    :param write_to: optional function to write the file to localdir.
                     its signature should be:
                     func(src: fileobj,
                          tarinfo: tarfile.TarInfo,
                          local_path: str)
    """
    log.debug('Transferring archived files from %s:%s to %s',
              remote.shortname, remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    r = remote.get_tar_stream(remotedir, sudo=True)
    tar = tarfile.open(mode='r|gz', fileobj=r.stdout)
    while True:
        ti = tar.next()
        if ti is None:
            break

        if ti.isdir():
            # ignore silently; easier to just create leading dirs below
            # XXX this means empty dirs are not transferred
            pass
        elif ti.isfile():
            sub = safepath.munge(ti.name)
            safepath.makedirs(root=localdir, path=os.path.dirname(sub))
            with tar.extractfile(ti) as src:
                write_to(src, ti, os.path.join(localdir, sub))
        else:
            if ti.isdev():
                type_ = 'device'
            elif ti.issym():
                type_ = 'symlink'
            elif ti.islnk():
                type_ = 'hard link'
            else:
                type_ = 'unknown'
            log.info('Ignoring tar entry: %r type %r', ti.name, type_)
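
The write_to hook makes the extraction step pluggable. A hypothetical
callback matching the documented signature, storing each pulled file
gzip-compressed (gzip_write_to is an illustration, not part of
teuthology):

import gzip
import shutil


def gzip_write_to(src, tarinfo, local_path):
    # `src` is the file object returned by tar.extractfile(); compress
    # its contents instead of copying them verbatim.
    with gzip.open(local_path + '.gz', 'wb') as dst:
        shutil.copyfileobj(src, dst)

# usage: pull_directory(remote, remotedir, localdir, write_to=gzip_write_to)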
Example 7
def pull_directory(remote, remotedir, localdir):
    """
    Copy a remote directory to a local directory.
    """
    log.debug('Transferring archived files from %s:%s to %s', remote.shortname,
              remotedir, localdir)
    if not os.path.exists(localdir):
        os.mkdir(localdir)
    _, local_tarfile = tempfile.mkstemp(dir=localdir)
    remote.get_tar(remotedir, local_tarfile, sudo=True)
    with open(local_tarfile, 'rb') as fb1:
        tar = tarfile.open(mode='r|gz', fileobj=fb1)
        while True:
            ti = tar.next()
            if ti is None:
                break

            if ti.isdir():
                # ignore silently; easier to just create leading dirs below
                pass
            elif ti.isfile():
                sub = safepath.munge(ti.name)
                safepath.makedirs(root=localdir, path=os.path.dirname(sub))
                tar.makefile(ti, targetpath=os.path.join(localdir, sub))
            else:
                if ti.isdev():
                    type_ = 'device'
                elif ti.issym():
                    type_ = 'symlink'
                elif ti.islnk():
                    type_ = 'hard link'
                else:
                    type_ = 'unknown'
                log.info('Ignoring tar entry: %r type %r', ti.name, type_)
    os.remove(local_tarfile)
Example 8
def test_absolute(self):
    got = safepath.munge('/evil')
    assert got == 'evil'
Example 9
def test_absolute_subdir(self):
    got = safepath.munge('/evil/here')
    assert got == 'evil/here'
Example 10
def test_dot_leading(self):
    got = safepath.munge('./foo')
    assert got == 'foo'
Example 11
def archive(ctx, config):
    log.info('Creating archive directory...')
    run.wait(
        ctx.cluster.run(
            args=[
                'install', '-d', '-m0755', '--',
                '/tmp/cephtest/archive',
                ],
            wait=False,
            )
        )

    try:
        yield
    finally:
        if ctx.archive is not None:

            log.info('Transferring archived files...')
            logdir = os.path.join(ctx.archive, 'remote')
            os.mkdir(logdir)
            for remote in ctx.cluster.remotes.iterkeys():
                path = os.path.join(logdir, remote.shortname)
                os.mkdir(path)
                log.debug('Transferring archived files from %s to %s', remote.shortname, path)
                proc = remote.run(
                    args=[
                        'tar',
                        'c',
                        '-f', '-',
                        '-C', '/tmp/cephtest/archive',
                        '--',
                        '.',
                        ],
                    stdout=run.PIPE,
                    wait=False,
                    )
                tar = tarfile.open(mode='r|', fileobj=proc.stdout)
                while True:
                    ti = tar.next()
                    if ti is None:
                        break

                    if ti.isdir():
                        # ignore silently; easier to just create leading dirs below
                        pass
                    elif ti.isfile():
                        sub = safepath.munge(ti.name)
                        safepath.makedirs(root=path, path=os.path.dirname(sub))
                        tar.makefile(ti, targetpath=os.path.join(path, sub))
                    else:
                        if ti.isdev():
                            type_ = 'device'
                        elif ti.issym():
                            type_ = 'symlink'
                        elif ti.islnk():
                            type_ = 'hard link'
                        else:
                            type_ = 'unknown'
                        log.info('Ignoring tar entry: %r type %r', ti.name, type_)
                        continue
                proc.exitstatus.get()

        log.info('Removing archive directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    '/tmp/cephtest/archive',
                    ],
                wait=False,
                ),
            )
Example 12
def test_dot_middle(self):
    got = safepath.munge('evil/./foo')
    assert got == 'evil/foo'
Example 13
def test_dotdot(self):
    got = safepath.munge('../evil/foo')
    assert got == '_./evil/foo'
Example 14
def archive(ctx, config):
    log.info('Creating archive directory...')
    run.wait(
        ctx.cluster.run(
            args=[
                'install',
                '-d',
                '-m0755',
                '--',
                '/tmp/cephtest/archive',
            ],
            wait=False,
        ))

    try:
        yield
    finally:
        if ctx.archive is not None:

            log.info('Transferring archived files...')
            logdir = os.path.join(ctx.archive, 'remote')
            os.mkdir(logdir)
            for remote in ctx.cluster.remotes.iterkeys():
                path = os.path.join(logdir, remote.shortname)
                os.mkdir(path)
                log.debug('Transferring archived files from %s to %s',
                          remote.shortname, path)
                proc = remote.run(
                    args=[
                        'tar',
                        'c',
                        '-f',
                        '-',
                        '-C',
                        '/tmp/cephtest/archive',
                        '--',
                        '.',
                    ],
                    stdout=run.PIPE,
                    wait=False,
                )
                tar = tarfile.open(mode='r|', fileobj=proc.stdout)
                while True:
                    ti = tar.next()
                    if ti is None:
                        break

                    if ti.isdir():
                        # ignore silently; easier to just create leading dirs below
                        pass
                    elif ti.isfile():
                        sub = safepath.munge(ti.name)
                        safepath.makedirs(root=path, path=os.path.dirname(sub))
                        tar.makefile(ti, targetpath=os.path.join(path, sub))
                    else:
                        if ti.isdev():
                            type_ = 'device'
                        elif ti.issym():
                            type_ = 'symlink'
                        elif ti.islnk():
                            type_ = 'hard link'
                        else:
                            type_ = 'unknown'
                        log.info('Ignoring tar entry: %r type %r', ti.name,
                                 type_)
                        continue
                proc.exitstatus.get()

        log.info('Removing archive directory...')
        run.wait(
            ctx.cluster.run(
                args=[
                    'rm',
                    '-rf',
                    '--',
                    '/tmp/cephtest/archive',
                ],
                wait=False,
            ),
        )
Example 15
def run_job(job_config, teuth_bin_path, archive_dir, verbose):
    safe_archive = safepath.munge(job_config['name'])
    if job_config.get('first_in_suite') or job_config.get('last_in_suite'):
        job_archive = os.path.join(archive_dir, safe_archive)
        args = [
            os.path.join(teuth_bin_path, 'teuthology-results'),
            '--archive-dir', job_archive,
            '--name', job_config['name'],
        ]
        if job_config.get('first_in_suite'):
            log.info('Generating memo for %s', job_config['name'])
            if job_config.get('seed'):
                args.extend(['--seed', job_config['seed']])
            if job_config.get('subset'):
                args.extend(['--subset', job_config['subset']])
        else:
            log.info('Generating results for %s', job_config['name'])
            timeout = job_config.get('results_timeout',
                                     teuth_config.results_timeout)
            args.extend(['--timeout', str(timeout)])
            if job_config.get('email'):
                args.extend(['--email', job_config['email']])
        # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
        # make sure that it will continue to run if this worker process
        # dies (e.g. because of a restart)
        result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp)
        log.info("teuthology-results PID: %s", result_proc.pid)
        # Remove unnecessary logs for first and last jobs in run
        log.info('Deleting job\'s archive dir %s', job_config['archive_path'])
        for f in os.listdir(job_config['archive_path']):
            os.remove(os.path.join(job_config['archive_path'], f))
        os.rmdir(job_config['archive_path'])
        return

    log.info('Running job %s', job_config['job_id'])

    arg = [
        os.path.join(teuth_bin_path, 'teuthology'),
    ]
    # The following is for compatibility with older schedulers, from before we
    # started merging the contents of job_config['config'] into job_config
    # itself.
    if 'config' in job_config:
        inner_config = job_config.pop('config')
        if not isinstance(inner_config, dict):
            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
                     str(type(inner_config)))
        else:
            job_config.update(inner_config)

    if verbose or job_config['verbose']:
        arg.append('-v')

    arg.extend([
        '--owner', job_config['owner'],
        '--archive', job_config['archive_path'],
        '--name', job_config['name'],
    ])
    if job_config['description'] is not None:
        arg.extend(['--description', job_config['description']])
    job_archive = os.path.join(job_config['archive_path'], 'orig.config.yaml')
    arg.extend(['--', job_archive])

    log.debug("Running: %s" % ' '.join(arg))
    p = subprocess.Popen(args=arg)
    log.info("Job archive: %s", job_config['archive_path'])
    log.info("Job PID: %s", str(p.pid))

    if teuth_config.results_server:
        log.info("Running with watchdog")
        try:
            run_with_watchdog(p, job_config)
        except Exception:
            log.exception("run_with_watchdog had an unhandled exception")
            raise
    else:
        log.info("Running without watchdog")
        # This sleep() is to give the child time to start up and create the
        # archive dir.
        time.sleep(5)
        p.wait()

    if p.returncode != 0:
        log.error('Child exited with code %d', p.returncode)
    else:
        log.info('Success!')
    if 'targets' in job_config:
        unlock_targets(job_config)
Example 16
def test_empty(self):
    # really odd corner case
    got = safepath.munge('')
    assert got == '_'
Example 17
def test_dot_trailing(self):
    got = safepath.munge('evil/foo/.')
    assert got == 'evil/foo'
Example 18
def test_hidden_subdir(self):
    got = safepath.munge('foo/.evil')
    assert got == 'foo/_evil'
Example 19
def test_hidden(self):
    got = safepath.munge('.evil')
    assert got == '_evil'
Example 20
def test_dotdot_subdir(self):
    got = safepath.munge('evil/../foo')
    assert got == 'evil/_./foo'
Example 21
def worker():
    parser = argparse.ArgumentParser(
        description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
"""
    )
    parser.add_argument("-v", "--verbose", action="store_true", default=None, help="be more verbose")
    parser.add_argument("--archive-dir", metavar="DIR", help="path under which to archive results", required=True)
    parser.add_argument("-l", "--log-dir", help="path in which to store logs", required=True)
    parser.add_argument("-t", "--tube", help="which beanstalk tube to read jobs from", required=True)

    ctx = parser.parse_args()

    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        filename=os.path.join(ctx.log_dir, "worker.{pid}".format(pid=os.getpid())),
        format="%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%dT%H:%M:%S",
    )

    if not os.path.isdir(ctx.archive_dir):
        sys.exit(
            "{prog}: archive directory must exist: {path}".format(
                prog=os.path.basename(sys.argv[0]), path=ctx.archive_dir
            )
        )

    from teuthology.misc import read_config

    read_config(ctx)

    beanstalk = connect(ctx)
    beanstalk.watch(ctx.tube)
    beanstalk.ignore("default")

    while True:
        job = beanstalk.reserve(timeout=60)
        if job is None:
            continue

        # bury the job so it won't be re-run if it fails
        job.bury()
        log.debug("Reserved job %d", job.jid)
        log.debug("Config is: %s", job.body)
        job_config = yaml.safe_load(job.body)
        safe_archive = safepath.munge(job_config["name"])

        if job_config.get("last_in_suite", False):
            log.debug("Generating coverage for %s", job_config["name"])
            args = [
                os.path.join(os.path.dirname(sys.argv[0]), "teuthology-results"),
                "--timeout",
                str(job_config.get("results_timeout", 21600)),
                "--email",
                job_config["email"],
                "--archive-dir",
                os.path.join(ctx.archive_dir, safe_archive),
                "--name",
                job_config["name"],
            ]
            subprocess.Popen(args=args)
        else:
            log.debug("Creating archive dir...")
            safepath.makedirs(ctx.archive_dir, safe_archive)
            archive_path = os.path.join(ctx.archive_dir, safe_archive, str(job.jid))
            log.info("Running job %d", job.jid)
            run_job(job_config, archive_path)
        job.delete()
Example 22
def main():
    parser = argparse.ArgumentParser(description="""
Run a suite of ceph integration tests.

A suite is a set of collections.

A collection is a directory containing facets.

A facet is a directory containing config snippets.

Running a collection means running teuthology for every configuration
combination generated by taking one config snippet from each facet.

Any config files passed on the command line will be used for every
combination, and will override anything in the suite.
""")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true', default=None,
        help='be more verbose',
        )
    parser.add_argument(
        '--name',
        help='name for this suite',
        required=True,
        )
    parser.add_argument(
        '--collections',
        metavar='DIR',
        nargs='+',
        required=True,
        help='the collections to run',
        )
    parser.add_argument(
        '--owner',
        help='job owner',
        )
    parser.add_argument(
        '--email',
        help='address to email test failures to',
        )
    parser.add_argument(
        '--timeout',
        help='how many seconds to wait for jobs to finish before emailing results',
        )
    parser.add_argument(
        'config',
        metavar='CONFFILE',
        nargs='*',
        default=[],
        help='config file to read',
        )

    args = parser.parse_args()

    loglevel = logging.INFO
    if args.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        )

    base_arg = [
        os.path.join(os.path.dirname(sys.argv[0]), 'teuthology-schedule'),
        '--name', args.name,
        ]
    if args.verbose:
        base_arg.append('-v')
    if args.owner:
        base_arg.extend(['--owner', args.owner])

    for collection in args.collections:
        if not os.path.isdir(collection):
            print >>sys.stderr, 'Collection %s is not a directory' % collection
            sys.exit(1)

    collections = [
        (collection,
         os.path.basename(safepath.munge(collection)))
        for collection in args.collections
        ]

    for collection, collection_name in sorted(collections):
        log.info('Collection %s in %s' % (collection_name, collection))
        facets = [
            f for f in sorted(os.listdir(collection))
            if not f.startswith('.')
            and os.path.isdir(os.path.join(collection, f))
            ]
        facet_configs = (
            [(f, name, os.path.join(collection, f, name))
             for name in sorted(os.listdir(os.path.join(collection, f)))
             if not name.startswith('.')
             and name.endswith('.yaml')
             ]
            for f in facets
            )
        for configs in itertools.product(*facet_configs):
            description = 'collection:%s ' % (collection_name)
            description += ' '.join(
                '{facet}:{name}'.format(facet=facet, name=name)
                for facet, name, path in configs)
            log.info(
                'Running teuthology-schedule with facets %s', description
                )
            arg = copy.deepcopy(base_arg)
            arg.extend([
                    '--description', description,
                    '--',
                    ])
            arg.extend(args.config)
            arg.extend(path for facet, name, path in configs)
            subprocess.check_call(
                args=arg,
                )

    arg = copy.deepcopy(base_arg)
    arg.append('--last-in-suite')
    if args.email:
        arg.extend(['--email', args.email])
    if args.timeout:
        arg.extend(['--timeout', args.timeout])
    subprocess.check_call(
        args=arg,
        )
Example 23
def test_slashslash(self):
    got = safepath.munge('//')
    assert got == '_'
Example 24
def worker():
    parser = argparse.ArgumentParser(description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
""")
    parser.add_argument(
        '-v', '--verbose',
        action='store_true', default=None,
        help='be more verbose',
        )
    parser.add_argument(
        '--archive-dir',
        metavar='DIR',
        help='path under which to archive results',
        required=True,
        )
    parser.add_argument(
        '-l', '--log-dir',
        help='path in which to store logs',
        required=True,
        )
    parser.add_argument(
        '-t', '--tube',
        help='which beanstalk tube to read jobs from',
        required=True,
        )

    ctx = parser.parse_args()

    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        filename=os.path.join(ctx.log_dir, 'worker.{pid}'.format(pid=os.getpid())),
        format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
        datefmt='%Y-%m-%dT%H:%M:%S',
        )

    if not os.path.isdir(ctx.archive_dir):
        sys.exit("{prog}: archive directory must exist: {path}".format(
                prog=os.path.basename(sys.argv[0]),
                path=ctx.archive_dir,
                ))

    from teuthology.misc import read_config
    read_config(ctx)

    beanstalk = connect(ctx)
    beanstalk.watch(ctx.tube)
    beanstalk.ignore('default')

    while True:
        job = beanstalk.reserve(timeout=60)
        if job is None:
            continue

        # bury the job so it won't be re-run if it fails
        job.bury()
        log.debug('Reserved job %d', job.jid)
        log.debug('Config is: %s', job.body)
        job_config = yaml.safe_load(job.body)
        safe_archive = safepath.munge(job_config['name'])
        teuthology_branch = job_config.get('config', {}).get('teuthology_branch', 'master')

        teuth_path = os.path.join(os.getenv("HOME"), 'teuthology-' + teuthology_branch, 'virtualenv', 'bin')
        if not os.path.isdir(teuth_path):
            raise Exception('Teuthology branch ' + teuthology_branch + ' not found at ' + teuth_path)
        if job_config.get('last_in_suite'):
            log.debug('Generating coverage for %s', job_config['name'])
            args = [
                os.path.join(teuth_path, 'teuthology-results'),
                '--timeout',
                str(job_config.get('results_timeout', 21600)),
                '--email',
                job_config['email'],
                '--archive-dir',
                os.path.join(ctx.archive_dir, safe_archive),
                '--name',
                job_config['name'],
                ]
            subprocess.Popen(args=args)
        else:
            log.debug('Creating archive dir...')
            safepath.makedirs(ctx.archive_dir, safe_archive)
            archive_path = os.path.join(ctx.archive_dir, safe_archive, str(job.jid))
            log.info('Running job %d', job.jid)
            run_job(job_config, archive_path, teuth_path)
        job.delete()
Example 25
def worker():
    parser = argparse.ArgumentParser(description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
""")
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        default=None,
        help='be more verbose',
    )
    parser.add_argument(
        '--archive-dir',
        metavar='DIR',
        help='path under which to archive results',
        required=True,
    )
    parser.add_argument(
        '-l',
        '--log-dir',
        help='path in which to store logs',
        required=True,
    )

    ctx = parser.parse_args()

    loglevel = logging.INFO
    if ctx.verbose:
        loglevel = logging.DEBUG

    logging.basicConfig(
        level=loglevel,
        filename=os.path.join(ctx.log_dir,
                              'worker.{pid}'.format(pid=os.getpid())),
        format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
        datefmt='%Y-%m-%dT%H:%M:%S',
    )

    if not os.path.isdir(ctx.archive_dir):
        sys.exit("{prog}: archive directory must exist: {path}".format(
            prog=os.path.basename(sys.argv[0]),
            path=ctx.archive_dir,
        ))

    from teuthology.misc import read_config
    read_config(ctx)

    beanstalk = connect(ctx)
    beanstalk.watch('teuthology')
    beanstalk.ignore('default')

    while True:
        job = beanstalk.reserve(timeout=60)
        if job is None:
            continue

        # bury the job so it won't be re-run if it fails
        job.bury()
        log.debug('Reserved job %d', job.jid)
        log.debug('Config is: %s', job.body)
        job_config = yaml.safe_load(job.body)
        safe_archive = safepath.munge(job_config['name'])

        if job_config.get('last_in_suite', False):
            log.debug('Generating coverage for %s', job_config['name'])
            args = [
                os.path.join(os.path.dirname(sys.argv[0]),
                             'teuthology-results'),
                '--timeout',
                str(job_config.get('results_timeout', 21600)),
                '--email',
                job_config['email'],
                '--archive-dir',
                os.path.join(ctx.archive_dir, safe_archive),
                '--name',
                job_config['name'],
            ]
            subprocess.Popen(args=args)
        else:
            log.debug('Creating archive dir...')
            safepath.makedirs(ctx.archive_dir, safe_archive)
            archive_path = os.path.join(ctx.archive_dir, safe_archive,
                                        str(job.jid))
            log.info('Running job %d', job.jid)
            run_job(job_config, archive_path)
        job.delete()
Example 26
def test_simple(self):
    got = safepath.munge('foo')
    assert got == 'foo'
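
Taken together, the munge tests in this section pin down its behaviour:
strip leading and doubled slashes, drop '.' components, neutralize the
leading dot of any component (so '..' becomes '_.' and hidden names lose
their dot), and fall back to '_' when nothing remains. A sketch that
satisfies every test shown here (the real teuthology.safepath.munge may
be implemented differently):

def munge(path):
    # Reduce an arbitrary path to a safe, relative one.
    components = []
    for comp in path.split('/'):
        if comp in ('', '.'):
            # drop empty components (leading/doubled slashes) and '.'
            continue
        if comp.startswith('.'):
            # '..' -> '_.', '.evil' -> '_evil'
            comp = '_' + comp[1:]
        components.append(comp)
    return '/'.join(components) or '_'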
Example 27
def run_job(job_config, teuth_bin_path, archive_dir, verbose):
    safe_archive = safepath.munge(job_config['name'])
    if job_config.get('first_in_suite') or job_config.get('last_in_suite'):
        if teuth_config.results_server:
            try:
                report.try_delete_jobs(job_config['name'],
                                       job_config['job_id'])
            except Exception as e:
                log.warning("Unable to delete job %s, exception occurred: %s",
                            job_config['job_id'], e)
        suite_archive_dir = os.path.join(archive_dir, safe_archive)
        safepath.makedirs('/', suite_archive_dir)
        args = [
            os.path.join(teuth_bin_path, 'teuthology-results'),
            '--archive-dir',
            suite_archive_dir,
            '--name',
            job_config['name'],
        ]
        if job_config.get('first_in_suite'):
            log.info('Generating memo for %s', job_config['name'])
            if job_config.get('seed'):
                args.extend(['--seed', job_config['seed']])
            if job_config.get('subset'):
                args.extend(['--subset', job_config['subset']])
        else:
            log.info('Generating results for %s', job_config['name'])
            timeout = job_config.get('results_timeout',
                                     teuth_config.results_timeout)
            args.extend(['--timeout', str(timeout)])
            if job_config.get('email'):
                args.extend(['--email', job_config['email']])
        # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
        # make sure that it will continue to run if this worker process
        # dies (e.g. because of a restart)
        result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp)
        log.info("teuthology-results PID: %s", result_proc.pid)
        return

    log.info('Creating archive dir %s', job_config['archive_path'])
    safepath.makedirs('/', job_config['archive_path'])
    log.info('Running job %s', job_config['job_id'])

    suite_path = job_config['suite_path']
    arg = [
        os.path.join(teuth_bin_path, 'teuthology'),
    ]
    # The following is for compatibility with older schedulers, from before we
    # started merging the contents of job_config['config'] into job_config
    # itself.
    if 'config' in job_config:
        inner_config = job_config.pop('config')
        if not isinstance(inner_config, dict):
            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
                     str(type(inner_config)))
        else:
            job_config.update(inner_config)

    if verbose or job_config['verbose']:
        arg.append('-v')

    arg.extend([
        '--lock',
        '--block',
        '--owner',
        job_config['owner'],
        '--archive',
        job_config['archive_path'],
        '--name',
        job_config['name'],
    ])
    if job_config['description'] is not None:
        arg.extend(['--description', job_config['description']])
    arg.append('--')

    with tempfile.NamedTemporaryFile(prefix='teuthology-worker.',
                                     suffix='.tmp',
                                     mode='w+t') as tmp:
        yaml.safe_dump(data=job_config, stream=tmp)
        tmp.flush()
        arg.append(tmp.name)
        env = os.environ.copy()
        python_path = env.get('PYTHONPATH', '')
        python_path = ':'.join([suite_path, python_path]).strip(':')
        env['PYTHONPATH'] = python_path
        log.debug("Running: %s" % ' '.join(arg))
        p = subprocess.Popen(args=arg, env=env)
        log.info("Job archive: %s", job_config['archive_path'])
        log.info("Job PID: %s", str(p.pid))

        if teuth_config.results_server:
            log.info("Running with watchdog")
            try:
                run_with_watchdog(p, job_config)
            except Exception:
                log.exception("run_with_watchdog had an unhandled exception")
                raise
        else:
            log.info("Running without watchdog")
            # This sleep() is to give the child time to start up and create the
            # archive dir.
            time.sleep(5)
            symlink_worker_log(job_config['worker_log'],
                               job_config['archive_path'])
            p.wait()

        if p.returncode != 0:
            log.error('Child exited with code %d', p.returncode)
        else:
            log.info('Success!')