Code example #1
def process_job(self, job_id):
    job_config = self.jobs[job_id]['job_config']
    job_name = job_config['name']
    print('Deleting {job_name}/{job_id}'.format(
        job_id=job_id,
        job_name=job_name,
    ))
    job_obj = self.jobs[job_id].get('job_obj')
    if job_obj:
        job_obj.delete()
    report.try_delete_jobs(job_name, job_id)
Code example #2
File: kill.py  Project: tuan-hoang1/teuthology
def remove_paddles_jobs(run_name):
    jobs = report.ResultsReporter().get_jobs(run_name, fields=['status'])
    job_ids = [job['job_id'] for job in jobs if job['status'] == 'queued']
    if job_ids:
        log.info("Deleting jobs from paddles: %s", str(job_ids))
        report.try_delete_jobs(run_name, job_ids)
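
Examples #1 and #2 together show the two call shapes used for report.try_delete_jobs: a run/job name plus a single job id, or a run name plus a list of queued job ids. A minimal sketch of both forms, with a hypothetical run name and job ids:

# Illustration only; the run name and job ids below are placeholders.
report.try_delete_jobs('my-run', '12345')              # one job, as in example #1
report.try_delete_jobs('my-run', ['12345', '12346'])   # a batch of queued jobs, as in example #2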
Code example #3
File: worker.py  Project: varshar16/teuthology
def run_job(job_config, teuth_bin_path, archive_dir, verbose):
    safe_archive = safepath.munge(job_config['name'])
    if job_config.get('first_in_suite') or job_config.get('last_in_suite'):
        if teuth_config.results_server:
            try:
                report.try_delete_jobs(job_config['name'],
                                       job_config['job_id'])
            except Exception as e:
                log.warning("Unable to delete job %s, exception occurred: %s",
                            job_config['job_id'], e)
        suite_archive_dir = os.path.join(archive_dir, safe_archive)
        safepath.makedirs('/', suite_archive_dir)
        args = [
            os.path.join(teuth_bin_path, 'teuthology-results'),
            '--archive-dir',
            suite_archive_dir,
            '--name',
            job_config['name'],
        ]
        if job_config.get('first_in_suite'):
            log.info('Generating memo for %s', job_config['name'])
            if job_config.get('seed'):
                args.extend(['--seed', job_config['seed']])
            if job_config.get('subset'):
                args.extend(['--subset', job_config['subset']])
        else:
            log.info('Generating results for %s', job_config['name'])
            timeout = job_config.get('results_timeout',
                                     teuth_config.results_timeout)
            args.extend(['--timeout', str(timeout)])
            if job_config.get('email'):
                args.extend(['--email', job_config['email']])
        # Execute teuthology-results, passing 'preexec_fn=os.setpgrp' to
        # make sure that it will continue to run if this worker process
        # dies (e.g. because of a restart)
        result_proc = subprocess.Popen(args=args, preexec_fn=os.setpgrp)
        log.info("teuthology-results PID: %s", result_proc.pid)
        return

    log.info('Creating archive dir %s', job_config['archive_path'])
    safepath.makedirs('/', job_config['archive_path'])
    log.info('Running job %s', job_config['job_id'])

    suite_path = job_config['suite_path']
    arg = [
        os.path.join(teuth_bin_path, 'teuthology'),
    ]
    # The following is for compatibility with older schedulers, from before we
    # started merging the contents of job_config['config'] into job_config
    # itself.
    if 'config' in job_config:
        inner_config = job_config.pop('config')
        if not isinstance(inner_config, dict):
            log.warn("run_job: job_config['config'] isn't a dict, it's a %s",
                     str(type(inner_config)))
        else:
            job_config.update(inner_config)

    if verbose or job_config['verbose']:
        arg.append('-v')

    arg.extend([
        '--lock',
        '--block',
        '--owner',
        job_config['owner'],
        '--archive',
        job_config['archive_path'],
        '--name',
        job_config['name'],
    ])
    if job_config['description'] is not None:
        arg.extend(['--description', job_config['description']])
    arg.append('--')

    with tempfile.NamedTemporaryFile(prefix='teuthology-worker.',
                                     suffix='.tmp',
                                     mode='w+t') as tmp:
        yaml.safe_dump(data=job_config, stream=tmp)
        tmp.flush()
        arg.append(tmp.name)
        env = os.environ.copy()
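        # Prepend suite_path to PYTHONPATH so modules under the suite
        # checkout are importable by the child teuthology process.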
        python_path = env.get('PYTHONPATH', '')
        python_path = ':'.join([suite_path, python_path]).strip(':')
        env['PYTHONPATH'] = python_path
        log.debug("Running: %s" % ' '.join(arg))
        p = subprocess.Popen(args=arg, env=env)
        log.info("Job archive: %s", job_config['archive_path'])
        log.info("Job PID: %s", str(p.pid))

        if teuth_config.results_server:
            log.info("Running with watchdog")
            try:
                run_with_watchdog(p, job_config)
            except Exception:
                log.exception("run_with_watchdog had an unhandled exception")
                raise
        else:
            log.info("Running without watchdog")
            # This sleep() is to give the child time to start up and create the
            # archive dir.
            time.sleep(5)
            symlink_worker_log(job_config['worker_log'],
                               job_config['archive_path'])
            p.wait()

        if p.returncode != 0:
            log.error('Child exited with code %d', p.returncode)
        else:
            log.info('Success!')
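
For orientation, here is a hypothetical invocation of run_job() from the example above; every value is a placeholder, but the keys are the ones the function actually reads from job_config:

# Hypothetical call; all names and paths are placeholders.
job_config = {
    'name': 'example-run',
    'job_id': '1',
    'owner': 'scheduled_user',
    'description': None,
    'verbose': False,
    'archive_path': '/archive/example-run/1',
    'suite_path': '/src/ceph-qa-suite',
    'worker_log': '/var/log/teuthology-worker.log',
}
run_job(job_config,
        teuth_bin_path='/usr/local/bin',
        archive_dir='/archive',
        verbose=False)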
Code example #4
def main(ctx):
    if ctx.owner is None:
        ctx.owner = 'scheduled_{user}'.format(user=get_user())
    read_config(ctx)

    beanstalk = teuthology.beanstalk.connect()

    tube = ctx.worker
    beanstalk.use(tube)

    if ctx.show:
        for job_id in ctx.show:
            job = beanstalk.peek(job_id)
            if job is None and ctx.verbose:
                print('job {jid} is not in the queue'.format(jid=job_id))
            else:
                print('--- job {jid} priority {prio} ---\n'.format(
                    jid=job_id,
                    prio=job.stats()['pri']), job.body)
        return

    if ctx.delete:
        for job_id in ctx.delete:
            job = beanstalk.peek(job_id)
            if job is None:
                print('job {jid} is not in the queue'.format(jid=job_id))
            else:
                job.delete()
                name = yaml.safe_load(job.body).get('name')
                if name:
                    report.try_delete_jobs(name, job_id)
        return

    # strip out targets; the worker will allocate new ones when we run
    # the job with --lock.
    if ctx.config.get('targets'):
        del ctx.config['targets']

    job_config = dict(
        name=ctx.name,
        last_in_suite=ctx.last_in_suite,
        email=ctx.email,
        description=ctx.description,
        owner=ctx.owner,
        verbose=ctx.verbose,
        machine_type=ctx.worker,
    )
    # Merge job_config and ctx.config
    job_config.update(ctx.config)
    if ctx.timeout is not None:
        job_config['results_timeout'] = ctx.timeout

    job = yaml.safe_dump(job_config)
    num = ctx.num
    while num > 0:
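        # ttr (time-to-run) of 24 hours: once a worker reserves this job,
        # beanstalkd releases it back to the queue if the worker neither
        # finishes nor touches it within that window.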
        jid = beanstalk.put(
            job,
            ttr=60 * 60 * 24,
            priority=ctx.priority,
        )
        print('Job scheduled with name {name} and ID {jid}'.format(
            name=ctx.name, jid=jid))
        job_config['job_id'] = str(jid)
        report.try_push_job_info(job_config, dict(status='queued'))
        num -= 1