# Stdlib imports for this section; the teuthology-specific names used below
# (log, config, misc, ResultsReporter, Scraper, UNFINISHED_STATUSES,
# email_results, email_templates, format_job, Run, WaitException) are assumed
# to be imported or defined elsewhere in the surrounding module.
import os
import time
from collections import OrderedDict


def results(archive_dir, name, email, timeout, dry_run):
    starttime = time.time()

    if timeout:
        log.info('Waiting up to %d seconds for tests to finish...', timeout)

    reporter = ResultsReporter()
    while timeout > 0:
        if time.time() - starttime > timeout:
            log.warning('test(s) did not finish before timeout of %d seconds',
                        timeout)
            break
        jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
        unfinished_jobs = [job for job in jobs
                           if job['status'] in UNFINISHED_STATUSES]
        if not unfinished_jobs:
            log.info('Tests finished! gathering results...')
            break
        time.sleep(60)

    (subject, body) = build_email_body(name)

    # Scrape the archive for failure patterns before reporting.
    Scraper(archive_dir).analyze()
    if email and dry_run:
        # Dry run: print the message instead of sending it.
        print("From: %s" % (config.results_sending_email or 'teuthology'))
        print("To: %s" % email)
        print("Subject: %s" % subject)
        print(body)
    elif email:
        email_results(
            subject=subject,
            from_=(config.results_sending_email or 'teuthology'),
            to=email,
            body=body,
        )
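
# Usage sketch (hypothetical values, not part of the module): wait up to an
# hour for the run to finish, then print the report instead of mailing it.
#
#   results(archive_dir='/archive/my-run', name='my-run',
#           email='dev@example.com', timeout=3600, dry_run=True)
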
def get_rerun_filters(name, statuses):
    """Build result-filtering criteria for rerunning jobs from a past run."""
    reporter = ResultsReporter()
    run = reporter.get_run(name)
    filters = dict()
    filters['suite'] = run['suite']
    jobs = [job for job in run['jobs'] if job['status'] in statuses]
    filters['descriptions'] = [job['description'] for job in jobs
                               if job['description']]
    return filters
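
# Example of the returned structure (hypothetical run name and job
# descriptions, for illustration only):
#
#   >>> get_rerun_filters('teuthology-2023-01-01_00:00:00-rados-main',
#   ...                   statuses=['fail', 'dead'])
#   {'suite': 'rados',
#    'descriptions': ['rados/basic/{clusters/fixed-2 tasks/rados_api_tests}']}
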
def build_email_body(name, _reporter=None):
    """Assemble the results email subject and body for the named run."""
    # One stanza dict per status, in the order the sections should appear.
    stanzas = OrderedDict([
        ('fail', dict()),
        ('dead', dict()),
        ('running', dict()),
        ('waiting', dict()),
        ('queued', dict()),
        ('pass', dict()),
    ])
    reporter = _reporter or ResultsReporter()
    fields = ('job_id', 'status', 'description', 'duration',
              'failure_reason', 'sentry_event', 'log_href')
    jobs = reporter.get_jobs(name, fields=fields)
    jobs.sort(key=lambda job: job['job_id'])

    for job in jobs:
        job_stanza = format_job(name, job)
        stanzas[job['status']][job['job_id']] = job_stanza

    sections = OrderedDict.fromkeys(stanzas.keys(), '')
    subject_fragments = []
    for status in sections.keys():
        stanza = stanzas[status]
        if stanza:
            subject_fragments.append('%s %s' % (len(stanza), status))
            sections[status] = email_templates['sect_templ'].format(
                title=status.title(),
                jobs=''.join(stanza.values()),
            )
    subject = ', '.join(subject_fragments) + ' '

    if config.archive_server:
        log_root = os.path.join(config.archive_server, name, '')
    else:
        log_root = None

    body = email_templates['body_templ'].format(
        name=name,
        info_root=misc.get_results_url(name),
        log_root=log_root,
        fail_count=len(stanzas['fail']),
        dead_count=len(stanzas['dead']),
        running_count=len(stanzas['running']),
        waiting_count=len(stanzas['waiting']),
        queued_count=len(stanzas['queued']),
        pass_count=len(stanzas['pass']),
        fail_sect=sections['fail'],
        dead_sect=sections['dead'],
        running_sect=sections['running'],
        waiting_sect=sections['waiting'],
        queued_sect=sections['queued'],
        pass_sect=sections['pass'],
    )

    subject += 'in {suite}'.format(suite=name)
    return (subject.strip(), body.strip())
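
# The subject line is built from the non-empty stanzas only; e.g. a run with
# two failures and ten passes would yield (illustrative output, run name
# hypothetical):
#
#   '2 fail, 10 pass in teuthology-2023-01-01_00:00:00-rados-main'
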
def wait(name, max_job_time, upload_url):
    stale_job = max_job_time + Run.WAIT_MAX_JOB_TIME
    reporter = ResultsReporter()
    past_unfinished_jobs = []
    progress = time.time()
    log.info("waiting for the suite to complete")
    log.debug("the list of unfinished jobs will be displayed "
              "every " + str(Run.WAIT_PAUSE / 60) + " minutes")
    exit_code = 0
    while True:
        jobs = reporter.get_jobs(name, fields=['job_id', 'status'])
        unfinished_jobs = []
        for job in jobs:
            if job['status'] in UNFINISHED_STATUSES:
                unfinished_jobs.append(job)
            elif job['status'] != 'pass':
                exit_code = 1
        if len(unfinished_jobs) == 0:
            log.info("wait is done")
            break
        # Bail out if the number of unfinished jobs has not changed for
        # longer than a job could legitimately take.
        if (len(past_unfinished_jobs) == len(unfinished_jobs) and
                time.time() - progress > stale_job):
            raise WaitException("no progress since " + str(max_job_time) +
                                " + " + str(Run.WAIT_MAX_JOB_TIME) +
                                " seconds")
        if len(past_unfinished_jobs) != len(unfinished_jobs):
            past_unfinished_jobs = unfinished_jobs
            progress = time.time()
        time.sleep(Run.WAIT_PAUSE)
        job_ids = [job['job_id'] for job in unfinished_jobs]
        log.debug('wait for jobs ' + str(job_ids))
    jobs = reporter.get_jobs(
        name, fields=['job_id', 'status', 'description', 'log_href'])
    # dead, fail, pass: sorting by status shows fail/dead jobs first
    jobs = sorted(jobs, key=lambda x: x['status'])
    for job in jobs:
        if upload_url:
            url = os.path.join(upload_url, name, job['job_id'])
        else:
            url = job['log_href']
        log.info(job['status'] + " " + url + " " + job['description'])
    return exit_code
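
# Sketch of the caller's contract (caller names hypothetical): a zero exit
# code means every job passed, 1 means at least one job failed or died, and
# a stalled suite raises WaitException instead of returning.
#
#   try:
#       sys.exit(wait(run_name, config.max_job_time, upload_url=None))
#   except WaitException:
#       log.exception('suite stalled')
#       sys.exit(1)
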
def get_rerun_conf(conf):
    reporter = ResultsReporter()
    try:
        subset, seed = reporter.get_rerun_conf(conf.rerun)
    except IOError:
        return conf.subset, conf.seed
    if seed is None:
        return conf.subset, conf.seed
    if conf.seed < 0:
        log.info('Using stored seed=%s', seed)
    elif conf.seed != seed:
        log.error('--seed %s does not match stored seed: %s',
                  conf.seed, seed)
    if conf.subset is None:
        log.info('Using stored subset=%s', subset)
    elif conf.subset != subset:
        log.error('--subset %s does not match stored subset: %s',
                  conf.subset, subset)
    return subset, seed
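
# Precedence sketch (hypothetical conf object and stored values): the stored
# subset/seed win unless the reporter has none; mismatching explicit flags
# are logged as errors, but the stored values are still returned.
#
#   >>> conf = Namespace(rerun='my-run', seed=-1, subset=None)
#   >>> get_rerun_conf(conf)   # illustrative: stored subset and seed
#   ('3/32', 895)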