Example #1
            logfile = testlog
    if sumfile is None and logfile is not None:
        # TODOXXX: warn that results may not be accurate
        sumfile, logfile = logfile, None

    testrun = Testrun()
    outcomes = []
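    # Outcomes collected while parsing the .sum file are reused to cross-check the .log file below.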
    if sumfile is not None:
        parse_dejagnu_log(testrun,
                          sumfile,
                          outcomes=outcomes,
                          validate=(logfile is None))
    if logfile is not None:
        annotate_dejagnu_log(testrun,
                             logfile,
                             outcomes=outcomes,
                             validate=True)
    # TODOXXX: warn if testrun is empty / did not validate (may still want to add to repo)
    return testrun


b = Bunsen()
if __name__ == '__main__':
    opts = b.cmdline_args(sys.argv,
                          info=info,
                          args=cmdline_args,
                          optional_args=['sumfile', 'logfile'])
    testlogs = collect_testlogs(opts.logfile, opts.sumfile, opts.logdir)
    testrun = parse_testlogs(testlogs)
    print(testrun.to_json(pretty=True))
Example #2
                testcases[ix]['origin_log'] = last_test_cur
            elif (i is not None and i < len(testcases)
                  and 'subtest' in testcases[i] and next_outcome in line):
                testcases[i]['origin_log'] = last_test_cur
                i += 1  # XXX advance testcases, assuming they are in order
                if i < len(testcases):
                    next_outcome = get_outcome_line(testcases[i])
            j += 1  # XXX advance outcome_lines
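            # Start the cursor for the next testcase just past the current log line.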
            last_test_cur = Cursor(start=cur)
            last_test_cur.line_start += 1

    return testrun


b = Bunsen()
if __name__ == '__main__':
    # TODO: enable the following default command line arguments
    #wd_defaults = ['systemtap.log', 'systemtap.sum']
    opts = b.cmdline_args(sys.argv,
                          info=info,
                          args=cmdline_args,
                          required_args=['logfile', 'sumfile'])
    # TODO: use Bunsen library to load testlogs
    # TODO: support reading testlogs from script's cwd or Bunsen repo
    #logfile = b.logfile(opts.logfile)
    #sumfile = b.logfile(opts.sumfile)
    testrun = Testrun()
    all_cases = []
    testrun = parse_dejagnu_log(testrun,
                                opts.sumfile,
Example #3
        name_plus_subtest = None
        if 'subtest' in tc2:
            name_plus_subtest = name + '+' + subtest_name(tc2['subtest'])
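        # Try progressively looser matches against the baseline: name+subtest, then name without a subtest, then name alone.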
        if name_plus_subtest is not None and \
           name_plus_subtest in tc1_subtest_map:
            diff_2or_testcases(diff2, tc1_subtest_map[name_plus_subtest], tc2)
        elif name in tc1_name_map:  # XXX no subtest
            diff_2or_testcases(diff2, tc1_name_map[name], tc2)
        elif name in tc1_map:  # XXX subtest in tc1, no subtest in tc2
            diff_2or_testcases(diff2, tc1_map[name], tc2)
        else:  # XXX tc2 has no equivalent in baseline, use None as baseline outcome
            outcome2 = outcome_2or(tc2)
            if outcome2 != 'null->null':
                tc2 = dict(tc2)
                tc2['outcome'] = outcome2
                tc2['baseline_outcome'] = None
                add_2or_origins(tc2, 'origins', tc2)
                diff2.testcases.append(tc2)

    return diff2


b = Bunsen()
if __name__ == '__main__':
    # TODO: Handle tag:commit format for baseline_id, testrun_id in b.testrun().
    baseline_id, testrun_id = b.cmdline_args(sys.argv, 2, usage=usage)
    baseline = b.testrun(baseline_id)
    testrun = b.testrun(testrun_id)
    testdiff = diff_testruns(baseline, testrun)
    print(testdiff.to_json(pretty=True))
Example #4
            diff_2or_testcases(diff2, tc1_name_map[name], tc2)
        elif name in tc1_map:  # XXX subtest in tc1, no subtest in tc2
            diff_2or_testcases(diff2, tc1_map[name], tc2)
        else:  # XXX tc2 has no equivalent in baseline, use None as baseline outcome
            outcome2 = outcome_2or(tc2)
            if outcome2 != 'null->null':
                tc2 = dict(tc2)
                tc2['outcome'] = outcome2
                tc2['baseline_outcome'] = None
                add_2or_origins(tc2, 'origins', tc2)
                diff2.testcases.append(tc2)

    return diff2


b = Bunsen()
if __name__ == '__main__':
    opts = b.cmdline_args(sys.argv,
                          info=info,
                          args=cmdline_args,
                          required_args=['baseline', 'latest'])
    out = get_formatter(b, opts)
    baseline = b.testrun(opts.baseline)
    testrun = b.testrun(opts.latest)

    testdiff = diff_testruns(baseline, testrun)
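    # When pretty output is disabled, emit the diff as JSON; otherwise render it through the formatter.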
    if opts.pretty is False:
        print(testdiff.to_json(pretty=True))
    else:
        out.message(baseline=opts.baseline, latest=opts.latest)
        out.show_testrun(baseline, header_fields=['kind'], kind='baseline')
Example #5
    'diff_commits', 'diff_runs', 'show_logs'
}


def validate_cmd(script_name, args):
    global fail_reason
    if script_name not in valid_cmds:
        fail_reason = 'script {} not permitted, try one of {}'.format(
            script_name, valid_cmds)
        return False  # TODO: reason
    # TODO: also validate args
    return True


# TODOXXX Use Bunsen.from_cgi_query()
b = Bunsen(
    base_dir="../.bunsen")  # TODO: Need to correctly find bunsen.base_dir
form = cgi.FieldStorage()
script_name = form['cmd'].value if 'cmd' in form else 'list_commits'
args = {}
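# Remaining CGI form fields become key=value arguments for the requested script.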
for field in form.keys():
    if field == 'cmd': continue
    args[field] = form[field].value
if 'pretty' not in args:
    # XXX override defaults
    args['pretty'] = 'html'
if validate_cmd(script_name, args):
    # TODO: integrate with BunsenOptions and b.run_command:
    script_path = b.find_script(script_name)
    cmdline_args = []
    for k, v in args.items():
        cmdline_args.append('{}={}'.format(k, v))
    # TODOXXX fix out-of-order WARNING
Example #6
            wd.destroy()
            #wd_index.destroy()
            #wd_testruns.destroy()
            wd = b.checkout_wd()
            #wd_index = b.checkout_wd(postfix="index")
            #wd_testruns = b.checkout_wd(postfix="testruns")

    if profiler is not None:
        profiler.disable()

    # TODO: Add an option to test parser performance across a log
    # collection by skipping the commit+push steps.

    wd.push_all()
    #wd_index.push_all()
    #wd_testruns.push_all() # XXX this failed requiring manual fixup
    #wd.destroy() # TODO: enable, control with a command line option

    shutil.rmtree(tmpdir)

    progress.close()
    print("Added {} new testruns from {} directories of {} total" \
          .format(new_runs, new_dirs, total_dirs))

b = Bunsen()
if __name__ == '__main__':
    opts = b.cmdline_args(sys.argv, info=info, args=cmdline_args,
                          required_args=['raw_logs'])
    opts.timeslice = opts.get_list('timeslice')
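    # Scan the raw_logs collection and commit any new testruns to the Bunsen repo.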
    commit_repo_logs(b, opts.raw_logs, opts=opts)
Example #7
                ix = testcase_outcomes[outcome_lines[j]]
                testcases[ix]['origin_log'] = last_test_cur
            elif (i is not None and i < len(testcases)
                  and 'subtest' in testcases[i] and next_outcome in line):
                testcases[i]['origin_log'] = last_test_cur
                i += 1  # XXX advance testcases, assuming they are in order
                if i < len(testcases):
                    next_outcome = get_outcome_line(testcases[i])
            j += 1  # XXX advance outcome_lines
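            # Start the cursor for the next testcase just past the current log line.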
            last_test_cur = Cursor(start=cur)
            last_test_cur.line_start += 1

    return testrun


b = Bunsen()
if __name__ == '__main__':
    # TODO: enable the following default command line arguments
    #wd_defaults = ['systemtap.log', 'systemtap.sum']
    logfile, sumfile = b.cmdline_args(sys.argv, 2, usage=usage)
    # TODO: use Bunsen library to load testlogs
    # TODO: support reading testlogs from script's cwd or Bunsen repo
    #logfile = b.logfile(logfile)
    #sumfile = b.logfile(sumfile)
    testrun = Testrun()
    all_cases = []
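    # Two passes: parse outcomes from the .sum file, then annotate them with cursors into the .log file.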
    testrun = parse_dejagnu_log(testrun, sumfile, all_cases=all_cases)
    testrun = annotate_dejagnu_log(testrun, logfile, all_cases)
    print(testrun.to_json(pretty=True))
Example #8
                    #wd_testruns.destroy()
                    wd = b.checkout_wd()
                    #wd_index = b.checkout_wd(postfix="index")
                    #wd_testruns = b.checkout_wd(postfix="testruns")

    if profiler is not None:
        profiler.disable()

    # TODO: Add an option to test parser performance across a log
    # collection by skipping the commit+push steps.

    wd.push_all()
    #wd_index.push_all()
    #wd_testruns.push_all() # XXX this failed requiring manual fixup
    #wd.destroy() # TODO: enable, control with a command line option

    progress.close()
    print("Added {} new testruns from {} directories of {} total" \
          .format(new_runs, new_dirs, total_dirs))


b = Bunsen()
if __name__ == '__main__':
    log_src = b.cmdline_args(sys.argv, 1, usage=usage)
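    # A 'host:path' source would require fetching a remote log repo, which is not yet supported.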
    if ':' in log_src:
        host, _sep, log_src = log_src.partition(':')
        print("Log repo downloading is currently not supported!")
        exit(1)

    commit_logs(b, log_src)
Example #9
        if tarinfo.isreg():
            kind = "file"
        elif tarinfo.isdir():
            kind = "directory"
        else:
            kind = "other"  # e.g. symlink, hard link, or special file
        print("* {} ({} bytes, {})".format(tarinfo.name, tarinfo.size, kind),
              file=outfile)
    outfile.flush()


def to_module_name(commit_module):
    # TODO: Munge commit_module name? e.g. +gdb/commit-logs -> gdb.commit_logs
    # Strip starting '+', replace '.' -> '/', '-' -> '_'.
    return commit_module


b = Bunsen(script_name='bunsen-upload')
if __name__ == '__main__':
    cgitb.enable()  # TODO: configure logging

    # XXX Read args from config only.
    opts = b.cmdline_args([],
                          info=info,
                          args=config_opts,
                          required_args=['manifest', 'commit_module'],
                          use_config=True)
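    # Merge additional settings from the bunsen-upload section of the config file.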
    opts.add_config(
        "bunsen-upload")  # TODO: Also handle [bunsen-upload "<tag>"]
    opts.service_id = 'bunsen-upload'

    # TODOXXX Also allow standard options for _commit_logs.commit_logs()!
    opts.manifest = opts.get_list('manifest')
Example #10
        if tarinfo.isreg():
            kind = "file"
        elif tarinfo.isdir():
            kind = "directory"
        else:
            kind = "other"  # e.g. symlink, hard link, or special file
        print("* {} ({} bytes, {})".format(tarinfo.name, tarinfo.size, kind),
              file=outfile)
    outfile.flush()


def to_module_name(commit_module):
    # TODO: Munge commit_module name? e.g. +gdb/commit-logs -> gdb.commit_logs
    # Strip starting '+', replace '.' -> '/', '-' -> '_'.
    return commit_module


b = Bunsen(script_name='bunsen-add')
if __name__ == '__main__':
    opts = b.cmdline_args(
        sys.argv,
        info=info,
        args=config_opts,
        required_args=['tar', 'manifest', 'commit_module'],
        check_required=False,  # XXX required args can come from allow_unknown
        allow_unknown=True)  # XXX allow_unknown for allowed_fields
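    # Merge additional settings from the bunsen-upload section of the config file before re-checking required args.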
    opts.add_config(
        "bunsen-upload")  # TODO: Also handle [bunsen-upload "<tag>"]
    opts.check_required()
    if opts.should_print_help:
        opts.print_help()
        exit()
    opts.service_id = 'bunsen-upload'
Example #11
        return counts

    def commit_dist(self, baseline, latest):
        """Returns distance in number of commits between baseline and latest."""
        return self.commit_indices[latest] - self.commit_indices[baseline]

    def grid_dist(self, gk_baseline, gk_latest):
        """Returns distance in number of commits between grid cells gk_baseline and gk_latest."""
        baseline = self.commits_grid[gk_baseline].hexsha
        latest = self.commits_grid[gk_latest].hexsha
        return self.commit_dist(baseline, latest)


if __name__ == '__main__':
    b, opts = Bunsen.from_cmdline(info=info,
                                  required_args=['baseline', 'latest'],
                                  optional_args=['source_repo'])

    opts.pretty = 'html'  # XXX for now, always output HTML
    out = get_formatter(b, opts)

    projects = opts.get_list('project', default=b.projects)
    repo = git.Repo(opts.source_repo)
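    # The source repo provides the commit range that the Timecube's grid of testruns is built over.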

    # (1a) Use Timecube class to collect test results for commits in the specified range
    cube = Timecube(b, opts, repo)

    # (1b) Find summary fields present in all testruns
    header_fields, summary_fields = index_summary_fields(cube.all_testruns)
    # XXX summary_fields will also include source_commit, version
    # which are not used in get_summary. header_fields excludes these.