Code example #1
0
def test_args():
    """Build a default NPF argument namespace for use in tests.

    Registers the standard NPF option groups on a fresh parser, parses an
    empty command line so every option takes its default, and installs the
    resulting namespace as the global NPF options via ``npf.set_args``.

    Returns:
        argparse.Namespace: the parsed default arguments, with ``tags``
        reset to an empty dict.
    """
    parser = argparse.ArgumentParser(description='NPF Tester')
    npf.add_verbosity_options(parser)
    npf.add_building_options(parser)
    npf.add_graph_options(parser)
    npf.add_testing_options(parser)
    # Pass an empty *list*, not the empty string: argparse iterates the
    # ``args`` sequence, so a str would be split into single-character
    # arguments for any non-empty value. "" only worked by accident.
    args = parser.parse_args(args=[])
    # NOTE(review): other call sites treat args.tags as a list
    # (e.g. `tags += repo.tags`); {} works only while it stays empty —
    # TODO confirm intended type.
    args.tags = {}
    npf.set_args(args)
    return args
Code example #2
0
def main():
    """Run the NPF cross-repository comparator and graph the results.

    Builds the command line, resolves every repository named on it, runs
    the comparator over all of them, and renders the final graph. In
    --iterative mode the graph is also re-drawn after each finished run.
    """
    parser = argparse.ArgumentParser(
        description='NPF cross-repository comparator')

    npf.add_verbosity_options(parser)

    parser.add_argument('repos',
                        metavar='repo',
                        type=str,
                        nargs='+',
                        help='names of the repositories to watch')

    parser.add_argument('--graph-title',
                        type=str,
                        nargs='?',
                        help='Graph title')

    npf.add_building_options(parser)
    npf.add_testing_options(parser)
    npf.add_graph_options(parser)
    args = parser.parse_args()

    npf.parse_nodes(args)

    # Resolve every repository named on the command line; last_build is
    # cleared so the comparator starts fresh for each repo.
    repo_list = []
    for name in args.repos:
        repository = Repository.get_instance(name, args)
        repository.last_build = None
        repo_list.append(repository)

    comparator = Comparator(repo_list)

    filename = npf.build_output_filename(args, repo_list)

    # Make sure the output directory exists before anything tries to
    # write into it.
    out_dir = Path(os.path.dirname(filename))
    if not out_dir.exists():
        os.makedirs(out_dir.as_posix())

    if not os.path.isabs(filename):
        filename = os.getcwd() + os.sep + filename

    def _redraw(series, time_series):
        # Live re-graphing is only wanted in --iterative mode; otherwise
        # this callback is a no-op and we graph once at the end.
        if args.iterative:
            return do_graph(filename, args, series, time_series,
                            options=args)
        return None

    series, time_series = comparator.run(testie_name=args.test_files,
                                         tags=args.tags,
                                         options=args,
                                         on_finish=_redraw)

    do_graph(filename, args, series, time_series, options=args)
Code example #3
0
File: npf-run.py  Project: palanik1/npf
def main():
    """NPF testie runner entry point.

    Parses the command line, resolves the repository and the list of
    versions to test, optionally re-runs tests for old versions that have
    no stored results, executes every testie for every selected version,
    performs a regression comparison against the previous version when
    --regress is given, and graphs the results.

    Exits with the accumulated number of failed tests (0 on full success).
    """
    parser = argparse.ArgumentParser(description='NPF Testie runner')
    v = npf.add_verbosity_options(parser)

    b = npf.add_building_options(parser)
    b.add_argument('--allow-old-build',
                   help='Re-build and run test for old versions (compare-version and graph-version) without results. '
                        'By default, only building for the regression versions (see --history or --version) is done',
                   dest='allow_oldbuild', action='store_true', default=False)
    b.add_argument('--force-old-build',
                   help='Force to rebuild the old versions. Ignored if allow-old-build is not set', dest='force_oldbuild',
                   action='store_true', default=False)

    t = npf.add_testing_options(parser, True)

    g = parser.add_argument_group('Versioning options')
    g.add_argument('--regress',
                    help='Do a regression comparison against old version of the software', dest='compare', action='store_true',
                    default=False)
    gf = g.add_mutually_exclusive_group()
    gf.add_argument('--history',
                    help='Number of commits in the history on which to execute the regression tests. By default, '
                         'this is 1 meaning that the regression test is done on HEAD, and will be compared '
                         'against HEAD~1. This parameter allows to '
                         'start at commits HEAD~N as if it was HEAD, doing the regression test for each'
                         'commits up to now. Difference with --allow-old-build is that the regression test '
                         'will be done for each commit instead of just graphing the results, so error message and'
                         'return code will concern any regression between HEAD and HEAD~N. '
                         'Ignored if --version is given.',
                    dest='history', metavar='N',
                    nargs='?', type=int, default=1)
    g.add_argument('--branch', help='Branch', type=str, nargs='?', default=None)
    g.add_argument('--compare-version', dest='compare_version', metavar='version', type=str, nargs='?',
                   help='A version to compare against the last version. Default is the first parent of the last version containing some results.')


    a = npf.add_graph_options(parser)
    af = a.add_mutually_exclusive_group()
    af.add_argument('--graph-version', metavar='version', type=str, nargs='*',
                    help='versions to simply graph')
    af.add_argument('--graph-num', metavar='N', type=int, nargs='?', default=-1,
                    help='Number of olds versions to graph after --compare-version, unused if --graph-version is given. Default is 0 or 8 if --regress is given.')

    parser.add_argument('repo', metavar='repo name', type=str, nargs='?', help='name of the repo/group of builds', default=None)

    args = parser.parse_args()


    npf.parse_nodes(args)


    # --force-old-build only makes sense on top of --allow-old-build.
    if args.force_oldbuild and not args.allow_oldbuild:
        print("--force-old-build needs --allow-old-build")
        parser.print_help()
        return 1

    # Resolve the repository: either from the positional argument, or
    # fall back to the testie's own default_repo config entry.
    if args.repo:
        repo = Repository.get_instance(args.repo, args)
    else:
        if os.path.exists(args.testie) and os.path.isfile(args.testie):
            tmptestie = Testie(args.testie,options=args)
            if "default_repo" in tmptestie.config and tmptestie.config["default_repo"] is not None:
                repo = Repository.get_instance(tmptestie.config["default_repo"], args)
            else:
                print("This testie has no default repository")
                sys.exit(1)
        else:
            print("Please specify a repository to use to the command line or only a single testie with a default_repo")
            sys.exit(1)

    # -1 is the "not given" sentinel for --graph-num (see its default).
    if args.graph_num == -1:
        args.graph_num = 8 if args.compare else 0


    tags = args.tags
    tags += repo.tags

    #Overwrite config if a build folder is given
    if args.use_local:
        repo.url = None
        repo._build_path = args.use_local + '/'
        versions = ['local']
    elif repo.url:
        versions = repo.method.get_last_versions(limit=args.history,branch=args.branch)
    else:
        versions = ['local']

    # Builds of the regression versions
    builds = []

    for version in versions:
        builds.append(Build(repo, version))

    # Old builds that have no results yet and must be re-executed first.
    last_rebuilds = []

    # The build to compare the newest results against (--regress).
    last_build = None
    if args.compare:
        if args.compare_version and len(args.compare_version):
            compare_version = args.compare_version
            last_build = Build(repo, compare_version)
        else:
            # Walk back through history until a version with results is
            # found (or, with --allow-old-build, schedule the first old
            # version for re-execution).
            old_versions = repo.method.get_history(versions[-1],100)
            for i, version in enumerate(old_versions):
                last_build = Build(repo, version)
                if last_build.hasResults():
                    break
                elif args.allow_oldbuild:
                    last_rebuilds.append(last_build)
                    break
                # NOTE(review): with a history limit of 100, i never
                # exceeds 99, so this guard can only fire if the limit
                # above is raised.
                if i > 100:
                    last_build = None
                    break
            if last_build:
                print("Comparaison version is %s" % last_build.version)

    # Extra older builds to show on the graph alongside the tested ones.
    graph_builds = []
    if args.graph_version and len(args.graph_version) > 0:
        for g in args.graph_version:
            graph_builds.append(Build(repo, g))
    else:
        if args.graph_num > 1 and repo.url:
            old_versions = repo.method.get_history(last_build.version if last_build else builds[-1].version, 100)
            for i, version in enumerate(old_versions):
                g_build = Build(repo, version)
                if g_build in builds or g_build == last_build:
                    continue
                i += 1
                if g_build.hasResults() and not args.force_oldbuild:
                    graph_builds.append(g_build)
                elif args.allow_oldbuild:
                    last_rebuilds.append(g_build)
                    graph_builds.append(g_build)
                if len(graph_builds) > args.graph_num:
                    break

    testies = Testie.expand_folder(testie_path=args.testie, options=args, tags=tags)
    if not testies:
        sys.exit(errno.ENOENT)

    npf.override(args, testies)

    # Re-execute tests for old versions that lack results, so the later
    # comparison/graphing has something to work with.
    for b in last_rebuilds:
        print("Last version %s had no result. Re-executing tests for it." % b.version)
        did_something = False
        for testie in testies:
            prev_results = b.load_results(testie)
            print("Executing testie %s" % testie.filename)
            try:
                all_results, time_results, init_done = testie.execute_all(b,options=args, prev_results=prev_results)

                if all_results is None and time_results is None:
                    continue
            except ScriptInitException:
                continue
            else:
                did_something = True
            b.writeversion(testie, all_results, allow_overwrite=True)
        if did_something:
            b.writeResults()

    returncode = 0

    # Main loop: oldest selected version first, so each run can be
    # compared against the one before it.
    for build in reversed(builds):
        if len(builds) > 1 or repo.version:
            print("Starting tests for version %s" % build.version)

        nok = 0
        ntests = 0

        for testie in testies:
            print("Executing testie %s" % testie.filename)

            regression = Regression(testie)

            print(testie.get_title())

            old_all_results = None
            if last_build:
                try:
                    old_all_results = last_build.load_results(testie)
                except FileNotFoundError:
                    print("Previous build %s could not be found, we will not compare !" % last_build.version)
                    last_build = None

            try:
                prev_results = build.load_results(testie)
                prev_kind_results = build.load_results(testie, kind=True)
            except FileNotFoundError:
                prev_results = None
                prev_kind_results = None

            all_results = None
            time_results = None
            try:
                # The guard is currently always true (both were just set
                # to None); kept for safety against future refactoring.
                if all_results is None and time_results is None:
                    all_results, time_results, init_done = testie.execute_all(build, prev_results=prev_results, prev_kind_results=prev_kind_results, do_test=args.do_test, options=args)
                if not all_results and not time_results:
                    returncode+=1
                    continue
            except ScriptInitException:
                continue

            if args.compare:
                # BUG FIX: the original unpacked both returned values into
                # the same name (`variables_passed,variables_passed = ...`),
                # so the comparison below was always true, every test was
                # counted as passing, and regressions never affected the
                # return code here.
                variables_passed, variables_passed_all = regression.compare(testie, testie.variables, all_results, build, old_all_results, last_build)
                if variables_passed == variables_passed_all:
                    nok += 1
                else:
                    returncode += 1
                ntests += 1

            if all_results and len(all_results) > 0:
                build.writeResults()

            #Filtered results are results only for the given current variables
            filtered_results = {}
            for v in testie.variables:
                run = Run(v)
                if run in all_results:
                    filtered_results[run] = all_results[run]

            if args.statistics:
                Statistics.run(build,filtered_results, testie, max_depth=args.statistics_maxdepth, filename=args.statistics_filename)

            grapher = Grapher()

            # Series to graph: the comparison build first (if any), then
            # the extra old builds that have results.
            g_series = []
            if last_build and old_all_results and args.compare:
                g_series.append((testie, last_build, old_all_results))

            for g_build in graph_builds:
                try:
                    g_all_results = g_build.load_results(testie)
                    if (g_all_results and len(g_all_results) > 0):
                        g_series.append((testie, g_build, g_all_results))
                except FileNotFoundError:
                    print("Previous build %s could not be found, we will not graph it !" % g_build.version)

            filename = args.graph_filename if args.graph_filename else build.result_path(testie.filename, 'pdf')
            grapher.graph(series=[(testie, build, all_results)] + g_series,
                          title=testie.get_title(),
                          filename=filename,
                          graph_variables=[Run(x) for x in testie.variables],
                          options = args)
            if time_results:
                for find, results in time_results.items():
                    if not results:
                        continue
                    grapher.graph(series=[(testie, build, results)],
                          title=testie.get_title(),
                          filename=filename,
                          options = args)
        # Slide the comparison window: the current build becomes the
        # reference for the next (newer) one.
        if last_build and args.graph_num > 0:
            graph_builds = [last_build] + graph_builds[:-1]
        last_build = build
        if args.compare:
            print("[%s] Finished run for %s, %d/%d tests passed" % (repo.name, build.version, nok, ntests))

    sys.exit(returncode)
Code example #4
0
File: npf-compare.py  Project: SDarayan/npf
def main():
    """Compare one testie across several repositories and graph the result.

    Parses the command line, runs the comparator over every repository
    named on it, restricts every result dataset to the variables common to
    all repositories, and renders a single comparison graph.
    """
    parser = argparse.ArgumentParser(
        description='NPF cross-repository comparator')

    npf.add_verbosity_options(parser)

    parser.add_argument('repos',
                        metavar='repo',
                        type=str,
                        nargs='+',
                        help='names of the repositories to watch')

    npf.add_building_options(parser)
    npf.add_testing_options(parser)
    npf.add_graph_options(parser)
    args = parser.parse_args()

    npf.parse_nodes(args)

    # Parsing repo list and getting last_build
    repo_list = []
    for repo_name in args.repos:
        repo = Repository.get_instance(repo_name, args)
        repo.last_build = None
        repo_list.append(repo)

    comparator = Comparator(repo_list)

    series = comparator.run(testie_name=args.testie,
                            tags=args.tags,
                            options=args)

    if series is None:
        return

    # Default output name: compare/<testie>_<repo1>_<repo2>... .pdf
    if args.graph_filename is None:
        filename = 'compare/' + os.path.splitext(os.path.basename(
            args.testie))[0] + '_' + '_'.join(
                ["%s" % repo.reponame for repo in repo_list]) + '.pdf'
    else:
        filename = args.graph_filename[0]

    # Renamed from `dir`, which shadowed the builtin of the same name.
    save_dir = Path(os.path.dirname(filename))
    if not save_dir.exists():
        os.makedirs(save_dir.as_posix())

    # We must find the common variables to all repo, and change dataset to reflect only those
    all_variables = []
    for testie, build, dataset in series:
        v_list = set()
        for name, variable in testie.variables.vlist.items():
            v_list.add(name)
        all_variables.append(v_list)
    # Entries are already sets, so the former map(set, ...) was redundant.
    common_variables = set.intersection(*all_variables)

    # Rebuild each dataset keyed on runs restricted to the common variables.
    for i, (testie, build, dataset) in enumerate(series):
        ndataset = {}
        for run, results in dataset.items():
            ndataset[run.intersect(common_variables)] = results
        series[i] = (testie, build, ndataset)

    grapher = Grapher()
    grapher.graph(series=series, filename=filename, options=args)
Code example #5
0
def main():
    """NPF Watcher entry point.

    Builds the command line, resolves every watched repository with its
    valid testies, then starts the Watcher loop that polls the repos,
    runs regression tests on new commits, and mails reports.
    """
    parser = argparse.ArgumentParser(description='NPF Watcher')
    parser.add_argument('repos',
                        metavar='repo name',
                        type=str,
                        nargs='+',
                        help='names of the repositories to watch')
    # BUG FIX: `nargs=1` made args.interval a one-element *list* when the
    # option was given on the CLI, but an *int* (60) by default; Watcher
    # received inconsistent types. Plain `type=int` always yields an int.
    parser.add_argument(
        '--interval',
        metavar='secs',
        type=int,
        default=60,
        help='interval in seconds between polling of repositories')
    # Help string fixed: the default below is 1, not 0.
    parser.add_argument('--history',
                        dest='history',
                        metavar='N',
                        type=int,
                        default=1,
                        help='assume last N commits as untested (default 1)')

    v = npf.add_verbosity_options(parser)

    t = npf.add_testing_options(parser)

    b = npf.add_building_options(parser)

    a = npf.add_graph_options(parser)
    a.add_argument('--graph-num',
                   metavar='N',
                   type=int,
                   nargs='?',
                   default=8,
                   help='Number of versions to graph')

    m = parser.add_argument_group('Mail options')
    m.add_argument('--mail-to',
                   metavar='email',
                   type=str,
                   nargs='+',
                   help='list of e-mails for report',
                   default=[])
    # BUG FIX: same list-vs-scalar mismatch as --interval (nargs=1 with a
    # plain-string default); the sender address is now always a str.
    m.add_argument('--mail-from',
                   metavar='email',
                   type=str,
                   dest='mail_from',
                   default='*****@*****.**',
                   help='e-mail address used as the report sender')
    # Help string fixed: store_false on mail_always means this flag
    # *restricts* mail to error cases, not the opposite.
    m.add_argument('--mail-erroronly',
                   default=True,
                   dest='mail_always',
                   action='store_false',
                   help='only send an e-mail when there is an error')
    m.add_argument('--mail-smtp',
                   metavar='address',
                   type=str,
                   dest='mail_smtp',
                   default='localhost',
                   help='smtp server address. Default is localhost')

    m.add_argument(
        '--onerun',
        default=False,
        dest='onerun',
        action='store_true',
        help=
        'Do only one loop of regression test, usefull for testing that this software mainly works'
    )

    parser.set_defaults(graph_size=[6, 2.5])
    args = parser.parse_args()

    npf.parse_nodes(args)

    history = args.history

    if len(args.mail_to) == 0:
        print(
            "Warning: No mail-to e-mail address given. NPF Watcher will not send any e-mail."
        )

    # Parsing repo list and getting last_build
    repo_list = []
    for repo_name in args.repos:
        repo = Repository.get_instance(repo_name, args)
        tags = args.tags.copy()
        tags += repo.tags

        last_build = repo.get_last_build(history, with_results=True)
        if last_build is not None:
            print("[%s] Last tested version is %s" %
                  (repo.name, last_build.version))
        repo.last_build = last_build

        testies = Testie.expand_folder(args.testie, tags=tags, options=args)

        if len(testies) == 0:
            print("[%s] No valid testies. Ignoring this repo." % (repo.name))
        else:
            repo_list.append((repo, testies))

    if len(repo_list) == 0:
        print("ERROR : No valid repositories to use !")
        sys.exit(-1)

    watcher = Watcher(repo_list,
                      mail_from=args.mail_from,
                      mail_to=args.mail_to,
                      interval=args.interval,
                      mail_always=args.mail_always,
                      mail_smtp=args.mail_smtp,
                      history=history,
                      options=args)
    watcher.run(args)