Example No. 1
def pytest_generate_tests(metafunc):
    # This is where the list of models to test can be configured
    # e.g. by using info in metafunc.config
    all_models = list_models()
    if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
        metafunc.parametrize('model_class',
                             all_models,
                             ids=[m.name for m in all_models],
                             scope="class")
        metafunc.parametrize('device', ['cpu', 'cuda'], scope='class')
        metafunc.parametrize('compiler', ['jit', 'eager'], scope='class')
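The hook above produces class-scoped `model_class`, `device`, and `compiler` parameters for a class named `TestBenchNetwork`. The class below is only a hedged sketch of how such a consumer could look; the `test_eval` method name and the `Model(device=..., jit=...)` constructor signature are assumptions borrowed from the other examples on this page, not the actual test suite.

import pytest

class TestBenchNetwork:
    # Receives the class-scoped parameters generated by pytest_generate_tests.
    def test_eval(self, model_class, device, compiler):
        try:
            benchmark = model_class(device=device, jit=(compiler == "jit"))
            benchmark.eval()
        except NotImplementedError:
            pytest.skip("Model does not support this device/compiler combination.")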
Example No. 2
def iter_models(args):
    for benchmark_cls in list_models():
        if (not re.search("|".join(args.filter), benchmark_cls.name, re.I)
                or re.search("|".join(args.exclude), benchmark_cls.name, re.I)
                or benchmark_cls.name in SKIP):
            continue
        try:
            benchmark = benchmark_cls(device=args.device, jit=False)
            model, example_inputs = benchmark.get_module()
            model.eval()
            gc.collect()
            yield short_name(benchmark.name), model, example_inputs
        except NotImplementedError:
            pass
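A minimal driver for this generator might look like the sketch below; it assumes, as the snippet itself does, that `example_inputs` can be splatted directly into the model returned by `get_module()`.

import torch

def run_all(args):
    # Consume iter_models and run one inference pass per model.
    for name, model, example_inputs in iter_models(args):
        with torch.no_grad():
            output = model(*example_inputs)
        print(f"{name}: forward pass returned {type(output).__name__}")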
Example No. 3
def pytest_generate_tests(metafunc):
    # This is where the list of models to test can be configured
    # e.g. by using info in metafunc.config
    devices = ['cpu', 'cuda']
    if metafunc.config.option.cpu_only:
        devices = ['cpu']
    all_models = list_models()
    if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
        is_eval = metafunc.function.__name__ == "test_eval"
        test_name = lambda m: m.name + ("-freeze" if is_eval and hasattr(
            m, "optimized_for_inference") else "")
        metafunc.parametrize('model_class',
                             all_models,
                             ids=[test_name(m) for m in all_models],
                             scope="class")
        metafunc.parametrize('device', devices, scope='class')
        metafunc.parametrize('compiler', ['jit', 'eager'], scope='class')
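The `metafunc.config.option.cpu_only` check implies a matching command-line option registered elsewhere, typically in conftest.py. A plausible registration is sketched below; the exact flag spelling, default, and help text are assumptions.

def pytest_addoption(parser):
    # Registers the flag that is read above as metafunc.config.option.cpu_only.
    parser.addoption("--cpu_only",
                     action="store_true",
                     default=False,
                     help="Parametrize tests over the CPU device only.")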
Example No. 4
def pytest_generate_tests(metafunc, display_len=24):
    # This is where the list of models to test can be configured
    # e.g. by using info in metafunc.config
    all_models = list_models()
    short_names = []
    for model_class in all_models:
        short = model_class.name
        if len(short) > display_len:
            short = short[:display_len] + "..."
        short_names.append(short)

    if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
        metafunc.parametrize('model_class',
                             all_models,
                             ids=short_names,
                             scope="class")
        metafunc.parametrize('device', ['cpu', 'cuda'], scope='class')
        metafunc.parametrize('compiler', ['jit', 'eager'], scope='class')
Example No. 5
def iter_models(args):
    device = "cpu"
    for benchmark_cls in list_models():
        bench_name = benchmark_cls.name
        if args.benchmark and args.benchmark != bench_name:
            continue
        if bench_name in NO_GET_MODULE:
            print(f"{bench_name} has no get_module, skipped")
            continue
        if bench_name in NO_JIT:
            print(f"{bench_name} has no scripted module, skipped")
            continue
        try:
            # Disable profiling mode so that the collected graph does not
            # contain profiling nodes.
            if args.no_profiling:
                torch._C._jit_set_profiling_mode(False)

            benchmark = benchmark_cls(device=device, jit=True)
            model, example_inputs = benchmark.get_module()

            # extract ScriptedModule object for BERT model
            if bench_name == "BERT_pytorch":
                model = model.bert

            fname = get_dump_filename(bench_name, device, args)
            print(f"Dump Graph IR for {bench_name} to {fname}")

            # The default mode needs a warm-up run so the profiling executor can specialize the graph.
            if not (args.no_profiling or args.inlined_graph):
                model.graph_for(*example_inputs)

            with open(fname, 'w') as dump_file:
                if args.inlined_graph:
                    print(model.inlined_graph, file=dump_file)
                else:
                    print(model.graph_for(*example_inputs), file=dump_file)
        except NotImplementedError:
            print(f"Cannot collect graph IR dump for {bench_name}")
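This function reads several attributes from `args` (`benchmark`, `no_profiling`, `inlined_graph`, plus whatever `get_dump_filename` expects). A hedged sketch of an argument parser providing the first three is shown below; the real script's flags, defaults, and help strings may differ.

import argparse

def parse_args():
    parser = argparse.ArgumentParser(
        description="Dump TorchScript graph IR for benchmark models.")
    parser.add_argument("--benchmark", default="",
                        help="Dump only the benchmark with this exact name.")
    parser.add_argument("--no_profiling", action="store_true",
                        help="Disable the JIT profiling executor before collecting the graph.")
    parser.add_argument("--inlined_graph", action="store_true",
                        help="Dump model.inlined_graph instead of model.graph_for(*inputs).")
    return parser.parse_args()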
Example No. 6
                        "--mode",
                        choices=["eager", "jit"],
                        default="eager",
                        help="Which mode to run.")
    parser.add_argument("-t",
                        "--test",
                        choices=["eval", "train"],
                        default="eval",
                        help="Which test to run.")
    parser.add_argument("--profile",
                        action="store_true",
                        help="Run the profiler around the function")
    args = parser.parse_args()

    found = False
    for Model in list_models():
        if args.model.lower() in Model.name.lower():
            found = True
            break
    if found:
        print(
            f"Running {args.test} method from {Model.name} on {args.device} in {args.mode} mode"
        )
    else:
        print(f"Unable to find model matching {args.model}")
        exit(-1)

    # build the model and get the chosen test method
    m = Model(device=args.device, jit=(args.mode == "jit"))
    test = getattr(m, args.test)
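A natural continuation, not shown in the excerpt, is to invoke the selected method, optionally under the autograd profiler when --profile was passed. The lines below are a hedged sketch of that tail; they assume the eval/train methods take no arguments and that torch is imported at the top of the script.

    if args.profile:
        # Wrap the call in the (legacy) autograd profiler and print a summary.
        with torch.autograd.profiler.profile(use_cuda=(args.device == "cuda")) as prof:
            test()
        print(prof.key_averages().table(sort_by="cpu_time_total"))
    else:
        test()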
Example No. 7
def _load_tests():
    for Model in list_models():
        for device in ('cpu', 'cuda'):
            _load_test(Model, device)
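The companion helper `_load_test` is not part of this snippet. The sketch below illustrates one common pattern for such a helper, generating a test method per (model, device) pair and attaching it to a unittest.TestCase; the class name, method naming scheme, and `Model(device=..., jit=...)` signature are assumptions.

import unittest

class TestBenchmark(unittest.TestCase):
    pass

def _load_test(Model, device):
    # Attach a generated test method for this (model, device) pair.
    def test_method(self):
        try:
            benchmark = Model(device=device, jit=False)
            benchmark.eval()
        except NotImplementedError:
            self.skipTest(f"{Model.name} is not supported on {device}.")
    setattr(TestBenchmark, f"test_{Model.name}_{device}_eval", test_method)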