Example #1

# Imports assumed from the surrounding operator-benchmark module
# (benchmark_core and benchmark_utils are local helpers, not pip packages):
import argparse
import sys

import benchmark_core
import benchmark_utils
from caffe2.python import workspace

def main():
    print("Python version " + str(sys.version_info[0]))

    parser = argparse.ArgumentParser(
        description="Run microbenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument('--run_mode',
                        help='Run mode. '
                        'short: run all operators with few shapes; '
                        'long: run all operators with all shapes',
                        choices=benchmark_core.RUN_MODES.keys(),
                        default='short')

    # This option is used to filter the test cases to run.
    # Currently the matching is by substring, but we could consider
    # supporting regex. For example, if --operator is 'matmul', it will
    # match these test cases:
    # matmul_benchmark.Caffe2OperatorTestCase.matmul_512_128_512_transa_transb
    # matmul_benchmark.PyTorchOperatorTestCase.matmul_100_200_150
    # ...
    # (A sketch of the implied substring check follows the option below.)
    parser.add_argument(
        '--operator',
        help='Only run the test cases that contain the provided operator'
        ' as a substring of their names',
        default=None)
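
    # A hedged sketch of the substring filtering described above (the actual
    # selection happens inside benchmark_core; 'all_tests' and 'test_name'
    # are illustrative names, not real attributes):
    #   selected = [t for t in all_tests if args.operator in t.test_name]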

    parser.add_argument('--list_tests',
                        help='List all test cases without running them',
                        action='store_true')

    parser.add_argument(
        "--iterations",
        help="Repeat each operator for the number of iterations",
        type=int)

    parser.add_argument(
        "--warmup_iterations",
        help="Number of iterations to ignore before measuring performance",
        default=10,
        type=int)

    parser.add_argument("--ai_pep_format",
                        help="Print result when running on AI-PEP",
                        default=False,
                        type=bool)

    parser.add_argument(
        '--framework',
        help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
        default="Caffe2,PyTorch")

    args = parser.parse_args()

    if benchmark_utils.is_caffe2_enabled(args.framework):
        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.ClearGlobalNetObserver()

    benchmark_core.BenchmarkRunner(args).run()
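
The --ai_pep_format flag above uses store_true; the alternative sometimes seen,
type=bool, is a known argparse pitfall, since bool() is applied to the raw argv
string and any non-empty value (including "False") parses as True. A minimal,
standard-library-only demonstration with made-up option names:

import argparse

parser = argparse.ArgumentParser()
# type=bool calls bool() on the string, so "--broken False" is still truthy:
parser.add_argument("--broken", type=bool, default=False)
# store_true is the idiomatic on/off switch:
parser.add_argument("--fixed", action="store_true")

args = parser.parse_args(["--broken", "False"])
print(args.broken)  # True, because bool("False") is True
print(args.fixed)   # False
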
Example #2

def main():
    # Parser construction assumed to mirror the other examples; the original
    # snippet begins at the --list_tests option below.
    parser = argparse.ArgumentParser(
        description="Run microbenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument('--list_tests',
                        help='List all test cases without running them',
                        action='store_true')

    parser.add_argument(
        "--iterations",
        help="Repeat each operator for the number of iterations",
        type=int)

    parser.add_argument(
        "--warmup_iterations",
        help="Number of iterations to ignore before measuring performance",
        default=10,
        type=int)

    parser.add_argument("--ai_pep_format",
                        help="Print result when running on AI-PEP",
                        default=False,
                        type=bool)

    parser.add_argument('--framework',
                        help='Run PyTorch or Caffe2 operators',
                        default=None)

    args = parser.parse_args()

    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    workspace.ClearGlobalNetObserver()

    benchmark_core.BenchmarkRunner(args).run()
Example #3
def main():
    parser = argparse.ArgumentParser(
        description="Run microbenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        '--tag_filter',
        help='Run only the benchmarks whose tags match tag_filter',
        default='short')

    # This option is used to filter test cases to run.
    parser.add_argument(
        '--operators',
        help='Filter tests based on comma-delimited list of operators to test',
        default=None)

    parser.add_argument('--test_name',
                        help='Run tests that have the provided test_name',
                        default=None)

    parser.add_argument('--list_ops',
                        help='List operators without running them',
                        action='store_true')

    parser.add_argument('--list_tests',
                        help='List all test cases without running them',
                        action='store_true')

    parser.add_argument(
        "--iterations",
        help="Repeat each operator for the number of iterations",
        type=int)

    parser.add_argument(
        "--num_runs",
        help="Run each test num_runs times; each run executes "
        "the operator for <--iterations> iterations",
        type=int,
        default=1,
    )

    parser.add_argument(
        "--min_time_per_test",
        help="Set the minimum time (unit: seconds) to run each test",
        type=int,
        default=0,
    )

    parser.add_argument(
        "--warmup_iterations",
        help="Number of iterations to ignore before measuring performance",
        default=10,
        type=int)

    parser.add_argument(
        "--omp_num_threads",
        help="Number of OpenMP threads used in PyTorch/Caffe2 runtime",
        default=None,
        type=int)

    parser.add_argument(
        "--mkl_num_threads",
        help="Number of MKL threads used in PyTorch/Caffe2 runtime",
        default=None,
        type=int)

    parser.add_argument("--ai_pep_format",
                        help="Print result when running on AI-PEP",
                        default=False,
                        type=bool)

    parser.add_argument("--use_jit",
                        help="Run operators with PyTorch JIT mode",
                        action='store_true')

    parser.add_argument("--forward_only",
                        help="Only run the forward path of operators",
                        action='store_true')

    parser.add_argument(
        '--framework',
        help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
        default="Caffe2,PyTorch")

    args, _ = parser.parse_known_args()

    if benchmark_utils.is_caffe2_enabled(args.framework):
        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.ClearGlobalNetObserver()
    if args.omp_num_threads:
        benchmark_utils.set_omp_threads(args.omp_num_threads)
    if args.mkl_num_threads:
        benchmark_utils.set_mkl_threads(args.mkl_num_threads)

    benchmark_core.BenchmarkRunner(args).run()
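
Unlike Examples #1 and #2, this version calls parse_known_args, which returns a
(namespace, leftovers) pair instead of exiting with an error on unrecognized
flags; that tolerance is useful when framework-specific flags share the command
line. A small self-contained illustration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--iterations", type=int)

# parse_known_args collects unrecognized tokens instead of erroring out:
args, leftovers = parser.parse_known_args(["--iterations", "5", "--caffe2_log_level=0"])
print(args.iterations)  # 5
print(leftovers)        # ['--caffe2_log_level=0']
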
Example #4
def main():
    print("Python version " + str(sys.version_info[0]))

    parser = argparse.ArgumentParser(
        description="Run microbenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        '--tag_filter',
        help='Run only the benchmarks whose tags match tag_filter',
        default='short')

    # This option is used to filter test cases to run.
    parser.add_argument(
        '--operator',
        help='Run the test cases that contain the provided operator'
        ' as a substring of their names',
        default=None)

    parser.add_argument('--test_name',
                        help='Run tests that have the provided test_name',
                        default=None)

    parser.add_argument('--list_ops',
                        help='List operators without running them',
                        action='store_true')

    parser.add_argument('--list_tests',
                        help='List all test cases without running them',
                        action='store_true')

    parser.add_argument(
        "--iterations",
        help="Repeat each operator for the number of iterations",
        type=int)

    parser.add_argument(
        "--min_time_per_test",
        help="Set the minimum time (unit: seconds) to run each test",
        type=int,
        default=0,
    )

    parser.add_argument(
        "--warmup_iterations",
        help="Number of iterations to ignore before measuring performance",
        default=10,
        type=int)

    parser.add_argument("--ai_pep_format",
                        help="Print result when running on AI-PEP",
                        default=False,
                        type=bool)

    parser.add_argument("--forward_only",
                        help="Only run the forward path of operators",
                        action='store_true')

    parser.add_argument(
        '--framework',
        help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
        default="Caffe2,PyTorch")

    args = parser.parse_args()

    if benchmark_utils.is_caffe2_enabled(args.framework):
        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.ClearGlobalNetObserver()

    benchmark_core.BenchmarkRunner(args).run()
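
benchmark_utils.is_caffe2_enabled is not shown in these snippets; given the
comma-delimited --framework default above, a plausible sketch of such a check
(the body is an assumption for illustration, not the actual helper) might be:

def is_caffe2_enabled(framework_arg):
    # Hypothetical: split the comma-delimited --framework value and test
    # membership, ignoring stray whitespace around each name.
    return "Caffe2" in (name.strip() for name in framework_arg.split(","))

print(is_caffe2_enabled("Caffe2,PyTorch"))  # True
print(is_caffe2_enabled("PyTorch"))         # False
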
Example #5
def main():
    parser = argparse.ArgumentParser(
        description="Run microbenchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    parser.add_argument(
        '--tag_filter',
        help='Run only the benchmarks whose tags match tag_filter',
        default='short')

    # This option is used to filter test cases to run.
    parser.add_argument(
        '--operators',
        help='Filter tests based on comma-delimited list of operators to test',
        default=None)

    parser.add_argument(
        '--test_name',
        help='Run tests that have the provided test_name',
        default=None)

    parser.add_argument(
        '--list_ops',
        help='List operators without running them',
        action='store_true')

    parser.add_argument(
        '--list_tests',
        help='List all test cases without running them',
        action='store_true')

    parser.add_argument(
        "--iterations",
        help="Repeat each operator for the number of iterations",
        type=int
    )

    parser.add_argument(
        "--num_runs",
        help="Run each test num_runs times; each run executes the operator for <--iterations> iterations",
        type=int,
        default=1,
    )

    parser.add_argument(
        "--min_time_per_test",
        help="Set the minimum time (unit: seconds) to run each test",
        type=int,
        default=0,
    )

    parser.add_argument(
        "--warmup_iterations",
        help="Number of iterations to ignore before measuring performance",
        default=100,
        type=int
    )

    parser.add_argument(
        "--omp_num_threads",
        help="Number of OpenMP threads used in PyTorch/Caffe2 runtime",
        default=None,
        type=int
    )

    parser.add_argument(
        "--mkl_num_threads",
        help="Number of MKL threads used in PyTorch/Caffe2 runtime",
        default=None,
        type=int
    )

    parser.add_argument(
        "--ai_pep_format",
        help="Print result when running on AI-PEP",
        # store_true: type=bool would treat any non-empty string as True
        action='store_true'
    )

    parser.add_argument(
        "--use_jit",
        help="Run operators with PyTorch JIT mode",
        action='store_true'
    )

    parser.add_argument(
        "--forward_only",
        help="Only run the forward path of operators",
        action='store_true'
    )

    parser.add_argument(
        '--framework',
        help='Comma-delimited list of frameworks to test (Caffe2, PyTorch)',
        default="Caffe2,PyTorch")

    parser.add_argument(
        '--wipe_cache',
        help='Wipe cache before benchmarking each operator',
        action='store_true',
        default=False
    )

    args, _ = parser.parse_known_args()

    if benchmark_utils.is_caffe2_enabled(args.framework):
        workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
        workspace.ClearGlobalNetObserver()
    if args.omp_num_threads:
        # benchmark_utils.set_omp_threads sets the env variable OMP_NUM_THREADS,
        # which has no effect here: Caffe2's init logic has already run by the
        # time the env var is set.

        # In general, OMP_NUM_THREADS (and other OMP env variables) must be set
        # before the program starts.
        # From Chapter 4 of the OpenMP standard: https://www.openmp.org/wp-content/uploads/openmp-4.5.pdf
        # "Modifications to the environment variables after the program has started,
        # even if modified by the program itself, are ignored by the OpenMP implementation"
        benchmark_utils.set_omp_threads(args.omp_num_threads)
        if benchmark_utils.is_pytorch_enabled(args.framework):
            torch.set_num_threads(args.omp_num_threads)
    if args.mkl_num_threads:
        benchmark_utils.set_mkl_threads(args.mkl_num_threads)

    benchmark_core.BenchmarkRunner(args).run()
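
To see the behavior described in the comment above, compare mutating the env
var after startup (ignored by the OpenMP runtime) with torch.set_num_threads,
which configures the runtime directly; a minimal sketch, assuming a PyTorch
install:

import os
import torch

# Ignored: per OpenMP 4.5 Chapter 4, env-var changes made after the program
# has started are invisible to the OpenMP implementation.
os.environ["OMP_NUM_THREADS"] = "1"

# Effective: this call talks to the runtime directly.
torch.set_num_threads(1)
print(torch.get_num_threads())  # 1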