Пример #1
0
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform,
                       mock_os, mock_chdir, mock_path_exists, mock_is_dir,
                       mock_listdir, mock_rmtree, mock_mkdir, test_args,
                       expected_cmd):
    """
    Runs through executing the specified run_tf_benchmarks.py command from the
    test_args and verifying that the model_init file calls run_command with
    the expected_cmd string.
    """
    # Make filesystem checks succeed so the benchmark util proceeds.
    mock_path_exists.return_value = True
    mock_is_dir.return_value = True
    parse_model_args_file()
    mock_listdir.return_value = True
    clear_kmp_env_vars()
    # Route platform detection (system type, os.access, lscpu) through mocks.
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_arg_list = test_args.split(" ")
    with patch.object(sys, "argv", test_arg_list):
        model_benchmark = ModelBenchmarkUtil()
        model_benchmark.main()
    # Exactly one command should have been launched.
    assert len(mock_run_command.call_args_list) == 1
    call_args = mock_run_command.call_args_list[0][0][0]
    # python3 argparse parses things in different order than python2
    # we'll check that the args are all there though
    actual_args = sorted(call_args.split())
    expected_args = sorted(expected_cmd.split())
    # zip() silently truncates to the shorter sequence, so without this
    # explicit length check a missing or extra argument would go unnoticed.
    assert len(actual_args) == len(expected_args), \
        "Expected: {}\nActual: {}".format(expected_cmd, call_args)
    for actual_arg, expected_arg in zip(actual_args, expected_args):
        # use fnmatch in case we have file names with wildcards (like timestamps in output files)
        assert fnmatch.fnmatch(actual_arg, expected_arg), \
            "Expected: {}\nActual: {}".format(expected_cmd, call_args)
Пример #2
0
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform,
                       mock_os, mock_chdir, mock_path_exists, mock_is_dir,
                       mock_listdir, mock_rmtree, mock_mkdir, test_args,
                       expected_cmd):
    """
    Executes the run_tf_benchmarks.py command given by test_args and checks
    that the model_init file invokes run_command with the expected_cmd string.
    """
    # Make filesystem checks succeed so the benchmark util proceeds.
    mock_path_exists.return_value = True
    mock_is_dir.return_value = True
    parse_model_args_file()
    mock_listdir.return_value = True
    # Route platform detection (system type, os.access, lscpu) through mocks.
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    argv = test_args.split(" ")
    with patch.object(sys, "argv", argv):
        ModelBenchmarkUtil().main()
    # Exactly one command should have been launched.
    assert len(mock_run_command.call_args_list) == 1
    launched_cmd = mock_run_command.call_args_list[0][0][0]
    # python3 argparse orders args differently than python2, so compare
    # sorted token lists instead of the raw command strings.
    assert sorted(launched_cmd.split()) == sorted(expected_cmd.split())
Пример #3
0
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform,
                       mock_os, mock_get_cpuset, mock_glob, mock_remove,
                       mock_chdir, mock_stat, mock_path_exists, mock_is_file,
                       mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir,
                       test_args, expected_cmd, comment, cpuset):
    """
    Runs through executing the specified run_tf_benchmarks.py command from the
    test_args and verifying that the model_init file calls run_command with
    the expected_cmd string.
    """

    print("****** Running The {} test ******".format(comment))
    os.environ["PYTHON_EXE"] = "python"
    if "mpi" not in test_args:
        os.environ["MPI_NUM_PROCESSES"] = "None"
        os.environ["MPI_HOSTNAMES"] = "None"
    else:
        # Extract MPI settings from the CLI args and mirror them into env vars.
        if "--mpi_num_processes=" in test_args:
            match_mpi_procs = re.search('--mpi_num_processes=([0-9]+)',
                                        test_args)
            if match_mpi_procs and match_mpi_procs.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES"] = match_mpi_procs.group(1)
        if "--mpi_num_processes_per_socket=" in test_args:
            match_per_socket = re.search(
                '--mpi_num_processes_per_socket=([0-9]+)', test_args)
            if match_per_socket and match_per_socket.lastindex >= 1:
                os.environ[
                    "MPI_NUM_PROCESSES_PER_SOCKET"] = match_per_socket.group(1)

    # BUG FIX: side_effect must be a callable, iterable, or exception --
    # assigning True would make the mock raise TypeError when called.
    # return_value is the correct way to make path.exists() report True.
    mock_os.path.exists.return_value = True
    mock_get_cpuset.return_value = cpuset
    mock_is_dir.return_value = True
    mock_is_file.return_value = True
    mock_stat.return_value = MagicMock(st_nlink=0)
    parse_model_args_file()
    mock_listdir.return_value = ["data.record"]
    mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"]
    clear_kmp_env_vars()
    # Route platform detection (system type, os.access, lscpu) through mocks.
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_args = re.sub(
        " +", " ",
        test_args)  # get rid of extra spaces in the test_args string
    expected_cmd = re.sub(
        " +", " ",
        expected_cmd)  # get rid of extra spaces in the expected_cmd string
    test_arg_list = test_args.split(" ")
    with patch.object(sys, "argv", test_arg_list):
        model_benchmark = ModelBenchmarkUtil()
        model_benchmark.main()
    # Exactly one command should have been launched.
    assert len(mock_run_command.call_args_list) == 1
    call_args = mock_run_command.call_args_list[0][0][0]
    # python3 argparse parses things in different order than python2
    # we'll check that the args are all there though
    actual_args = sorted(call_args.split())
    expected_args = sorted(expected_cmd.split())
    # zip() silently truncates to the shorter sequence, so without this
    # explicit length check a missing or extra argument would go unnoticed.
    assert len(actual_args) == len(expected_args), \
        "Expected: {}\nActual: {}".format(expected_cmd, call_args)
    for actual_arg, expected_arg in zip(actual_args, expected_args):
        # use fnmatch in case we have file names with wildcards (like timestamps in output files)
        assert fnmatch.fnmatch(actual_arg, expected_arg), \
            "Expected: {}\nActual: {}".format(expected_cmd, call_args)
Пример #4
0
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform,
                       mock_os, mock_glob, mock_remove, mock_chdir, mock_stat,
                       mock_path_exists, mock_is_file, mock_is_dir,
                       mock_listdir, mock_rmtree, mock_mkdir, test_args,
                       expected_cmd, comment):
    """
    Runs through executing the specified run_tf_benchmarks.py command from the
    test_args and verifying that the model_init file calls run_command with
    the expected_cmd string.
    """
    # These configs are known-broken in this environment; skip them.
    if comment in [
            "tf_ssd_resnet34_args.json :: ssd_resnet34_fp32_training",
            "tf_gnmt_args.json :: gnmt_fp32_throughput",
            "tf_gnmt_args.json :: gnmt_fp32_latency"
    ]:
        pytest.skip()

    print("****** Running The {} test ******".format(comment))
    os.environ["PYTHON_EXE"] = "python"
    # BUG FIX: the membership test must be against the test_args string,
    # not the mock_run_command mock object (a MagicMock's default
    # __contains__ always returns False, so the branch always ran).
    if "mpi" not in test_args:
        os.environ["MPI_NUM_PROCESSES"] = "None"
        os.environ["MPI_HOSTNAMES"] = "None"
    # Make filesystem checks succeed so the benchmark util proceeds.
    mock_path_exists.return_value = True
    mock_is_dir.return_value = True
    mock_is_file.return_value = True
    mock_stat.return_value = MagicMock(st_nlink=0)
    parse_model_args_file()
    mock_listdir.return_value = ["data.record"]
    mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"]
    clear_kmp_env_vars()
    # Route platform detection (system type, os.access, lscpu) through mocks.
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_args = re.sub(
        " +", " ",
        test_args)  # get rid of extra spaces in the test_args string
    expected_cmd = re.sub(
        " +", " ",
        expected_cmd)  # get rid of extra spaces in the expected_cmd string
    test_arg_list = test_args.split(" ")
    with patch.object(sys, "argv", test_arg_list):
        model_benchmark = ModelBenchmarkUtil()
        model_benchmark.main()
    # Exactly one command should have been launched.
    assert len(mock_run_command.call_args_list) == 1
    call_args = mock_run_command.call_args_list[0][0][0]
    # python3 argparse parses things in different order than python2
    # we'll check that the args are all there though
    actual_args = sorted(call_args.split())
    expected_args = sorted(expected_cmd.split())
    # zip() silently truncates to the shorter sequence, so without this
    # explicit length check a missing or extra argument would go unnoticed.
    assert len(actual_args) == len(expected_args), \
        "Expected: {}\nActual: {}".format(expected_cmd, call_args)
    for actual_arg, expected_arg in zip(actual_args, expected_args):
        # use fnmatch in case we have file names with wildcards (like timestamps in output files)
        assert fnmatch.fnmatch(actual_arg, expected_arg), \
            "Expected: {}\nActual: {}".format(expected_cmd, call_args)
Пример #5
0
def test_run_benchmark_bad_socket(mock_run_command, mock_subprocess,
                                  mock_platform, mock_os, mock_get_cpuset,
                                  mock_glob, mock_remove, mock_chdir,
                                  mock_stat, mock_path_exists, mock_is_file,
                                  mock_is_dir, mock_listdir, mock_rmtree,
                                  mock_mkdir, test_args, socket_id, cpuset):
    """
    Checks to ensure that the proper error handling is done when the cpuset does not include any cores
    for the specified socket_id
    """

    os.environ["PYTHON_EXE"] = "python"
    if "mpi" not in test_args:
        os.environ["MPI_NUM_PROCESSES"] = "None"
        os.environ["MPI_HOSTNAMES"] = "None"
    else:
        # Extract MPI settings from the CLI args and mirror them into env vars.
        if "--mpi_num_processes=" in test_args:
            match_mpi_procs = re.search('--mpi_num_processes=([0-9]+)',
                                        test_args)
            if match_mpi_procs and match_mpi_procs.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES"] = match_mpi_procs.group(1)
        if "--mpi_num_processes_per_socket=" in test_args:
            match_per_socket = re.search(
                '--mpi_num_processes_per_socket=([0-9]+)', test_args)
            if match_per_socket and match_per_socket.lastindex >= 1:
                os.environ[
                    "MPI_NUM_PROCESSES_PER_SOCKET"] = match_per_socket.group(1)

    # BUG FIX: side_effect must be a callable, iterable, or exception --
    # assigning True would make the mock raise TypeError when called.
    # return_value is the correct way to make path.exists() report True.
    mock_os.path.exists.return_value = True
    mock_get_cpuset.return_value = cpuset
    mock_is_dir.return_value = True
    mock_is_file.return_value = True
    mock_stat.return_value = MagicMock(st_nlink=0)
    parse_model_args_file()
    mock_listdir.return_value = ["data.record"]
    mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"]
    clear_kmp_env_vars()
    # Route platform detection (system type, os.access, lscpu) through mocks.
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_args = re.sub(
        " +", " ",
        test_args)  # get rid of extra spaces in the test_args string
    test_arg_list = test_args.split(" ")
    # The run must abort with a SystemExit naming the bad socket id.
    with pytest.raises(
            SystemExit,
            match="ERROR: There are no socket id {} cores in the cpuset.".
            format(socket_id)):
        with patch.object(sys, "argv", test_arg_list):
            model_benchmark = ModelBenchmarkUtil()
            model_benchmark.main()
Пример #6
0
def setup_mock_values(mock_platform, mock_os, mock_subprocess):
    """Point platform_config's system-type, os-access, and lscpu hooks at the given mocks."""
    cfg = platform_config
    cfg.set_mock_system_type(mock_platform)
    cfg.set_mock_os_access(mock_os)
    cfg.set_mock_lscpu_subprocess_values(mock_subprocess)
Пример #7
0
def setup_mock_values(platform_mock, os_mock, subprocess_mock):
    """Install the given mocks into platform_config (system type, os access, lscpu output)."""
    # Apply each setter in order; the order matches the original setup sequence.
    setters = (
        (platform_config.set_mock_system_type, platform_mock),
        (platform_config.set_mock_os_access, os_mock),
        (platform_config.set_mock_lscpu_subprocess_values, subprocess_mock),
    )
    for setter, mock_obj in setters:
        setter(mock_obj)
Пример #8
0
def setup_mock_values(mock_platform):
    """Route platform_config's system-type detection through the given mock."""
    platform_config.set_mock_system_type(mock_platform)