Example #1
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = (
            '^test_cast_STRING_to_FLOAT_cpu.*',
            '^test_cast_FLOAT_to_STRING_cpu.*',
            '^test_qlinearconv_cpu.*',
            '^test_gru_seq_length_cpu.*',
            '^test_bitshift_right_uint16_cpu.*',
            '^test_bitshift_right_uint32_cpu.*',
            '^test_bitshift_right_uint64_cpu.*',
            '^test_bitshift_right_uint8_cpu.*',
            '^test_bitshift_left_uint16_cpu.*',
            '^test_bitshift_left_uint32_cpu.*',
            '^test_bitshift_left_uint64_cpu.*',
            '^test_bitshift_left_uint8_cpu.*',
            '^test_round_cpu.*',
            '^test_cumsum_1d_cpu.*',
            '^test_cumsum_1d_exclusive_cpu.*',
            '^test_cumsum_1d_reverse_cpu.*',
            '^test_cumsum_1d_reverse_exclusive_cpu.*',
            '^test_cumsum_2d_axis_0_cpu.*',
            '^test_cumsum_2d_axis_1_cpu.*',
            '^test_dynamicquantizelinear_expanded*',
            '^test_dynamicquantizelinear_max_adjusted_expanded*',
            '^test_dynamicquantizelinear_min_adjusted_expanded*',
            '^test_depthtospace*',
            '^test_gather_elements*',
            '^test_scatter_elements*',
            '^test_top_k*',
            '^test_unique_*',
            '^test_mod_float_mixed_sign_example_cpu.*',  #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types
            '^test_shrink_cpu.*',  #Invalid rank for input: x Got: 1 Expected: 2 Please fix either the inputs or the model.
        )

        # Example of how to disable tests for a specific provider.
        # if c2.supports_device('NGRAPH'):
        #    current_failing_tests = current_failing_tests + ('|^test_operator_repeat_dim_overflow_cpu.*',)
        if c2.supports_device('NGRAPH'):
            current_failing_tests = current_failing_tests + ('|^test_clip*', )

        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
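
A note on how the exclusion patterns above combine: backend_test.exclude() takes a single regular expression, so the whole filter tuple is collapsed into one alternation. The standalone sketch below, using only the standard re module and two made-up patterns, illustrates how that joined expression behaves against candidate test names.

import re

# Two sample patterns, joined the same way the example joins its filters.
filters = ('^test_round_cpu.*', '^test_cumsum_1d_cpu.*')
exclude_pattern = re.compile('(' + '|'.join(filters) + ')')

for name in ('test_round_cpu', 'test_cumsum_1d_cpu', 'test_abs_cpu'):
    status = 'excluded' if exclude_pattern.search(name) else 'kept'
    print(name, status)
# test_round_cpu excluded
# test_cumsum_1d_cpu excluded
# test_abs_cpu kept
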
Example #2
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # read filters data
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'testdata',
                             'onnx_backend_test_series_filters.jsonc')) as f:
            filters_lines = f.readlines()
        filters_lines = [x.split('//')[0] for x in filters_lines]
        filters = json.loads('\n'.join(filters_lines))

        current_failing_tests = filters['current_failing_tests']

        if platform.architecture()[0] == '32bit':
            current_failing_tests += filters['current_failing_tests_x86']

        if c2.supports_device('NGRAPH'):
            current_failing_tests += filters['current_failing_tests_NGRAPH']

        if c2.supports_device('DNNL'):
            current_failing_tests += filters['current_failing_tests_DNNL']

        if c2.supports_device('NNAPI'):
            current_failing_tests += filters['current_failing_tests_NNAPI']

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU']

        if c2.supports_device('OPENVINO_GPU_FP32'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU_FP32']

        if c2.supports_device('OPENVINO_CPU_FP32'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_CPU_FP32']

        filters = current_failing_tests + \
                  filters['tests_with_pre_opset7_dependencies'] + \
                  filters['unsupported_usages'] + \
                  filters['failing_permanently'] + \
                  filters['test_with_types_disabled_due_to_binary_size_concerns']

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
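
Example #2 replaces the hard-coded tuples with a JSON-with-comments file, stripping '//' comments line by line before handing the text to json.loads. Below is a minimal standalone sketch of that pattern, with a hypothetical excerpt of what such a filter file could contain; the real keys are the ones referenced above.

import json

def load_jsonc_text(text):
    # Drop everything after '//' on each line, then parse the rest as plain JSON.
    # Note: this simple split would also truncate '//' inside strings such as URLs.
    stripped = [line.split('//')[0] for line in text.splitlines()]
    return json.loads('\n'.join(stripped))

example = '''
{
  "current_failing_tests": [
    "^test_qlinearconv_cpu.*"  // temporarily failing
  ],
  "current_failing_tests_x86": []
}
'''

filters = load_jsonc_text(example)
print(filters['current_failing_tests'])  # ['^test_qlinearconv_cpu.*']
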
Example #3
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = ('^test_cast_STRING_to_FLOAT_cpu.*',
                                 '^test_cast_FLOAT_to_STRING_cpu.*',
                                 '^test_dequantizelinear_cpu.*',
                                 '^test_qlinearconv_cpu.*',
                                 '^test_quantizelinear_cpu.*',
                                 '^test_gru_seq_length_cpu.*')
        global version_tag
        if version_tag == 'onnx141' or onnx.__version__ == '1.4.1':
            current_failing_tests = current_failing_tests + (
                '^test_shrink_cpu.*',
                '^test_constantofshape_*.*',
            )
        if version_tag == 'onnx150' or onnx.__version__ == '1.5.0':
            current_failing_tests = current_failing_tests + (
                '^test_constantofshape_*.*', )

        # Failing for nGraph.
        if c2.supports_device('NGRAPH'):
            current_failing_tests = current_failing_tests + (
                '|^test_operator_repeat_dim_overflow_cpu.*', )

        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #4
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = [  #'^test_cast_STRING_to_FLOAT_cpu',  # old test data that is bad on Linux CI builds
            '^test_unique_not_sorted_without_axis_cpu',  # bad expected data. enable after https://github.com/onnx/onnx/pull/2381 is picked up
            '^test_mod_float_mixed_sign_example_cpu',  #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types
            '^test_resize_downsample_scales_cubic_align_corners_cpu',  # results mismatch with onnx tests
            '^test_resize_downsample_scales_linear_align_corners_cpu',  # results mismatch with onnx tests
            '^test_resize_tf_crop_and_resize_cpu',  # bad expected data, needs test fix
            '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu',  # bad expected data, needs test fix
            '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu',  # bad expected data, needs test fix
            '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu',  # bad expected data, needs test fix
            '^test_maxunpool_export_with_output_shape_cpu',  # Invalid output in ONNX test. See https://github.com/onnx/onnx/issues/2398
        ]

        # Example of how to disable tests for a specific provider.
        # if c2.supports_device('NGRAPH'):
        #    current_failing_tests.append('^test_operator_repeat_dim_overflow_cpu')
        if c2.supports_device('NGRAPH'):
            current_failing_tests += [
                '^test_clip.*', '^test_qlinearconv_cpu',
                '^test_depthtospace_crd.*', '^test_argmax_negative_axis.*',
                '^test_argmin_negative_axis.*',
                '^test_hardmax_negative_axis.*',
                '^test_gemm_default_no_bias_cpu',
                '^test_flatten_negative_axis.*',
                '^test_reduce_[a-z1-9_]*_negative_axes_.*',
                'test_squeeze_negative_axes_cpu',
                'test_unsqueeze_negative_axes_cpu', 'test_constant_pad_cpu',
                'test_edge_pad_cpu', 'test_reflect_pad_cpu'
            ]

        if c2.supports_device('MKL-DNN'):
            current_failing_tests += [
                '^test_range_float_type_positive_delta_expanded_cpu',
                '^test_range_int32_type_negative_delta_expanded_cpu'
            ]

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests.append('^test_div_cpu*')

        if c2.supports_device('OPENVINO_CPU_FP32'):
            current_failing_tests += [
                '^test_scan9_sum_cpu',  #sum_out output node not defined, temporarily disabling test
                '^test_scan_sum_cpu'
            ]  #sum_out output node not defined, temporarily disabling test

        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters() + \
                  other_tests_failing_permanently_filters() + \
                  test_with_types_disabled_due_to_binary_size_concerns_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #5
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = [
            '^test_adagrad_cpu',
            '^test_adagrad_multiple_cpu',
            '^test_batchnorm_epsilon_old_cpu',
            '^test_batchnorm_epsilon_training_mode_cpu',
            '^test_batchnorm_example_old_cpu',
            '^test_batchnorm_example_training_mode_cpu',
            '^test_celu_cpu',
            '^test_dropout_default_cpu',
            '^test_dropout_random_cpu',
            '^test_einsum_batch_diagonal_cpu',
            '^test_einsum_batch_matmul_cpu',
            '^test_einsum_inner_prod_cpu',
            '^test_einsum_sum_cpu',
            '^test_einsum_transpose_cpu',
            '^test_gathernd_example_int32_batch_dim1_cpu',
            '^test_inverse_batched_cpu',
            '^test_inverse_cpu',
            '^test_max_int16_cpu',
            '^test_max_int8_cpu',
            '^test_max_uint16_cpu',
            '^test_max_uint8_cpu',
            '^test_mean_square_distance_mean_3d_cpu',
            '^test_mean_square_distance_mean_3d_expanded_cpu',
            '^test_mean_square_distance_mean_4d_cpu',
            '^test_mean_square_distance_mean_4d_expanded_cpu',
            '^test_mean_square_distance_mean_cpu',
            '^test_mean_square_distance_mean_expanded_cpu',
            '^test_mean_square_distance_none_cpu',
            '^test_mean_square_distance_none_expanded_cpu',
            '^test_mean_square_distance_none_weights_cpu',
            '^test_mean_square_distance_none_weights_expanded_cpu',
            '^test_mean_square_distance_sum_cpu',
            '^test_mean_square_distance_sum_expanded_cpu',
            '^test_min_int16_cpu',
            '^test_min_int8_cpu',
            '^test_min_uint16_cpu',
            '^test_min_uint8_cpu',
            '^test_momentum_cpu',
            '^test_momentum_multiple_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NC_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_mean_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_reduction_sum_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_mean_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_cpu',
            '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2_with_weight_reduction_sum_ignore_index_cpu',
            '^test_nesterov_momentum_cpu',
            '^test_pow_bcast_array_cpu',
            '^test_pow_bcast_scalar_cpu',
            '^test_pow_cpu',
            '^test_pow_example_cpu',
            '^test_pow_types_float32_int32_cpu',
            '^test_pow_types_float32_int64_cpu',
            '^test_pow_types_float32_uint32_cpu',
            '^test_pow_types_float32_uint64_cpu',
            '^test_pow_types_float_cpu',
            '^test_pow_types_int32_float32_cpu',
            '^test_pow_types_int32_int32_cpu',
            '^test_pow_types_int64_float32_cpu',
            '^test_pow_types_int64_int64_cpu',
            '^test_pow_types_int_cpu',
            '^test_softmax_cross_entropy_mean_3d_cpu',
            '^test_softmax_cross_entropy_mean_3d_expanded_cpu',
            '^test_softmax_cross_entropy_mean_cpu',
            '^test_softmax_cross_entropy_mean_expanded_cpu',
            '^test_softmax_cross_entropy_mean_weight_cpu',
            '^test_softmax_cross_entropy_mean_weight_expanded_cpu',
            '^test_softmax_cross_entropy_mean_weight_ignore_index_cpu',
            '^test_softmax_cross_entropy_mean_weight_ignore_index_expanded_cpu',
            '^test_softmax_cross_entropy_none_cpu',
            '^test_softmax_cross_entropy_none_expanded_cpu',
            '^test_softmax_cross_entropy_none_weights_cpu',
            '^test_softmax_cross_entropy_none_weights_expanded_cpu',
            '^test_softmax_cross_entropy_sum_cpu',
            '^test_softmax_cross_entropy_sum_expanded_cpu',
            '^test_unfoldtodepth_with_padding_cpu',
            '^test_unfoldtodepth_with_padding_stride_cpu',
            '^test_unfoldtodepth_without_padding_cpu',
            '^test_gradient_of_add_and_mul_cpu',
            '^test_gradient_of_add_cpu',
            '^test_batchnorm_example_training_mode_cpu',
            '^test_batchnorm_epsilon_training_mode_cpu',
            '^test_maxunpool_export_with_output_shape_cpu',  #result mismatch
            '^test_resize_downsample_scales_cubic_align_corners_cpu',  # results mismatch with onnx tests
            '^test_resize_downsample_scales_linear_align_corners_cpu'  # results mismatch with onnx tests
        ]
        if platform.architecture()[0] == '32bit':
            current_failing_tests += [
                '^test_vgg19', '^test_zfnet512', '^test_bvlc_alexnet_cpu'
            ]
        # Example of how to disable tests for a specific provider.
        # if c2.supports_device('NGRAPH'):
        #    current_failing_tests.append('^test_operator_repeat_dim_overflow_cpu')
        if c2.supports_device('NGRAPH'):
            current_failing_tests += [
                '^test_clip.*', '^test_qlinearconv_cpu',
                '^test_depthtospace_crd.*', '^test_argmax_negative_axis.*',
                '^test_argmin_negative_axis.*',
                '^test_hardmax_negative_axis.*',
                '^test_gemm_default_no_bias_cpu',
                '^test_flatten_negative_axis.*',
                '^test_reduce_[a-z1-9_]*_negative_axes_.*',
                'test_squeeze_negative_axes_cpu',
                'test_unsqueeze_negative_axes_cpu', 'test_constant_pad_cpu',
                'test_edge_pad_cpu', 'test_reflect_pad_cpu',
                '^test_split_zero_size_splits_.*',
                '^test_argmax_keepdims_example_select_last_index_.*',
                '^test_argmax_no_keepdims_example_select_last_index_.*',
                '^test_argmin_no_keepdims_example_select_last_index_.*',
                '^test_argmin_keepdims_example_select_last_index_.*'
            ]

        if c2.supports_device('DNNL'):
            current_failing_tests += [
                '^test_range_float_type_positive_delta_expanded_cpu',
                '^test_range_int32_type_negative_delta_expanded_cpu',
                '^test_averagepool_2d_ceil_cpu', '^test_maxpool_2d_ceil_cpu',
                '^test_maxpool_2d_dilations_cpu', '^test_maxpool_2d_uint8'
            ]

        if c2.supports_device('NNAPI'):
            current_failing_tests += ['^test_maxpool_2d_uint8']

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests.append('^test_div_cpu')
            # Temporarily exclude the vgg19 test, which consumes too much memory and runs out of memory on the Upsquared device.
            # A single-test pass for vgg19 succeeds; needs further investigation.
            current_failing_tests.append('^test_vgg19_cpu')

        if c2.supports_device('OPENVINO_CPU_FP32'):
            current_failing_tests += [
                '^test_operator_permute2_cpu', '^test_operator_repeat_cpu',
                '^test_operator_repeat_dim_overflow_cpu'
            ]
        if c2.supports_device('OPENVINO_GPU_FP32'):
            current_failing_tests += [
                '^test_operator_permute2_cpu', '^test_operator_repeat_cpu',
                '^test_operator_repeat_dim_overflow_cpu',
                '^test_add_bcast_cpu', '^test_batchnorm_epsilon_cpu',
                '^test_div_bcast_cpu', '^test_mul_bcast_cpu',
                '^test_pow_bcast_array_cpu', '^test_sub_bcast_cpu',
                '^test_batchnorm_example_cpu',
                '^test_clip_default_inbounds_cpu',
                '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu',
                '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu',
                '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu',
                '^test_unique_not_sorted_without_axis_cpu'
            ]


        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters() + \
                  other_tests_failing_permanently_filters() + \
                  test_with_types_disabled_due_to_binary_size_concerns_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #6
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # read filters data
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'testdata',
                             'onnx_backend_test_series_filters.jsonc')) as f:
            filters_lines = f.readlines()
        filters_lines = [x.split('//')[0] for x in filters_lines]
        filters = json.loads('\n'.join(filters_lines))

        current_failing_tests = filters['current_failing_tests']

        if platform.architecture()[0] == '32bit':
            current_failing_tests += filters['current_failing_tests_x86']

        if c2.supports_device('NGRAPH'):
            current_failing_tests += filters['current_failing_tests_NGRAPH']

        if c2.supports_device('DNNL'):
            current_failing_tests += filters['current_failing_tests_DNNL']

        if c2.supports_device('NNAPI'):
            current_failing_tests += filters['current_failing_tests_NNAPI']

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU']

        if c2.supports_device('OPENVINO_MYRIAD'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU']
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_MYRIAD']

        if c2.supports_device('OPENVINO_CPU_FP32'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_CPU_FP32']

        if c2.supports_device('MIGRAPHX'):
            current_failing_tests += [
                '^test_constant_pad_cpu', '^test_softmax_axis_1_cpu',
                '^test_softmax_axis_0_cpu', '^test_softmax_default_axis_cpu',
                '^test_round_cpu', '^test_lrn_default_cpu', '^test_lrn_cpu',
                '^test_logsoftmax_axis_0_cpu', '^test_logsoftmax_axis_1_cpu',
                '^test_logsoftmax_default_axis_cpu',
                '^test_dynamicquantizelinear_expanded_cpu',
                '^test_dynamicquantizelinear_max_adjusted_cpu',
                '^test_dynamicquantizelinear_max_adjusted_expanded_cpu',
                '^test_dynamicquantizelinear_min_adjusted_cpu',
                '^test_dynamicquantizelinear_min_adjusted_expanded_cpu',
                '^test_range_float_type_positive_delta_expanded_cpu',
                '^test_range_int32_type_negative_delta_expanded_cpu',
                '^test_operator_symbolic_override_nested_cpu',
                '^test_negative_log_likelihood_loss',
                '^test_softmax_cross_entropy', '^test_greater_equal',
                '^test_less_equal'
            ]

        # Skip these tests for a "pure" DML onnxruntime python wheel. We keep these tests enabled for instances where both DML and CUDA
        # EPs are available (the Windows GPU CI pipeline has this config) - these tests will pass because CUDA has higher precedence than DML
        # and the nodes are assigned only to the CUDA EP (which supports these tests).
        if c2.supports_device('DML') and not c2.supports_device('GPU'):
            current_failing_tests += [
                '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu',
                '^test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu',
                '^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu',
                '^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu',
                '^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_cpu',
                '^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_expanded_cpu',
                '^test_asin_example_cpu',
                '^test_dynamicquantizelinear_expanded_cpu',
                '^test_resize_downsample_scales_linear_cpu',
                '^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu',
                '^test_resize_downsample_sizes_nearest_cpu',
                '^test_resize_upsample_sizes_nearest_cpu', '^test_roialign_cpu'
            ]

        filters = current_failing_tests + \
            filters['tests_with_pre_opset7_dependencies'] + \
            filters['unsupported_usages'] + \
            filters['failing_permanently'] + \
            filters['test_with_types_disabled_due_to_binary_size_concerns']

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #7
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r"(FLOAT16)")

    if testname:
        backend_test.include(testname + ".*")
    else:
        # read filters data
        with open(
                os.path.join(
                    os.path.dirname(os.path.realpath(__file__)),
                    "testdata",
                    "onnx_backend_test_series_filters.jsonc",
                )) as f:
            filters_lines = f.readlines()
        filters_lines = [x.split("//")[0] for x in filters_lines]
        filters = json.loads("\n".join(filters_lines))

        current_failing_tests = filters["current_failing_tests"]

        if platform.architecture()[0] == "32bit":
            current_failing_tests += filters["current_failing_tests_x86"]

        if c2.supports_device("DNNL"):
            current_failing_tests += filters["current_failing_tests_DNNL"]

        if c2.supports_device("NNAPI"):
            current_failing_tests += filters["current_failing_tests_NNAPI"]

        if c2.supports_device("OPENVINO_GPU_FP32") or c2.supports_device(
                "OPENVINO_GPU_FP16"):
            current_failing_tests += filters[
                "current_failing_tests_OPENVINO_GPU"]

        if c2.supports_device("OPENVINO_MYRIAD"):
            current_failing_tests += filters[
                "current_failing_tests_OPENVINO_GPU"]
            current_failing_tests += filters[
                "current_failing_tests_OPENVINO_MYRIAD"]

        if c2.supports_device("OPENVINO_CPU_FP32"):
            current_failing_tests += filters[
                "current_failing_tests_OPENVINO_CPU_FP32"]

        if c2.supports_device("MIGRAPHX"):
            current_failing_tests += [
                "^test_constant_pad_cpu",
                "^test_round_cpu",
                "^test_lrn_default_cpu",
                "^test_lrn_cpu",
                "^test_dynamicquantizelinear_expanded_cpu",
                "^test_dynamicquantizelinear_max_adjusted_cpu",
                "^test_dynamicquantizelinear_max_adjusted_expanded_cpu",
                "^test_dynamicquantizelinear_min_adjusted_cpu",
                "^test_dynamicquantizelinear_min_adjusted_expanded_cpu",
                "^test_range_float_type_positive_delta_expanded_cpu",
                "^test_range_int32_type_negative_delta_expanded_cpu",
                "^test_operator_symbolic_override_nested_cpu",
                "^test_negative_log_likelihood_loss",
                "^test_softmax_cross_entropy",
                "^test_greater_equal",
                "^test_if_seq_cpu",
                "^test_loop11_cpu",
                "^test_loop13_seq_cpu",
                "^test_sequence_insert_at_back_cpu",
                "^test_sequence_insert_at_front_cpu",
                "^test_nonmaxsuppression_two_classes_cpu",
                "^test_nonmaxsuppression_two_batches_cpu",
                "^test_nonmaxsuppression_suppress_by_IOU_cpu",
                "^test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu",
                "^test_nonmaxsuppression_limit_output_size_cpu",
                "^test_nonmaxsuppression_identical_boxes_cpu",
                "^test_nonmaxsuppression_flipped_coordinates_cpu",
                "^test_nonmaxsuppression_center_point_box_format_cpu",
            ]

        # Skip these tests for a "pure" DML onnxruntime python wheel. We keep these tests enabled for instances where both DML and CUDA
        # EPs are available (the Windows GPU CI pipeline has this config) - these tests will pass because CUDA has higher precedence than DML
        # and the nodes are assigned only to the CUDA EP (which supports these tests).
        if c2.supports_device("DML") and not c2.supports_device("GPU"):
            current_failing_tests += [
                "^test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu",
                "^test_negative_log_likelihood_loss_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu",
                "^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_cpu",
                "^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_expanded_cpu",
                "^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_cpu",
                "^test_softmax_cross_entropy_input_shape_is_NCd1d2d3_none_no_weight_negative_ignore_index_log_prob_expanded_cpu",
                "^test_asin_example_cpu",
                "^test_dynamicquantizelinear_cpu",
                "^test_dynamicquantizelinear_expanded_cpu",
                "^test_resize_downsample_scales_linear_cpu",
                "^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu",
                "^test_resize_downsample_sizes_nearest_cpu",
                "^test_resize_upsample_sizes_nearest_cpu",
                "^test_roialign_cpu",
            ]

        filters = (
            current_failing_tests +
            filters["tests_with_pre_opset7_dependencies"] +
            filters["unsupported_usages"] + filters["failing_permanently"] +
            filters["test_with_types_disabled_due_to_binary_size_concerns"])

        backend_test.exclude("(" + "|".join(filters) + ")")
        print("excluded tests:", filters)

        # exclude TRT EP temporarily and only test CUDA EP to retain previous behavior
        os.environ[
            "ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider"

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
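
The generated test cases only run once unittest picks them up from the module globals. A hypothetical driver is sketched below; the argument name and script layout are assumptions, not taken from the examples themselves.

import argparse
import unittest

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Run the ONNX backend test series against onnxruntime.')
    parser.add_argument('-t', '--test-name', default=None,
                        help='only run tests whose names start with this prefix')
    args, remaining = parser.parse_known_args()

    # create_backend_test() injects the generated TestCase classes into globals(),
    # so unittest discovers them like any other locally defined tests.
    create_backend_test(args.test_name)
    unittest.main(argv=[__file__] + remaining)
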
Example #8
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = (
            '^test_cast_STRING_to_FLOAT_cpu.*',
            '^test_cast_FLOAT_to_STRING_cpu.*',
            '^test_qlinearconv_cpu.*',
            '^test_gru_seq_length_cpu.*',
            '^test_bitshift_right_uint16_cpu.*',
            '^test_bitshift_right_uint32_cpu.*',
            '^test_bitshift_right_uint64_cpu.*',
            '^test_bitshift_right_uint8_cpu.*',
            '^test_bitshift_left_uint16_cpu.*',
            '^test_bitshift_left_uint32_cpu.*',
            '^test_bitshift_left_uint64_cpu.*',
            '^test_bitshift_left_uint8_cpu.*',
            '^test_round_cpu.*',
            '^test_cumsum_1d_cpu.*',
            '^test_cumsum_1d_exclusive_cpu.*',
            '^test_cumsum_1d_reverse_cpu.*',
            '^test_cumsum_1d_reverse_exclusive_cpu.*',
            '^test_cumsum_2d_axis_0_cpu.*',
            '^test_cumsum_2d_axis_1_cpu.*',
            '^test_cumsum_2d_negative_axis_cpu.*',
            '^test_dynamicquantizelinear_expanded*',
            '^test_dynamicquantizelinear_max_adjusted_expanded*',
            '^test_dynamicquantizelinear_min_adjusted_expanded*',
            '^test_gather_elements*',
            '^test_scatter_elements*',
            '^test_top_k*',
            '^test_unique_*',
            '^test_mod_float_mixed_sign_example_cpu.*',  #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types
            '^test_shrink_cpu.*',  #Invalid rank for input: x Got: 1 Expected: 2 Please fix either the inputs or the model.
            '^test_range_float_type_positive_delta_cpu.*',
            '^test_range_float_type_positive_delta_expanded_cpu.*',
            '^test_range_int32_type_negative_delta_cpu.*',
            '^test_range_int32_type_negative_delta_expanded_cpu.*',
            '^test_det_2d_cpu.*',
            '^test_det_nd_cpu.*',
            '^test_gathernd_example_float32_cpu.*',
            '^test_gathernd_example_int32_cpu.*',
            '^test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu.*',
            '^test_resize_downsample_scales_cubic_align_corners_cpu.*',
            '^test_resize_downsample_scales_cubic_cpu.*',
            '^test_resize_downsample_scales_linear_align_corners_cpu.*',
            '^test_resize_downsample_scales_linear_cpu.*',
            '^test_resize_downsample_scales_nearest_cpu.*',
            '^test_resize_downsample_sizes_cubic_cpu.*',
            '^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu.*',
            '^test_resize_downsample_sizes_nearest_cpu.*',
            '^test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu.*',
            '^test_resize_tf_crop_and_resize_cpu.*',
            '^test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu.*',
            '^test_resize_upsample_scales_cubic_align_corners_cpu.*',
            '^test_resize_upsample_scales_cubic_asymmetric_cpu.*',
            '^test_resize_upsample_scales_cubic_cpu.*',
            '^test_resize_upsample_scales_linear_align_corners_cpu.*',
            '^test_resize_upsample_scales_linear_cpu.*',
            '^test_resize_upsample_scales_nearest_cpu.*',
            '^test_resize_upsample_sizes_cubic_cpu.*',
            '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu.*',
            '^test_resize_upsample_sizes_nearest_cpu.*',
            '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu.*',
            '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu.*',
            '^test_scatternd_cpu.*',
            '^test_sequence_*',
            '^test_unsqueeze_*',
            '^test_squeeze_*',
            '^test_slice_*',
            '^test_scatter_*',
            '^test_reduce_*',
            '^test_onehot_*',
            '^test_flatten_*',
            '^test_concat_*',
            '^test_compress_*',
            '^test_constant_pad_cpu.*',
            '^test_gemm_default_scalar_bias_cpu.*',
            '^test_gather_negative_indices_cpu.*',
            '^test_gemm_*',
            '^test_edge_pad_cpu.*',
            '^test_reflect_pad_cpu.*')

        # Example of how to disable tests for a specific provider.
        # if c2.supports_device('NGRAPH'):
        #    current_failing_tests = current_failing_tests + ('|^test_operator_repeat_dim_overflow_cpu.*',)
        if c2.supports_device('NGRAPH'):
            current_failing_tests = current_failing_tests + ('|^test_clip*', )
            current_failing_tests = current_failing_tests + (
                '|^test_depthtospace_crd*', )
            current_failing_tests = current_failing_tests + (
                '|^test_argmax_negative_axis*', )
            current_failing_tests = current_failing_tests + (
                '|^test_argmin_negative_axis*', )
            current_failing_tests = current_failing_tests + (
                '|^test_hardmax_negative_axis*', )
            current_failing_tests = current_failing_tests + (
                '|^test_gemm_default_no_bias_cpu.*', )

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests = current_failing_tests + (
                '^test_div_cpu*', )

        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
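
Which of the c2.supports_device branches fire depends entirely on how the installed onnxruntime wheel was built. Assuming c2 is onnxruntime's ONNX backend module (as these examples suggest), availability can be probed directly; the device names below are just the ones used in the examples.

import onnxruntime
import onnxruntime.backend as backend

print('available providers:', onnxruntime.get_available_providers())

for device in ('CPU', 'GPU', 'NGRAPH', 'DNNL', 'NNAPI', 'OPENVINO_CPU_FP32'):
    print(device, '->', backend.supports_device(device))
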
Example #9
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # read filters data
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'testdata',
                             'onnx_backend_test_series_filters.jsonc')) as f:
            filters_lines = f.readlines()
        filters_lines = [x.split('//')[0] for x in filters_lines]
        filters = json.loads('\n'.join(filters_lines))

        current_failing_tests = filters['current_failing_tests']

        if platform.architecture()[0] == '32bit':
            current_failing_tests += filters['current_failing_tests_x86']

        if c2.supports_device('NGRAPH'):
            current_failing_tests += filters['current_failing_tests_NGRAPH']

        if c2.supports_device('DNNL'):
            current_failing_tests += filters['current_failing_tests_DNNL']

        if c2.supports_device('NNAPI'):
            current_failing_tests += filters['current_failing_tests_NNAPI']

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device(
                'OPENVINO_GPU_FP16'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU']

        if c2.supports_device('OPENVINO_GPU_FP32'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_GPU_FP32']

        if c2.supports_device('OPENVINO_CPU_FP32'):
            current_failing_tests += filters[
                'current_failing_tests_OPENVINO_CPU_FP32']

        if c2.supports_device('MIGRAPHX'):
            current_failing_tests += [
                '^test_constant_pad_cpu', '^test_softmax_axis_1_cpu',
                '^test_softmax_axis_0_cpu', '^test_softmax_default_axis_cpu',
                '^test_round_cpu', '^test_lrn_default_cpu', '^test_lrn_cpu',
                '^test_logsoftmax_axis_0_cpu', '^test_logsoftmax_axis_1_cpu',
                '^test_logsoftmax_default_axis_cpu',
                '^test_dynamicquantizelinear_expanded_cpu',
                '^test_dynamicquantizelinear_max_adjusted_cpu',
                '^test_dynamicquantizelinear_max_adjusted_expanded_cpu',
                '^test_dynamicquantizelinear_min_adjusted_cpu',
                '^test_dynamicquantizelinear_min_adjusted_expanded_cpu',
                '^test_range_float_type_positive_delta_expanded_cpu',
                '^test_range_int32_type_negative_delta_expanded_cpu',
                '^test_operator_symbolic_override_nested_cpu'
            ]

        filters = current_failing_tests + \
            filters['tests_with_pre_opset7_dependencies'] + \
            filters['unsupported_usages'] + \
            filters['failing_permanently'] + \
            filters['test_with_types_disabled_due_to_binary_size_concerns']

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #10
def create_backend_test(testname=None):
    backend_test = OrtBackendTest(c2, __name__)

    # Type not supported
    backend_test.exclude(r'(FLOAT16)')

    if testname:
        backend_test.include(testname + '.*')
    else:
        # Tests that are failing temporarily and should be fixed
        current_failing_tests = [#'^test_cast_STRING_to_FLOAT_cpu',  # old test data that is bad on Linux CI builds
                                 '^test_qlinearconv_cpu',
                                 '^test_gru_seq_length_cpu',
                                 '^test_dynamicquantizelinear_expanded.*',
                                 '^test_dynamicquantizelinear_max_adjusted_expanded.*',
                                 '^test_dynamicquantizelinear_min_adjusted_expanded.*',
                                 '^test_top_k.*',
                                 '^test_unique_not_sorted_without_axis_cpu', # bad expected data. enable after https://github.com/onnx/onnx/pull/2381 is picked up
                                 '^test_mod_float_mixed_sign_example_cpu', #onnxruntime::Mod::Compute fmod_ was false. fmod attribute must be true for float, float16 and double types
                                 '^test_shrink_cpu', #Invalid rank for input: x Got: 1 Expected: 2 Please fix either the inputs or the model.
                                 '^test_range_float_type_positive_delta_cpu',
                                 '^test_range_float_type_positive_delta_expanded_cpu',
                                 '^test_range_int32_type_negative_delta_cpu',
                                 '^test_range_int32_type_negative_delta_expanded_cpu',
                                 '^test_det_2d_cpu',
                                 '^test_det_nd_cpu',
                                 '^test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu',
                                 '^test_resize_downsample_scales_cubic_align_corners_cpu',
                                 '^test_resize_downsample_scales_cubic_cpu',
                                 '^test_resize_downsample_scales_linear_align_corners_cpu',
                                 '^test_resize_downsample_scales_linear_cpu',
                                 '^test_resize_downsample_scales_nearest_cpu',
                                 '^test_resize_downsample_sizes_cubic_cpu',
                                 '^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu',
                                 '^test_resize_downsample_sizes_nearest_cpu',
                                 '^test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu',
                                 '^test_resize_tf_crop_and_resize_cpu',
                                 '^test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu',
                                 '^test_resize_upsample_scales_cubic_align_corners_cpu',
                                 '^test_resize_upsample_scales_cubic_asymmetric_cpu',
                                 '^test_resize_upsample_scales_cubic_cpu',
                                 '^test_resize_upsample_scales_linear_align_corners_cpu',
                                 '^test_resize_upsample_scales_linear_cpu',
                                 '^test_resize_upsample_scales_nearest_cpu',
                                 '^test_resize_upsample_sizes_cubic_cpu',
                                 '^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu',
                                 '^test_resize_upsample_sizes_nearest_cpu',
                                 '^test_resize_upsample_sizes_nearest_floor_align_corners_cpu',
                                 '^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu',
                                 '^test_sequence_*',
                                 '^test_scatter_.*',
                                 '^test_onehot_.*',
                                 '^test_edge_pad_cpu.*',  # test data type `int32_t` not supported yet, the `float` equivalent is covered via unit tests
                                 '^test_reflect_pad_cpu.*'  # test data type `int32_t` not supported yet, the `float` equivalent is covered via unit tests
        ]

        # Example of how to disable tests for a specific provider.
        # if c2.supports_device('NGRAPH'):
        #    current_failing_tests.append('^test_operator_repeat_dim_overflow_cpu')
        if c2.supports_device('NGRAPH'):
            current_failing_tests += ['^test_clip.*',
                                      '^test_depthtospace_crd.*',
                                      '^test_argmax_negative_axis.*',
                                      '^test_argmin_negative_axis.*',
                                      '^test_hardmax_negative_axis.*',
                                      '^test_gemm_default_no_bias_cpu',
                                      '^test_flatten_negative_axis.*',
                                      '^test_reduce_[a-z1-9_]*_negative_axes_.*',
                                      'test_squeeze_negative_axes_cpu',
                                      'test_unsqueeze_negative_axes_cpu',
                                      'test_constant_pad_cpu']

        if c2.supports_device('OPENVINO_GPU_FP32') or c2.supports_device('OPENVINO_GPU_FP16'):
            current_failing_tests.append('^test_div_cpu*')

        filters = current_failing_tests + \
                  tests_with_pre_opset7_dependencies_filters() + \
                  unsupported_usages_filters() + \
                  other_tests_failing_permanently_filters() + \
                  test_with_types_disabled_due_to_binary_size_concerns_filters()

        backend_test.exclude('(' + '|'.join(filters) + ')')
        print('excluded tests:', filters)

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
Example #11
def create_backend_test(test_name=None):
    """Creates an OrtBackendTest and adds its TestCase's to global scope so unittest will find them."""

    overrides = load_jsonc("onnx_backend_test_series_overrides.jsonc")
    rtol_default = overrides["rtol_default"]
    atol_default = overrides["atol_default"]
    rtol_overrides = collections.defaultdict(lambda: rtol_default)
    rtol_overrides.update(overrides["rtol_overrides"])
    atol_overrides = collections.defaultdict(lambda: atol_default)
    atol_overrides.update(overrides["atol_overrides"])

    backend_test = OrtBackendTest(rtol_overrides, atol_overrides)

    # Type not supported
    backend_test.exclude(r"(FLOAT16)")

    if test_name:
        backend_test.include(test_name + ".*")
    else:
        filters = load_jsonc("onnx_backend_test_series_filters.jsonc")
        current_failing_tests = apply_filters(filters, "current_failing_tests")

        if platform.architecture()[0] == "32bit":
            current_failing_tests += apply_filters(filters, "current_failing_tests_x86")

        if backend.supports_device("DNNL"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_DNNL")

        if backend.supports_device("NNAPI"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_NNAPI")

        if backend.supports_device("OPENVINO_GPU_FP32") or backend.supports_device("OPENVINO_GPU_FP16"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_GPU")

        if backend.supports_device("OPENVINO_MYRIAD"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_GPU")
            current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_MYRIAD")

        if backend.supports_device("OPENVINO_CPU_FP32"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_OPENVINO_CPU_FP32")

        if backend.supports_device("MIGRAPHX"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_MIGRAPHX")

        # Skip these tests for a "pure" DML onnxruntime python wheel. We keep these tests enabled for instances where both DML and CUDA
        # EPs are available (the Windows GPU CI pipeline has this config) - these tests will pass because CUDA has higher precedence than DML
        # and the nodes are assigned to only the CUDA EP (which supports these tests)
        if backend.supports_device("DML") and not backend.supports_device("GPU"):
            current_failing_tests += apply_filters(filters, "current_failing_tests_pure_DML")

        filters = (
            current_failing_tests
            + apply_filters(filters, "tests_with_pre_opset7_dependencies")
            + apply_filters(filters, "unsupported_usages")
            + apply_filters(filters, "failing_permanently")
            + apply_filters(filters, "test_with_types_disabled_due_to_binary_size_concerns")
        )

        backend_test.exclude("(" + "|".join(filters) + ")")
        print("excluded tests:", filters)

        # exclude TRT EP temporarily and only test CUDA EP to retain previous behavior
        os.environ["ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS"] = "TensorrtExecutionProvider"

    # import all test cases at global scope to make
    # them visible to python.unittest.
    globals().update(backend_test.enable_report().test_cases)

    return backend_test
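
The last example calls load_jsonc and apply_filters helpers that are not shown here. A rough sketch of what they might do is given below, assuming the .jsonc files sit in a testdata directory next to the script; the real helpers in onnxruntime may differ (for example, by expanding opset-version placeholders in the patterns).

import json
import os


def load_jsonc(basename):
    # Assumed helper: read a JSON-with-comments file from ./testdata and strip '//' comments.
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata", basename)
    with open(path, encoding="utf-8") as f:
        lines = [line.split("//")[0] for line in f]
    return json.loads("\n".join(lines))


def apply_filters(filters, category):
    # Assumed helper: return the list of test-name patterns registered under one filter group.
    return list(filters.get(category, []))
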