# Imports assumed for these MindSpore dataset transform tests
# (aliases as used below; exact module paths may vary by MindSpore version):
import numpy as np
import pytest

import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as ops
import mindspore.dataset.transforms.c_transforms as data_trans
import mindspore.dataset.transforms.py_transforms as py_ops
import mindspore.dataset.vision.c_transforms as visions


def test_random_select_subpolicy():
    """
    Test RandomSelectSubpolicy op
    """
    ds.config.set_seed(0)

    def test_config(arr, policy):
        try:
            data = ds.NumpySlicesDataset(arr,
                                         column_names="col",
                                         shuffle=False)
            data = data.map(operations=visions.RandomSelectSubpolicy(policy),
                            input_columns=["col"])
            res = []
            for i in data.create_dict_iterator(num_epochs=1,
                                               output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # 3 possible outcomes
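    # RandomSelectSubpolicy picks one subpolicy at random, then applies each (op, prob)
    # pair in it with probability prob. For input [1, 2, 3]:
    #   subpolicy 1: PadEnd (p=0.5) -> maybe [1, 2, 3, 0]; Duplicate+Concatenate (p=1)
    #                -> [1, 2, 3, 1, 2, 3] or [1, 2, 3, 0, 1, 2, 3, 0]
    #   subpolicy 2: Slice (p=0.5) -> maybe [1, 2]; Duplicate and Concatenate (p=1)
    #                -> [1, 2, 1, 2] or [1, 2, 3, 1, 2, 3]
    # which yields the three distinct outcomes asserted below.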
    policy1 = [[(ops.PadEnd([4], 0), 0.5),
                (ops.Compose([ops.Duplicate(),
                              ops.Concatenate()]), 1)],
               [(ops.Slice([0, 1]), 0.5), (ops.Duplicate(), 1),
                (ops.Concatenate(), 1)]]
    res1 = test_config([[1, 2, 3]], policy1)
    assert res1 in [[[1, 2, 1, 2]], [[1, 2, 3, 1, 2, 3]],
                    [[1, 2, 3, 0, 1, 2, 3, 0]]]

    # test exceptions
    assert "policy can not be empty." in test_config([[1, 2, 3]], [])
    assert "policy[0] can not be empty." in test_config([[1, 2, 3]], [[]])
    assert "op of (op, prob) in policy[1][0] is neither a c_transform op (TensorOperation) nor a callable pyfunc" \
           in test_config([[1, 2, 3]], [[(ops.PadEnd([4], 0), 0.5)], [(1, 0.4)]])
    assert "prob of (op, prob) policy[1][0] is not within the required interval of [0, 1]" in test_config(
        [[1]], [[(ops.Duplicate(), 0)], [(ops.Duplicate(), -0.1)]])


def test_random_choice():
    """
    Test RandomChoice op
    """
    ds.config.set_seed(0)

    def test_config(arr, op_list):
        try:
            data = ds.NumpySlicesDataset(arr,
                                         column_names="col",
                                         shuffle=False)
            data = data.map(operations=ops.RandomChoice(op_list),
                            input_columns=["col"])
            res = []
            for i in data.create_dict_iterator(num_epochs=1,
                                               output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # Test whether an operation would be randomly chosen.
    # In order to prevent random failure, both results need to be checked.
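    # RandomChoice applies exactly one op from the list per sample, so the result is
    # either the PadEnd output [0, 1, 2, 0] or the Slice output [0, 2].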
    res1 = test_config([[0, 1, 2]], [ops.PadEnd([4], 0), ops.Slice([0, 2])])
    assert res1 in [[[0, 1, 2, 0]], [[0, 2]]]

    # Test nested structure
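    # One of the two Compose pipelines is applied to [0, 1, 2]:
    #   Duplicate + Concatenate   -> [0, 1, 2, 0, 1, 2]
    #   Slice([0, 1]) + OneHot(2) -> [[1, 0], [0, 1]]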
    res2 = test_config([[0, 1, 2]], [
        ops.Compose([ops.Duplicate(), ops.Concatenate()]),
        ops.Compose([ops.Slice([0, 1]), ops.OneHot(2)])
    ])
    assert res2 in [[[[1, 0], [0, 1]]], [[0, 1, 2, 0, 1, 2]]]
    # Test RandomChoice where there is only 1 operation
    assert test_config([[4, 3], [2, 1]], [ops.Slice([0])]) == [[4], [2]]


def test_compose():
    """
    Test C++ and Python Compose Op
    """
    ds.config.set_seed(0)

    def test_config(arr, op_list):
        try:
            data = ds.NumpySlicesDataset(arr,
                                         column_names="col",
                                         shuffle=False)
            data = data.map(input_columns=["col"], operations=op_list)
            res = []
            for i in data.create_dict_iterator(output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # Test simple compose with only 1 op, this would generate a warning
    assert test_config([[1, 0], [3, 4]],
                       ops.Compose([ops.Fill(2)])) == [[2, 2], [2, 2]]
    # Test 1 column -> 2 columns -> 1 -> 2 -> 1
    assert test_config([[1, 0]],
                       ops.Compose([ops.Duplicate(), ops.Concatenate(), ops.Duplicate(), ops.Concatenate()])) \
           == [[1, 0] * 4]
    # Test one Python transform followed by a C transform. Type after OneHot is a float (mixed use-case)
    assert test_config(
        [1, 0], ops.Compose([py_ops.OneHotOp(2),
                             ops.TypeCast(mstype.int32)])) == [[[0, 1]],
                                                               [[1, 0]]]
    # Test exceptions.
    with pytest.raises(TypeError) as error_info:
        ops.Compose([1, ops.TypeCast(mstype.int32)])
    assert "op_list[0] is not a c_transform op (TensorOp) nor a callable pyfunc." in str(
        error_info.value)
    # Test empty op list
    with pytest.raises(ValueError) as error_info:
        test_config([1, 0], ops.Compose([]))
    assert "op_list can not be empty." in str(error_info.value)

    # Test Python compose op
    assert test_config([1, 0],
                       py_ops.Compose([py_ops.OneHotOp(2)])) == [[[0, 1]],
                                                                 [[1, 0]]]
    assert test_config([1, 0],
                       py_ops.Compose([py_ops.OneHotOp(2),
                                       (lambda x: x + x)])) == [[[0, 2]],
                                                                [[2, 0]]]
    # Test nested Python compose op
    assert test_config([1, 0],
                       py_ops.Compose([py_ops.Compose([py_ops.OneHotOp(2)]), (lambda x: x + x)])) \
           == [[[0, 2]], [[2, 0]]]

    with pytest.raises(TypeError) as error_info:
        py_ops.Compose([(lambda x: x + x)])()
    assert "Compose was called without an image. Fix invocation (avoid it being invoked as Compose([...])())." in str(
        error_info.value)


def test_eager_concatenate():
    """
    Test Concatenate op is callable
    """
    prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float64)
    append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float64)
    concatenate_op = data_trans.Concatenate(0, prepend_tensor, append_tensor)
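    # Eager use: the op is called directly on the data instead of inside a dataset pipeline.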
    expected = np.array(
        [1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3, 11., 12.])
    assert np.array_equal(concatenate_op([5., 6., 7., 8.]), expected)


def test_concatenate_op_none():
    """
    Test Concatenate op with no prepend/append tensors (output equals input)
    """
    def gen():
        yield (np.array([5., 6., 7., 8.], dtype=np.float64),)

    data = ds.GeneratorDataset(gen, column_names=["col"])
    concatenate_op = data_trans.Concatenate()

    data = data.map(input_columns=["col"], operations=concatenate_op)
    for data_row in data:
        np.testing.assert_array_equal(data_row[0], np.array([5., 6., 7., 8.], dtype=np.float64))


def test_concatenate_op_multi_input_numeric():
    """
    Test Concatenate op with multiple input columns and a prepend tensor
    """
    prepend_tensor = np.array([3, 5])

    data = ([[1, 2]], [[3, 4]])
    data = ds.NumpySlicesDataset(data, column_names=["col1", "col2"])

    concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor)
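    # With two input columns mapped to one output column, Concatenate joins them along
    # axis 0 and places the prepend tensor at the front: [3, 5] + [1, 2] + [3, 4].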

    data = data.map(input_columns=["col1", "col2"], columns_order=["out1"], output_columns=["out1"],
                    operations=concatenate_op)
    expected = np.array([3, 5, 1, 2, 3, 4])
    for data_row in data:
        np.testing.assert_array_equal(data_row[0], expected)


def test_concatenate_op_negative_axis():
    """
    Test Concatenate op with a negative axis
    """
    def gen():
        yield (np.array([5., 6., 7., 8.], dtype=np.float64),)

    prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float64)
    append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float64)
    data = ds.GeneratorDataset(gen, column_names=["col"])
    concatenate_op = data_trans.Concatenate(-1, prepend_tensor, append_tensor)
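    # For a 1-D input, axis=-1 addresses the same (only) axis as axis=0,
    # so the result matches the default-axis case.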
    data = data.map(operations=concatenate_op, input_columns=["col"])
    expected = np.array([1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3,
                         11., 12.])
    for data_row in data.create_tuple_iterator(output_numpy=True):
        np.testing.assert_array_equal(data_row[0], expected)


def test_concatenate_op_multi_input_string():
    """
    Test Concatenate op with multiple string input columns and prepend/append tensors
    """
    prepend_tensor = np.array(["dw", "df"], dtype='S')
    append_tensor = np.array(["dwsdf", "df"], dtype='S')

    data = ([["1", "2", "d"]], [["3", "4", "e"]])
    data = ds.NumpySlicesDataset(data, column_names=["col1", "col2"])

    concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor, append=append_tensor)

    data = data.map(input_columns=["col1", "col2"], columns_order=["out1"], output_columns=["out1"],
                    operations=concatenate_op)
    expected = np.array(["dw", "df", "1", "2", "d", "3", "4", "e", "dwsdf", "df"], dtype='S')
    for data_row in data:
        np.testing.assert_array_equal(data_row[0], expected)


def test_compose():
    """
    Test Compose op (validator shared with RandomApply and RandomChoice)
    """
    ds.config.set_seed(0)

    def test_config(arr, op_list):
        try:
            data = ds.NumpySlicesDataset(arr,
                                         column_names="col",
                                         shuffle=False)
            data = data.map(operations=ops.Compose(op_list),
                            input_columns=["col"])
            res = []
            for i in data.create_dict_iterator(num_epochs=1,
                                               output_numpy=True):
                res.append(i["col"].tolist())
            return res
        except (TypeError, ValueError) as e:
            return str(e)

    # test simple compose with only 1 op, this would generate a warning
    assert test_config([[1, 0], [3, 4]], [ops.Fill(2)]) == [[2, 2], [2, 2]]
    # test 1 column -> 2 columns -> 1 -> 2 -> 1
    assert test_config([[1, 0]], [
        ops.Duplicate(),
        ops.Concatenate(),
        ops.Duplicate(),
        ops.Concatenate()
    ]) == [[1, 0] * 4]
    # test one Python transform followed by a C transform. Type after OneHot is float (mixed use-case)
    assert test_config(
        [1, 0],
        [py_ops.OneHotOp(2), ops.TypeCast(mstype.int32)]) == [[[0, 1]],
                                                              [[1, 0]]]
    # test exceptions. Compose, RandomApply, and RandomChoice use the same validator
    assert "op_list[0] is not a c_transform op" in test_config(
        [1, 0], [1, ops.TypeCast(mstype.int32)])
    # test empty op list
    assert "op_list can not be empty." in test_config([1, 0], [])