Example #1
def factory(source: st.DataObject) -> None:
    with type_vars():
        with pure_functions():
            with container_strategies(container_type, settings=settings):
                source.draw(st.builds(law.definition))
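In every example here, the `st.DataObject` argument is supplied by Hypothesis's `st.data()` strategy; the `@given` decorators were presumably stripped from these excerpts. A minimal, self-contained sketch of that driving pattern (a hypothetical test, not from any of the source projects):

from hypothesis import given, strategies as st

@given(source=st.data())
def test_interactive_draw(source: st.DataObject) -> None:
    # st.data() defers drawing into the test body; labels improve failure reports
    value = source.draw(st.integers(0, 10), label="value")
    assert 0 <= value <= 10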
Example #2
        def wrapper(shapes: hnp.BroadcastableShapes, data: st.DataObject):
            self.index_to_arr_shapes.update((k, v) for k, v in zip(
                sorted(self.missing_shapes), shapes.input_shapes))

            # list of drawn arrays to feed to functions
            arrs = data.draw(
                st.tuples(*(self.arrays(i).map(Tensor)
                            for i in range(self.num_arrays))),
                label="arrays",
            )

            arr_copies = tuple(copy(arr) for arr in arrs)

            if callable(self.kwargs):
                kwargs = data.draw(self.kwargs(*arrs), label="kwargs")
                if not isinstance(kwargs, dict):
                    raise TypeError(
                        "`kwargs` was a search strategy. This needs to draw dictionaries,"
                        "instead drew: {}".format(kwargs))
            else:
                # The keyword args to be passed to `self.op`. If any provided argument is callable
                # it is assumed to be a hypothesis search strategy, and all of the drawn arrays will
                # be passed to the strategy, in order to draw a value for that keyword argument.
                # Otherwise the provided value is used as-is.
                kwargs = {
                    k: (data.draw(v(*arrs), label="kwarg: {}".format(k))
                        if callable(v) else v)
                    for k, v in self.kwargs.items()
                }

            if self.assumptions is not None:
                assume(self.assumptions(*arrs, **kwargs))

            # ensure the arrays don't contain forbidden values
            for i, arr in enumerate(arrs):
                for value in self.index_to_no_go.get(i, ()):
                    assume(np.all(arr != value))

            # forward pass of the function
            out = self.op(*arrs, **kwargs)

            # gradient to be backpropped through this operation
            grad = data.draw(
                hnp.arrays(
                    shape=out.shape,
                    dtype=float,
                    elements=st.floats(-10, 10),
                    unique=True,
                ),
                label="grad",
            )
            grad_copy = copy(grad)  # keep a copy to check for later mutations

            # compute analytic derivatives via mygrad-backprop
            if any(out.shape != i.shape for i in arrs):
                # Broadcasting occurred
                # Must reduce `out` to scalar
                # first multiply by `grad` to simulate non-trivial back-prop
                (grad * out).sum().backward()
            else:
                out.backward(grad)

            if not self.finite_difference:
                # compute derivatives via numerical approximation of derivative
                # using the complex-step method
                numerical_grad = (numerical_gradient_full
                                  if self.vary_each_element else
                                  numerical_gradient)

            else:
                numerical_grad = finite_difference
            grads_numerical = numerical_grad(self.true_func,
                                             *(i.data for i in arrs),
                                             back_grad=grad,
                                             kwargs=kwargs)

            # check that the analytic and numeric derivatives match
            for n, (arr, d_num) in enumerate(zip(arrs, grads_numerical)):
                assert_allclose(
                    arr.grad,
                    d_num,
                    **self.tolerances,
                    err_msg=("arr-{}: mygrad derivative and numerical "
                             "derivative do not match").format(n),
                )

                # check that none of the set derivatives is a view of `grad`
                assert not np.shares_memory(
                    arr.grad,
                    grad), "arr-{}.grad stores a view of grad".format(n)

            # check that none of the set derivatives are views of one another
            for arr_i, arr_j in combinations(arrs, 2):
                assert not np.shares_memory(
                    arr_i.grad, arr_j.grad
                ), "two input arrays were propagated views of the same gradient"

            # verify that null_gradients works
            out.null_gradients()
            assert all(i.grad is None for i in arrs), "null_gradients failed"

            # check if any of the input-arrays were mutated
            for n, (arr, arr_copy) in enumerate(zip(arrs, arr_copies)):
                assert_array_equal(
                    arr.data,
                    arr_copy.data,
                    err_msg="arr-{} was mutated during backward prop".format(
                        n),
                )

            # check if `grad` was mutated
            assert_array_equal(
                grad,
                grad_copy,
                err_msg="`grad` was mutated during backward prop")
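The callable-kwargs convention documented in the example above can be sketched with a hypothetical configuration (names illustrative only): callable values act as strategy factories that receive the drawn arrays, while plain values pass through as-is.

from hypothesis import strategies as st

kwargs = {
    # callable: receives the drawn arrays, returns a strategy to draw from
    "axis": lambda *arrs: st.integers(0, max(arrs[0].ndim - 1, 0)),
    # non-callable: forwarded unchanged to the op
    "keepdims": True,
}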
Example #3
def test_set_color(plotter: LivePlot, colors: dict, data: st.DataObject):
    metric = data.draw(st.sampled_from(plotter.metrics), label="metric")
    plotter.metric_colors = {metric: colors}
    assert plotter.metric_colors[metric] == colors
Example #4
def test_is_invalid_gradient(grad, is_invalid, data: st.DataObject):
    if isinstance(grad, st.SearchStrategy):
        grad = data.draw(grad, label="grad")

    assert is_invalid_gradient(grad) is is_invalid, grad
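This guard lets a single parametrization mix concrete values with search strategies, drawing the latter inside the test body. A sketch of the presumed driver (decorators assumed, not taken from the source):

import pytest
from hypothesis import given, strategies as st

@pytest.mark.parametrize(
    "grad, is_invalid",
    [(None, False), (st.just("not-a-gradient"), True)],  # plain value vs. strategy
)
@given(data=st.data())
def test_sketch(grad, is_invalid, data: st.DataObject):
    if isinstance(grad, st.SearchStrategy):
        grad = data.draw(grad, label="grad")
    assert isinstance(is_invalid, bool)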
Example #5
        def wrapper(shapes: hnp.BroadcastableShapes, constant,
                    data: st.DataObject):
            self.index_to_arr_shapes.update((k, v) for k, v in zip(
                sorted(self.missing_shapes), shapes.input_shapes))

            # list of drawn arrays to feed to functions
            arrs = data.draw(
                st.tuples(*(self.arrays(i) for i in range(self.num_arrays))),
                label="arrays",
            )

            # list of array-copies to check for mutation
            arr_copies = tuple(copy(arr) for arr in arrs)

            if callable(self.kwargs):
                kwargs = data.draw(self.kwargs(*arrs))
                if not isinstance(kwargs, dict):
                    raise TypeError(
                        "`kwargs` was a search strategy. This needs to draw dictionaries,"
                        "instead drew: {}".format(kwargs))
            else:
                # set or draw keyword args to be passed to functions
                kwargs = {
                    k: (data.draw(v(*arrs), label="kwarg: {}".format(k))
                        if callable(v) else v)
                    for k, v in self.kwargs.items()
                }

            if self.assumptions is not None:
                assume(self.assumptions(*arrs, **kwargs))

            # ensure the arrays don't contain forbidden values
            for i, arr in enumerate(arrs):
                for value in self.index_to_no_go.get(i, ()):
                    assume(np.all(arr != value))

            # execute mygrad and "true" functions. Compare outputs and check mygrad behavior
            o = self.op(*(Tensor(i) for i in arrs),
                        **kwargs,
                        constant=constant)
            tensor_out = o.data
            true_out = self.true_func(*arrs, **kwargs)

            assert isinstance(
                o, Tensor
            ), "`mygrad_func` returned type {}, should return `mygrad.Tensor`".format(
                type(o))
            assert (
                o.constant is constant
            ), "`mygrad_func` returned tensor.constant={}, should be constant={}".format(
                o.constant, constant)

            assert_allclose(
                actual=tensor_out,
                desired=true_out,
                err_msg="`mygrad_func(x)` and `true_func(x)` produce different results",
                **self.tolerances,
            )

            for n, (arr, arr_copy) in enumerate(zip(arrs, arr_copies)):
                assert_array_equal(
                    arr,
                    arr_copy,
                    err_msg="arr-{} was mutated during forward prop".format(n),
                )
Example #6
def test_properties(data: DataObject,
                    coordinates_limits_type_pair: Tuple[ScalarsLimitsType,
                                                        ScalarsLimitsType],
                    sizes_pair: SizesPair, border_sizes_pair: SizesPair,
                    holes_sizes_pair: SizesPair,
                    hole_sizes_pair: SizesPair) -> None:
    (x_coordinates_limits_type,
     y_coordinates_limits_type) = coordinates_limits_type_pair
    ((x_coordinates, (min_x_value, max_x_value)),
     x_type) = x_coordinates_limits_type
    ((y_coordinates, (min_y_value, max_y_value)),
     y_type) = y_coordinates_limits_type
    min_size, max_size = sizes_pair
    min_border_size, max_border_size = border_sizes_pair
    min_holes_size, max_holes_size = holes_sizes_pair
    min_hole_size, max_hole_size = hole_sizes_pair

    strategy = multipolygons(x_coordinates,
                             y_coordinates,
                             min_size=min_size,
                             max_size=max_size,
                             min_border_size=min_border_size,
                             max_border_size=max_border_size,
                             min_holes_size=min_holes_size,
                             max_holes_size=max_holes_size,
                             min_hole_size=min_hole_size,
                             max_hole_size=max_hole_size)

    result = data.draw(strategy)

    assert is_multipolygon(result)
    assert multipolygon_has_valid_sizes(result,
                                        min_size=min_size,
                                        max_size=max_size,
                                        min_border_size=min_border_size,
                                        max_border_size=max_border_size,
                                        min_holes_size=min_holes_size,
                                        max_holes_size=max_holes_size,
                                        min_hole_size=min_hole_size,
                                        max_hole_size=max_hole_size)
    assert multipolygon_has_coordinates_types(result,
                                              x_type=x_type,
                                              y_type=y_type)
    assert multipolygon_has_coordinates_in_range(result,
                                                 min_x_value=min_x_value,
                                                 max_x_value=max_x_value,
                                                 min_y_value=min_y_value,
                                                 max_y_value=max_y_value)
    assert is_multipolygon_strict(result)
    assert all(
        is_contour_non_self_intersecting(polygon.border) and all(
            is_contour_non_self_intersecting(hole) for hole in polygon.holes)
        for polygon in result.polygons)
    assert contours_do_not_cross_or_overlap(
        [polygon.border for polygon in result.polygons])
    assert all(
        contours_do_not_cross_or_overlap(polygon.holes)
        for polygon in result.polygons)
    assert all(
        is_contour_counterclockwise(polygon.border) and all(
            not is_contour_counterclockwise(hole) for hole in polygon.holes)
        for polygon in result.polygons)
Example #7
        def wrapper(shapes: hnp.BroadcastableShapes, constant,
                    data: st.DataObject):
            self.index_to_arr_shapes.update((k, v) for k, v in zip(
                sorted(self.missing_shapes), shapes.input_shapes))

            # list of drawn arrays to feed to functions
            arrs = data.draw(
                st.tuples(*(self.arrays(i) for i in range(self.num_arrays))),
                label="arrays",
            )

            # list of array-copies to check for mutation
            arr_copies = tuple(copy(arr) for arr in arrs)

            if callable(self.kwargs):
                kwargs = data.draw(self.kwargs(*arrs))
                if not isinstance(kwargs, dict):
                    raise TypeError(
                        f"`kwargs` was a search strategy. This needs to draw dictionaries,"
                        f"instead drew: {kwargs}")
            else:
                # set or draw keyword args to be passed to functions
                kwargs = {
                    k: (data.draw(v(*arrs), label=f"kwarg: {k}")
                        if callable(v) else v)
                    for k, v in self.kwargs.items()
                }

            if self.assumptions is not None:
                assume(self.assumptions(*arrs, **kwargs))

            # ensure the arrays don't contain forbidden values
            for i, arr in enumerate(arrs):
                for value in self.index_to_no_go.get(i, ()):
                    assume(np.all(arr != value))

            if self.permit_0d_array_as_float:
                # potentially cast a 0D array as a float
                arrs = tuple(
                    arr.item()
                    if arr.ndim == 0 and data.draw(st.booleans(), label=f"arr-{n} to float")
                    else arr
                    for n, arr in enumerate(arrs))

            # execute mygrad and "true" functions. Compare outputs and check mygrad behavior
            tensor_constants = data.draw(
                st.tuples(*[st.booleans()] * len(arrs)),
                label="tensor_constants",
            )
            o = self.op(
                *(Tensor(i, constant=c)
                  for i, c in zip(arrs, tensor_constants)),
                **kwargs,
                constant=constant,
            )
            tensor_out = o.data
            true_out = self.true_func(*arrs, **kwargs)

            assert isinstance(
                o, Tensor
            ), f"`mygrad_func` returned type {type(o)}, should return `mygrad.Tensor`"
            assert o.constant is constant or bool(sum(tensor_constants)), (
                f"`mygrad_func` returned tensor.constant={o.constant}, "
                f"should be constant={constant or  bool(sum(tensor_constants))}"
            )

            assert_allclose(
                actual=tensor_out,
                desired=true_out,
                err_msg="`mygrad_func(x)` and `true_func(x)` produce different results",
                **self.tolerances,
            )

            for n, (arr, arr_copy) in enumerate(zip(arrs, arr_copies)):
                assert_array_equal(
                    arr,
                    arr_copy,
                    err_msg=f"arr-{n} was mutated during forward prop",
                )
Example #8
def test_gru_backward(
    data: st.DataObject,
    X: np.ndarray,
    D: int,
    bp_lim: bool,
    dropout: bool,
    U_constants: Tuple[bool, bool, bool],
    W_constants: Tuple[bool, bool, bool],
    b_constants: Tuple[bool, bool, bool],
    X_constant: bool,
    V_constant: bool,
):
    tolerances = dict(atol=1e-5, rtol=1e-5)
    T, N, C = X.shape

    Wz, Wr, Wh = data.draw(
        hnp.arrays(shape=(3, D, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="Wz, Wr, Wh",
    )

    Uz, Ur, Uh = data.draw(
        hnp.arrays(shape=(3, C, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="Uz, Ur, Uh",
    )

    bz, br, bh = data.draw(
        hnp.arrays(shape=(3, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="bz, br, bh",
    )

    V = data.draw(
        hnp.arrays(shape=(D, C), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="V",
    )

    s0 = np.zeros((N, D), dtype=float)

    X = Tensor(X, constant=X_constant)
    X2 = X.__copy__()

    Wz = Tensor(Wz, constant=W_constants[0])
    Wz2 = Wz.__copy__()

    Uz = Tensor(Uz, constant=U_constants[0])
    Uz2 = Uz.__copy__()

    bz = Tensor(bz, constant=b_constants[0])
    bz2 = bz.__copy__()

    Wr = Tensor(Wr, constant=W_constants[1])
    Wr2 = Wr.__copy__()

    Ur = Tensor(Ur, constant=U_constants[1])
    Ur2 = Ur.__copy__()

    br = Tensor(br, constant=b_constants[1])
    br2 = br.__copy__()

    Wh = Tensor(Wh, constant=W_constants[2])
    Wh2 = Wh.__copy__()

    Uh = Tensor(Uh, constant=U_constants[2])
    Uh2 = Uh.__copy__()

    bh = Tensor(bh, constant=b_constants[2])
    bh2 = bh.__copy__()

    V = Tensor(V, constant=V_constant)
    V2 = V.__copy__()

    s0 = Tensor(s0)
    s2 = s0.__copy__()

    # bp_lim = len(X) - 1 should behave the same as no bp-lim
    s = gru(
        X,
        Uz,
        Wz,
        bz,
        Ur,
        Wr,
        br,
        Uh,
        Wh,
        bh,
        dropout=dropout,
        constant=False,
        bp_lim=len(X) - 1 if bp_lim else None,
    )
    o = matmul(s[1:], V)
    ls = o.sum()
    ls.backward()

    stt = s2
    all_s = [s0.data]
    ls2 = 0
    if dropout:
        Wz2d = s.creator._dropWz * Wz2
        Wr2d = s.creator._dropWr * Wr2
        Wh2d = s.creator._dropWh * Wh2
    else:
        Wz2d = Wz2
        Wr2d = Wr2
        Wh2d = Wh2
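    # The loop below recomputes the standard GRU recurrence by hand
    # (dropout masks omitted):
    #   z_t = sigmoid(matmul(x_t, Uz) + matmul(s_{t-1}, Wz) + bz)
    #   r_t = sigmoid(matmul(x_t, Ur) + matmul(s_{t-1}, Wr) + br)
    #   h_t = tanh(matmul(x_t, Uh) + matmul(r_t * s_{t-1}, Wh) + bh)
    #   s_t = (1 - z_t) * h_t + z_t * s_{t-1}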
    for n, x in enumerate(X2):
        if not dropout:
            z = sigmoid(matmul(x, Uz2) + matmul(stt, Wz2d) + bz2)
            r = sigmoid(matmul(x, Ur2) + matmul(stt, Wr2d) + br2)
            h = tanh(matmul(x, Uh2) + matmul((r * stt), Wh2d) + bh2)
        else:
            z = sigmoid(
                (s.creator._dropUz[0] * matmul(x, Uz2)) + matmul(stt, Wz2d) + bz2
            )
            r = sigmoid(
                (s.creator._dropUr[0] * matmul(x, Ur2)) + matmul(stt, Wr2d) + br2
            )
            h = tanh(
                (s.creator._dropUh[0] * matmul(x, Uh2)) + matmul((r * stt), Wh2d) + bh2
            )
        stt = (1 - z) * h + z * stt
        all_s.append(stt)
        o = matmul(stt, V2)
        ls2 += o.sum()
    ls2.backward()

    rec_s_grad = np.stack([i.grad for i in all_s[1:]])

    if not s.constant:
        assert_allclose(rec_s_grad, s.grad, **tolerances)
    else:
        assert s.grad is None

    if not Wz.constant:
        assert_allclose(Wz.grad, Wz2.grad, **tolerances)
    else:
        assert Wz.grad is None

    if not Wr.constant:
        assert_allclose(Wr.grad, Wr2.grad, **tolerances)
    else:
        assert Wr.grad is None

    if not Wh.constant:
        assert_allclose(Wh.grad, Wh2.grad, **tolerances)
    else:
        assert Wh.grad is None

    if not Uz.constant:
        assert_allclose(Uz.grad, Uz2.grad, **tolerances)
    else:
        assert Uz.grad is None

    if not Ur.constant:
        assert_allclose(Ur.grad, Ur2.grad, **tolerances)
    else:
        assert Ur.grad is None

    if not Uh.constant:
        assert_allclose(Uh.grad, Uh2.grad, **tolerances)
    else:
        assert Uh.grad is None

    if not bz.constant:
        assert_allclose(bz.grad, bz2.grad, **tolerances)
    else:
        assert bz.grad is None

    if not br.constant:
        assert_allclose(br.grad, br2.grad, **tolerances)
    else:
        assert br.grad is None

    if not bh.constant:
        assert_allclose(bh.grad, bh2.grad, **tolerances)
    else:
        assert bh.grad is None

    if not V.constant:
        assert_allclose(V.grad, V2.grad, **tolerances)
    else:
        assert V.grad is None

    if not X.constant:
        assert_allclose(X.grad, X2.grad, **tolerances)
    else:
        assert X.grad is None

    ls.null_gradients()
    ls2.null_gradients()

    for x in [s, Wz, Wr, Wh, bz, br, bh, X, Uz, Ur, Uh, V]:
        assert x.grad is None
Example #9
def test_gru_fwd(X, D, dropout, data: st.DataObject):
    T, N, C = X.shape

    Wz, Wr, Wh = data.draw(
        hnp.arrays(shape=(3, D, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="Wz, Wr, Wh",
    )

    Uz, Ur, Uh = data.draw(
        hnp.arrays(shape=(3, C, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="Uz, Ur, Uh",
    )

    bz, br, bh = data.draw(
        hnp.arrays(shape=(3, D), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="bz, br, bh",
    )

    V = data.draw(
        hnp.arrays(shape=(D, C), dtype=float, elements=st.floats(-10.0, 10.0)),
        label="V",
    )

    s0 = np.zeros((N, D), dtype=float)

    X = Tensor(X)
    X2 = X.__copy__()

    Wz = Tensor(Wz)
    Wz2 = Wz.__copy__()

    Uz = Tensor(Uz)
    Uz2 = Uz.__copy__()

    bz = Tensor(bz)
    bz2 = bz.__copy__()

    Wr = Tensor(Wr)
    Wr2 = Wr.__copy__()

    Ur = Tensor(Ur)
    Ur2 = Ur.__copy__()

    br = Tensor(br)
    br2 = br.__copy__()

    Wh = Tensor(Wh)
    Wh2 = Wh.__copy__()

    Uh = Tensor(Uh)
    Uh2 = Uh.__copy__()

    bh = Tensor(bh)
    bh2 = bh.__copy__()

    V = Tensor(V)
    V2 = V.__copy__()

    s0 = Tensor(s0)
    s2 = s0.__copy__()

    s = gru(X, Uz, Wz, bz, Ur, Wr, br, Uh, Wh, bh, dropout=dropout, constant=True)
    o = matmul(s[1:], V)
    ls = o.sum()

    assert s.constant is True

    if dropout:
        for d in [
            s.creator._dropUr,
            s.creator._dropUz,
            s.creator._dropUh,
            s.creator._dropWr,
            s.creator._dropWz,
            s.creator._dropWh,
        ]:
            assert np.all(np.logical_or(d == 1 / (1 - dropout), d == 0))

    stt = s2
    all_s = [s0.data]
    ls2 = 0
    if dropout:
        Wz2d = s.creator._dropWz * Wz2
        Wr2d = s.creator._dropWr * Wr2
        Wh2d = s.creator._dropWh * Wh2
    else:
        Wz2d = Wz2
        Wr2d = Wr2
        Wh2d = Wh2
    for n, x in enumerate(X2):
        if not dropout:
            z = sigmoid(matmul(x, Uz2) + matmul(stt, Wz2d) + bz2)
            r = sigmoid(matmul(x, Ur2) + matmul(stt, Wr2d) + br2)
            h = tanh(matmul(x, Uh2) + matmul((r * stt), Wh2d) + bh2)
        else:
            z = sigmoid(
                (s.creator._dropUz[0] * matmul(x, Uz2)) + matmul(stt, Wz2d) + bz2
            )
            r = sigmoid(
                (s.creator._dropUr[0] * matmul(x, Ur2)) + matmul(stt, Wr2d) + br2
            )
            h = tanh(
                (s.creator._dropUh[0] * matmul(x, Uh2)) + matmul((r * stt), Wh2d) + bh2
            )

        stt = (1 - z) * h + z * stt
        all_s.append(stt)
        o = matmul(stt, V2)
        ls2 += o.sum()

    tolerances = dict(atol=1e-5, rtol=1e-5)
    rec_s_dat = np.stack([i.data for i in all_s])

    assert_allclose(ls.data, ls2.data, **tolerances)

    assert_allclose(rec_s_dat, s.data, **tolerances)

    assert_allclose(Wz.data, Wz2.data, **tolerances)
    assert_allclose(Wr.data, Wr2.data, **tolerances)
    assert_allclose(Wh.data, Wh2.data, **tolerances)

    assert_allclose(Uz.data, Uz2.data, **tolerances)
    assert_allclose(Ur.data, Ur2.data, **tolerances)
    assert_allclose(Uh.data, Uh2.data, **tolerances)

    assert_allclose(bz.data, bz2.data, **tolerances)
    assert_allclose(br.data, br2.data, **tolerances)
    assert_allclose(bh.data, bh2.data, **tolerances)

    assert_allclose(V.data, V2.data, **tolerances)

    assert_allclose(X.data, X2.data, **tolerances)

    ls.null_gradients()
    for x in [s, Wz, Wr, Wh, bz, br, bh, X, Uz, Ur, Uh, V]:
        assert x.grad is None
Example #10
def test_tensors_shape(shape, data: st.DataObject):
    tensor = data.draw(tensors(np.int8, shape=shape), label="tensor")
    assert isinstance(tensor, Tensor)
    assert tensor.shape == shape
    assert tensor.grad is None
Example #11
def test_tensors_dtype(dtype, data: st.DataObject):
    tensor = data.draw(tensors(dtype=dtype, shape=(2, 3)), label="tensor")
    assert isinstance(tensor, Tensor)
    assert tensor.dtype == dtype
    assert tensor.grad is None
Example #12
def test_tensors_static_constant(constant: bool, data: st.DataObject):
    tensor = data.draw(tensors(np.int8, (2, 3), constant=constant),
                       label="tensor")
    assert isinstance(tensor, Tensor)
    assert tensor.constant is constant
    assert tensor.grad is None
Example #13
def test_valid_shapes(arr: np.ndarray, data: st.DataObject):
    newshape = data.draw(valid_shapes(arr.size), label="newshape")
    arr.reshape(newshape)
Example #14
def test_log_softmax_numerical_stability(x: np.ndarray, data: st.DataObject):
    axis = data.draw(valid_axes(x.ndim), label="axis")
    out = np.exp(logsoftmax(x, axis=axis).data)
    assert np.all(np.logical_and(0 <= out, out <= 1)), out
    assert_allclose(out.sum(axis=axis), 1.0)
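The stability property under test follows from the identity logsoftmax(x) = x - logsumexp(x): exponentiating recovers the softmax, whose entries lie in [0, 1] and sum to 1 along the reduced axis. A minimal NumPy reference built on that identity (not mygrad's implementation):

import numpy as np

def log_softmax_ref(x: np.ndarray, axis: int = -1) -> np.ndarray:
    shifted = x - x.max(axis=axis, keepdims=True)  # shift to avoid overflow in exp
    return shifted - np.log(np.exp(shifted).sum(axis=axis, keepdims=True))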
Example #15
def test_ranked_margin(shape: Tuple[int, ...], margin: float,
                       labels_as_tensor: bool, data: st.DataObject):
    x1 = data.draw(
        hnp.arrays(shape=shape, dtype=float, elements=st.floats(-1000, 1000)),
        label="x1",
    )
    x2 = data.draw(
        hnp.arrays(shape=shape, dtype=float, elements=st.floats(-1000, 1000)),
        label="x2",
    )
    y = data.draw(
        st.sampled_from((-1, 1))
        | hnp.arrays(
            shape=shape[:1],
            dtype=hnp.integer_dtypes(),
            elements=st.sampled_from((-1, 1)),
        ).map(lambda x: mg.Tensor(x) if labels_as_tensor else x),
        label="y",
    )

    x1_copy = np.copy(x1)
    x2_copy = np.copy(x2)
    y_copy = y.copy() if isinstance(y, mg.Tensor) else np.copy(y)

    x1_dum = mg.Tensor(x1)
    x2_dum = mg.Tensor(x2)

    x1_real = mg.Tensor(x1)
    x2_real = mg.Tensor(x2)

    loss_dum = simple_loss(x1_dum, x2_dum, y, margin)

    loss_real = margin_ranking_loss(x1_real, x2_real, y, margin)

    assert_allclose(actual=loss_real.data,
                    desired=loss_dum.data,
                    err_msg="losses don't match")

    assert_array_equal(x1, x1_copy, err_msg="`x1` was mutated by forward")
    assert_array_equal(x2, x2_copy, err_msg="`x2` was mutated by forward")
    if isinstance(y, np.ndarray):
        assert_array_equal(y, y_copy, err_msg="`y` was mutated by forward")

    loss_dum.backward()
    loss_real.backward()

    assert_allclose(actual=x1_real.grad,
                    desired=x1_dum.grad,
                    err_msg="x1.grad doesn't match")
    assert_allclose(actual=x2_real.grad,
                    desired=x2_dum.grad,
                    err_msg="x2.grad doesn't match")

    assert_array_equal(x1, x1_copy, err_msg="`x1` was mutated by backward")
    assert_array_equal(x2, x2_copy, err_msg="`x2` was mutated by backward")

    if isinstance(y, mg.Tensor):
        y = y.data

    if isinstance(y, np.ndarray):
        assert_array_equal(y, y_copy, err_msg="`y` was mutated by backward")

    loss_real.null_gradients()
    assert x1_real.grad is None
    assert x2_real.grad is None
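For reference, the quantity both `simple_loss` and `margin_ranking_loss` are presumed to compute here is the standard margin-ranking loss, mean(max(0, margin - y * (x1 - x2))), where y in {-1, 1} selects which input should rank higher.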
Example #16
def test_properties(data: DataObject) -> None:
    strategy = empty_geometries()

    result = data.draw(strategy)

    assert is_empty(result)