Example 1
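All of the examples below come from the Ivy test suite and rely on a shared preamble. A minimal sketch of it follows; the exact module paths are assumptions inferred from the names used in the tests (Container, helpers and TrainableModule may live under different paths):

import numpy as np
import pytest

import ivy
import ivy.numpy  # used as the NumPy reference backend in the value tests
from ivy.core.container import Container  # path is an assumption
from ivy_tests import helpers  # provides np_call, torch_call, var_fn, assert_compilable, ...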
def test_variable(object_in, dtype_str, dev_str, call):
    if call is helpers.tf_graph_call:
        # cannot create variables as part of compiled tf graph
        pytest.skip()
    if call in [helpers.mx_call] and dtype_str == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if len(object_in) == 0 and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    ret = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
    # type test
    if call is not helpers.np_call:
        assert ivy.is_variable(ret)
    # cardinality test
    assert ret.shape == np.array(object_in).shape
    # value test
    assert np.allclose(
        call(ivy.variable, ivy.array(object_in, dtype_str, dev_str)),
        np.array(object_in).astype(dtype_str))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
    helpers.assert_compilable(ivy.variable)
Example 2
def test_reduce_mean(x, axis, kd, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.reduce_mean(x, axis, kd)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if axis is None:
        expected_shape = [1] * len(x.shape) if kd else []
    else:
        axis_ = [axis] if isinstance(axis, int) else axis
        axis_ = [item % len(x.shape) for item in axis_]
        expected_shape = list(x.shape)
        if kd:
            expected_shape = [
                1 if i % len(x.shape) in axis_ else item
                for i, item in enumerate(expected_shape)
            ]
        else:
            # pop in descending order so earlier pops don't shift later indices
            for item in sorted(axis_, reverse=True):
                expected_shape.pop(item)
    expected_shape = [1] if expected_shape == [] else expected_shape
    assert ret.shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.reduce_mean, x),
                       ivy.numpy.reduce_mean(ivy.to_numpy(x)))
    # compilation test
    helpers.assert_compilable(ivy.reduce_mean)
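The keepdims shape arithmetic exercised above follows the same convention as plain NumPy, which makes for a quick sanity check:

import numpy as np

x = np.ones((2, 3, 4))
# keepdims=True retains each reduced axis with size 1
assert np.mean(x, axis=1, keepdims=True).shape == (2, 1, 4)
# keepdims=False drops the reduced axes entirely
assert np.mean(x, axis=(0, 2)).shape == (3,)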
Example 3
def test_lstm(b_t_ic_hc_otf_sctv, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels), 'float32')
    init_h = ivy.ones([b, hidden_channels])
    init_c = ivy.ones([b, hidden_channels])
    kernel = ivy.variable(ivy.ones([input_channels, 4*hidden_channels]))*0.5
    recurrent_kernel = ivy.variable(ivy.ones([hidden_channels, 4*hidden_channels]))*0.5
    output, state_c = ivy.lstm_update(x, init_h, init_c, kernel, recurrent_kernel)
    # type test
    assert ivy.is_array(output)
    assert ivy.is_array(state_c)
    # cardinality test
    assert output.shape == (b, t, hidden_channels)
    assert state_c.shape == (b, hidden_channels)
    # value test
    output_true = np.tile(np.asarray(output_true_flat).reshape((b, t, 1)), (1, 1, hidden_channels))
    state_c_true = np.ones([b, hidden_channels]) * state_c_true_val
    output, state_c = call(ivy.lstm_update, x, init_h, init_c, kernel, recurrent_kernel)
    assert np.allclose(output, output_true, atol=1e-6)
    assert np.allclose(state_c, state_c_true, atol=1e-6)
    # compilation test
    if call in [helpers.torch_call]:
        # this is not a backend implemented function
        pytest.skip()
    helpers.assert_compilable(ivy.lstm_update)
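For reference, a single LSTM step in plain NumPy. The i|f|g|o gate ordering and the absence of bias terms are assumptions chosen to match the kernel shapes built in the test, not a guaranteed mirror of ivy.lstm_update's internals:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, h, c, kernel, recurrent_kernel):
    # kernel: (input_channels, 4*hidden), recurrent_kernel: (hidden, 4*hidden)
    gates = x_t @ kernel + h @ recurrent_kernel
    i, f, g, o = np.split(gates, 4, axis=-1)
    c_new = sigmoid(f) * c + sigmoid(i) * np.tanh(g)
    h_new = sigmoid(o) * np.tanh(c_new)
    return h_new, c_new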
Example 4
def test_linear_layer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str,
                      call):
    # smoke test
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels))**0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)
    ret = linear_layer(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == tuple(batch_shape + [output_channels])
    # value test
    if not with_v:
        return
    assert np.allclose(call(linear_layer, x), np.array(target))
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support **kwargs
        return
    helpers.assert_compilable(linear_layer)
Example 5
def test_is_variable(object_in, dtype_str, dev_str, call):
    if call is helpers.tf_graph_call:
        # cannot create variables as part of compiled tf graph
        pytest.skip()
    if call in [helpers.mx_call] and dtype_str == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if len(object_in) == 0 and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    non_var = ivy.array(object_in, dtype_str, dev_str)
    var = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
    non_var_res = ivy.is_variable(non_var)
    var_res = ivy.is_variable(var)
    # type test
    assert ivy.is_array(non_var)
    if call is not helpers.np_call:
        assert ivy.is_variable(var)
    if call in [helpers.np_call, helpers.jnp_call]:
        # numpy and jax do not support flagging variables
        pytest.skip()
    # value test
    assert non_var_res is False
    assert var_res is True
    # compilation test
    helpers.assert_compilable(ivy.is_variable)
Example 6
def test_norm(x_n_ord_n_ax_n_kd, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x, order, ax, kd = x_n_ord_n_ax_n_kd
    x = tensor_fn(x, dtype_str, dev_str)
    kwargs = dict([
        (k, v)
        for k, v in zip(['x', 'ord', 'axis', 'keepdims'], [x, order, ax, kd])
        if v is not None
    ])
    ret = ivy.norm(**kwargs)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if kd:
        expected_shape = [
            1 if i == ax else item for i, item in enumerate(x.shape)
        ]
    else:
        expected_shape = list(x.shape)
        expected_shape.pop(ax)
    assert ret.shape == tuple(expected_shape)
    # value test
    kwargs.pop('x', None)
    assert np.allclose(call(ivy.norm, x, **kwargs),
                       ivy.numpy.norm(ivy.to_numpy(x), **kwargs))
    # compilation test
    helpers.assert_compilable(ivy.norm)
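The kwargs-filtering idiom above lets one test body cover calls with and without explicit ord, axis and keepdims. The reference semantics match np.linalg.norm:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
assert np.linalg.norm(x, ord=1, axis=1, keepdims=True).shape == (2, 1)
assert np.linalg.norm(x, ord=1, axis=1).shape == (2,)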
Example 7
def test_adam_update(ws_n_grads_n_lr_n_wsnew, dtype_str, tensor_fn, dev_str,
                     call):
    # smoke test
    ws_raw, dcdws_raw, lr, ws_raw_new = ws_n_grads_n_lr_n_wsnew
    ws = ws_raw.map(lambda x, _: ivy.variable(ivy.array(x)))
    dcdws = dcdws_raw.map(lambda x, _: ivy.array(x))
    ws_true_new = ws_raw_new.map(lambda x, _: ivy.variable(ivy.array(x)))
    mw = dcdws
    vw = dcdws.map(lambda x, _: x**2)
    ws_new, mw_new, vw_new = ivy.adam_update(ws, dcdws, lr, mw, vw,
                                             ivy.array(1))
    # type test
    assert isinstance(ws_new, dict)
    assert isinstance(mw_new, dict)
    assert isinstance(vw_new, dict)
    # cardinality test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert w_new.shape == w_true_new.shape
    for (m_new, m_orig) in zip(mw_new.values(), mw.values()):
        assert m_new.shape == m_orig.shape
    for (v_new, v_orig) in zip(vw_new.values(), vw.values()):
        assert v_new.shape == v_orig.shape
    # value test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert np.allclose(ivy.to_numpy(w_new), ivy.to_numpy(w_true_new))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support internal function definitions
        return
    helpers.assert_compilable(ivy.adam_update)
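A minimal NumPy sketch of the Adam step being tested; the hyper-parameter defaults (beta1=0.9, beta2=0.999, eps=1e-7) are assumptions, not values read from ivy.adam_update:

import numpy as np

def adam_step(w, dcdw, lr, m, v, step, beta1=0.9, beta2=0.999, eps=1e-7):
    m = beta1 * m + (1 - beta1) * dcdw       # first-moment estimate
    v = beta2 * v + (1 - beta2) * dcdw ** 2  # second-moment estimate
    m_hat = m / (1 - beta1 ** step)          # bias correction
    v_hat = v / (1 - beta2 ** step)
    return w - lr * m_hat / (np.sqrt(v_hat) + eps), m, v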
Example 8
def test_lstm_layer(b_t_ic_hc_otf_sctv, with_v, with_initial_state, dtype_str,
                    tensor_fn, dev_str, call):
    # smoke test
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(
        ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels),
        'float32')
    if with_initial_state:
        init_h = ivy.ones([b, hidden_channels])
        init_c = ivy.ones([b, hidden_channels])
        initial_state = ([init_h], [init_c])
    else:
        initial_state = None
    if with_v:
        kernel = ivy.variable(
            ivy.ones([input_channels, 4 * hidden_channels]) * 0.5)
        recurrent_kernel = ivy.variable(
            ivy.ones([hidden_channels, 4 * hidden_channels]) * 0.5)
        v = Container({
            'input': {
                'layer_0': {
                    'w': kernel
                }
            },
            'recurrent': {
                'layer_0': {
                    'w': recurrent_kernel
                }
            }
        })
    else:
        v = None
    lstm_layer = ivy.LSTM(input_channels, hidden_channels, v=v)
    output, (state_h, state_c) = lstm_layer(x, initial_state=initial_state)
    # type test
    assert ivy.is_array(output)
    assert ivy.is_array(state_h[0])
    assert ivy.is_array(state_c[0])
    # cardinality test
    assert output.shape == (b, t, hidden_channels)
    assert state_h[0].shape == (b, hidden_channels)
    assert state_c[0].shape == (b, hidden_channels)
    # value test
    if not with_v or not with_initial_state:
        return
    output_true = np.tile(
        np.asarray(output_true_flat).reshape((b, t, 1)),
        (1, 1, hidden_channels))
    state_c_true = np.ones([b, hidden_channels]) * state_c_true_val
    output, (state_h, state_c) = call(lstm_layer,
                                      x,
                                      initial_state=initial_state)
    assert np.allclose(output, output_true, atol=1e-6)
    assert np.allclose(state_c, state_c_true, atol=1e-6)
    # compilation test
    if call in [helpers.torch_call]:
        # this is not a backend implemented function
        pytest.skip()
    helpers.assert_compilable(ivy.lstm_update)
Example 9
def test_seed(seed_val, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    ivy.seed(seed_val)
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support functions with None return
        return
    helpers.assert_compilable(ivy.seed)
Example 10
def test_sgd_optimizer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str,
                       call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels))**0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)

    def loss_fn(v_):
        out = linear_layer(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # optimizer
    optimizer = ivy.SGD()

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, linear_layer.v)
        linear_layer.v = optimizer.step(linear_layer.v, grads)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1, )
    else:
        assert loss.shape == ()
    # value test
    assert ivy.reduce_max(ivy.abs(grads.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
Example 11
def test_multinomial(probs, num_samples, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    probs = tensor_fn(probs, dtype_str, dev_str)
    ret = ivy.multinomial(probs, num_samples)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == tuple(list(probs.shape[:-1]) + [num_samples])
    # compilation test
    helpers.assert_compilable(ivy.multinomial)
Example 12
def test_stack_images(shp_n_num_n_ar_n_newshp, dev_str, call):
    # smoke test
    shape, num, ar, new_shape = shp_n_num_n_ar_n_newshp
    xs = [ivy.ones(shape)] * num
    ret = ivy.stack_images(xs, ar)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == new_shape
    # compilation test
    helpers.assert_compilable(ivy.stack_images)
Example 13
def test_acos(x, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.acos(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.acos, x), ivy.numpy.acos(ivy.to_numpy(x)))
    # compilation test
    helpers.assert_compilable(ivy.acos)
Example 14
def test_vector_to_skew_symmetric_matrix(x, dtype_str, tensor_fn, dev_str,
                                         call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.vector_to_skew_symmetric_matrix(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape + (x.shape[-1], )
    # value test
    assert np.allclose(
        call(ivy.vector_to_skew_symmetric_matrix, x),
        ivy.numpy.vector_to_skew_symmetric_matrix(ivy.to_numpy(x)))
    # compilation test
    helpers.assert_compilable(ivy.vector_to_skew_symmetric_matrix)
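The expected output is the standard cross-product (hat) matrix of each trailing 3-vector, so a plain-NumPy reference is straightforward:

import numpy as np

def skew_matrix(v):
    # v: (..., 3) -> (..., 3, 3), with skew_matrix(v) @ u == np.cross(v, u)
    x, y, z = v[..., 0], v[..., 1], v[..., 2]
    zero = np.zeros_like(x)
    return np.stack([np.stack([zero, -z, y], -1),
                     np.stack([z, zero, -x], -1),
                     np.stack([-y, x, zero], -1)], -2)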
Example 15
def test_atan2(x1_n_x2, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x1, x2 = x1_n_x2
    x1 = tensor_fn(x1, dtype_str, dev_str)
    x2 = tensor_fn(x2, dtype_str, dev_str)
    ret = ivy.atan2(x1, x2)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x1.shape
    # value test
    assert np.allclose(call(ivy.atan2, x1, x2),
                       ivy.numpy.atan2(ivy.to_numpy(x1), ivy.to_numpy(x2)))
    # compilation test
    helpers.assert_compilable(ivy.atan2)
Example 16
def test_inv(x, dtype_str, tensor_fn, dev_str, call):
    if call in [helpers.tf_call, helpers.tf_graph_call] and 'cpu' in dev_str:
        # tf.linalg.inv segfaults when CUDA is installed but the array is on the CPU
        pytest.skip()
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.inv(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.inv, x), ivy.numpy.inv(ivy.to_numpy(x)))
    # compilation test
    helpers.assert_compilable(ivy.inv)
Example 17
def test_stop_gradient(x_raw, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x_raw, dtype_str, dev_str)
    ret = ivy.stop_gradient(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.array_equal(call(ivy.stop_gradient, x),
                          ivy.numpy.array(x_raw, dtype_str, dev_str))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support attribute setting
        return
    helpers.assert_compilable(ivy.stop_gradient)
Example 18
def test_logical_not(x, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.logical_not(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.logical_not, x),
                       ivy.numpy.logical_not(ivy.to_numpy(x)))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support .type() method
        return
    helpers.assert_compilable(ivy.logical_not)
Example 19
def test_linear(x_n_w_n_b_n_res, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x, weight, bias, true_res = x_n_w_n_b_n_res
    x = tensor_fn(x, dtype_str, dev_str)
    weight = tensor_fn(weight, dtype_str, dev_str)
    bias = tensor_fn(bias, dtype_str, dev_str)
    true_res = tensor_fn(true_res, dtype_str, dev_str)
    ret = ivy.linear(x, weight, bias)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == true_res.shape
    # value test
    assert np.allclose(call(ivy.linear, x, weight, bias), ivy.to_numpy(true_res))
    # compilation test
    helpers.assert_compilable(ivy.linear)
Example 20
def test_shuffle(x, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.shuffle(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    ivy.seed(0)
    first_shuffle = call(ivy.shuffle, x)
    ivy.seed(0)
    second_shuffle = call(ivy.shuffle, x)
    assert np.array_equal(first_shuffle, second_shuffle)
    # compilation test
    helpers.assert_compilable(ivy.shuffle)
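The value test relies on seeded shuffles being reproducible, the same guarantee plain NumPy gives:

import numpy as np

np.random.seed(0)
first = np.random.permutation(10)
np.random.seed(0)
second = np.random.permutation(10)
assert np.array_equal(first, second)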
Example 21
def test_module_training(bs_ic_oc, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    module = TrainableModule(input_channels, output_channels)

    def loss_fn(v_):
        out = module(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
        module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1, )
    else:
        assert loss.shape == ()
    # value test
    assert ivy.reduce_max(ivy.abs(grads.linear0.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear0.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear1.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear1.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear2.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear2.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
Example 22
def test_randint(low, high, shape, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    if call in [helpers.mx_call, helpers.torch_call] and tensor_fn is helpers.var_fn:
        # PyTorch and MXNet do not support non-float variables
        pytest.skip()
    low_tnsr, high_tnsr = tensor_fn(low), tensor_fn(high)
    ret = ivy.randint(low_tnsr, high_tnsr, shape, dev_str=dev_str)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == shape
    # value test
    ret_np = call(ivy.randint, low_tnsr, high_tnsr, shape, dev_str=dev_str)
    assert np.min((ret_np < high).astype(np.int32)) == 1
    assert np.min((ret_np >= low).astype(np.int32)) == 1
    # compilation test
    helpers.assert_compilable(ivy.randint)
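The bounds assertions follow the half-open [low, high) convention, which is also what np.random.randint implements:

import numpy as np

ret = np.random.randint(0, 10, size=(3, 4))
assert ret.shape == (3, 4)
assert (ret >= 0).all() and (ret < 10).all()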
Example 23
def test_conv2d(x_n_filters_n_pad_n_res, dtype_str, tensor_fn, dev_str, call):
    if call in [helpers.tf_call, helpers.tf_graph_call] and 'cpu' in dev_str:
        # tf conv2d fails when CUDA is installed but the array is on the CPU
        pytest.skip()
    # smoke test
    x, filters, padding, true_res = x_n_filters_n_pad_n_res
    x = tensor_fn(x, dtype_str, dev_str)
    filters = tensor_fn(filters, dtype_str, dev_str)
    true_res = tensor_fn(true_res, dtype_str, dev_str)
    ret = ivy.conv2d(x, filters, 1, padding)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == true_res.shape
    # value test
    assert np.allclose(call(ivy.conv2d, x, filters, 1, padding), ivy.to_numpy(true_res))
    # compilation test
    helpers.assert_compilable(ivy.conv2d)
Example 24
def test_bilinear_resample(x_n_warp, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x, warp = x_n_warp
    x = tensor_fn(x, dtype_str, dev_str)
    warp = tensor_fn(warp, dtype_str, dev_str)
    ret = ivy.bilinear_resample(x, warp)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == warp.shape[:-1] + x.shape[-1:]
    # value test
    assert np.allclose(
        call(ivy.bilinear_resample, x, warp),
        ivy.numpy.bilinear_resample(ivy.to_numpy(x), ivy.to_numpy(warp)))
    # compilation test
    if call in [helpers.torch_call]:
        # torch scripting does not support builtins
        return
    helpers.assert_compilable(ivy.bilinear_resample)
Example 25
def test_binary_cross_entropy(x_n_y_n_res, dtype_str, tensor_fn, dev_str,
                              call):
    # smoke test
    x, y, true_target = x_n_y_n_res
    x = tensor_fn(x, dtype_str, dev_str)
    y = tensor_fn(y, dtype_str, dev_str)
    ret = ivy.binary_cross_entropy(x, y)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == x.shape
    # value test
    assert np.allclose(call(ivy.binary_cross_entropy, x, y),
                       np.asarray(true_target))
    # compilation test
    if call in [helpers.torch_call]:
        # binary_cross_entropy has no backend implementation, and
        # pytorch scripting requires direct bindings, which bypass get_framework()
        return
    helpers.assert_compilable(ivy.binary_cross_entropy)
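For reference, the element-wise binary cross-entropy the test checks against, in plain NumPy (real backends may additionally clip x away from 0 and 1 for numerical stability):

import numpy as np

def binary_cross_entropy(x, y):
    # x: predicted probabilities, y: binary targets
    # no reduction, matching the ret.shape == x.shape assertion above
    return -(y * np.log(x) + (1 - y) * np.log(1 - x))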
Example 26
def test_conv2d_transpose(x_n_filters_n_pad_n_outshp_n_res, dtype_str, tensor_fn, dev_str, call):
    if call in [helpers.tf_call, helpers.tf_graph_call] and 'cpu' in dev_str:
        # tf conv2d_transpose fails when CUDA is installed but the array is on the CPU
        pytest.skip()
    # smoke test
    if call in [helpers.np_call, helpers.jnp_call]:
        # numpy and jax do not yet support conv2d_transpose
        pytest.skip()
    x, filters, padding, output_shape, true_res = x_n_filters_n_pad_n_outshp_n_res
    x = tensor_fn(x, dtype_str, dev_str)
    filters = tensor_fn(filters, dtype_str, dev_str)
    true_res = tensor_fn(true_res, dtype_str, dev_str)
    ret = ivy.conv2d_transpose(x, filters, 1, padding, output_shape)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == true_res.shape
    # value test
    assert np.allclose(call(ivy.conv2d_transpose, x, filters, 1, padding, output_shape), ivy.to_numpy(true_res))
    # compilation test
    helpers.assert_compilable(ivy.conv2d_transpose)
Example 27
def test_gradient_descent_update(ws_n_grads_n_lr_n_wsnew, dtype_str, tensor_fn,
                                 dev_str, call):
    # smoke test
    ws_raw, dcdws_raw, lr, ws_raw_new = ws_n_grads_n_lr_n_wsnew
    ws = ws_raw.map(lambda x, _: ivy.variable(ivy.array(x)))
    dcdws = dcdws_raw.map(lambda x, _: ivy.array(x))
    ws_true_new = ws_raw_new.map(lambda x, _: ivy.variable(ivy.array(x)))
    ws_new = ivy.gradient_descent_update(ws, dcdws, lr)
    # type test
    assert isinstance(ws_new, dict)
    # cardinality test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert w_new.shape == w_true_new.shape
    # value test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert np.allclose(ivy.to_numpy(w_new), ivy.to_numpy(w_true_new))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support internal function definitions
        return
    helpers.assert_compilable(ivy.gradient_descent_update)
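The update under test is vanilla gradient descent applied per weight array; the NumPy equivalent is a one-liner:

import numpy as np

def gradient_descent_step(w, dcdw, lr):
    # w_new = w - lr * dC/dw
    return w - lr * dcdw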
Example 28
def test_gradient_image(x_n_dy_n_dx, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x, dy_true, dx_true = x_n_dy_n_dx
    x = tensor_fn(x, dtype_str, dev_str)
    dy, dx = ivy.gradient_image(x)
    # type test
    assert ivy.is_array(dy)
    assert ivy.is_array(dx)
    # cardinality test
    assert dy.shape == x.shape
    assert dx.shape == x.shape
    # value test
    dy_np, dx_np = call(ivy.gradient_image, x)
    dy_true = ivy.numpy.array(dy_true, dtype_str, dev_str)
    dx_true = ivy.numpy.array(dx_true, dtype_str, dev_str)
    assert np.allclose(dy_np, dy_true)
    assert np.allclose(dx_np, dx_true)
    # compilation test
    if call in [helpers.torch_call]:
        # a torch device cannot be assigned a string value while scripting
        return
    helpers.assert_compilable(ivy.gradient_image)
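A plausible plain-NumPy equivalent that keeps the input shape by zero-filling the trailing edge; the padding convention is an assumption based on the dy.shape == x.shape assertion above:

import numpy as np

def gradient_image(x):
    # x: (..., h, w, c); forward differences, zeros in the last row / column
    dy = np.zeros_like(x)
    dx = np.zeros_like(x)
    dy[..., :-1, :, :] = x[..., 1:, :, :] - x[..., :-1, :, :]
    dx[..., :, :-1, :] = x[..., :, 1:, :] - x[..., :, :-1, :]
    return dy, dx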
Example 29
def test_random_uniform(low, high, shape, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    if tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    kwargs = dict([(k, tensor_fn(v)) for k, v in zip(['low', 'high'], [low, high]) if v is not None])
    if shape is not None:
        kwargs['shape'] = shape
    ret = ivy.random_uniform(**kwargs, dev_str=dev_str)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if shape is None:
        assert ret.shape == ()
    else:
        assert ret.shape == shape
    # value test
    ret_np = call(ivy.random_uniform, **kwargs, dev_str=dev_str)
    assert np.min((ret_np < (high if high else 1.)).astype(np.int32)) == 1
    assert np.min((ret_np > (low if low else 0.)).astype(np.int32)) == 1
    # compilation test
    helpers.assert_compilable(ivy.random_uniform)
Example 30
def test_svd(x, dtype_str, tensor_fn, dev_str, call):
    if call in [helpers.tf_call, helpers.tf_graph_call] and 'cpu' in dev_str:
        # tf.linalg.svd segfaults when CUDA is installed but the array is on the CPU
        pytest.skip()
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    u, s, vh = ivy.svd(x)
    # type test
    assert ivy.is_array(u)
    assert ivy.is_array(s)
    assert ivy.is_array(vh)
    # cardinality test
    assert u.shape == x.shape
    assert s.shape == x.shape[:-1]
    assert vh.shape == x.shape
    # value test
    pred_u, pred_s, pred_vh = call(ivy.svd, x)
    true_u, true_s, true_vh = ivy.numpy.svd(ivy.to_numpy(x))
    assert np.allclose(pred_u, true_u)
    assert np.allclose(pred_s, true_s)
    assert np.allclose(pred_vh, true_vh)
    # compilation test
    helpers.assert_compilable(ivy.svd)
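For square inputs the factor shapes asserted above match NumPy's np.linalg.svd, which satisfies the same reconstruction property:

import numpy as np

x = np.random.uniform(size=(4, 4)).astype(np.float32)
u, s, vh = np.linalg.svd(x)
assert np.allclose(u @ np.diag(s) @ vh, x, atol=1e-5)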