Example #1
def __init__(self, num_layers, layer_dim, embedding_length, dev_str='cpu'):
    self._num_layers = num_layers
    self._layer_dim = layer_dim
    self._embedding_length = embedding_length
    # 3 raw co-ordinates plus a sin and a cos term per frequency band, per co-ordinate
    embedding_size = 3 + 3 * 2 * embedding_length
    self._fc_layers = [ivy.Linear(embedding_size, layer_dim, dev_str)]
    # every 4th hidden layer also receives the embedding via a skip connection,
    # so its input is widened by embedding_size
    self._fc_layers += [ivy.Linear(layer_dim + (embedding_size if i % 4 == 0 and i > 0 else 0),
                                   layer_dim, dev_str)
                        for i in range(num_layers - 2)]
    self._fc_layers.append(ivy.Linear(layer_dim, 4, dev_str))
    super(Model, self).__init__(dev_str)
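The widths here suggest a NeRF-style network: the embedding size is the 3 raw co-ordinates plus sin/cos terms per frequency band, and the 4 final outputs would correspond to colour plus density. For context, the sketch below shows roughly how such a constructor sits inside a full ivy.Module subclass; the _forward method, the relu activations and the exact placement of the skip concatenation are assumptions of this sketch, not part of the original snippet.

import ivy

class Model(ivy.Module):

    # __init__ exactly as in the example above, then:

    def _forward(self, embedding):
        x = ivy.relu(self._fc_layers[0](embedding))
        for i, layer in enumerate(self._fc_layers[1:-1]):
            if i % 4 == 0 and i > 0:
                # assumed skip connection: re-concatenate the embedding, matching
                # the widened input declared for every 4th hidden layer in __init__
                x = ivy.concatenate([x, embedding], -1)
            x = ivy.relu(layer(x))
        return self._fc_layers[-1](x)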
Example #2
def test_linear_layer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str,
                      call):
    # smoke test
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels))**0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)
    ret = linear_layer(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == tuple(batch_shape + [output_channels])
    # value test
    if not with_v:
        return
    assert np.allclose(call(linear_layer, x), np.array(target))
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(linear_layer)
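The wlim bound in this test is the Xavier/Glorot uniform initialisation limit, sqrt(6 / (fan_in + fan_out)). Outside the test harness, direct use of ivy.Linear reduces to a few lines; a minimal sketch, assuming a concrete backend has already been selected, and with Container being the same class imported at the top of the test file (its import is elided here, so treat it as given):

import numpy as np
import ivy

input_channels, output_channels = 4, 3
x = ivy.array(np.ones((2, input_channels)), 'float32')

# with internally-created default variables
layer = ivy.Linear(input_channels, output_channels)
y = layer(x)  # shape (2, output_channels)

# or with explicit Xavier/Glorot-initialised variables, as in the test above
wlim = (6 / (output_channels + input_channels)) ** 0.5
w = ivy.variable(ivy.array(
    np.random.uniform(-wlim, wlim, (output_channels, input_channels)), 'float32'))
b = ivy.variable(ivy.zeros([output_channels]))
layer = ivy.Linear(input_channels, output_channels, v=Container({'w': w, 'b': b}))
y = layer(x)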
Example #3
def test_sgd_optimizer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str,
                       call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels))**0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)

    def loss_fn(v_):
        out = linear_layer(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # optimizer
    optimizer = ivy.SGD()

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, linear_layer.v)
        linear_layer.v = optimizer.step(linear_layer.v, grads)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1, )
    else:
        assert loss.shape == ()
    # value test
    assert ivy.reduce_max(ivy.abs(grads.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
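Stripped of the pytest plumbing, the training pattern in this test is just a loop over execute_with_gradients and optimizer.step; a minimal sketch, assuming a gradient-supporting backend is active and using a concrete batch shape of [2] for illustration:

import ivy

input_channels, output_channels = 4, 3
x = ivy.linspace(ivy.zeros([2]), ivy.ones([2]), input_channels)
linear_layer = ivy.Linear(input_channels, output_channels)
optimizer = ivy.SGD()

def loss_fn(v_):
    # forward pass with the candidate variables, reduced to a scalar loss
    return ivy.reduce_mean(linear_layer(x, v=v_))[0]

for step in range(10):
    loss, grads = ivy.execute_with_gradients(loss_fn, linear_layer.v)
    linear_layer.v = optimizer.step(linear_layer.v, grads)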
Example #4
def _build(self, *args, **kwargs):
    # one Linear layer per layer requested in the network spec
    self._layers = [ivy.Linear(3, 1) for _ in range(self._spec.num_layers)]
Example #5
def _build(self, *args, **kwargs):
    self._l1 = ivy.Linear(1, 1)
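Examples #4 and #5 are _build methods cut out of their enclosing modules. In ivy, _build is invoked when the module is built (eagerly at construction or lazily, depending on the module's build mode), and layers created inside it are picked up as module variables. A rough sketch of the surrounding class; the class name and the _forward body are assumptions:

import ivy

class TinyModule(ivy.Module):

    def __init__(self, dev_str='cpu'):
        ivy.Module.__init__(self, dev_str)

    def _build(self, *args, **kwargs):
        # layers created here become the module's trainable variables
        self._l1 = ivy.Linear(1, 1)

    def _forward(self, x):
        return self._l1(x)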
Example #6
def __init__(self, in_size, out_size, hidden_size=64):
    self._linear0 = ivy.Linear(in_size, hidden_size)
    self._linear1 = ivy.Linear(hidden_size, hidden_size)
    self._linear2 = ivy.Linear(hidden_size, out_size)
    # Module init comes last, so the Linear layers above are found as variables
    ivy.Module.__init__(self, 'cpu')
Example #7
def __init__(self, in_size, out_size, dev_str='cpu', hidden_size=64):
    linear0 = ivy.Linear(in_size, hidden_size)
    linear1 = ivy.Linear(hidden_size, hidden_size)
    linear2 = ivy.Linear(hidden_size, out_size)
    self._layers = [linear0, linear1, linear2]
    ivy.Module.__init__(self, dev_str)
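Examples #6 and #7 construct the same three-layer MLP, differing only in whether the layers are held as individual attributes or collected in a list. A matching _forward for the attribute version might look like the sketch below, where the tanh activations are an assumption of this sketch rather than part of the original:

def _forward(self, x):
    x = ivy.tanh(self._linear0(x))
    x = ivy.tanh(self._linear1(x))
    return self._linear2(x)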