Code example #1
def test_einsum_bkwd3(shape, optimize, data):
    script = "ia, ia, i -> a"
    x = Tensor(np.random.rand(*shape))

    y_shape = data.draw(broadcastable_shape(shape, min_dim=2, max_dim=2))
    y = Tensor(np.random.rand(*y_shape))

    z_shape = data.draw(broadcastable_shape(x.shape[:1], min_dim=1, max_dim=1))
    z = Tensor(np.random.rand(*z_shape))

    grad = np.random.rand(x.shape[1])

    o = einsum(script, x, y, z, optimize=optimize)
    o.backward(grad)

    def f(x, y, z):
        return np.einsum(script, x, y, z)

    dx, dy, dz = numerical_gradient_full(f,
                                         x.data,
                                         y.data,
                                         z.data,
                                         back_grad=grad,
                                         as_decimal=False)

    assert_allclose(x.grad, dx, atol=1e-6)
    assert_allclose(y.grad, dy, atol=1e-6)
    assert_allclose(z.grad, dz, atol=1e-6)
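For reference, the subscript string "ia, ia, i -> a" multiplies the three operands elementwise over the shared index i and then sums that index out. A minimal numpy-only check of that reading (the sizes 4 and 3 are illustrative):

import numpy as np

i_dim, a_dim = 4, 3
x = np.random.rand(i_dim, a_dim)
y = np.random.rand(i_dim, a_dim)
z = np.random.rand(i_dim)

# result[a] = sum_i x[i, a] * y[i, a] * z[i]
expected = (x * y * z[:, None]).sum(axis=0)
assert np.allclose(np.einsum("ia, ia, i -> a", x, y, z), expected)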
Code example #2
def test_einsum_bkwd3(shape, optimize, data):
    script = "ia, ia, i -> a"
    x = Tensor(np.random.rand(*shape))

    y_shape = data.draw(broadcastable_shapes(shape, min_dims=2, max_dims=2),
                        label="y_shape")
    y = Tensor(np.random.rand(*y_shape))

    z_shape = data.draw(broadcastable_shapes(x.shape[:1],
                                             min_dims=1,
                                             max_dims=1),
                        label="z_shape")
    z = Tensor(np.random.rand(*z_shape))

    try:
        o = einsum(script, x, y, z, optimize=optimize)
    except ValueError:
        assume(False)  # skip over invalid einsum shapes
        return

    grad = np.random.rand(*o.shape)
    o.backward(grad)

    def f(x, y, z):
        return np.einsum(script, x, y, z)

    dx, dy, dz = numerical_gradient_full(f,
                                         x.data,
                                         y.data,
                                         z.data,
                                         back_grad=grad)

    assert_allclose(x.grad, dx, atol=1e-6)
    assert_allclose(y.grad, dy, atol=1e-6)
    assert_allclose(z.grad, dz, atol=1e-6)
Code example #3
def __init__(self, D, C, P, S, N, rate, reg, bp_lim=5):
    """Initializes the trainable parameters.

        Parameters
        ----------
        D : (int) Size of the hidden layer (S+1, N, D).
        C : (int) Size of the input context, usually 1.
        P : (int) Size of the output data, usually also 1.
        S : (int) Size of the sentences in a clump.
        N : (int) Size of a clump.
        rate : (float) Learning rate.
        reg : (float) Regularization rate."""
    # DxD; D determines the number of trainable parameters for the 1st layer
    self.W = Tensor(0.001 * np.random.randn(D, D))
    # CxD, where C is the context size (1)
    self.U = Tensor(0.001 * np.random.randn(C, D))
    # DxP, where P is the size of the prediction (output)
    self.V = Tensor(0.001 * np.random.randn(D, P))

    self.N = N
    self.S = S
    self.bp_lim = bp_lim
    self.rate = rate
    self.reg = reg
    # initialize the hidden-layer state
    self.Hidden = np.zeros((1, D))
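A minimal usage sketch of the constructor above; the enclosing class name ``RNN`` and the concrete sizes are hypothetical, chosen only to illustrate the shapes described in the docstring:

# hypothetical class name and sizes, purely illustrative
model = RNN(D=64, C=1, P=1, S=20, N=32, rate=0.01, reg=1e-4)
# model.W has shape (64, 64), model.U (1, 64), model.V (64, 1), model.Hidden (1, 64)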
Code example #4
def test_static_softmax_integer():
    # reuse the test cases from below with integer arrays
    skew = np.array([0.87566484, 0.53596079, 0.85693981, 0.09526036])
    x = Tensor([0, 1, 2, 3])

    f = (softmax(x, constant=False) * skew).sum()

    out = np.array(0.33911235096116465)
    assert_allclose(actual=f.data, desired=out)

    f.backward()
    dx = np.array([0.01720112, 0.01715422, 0.12266443, -0.15701977])

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)

    skew = np.array([
        [0.87566484, 0.53596079, 0.85693981, 0.09526036],
        [0.32024455, 0.81532148, 0.2480434, 0.85119342],
        [0.57943085, 0.33958252, 0.95864464, 0.22881712],
    ])
    x = Tensor([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])

    f = (softmax(x, constant=False) * skew).sum()

    out = np.array(1.449875865467131)
    assert_allclose(actual=f.data, desired=out)

    f.backward()
    dx = np.array([
        [0.01720112, 0.01715422, 0.12266443, -0.15701977],
        [-0.01179518, 0.01108053, -0.10425844, 0.10497309],
        [0.00502799, -0.00723393, 0.12698131, -0.12477536],
    ])

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
Code example #5
def test_comparison_ops():
    x = Tensor([1, 3, 5])
    y = Tensor([1, 4, 2])
    for op in ("__lt__", "__le__", "__gt__", "__ge__", "__eq__", "__ne__"):
        tensor_out = getattr(Tensor, op)(x, y)
        array_out = getattr(np.ndarray, op)(x.data, y.data)
        assert_equal(actual=tensor_out, desired=array_out)
Code example #6
def test_einsum_bkwd1(num, optimize, data):
    x = Tensor(np.random.rand(num))
    y_shape = data.draw(broadcastable_shapes(x.shape, min_dims=1, max_dims=1))
    y = Tensor(np.random.rand(*y_shape))

    grad = data.draw(st.floats(-100, 100))
    o = einsum("i, i", x, y, optimize=optimize)
    o.backward(grad)

    def f(x, y):
        return np.einsum("i, i", x, y)

    dx, dy = numerical_gradient_full(f, x.data, y.data, back_grad=grad)

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
    assert_allclose(y.grad, dy, atol=1e-5, rtol=1e-5)

    o.null_gradients()
    assert x.grad is None
    assert y.grad is None

    # test broadcasting in reverse direction
    o = einsum("i, i", y, x, optimize=optimize)
    o.backward(grad)

    dy, dx = numerical_gradient_full(f, y.data, x.data, back_grad=grad)

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
    assert_allclose(y.grad, dy, atol=1e-5, rtol=1e-5)

    o.null_gradients()
Code example #7
File: test_tensor_manip.py  Project: edkek/MyGrad
def test_squeeze(x, data):
    axes = data.draw(valid_axes(x.ndim), label="axes")
    x_arr = Tensor(np.copy(x))
    x_arr2 = Tensor(np.copy(x))

    def f(x):
        return np.squeeze(x, axes)

    try:
        numpy_out = np.squeeze(x, axes)
    except ValueError:
        with raises(ValueError):
            squeeze(x_arr, axes, constant=False)
        return

    o = squeeze(x_arr, axes, constant=False)
    o_method = x_arr2.squeeze(axes)
    assert_allclose(o.data, numpy_out)
    assert_allclose(o_method.data, numpy_out)

    grad = data.draw(
        hnp.arrays(shape=o.shape,
                   dtype=float,
                   elements=st.floats(1, 10),
                   unique=True),
        label="grad",
    )
    o.backward(grad)
    o_method.backward(grad)

    dx, = numerical_gradient_full(f, x, back_grad=grad)

    assert_allclose(x_arr.grad, dx)
    assert_allclose(x_arr2.grad, dx)
Code example #8
def test_focal_loss(num_datum, num_classes, alpha, gamma, data, grad,
                    target_type):
    scores = data.draw(
        hnp.arrays(shape=(num_datum, num_classes),
                   dtype=float,
                   elements=st.floats(1, 100)))
    assume((abs(scores.sum(axis=1)) > 0.001).all())

    scores_mygrad = Tensor(scores)
    scores_nn = Tensor(scores)

    truth = np.zeros((num_datum, num_classes))
    targets = data.draw(
        st.tuples(*(st.integers(0, num_classes - 1)
                    for i in range(num_datum))))
    truth[range(num_datum), targets] = 1
    targets = target_type(targets)

    fl = focal_loss(softmax(scores_mygrad), targets, alpha=alpha,
                    gamma=gamma).mean()
    fl.backward(grad)

    nn_loss = softmax_focal_loss(scores_nn, targets, alpha=alpha,
                                 gamma=gamma).mean()
    nn_loss.backward(grad)

    assert isinstance(nn_loss, Tensor) and nn_loss.ndim == 0
    assert_allclose(nn_loss.data, fl.data, atol=1e-4, rtol=1e-4)
    assert_allclose(scores_nn.grad, scores_mygrad.grad, atol=1e-4, rtol=1e-4)

    nn_loss.null_gradients()
    assert scores_nn.grad is None
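Both loss functions above are expected to agree with the focal-loss definition of Lin et al., FL = -alpha * (1 - p_t)**gamma * log(p_t), where p_t is the softmax probability assigned to the true class. A hedged numpy reference for that per-datum quantity (the exact alpha-weighting convention used by mygrad is an assumption here):

import numpy as np

def reference_focal_loss(scores, targets, alpha, gamma):
    """Per-datum focal loss computed directly from the definition."""
    shifted = scores - scores.max(axis=1, keepdims=True)   # for numerical stability
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    p_true = probs[np.arange(len(targets)), targets]        # p_t for each datum
    return -alpha * (1 - p_true) ** gamma * np.log(p_true)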
Code example #9
def test_init_data():
    for data in [0, [], (0, 0), ((0, 0), (0, 0)), np.random.rand(3, 4, 2)]:
        assert_equal(actual=Tensor(data).data,
                     desired=np.asarray(data),
                     err_msg="Initialization with non-tensor failed")
        assert_equal(actual=Tensor(Tensor(data)).data,
                     desired=np.asarray(data),
                     err_msg="Initialization with tensor failed")
Code example #10
File: test_normal.py  Project: samaocarpenter/MyGrad
def test_normal(shape, mean, std, dtype, constant):
    tensor = normal(shape,
                    mean=Tensor(mean),
                    std=Tensor(std),
                    dtype=dtype,
                    constant=constant)
    assert tensor.shape == shape
    assert tensor.dtype == dtype
    assert tensor.constant == constant
Code example #11
def test_items(x):
    """ verify that tensor.item() mirrors array.item()"""
    tensor = Tensor(x)
    try:
        value = x.item()
        assert_allclose(value, tensor.item())
    except ValueError:
        with raises(ValueError):
            tensor.item()
Code example #12
def test_items(x):
    """ verify that tensor.item() mirrors array.item()"""
    tensor = Tensor(x)
    try:
        value = np.asarray(x).item()
        assert_array_equal(value, tensor.item())
    except ValueError:
        with raises(ValueError):
            tensor.item()
Code example #13
File: test_uniform.py  Project: samaocarpenter/MyGrad
def test_uniform(shape, bounds, dtype, constant):
    tensor = uniform(shape,
                     lower_bound=Tensor(bounds[0]),
                     upper_bound=Tensor(bounds[1]),
                     dtype=dtype,
                     constant=constant)
    assert tensor.shape == shape
    assert tensor.dtype == dtype
    assert tensor.constant == constant
Code example #14
def test_comparison_ops(
    op: str, x: np.ndarray, x_constant: bool, y_constant: bool, data: st.SearchStrategy
):
    y = data.draw(hnp.arrays(shape=x.shape, dtype=x.dtype, elements=st.floats(-10, 10)))
    x = Tensor(x, constant=x_constant)
    y = Tensor(y, constant=y_constant)
    assert hasattr(Tensor, op), "`Tensor` is missing the attribute {}".format(op)
    tensor_out = getattr(Tensor, op)(x, y)
    array_out = getattr(np.ndarray, op)(x.data, y.data)
    assert_equal(actual=tensor_out, desired=array_out)
Code example #15
def test_null_gradients(x, y, z, clear_graph):
    x = Tensor(x)
    y = Tensor(y)
    z = Tensor(z)

    f = x * y + z
    g = x + z * f * f

    # check side effects
    unused = 2 * g - f
    w = 1 * f
    assert unused is not None

    g.backward()
    assert x.grad is not None
    assert y.grad is not None
    assert z.grad is not None
    assert f.grad is not None
    assert g.grad is not None
    assert len(x._ops) > 0
    assert len(y._ops) > 0
    assert len(z._ops) > 0
    assert len(f._ops) > 0
    assert len(g._ops) > 0
    assert w.grad is None

    g.null_gradients(clear_graph=clear_graph)
    assert x.grad is None
    assert y.grad is None
    assert z.grad is None
    assert f.grad is None
    assert g.grad is None

    if clear_graph:
        assert len(x._ops) == 0
        assert len(y._ops) == 0
        assert len(z._ops) == 0
        assert len(f._ops) == 0
        assert len(g._ops) > 0
        assert x.creator is None
        assert y.creator is None
        assert z.creator is None
        assert f.creator is None
        assert g.creator is None
    else:
        assert len(x._ops) > 0
        assert len(y._ops) > 0
        assert len(z._ops) > 0
        assert len(f._ops) > 0
        assert len(g._ops) > 0
        assert x.creator is None
        assert y.creator is None
        assert z.creator is None
        assert f.creator is not None
        assert g.creator is not None
Code example #16
def test_conv_ND_bkwd(data, x, num_filters):
    """ Test conv-backprop 1D-3D with various strides and dilations."""
    x = x[0:min(x.shape[0], 1), 0:min(x.shape[1], 1)]
    win_dim = x.ndim - 2
    win_shape = data.draw(st.tuples(*(st.integers(1, s)
                                      for s in x.shape[-win_dim:])),
                          label="win_shape")
    kernels = data.draw(hnp.arrays(dtype=float,
                                   shape=(num_filters, x.shape[1], *win_shape),
                                   elements=st.floats(-10, 10)),
                        label="kernels")

    stride = data.draw(st.tuples(*(st.integers(1, s)
                                   for s in x.shape[-win_dim:])),
                       label="stride")

    max_dilation = np.array(x.shape[-win_dim:]) // win_shape
    dilation = data.draw(st.tuples(*(st.integers(1, s) for s in max_dilation)),
                         label="dilation")
    conf = dict(stride=stride, dilation=dilation)

    # skip invalid data/kernel/stride/dilation combinations
    assume(
        get_outshape(x.shape[2:], kernels.shape[2:], stride, dilation)
        is not None)

    x = Tensor(x)
    kernels = Tensor(kernels)

    out = conv_nd(x, kernels, **conf)
    grad = data.draw(hnp.arrays(shape=out.shape,
                                dtype=float,
                                elements=st.floats(-10, 10),
                                unique=True),
                     label="grad")

    out.backward(grad)
    grads_numerical = numerical_gradient_full(_conv_nd,
                                              *(i.data for i in (x, kernels)),
                                              back_grad=grad,
                                              kwargs=conf,
                                              as_decimal=False)

    for n, (arr, d_num) in enumerate(zip((x, kernels), grads_numerical)):
        assert_allclose(
            arr.grad,
            d_num,
            atol=1e-4,
            rtol=1e-4,
            err_msg="arr-{}: numerical derivative and mygrad "
                    "derivative do not match".format(n))
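For reference, the spatial output size of the un-padded, strided, dilated convolution exercised above follows the usual formula; a sketch of what ``get_outshape`` is assumed to compute per dimension (the exact validity checks it performs are not shown here):

def expected_out_size(in_size, kernel_size, stride, dilation):
    # effective extent of the kernel once dilation is applied
    span = dilation * (kernel_size - 1) + 1
    out = (in_size - span) // stride + 1
    return out if out > 0 else None  # assumed invalid when the window does not fit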
Code example #17
def compare_backprop(*operands, atol=1e-5, rtol=1e-5, optimize=False):
    """ Compare back-propagation through mygrad-einsum, and compare
        against numerical derivative"""
    if isinstance(operands[0], str):
        # operands form: "ijk, ijk", x, y
        script = operands[0]
        vars = operands[1:]
        vars = tuple(np.asarray(i).astype(float) for i in vars)
        tensors = tuple(Tensor(i) for i in vars)

        def f(*args):
            return np.einsum(script, *args)

        out = einsum(script, *tensors, optimize=optimize)
    else:
        # operands form: op0, sublist0, op1, sublist1, ..., [sublistout]
        end = -1 if len(operands) % 2 else None  # -1 if sublistout is included
        vars = tuple(np.asarray(i).astype(float) for i in operands[:end:2])
        tensors = tuple(Tensor(i) for i in vars)

        def f(*args):
            x = tuple(chain.from_iterable(zip(args, operands[1::2])))
            if end is not None:
                x += (operands[-1], )
            return np.einsum(*x)

        x = tuple(chain.from_iterable(zip(tensors, operands[1::2])))
        if end is not None:
            x += (operands[-1], )
        out = einsum(*x, optimize=optimize)

    grad = np.random.rand(*out.shape)
    #    grad = np.ones(out.shape)
    out.backward(grad)

    numerical_derivs = numerical_gradient_full(f,
                                               *vars,
                                               back_grad=grad,
                                               as_decimal=False)

    for n, (dnum, tensor) in enumerate(zip(numerical_derivs, tensors)):
        assert dnum.shape == tensor.grad.shape
        assert_allclose(
            dnum,
            tensor.grad,
            atol=atol,
            rtol=rtol,
            err_msg="The numerical and mygrad derivatives disagree for "
            "variable index {}".format(n))
Code example #18
def test_einsum_bkwd5(optimize):
    x = Tensor(np.random.rand(5, 3, 4, 6))
    y = Tensor(np.random.rand(1, 5, 6, 2))
    grad = np.random.rand(1, 3, 4, 2)

    def f(x, y):
        return np.einsum("iBCj, aijd -> aBCd", x, y)

    o = einsum("iBCj, aijd -> aBCd", x, y, optimize=optimize)
    o.backward(grad)

    dx, dy = numerical_gradient_full(f, x.data, y.data, back_grad=grad)

    assert_allclose(x.grad, dx, atol=1e-6)
    assert_allclose(y.grad, dy, atol=1e-6)
Code example #19
def test_properties(a, constant, scalar, creator):
    array = np.asarray(a)
    if creator:
        ref = Operation()
        tensor = Tensor(a, constant=constant, _creator=ref, _scalar_only=scalar)
    else:
        tensor = Tensor(a, constant=constant, _scalar_only=scalar)

    assert tensor.ndim == array.ndim
    assert tensor.shape == array.shape
    assert tensor.size == array.size
    assert len(tensor) == len(array)
    assert tensor.dtype == array.dtype
    assert_equal(actual=tensor.data, desired=a)
    assert (not creator) or tensor.creator is ref
Code example #20
File: test_conv.py  Project: samaocarpenter/MyGrad
def test_padding(ndim: int, data: st.DataObject):
    """Ensure that convolving a padding-only image with a commensurate kernel yields the single entry: 0"""
    padding = data.draw(st.integers(1, 3)
                        | st.tuples(*[st.integers(1, 3)] * ndim),
                        label="padding")
    x = Tensor(
        data.draw(
            hnp.arrays(shape=(1, 1) + (0, ) * ndim,
                       dtype=float,
                       elements=st.floats()),
            label="x",
        ))
    pad_tuple = padding if isinstance(padding, tuple) else (padding, ) * ndim
    kernel = data.draw(
        hnp.arrays(
            shape=(1, 1) + tuple(2 * p for p in pad_tuple),
            dtype=float,
            elements=st.floats(allow_nan=False, allow_infinity=False),
        ))
    out = conv_nd(x, kernel, padding=padding, stride=1)
    assert out.shape == (1, ) * x.ndim
    assert out.item() == 0.0

    out.sum().backward()
    assert x.grad.shape == x.shape
Code example #21
def test_static_softmax2d():
    # Verified against theano.tensor.softmax

    skew = np.array([
        [0.87566484, 0.53596079, 0.85693981, 0.09526036],
        [0.32024455, 0.81532148, 0.2480434, 0.85119342],
        [0.57943085, 0.33958252, 0.95864464, 0.22881712],
    ])

    x = np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0],
                  [8.0, 9.0, 10.0, 11.0]])

    x = Tensor(x)
    f = (softmax(x, constant=False) * skew).sum()

    out = np.array(1.449875865467131)
    assert_allclose(actual=f.data, desired=out)

    f.backward()
    dx = np.array([
        [0.01720112, 0.01715422, 0.12266443, -0.15701977],
        [-0.01179518, 0.01108053, -0.10425844, 0.10497309],
        [0.00502799, -0.00723393, 0.12698131, -0.12477536],
    ])

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
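The expected forward value asserted above can be reproduced directly with numpy: take the row-wise softmax of x and sum its product with skew.

x_np = np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0], [8.0, 9.0, 10.0, 11.0]])
row_softmax = np.exp(x_np) / np.exp(x_np).sum(axis=1, keepdims=True)
print((row_softmax * skew).sum())  # ~1.449875865467131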
Code example #22
def test_static_logsoftmax2d():
    # Verified against theano.tensor.softmax
    skew = np.array([
        [0.87566484, 0.53596079, 0.85693981, 0.09526036],
        [0.32024455, 0.81532148, 0.2480434, 0.85119342],
        [0.57943085, 0.33958252, 0.95864464, 0.22881712],
    ])

    x = np.array([[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0],
                  [8.0, 9.0, 10.0, 11.0]])

    x = Tensor(x)
    f = (logsoftmax(x, constant=False) * skew).sum()

    out = np.array(-13.722895761739732)
    assert_allclose(actual=f.data, desired=out)

    f.backward()
    dx = np.array([
        [0.79988389, 0.3299668, 0.29699009, -1.42684078],
        [0.24859989, 0.62057111, -0.281343, -0.587828],
        [0.5119002, 0.15601518, 0.45965687, -1.12757225],
    ])

    assert_allclose(x.grad, dx, atol=1e-5, rtol=1e-5)
Code example #23
def test_axis_interchange_methods(op: str, constant: bool):
    x = Tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], constant=constant)
    method_out = getattr(x, op)(0, -1)
    function_out = getattr(mg, op)(x, 0, -1)
    assert_equal(method_out.data, function_out.data)
    assert method_out.constant is constant
    assert type(method_out.creator) is type(function_out.creator)
Code example #24
def glorot_uniform(*shape, gain=1):
    r''' Initialize a :class:`mygrad.Tensor` according to the uniform initialization procedure
    described by Glorot and Bengio.

    Parameters
    ----------
    shape : Sequence[int]
        The shape of the output Tensor. Note that `shape` must be at least two-dimensional.

    gain : Real, optional (default=1)
        The gain (scaling factor) to apply.

    Returns
    -------
    mygrad.Tensor, shape=`shape`
        A Tensor, with values initialized according to the glorot uniform initialization.

    Extended Description
    --------------------
    Glorot and Bengio put forward this initialization in the paper
        "Understanding the Difficulty of Training Deep Feedforward Neural Networks"
    http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf

    A Tensor :math:`W` initialized in this way is drawn from the uniform distribution

    .. math::
        U[-\frac{\sqrt{6}}{\sqrt{n_j+n_{j+1}}}, \frac{\sqrt{6}}{\sqrt{n_j+n_{j+1}}}]
    '''
    assert len(shape) >= 2, 'Glorot Uniform initialization requires at least two dimensions!'

    tensor = np.empty(shape)
    bound = gain * np.sqrt(6 / ((shape[0] + shape[1]) * tensor[0, 0].size))
    return Tensor(np.random.uniform(-bound, bound, shape))
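A brief usage sketch of the initializer above; since the values are random, only the shape and the Glorot bound sqrt(6 / (fan_in + fan_out)) can be checked:

w = glorot_uniform(50, 100, gain=1)
assert w.shape == (50, 100)
assert np.all(np.abs(w.data) <= np.sqrt(6 / (50 + 100)))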
Code example #25
def test_init_params(data, creator, constant, scalar_only, dtype, numpy_dtype):
    elements = ((
        lambda x, y: st.floats(x, y, width=8 * np.dtype(numpy_dtype).itemsize))
                if np.issubdtype(numpy_dtype, np.floating) else st.integers)
    a = data.draw(
        hnp.arrays(
            shape=hnp.array_shapes(max_side=3, max_dims=5),
            dtype=numpy_dtype,
            elements=elements(-100, 100),
        ),
        label="a",
    )
    if dtype is not None:
        a = a.astype(dtype)

    tensor = Tensor(a,
                    _creator=creator,
                    constant=constant,
                    _scalar_only=scalar_only,
                    dtype=dtype)

    assert tensor.creator is creator
    assert tensor.constant is constant
    assert tensor.scalar_only is scalar_only
    assert tensor.dtype is a.dtype
    assert_equal(tensor.data, a)
    assert tensor.grad is None
Code example #26
File: uber.py  Project: flappyBug/MyGrad
        def wrapper(x, constant, data):
            arrs = [x]  # list of drawn arrays to feed to functions

            for i in range(1, self.num_arrays):  # draw additional arrays according to `num_arrays`
                y = data.draw(self.gen_other_array(x, i), label="array-{}".format(i))
                arrs.append(y)

            arr_copies = [copy(arr) for arr in arrs]  # list of array-copies to check for mutation

            # set or draw keyword args to be passed to functions
            kwargs = {k: (data.draw(v(*arrs), label="kwarg: {}".format(k)) if callable(v) else v)
                      for k, v in self.kwargs.items()}

            for i, arr in enumerate(arrs):  # assure arrays don't contain forbidden values
                for value in self.index_to_no_go.get(i, ()):
                    assume(np.all(arr != value))

            # execute mygrad and "true" functions. Compare outputs and check mygrad behavior
            o = self.op(*(Tensor(i) for i in arrs), **kwargs, constant=constant)
            tensor_out = o.data
            true_out = self.true_func(*arrs, **kwargs)

            assert isinstance(o, Tensor), \
                "`mygrad_func` returned type {}, should return `mygrad.Tensor`".format(type(o))
            assert o.constant is constant, \
                "`mygrad_func` returned tensor.constant={}, should be constant={}".format(o.constant, constant)

            assert_allclose(actual=tensor_out, desired=true_out,
                            err_msg="`mygrad_func(x)` and `true_func(x)` produce different results")

            for n, (arr, arr_copy) in enumerate(zip(arrs, arr_copies)):
                assert_array_equal(arr, arr_copy,
                                   err_msg="arr-{} was mutated during forward prop".format(n))
Code example #27
File: funcs.py  Project: samaocarpenter/MyGrad
def rand(*shape, constant=False):
    """ Create a Tensor of the given shape and populate it with random
    samples from a uniform distribution over [0, 1).

    Parameters
    ----------
    shape: d0, d1, ... dn : int, optional
        The dimensions of the returned tensor; these must be non-negative.
        If no argument is given, a zero-dimensional Tensor is returned.

    constant : bool, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient)

    Returns
    -------
    mygrad.Tensor
        A ``shape``-shaped Tensor of floating-point samples from the uniform distribution
        over [0, 1), or a zero-dimensional Tensor if no dimensions were supplied.

    Examples
    --------
    >>> from mygrad.random import rand
    >>> rand(3,4)
    Tensor([[0.9805903 , 0.82640985, 0.88230632, 0.73099815],
            [0.24845968, 0.12532893, 0.63171607, 0.32543228],
            [0.66029533, 0.79285341, 0.54967228, 0.25178508]])
    """

    return Tensor(np.random.rand(*shape), constant=constant)
Code example #28
File: funcs.py  Project: samaocarpenter/MyGrad
def sample(shape=None, constant=False):
    """ Return random floats in the half-open interval [0.0, 1.0).

    To create a random sample of a given shape on the interval [a, b), call
    (b-a) * sample(shape) + a

    Parameters
    ----------
    shape: int or tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn.
        Default is None, in which case a single value is returned.

    constant : bool, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient)

    Returns
    -------
    mygrad.Tensor
        ``shape``-shaped Tensor of random floats from the uniform distribution over
        [0.0, 1.0), or a zero-dimensional Tensor if ``shape`` is not provided.

    Examples
    --------
    >>> from mygrad.random import sample
    >>> sample((3, 4))
    Tensor([[0.47263933, 0.10928814, 0.19737707, 0.30879006],
            [0.49870689, 0.05849937, 0.21095352, 0.09778017],
            [0.405788  , 0.91888808, 0.15061143, 0.63140668]])

    >>> sample()
    Tensor(0.50690423)
    """

    return Tensor(np.random.sample(shape), constant=constant)
Code example #29
File: funcs.py  Project: samaocarpenter/MyGrad
def random(shape=None, constant=False):
    """ Return random floats in the half-open interval [0.0, 1.0).

    To create a random sample of a given shape on the interval [a, b), call
    (b-a) * random(shape) + a

    Parameters
    ----------
    shape: int or tuple of ints, optional
        Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn.
        Default is None, in which case a single value is returned.

    constant : bool, optional (default=False)
        If ``True``, the returned tensor is a constant (it
        does not back-propagate a gradient)

    Returns
    -------
    mygrad.Tensor
        ``shape``-shaped Tensor of random floats from the uniform distribution over
        [0.0, 1.0), or a zero-dimensional Tensor if ``shape`` is not provided.

    Examples
    --------
    >>> from mygrad.random import random
    >>> random((2, 4))
    Tensor([[0.14928578, 0.28812813, 0.56885892, 0.49555962],
            [0.19780163, 0.51162365, 0.7849505 , 0.47864586]])
    """

    return Tensor(np.random.random(shape), constant=constant)
Code example #30
File: test_tensor_manip.py  Project: edkek/MyGrad
def test_transpose(x, data):
    axes = data.draw(
        valid_axes(x.ndim, min_dim=x.ndim, max_dim=x.ndim).map(
            lambda out: (out, ) if isinstance(out, int) else out),
        label="axes",
    )

    x_arr = Tensor(np.copy(x))

    o = transpose(x_arr, axes, constant=False)
    grad = data.draw(
        hnp.arrays(shape=o.shape,
                   dtype=float,
                   elements=st.floats(1, 10),
                   unique=True),
        label="grad",
    )

    o.backward(grad)

    def f(x):
        return np.transpose(x, axes)

    assert_allclose(o.data, f(x))

    dx, = numerical_gradient_full(f, x, back_grad=grad)

    assert_allclose(x_arr.grad, dx)

    out = transpose(x, constant=True)
    assert out.constant and not x_arr.constant