Example #1
    def backward(self, state, root_gradients, variables):
        x, loc, scale = state
        # Chain rule: scale the incoming gradient by the local gradient,
        # cast to float32, and force a C-contiguous buffer.
        _grad = root_gradients * np.ascontiguousarray(
            self.grad(x, loc, scale).astype(np.float32))

        # Hand the same gradient back for every requested input variable.
        for k in variables:
            variables[k] = _grad
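The point of wrapping the local gradient in np.ascontiguousarray(... .astype(np.float32)) is to hand back a float32, C-contiguous buffer regardless of how self.grad produced its result. A minimal, self-contained sketch (the array below is only an illustration, not the actual gradient):

import numpy as np

# A transposed float64 array is neither float32 nor C-contiguous.
raw = np.ones((3, 4), dtype=np.float64).T
print(raw.dtype, raw.flags['C_CONTIGUOUS'])            # float64 False

grad = np.ascontiguousarray(raw.astype(np.float32))    # same values, float32, C-ordered
print(grad.dtype, grad.flags['C_CONTIGUOUS'])          # float32 True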
Example #2
import numpy as np
import torch
import torch.nn.functional as torch_F


def _torch_convolve(A, B, axes=None, dot_axes=[(), ()], mode='full'):
    # Flip the kernel spatially, swap its first two axes, and copy the
    # resulting view into a C-contiguous buffer before building torch tensors.
    B = np.ascontiguousarray(np.transpose(B[:, :, ::-1, ::-1], (1, 0, 2, 3)))
    At, Bt = torch.tensor(A), torch.tensor(B)
    if tuple(dot_axes) == ([0], [0]):
        # Swap the first two axes of A, run conv2d with B as the input and the
        # swapped A as the kernel, then transpose and flip the result back.
        At = torch.transpose(At, 0, 1)
        yt = torch_F.conv2d(Bt, At)
        yt = torch.flip(torch.transpose(yt, 0, 1), (-2, -1))
    else:
        yt = torch_F.conv2d(At, Bt)
    return np.asarray(yt)
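Both the reversed slices (::-1) and np.transpose return views with negative or permuted strides rather than copies, and some consumers (torch.from_numpy, for instance) reject negative strides outright. np.ascontiguousarray materializes such a view into a fresh C-ordered array with the same values. A small sketch with an illustrative kernel shape:

import numpy as np

w = np.random.randn(16, 3, 3, 3)

flipped = w[:, :, ::-1, ::-1]                  # view with negative strides
swapped = np.transpose(flipped, (1, 0, 2, 3))  # view with permuted strides
print(swapped.flags['C_CONTIGUOUS'])           # False

packed = np.ascontiguousarray(swapped)         # fresh C-ordered copy, same values
print(packed.flags['C_CONTIGUOUS'])            # True
print(np.array_equal(packed, swapped))         # True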
Example #3
    def transform(self, X, y):
        """Project the centered data onto the top n_components eigenvectors."""
        XMat = np.array(X)
        yMat = np.array(y)

        if XMat.shape[0] != yMat.shape[0]:
            yMat = yMat.T
        assert XMat.shape[0] == yMat.shape[0]

        # Center the data, build the within/between-class scatter matrices,
        # and solve the generalized eigenproblem.
        XMat -= XMat.mean(axis=0)
        Sw, Sb = calc_Sw_Sb(XMat, yMat)
        evals, evecs = eig(Sw, Sb)

        # Keep the C-contiguous arrays returned by ascontiguousarray
        # (it does not modify its argument in place).
        evals = np.ascontiguousarray(evals)
        evecs = np.ascontiguousarray(evecs)

        # Sort the eigenvectors by eigenvalue in descending order.
        idx = np.argsort(evals)
        idx = idx[::-1]
        evecs = evecs[:, idx]

        self.W = evecs[:, :self.n_components]
        X_transformed = np.dot(XMat, self.W)

        return X_transformed
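Note that np.ascontiguousarray never works in place: it returns either the same array (if it is already C-contiguous) or a C-ordered copy, so the return value has to be kept. A minimal sketch using a Fortran-ordered stand-in for the eigenvector matrix:

import numpy as np

evecs = np.asfortranarray(np.random.randn(4, 4))   # stand-in for an F-ordered result
print(evecs.flags['C_CONTIGUOUS'])                  # False

np.ascontiguousarray(evecs)                         # no effect: the result is discarded
print(evecs.flags['C_CONTIGUOUS'])                  # still False

evecs = np.ascontiguousarray(evecs)                 # keep the returned copy instead
print(evecs.flags['C_CONTIGUOUS'])                  # True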
Example #4
def test_accelerated_equivalence():
    warnings.filterwarnings("ignore")
    batch = 10
    in_ch = 3
    out_ch = 16
    k_size = 3

    # Random input batch and kernel; the kernel is moved to
    # (in_ch, out_ch, kH, kW) layout and packed into a C-contiguous buffer.
    X = np.random.randn(batch, in_ch, 32, 32)
    w = np.random.randn(out_ch, in_ch, k_size, k_size)
    w = np.ascontiguousarray(np.transpose(w, (1, 0, 2, 3)))

    # Run the convolution and the gradient computation with and without the
    # accelerated backend and check that the results agree.
    y = autocrit.nn.conv.convolve(X, w, accelerated=False, **CONV_KWARGS)
    accelerated_y = autocrit.nn.conv.convolve(X, w, accelerated=True, **CONV_KWARGS)

    loss_grads = loss_grad(X, w)
    accelerated_loss_grads = accelerated_loss_grad(X, w)

    assert np.allclose(y, accelerated_y),\
        "accelerated output not equal to autograd output"
    assert np.allclose(loss_grads, accelerated_loss_grads),\
        "accelerated gradients not equal to autograd gradients"
Example #5
    def backward(self, state, root_gradients):
        arg = state
        # Append two singleton axes so the incoming gradient broadcasts against
        # the local gradient, which is packed into a C-contiguous array.
        return root_gradients.reshape(root_gradients.shape + (1, 1)) \
            * np.ascontiguousarray(self.grad(arg))
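The reshape appends two singleton axes to the incoming gradient so that it broadcasts element-wise against the local gradient from self.grad, which presumably carries two extra trailing dimensions. A sketch with hypothetical shapes:

import numpy as np

root_gradients = np.random.randn(8, 5)        # incoming gradient, shape (8, 5)
local_grad = np.random.randn(8, 5, 3, 3)      # local gradient, shape (8, 5, 3, 3)

out = root_gradients.reshape(root_gradients.shape + (1, 1)) \
    * np.ascontiguousarray(local_grad)
print(out.shape)                              # (8, 5, 3, 3)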
Example #6
    def forward(self, argument, device=None, output_to_retain=None):
        # Return the input (kept as state) together with the stacked slogdet
        # output (sign and log|det|) as a C-contiguous array.
        return argument, np.ascontiguousarray(np.stack(LA.slogdet(argument)).T)
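When LA.slogdet is applied to a stack of matrices it returns two arrays (signs and log-absolute-determinants); stacking them and transposing yields one (sign, log|det|) row per matrix, but the transpose is only a non-contiguous view, hence the final np.ascontiguousarray. A sketch with an illustrative batch:

import numpy as np
from numpy import linalg as LA

batch = np.random.randn(4, 3, 3)                  # a stack of four 3x3 matrices

sign, logabsdet = LA.slogdet(batch)               # two arrays of shape (4,)
pairs = np.stack((sign, logabsdet)).T             # one (sign, log|det|) row per matrix
print(pairs.shape, pairs.flags['C_CONTIGUOUS'])   # (4, 2) False -> .T is just a view

packed = np.ascontiguousarray(pairs)              # C-ordered copy safe to hand off
print(packed.flags['C_CONTIGUOUS'])               # True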