Example #1
File: cg.py  Project: tito21/pylops-gpu
def cg(A, y, x=None, niter=10, tol=1e-10):
    r"""Conjugate gradient

    Solve a system of equations given the square operator ``A`` and data ``y``
    using conjugate gradient iterations.

    Parameters
    ----------
    A : :obj:`pylops_gpu.LinearOperator`
        Operator to invert of size :math:`[N \times N]`
    y : :obj:`torch.Tensor`
        Data of size :math:`[N \times 1]`
    x : :obj:`torch.Tensor`, optional
        Initial guess
    niter : :obj:`int`, optional
        Number of iterations
    tol : :obj:`float`, optional
        Residual norm tolerance

    Returns
    -------
    x : :obj:`torch.Tensor`
        Estimated model
    iiter : :obj:`int`
        Number of performed iterations

    """
    complex_problem = isinstance(y, ComplexTensor)
    #if not isinstance(A, LinearOperator):
    #    A = aslinearoperator(A)
    if x is None:
        if complex_problem:
            x = ComplexTensor(torch.zeros((2 * y.shape[-1], 1),
                                          dtype=y.dtype)).t()
        else:
            x = torch.zeros_like(y)
    r = y - A.matvec(x)
    c = r.clone()
    if complex_problem:
        c = ComplexTensor(c)
    kold = torch.sum(r * r)

    iiter = 0
    while iiter < niter and torch.abs(kold) > tol:
        Ac = A.matvec(c)
        cAc = (c * Ac).sum() if complex_problem else torch.sum(c * Ac)
        a = divide(kold, cAc) if complex_problem else kold / cAc
        x += a * c
        r -= a * Ac
        k = torch.sum(r * r)
        b = k / kold
        c = r + b * c
        kold = k
        iiter += 1
    return x, iiter
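
A minimal usage sketch for the real-valued path (hedged: `_DenseOp` below is a hypothetical stand-in for a `pylops_gpu.LinearOperator`, assuming only that the operator exposes `matvec`):

import torch

class _DenseOp:
    # hypothetical wrapper turning a dense matrix into a matvec-style operator
    def __init__(self, M):
        self.M = M
        self.shape = M.shape

    def matvec(self, v):
        return self.M @ v

# a symmetric positive-definite system, so plain CG applies
M = torch.tensor([[4., 1.], [1., 3.]])
y = torch.tensor([1., 2.])
xhat, iiter = cg(_DenseOp(M), y, niter=50, tol=1e-12)
print(xhat, iiter)  # xhat approaches the solution of M x = y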
Example #2
def test_complex_rmult():
    c = ComplexTensor(torch.zeros(4, 3)) + 1
    c = (4 + 3j) * c
    c = c.view(-1).data.numpy()

    # do the same in numpy
    sol = np.zeros((2, 3)).astype(np.complex64) + 1
    sol = (4 + 3j) * sol
    sol = sol.flatten()
    sol = list(sol.real) + list(sol.imag)

    assert np.array_equal(c, sol)
Example #3
def test_complex_scalar_sum():
    c = ComplexTensor(torch.zeros(4, 3))
    c = c + (4 + 3j)
    c = c.view(-1).data.numpy()

    # do the same in numpy
    sol = np.zeros((2, 3)).astype(np.complex64)
    sol = sol + (4 + 3j)
    sol = sol.flatten()
    sol = list(sol.real) + list(sol.imag)

    assert np.array_equal(c, sol)
Example #4
def test_shape():
    # test sizing when init with tensor
    c = ComplexTensor(torch.zeros(4, 3))
    size = c.shape
    n, m = size[-2:]
    assert n == 2
    assert m == 3

    # test sizing when init with dim spec
    c = ComplexTensor(12, 8)
    size = c.shape
    n, m = size[-2:]
    assert n == 12
    assert m == 8
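
These assertions reflect the library's stacking convention: a real tensor of shape (2n, m) is read as the real rows stacked on top of the imaginary rows, giving an n x m complex matrix, while the dimension-spec constructor takes the complex shape directly. A small NumPy illustration of that reading (an inference from these tests, not library code):

import numpy as np
import torch

t = torch.arange(12.).reshape(4, 3)    # what a tensor-based init receives
re, im = t[:2].numpy(), t[2:].numpy()  # top half: real, bottom half: imaginary
z = re + 1j * im                       # the 2 x 3 complex matrix it represents
print(z.shape)                         # (2, 3)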
Example #5
def test_real_matrix_sum():
    c = ComplexTensor(torch.zeros(4, 3))
    r = torch.ones(2, 3)
    c = c + r
    c = c.view(-1).data.numpy()

    # do the same in numpy
    sol = np.zeros((2, 3)).astype(np.complex64)
    sol_r = np.ones((2, 3))
    sol = sol + sol_r
    sol = sol.flatten()
    sol = list(sol.real) + list(sol.imag)

    assert np.array_equal(c, sol)
Example #6
def test_complex_complex_ele_mult():
    """
    Complex mtx x complex mtx elementwise multiply
    :return:
    """
    c = ComplexTensor(torch.zeros(4, 3)) + 1
    c = c * c
    c = c.view(-1).data.numpy()

    # do the same in numpy
    sol = np.zeros((2, 3)).astype(np.complex64) + 1
    sol = sol * sol
    sol = sol.flatten()
    sol = list(sol.real) + list(sol.imag)

    assert np.array_equal(c, sol)
Example #7
    def _power(self, fun, x):
        res = x.clone()
        if isinstance(x, ComplexTensor):
            res = ComplexTensor(res)
        for i in range(self.args[1]):
            res = fun(res)
        return res
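
A hedged reading of this helper: it applies `fun` to a clone of `x` a fixed number of times (`self.args[1]`), i.e. it evaluates a power of an operator by repeated application rather than by forming the power explicitly. A self-contained sketch of the same pattern:

import torch

def apply_n(fun, x, n):
    # apply fun to a clone of x, n times in a row
    res = x.clone()
    for _ in range(n):
        res = fun(res)
    return res

M = torch.tensor([[2., 0.], [0., 3.]])
v = torch.tensor([1., 1.])
print(apply_n(lambda u: M @ u, v, 3))  # tensor([ 8., 27.]) == M^3 @ v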
Example #8
def test_complex_complex_mm():
    """
    Complex mtx x complex mtx matrix multiply
    :return:
    """
    c = ComplexTensor(torch.zeros(4, 3)) + 1
    cc = c.mm(c.t())
    cc = cc.view(-1).data.numpy()

    # do the same in numpy
    np_c = np.ones((2, 3)).astype(np.complex64)
    np_cc = np.matmul(np_c, np_c.T)

    # compare
    np_cc = np_cc.flatten()
    np_cc = list(np_cc.real) + list(np_cc.imag)

    assert np.array_equal(np_cc, cc)
Example #9
def __test_torch_op(complex_op, torch_op):
    a = ComplexTensor(torch.zeros(4, 3)) + 1
    b = ComplexTensor(torch.zeros(4, 3)) + 2
    c = ComplexTensor(torch.zeros(4, 3)) + 3

    d = complex_op([a, b, c], dim=0)
    size = list(d.size())

    # double the second-to-last axis because we always halve it when generating tensors
    size[-2] *= 2

    # compare against regular torch implementation
    r_a = torch.zeros(4, 3)
    r_b = torch.zeros(4, 3)
    r_c = torch.zeros(4, 3)
    r_d = torch_op([r_a, r_b, r_c], dim=0)
    t_size = r_d.size()

    for i in range(len(size)):
        assert size[i] == t_size[i]
Example #10
def test_complex_real_mm():
    """
    Complex mtx x real mtx matrix multiply
    :return:
    """
    c = ComplexTensor(torch.zeros(4, 3)) + 1
    r = torch.ones(2, 3) * 2 + 3
    cr = c.mm(r.t())
    cr = cr.view(-1).data.numpy()

    # do the same in numpy
    np_c = np.ones((2, 3)).astype(np.complex64)
    np_r = np.ones((2, 3)) * 2 + 3
    np_cr = np.matmul(np_c, np_r.T)

    # compare
    np_cr = np_cr.flatten()
    np_cr = list(np_cr.real) + list(np_cr.imag)

    assert np.array_equal(np_cr, cr)
Example #11
    def __getitem__(self, idx):
        X, y = self.data[idx]
        X = X.astype('float32')
        y = torch.from_numpy(np.array([y]))

        if len(X.shape) == 2:
            X = np.expand_dims(X, axis=0)

        if self.transform:
            X = self.transform(X)
        X = ComplexTensor(X)
        return X, y
Example #12
def test_grad():
    """
    Grad calculated first with tensorflow

    :return:
    """

    c = ComplexTensor([[1, 3, 5], [7, 9, 11], [2, 4, 6], [8, 10, 12]])
    c.requires_grad = True

    # simulate some ops
    out = c + 4
    out = out.mm(c.t())

    # calc grad
    out = out.sum()
    out.backward()

    # d_out/dc
    g = c.grad.view(-1).data.numpy()

    # solution (as provided by running same ops in tensorflow)
    """
    tf_c2 = tf.constant([[1+2j, 3+4j, 5+6j], [7+8j,9+10j,11+12j]], dtype=tf.complex64)
    
    with tf.GradientTape() as t:
    t.watch(tf_c2)
    tf_out = tf_c2 + 4
    tf_out = tf.matmul(tf_out, tf.transpose(tf_c2, perm=[1,0]))
    
    tf_y = tf.reduce_sum(tf_out)
    dy_dc2 = t.gradient(tf_y, tf_c2)
    
    # solution
    print(dy_dc2)
    """
    #
    sol = np.asarray([24, 32, 40, 24, 32, 40, -20, -28, -36, -20, -28, -36])
    assert np.array_equal(g, sol)
Example #13
def flatten(x):
    r"""Flatten torch ComplexTensor

    Parameters
    ----------
    x : :obj:`pytorch_complex_tensor.ComplexTensor`
        Torch ComplexTensor

    Returns
    -------
    xflattened : :obj:`pytorch_complex_tensor.ComplexTensor`
        Flattened Torch ComplexTensor

    """
    xflattened = ComplexTensor(np.vstack((x.real.view(-1), x.imag.view(-1))))
    return xflattened
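
A possible usage sketch, building the input by the same real-over-imaginary stacking used elsewhere in this project (the values are illustrative):

import numpy as np

x = ComplexTensor(np.vstack(([[1., 2.], [3., 4.]],     # real part
                             [[5., 6.], [7., 8.]])))   # imaginary part
xf = flatten(x)  # a 1 x 4 complex row: [1+5j, 2+6j, 3+7j, 4+8j]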
Example #14
def complextorch_fromnumpy(x):
    r"""Convert complex numpy array into torch ComplexTensor

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Numpy complex multi-dimensional array

    Returns
    -------
    xt : :obj:`pytorch_complex_tensor.ComplexTensor`
        Torch ComplexTensor multi-dimensional array

    """
    xt = ComplexTensor(np.vstack((np.real(x), np.imag(x))))
    return xt
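
A brief usage sketch (hypothetical values):

import numpy as np

x = np.array([[1 + 2j, 3 + 4j, 5 + 6j]], dtype=np.complex64)
xt = complextorch_fromnumpy(x)  # real row stacked over imaginary row
print(xt.shape)                 # expected (1, 3) under the stacking convention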
Example #15
def reshape(x, shape):
    r"""Reshape torch ComplexTensor

    Parameters
    ----------
    x : :obj:`pytorch_complex_tensor.ComplexTensor`
        Torch ComplexTensor
    shape : :obj:`tuple`
        New shape

    Returns
    -------
    xreshaped : :obj:`pytorch_complex_tensor.ComplexTensor`
        Reshaped Torch ComplexTensor

    """
    xreshaped = x.reshape([2] + list(shape))
    xreshaped = ComplexTensor(np.vstack((xreshaped[0], xreshaped[1])))
    return xreshaped
Example #16
def test_complex_real_ele_mult():
    """
    Complex mtx x real mtx elementwise multiply
    :return:
    """
    c = ComplexTensor(torch.zeros(4, 3)) + 1
    r = torch.ones(2, 3) * 2 + 3
    cr = c * r
    cr = cr.view(-1).data.numpy()

    # do the same in numpy
    np_c = np.ones((2, 3)).astype(np.complex64)
    np_r = np.ones((2, 3)) * 2 + 3
    np_cr = np_c * np_r

    # compare
    np_cr = np_cr.flatten()
    np_cr = list(np_cr.real) + list(np_cr.imag)

    assert np.array_equal(np_cr, cr)
Example #17
    def __init__(self, Layer, kwargs):
        super().__init__()
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.bias = kwargs.get('bias', False)
        # turn the bias off so as to only do matrix multiplication;
        # if you leave the bias on, the complex arithmetic does not
        # work out correctly
        kwargs['bias'] = False
        self.f_re = Layer(**kwargs)
        self.f_im = Layer(**kwargs)
        self.b = None
        out_dim_keyNames = {'out_channels', 'out_features'}
        self.outType = list(out_dim_keyNames.intersection(kwargs.keys()))[0]
        self.out_dim = kwargs[self.outType]
        if self.bias:
            b_r = np.random.randn(self.out_dim, 1).astype('float32')
            b_i = np.random.randn(self.out_dim, 1).astype('float32')
            z = b_r + 1j * b_i
            self.b = ComplexTensor(z)
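
The forward pass is not part of this excerpt. Given the f_re/f_im split above, complex multiplication would plausibly follow the identity (W_re + i W_im)(x_re + i x_im) = (W_re x_re - W_im x_im) + i (W_re x_im + W_im x_re); the sketch below is an assumption based on that identity, not the project's actual code:

import torch
import torch.nn as nn

class ComplexLinearSketch(nn.Module):
    # hypothetical forward pass matching the two-real-layers design
    def __init__(self, in_features, out_features):
        super().__init__()
        self.f_re = nn.Linear(in_features, out_features, bias=False)
        self.f_im = nn.Linear(in_features, out_features, bias=False)

    def forward(self, x_re, x_im):
        out_re = self.f_re(x_re) - self.f_im(x_im)
        out_im = self.f_re(x_im) + self.f_im(x_re)
        return out_re, out_im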
Example #18
def test_get_item():
    # init random complex numpy and ct tensors
    a = np.random.randint(0, 10, (3, 2, 3))
    a = a * (1 + 5j)
    ct = ComplexTensor(a)

    # match dim 0
    __assert_tensors_equal(ct[0], a[0])
    __assert_tensors_equal(ct[-1], a[-1])

    # match dim 1
    __assert_tensors_equal(ct[:, 0], a[:, 0])
    __assert_tensors_equal(ct[:, -1], a[:, -1])

    # match dim 2
    __assert_tensors_equal(ct[:, :, 0], a[:, :, 0])
    __assert_tensors_equal(ct[:, :, -1], a[:, :, -1])

    # match ranges
    __assert_tensors_equal(ct[0:1, 0, -2:], a[0:1, 0, -2:])
    __assert_tensors_equal(ct[-1:, -1:, -2:], a[-1:, -1:, -2:])
    __assert_tensors_equal(ct[:-1, :-1, :-2], a[:-1, :-1, :-2])
Example #19
# timing note from the original source: total time 113.9896514415741 on the
# alternative backends tried (ideep, hip, msnpu, mkldnn, opengl, opencl)
device = torch.device("cpu")
# total time 42.38628387451172 on CPU -- per the original author, not a
# significant speedup
# n_h is the hidden-layer dimension
n_in, n_h, n_out, batch_size = 10, 5, 1, 10
# build real and imaginary parts, then stack them into a ComplexTensor;
# converting to lists first makes the nested structure acceptable to the
# constructor
x0 = torch.randn(batch_size, n_in).tolist()
x1 = torch.randn(batch_size, n_in).tolist()
x = ComplexTensor([x0, x1])
# note from the original source: building the target y as a nested
# (2, 10, 1) ComplexTensor literal gets flattened and introduces an error;
# Example #20 constructs it via a numpy reshape instead
Example #20
# Build the target: start from a nested (2, 10, 1) list, convert it to a
# numpy array, and reshape it to (2, 10) so that the first row is the real
# part and the second row the imaginary part.
s = [[[1.0], [0.0], [0.0], [1.0], [1.0], [1.0], [0.0], [0.0], [1.0], [1.0]],
     [[0.5], [-0.2], [-0.5], [-0.3], [1.0], [-0.2], [0.8], [0.5], [-0.1],
      [0.1]]]
s0 = np.array(s)
s1 = s0.reshape(2, -1)
print(s1, s1.shape)  # (2, 10)
y = ComplexTensor(s1)
print(y, y.shape)
print(len(y))
# note from the original source: round-tripping a ComplexTensor through
# tolist()/torch.tensor() produces a different representation, which is why
# the numpy reshape route is used here
Example #21
File: cg.py  Project: tito21/pylops-gpu
def cgls(A, y, x=None, niter=10, damp=0., tol=1e-10):
    r"""Conjugate gradient least squares

    Solve an overdetermined system of equations given an operator ``A`` and
    data ``y`` using conjugate gradient iterations.

    Parameters
    ----------
    A : :obj:`pylops_gpu.LinearOperator`
        Operator to invert of size :math:`[N \times M]`
    y : :obj:`torch.Tensor`
        Data of size :math:`[N \times 1]`
    x : :obj:`torch.Tensor`, optional
        Initial guess
    niter : :obj:`int`, optional
        Number of iterations
    damp : :obj:`float`, optional
        Damping coefficient
    tol : :obj:`float`, optional
        Residual norm tolerance

    Returns
    -------
    x : :obj:`torch.Tensor`
        Estimated model
    iiter : :obj:`int`
        Number of performed iterations

    Notes
    -----
    Minimize the following functional using conjugate gradient
    iterations:

    .. math::
        J = || \mathbf{y} -  \mathbf{Ax} ||^2 + \epsilon || \mathbf{x} ||^2

    where :math:`\epsilon` is the damping coefficient.
    """
    ## naive approach ##
    # Op = A.H * A
    # y = A.H * y
    # return cg(Op, y, x=x, niter=niter, tol=tol)

    complex_problem = isinstance(y, ComplexTensor)
    # if not isinstance(A, LinearOperator):
    #    A = aslinearoperator(A)
    if x is None:
        if complex_problem:
            x = ComplexTensor(torch.zeros((2 * A.shape[1], 1),
                                          dtype=y.dtype)).t()
        else:
            x = torch.zeros(A.shape[1], dtype=y.dtype)
    s = y - A.matvec(x)
    r = A.rmatvec(s) - damp * x
    c = r.clone()
    if complex_problem:
        c = ComplexTensor(c)
    kold = torch.sum(r * r)
    q = A.matvec(c)
    iiter = 0
    while iiter < niter and torch.abs(kold) > tol:
        qq = (q * q).sum()
        a = divide(kold, qq) if complex_problem else kold / qq
        x += a * c
        s -= a * q
        r = A.rmatvec(s) - damp * x
        k = torch.sum(r * r)
        b = k / kold
        c = r + b * c
        q = A.matvec(c)
        kold = k
        iiter += 1
    return x, iiter
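
As with cg above, a minimal real-valued usage sketch; the `_DenseOp` wrapper is hypothetical, and cgls additionally requires the adjoint `rmatvec`:

import torch

class _DenseOp:
    # hypothetical dense-matrix stand-in exposing matvec and its adjoint
    def __init__(self, M):
        self.M = M
        self.shape = M.shape

    def matvec(self, v):
        return self.M @ v

    def rmatvec(self, v):
        return self.M.t() @ v

M = torch.tensor([[1., 0.], [1., 1.], [0., 1.]])  # overdetermined 3 x 2
y = torch.tensor([1., 2., 1.])
xhat, iiter = cgls(_DenseOp(M), y, niter=50, tol=1e-12)
print(xhat, iiter)  # least-squares solution of M x ~= y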
Example #22
from pytorch_complex_tensor import ComplexTensor

C = ComplexTensor([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])
C.requires_grad = True
print(C)

result_sin = C.sin()
result_cos = C.cos()
result_tan = C.tan()

print('Sin:')
print(result_sin)

print('Cos:')
print(result_cos)

print('Tan:')
print(result_tan)
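
For reference, these follow the usual complex extensions, e.g. sin(a + bi) = sin(a)cosh(b) + i cos(a)sinh(b). Under the stacking convention, C above represents the 2 x 3 complex matrix [[1+3j, 1+3j, 1+3j], [2+4j, 2+4j, 2+4j]], so NumPy can serve as a check (a sketch, assuming that reading):

import numpy as np

z = np.array([[1 + 3j] * 3, [2 + 4j] * 3], dtype=np.complex64)
print(np.sin(z))  # reference values for result_sin
print(np.cos(z))  # reference values for result_cos
print(np.tan(z))  # reference values for result_tan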
Example #23
device = torch.device("cpu")
# n_h is the hidden-layer dimension
n_in, n_h, n_out, batch_size = 10, 5, 1, 10
# build real and imaginary parts, then stack them into a ComplexTensor
x0 = torch.randn(batch_size, n_in).tolist()
x1 = torch.randn(batch_size, n_in).tolist()
x = ComplexTensor([x0, x1])
# results reported in the original source:
######################################
# LOSS MATRIX: UNDER BATCH SIZE 5000 #
# Y \ X    REAL   COMPLEX            #
# REAL     0.19   0.0464             #
# COMPLEX  0.17   0.07               #
######################################
Example #24
        x = self.fc1(x)
        x = F.softmax(x.abs(), dim=1)
        return x

model = SimpleCNN(num_output=11)


#%% create the loss and optimizer
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)

#%%

for epoch in range(1, num_epochs + 1):
    perm = np.random.permutation(N)

    for i in range(1, num_batches + 1):
        # take the i-th mini-batch from the shuffled indices;
        # (i - 1) * bz .. i * bz keeps the first batch and stays within N
        inds = perm[(i - 1) * bz:i * bz]
        x = ComplexTensor(X[inds])
        y = torch.from_numpy(Y[inds])

        optimizer.zero_grad()
        out = model(x)
        l = loss(out, y)
        l.backward()
        optimizer.step()

        if i % 100 == 0:    # print every 100 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch, i, l.item()))