Example #1
    def gen_wavelet(self, p_batch, scales=[1, 2, 3, 4]):

        # constants
        B = p_batch.size()[0]  # batch size
        N = p_batch.size()[1]  # num nodes
        S = len(scales)  # num dyads

        # iterate over batch
        batch_psis = []
        for indx, ent in enumerate(p_batch):
            psi = []

            # iterate over scales
            for j in scales:
                j_high = j
                j_low = j - 1
                W_i = torch.matrix_power(ent, 2**j_low) - torch.matrix_power(
                    ent, 2**j_high)
                psi.append(W_i.unsqueeze(0))

            # combine the filters into a len(scales) x N x N tensor
            psi = torch.cat(psi, 0)  # S x N x N

            # collect them into batch list
            batch_psis.append(psi.unsqueeze(0))  # 1 x S x N x N

        # combine filter banks for all entries in batch
        batch_psi = torch.cat(batch_psis, 0)  # B x S x N x N

        return batch_psi
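Usage note: each batch entry is expected to be a diffusion operator P, and the filter at scale j is psi_j = P^(2^(j-1)) - P^(2^j). A minimal self-contained sketch of that identity, assuming a lazy random-walk operator built from a toy 4-node cycle (illustrative names, not from the original):

import torch

# Toy adjacency of a 4-node cycle; P = (I + A D^{-1}) / 2 is a lazy random walk.
A = torch.tensor([[0., 1., 0., 1.],
                  [1., 0., 1., 0.],
                  [0., 1., 0., 1.],
                  [1., 0., 1., 0.]])
P = 0.5 * (torch.eye(4) + A / A.sum(0))

# Same per-scale computation as the loop above, for a single graph.
for j in [1, 2, 3, 4]:
    psi_j = torch.matrix_power(P, 2 ** (j - 1)) - torch.matrix_power(P, 2 ** j)
    print(j, psi_j.norm().item())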
Example #2
def propagation_matrix(adj, alpha=0.85, sigma=1):
    """
    Computes the propagation matrix  (1-alpha)(I - alpha D^{-sigma} A D^{sigma-1})^{-1}.

    Parameters
    ----------
    adj : tensor, shape [n, n]
        Adjacency matrix.
    alpha : float
        (1-alpha) is the teleport probability.
    sigma : int
        Hyper-parameter controlling the propagation style.
        Set sigma=1 to obtain the PPR matrix.
    Returns
    -------
    prop_matrix : tensor, shape [n, n]
        Propagation matrix.
    """
    deg = adj.sum(1)
    deg_min_sig = torch.matrix_power(torch.diag(deg), -sigma)
    # to save memory (~100 MB)
    if sigma - 1 == 0:
        deg_sig_min = torch.diag(torch.ones_like(deg))
    else:
        deg_sig_min = torch.matrix_power(torch.diag(deg), sigma - 1)

    n = adj.shape[0]
    pre_inv = torch.eye(n) - alpha * deg_min_sig @ adj @ deg_sig_min

    prop_matrix = (1 - alpha) * torch.inverse(pre_inv)
    del pre_inv, deg_min_sig, adj
    return prop_matrix
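A quick sanity check, assuming a toy 3-node path graph (illustrative input): with sigma=1 the operator is the personalized PageRank matrix (1-alpha)(I - alpha D^{-1} A)^{-1}, whose rows sum to 1 because D^{-1} A is row-stochastic.

import torch

adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 1.],
                    [0., 1., 0.]])
ppr = propagation_matrix(adj, alpha=0.85, sigma=1)
print(ppr.sum(1))  # expected: tensor([1., 1., 1.]) up to numerical error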
Example #3
def robustBound(A, B, C, iterm=N_STEPS):
    Q = torch.mm(C.t(), C)
    q2 = torch.norm(Q)
    P = torch.zeros(A.size())
    for i in range(2 * iterm):
        Ai = torch.matrix_power(A * q2, i)
        P = P + torch.mm(Ai, Ai.t())
    At = torch.matrix_power(A * q2, iterm)
    return torch.trace(torch.mm(torch.mm(B.t(), P), B)) + torch.trace(
        torch.mm(At.t(), At))
Example #4
def searchalgo(meas, A, lf=100, iter=150):
    #meas, measn, u, A,Fw = gensingledata(n=500,alpha=20,m=15,Ncount=1000,sigma=1)
    for j in range(iter):
        Fl = l1solv(torch.matrix_power(A, lf), meas, 2)
        lf2 = int(lf + 50 * np.random.randn())
        Fl2 = l1solv(torch.matrix_power(A, lf2), meas, 2)
        print(lf, lf2, Fl / Fl2)
        if (Fl / Fl2) > np.random.uniform(0.999, 1):
            lf = lf2
    return lf
Example #5
def matrix_pow_batch(A, k):
    ksorted, iidx = torch.sort(k)
    # Abusing bincount...
    count = torch.bincount(ksorted)
    nonzero = torch.nonzero(count)
    A = torch.matrix_power(A, 2**ksorted[0].item())
    last = ksorted[0]
    processed = count[nonzero[0]]
    for exp in nonzero[1:]:
        new, last = exp - last, exp
        A[iidx[processed:]] = torch.matrix_power(A[iidx[processed:]],
                                                 2**new.item())
        processed += count[exp]
    return A
Example #6
def matrix_power_two_batch(A, k):
    orig_size = A.size()
    A, k = A.flatten(0, -3), k.flatten()
    ksorted, idx = torch.sort(k)
    # Abusing bincount...
    count = torch.bincount(ksorted)
    nonzero = torch.nonzero(count, as_tuple=False)
    A = torch.matrix_power(A, 2**ksorted[0].item())
    last = ksorted[0]
    processed = count[nonzero[0]]
    for exp in nonzero[1:]:
        new, last = exp - last, exp
        A[idx[processed:]] = torch.matrix_power(A[idx[processed:]], 2**new.item())
        processed += count[exp]
    return A.reshape(orig_size)
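Illustrative usage (shapes assumed): given a batch A and per-matrix exponents k, the function returns each A[i] raised to the power 2**k[i], grouping equal exponents so each group shares one repeated-squaring chain. A check against a plain per-matrix loop:

import torch

A = torch.randn(4, 3, 3)
k = torch.tensor([0, 2, 1, 2])
out = matrix_power_two_batch(A.clone(), k)
ref = torch.stack([torch.matrix_power(A[i], 2 ** k[i].item()) for i in range(4)])
print(torch.allclose(out, ref, atol=1e-5))  # expected: True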
Example #7
def get_adj(train_data, test_data, k, alpha, kappa):
    eps = np.finfo(float).eps
    emb_all = np.append(train_data, test_data, axis=0)
    N = emb_all.shape[0]
    metric = distance_metrics()['cosine']
    S = 1 - metric(emb_all, emb_all)
    S = torch.tensor(S)
    S = S - torch.eye(S.shape[0])

    if k > 0:
        topk, indices = torch.topk(S, k)
        mask = torch.zeros_like(S)
        mask = mask.scatter(1, indices, 1)
        mask = ((mask + torch.t(mask)) > 0).type(torch.float32)

        S = S * mask

    D = S.sum(0)
    Dnorm = torch.diag(torch.pow(D, -0.5))
    E = torch.matmul(Dnorm, torch.matmul(S, Dnorm))

    E = alpha * torch.eye(E.shape[0]) + E
    E = torch.matrix_power(E, kappa)

    E = E.cuda()

    train_data = train_data - train_data.mean(0)
    train_data_norm = train_data / LA.norm(train_data, 2, 1)[:, None]
    test_data = test_data - test_data.mean(0)
    test_data_norm = test_data / LA.norm(test_data, 2, 1)[:, None]
    features = np.append(train_data_norm, test_data_norm, axis=0)

    features = torch.tensor(features).cuda()
    return E, features
Example #8
 def create_H(self, hs, A):
     H = zeros(A.size())
     for k in range(self.K):
         Apow = matrix_power(A, k)
         for j in range(H.size()[0]):
             H[j, :] += hs[k, j] * Apow[j, :]
     return H
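The double loop implements a node-varying graph filter: row j of H is sum_k hs[k, j] * (A^k)[j, :]. A vectorized sketch of the same computation (sizes and names here are illustrative, standing in for self.K, self.hs, and A):

import torch

K, N = 3, 5
A = torch.rand(N, N)
hs = torch.rand(K, N)

Apows = torch.stack([torch.matrix_power(A, k) for k in range(K)])  # K x N x N
H = torch.einsum('kjn,kj->jn', Apows, hs)                          # N x N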
Example #9
File: MRGNN.py Project: lpasa/MRGNN
    def forward(self, data, hidden_layer_aggregator=None):
        X = data.x
        k = self.max_k

        #compute Laplacian
        L_edge_index, L_values = get_laplacian(data.edge_index,
                                               normalization="sym")
        L = torch.sparse.FloatTensor(L_edge_index, L_values,
                                     torch.Size([X.shape[0],
                                                 X.shape[0]])).to_dense()

        H = [X]
        for i in range(k - 1):
            xhi_layer_i = torch.mm(torch.matrix_power(L, i + 1), X)
            H.append(xhi_layer_i)

        H = self.lin(torch.cat(H, dim=1), self.xhi_layer_mask)
        H = self.reservoir_act_fun(H)
        H = self.bn_hidden_rec(H)

        H_avg = gap(H, data.batch)
        H_add = gadd(H, data.batch)
        H_max = gmp(H, data.batch)

        H = torch.cat([H_avg, H_add, H_max], dim=1)

        if self.output == "funnel" or self.output is None:
            return self.funnel_output(H)
        elif self.output == "one_layer":
            return self.one_layer_out(H)
        elif self.output == "restricted_funnel":
            return self.restricted_funnel_output(H)
        else:
            assert False, "error in output stage"
Example #10
def meas(u, blur):
    A, Sigma = makeA(500, 5)
    c = np.stack([np.linspace(0, 30, 30),
                  np.linspace(30, 0, 30)], axis=1).T.reshape(60, 1)[:, 0]
    mov_filt = np.power(c, 0.5)
    if blur == 'gaussian':
        Ncount = 1000
        Fw = torch.matrix_power(A, Ncount)
        meas = Fw @ u
    elif blur == 'linear':
        meas = u.clone()
        for i in range(N - 60):
            meas[i + 30] = u[i:(i + 60)].dot(
                torch.from_numpy(c).type(dtype=torch.float32)) / sum(c)
    elif blur == 'non-linear':
        meas = u.clone()
        for i in range(N - 60):
            meas[i + 30] = u[i:(i + 60)].dot(
                torch.from_numpy(mov_filt).type(
                    dtype=torch.float32)) / sum(mov_filt)
    elif blur == 'hybrid':
        meas = u.clone()
        for i in range(N - 60):
            if (i <= int(N / 2)):
                meas[i + 30] = u[i:(i + 60)].dot(
                    torch.from_numpy(c).type(dtype=torch.float32)) / sum(c)
            else:
                meas[i + 30] = u[i:(i + 60)].dot(
                    torch.from_numpy(mov_filt).type(
                        dtype=torch.float32)) / sum(mov_filt)
    elif blur == 'gaussian_local':
        gauss = mygauss(n=30)
        gauss = gauss / sum(gauss)
        meas = torch.from_numpy(np.convolve(u, gauss, 'same'))
    return meas
Example #11
    def __init__(self, D, A, gamma=0.5, method=WEI, K=3):
        super(GraphDownsampling, self).__init__()
        if A is not None:
            assert np.allclose(A, A.T), 'A should be symmetric'
            # self.A = np.linalg.inv(np.diag(np.sum(A, 0))).dot(A)
            self.A = A
            if method in [BIN, WEI]:
                self.A = gamma * np.eye(A.shape[0]) + (1 - gamma) * self.A
                self.A = Tensor(self.A.T)
            elif method == GF:
                N = A.shape[0]
                self.hs = nn.Parameter(torch.Tensor(K))
                stdv = 1. / math.sqrt(K)
                self.hs.data.uniform_(-stdv, stdv)
                self.A = Tensor(A)
                self.K = K
                self.Apows = torch.zeros(K, N, N)
                for i in range(K):
                    self.Apows[i, :, :] = torch.matrix_power(self.A, i)

        self.method = method
        self.parent_size = D.shape[1]
        self.child_size = D.shape[0]
        # NOTE: only creation of D changes!
        Deg_inv = np.linalg.inv(np.diag(np.sum(D, 1)))
        self.D_T = Tensor(Deg_inv.dot(D)).t()
Example #12
 def compute_variation_score(signal, l, hop_order):
     l = torch.matrix_power(l, hop_order)
     variations = torch.norm(torch.abs(l.matmul(signal)), p=2,
                             dim=1)  # highest variations
     # zeroed = torch.where(variations == 0, torch.tensor(0.001, dtype=torch.float32), variations)
     # variation_score = 1. / zeroed
     return variations
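A small illustrative run, assuming the function is reachable at module scope: on the unnormalized Laplacian of a 3-node path graph, a constant signal scores zero (constants are in the nullspace of L) while an alternating signal scores high.

import torch

L = torch.tensor([[ 1., -1.,  0.],
                  [-1.,  2., -1.],
                  [ 0., -1.,  1.]])
smooth = torch.ones(3, 1)
rough = torch.tensor([[1.], [-1.], [1.]])
print(compute_variation_score(smooth, L, hop_order=2))  # zeros
print(compute_variation_score(rough, L, hop_order=2))   # > 0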
Example #13
	def forward(self, x):
		
		# Force all data to be batched if it isn't already.
		if len(x.shape) == 2: x = x.view(-1, *x.shape)
		# But we don't know how to deal with batches-{of-batches}+.
		elif len(x.shape) > 3: assert 0
		
		# A series is the powers of our adjacency matrix(es) X.
		# Compute as many powers as we have rows for each adjacency matrix.
		a_series = [torch.matrix_power(x,i+1) for i in range (1, self.rows+2)]
		# Must swap dims (0,1) since the above code places the batch as dim 1 rather than 0.
		a_series = torch.swapaxes(torch.stack(a_series),0,1).to(x.device)

		# Generate the full NxN matrix of 1's.
		_1 = torch.full(x.shape[-2:], 1.0, dtype=x.dtype).to(x.device)
		# Element wise raise the A series to the correct power, will normalize later.
		# Generator expression performs faster than for loop after profiling.
		powers = list((_1@(a_series[:,i])**(j+1)) for i in range(self.rows) for j in range(self.cols))
		powers = torch.swapaxes(torch.stack(powers), 0,1).to(x.device)

		# Cannot use torch.trace, since that only works on 2d tensors, must roll our own using diag+sum.
		# See: https://discuss.pytorch.org/t/is-there-a-way-to-compute-matrix-trace-in-batch-broadcast-fashion/43866
		traces = torch.diagonal(powers, dim1=-2, dim2=-1).sum(-1)
		traces = traces.view(-1, self.rows, self.cols)
		# The [i,j]'th position is equal to i+j+2. This is the power to which the number of elements is raised for normalization.
		norm_pow_mat = torch.stack(list(torch.arange(0, self.cols)+i+2 for i in range(self.rows))).to(traces.device)
		# Compute the number of elements in an individual graph
		numel = powers.shape[-1]*powers.shape[-2]
		# The normalization for the [i,j]'th entry of each matrix is the number of elements raised to the i+j+2'th power.
		norm =  torch.full(traces.shape, numel).to(traces.device)**norm_pow_mat
		return (self.coef * traces/norm).sum(dim=[-1,-2])
Example #14
def genData(n=500, alpha=20, k=5, N=1000, Ncount=1000, sigma=1):
    source = torch.zeros(n, N)
    wsource = torch.zeros(n, N)
    A = makeA1d(n, alpha)
    Fw = torch.matrix_power(A, Ncount)
    for i in range(0, N):
        Unit = torch.zeros(n, 1)
        Unit[torch.randint(0, n - 2, (k, )) + 1, 0] = 1000 * torch.rand(k)
        source[:, i] = Unit[:, 0]
        source[:, i] = source[:, i] / torch.norm(source[:, i])
        meas = Fw @ Unit
        measn = meas + sigma * torch.rand(meas.shape)
        offset = torch.randint(-900, 900, (1, ))
        wsource[:, i] = l1rec(torch.matrix_power(A, Ncount + int(offset)),
                              measn, 2)
        wsource[:, i] = wsource[:, i] / torch.norm(wsource[:, i])
    return source, wsource
Example #15
def getx(A, meas, u, D, power1, epsilon1):
    t1 = np.linspace(100, 2400, 20)
    a1 = []
    b1 = []
    for ti in range(20):
        mix_norm, psnr_rec = l1solv_mix(torch.matrix_power(A, int(t1[ti])),
                                        meas,
                                        D,
                                        u,
                                        power=power1,
                                        episilon=epsilon1)
        a1.append(mix_norm)
        b1.append(psnr_rec)
        print('psnr:', psnr_rec)
    N_recon = np.round(t1[np.argmin(a1)])
    x1 = l1solv(torch.matrix_power(A, int(N_recon)), meas, D, u)
    return psnr1(u, x1.T)
Example #16
    def compute_adj_bar(adj, hop_order):
        new_adj = torch.zeros_like(adj)
        for i in range(hop_order):
            new_adj = new_adj + torch.matrix_power(adj, i + 1)

        new_adj = torch.ones_like(adj) - torch.clamp(new_adj, min=0, max=1)

        return new_adj
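Illustrative call (toy graph assumed, and assuming the method is exposed as a static function): A + A^2 + ... + A^hop_order marks every pair reachable within hop_order steps, so the complement keeps only pairs farther apart than that.

import torch

# 4-node path graph 0-1-2-3; the endpoints are 3 hops apart.
adj = torch.tensor([[0., 1., 0., 0.],
                    [1., 0., 1., 0.],
                    [0., 1., 0., 1.],
                    [0., 0., 1., 0.]])
print(compute_adj_bar(adj, hop_order=2))  # 1s only at (0, 3) and (3, 0)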
Example #17
 def h_func(self):
     fc1_weight = self.fc1_pos.weight - self.fc1_neg.weight  # [j, ik]
     fc1_weight = fc1_weight.view(self.d, self.d, self.k)  # [j, i, k]
     A = torch.sum(fc1_weight * fc1_weight, dim=2).t()  # [i, j]
     # h = trace_expm(A) - d  # (Zheng et al. 2018)
     M = torch.eye(self.d) + A / self.d  # (Yu et al. 2019)
     E = torch.matrix_power(M, self.d - 1)
     h = (E.t() * M).sum() - self.d
     return h
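This is the acyclicity penalty of Yu et al. (2019): h(A) = tr[(I + A/d)^d] - d, which is zero exactly when the nonnegative weighted adjacency A is acyclic, since the trace of each matrix power counts weighted closed walks. A self-contained numeric check (toy matrices; standalone reimplementation of the same formula):

import torch

def h(A):
    d = A.shape[0]
    M = torch.eye(d) + A / d
    E = torch.matrix_power(M, d - 1)
    return (E.t() * M).sum() - d  # == tr(M^d) - d

dag = torch.tensor([[0., 1.], [0., 0.]])  # acyclic: 0 -> 1
cyc = torch.tensor([[0., 1.], [1., 0.]])  # cycle: 0 <-> 1
print(h(dag).item())  # 0.0
print(h(cyc).item())  # 0.5 > 0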
Example #18
def perfbound(model):
    A = model.basic_rnn.weight_hh_l0.data
    B = model.basic_rnn.weight_ih_l0.data
    C = model.FC.weight.data
    Q = torch.mm(C.t(), C)
    P = Discrete_Lyap(A, Q)
    perfb = torch.trace(torch.mm(torch.mm(B.t(), P), B))
    At = torch.matrix_power(A, 2 * iterm)
    return perfb + torch.trace(torch.mm(torch.mm(At.t(), Q), At))
Example #19
def expm_taylor(A):
    if A.ndimension() < 2 or A.size(-2) != A.size(-1):
        raise ValueError('Expected a square matrix or a batch of squared matrices')

    if A.ndimension() == 2:
        # Just one matrix

        # Trivial case
        if A.size() == (1, 1):
            return torch.exp(A)

        if A.element_size() > 4:
            thetas = thetas_dict["double"]
        else:
            thetas = thetas_dict["single"]

        # No scale-square needed
        # This could be done marginally faster if iterated in reverse
        normA = torch.norm(A, 1).item()
        for deg, theta in zip(degs, thetas):
            if normA <= theta:
                return taylor_approx(A, deg)

        # Scale square
        s = int(math.ceil(math.log2(normA) - math.log2(thetas[-1])))
        A = A * (2**-s)
        X = taylor_approx(A, degs[-1])
        return torch.matrix_power(X, 2**s)
    else:
        # Batching

        # Trivial case
        if A.size()[-2:] == (1, 1):
            return torch.exp(A)

        if A.element_size() > 4:
            thetas = thetas_dict["double"]
        else:
            thetas = thetas_dict["single"]

        normA = torch.norm(A, dim=(-2, -1))

        # Handle trivial case
        if (normA == 0.).all():
            I = torch.eye(A.size(-2), A.size(-1), dtype=A.dtype, device=A.device)
            I = I.expand_as(A)
            return I

        # Handle small normA
        more = normA > thetas[-1]
        k = normA.new_zeros(normA.size(), dtype=torch.long)
        k[more] = torch.ceil(torch.log2(normA[more]) - math.log2(thetas[-1])).long()

        # A = A * 2**(-k)
        A = torch.pow(.5, k.float()).unsqueeze_(-1).unsqueeze_(-1).expand_as(A) * A
        X = taylor_approx(A, degs[-1])
        return matrix_power_two_batch(X, k)
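The last line relies on the scaling-and-squaring identity exp(A) = (exp(A * 2^-s))^(2^s). A self-contained check of that identity against torch.matrix_exp (the helpers above — thetas_dict, degs, taylor_approx — are defined elsewhere in the source module):

import torch

A = torch.randn(4, 4)
s = 3
scaled = torch.matrix_exp(A * 2.0 ** -s)
print(torch.allclose(torch.matrix_power(scaled, 2 ** s),
                     torch.matrix_exp(A), atol=1e-5))  # True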
Example #20
def _calc_Linvk(s, k, ttype=None):
    # easier to do with matrices than ndarray to keep dimensions straight
    D = _calc_D(s, ttype)
    Linvk = ((((-1.)**k) / factorial(k)) *
             torch.mm(torch.matrix_power(D, k), torch.diag(s**(k + 1.))))[:, k:-k]
    # return as ndarray
    return Linvk.transpose(1, 0)
Example #21
 def loss(self, x, x_hat, rho, alpha):
     exp_A = torch.matrix_power(
         torch.eye(self.d).double() +
         torch.div(torch.matmul(self.adj_A.data, self.adj_A.data), self.d),
         self.d)
     h = torch.trace(exp_A) - self.d
     total_loss = (0.5 / x.shape[0]) * F.mse_loss(x,x_hat) \
                 + self.l1_graph_penalty * torch.norm(self.adj_A.data, 1) \
                 + alpha * h + 0.5 * rho * h * h
     return F.mse_loss(x, x_hat), h, total_loss
Example #22
 def h_func(self):
     """Constrain 2-norm-squared of fc1 weights along m1 dim to be a DAG"""
     d = self.dims[0]
     fc1_weight = self.fc1_pos.weight - self.fc1_neg.weight  # [j * m1, i]
     fc1_weight = fc1_weight.view(d, -1, d)  # [j, m1, i]
     A = torch.sum(fc1_weight * fc1_weight, dim=1).t()  # [i, j]
     # h = trace_expm(A) - d  # (Zheng et al. 2018)
     M = torch.eye(d) + A / d  # (Yu et al. 2019)
     E = torch.matrix_power(M, d - 1)
     h = (E.t() * M).sum() - d
     return h
Example #23
 def h_func(self, As):
     """Constrain 2-norm-squared of fc1 weights along m1 dim to be a DAG"""
     # batch wise loss
     M0 = th.eye(self.nagt)
     M0 = M0.reshape((1, self.nagt, self.nagt))
     M0 = M0.repeat(self.n_step, 1, 1)
     M = M0 + th.from_numpy(As) / self.nagt
     E = th.matrix_power(M, self.nagt - 1)
     hs = ((th.transpose(E, 1, 2) * M).sum() -
           self.nagt * self.n_step) / self.nagt
     return hs
Example #24
def laplacian_average(models, V, num_nodes, rounds):
    model = OrderedDict()
    idx = np.random.randint(0, num_nodes)
    for key, val in models[0].items():
        size = val.size()
        initial = torch.stack([_[key] for _ in models])
        final = torch.matmul(torch.matrix_power(V, rounds),
                             initial.reshape(num_nodes, -1)) * num_nodes
        model[key] = final[idx].reshape(size)

    return model
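Here V acts as a gossip mixing matrix: powers of a doubly stochastic V converge to the uniform averaging matrix, so every node's row approaches the average of the stacked parameters (the final * num_nodes rescaling depends on how the caller normalizes V). A minimal sketch of that convergence, with an illustrative 3-node mixing matrix:

import torch

V = torch.tensor([[0.50, 0.25, 0.25],
                  [0.25, 0.50, 0.25],
                  [0.25, 0.25, 0.50]])
x = torch.tensor([[1.0], [2.0], [6.0]])  # one scalar parameter per node
print(torch.matrix_power(V, 20) @ x)  # every entry -> mean(x) = 3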
Example #25
 def blas_lapack_ops(self):
     m = torch.randn(3, 3)
     a = torch.randn(10, 3, 4)
     b = torch.randn(10, 4, 3)
     v = torch.randn(3)
     return (
         torch.addbmm(m, a, b),
         torch.addmm(torch.randn(2, 3), torch.randn(2, 3),
                     torch.randn(3, 3)),
         torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
         torch.addr(torch.zeros(3, 3), v, v),
         torch.baddbmm(m, a, b),
         torch.bmm(a, b),
         torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3),
                            torch.randn(3, 3)),
         # torch.cholesky(a), # deprecated
         torch.cholesky_inverse(torch.randn(3, 3)),
         torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
         torch.dot(v, v),
         torch.eig(m),
         torch.geqrf(a),
         torch.ger(v, v),
         torch.inner(m, m),
         torch.inverse(m),
         torch.det(m),
         torch.logdet(m),
         torch.slogdet(m),
         torch.lstsq(m, m),
         torch.lu(m),
         torch.lu_solve(m, *torch.lu(m)),
         torch.lu_unpack(*torch.lu(m)),
         torch.matmul(m, m),
         torch.matrix_power(m, 2),
         # torch.matrix_rank(m),
         torch.matrix_exp(m),
         torch.mm(m, m),
         torch.mv(m, v),
         # torch.orgqr(a, m),
         # torch.ormqr(a, m, v),
         torch.outer(v, v),
         torch.pinverse(m),
         # torch.qr(a),
         torch.solve(m, m),
         torch.svd(a),
         # torch.svd_lowrank(a),
         # torch.pca_lowrank(a),
         # torch.symeig(a), # deprecated
         # torch.lobpcg(a, b), # not supported
         torch.trapz(m, m),
         torch.trapezoid(m, m),
         torch.cumulative_trapezoid(m, m),
         # torch.triangular_solve(m, m),
         torch.vdot(v, v),
     )
Example #26
 def forward(self, edge_index, edge_attr):
     adj = to_dense_adj(edge_index=edge_index).type(
         torch.float32)  # convert sparse to adjacency matrix
     adj = torch.squeeze(adj)  # remove dimension with length 1
     adj_0 = torch.eye(adj.shape[0], device=try_gpu())
     powers_adj = torch.stack((adj_0, adj))
     for p in range(2, self.power + 1):
         adj_p = torch.matrix_power(adj, p)
         adj_p = torch.unsqueeze(adj_p, 0)
         powers_adj = torch.cat((powers_adj, adj_p))
     return powers_adj
Example #27
def aggregate(A, X, len_walk, num_neigh, agg_func):
    # row-normalize so that norm is the random-walk (row-stochastic) matrix
    norm = torch.div(A, torch.sum(A, dim=1, keepdim=True))
    norm = torch.matrix_power(norm, len_walk)
    result = torch.zeros(X.shape)
    for i in range(A.shape[0]):
        x = A[i].cpu().detach().numpy()
        ind = np.random.choice(range(x.shape[0]), num_neigh, replace=False)
        if agg_func == "MEAN":
            result[i] = torch.mean(X[ind], axis=0)
        else:
            result[i] = torch.max(X[ind], axis=0).values
    return result
Example #28
    def init_interaction_graph_feats(self, init_method, device, d_init,
                                     feat_size):
        self.interaction_num_node_feat = d_init
        if "graph_feats" in init_method:
            if type(self.sparse_interaction_node_feats) == dict:
                self.graph_feats = torch.tensor(
                    self.sparse_interaction_node_feats[str(
                        feat_size)].todense(),
                    device=device,
                    dtype=torch.float,
                    requires_grad=False)
                self.interaction_num_node_feat = feat_size
            else:
                self.graph_feats = torch.tensor(
                    self.sparse_interaction_node_feats.todense(),
                    device=device,
                    dtype=torch.float,
                    requires_grad=False)

        elif "rand_init" in init_method:
            num_graphs = len(self.gs_map)
            init_node_embd = torch.empty(num_graphs,
                                         d_init,
                                         requires_grad=True)
            self.graph_feats = torch.nn.init.xavier_normal_(
                init_node_embd, gain=torch.nn.init.calculate_gain('relu'))
            self.graph_feats = self.graph_feats.to(device)
        elif "ones_init" in init_method:
            num_graphs = len(self.gs_map)
            self.graph_feats = torch.ones(num_graphs,
                                          d_init,
                                          requires_grad=False,
                                          device=device)
        elif 'one_hot_init' in init_method:
            num_graphs = len(self.gs_map)
            self.graph_feats = torch.matrix_power(
                torch.zeros(num_graphs,
                            num_graphs,
                            device=device,
                            requires_grad=False), 0)
            self.interaction_num_node_feat = num_graphs

        elif init_method == "model_init":
            pass
        elif init_method == "no_init":
            num_graphs = len(self.gs_map)
            self.graph_feats = torch.zeros(num_graphs,
                                           d_init,
                                           requires_grad=False,
                                           device=device)
        else:
            raise NotImplementedError
Example #29
def gensingledata(n=500, alpha=20, m=5, Ncount=1000, sigma=0.1):
    #torch.random.seed()
    A = makeA1d(n, alpha)
    u = torch.zeros((n, 1))
    #np.random.seed(0)
    k = np.random.randint(0, n - 75, m)
    #torch.manual_seed(0)
    u[k + 50] = 1000 * torch.rand(m, 1)
    #u[253]=759
    Fw = torch.matrix_power(A, Ncount)
    meas = Fw @ u
    measn = meas + sigma * torch.rand(meas.shape)
    return meas, measn, u, A, Fw
Example #30
 def __init__(self, U, A, K):
     assert A is not None
     assert np.allclose(A, A.T), ERR_A_NON_SYM
     super(NVGFUps, self).__init__(U)
     N = A.shape[0]
     self.A = Tensor(A)
     self.hs = nn.Parameter(torch.Tensor(K, N))
     # stdv = 1. / math.sqrt(K)
     # self.hs.data.uniform_(-stdv, stdv)
     self.Apows = torch.zeros(K, N, N)
     self.K = K
     for i in range(K):
         self.Apows[i, :, :] = torch.matrix_power(self.A, i)