Example no. 1
def feature_generator_(adj, A, features, order, edges, alpha=args.alpha):
    n = features.shape[0]
    edges = list(edges)
    m = len(edges)
    # randomly split the edge list into two halves
    index = np.random.permutation(m)
    index_1 = index[:m // 2]
    index_2 = index[m // 2:]
    mask_1_row = [edges[x][0] for x in index_1]
    mask_1_col = [edges[x][1] for x in index_1]
    mask_2_row = [edges[x][0] for x in index_2]
    mask_2_col = [edges[x][1] for x in index_2]

    # use len(index_*) rather than m // 2: the second half has
    # m - m // 2 entries, so np.ones(m // 2) breaks when m is odd
    mask_1 = sp.csr_matrix((np.ones(len(index_1)), (mask_1_row, mask_1_col)),
                           shape=(n, n))
    mask_2 = sp.csr_matrix((np.ones(len(index_2)), (mask_2_row, mask_2_col)),
                           shape=(n, n))
    # symmetrize each half and add self-loops
    adj_1 = mask_1 + mask_1.T.multiply(mask_1.T > mask_1) - mask_1.multiply(
        mask_1.T > mask_1) + sp.eye(n)
    adj_2 = mask_2 + mask_2.T.multiply(mask_2.T > mask_2) - mask_2.multiply(
        mask_2.T > mask_2) + sp.eye(n)

    adj_1 = sparse_mx_to_torch_sparse_tensor(adj_1)
    adj_2 = sparse_mx_to_torch_sparse_tensor(adj_2)
    if args.cuda:
        adj_1 = adj_1.cuda()
        adj_2 = adj_2.cuda()
    A_1 = A * adj_1
    A_2 = A * adj_2

    A_1 = Edge_Generator(A_1, args.order)
    A_2 = Edge_Generator(A_2, args.order)
    return A_1, A_2
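All of the examples on this page assume the sparse_mx_to_torch_sparse_tensor helper popularized by the pygcn codebase. A minimal sketch, consistent with the commented-out test at the end of Example no. 12 (torch.sparse_coo_tensor is the non-deprecated constructor in recent PyTorch):

import numpy as np
import torch

def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    # convert a scipy sparse matrix to a torch sparse COO tensor
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse_coo_tensor(indices, values, shape)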
Example no. 2
 def retrain_model(self, model, X, A, labels, train_idx):
     print('----------------- retraining -------------------')
     # model.reset_parameters()
     optim = t.optim.Adam(model.parameters(), lr=self.args.lr)
     X = t.from_numpy(X).float().to(self.args.device)
     A = sparse_mx_to_torch_sparse_tensor(A).to(self.args.device)
     labels = t.from_numpy(labels).long().to(self.args.device)
     for i in tqdm(range(self.args.retrain_epoch),
                   total=self.args.retrain_epoch,
                   desc='retraining'):
         logits = model(X, A)
         loss = F.cross_entropy(logits[train_idx], labels[train_idx])
         acc_train = count_acc(logits[train_idx], labels[train_idx])
         optim.zero_grad()
         loss.backward()
         optim.step()
         # print('Epoch: {:04d}'.format(i + 1),
         #       'loss_train: {:.4f}'.format(loss.item()),
         #       'acc_train: {:.4f}'.format(acc_train))
     print('----------------- retraining finished -------------------')
     with t.no_grad():
         logits = model(X, A)
         train_logits = logits[train_idx]
         train_labels = labels[train_idx]
         acc_train = count_acc(train_logits, train_labels)
         print('training acc: %.4f' % acc_train)
         train_labels_onehot = t.zeros_like(train_logits) \
             .to(self.args.device) \
             .scatter_(1, train_labels.view(-1, 1), 1)
         l_val = train_logits.gather(1, train_labels.view(-1, 1)).clone()
         c_val, c_new = t.max(train_logits - 1e6 * train_labels_onehot,
                              dim=1)
         dif = l_val.squeeze() - c_val
         return c_new, dif.cpu().detach().numpy()
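The scatter_/gather block above computes, per training node, the margin between the logit of the true class and the largest logit among the remaining classes (the 1e6 term masks the true class out of the max). A toy illustration of that semantics, not from the source repo:

import torch as t

train_logits = t.tensor([[2.0, 0.5, 1.0],
                         [0.1, 1.5, 1.2]])
train_labels = t.tensor([0, 2])
onehot = t.zeros_like(train_logits).scatter_(1, train_labels.view(-1, 1), 1)
l_val = train_logits.gather(1, train_labels.view(-1, 1))  # true-class logit
c_val, c_new = t.max(train_logits - 1e6 * onehot, dim=1)  # best other class
print(l_val.squeeze() - c_val)  # tensor([ 1.0000, -0.3000])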
Example no. 3
def update_graph(n, m, args, model, optimizer):
    # adj, full_adj, features, labels, idx_train, idx_val, idx_test,edges = load_data()
    adj, features, labels, idx_train = shortest_dist(n, m, [n, m])

    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    deg = np.diag(adj.toarray().sum(axis=1))
    laplacian = torch.from_numpy((deg - adj.toarray()).astype(np.float32))
    adj = normalize(sp.csr_matrix(adj) + sp.eye(adj.shape[0]))
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    if args.cuda and torch.cuda.is_available():
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()
        laplacian = laplacian.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()

    t_total = time.time()
    for epoch in range(args.gcn_epochs):
        t = time.time()
        model.train()
        optimizer.zero_grad()
        output = model(features, adj)
        loss_train = F.nll_loss(output[idx_train], labels[idx_train])
        soft_out = torch.unsqueeze(
            torch.nn.functional.softmax(output, dim=1)[:, 1], 1)
        loss_reg = torch.mm(torch.mm(soft_out.T, laplacian), soft_out)
        print('Epoch: {:04d}'.format(epoch + 1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'time: {:.4f}s'.format(time.time() - t))
        loss_train += args.gcn_lambda * loss_reg.squeeze()
        loss_train.backward()
        optimizer.step()
Example no. 4
 def eval_model(self, model, X, A, labels, train_idx):
     X = t.from_numpy(X).float().to(self.args.device)
     A = sparse_mx_to_torch_sparse_tensor(A).to(self.args.device)
     labels = t.from_numpy(labels).long().to(self.args.device)
     with t.no_grad():
         logits = model(X, A)
         acc_train = count_acc(logits[train_idx], labels[train_idx])
         print('training acc: %.4f' % acc_train)
Example no. 5
def adj2A(adj):
    adj = adj + sp.eye(adj.shape[0])
    D1 = np.array(adj.sum(axis=1))**(-0.5)
    D2 = np.array(adj.sum(axis=0))**(-0.5)
    D1 = sp.diags(D1[:, 0], format='csr')
    D2 = sp.diags(D2[0, :], format='csr')
    A = adj.dot(D1)
    A = D2.dot(A)
    #A = sp.eye(adj.shape[0]) - A
    A = sparse_mx_to_torch_sparse_tensor(A)
    return A
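A quick smoke test for adj2A (my own check, not part of the source): on a 3-node path graph the result should be the symmetric normalization D^-1/2 (A + I) D^-1/2, whose off-diagonal entries are 1/sqrt(d_i * d_j) with self-loop degrees d = [2, 3, 2]:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0, 1, 0],
                              [1, 0, 1],
                              [0, 1, 0]], dtype=np.float32))
dense = adj2A(adj).to_dense().numpy()
assert np.allclose(dense, dense.T)              # symmetric
assert np.isclose(dense[0, 1], 1 / np.sqrt(6))  # 1/sqrt(2 * 3)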
Example no. 6
def preprocess(a):
    D1_ = np.array(a.sum(axis=1))**(-0.5)
    D2_ = np.array(a.sum(axis=0))**(-0.5)
    D1_ = sp.diags(D1_[:, 0], format='csr')
    D2_ = sp.diags(D2_[0, :], format='csr')
    A_ = a.dot(D1_)
    A_ = D2_.dot(A_)
    A_ = sparse_mx_to_torch_sparse_tensor(A_)
    if args.cuda:
        A_ = A_.cuda()
    return A_
Example no. 7
    def __init__(self, C_nodes, graph, all_labels_init, labels_init):
        self.loss_min = 100
        self.max_acc = 0
        self.epochs = config.learning.epochs
        self.adj = sp.coo_matrix(graph, dtype=np.float32)
        self.all_labels = encode_onehot(all_labels_init)
        self.features = normalize(np.array(C_nodes))
        self.adj = normalize(self.adj + sp.eye(self.adj.shape[0]))
        self.features = torch.FloatTensor(np.array(self.features))
        self.all_labels = torch.LongTensor(np.where(self.all_labels)[1])
        self.adj = sparse_mx_to_torch_sparse_tensor(self.adj)
        self.idx_test = np.where(np.array(labels_init) == 0)[0]
        self.labels = encode_onehot(labels_init)
        self.labels = torch.LongTensor(np.where(self.labels)[1])
        self.idx_train_all = np.where(self.labels)[0]
        print(self.idx_train_all)
        self.all_labels_init = torch.LongTensor(self.all_labels)

        self.idx_train = torch.LongTensor(
            self.idx_train_all[:int((1 - config.learning.ratio_val) *
                                    len(self.idx_train_all))])
        self.idx_val = torch.LongTensor(
            self.idx_train_all[int((1 - config.learning.ratio_val) *
                                   len(self.idx_train_all)):])

        self.idx_test = torch.LongTensor(self.idx_test)
        if config.learning.method_learning == "GCN":
            self.model = GCN(nfeat=self.features.shape[1],
                             nhid=config.learning.hidden,
                             nclass=self.labels.max().item(),
                             dropout=config.learning.dropout)

            if config.learning.cuda:
                self.adj = self.adj.cuda()

        elif config.learning.method_learning == "AGNN":
            self.model = AGNN(nfeat=self.features.shape[1],
                              nhid=config.learning.hidden,
                              nclass=self.labels.max().item(),
                              nlayers=config.learning.layers,
                              dropout_rate=config.learning.dropout)

        if config.learning.cuda:
            self.model.cuda()
            self.features = self.features.cuda()
            # self.adj = self.adj.cuda()
            self.all_labels = self.all_labels.cuda()
            self.idx_train = self.idx_train.cuda()
            self.idx_test = self.idx_test.cuda()
            self.idx_val = self.idx_val.cuda()
Example no. 8
def main():

    n = args.n
    m = args.m
    nt = args.nt
    mt = args.mt

    reward = np.ones((n, m, 4)) * -0.1
    reward[nt - 2, mt - 1, 2], reward[nt - 1, mt - 2, 1] = 0, 0

    torch.set_num_threads(1)
    device = torch.device("cuda:0" if args.cuda else "cpu")
    gcn_model = GCN(nfeat=n*m, nhid=args.hidden)
    gcn_model.to(device)
    optimizer = optim.Adam(gcn_model.parameters(), lr=args.lr,
                           weight_decay=args.weight_decay)
    adj, features, _, _ = shortest_dist(n, m, [n, m])

    features = normalize(sp.csr_matrix(features))
    features = torch.FloatTensor(np.array(features.todense()))

    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    adj = normalize(sp.csr_matrix(adj) + sp.eye(adj.shape[0]))
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    for episodes in range(args.gcn_epi):
        update_graph(n, m, args, gcn_model, optimizer)
        print("{} episode done :".format(episodes + 1))

    if args.cuda and torch.cuda.is_available():
        features = features.cuda()
        adj = adj.cuda()

    output = gcn_model(features, adj).cpu()
    gcn_phi = torch.exp(output).detach().numpy()
    gcn_phi = gcn_phi[:, 1].reshape(n, m)

    param1 = Params(n, m, nt, mt, gamma=args.gamma, qstep=args.qstep,
                    pstep=args.pstep, alpha=0, noepi=args.noepi)
    param2 = Params(n, m, nt, mt, gamma=args.gamma, qstep=args.qstep,
                    pstep=args.pstep, alpha=1, noepi=args.noepi)

    regcn, valgcn = ACPhi(param1, reward, gcn_phi)
    reg, val = ACPhi(param2, reward, gcn_phi)

    PlotAnalysis(param1.noepi, reg, val, regcn, valgcn, gcn_phi)
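torch.exp(output) assumes the GCN's forward ends in log_softmax, as in the pygcn reference model; exponentiating then recovers the class probabilities, so gcn_phi[:, 1] is the per-cell probability of class 1. A quick check of that identity:

import torch

logits = torch.log_softmax(torch.randn(4, 2), dim=1)
probs = torch.exp(logits)
assert torch.allclose(probs.sum(dim=1), torch.ones(4))  # rows sum to 1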
Example no. 9
def update_graph(model, optimizer, features, adj, rew_states, loss, args,
                 envs):
    if adj.shape[0] > 1:
        labels = torch.zeros((len(features)))
        idx_train = torch.LongTensor([0])
        for r_s in rew_states:
            if len(envs.observation_space.shape) == 1:  # MuJoCo experiments
                labels[r_s[0]] = torch.sigmoid(2 * r_s[1])
            else:
                labels[r_s[0]] = torch.tensor(
                    [1.]) if r_s[1] > 0. else torch.tensor([0.])
            idx_train = torch.cat((idx_train, torch.LongTensor([r_s[0]])), 0)
        labels = labels.type(torch.LongTensor)
    else:
        labels = torch.zeros((len(features))).type(torch.LongTensor)
        idx_train = torch.LongTensor([0])

    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    deg = np.diag(adj.toarray().sum(axis=1))
    laplacian = torch.from_numpy((deg - adj.toarray()).astype(np.float32))
    adj = normalize(sp.csr_matrix(adj) + sp.eye(adj.shape[0]))
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    if args.cuda and torch.cuda.is_available():
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()
        laplacian = laplacian.cuda()
        labels = labels.cuda()
        idx_train = idx_train.cuda()

    t_total = time.time()
    for epoch in range(args.gcn_epochs):
        t = time.time()
        model.train()
        optimizer.zero_grad()
        output = model(features, adj)
        loss_train = F.nll_loss(output[idx_train], labels[idx_train])
        soft_out = torch.unsqueeze(
            torch.nn.functional.softmax(output, dim=1)[:, 1], 1)
        loss_reg = torch.mm(torch.mm(soft_out.T, laplacian), soft_out)
        loss_train += args.gcn_lambda * loss_reg.squeeze()
        loss_train.backward()
        optimizer.step()
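For intuition on loss_reg: with L = D - A, the quadratic form s^T L s equals the sum of (s_i - s_j)^2 over the edges, so the regularizer pushes connected nodes toward similar soft outputs. A small numerical check (mine, not from the repo):

import numpy as np
import scipy.sparse as sp
import torch

adj = sp.csr_matrix(np.array([[0, 1, 1],
                              [1, 0, 0],
                              [1, 0, 0]], dtype=np.float32))
deg = np.diag(adj.toarray().sum(axis=1))
laplacian = torch.from_numpy(deg - adj.toarray())
s = torch.tensor([[0.9], [0.2], [0.4]])
quad = (s.T @ laplacian @ s).item()
edge_sum = sum((s[i] - s[j]).item() ** 2 for i, j in [(0, 1), (0, 2)])
assert abs(quad - edge_sum) < 1e-6  # both equal 0.74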
Example no. 10
    def forward(self, input_tensor, G):
        # bi-lstm
        pred_embedded = self.embedding(input_tensor[0])
        obj_embedded = input_tensor[1]
        embedded = torch.cat((pred_embedded, obj_embedded), 2)
        lstm_out, (hidden_state, cell_state) = self.lstm(embedded, self.initial_hidden)
        # lstm_out = lstm_out.permute(1, 0, 2)
        lstm_out = torch.flatten(lstm_out, start_dim=1)

        # pygcn
        adj = nx.adjacency_matrix(G)
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
        adj = normalize(adj + sp.eye(adj.shape[0]))
        adj = sparse_mx_to_torch_sparse_tensor(adj)

        features = normalize(lstm_out.detach().numpy())
        features = torch.FloatTensor(np.array(features))
        logits = self.gcn(features, adj)
        #logp = F.log_softmax(logits, 1)
        return logits
Example no. 11
adj = sp.load_npz('adjacency.npz')
adj = normalize(adj)

features = pd.read_csv('node_features.csv')
indexes = features.index.values
labels = features['labels'].values

print(np.sum(labels)/labels.shape[0])

features = features.drop(['labels'], axis=1)
feature_transformer = StandardScaler()
features = feature_transformer.fit_transform(features)

features = torch.FloatTensor(features)
labels = torch.LongTensor(labels)
adj = sparse_mx_to_torch_sparse_tensor(adj)

print(len(indexes))

idx_sampler = IndexSampler(indexes)

idx_train = torch.LongTensor(idx_sampler.sample(n_samples=2000))
idx_val = torch.LongTensor(idx_sampler.sample(n_samples=1000))
idx_test = torch.LongTensor(idx_sampler.sample_remaining())

#%%
print(adj.shape)
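IndexSampler is not defined in this snippet. A minimal stand-in consistent with the calls above is sketched below; the class name and method signatures come from the usage, while the implementation (disjoint draws without replacement, then the leftovers) is my guess:

import numpy as np

class IndexSampler:
    def __init__(self, indexes):
        # shuffle once, then hand out disjoint slices
        self.remaining = np.random.permutation(indexes)

    def sample(self, n_samples):
        picked = self.remaining[:n_samples]
        self.remaining = self.remaining[n_samples:]
        return picked

    def sample_remaining(self):
        return self.remaining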
Example no. 12
# coo_matrix(): compressed storage for a sparse matrix. Specify the nonzero
# values, the row and col of each nonzero element, and finally the shape of
# the sparse matrix.
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                    shape=(2708, 2708),
                    dtype=np.float32)

# build symmetric adjacency matrix
adj_sysm = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

# add self-loops
adj_sysm_self = adj + sp.eye(adj.shape[0])

# normalize
adj_norm = normalize(adj_sysm_self)

features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
features = normalize(features)

labels = encode_onehot(idx_features_labels[:, -1])

# convert data types to tensors
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj_norm = sparse_mx_to_torch_sparse_tensor(adj_norm)

# test of the sparse_mx_to_torch_sparse_tensor(sparse_mx) function
# sparse_mx = adj_norm.tocoo().astype(np.float32)
# indices = torch.from_numpy(
#     np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
# values = torch.from_numpy(sparse_mx.data)
# shape = torch.Size(sparse_mx.shape)
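The adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) idiom, used in almost every example above, turns a directed adjacency matrix into an undirected one, keeping the larger weight whenever both directions are present. A quick check (my own, not from the source):

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0, 2, 0],
                              [1, 0, 0],
                              [3, 0, 0]], dtype=np.float32))
sym = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
print(sym.toarray())
# [[0. 2. 3.]
#  [2. 0. 0.]
#  [3. 0. 0.]]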