Example #1
0
    def forward(self, x, edge_index, edge_weight=None):
        """Graph convolution with a symmetrically normalized adjacency, batched.

        Args:
            x: node features; the size() calls and final permute imply shape
                (batch, num_nodes, in_channels) -- confirm against callers.
            edge_index: COO edge indices of shape (2, num_edges).
            edge_weight: optional per-edge weights. BUG FIX: the old code
                dereferenced a None edge_weight (``edge_weight.view(-1)``);
                unit weights are now materialized when it is omitted.

        Returns:
            Tensor of shape (batch, num_nodes, out_channels).
        """
        batch, num_nodes = x.size(0), x.size(1)

        if edge_weight is None:
            # Default every edge to weight 1 (same dtype/device as x).
            edge_weight = x.new_ones(edge_index.size(1))

        # First adjust the adjacency with diagonal (self-loop) entries.
        edge_index, edge_weight = add_self_loops(edge_index, edge_weight, 1, num_nodes)
        row, col = edge_index

        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        # Degree vector, then D^{-1/2} A D^{-1/2} normalization.
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg = deg.pow(-0.5)
        deg[torch.isinf(deg)] = 0  # isolated nodes: avoid inf from 0^-0.5
        lap = deg[row] * edge_weight * deg[col]

        x = torch.matmul(x, self.weight)
        # spmm operates on 2-D matrices, so fold (channels, batch) into one
        # axis, multiply, then unfold back to (batch, num_nodes, channels).
        out = spmm(edge_index, lap, num_nodes,
                   x.permute(1, 2, 0).contiguous().view((num_nodes, -1)))
        out = out.view((num_nodes, -1, batch)).permute(2, 0, 1)

        if self.bias is not None:
            out = out + self.bias

        return out
 def forward(self, x, edge_index):
     # type: (Tensor, Tensor) -> Tensor
     """Mean-aggregate neighbors, concat with self features, project, l2-normalize."""
     edge_index, _ = remove_self_loops(edge_index)
     edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
     src, dst = edge_index[0], edge_index[1]
     # dim_size is deliberately not set, so the result has src.max() + 1 rows.
     neigh_mean = scatter_mean(x[dst], src, dim=0)
     # Truncate x to the aggregated tensor's length before concatenating.
     combined = torch.cat([x[:neigh_mean.size(0)], neigh_mean], dim=1)
     projected = torch.matmul(combined, self.weight) + self.bias
     return F.normalize(projected, p=2.0, dim=-1)
Example #3
0
def load_planetoid_data(dataset_str):
    """Load a Planetoid citation dataset (cora/citeseer/pubmed style) from
    ``data/planetoid/ind.<dataset_str>.*`` pickle files.

    Builds features, labels, a self-looped normalized adjacency, and
    train/val/test masks, then returns them wrapped in a Data object.
    Train = labeled portion (y), val = next 500 nodes, test = indices from
    the ``.test.index`` file.
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for name in names:
        with open("data/planetoid/ind.{}.{}".format(dataset_str, name),
                  'rb') as f:
            if sys.version_info > (3, 0):
                out = pkl.load(f, encoding='latin1')
            else:
                # BUG FIX: list.append returns None, so the old
                # ``out = objects.append(pkl.load(f))`` left out == None and
                # appended the raw object a second time below.
                out = pkl.load(f)

            if name == 'graph':
                objects.append(out)
            else:
                # Scipy sparse matrices become dense before tensor conversion.
                out = out.todense() if hasattr(out, 'todense') else out
                objects.append(torch.Tensor(out))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx = parse_index_file(
        "data/planetoid/ind.{}.test.index".format(dataset_str))
    train_idx = torch.arange(y.size(0), dtype=torch.long)
    val_idx = torch.arange(y.size(0), y.size(0) + 500, dtype=torch.long)
    sorted_test_idx = np.sort(test_idx)

    if dataset_str == 'citeseer':
        # Citeseer has isolated test nodes missing from tx/ty; pad them with
        # zero rows so test indices line up.
        len_test_idx = max(test_idx) - min(test_idx) + 1
        tx_ext = torch.zeros(len_test_idx, tx.size(1))
        tx_ext[sorted_test_idx - min(test_idx), :] = tx
        ty_ext = torch.zeros(len_test_idx, ty.size(1))
        ty_ext[sorted_test_idx - min(test_idx), :] = ty

        tx, ty = tx_ext, ty_ext

    features = torch.cat([allx, tx], dim=0)
    # Reorder the appended test rows into their true (unsorted) positions.
    features[test_idx] = features[sorted_test_idx]

    labels = torch.cat([ally, ty], dim=0).max(dim=1)[1]
    labels[test_idx] = labels[sorted_test_idx]

    edge_list = adj_list_from_dict(graph)
    edge_list = add_self_loops(edge_list, features.size(0))
    adj = normalize_adj(edge_list)

    train_mask = index_to_mask(train_idx, labels.shape[0])
    val_mask = index_to_mask(val_idx, labels.shape[0])
    test_mask = index_to_mask(test_idx, labels.shape[0])

    data = Data(adj, edge_list, features, labels, train_mask, val_mask,
                test_mask)

    return data
Example #4
0
def load_npz_data(dataset_str, ntrain, seed):
    """Load a dataset stored as ``data/npz/<dataset_str>.npz``.

    The archive holds a CSR adjacency (adj_*), either CSR or dense features
    (attr_*), and either CSR or dense labels. Returns a Data object with a
    normalized self-looped adjacency and ntrain-per-class / 500 / rest splits.
    """
    with np.load('data/npz/' + dataset_str + '.npz',
                 allow_pickle=True) as loader:
        loader = dict(loader)
    adj_mat = sp.csr_matrix(
        (loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),
        shape=loader['adj_shape']).tocoo()
    if dataset_str.startswith('ms'):
        # ms* graphs already store both edge directions.
        edge_list = torch.cat(
            (torch.tensor(adj_mat.row).type(torch.int64).view(1, -1),
             torch.tensor(adj_mat.col).type(torch.int64).view(1, -1)),
            dim=0)
    else:
        # Symmetrize by appending the reversed edges.
        edge_list1 = torch.cat(
            (torch.tensor(adj_mat.row).type(torch.int64).view(1, -1),
             torch.tensor(adj_mat.col).type(torch.int64).view(1, -1)),
            dim=0)
        edge_list2 = torch.cat(
            (torch.tensor(adj_mat.col).type(torch.int64).view(1, -1),
             torch.tensor(adj_mat.row).type(torch.int64).view(1, -1)),
            dim=0)
        edge_list = torch.cat([edge_list1, edge_list2], dim=1)

    edge_list = add_self_loops(edge_list, loader['adj_shape'][0])
    adj = normalize_adj(edge_list)
    if 'attr_data' in loader:
        feature_mat = sp.csr_matrix(
            (loader['attr_data'], loader['attr_indices'],
             loader['attr_indptr']),
            shape=loader['attr_shape']).todense()
    elif 'attr_matrix' in loader:
        feature_mat = loader['attr_matrix']
    else:
        # BUG FIX: previously fell through to torch.tensor(None), which fails
        # with a cryptic error; fail loudly and explicitly instead.
        raise ValueError(
            'no feature matrix found in {}.npz'.format(dataset_str))
    features = torch.tensor(feature_mat)

    if 'labels_data' in loader:
        labels = sp.csr_matrix(
            (loader['labels_data'], loader['labels_indices'],
             loader['labels_indptr']),
            shape=loader['labels_shape']).todense()
    elif 'labels' in loader:
        labels = loader['labels']
    else:
        # Same fix as for features above.
        raise ValueError('no labels found in {}.npz'.format(dataset_str))
    labels = torch.tensor(labels).long()
    train_mask, val_mask, test_mask = split_data(labels, ntrain, 500, seed)

    data = Data(adj, edge_list, features, labels, train_mask, val_mask,
                test_mask)
    return data
Example #5
0
    def forward(self, x, edge_index, edge_weight=None):
        """Chebyshev spectral graph convolution over a batch of graphs.

        Computes sum_k T_k(L~) x W_k with the Chebyshev recurrence
        T_k = 2 L~ T_{k-1} - T_{k-2} on a rescaled normalized Laplacian.

        Args:
            x: node features; the size() calls and permutes imply shape
                (batch, num_nodes, in_channels) -- confirm against callers.
            edge_index: COO edge indices of shape (2, num_edges).
            edge_weight: per-edge weights. NOTE(review): despite the None
                default it is dereferenced unconditionally, so callers must
                pass it -- confirm intended contract.
        """
        row, col = edge_index
        batch, num_nodes, K = x.size(0), x.size(1), self.weight.size(0)

        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        # Degree, then -D^{-1/2} A D^{-1/2} as the off-diagonal Laplacian part.
        deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
        deg = deg.pow(-0.5)
        deg[torch.isinf(deg)] = 0  # isolated nodes: avoid inf
        lap = -deg[row] * edge_weight * deg[col]
        # Rescale the Laplacian eigenvalues into [-1, 1]: 2L/lmax - I, lmax=1.
        fill_value = -0.05  ##-0.5
        edge_index, lap = add_self_loops(edge_index, lap, fill_value,
                                         num_nodes)
        lap *= 2

        def _apply_lap(t):
            # spmm needs 2-D operands: fold (channels, batch) into one axis,
            # multiply by lap, then unfold back to (batch, nodes, channels).
            return spmm(edge_index, lap, num_nodes,
                        t.permute(1, 2, 0).contiguous().view(
                            (num_nodes, -1))).view(
                                (num_nodes, -1, batch)).permute(2, 0, 1)

        # Perform the filter operation recurrently.
        Tx_0 = x
        out = torch.matmul(Tx_0, self.weight[0])
        if K > 1:
            Tx_1 = _apply_lap(x)
            out = out + torch.matmul(Tx_1, self.weight[1])

        for k in range(2, K):
            # BUG FIX: the recurrence must apply the Laplacian to Tx_1, not to
            # the original x (the old code's own trailing comment
            # ``2 * spmm(edge_index, lap, num_nodes, Tx_1) - Tx_0`` shows the
            # intent, but the code re-used x for every order k).
            Tx_2 = 2 * _apply_lap(Tx_1) - Tx_0
            out = out + torch.matmul(Tx_2, self.weight[k])
            Tx_0, Tx_1 = Tx_1, Tx_2

        if self.bias is not None:
            out = out + self.bias

        return out
    def forward(self, x, edge_index):
        # type: (Tensor, Tensor) -> Tensor
        """Project node features and mean-aggregate them over in-edges
        (self-loops included), with optional bias and l2 normalization."""
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        # Promote a 1-D feature vector to a column matrix.
        if x.dim() == 1:
            x = x.unsqueeze(-1)
        src, dst = edge_index[0], edge_index[1]

        projected = torch.matmul(x, self.weight)
        out = scatter_mean(projected[dst], src, dim=0,
                           dim_size=projected.size(0))

        if self.bias is not None:
            out = out + self.bias
        if self.normalize:
            out = F.normalize(out, p=2.0, dim=-1)
        return out
Example #7
0
def load_wiki_data(ntrain, seed):
    """Build a Data object for the Wiki dataset from data/wiki/*.txt files."""
    # tf-idf features are stored as (row, col, value) triples, one per line.
    tfidf = torch.tensor(np.loadtxt('data/wiki/tfidf.txt')).t()
    features = torch.sparse.FloatTensor(tfidf[:2].long(),
                                        tfidf[2].float()).to_dense()

    # Make the edge list undirected: append every edge reversed, then add
    # self-loops and normalize.
    directed = torch.tensor(np.loadtxt('data/wiki/graph.txt')).long().t()
    reversed_edges = torch.stack([directed[1], directed[0]])
    edge_list = torch.cat([directed, reversed_edges], dim=1)
    edge_list = add_self_loops(edge_list, int(edge_list.max() + 1))
    adj = normalize_adj(edge_list)

    # group.txt labels are 1-based; shift to 0-based class ids.
    labels = torch.tensor(np.loadtxt('data/wiki/group.txt')).long().t()[1] - 1
    train_mask, val_mask, test_mask = split_data(labels, ntrain, 500, seed)

    return Data(adj, edge_list, features, labels, train_mask, val_mask,
                test_mask)
Example #8
0
def load_geom_data(dataset_str, ntrain, seed):
    """Load a geom-gcn style dataset from data/geom_data/<dataset_str>/."""
    # Features and labels: one "<id>\t<f1,f2,...>\t<label>" line per node,
    # with a header line that is skipped.
    with open('data/geom_data/{}/out1_node_feature_label.txt'.format(
            dataset_str)) as f:
        node_lines = f.readlines()
    feat_rows = []
    label_rows = []
    for line in node_lines[1:]:
        _node_id, feat_str, label_str = line.split('\t')
        feat_rows.append([int(v) for v in feat_str.split(',')])
        label_rows.append(int(label_str))
    features = torch.FloatTensor(feat_rows)
    labels = torch.tensor(label_rows).long()

    # Graph: tab-separated "<u>\t<v>" edge lines (header skipped), assembled
    # through networkx so duplicate edges collapse.
    with open(
            'data/geom_data/{}/out1_graph_edges.txt'.format(dataset_str)) as f:
        edge_lines = f.readlines()
    G = nx.Graph()
    G.add_edges_from(
        tuple(map(int, e.split('\t'))) for e in edge_lines[1:])
    coo_adj = nx.to_scipy_sparse_matrix(G).tocoo()
    edge_list = torch.from_numpy(
        np.vstack((coo_adj.row, coo_adj.col)).astype(np.int64))
    edge_list = add_self_loops(edge_list, features.size(0))
    adj = normalize_adj(edge_list)

    train_mask, val_mask, test_mask = split_data(labels, ntrain, ntrain * 5,
                                                 seed)

    return Data(adj, edge_list, features, labels, train_mask, val_mask,
                test_mask)
Example #9
0
 def forward(self, x, edge_index):
     # type: (Tensor, Tensor) -> Tensor
     """Project x into per-head channels, then run message passing with
     self-loops (duplicates removed first)."""
     edge_index, _ = remove_self_loops(edge_index)
     edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
     heads_x = torch.mm(x, self.weight).view(-1, self.heads,
                                             self.out_channels)
     return self.propagate(edge_index, x=heads_x, num_nodes=heads_x.size(0))
Example #10
0
    def process(self,
                batch,
                EPSILON=0,
                enforced_mask=None,
                compute_losses_and_broken=True,
                debug=False):
        """Run the recurrent processing loop over one batch of graphs.

        Adds self-loops to the batch, iterates the network step by step until
        every graph's termination condition fires, collects outputs, and (for
        AugmentingPathNetwork) extracts augmenting walks and pushes flow.
        Finally strips the self-loops again before returning.

        NOTE(review): heavily stateful -- mutates self.mask / self.mask_cp /
        self.edge_mask / self.x_curr / self.y_curr and batch in place.

        Args:
            batch: a graph batch; mutated (self-loops added then removed,
                edge_attr rewritten at the end).
            EPSILON: forwarded to utils.get_input -- semantics defined there;
                confirm.
            enforced_mask: optional externally supplied mask forwarded to
                get_masks.
            compute_losses_and_broken: forwarded to loop_body/get_outputs.
            debug: unused in this method.

        Returns:
            Whatever self.get_outputs produces for this batch.
        """
        DEVICE = get_hyperparameters()["device"]  # NOTE(review): unused below
        train = self.training
        SIZE, GRAPH_SIZES, SOURCE_NODES, STEPS_SIZE, SINK_NODES = self.prepare_constants(
            batch)

        batch = utils.add_self_loops(
            batch)  # also takes into account capacities/weights
        self.zero_tracking_losses_and_statistics()
        self.set_initial_last_states(batch, STEPS_SIZE, SOURCE_NODES)
        adj_matrix, flow_matrix = utils.get_adj_flow_matrix(
            SIZE, batch.edge_index, batch.edge_attr[:, 1])
        x, y = self.get_input_output_features(batch, SOURCE_NODES)
        self.mask, self.mask_cp, self.edge_mask = self.prepare_initial_masks(
            batch)
        assert self.mask_cp.all(), self.mask_cp
        self.processor.zero_hidden(batch.num_nodes)

        # Iterate until every graph in the batch has terminated.
        while self.loop_condition(batch.batch, x, y, STEPS_SIZE, GRAPH_SIZES):

            self.x_curr, self.y_curr = self.get_step_io(x, y)
            assert self.mask_cp.any(), self.mask_cp
            if not self.training:
                assert (self.last_continue_p > 0).any()
            # Which graphs still need processing this step / terminate next.
            # (BUG FIX: removed two dead ``start = time.time()`` assignments
            # that were never read.)
            to_process = utils.finish(x, y, batch.batch, self.steps,
                                      STEPS_SIZE, GRAPH_SIZES).bool()
            true_termination = utils.finish(x, y, batch.batch, self.steps + 1,
                                            STEPS_SIZE, GRAPH_SIZES)
            assert self.mask_cp.any(
            ), to_process if self.training else self.last_continue_p
            inp = utils.get_input(batch, EPSILON, train, self.x_curr,
                                  self.last_output)

            self.loop_body(batch, inp, true_termination, to_process,
                           compute_losses_and_broken, enforced_mask,
                           GRAPH_SIZES)
            self.mask, self.mask_cp, self.edge_mask = type(self).get_masks(
                self.training, batch, to_process, self.last_continue_p,
                enforced_mask)

        outputs = self.get_outputs(batch, adj_matrix, flow_matrix,
                                   compute_losses_and_broken)
        if type(self) == models.AugmentingPathNetwork:
            # Extract augmenting walks and push flow along them; the returned
            # flows value is not used further here.
            walks, mask_end_of_path = utils.get_walks(self.training, batch,
                                                      outputs, GRAPH_SIZES,
                                                      SOURCE_NODES, SINK_NODES)
            mins = self.find_mins(batch, walks, mask_end_of_path, GRAPH_SIZES,
                                  SOURCE_NODES, SINK_NODES)
            flows = self.augment_flow(batch, walks, mask_end_of_path, mins)

        # Undo the self-loops added at the top before handing the batch back.
        batch.edge_index, batch.edge_attr = torch_geometric.utils.remove_self_loops(
            batch.edge_index, batch.edge_attr)
        return outputs