Example #1
    def message(self, x_j, edge_index, size):
        # x_j has shape [E, out_channels]
        row, col = edge_index
        deg = pyg_utils.degree(row, size[0], dtype=x_j.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        # Broadcast the per-edge weight across all channels.
        return norm.view(-1, 1) * x_j
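For reference, the same symmetric normalization can be reproduced standalone; a minimal sketch on a toy graph, assuming degree is imported from torch_geometric.utils:

import torch
from torch_geometric.utils import degree

edge_index = torch.tensor([[0, 1, 2],
                           [1, 2, 0]])  # directed 3-cycle
row, col = edge_index
deg = degree(row, num_nodes=3, dtype=torch.float)
norm = deg.pow(-0.5)[row] * deg.pow(-0.5)[col]  # one weight per edge, shape [E]
print(norm)  # tensor([1., 1., 1.]) since every node has degree 1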
    def forward(self, x, edge_index):
        x = self.fc(x)

        row, col = edge_index
        deg = degree(row, x.size(0), dtype = x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)

        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        return self.propagate(edge_index, x=x, norm=norm)
Example #3
    def aggregate(self,
                  inputs: Tensor,
                  index: Tensor,
                  dim_size: Optional[int] = None) -> Tensor:
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        out = torch.cat(outs, dim=-1)

        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1, 1)
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        return torch.cat(outs, dim=-1)
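The scalers themselves are not shown here; one of them might look like the following sketch (hypothetical, modeled on PNA's amplification scaler, which boosts high-degree nodes by their log-degree relative to a precomputed training-set average avg_deg['log']):

import torch

def scale_amplification(out, deg, avg_deg):
    # Multiply by log(d + 1) / E[log(d + 1)]; deg broadcasts over the feature dims.
    return out * (torch.log(deg + 1) / avg_deg['log'])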
Example #4
    def message(self, x_j, edge_index, size):
        # x_j has shape [E, out_channels]

        # Step 3: Normalize node features.
        src, dst = edge_index  # we assume source_to_target message passing
        deg = degree(src, size[0], dtype=x_j.dtype)
        deg = deg.pow(-1)
        norm = deg[dst]

        return norm.view(-1, 1) * x_j  # broadcast the normalization term across all out_channels (hidden features)
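Unlike the symmetric 1/sqrt(d_i) * 1/sqrt(d_j) weighting used in most of these examples, this variant applies the plain inverse degree, so each node ends up averaging its incoming messages. A quick sanity check on an undirected toy graph (a sketch, with degree from torch_geometric.utils):

import torch
from torch_geometric.utils import degree

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])  # undirected edges 0-1 and 1-2
src, dst = edge_index
norm = degree(src, 3, dtype=torch.float).pow(-1)[dst]
print(norm)  # tensor([0.5000, 1.0000, 1.0000, 0.5000]): node 1 averages its two in-edges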
Example #5
    def message(self, x_i, x_j, edge_index, size):
        # Compute messages; x_j has shape [E, out_channels]
        row, col = edge_index
        deg = pyg_utils.degree(row, size[0], dtype=x_j.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        # Apply the computed normalization instead of discarding it.
        return norm.view(-1, 1) * x_j
Example #6
 def __call__(self, data):
     # Placeholder features so the Malkin transforms can run.
     data.x = torch.zeros((data.num_nodes, 1), dtype=torch.float)
     data = TwoMalkin()(data)
     data = ConnectedThreeMalkin()(data)
     # Overwrite the placeholder with bucketed one-hot degree features;
     # `degrees` maps each dataset name to its maximum degree.
     data.x = degree(data.edge_index[0], data.num_nodes, dtype=torch.long)
     data.x = F.one_hot(
         data.x // self.div,
         num_classes=degrees[self.args.dataset] // self.div + 1).to(torch.float)
     return data
Example #7
    def message(self, x_j, edge_index, size):
        # x_j has shape [E, out_channels]

        # Step 3: Normalize node features.
        row, col = edge_index
        deg = degree(row, size[0], dtype=x_j.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        return norm.view(-1, 1) * x_j
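One caveat: without added self-loops, a node that never appears as a source has degree zero, so deg.pow(-0.5) yields inf, which can leak into norm along edges touching that node. A common guard (a sketch, not part of the original snippet):

deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0  # zero out contributions from degree-0 nodes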
Example #8
 def __call__(self, data):
     col, x = data.edge_index[1], data.x
     deg = degree(col, data.num_nodes)
     if self.norm:
         # Rescale degrees to [0, 1] by the graph's (or a fixed) maximum.
         deg = deg / (deg.max() if self.max is None else self.max)
     deg = deg.view(-1, 1)
     if x is None:
         data.x = deg  # only attach degrees when no node features exist yet
     return data
Example #9
 def forward(self, x, edge_index, edge_attr):
     edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
     self_loop_attr = torch.zeros((x.size(0), edge_attr.size(1)),
                                  dtype=edge_attr.dtype).to(x.device)
     edge_attr = torch.cat((edge_attr, self_loop_attr), dim=0)
     row, col = edge_index
     deg = degree(col, x.size(0), dtype=x.dtype)
     deg_inv_sqrt = deg.pow(-0.5)
     norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
     return self.propagate(edge_index, x=x, edge_attr=edge_attr, norm=norm)
Example #10
    def forward(self, data):
        degrees = degree(data.edge_index[0],
                         num_nodes=data.num_nodes,
                         dtype=torch.float)
        density = torch.mm(
            degrees.view(1, -1),
            sparse_to_dense(data.edge_index,
                            num_nodes=data.num_nodes)).flatten()

        # Composite sort key: degree dominates, weighted neighbor density breaks ties.
        return degrees * 1000000 + density
Example #11
def get_max_deg(dataset):
    max_deg = 0
    for data in dataset:
        row, col = data.edge_index
        num_nodes = data.num_nodes
        deg = degree(row, num_nodes).max().item()
        if deg > max_deg:
            max_deg = int(deg)
    return max_deg
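The result typically feeds straight into a one-hot degree transform; a minimal usage sketch on a PyG dataset object:

import torch_geometric.transforms as T

dataset.transform = T.OneHotDegree(get_max_deg(dataset))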
Example #12
    def forward(self, x, edge_index, edge_weight=None):
        """"""
        edge_index, edge_weight = remove_self_loops(edge_index, edge_weight)

        row, col = edge_index
        num_nodes, num_edges, order_filter = x.size(0), row.size(0), self.weight.size(0)

        if edge_weight is None:
            edge_weight = x.new_ones((num_edges,))
        edge_weight = edge_weight.view(-1)
        assert edge_weight.size(0) == edge_index.size(1)

        deg = degree(row, num_nodes, dtype=x.dtype)

        # Compute normalized and rescaled Laplacian.
        deg = deg.pow(-0.5)
        deg[deg == float('inf')] = 0
        lap = -deg[row] * edge_weight * deg[col]

        def weight_mult(x, w):
            y = torch.einsum('fgrs,ifrs->igrs', w, x)
            return y

        def lap_mult(edge_index, lap, x):
            # Densify the (negated, normalized) Laplacian and apply it along the node dim.
            # Note: the values in `lap` are floats, so a FloatTensor is required here.
            L = torch.sparse.FloatTensor(edge_index, lap,
                                         torch.Size([x.shape[0], x.shape[0]])).to_dense()
            x_tilde = torch.einsum('ij,ifrs->jfrs', L, x)
            return x_tilde

        # Perform filter operation recurrently.
        horizon = x.shape[1]
        x = x.permute(0, 2, 1)
        x_hat = torch.rfft(x, 1, normalized=True, onesided=True)

        Tx_0 = x_hat

        y_hat = weight_mult(Tx_0, self.weight[0, :])

        if order_filter > 1:

            Tx_1 = lap_mult(edge_index, lap, x_hat)
            y_hat = y_hat + weight_mult(Tx_1, self.weight[1, :])

            for k in range(2, order_filter):
                Tx_2 = 2 * lap_mult(edge_index, lap, Tx_1) - Tx_0
                y_hat = y_hat + weight_mult(Tx_2, self.weight[k, :])

                Tx_0, Tx_1 = Tx_1, Tx_2

        y = torch.irfft(y_hat, 1, normalized=True, onesided=True, signal_sizes=(horizon,))
        y = y.permute(0, 2, 1)

        if self.bias is not None:
            y = y + self.bias

        return y
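Note that torch.rfft and torch.irfft are the legacy pre-1.8 FFT API and were removed in later PyTorch releases. A rough modern equivalent (a sketch; the downstream einsums would then act on complex tensors instead of a trailing real/imaginary dimension):

x_hat = torch.fft.rfft(x, dim=-1, norm='ortho')
y = torch.fft.irfft(y_hat, n=horizon, dim=-1, norm='ortho')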
Example #13
def train_pyg(model_name, model, train_loader, device, optimizer):
    global epoch_load_time, epoch_forward_time, epoch_backward_time, epoch_batch_time, epoch_update_time
    t10 = time.time()
    model.train()
    n_data = 0
    loss_all = 0
    epoch_train_acc = 0
    t4 = time.time()
    t2 = time.time()
    for data in train_loader:
        torch.cuda.synchronize()
        epoch_load_time = epoch_load_time + time.time() - t2
        print("data load time:", time.time() - t2)
        t7 = time.time()
        data = data.to(device)
        torch.cuda.synchronize()
        print("data to gpu:", time.time() - t7)
        if model_name == 'monet':
            row, col = data.edge_index
            deg = degree(col, data.num_nodes)
            data.edge_attr = torch.stack([1 / torch.sqrt(deg[row]), 1 / torch.sqrt(deg[col])], dim=-1)
        optimizer.zero_grad()
        torch.cuda.synchronize()
        t9 = time.time()
        #print(data.edge_attr)
        output = model(data.x, data.edge_index, data.edge_attr, data.batch)
        torch.cuda.synchronize()
        epoch_forward_time = epoch_forward_time + time.time() - t9
        print("forward:", time.time() - t9)

        loss = F.nll_loss(output, data.y)
        torch.cuda.synchronize()
        t3 = time.time()
        #loss.register_hook(print)
        loss.backward()
        torch.cuda.synchronize()
        epoch_backward_time = epoch_backward_time + time.time() - t3
        print("backward:", time.time() - t3)

        t5 = time.perf_counter()
        optimizer.step()
        torch.cuda.synchronize()
        epoch_update_time = epoch_update_time + time.perf_counter() - t5
        print("update:", time.perf_counter() - t5)

        loss_all += loss.item() * data.num_graphs
        n_data += data.y.size(0)
        epoch_train_acc += accuracy(output, data.y)
        torch.cuda.synchronize()
        epoch_batch_time = epoch_batch_time + time.time() - t7
        print("a batch:", time.time() - t7)
        t2 = time.time()
    epoch_train_acc /= n_data
    print("train:", time.time() - t10)
    return loss_all / n_data, epoch_train_acc, optimizer
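The explicit torch.cuda.synchronize() calls are needed because CUDA kernels launch asynchronously; without them the timers would only measure kernel-launch overhead. An alternative timing pattern using CUDA events (a sketch; start/end wrap whichever region is being measured):

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
output = model(data.x, data.edge_index, data.edge_attr, data.batch)  # region under test
end.record()
torch.cuda.synchronize()  # wait for the recorded events to complete
forward_ms = start.elapsed_time(end)  # elapsed GPU time in milliseconds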
Example #14
    def __init__(self, net_path, device, max_iter): 
        with open(net_path, "rb") as f:
            self.edge_list = pkl.load(f)
        # edge_list has size [4, E]: (src_node, tar_node, weight, cave_index)
        self.device = device
        self.src_nodes = T.LongTensor(self.edge_list[0]).to(device)
        self.tar_nodes = T.LongTensor(self.edge_list[1]).to(device)
        self.weights   = T.FloatTensor(self.edge_list[2]).to(device)
        self.cave_index = T.LongTensor(self.edge_list[3]).to(device)
        
        self.N = max([T.max(self.src_nodes), T.max(self.tar_nodes)]).item()+1
        self.E = len(self.src_nodes)
        self.d = degree(self.tar_nodes, num_nodes=self.N).to(device)
        self.out_d = degree(self.src_nodes, num_nodes=self.N).to(device)
        self.out_weight_d = scatter_add(self.weights, self.src_nodes).to(device)

        self.G = nx.DiGraph()
        self.G.add_edges_from(self.edge_list[:2].T)

        self.max_iter = max_iter
Example #15
def get_dataset(name, sparse=True, cleaned=False):

    if name == 'node':
        path = osp.join(os.environ['GNN_TRAINING_DATA_ROOT'], name)
        print(path)
        dataset = HitGraphDataset2(path, directed=False, categorical=True)
    else:
        path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data',
                        name)
        dataset = TUDataset(path, name, cleaned=cleaned)
        dataset.data.edge_attr = None

        if dataset.data.x is None:
            max_degree = 0
            degs = []
            for data in dataset:
                degs += [degree(data.edge_index[0], dtype=torch.long)]
                max_degree = max(max_degree, degs[-1].max().item())

            if max_degree < 1000:
                dataset.transform = T.OneHotDegree(max_degree)
            else:
                deg = torch.cat(degs, dim=0).to(torch.float)
                mean, std = deg.mean().item(), deg.std().item()
                dataset.transform = NormalizedDegree(mean, std)

        if not sparse:
            num_nodes = max_num_nodes = 0
            for data in dataset:
                num_nodes += data.num_nodes
                max_num_nodes = max(data.num_nodes, max_num_nodes)

            # Filter out a few really large graphs in order to apply DiffPool.
            if name == 'REDDIT-BINARY':
                num_nodes = min(int(num_nodes / len(dataset) * 1.5),
                                max_num_nodes)
            else:
                num_nodes = min(int(num_nodes / len(dataset) * 5),
                                max_num_nodes)

            indices = []
            for i, data in enumerate(dataset):
                if data.num_nodes <= num_nodes:
                    indices.append(i)
            dataset = dataset[torch.tensor(indices)]

            if dataset.transform is None:
                dataset.transform = T.ToDense(num_nodes)
            else:
                dataset.transform = T.Compose(
                    [dataset.transform,
                     T.ToDense(num_nodes)])

    return dataset
Example #16
    def forward(self, x, edge_index):
        edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))

        row, col = edge_index
        deg = degree(row, x.size(0), dtype=x.dtype)
        deg_inv_sqrt = deg.pow(-0.5)
        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        x = self.lin(x)
        return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x,
                              norm=norm)
Example #17
 def message(self, x_i, x_j, edge_index, size):
     if self.cached_norm is None:
         row, col = edge_index
         deg = pyg_utils.degree(row, size[0], dtype=x_j.dtype)
         deg_inv_sqrt = deg.pow(-0.5)
         norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
         if self.cache:
             self.cached_norm = norm
     else:
         norm = self.cached_norm
     return norm.view(-1, 1) * x_j
Example #18
def mask_features(x, edge_index, edge_weight, sigma):
    source, target = edge_index
    h_s, h_t = x[source], x[target]
    h = (h_t - h_s) / sigma
    h = edge_weight.view(-1, 1) * h * h
    mask = torch.zeros_like(x)  # accumulate weighted squared differences per source node
    mask.index_add_(0, source, h)
    deg = degree(source, num_nodes=x.size(0)).clamp(min=1)  # clamp avoids division by zero
    mask = torch.exp(-mask / deg.view(-1, 1))
    x = x * mask
    return x
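A quick usage sketch with random inputs (sigma scales how strongly feature disagreement across edges suppresses a node's features; torch and torch_geometric.utils.degree are assumed imported as in the function above):

import torch

x = torch.randn(4, 8)
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])  # undirected edges 0-1 and 2-3
edge_weight = torch.ones(edge_index.size(1))
x_masked = mask_features(x, edge_index, edge_weight, sigma=1.0)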
Example #19
    def aggregate(self, inputs, index, ptr=None, dim_size=None):

        if self.aggr in ['add', 'mean', 'max', None]:
            return super(GenMessagePassing, self).aggregate(inputs, index, ptr, dim_size)

        elif self.aggr in ['softmax_sg', 'softmax', 'softmax_sum']:

            if self.learn_t:
                out = scatter_softmax(inputs*self.t, index, dim=self.node_dim)
            else:
                with torch.no_grad():
                    out = scatter_softmax(inputs*self.t, index, dim=self.node_dim)

            out = scatter(inputs*out, index, dim=self.node_dim,
                          dim_size=dim_size, reduce='sum')

            if self.aggr == 'softmax_sum':
                self.sigmoid_y = torch.sigmoid(self.y)
                degrees = degree(index, num_nodes=dim_size).unsqueeze(1)
                out = torch.pow(degrees, self.sigmoid_y) * out

            return out


        elif self.aggr in ['power', 'power_sum']:
            min_value, max_value = 1e-7, 1e1
            torch.clamp_(inputs, min_value, max_value)
            out = scatter(torch.pow(inputs, self.p), index, dim=self.node_dim,
                          dim_size=dim_size, reduce='mean')
            torch.clamp_(out, min_value, max_value)
            out = torch.pow(out, 1/self.p)

            if self.aggr == 'power_sum':
                self.sigmoid_y = torch.sigmoid(self.y)
                degrees = degree(index, num_nodes=dim_size).unsqueeze(1)
                out = torch.pow(degrees, self.sigmoid_y) * out

            return out

        else:
            raise NotImplementedError('To be implemented')
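A minimal sketch of the softmax aggregation branch on toy data, assuming torch_scatter is installed:

import torch
from torch_scatter import scatter_softmax, scatter

inputs = torch.tensor([[1.0], [2.0], [3.0]])  # three messages
index = torch.tensor([0, 0, 1])               # the first two target node 0
w = scatter_softmax(inputs, index, dim=0)     # softmax within each target group
out = scatter(inputs * w, index, dim=0, reduce='sum')  # softmax-weighted sum per node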
Example #20
def compute_pr(edge_index, damp: float = 0.85, k: int = 10):
    num_nodes = edge_index.max().item() + 1
    deg_out = degree(edge_index[0])
    x = torch.ones((num_nodes, )).to(edge_index.device).to(torch.float32)

    for _ in range(k):
        edge_msg = x[edge_index[0]] / deg_out[edge_index[0]]
        # dim_size keeps the output at num_nodes even if trailing nodes have no in-edges.
        agg_msg = scatter(edge_msg, edge_index[1], dim_size=num_nodes, reduce='sum')

        x = (1 - damp) * x + damp * agg_msg

    return x
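A quick usage sketch on a toy directed cycle (note that a node with zero out-degree would produce NaNs here, since the division by deg_out has no guard):

import torch

edge_index = torch.tensor([[0, 1, 2],
                           [1, 2, 0]])
pr = compute_pr(edge_index)  # one PageRank-style score per node, shape [3]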
Example #21
    def forward(self, x: Tensor, batch: OptTensor = None) -> Tensor:
        """"""
        if batch is None:
            out = F.instance_norm(
                x.t().unsqueeze(0), self.running_mean, self.running_var,
                self.weight, self.bias, self.training
                or not self.track_running_stats, self.momentum, self.eps)
            return out.squeeze(0).t()

        batch_size = int(batch.max()) + 1

        mean = var = unbiased_var = x  # Dummies.

        if self.training or not self.track_running_stats:
            norm = degree(batch, batch_size, dtype=x.dtype).clamp_(min=1)
            norm = norm.view(-1, 1)
            unbiased_norm = (norm - 1).clamp_(min=1)

            mean = scatter(x, batch, dim=0, dim_size=batch_size,
                           reduce='add') / norm

            x = x - mean[batch]

            var = scatter(x * x,
                          batch,
                          dim=0,
                          dim_size=batch_size,
                          reduce='add')
            unbiased_var = var / unbiased_norm
            var = var / norm

            momentum = self.momentum
            if self.running_mean is not None:
                self.running_mean = (
                    1 - momentum) * self.running_mean + momentum * mean.mean(0)
            if self.running_var is not None:
                self.running_var = (
                    1 - momentum
                ) * self.running_var + momentum * unbiased_var.mean(0)
        else:
            if self.running_mean is not None:
                mean = self.running_mean.view(1, -1).expand(batch_size, -1)
            if self.running_var is not None:
                var = self.running_var.view(1, -1).expand(batch_size, -1)

            x = x - mean[batch]

        out = x / (var + self.eps).sqrt()[batch]

        if self.weight is not None and self.bias is not None:
            out = out * self.weight.view(1, -1) + self.bias.view(1, -1)

        return out
Example #22
 def deg(self):
     # Compute in-degree histogram over training data.
     deg = torch.zeros(5, dtype=torch.long)
     if self.dataset_train is not None:
         for data in self.dataset_train:
             d = degree(
                 data.edge_index[1], num_nodes=data.num_nodes, dtype=torch.long
             )
             deg += torch.bincount(d, minlength=deg.numel())
     else:
         # Fallback histogram precomputed offline when no training split is available.
         deg = torch.tensor([0, 41130, 117278, 70152, 3104])
     return deg.numpy().tolist()
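This histogram is the format PyG's PNAConv expects for its deg argument; a minimal sketch (datamodule is a hypothetical instance of the class above):

import torch
from torch_geometric.nn import PNAConv

conv = PNAConv(in_channels=64, out_channels=64,
               aggregators=['mean', 'min', 'max', 'std'],
               scalers=['identity', 'amplification', 'attenuation'],
               deg=torch.tensor(datamodule.deg()))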
Example #23
    def __call__(self, data):
        idx, x = data.edge_index[1 if self.in_degree else 0], data.x
        deg = truncate_degree(degree(idx, data.num_nodes, dtype=torch.long))
        deg = F.one_hot(deg, num_classes=self.max_degree + 1).to(torch.float)

        if x is not None and self.cat:
            x = x.view(-1, 1) if x.dim() == 1 else x
            data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)
        else:
            data.x = deg

        return data
Example #24
    def __call__(self, data):
        row, x = data.edge_index[0], data.x
        deg = degree(row, data.num_nodes, dtype=torch.long)
        deg = one_hot(deg, num_classes=self.max_degree + 1)

        if x is not None and self.cat:
            x = x.view(-1, 1) if x.dim() == 1 else x
            data.x = torch.cat([x, deg.to(x.dtype)], dim=-1)
        else:
            data.x = deg

        return data
Example #25
def batched_negative_sampling(edge_index,
                              batch,
                              num_neg_samples=None,
                              method="sparse",
                              force_undirected=False):
    r"""Samples random negative edges of multiple graphs given by
    :attr:`edge_index` and :attr:`batch`.

    Args:
        edge_index (LongTensor): The edge indices.
        batch (LongTensor): Batch vector
            :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
            node to a specific example.
        num_neg_samples (int, optional): The number of negative samples to
            return. If set to :obj:`None`, will try to return a negative edge
            for every positive edge. (default: :obj:`None`)
        method (string, optional): The method to use for negative sampling,
            *i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
            This is a memory/runtime trade-off.
            :obj:`"sparse"` will work on any graph of any size, while
            :obj:`"dense"` can perform faster true-negative checks.
            (default: :obj:`"sparse"`)
        force_undirected (bool, optional): If set to :obj:`True`, sampled
            negative edges will be undirected. (default: :obj:`False`)

    :rtype: LongTensor
    """
    split = degree(batch[edge_index[0]], dtype=torch.long).tolist()
    edge_indices = torch.split(edge_index, split, dim=1)
    num_nodes = degree(batch, dtype=torch.long)
    cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)[:-1]])

    neg_edge_indices = []
    for edge_index, N, C in zip(edge_indices, num_nodes.tolist(),
                                cum_nodes.tolist()):
        neg_edge_index = negative_sampling(edge_index - C, N, num_neg_samples,
                                           method, force_undirected) + C
        neg_edge_indices.append(neg_edge_index)

    return torch.cat(neg_edge_indices, dim=1)
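A minimal usage sketch with two single-edge graphs packed into one batch:

import torch

edge_index = torch.tensor([[0, 2],
                           [1, 3]])  # edge 0->1 in graph 0, edge 2->3 in graph 1
batch = torch.tensor([0, 0, 1, 1])   # nodes 0,1 belong to graph 0; nodes 2,3 to graph 1
neg_edge_index = batched_negative_sampling(edge_index, batch)  # negatives stay within each graph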
Example #26
    def __init__(self,
                 data,
                 state_feats=None,
                 gamma=0.99,
                 epsilon=lambda x: 0.5,
                 episode_len=10,
                 num_walks=10,
                 hidden=64,
                 network=None,
                 edata=False,
                 frozen=True):

        # Is there really not a better way to do this?
        self.max_actions = max(
            degree(data.edge_index[0]).max().long().item(),
            degree(data.edge_index[1]).max().long().item())

        if not state_feats:
            state_feats = data.num_nodes

        self.state_feats = state_feats
        self.hidden = hidden
        if not network:
            network = Q_Network_No_Action(self.state_feats, self.max_actions,
                                          hidden)

        super().__init__(data,
                         state_feats=state_feats,
                         action_feats=0,
                         gamma=gamma,
                         epsilon=epsilon,
                         episode_len=episode_len,
                         num_walks=num_walks,
                         hidden=hidden,
                         network=network,
                         edata=edata,
                         frozen=True)

        self.action_map = None
Example #27
def global_mean_pool_sparse(x, batch):

    # Global average pooling via a sparse [num_graphs, N, F] tensor.
    index = torch.stack(
        [batch, torch.arange(batch.shape[0], device=x.device)], 0)
    x_sparse = torch.sparse.FloatTensor(
        index, x, torch.Size([int(batch.max()) + 1, x.shape[0], x.shape[1]]))

    graph_sizes = degree(batch).float()
    graph_sizes[graph_sizes == 0.0] = 1.0

    return torch.sparse.sum(x_sparse, 1).to_dense() / graph_sizes.unsqueeze(1)
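Up to the guard for empty graphs, this matches a plain scatter-mean pool; an equivalent sketch using torch_scatter:

from torch_scatter import scatter

out = scatter(x, batch, dim=0, reduce='mean')  # [num_graphs, num_features]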
Example #28
    def forward(self, x: torch.Tensor, idx: torch.Tensor, dim_size: Optional[int] = None, dim: int = 0) -> torch.Tensor:
        outs = [aggr(x, idx, dim_size) for aggr in self.aggregators]
        # concatenate the different aggregator results, considering the shape of the hypercomplex components.
        out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)

        if self.scalers is not None:
            deg = degree(idx, dim_size, dtype=x.dtype).view(-1, 1)
            # Apply each degree scaler, then concatenate along the hypercomplex components.
            outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
            out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)

        out = self.transform(out)
        return out
Example #29
    def aggregate(self, inputs: torch.Tensor, index: torch.Tensor,
                  dim_size: Optional[int] = None) -> torch.Tensor:

        # inputs has shape (*, self.phm_dim * self.in_feats)
        outs = [aggr(inputs, index, dim_size) for aggr in self.aggregators]
        # concatenate the different aggregator results, considering the shape of the hypercomplex components.
        out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)

        deg = degree(index, dim_size, dtype=inputs.dtype).view(-1, 1)
        # Apply each degree scaler, then concatenate along the hypercomplex components.
        outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
        out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)
        return out
Example #30
    def forward(self, x, edge_index, edge_attr):
        x = self.fc(x)
        edge_embedding = self.bond_encoder(edge_attr)

        row, col = edge_index
        deg = degree(row, x.size(0), dtype=x.dtype) + 1
        deg_inv_sqrt = deg.pow(-0.5)

        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]

        return self.propagate(
            edge_index, x=x, edge_attr=edge_embedding, norm=norm
        ) + F.relu(x + self.root_emb.weight) * 1. / deg.view(-1, 1)