Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 function='mean',
                 fc_layers=2,
                 device='cpu'):
        """
        :param in_features:     size of the input per node
        :param out_features:    size of the output per node
        :param function:        aggregation function used by the LAF aggregator
        :param fc_layers:       number of fully connected layers applied after the aggregation
        :param device:          device used for computation
        """
        super(GINLafLayer, self).__init__()

        self.device = device
        self.in_features = in_features
        self.out_features = out_features
        self.epsilon = nn.Parameter(torch.zeros(size=(1, ), device=device))
        self.post_transformation = MLP(in_size=in_features,
                                       hidden_size=max(in_features,
                                                       out_features),
                                       out_size=out_features,
                                       layers=fc_layers,
                                       mid_activation='relu',
                                       last_activation='relu',
                                       mid_b_norm=True,
                                       last_b_norm=False,
                                       device=device)
        self.aggregator = AdjAggregationLayer(function=function, grad=True)
        self.reset_parameters()
Example #2
    def __init__(self,
                 in_features,
                 out_features,
                 function='mean',
                 bias=True,
                 device='cpu'):
        """
        :param in_features:     size of the input per node
        :param out_features:    size of the output per node
        :param function:        aggregation function used by the LAF aggregator
        :param bias:            whether to add a learnable bias before the activation
        :param device:          device used for computation
        """
        super(GCNLafLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.device = device
        self.W = nn.Parameter(
            torch.zeros(size=(in_features, out_features), device=device))
        if bias:
            self.b = nn.Parameter(torch.zeros(out_features, device=device))
        else:
            self.register_parameter('b', None)

        self.aggregator = AdjAggregationLayer(function=function, grad=True)
        self.reset_parameters()
Example #3
File: layer.py  Project: giannipele/pna
    def __init__(self, in_features, out_features, aggregators, scalers, avg_d,
                 self_loop, pretrans_layers, posttrans_layers, device):
        """
        :param in_features:     size of the input per node of the tower
        :param out_features:    size of the output per node of the tower
        :param aggregators:     set of aggregation functions, each taking as input X (B x N x N x Din), adj (B x N x N), self_loop and device
        :param scalers:         set of scaling functions, each taking as input X (B x N x Din), adj (B x N x N) and avg_d
        :param avg_d:           average degree statistic used by the scalers
        :param self_loop:       whether the aggregators add a self-loop to each node
        :param pretrans_layers: number of layers in the pre-aggregation MLP
        :param posttrans_layers: number of layers in the post-aggregation MLP
        :param device:          device used for computation
        """
        super(PNALafTower, self).__init__()

        self.device = device
        self.in_features = in_features
        self.out_features = out_features
        self.gru_features = in_features * len(aggregators) * len(scalers)
        self.aggregators_list = nn.ModuleList()
        for agg in aggregators:
            aggr = AdjAggregationLayer(grad=True, device=device, function=agg)
            aggr.reset_parameters()
            self.aggregators_list.append(aggr)
        self.scalers = scalers
        self.self_loop = self_loop
        self.pretrans = MLP(in_size=2 * self.in_features,
                            hidden_size=self.in_features,
                            out_size=self.in_features,
                            layers=pretrans_layers,
                            mid_activation='relu',
                            last_activation='relu')
        self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) *
                             self.in_features,
                             hidden_size=self.out_features,
                             out_size=self.out_features,
                             layers=posttrans_layers,
                             mid_activation='relu',
                             last_activation='relu')
        self.avg_d = avg_d
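
The tower's forward pass is not shown in this snippet, so the sketch below only illustrates the aggregate-then-scale-then-concatenate pattern implied by gru_features = in_features * len(aggregators) * len(scalers). The mean_aggregator, identity_scaler and amplification_scaler callables are hypothetical stand-ins matching the signatures described in the docstring, not the project's actual aggregators or scalers.

import torch

def mean_aggregator(X, adj, self_loop=False, device='cpu'):
    # X: (B, N, N, Din) pairwise messages, adj: (B, N, N) -> (B, N, Din)
    mask = adj.unsqueeze(-1)
    deg = mask.sum(dim=2).clamp(min=1)
    return (X * mask).sum(dim=2) / deg

def identity_scaler(X, adj, avg_d):
    return X

def amplification_scaler(X, adj, avg_d):
    # scale each node's features by log(degree + 1) / avg_d
    deg = adj.sum(dim=-1, keepdim=True)
    return X * torch.log(deg + 1) / avg_d

aggregators, scalers = [mean_aggregator], [identity_scaler, amplification_scaler]

B, N, Din = 2, 5, 8
h = torch.randn(B, N, Din)
adj = (torch.rand(B, N, N) > 0.5).float()
avg_d = torch.log(adj.sum(-1).mean() + 1)

# messages: here just the neighbours' features broadcast to (B, N, N, Din);
# the real tower first runs concatenated node pairs through its pretrans MLP
msgs = h.unsqueeze(1).expand(B, N, N, Din)

# every scaler applied to every aggregator's output, then concatenated:
# Din * len(aggregators) * len(scalers) features per node
agg = torch.cat([s(a(msgs, adj), adj, avg_d)
                 for a in aggregators for s in scalers], dim=-1)
print(agg.shape)  # torch.Size([2, 5, 16])

The posttrans MLP above would then consume this concatenation together with the original node features, which is why its in_size is (len(aggregators) * len(scalers) + 1) * in_features.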
Example #4
File: layers.py  Project: giannipele/pna
class LafReadout(nn.Module):
    def __init__(self,
                 in_size,
                 hidden_size,
                 out_size,
                 fc_layers=3,
                 device='cpu',
                 final_activation='relu',
                 aggregation='mean'):
        super(LafReadout, self).__init__()
        self.aggregator = AdjAggregationLayer(device=device,
                                              function=aggregation)
        self.aggregator.reset_parameters()
        self.mlp = MLP(in_size=in_size,
                       hidden_size=hidden_size,
                       out_size=out_size,
                       layers=fc_layers,
                       mid_activation="relu",
                       last_activation=final_activation,
                       mid_b_norm=True,
                       last_b_norm=False,
                       device=device)

    def forward(self, x):
        # all-ones adjacency: every node aggregates over all nodes (graph-level readout);
        # create it on the same device as x to avoid a device mismatch
        sh = (x.shape[0], x.shape[1], x.shape[1])
        adj = torch.ones(sh, device=x.device)
        x = self.aggregator(x, adj)
        return self.mlp(x)
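
A minimal usage sketch, assuming LafReadout and its AdjAggregationLayer / MLP dependencies can be imported from the project's layers module (the import path and argument values are assumptions, not confirmed by the snippet above):

import torch
from layers import LafReadout  # hypothetical import path

# readout: 16 input features per node, 4 output features
readout = LafReadout(in_size=16, hidden_size=32, out_size=4,
                     fc_layers=3, device='cpu', aggregation='mean')

x = torch.randn(8, 10, 16)  # 8 graphs in the batch, 10 nodes each, 16 features per node
out = readout(x)            # aggregate over the all-ones adjacency, then apply the MLP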
Example #5
class GINLafLayer(nn.Module):
    """
        Graph Isomorphism Network layer, similar to https://arxiv.org/abs/1810.00826
    """
    def __init__(self,
                 in_features,
                 out_features,
                 function='mean',
                 fc_layers=2,
                 device='cpu'):
        """
        :param in_features:     size of the input per node
        :param out_features:    size of the output per node
        :param function:        aggregation function used by the LAF aggregator
        :param fc_layers:       number of fully connected layers applied after the aggregation
        :param device:          device used for computation
        """
        super(GINLafLayer, self).__init__()

        self.device = device
        self.in_features = in_features
        self.out_features = out_features
        self.epsilon = nn.Parameter(torch.zeros(size=(1, ), device=device))
        self.post_transformation = MLP(in_size=in_features,
                                       hidden_size=max(in_features,
                                                       out_features),
                                       out_size=out_features,
                                       layers=fc_layers,
                                       mid_activation='relu',
                                       last_activation='relu',
                                       mid_b_norm=True,
                                       last_b_norm=False,
                                       device=device)
        self.aggregator = AdjAggregationLayer(function=function, grad=True)
        self.reset_parameters()

    def reset_parameters(self):
        self.epsilon.data.fill_(0.1)
        self.aggregator.reset_parameters()
        print("Reset weights: {}".format(self.aggregator.weights))

    def forward(self, input, adj):
        (B, N, _) = adj.shape

        # add self-loops weighted by (1 + epsilon), realising GIN's (1 + eps) * h_v term,
        # then aggregate over the modified adjacency
        mod_adj = adj + torch.eye(
            N, device=self.device).unsqueeze(0) * (1 + self.epsilon)
        support = self.aggregator(input, mod_adj)

        # post-aggregation transformation
        return self.post_transformation(support)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
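
A brief usage sketch for the layer above, assuming the class and its dependencies are importable (the module name is an assumption); it shows the dense (B, N, F) node features and (B, N, N) adjacency the forward pass expects:

import torch
from layer import GINLafLayer  # hypothetical import path

layer = GINLafLayer(in_features=16, out_features=32, function='mean', fc_layers=2)

B, N = 4, 10
x = torch.randn(B, N, 16)                  # node features
adj = (torch.rand(B, N, N) > 0.5).float()  # dense adjacency matrices
out = layer(x, adj)                        # aggregation with (1 + epsilon) self-loops, then the MLP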
Example #6
class GCNLafLayer(nn.Module):
    """
        GCN layer, similar to https://arxiv.org/abs/1609.02907
        Implementation inspired by https://github.com/tkipf/pygcn
    """
    def __init__(self,
                 in_features,
                 out_features,
                 function='mean',
                 bias=True,
                 device='cpu'):
        """
        :param in_features:     size of the input per node
        :param out_features:    size of the output per node
        :param function:        aggregation function used by the LAF aggregator
        :param bias:            whether to add a learnable bias before the activation
        :param device:          device used for computation
        """
        super(GCNLafLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.device = device
        self.W = nn.Parameter(
            torch.zeros(size=(in_features, out_features), device=device))
        if bias:
            self.b = nn.Parameter(torch.zeros(out_features, device=device))
        else:
            self.register_parameter('b', None)

        self.aggregator = AdjAggregationLayer(function=function, grad=True)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.W.size(1))
        self.W.data.uniform_(-stdv, stdv)
        if self.b is not None:
            self.b.data.uniform_(-stdv, stdv)
        self.aggregator.reset_parameters()
        print("Reset weights: {}".format(self.aggregator.weights))

    def forward(self, X, adj):
        (B, N, _) = adj.shape

        # linear transformation
        XW = torch.matmul(X, self.W)

        # LAF aggregation
        y = self.aggregator(XW, adj)

        if self.b is not None:
            y = y + self.b
        return F.leaky_relu(y)

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
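
For intuition, the following re-implements what GCNLafLayer.forward computes with plain tensor ops, assuming a simple mean aggregation; it is only an illustration and does not reproduce the learnable LAF aggregator used above:

import torch
import torch.nn.functional as F

B, N, Din, Dout = 2, 6, 8, 4
X = torch.randn(B, N, Din)
adj = (torch.rand(B, N, N) > 0.5).float()
W = torch.randn(Din, Dout)
b = torch.zeros(Dout)

XW = torch.matmul(X, W)                           # per-node linear transformation
deg = adj.sum(dim=-1, keepdim=True).clamp(min=1)  # node degrees
y = torch.bmm(adj, XW) / deg                      # mean over each node's neighbours
out = F.leaky_relu(y + b)                         # bias + activation, as in the layer
print(out.shape)                                  # torch.Size([2, 6, 4])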
Example #7
File: layers.py  Project: giannipele/pna
    def __init__(self,
                 in_size,
                 hidden_size,
                 out_size,
                 fc_layers=3,
                 device='cpu',
                 final_activation='relu',
                 aggregation='mean'):
        super(LafReadout, self).__init__()
        self.aggregator = AdjAggregationLayer(device=device,
                                              function=aggregation)
        self.aggregator.reset_parameters()
        self.mlp = MLP(in_size=in_size,
                       hidden_size=hidden_size,
                       out_size=out_size,
                       layers=fc_layers,
                       mid_activation="relu",
                       last_activation=final_activation,
                       mid_b_norm=True,
                       last_b_norm=False,
                       device=device)