def __init__(self, in_features, out_features, function='mean', bias=True, device='cpu'):
    """
    :param in_features: size of the input per node
    :param out_features: size of the output per node
    :param function: aggregation function used by the LAF aggregator
    :param bias: whether to add a learnable bias before the activation
    :param device: device used for computation
    """
    super(GCNLafLayer, self).__init__()
    self.in_features = in_features
    self.out_features = out_features
    self.device = device
    # Linear transformation applied to node features before aggregation
    self.W = nn.Parameter(
        torch.zeros(size=(in_features, out_features), device=device))
    if bias:
        self.b = nn.Parameter(torch.zeros(out_features, device=device))
    else:
        self.register_parameter('b', None)
    # Learnable LAF aggregation driven by the adjacency structure
    self.aggregator = AdjAggregationLayer(function=function, grad=True)
    self.reset_parameters()
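# Illustrative usage sketch (not part of the original source). It assumes the
# layer's forward pass takes node features x of shape (B, N, in_features) and
# a dense adjacency adj of shape (B, N, N); the forward signature is an
# assumption, not confirmed by this excerpt.
#
#   layer = GCNLafLayer(in_features=16, out_features=32, function='mean')
#   x = torch.randn(4, 10, 16)                      # 4 graphs, 10 nodes each
#   adj = torch.randint(0, 2, (4, 10, 10)).float()  # dense adjacency matrices
#   out = layer(x, adj)                             # expected shape: (4, 10, 32)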
def __init__(self, in_features, out_features, aggregators, scalers, avg_d, self_loop,
             pretrans_layers, posttrans_layers, device):
    """
    :param in_features: size of the input per node of the tower
    :param out_features: size of the output per node of the tower
    :param aggregators: set of aggregation functions each taking as input X (B x N x N x Din), adj (B x N x N), self_loop and device
    :param scalers: set of scaling functions each taking as input X (B x N x Din), adj (B x N x N) and avg_d
    :param avg_d: average degree statistic passed to the scalers
    :param self_loop: whether self-loops are included during aggregation
    :param pretrans_layers: number of layers of the pre-aggregation MLP
    :param posttrans_layers: number of layers of the post-aggregation MLP
    :param device: device used for computation
    """
    super(PNALafTower, self).__init__()
    self.device = device
    self.in_features = in_features
    self.out_features = out_features
    self.gru_features = in_features * len(aggregators) * len(scalers)
    # One learnable LAF aggregator per requested aggregation function
    self.aggregators_list = nn.ModuleList()
    for agg in aggregators:
        aggr = AdjAggregationLayer(grad=True, device=device, function=agg)
        aggr.reset_parameters()
        self.aggregators_list.append(aggr)
    self.scalers = scalers
    self.self_loop = self_loop
    # Pre-transformation acts on concatenated pairs of node features
    self.pretrans = MLP(in_size=2 * self.in_features,
                        hidden_size=self.in_features,
                        out_size=self.in_features,
                        layers=pretrans_layers,
                        mid_activation='relu',
                        last_activation='relu')
    # Post-transformation combines the original features with every
    # (aggregator x scaler) output, hence the +1 in the input size
    self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * self.in_features,
                         hidden_size=self.out_features,
                         out_size=self.out_features,
                         layers=posttrans_layers,
                         mid_activation='relu',
                         last_activation='relu')
    self.avg_d = avg_d
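# Illustrative usage sketch (not part of the original source). The identity
# scaler below is a hypothetical stand-in matching the documented signature
# (X, adj, avg_d); real scalers and any aggregator names beyond 'mean' depend
# on what AdjAggregationLayer supports and are assumptions here.
#
#   identity_scaler = lambda X, adj, avg_d: X
#   tower = PNALafTower(in_features=16, out_features=16,
#                       aggregators=['mean'],
#                       scalers=[identity_scaler],
#                       avg_d=4.0, self_loop=False,
#                       pretrans_layers=1, posttrans_layers=1,
#                       device='cpu')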
def __init__(self, in_features, out_features, function='mean', fc_layers=2, device='cpu'):
    """
    :param in_features: size of the input per node
    :param out_features: size of the output per node
    :param function: aggregation function used by the LAF aggregator
    :param fc_layers: number of fully connected layers applied after the aggregator
    :param device: device used for computation
    """
    super(GINLafLayer, self).__init__()
    self.device = device
    self.in_features = in_features
    self.out_features = out_features
    # Learnable epsilon weighting the central node, as in GIN
    self.epsilon = nn.Parameter(torch.zeros(size=(1,), device=device))
    self.post_transformation = MLP(in_size=in_features,
                                   hidden_size=max(in_features, out_features),
                                   out_size=out_features,
                                   layers=fc_layers,
                                   mid_activation='relu',
                                   last_activation='relu',
                                   mid_b_norm=True,
                                   last_b_norm=False,
                                   device=device)
    self.aggregator = AdjAggregationLayer(function=function, grad=True)
    self.reset_parameters()
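# Illustrative usage sketch (not part of the original source); the forward
# signature (x, adj) is assumed by analogy with the other layers in this file.
#
#   gin = GINLafLayer(in_features=16, out_features=32, function='mean')
#   x = torch.randn(4, 10, 16)
#   adj = torch.randint(0, 2, (4, 10, 10)).float()
#   out = gin(x, adj)   # expected shape: (4, 10, 32)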
def __init__(self, in_size, hidden_size, out_size, fc_layers=3, device='cpu',
             final_activation='relu', aggregation='mean'):
    """
    :param in_size: size of the input per node
    :param hidden_size: size of the hidden layers of the readout MLP
    :param out_size: size of the graph-level output
    :param fc_layers: number of fully connected layers in the readout MLP
    :param device: device used for computation
    :param final_activation: activation applied by the last MLP layer
    :param aggregation: aggregation function used to pool node features
    """
    super(LafReadout, self).__init__()
    # LAF aggregator pooling node representations into a graph representation
    self.aggregator = AdjAggregationLayer(device=device, function=aggregation)
    self.aggregator.reset_parameters()
    self.mlp = MLP(in_size=in_size,
                   hidden_size=hidden_size,
                   out_size=out_size,
                   layers=fc_layers,
                   mid_activation="relu",
                   last_activation=final_activation,
                   mid_b_norm=True,
                   last_b_norm=False,
                   device=device)
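# Illustrative usage sketch (not part of the original source); it assumes the
# readout is called on node embeddings x of shape (B, N, in_size) with an
# adjacency adj of shape (B, N, N) and returns one vector per graph. That
# calling convention is an assumption, not confirmed by this excerpt.
#
#   readout = LafReadout(in_size=32, hidden_size=64, out_size=10)
#   graph_repr = readout(x, adj)   # expected shape: (B, 10)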