Example #1
    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True, bias: bool = True,
                 add_self_loops: bool = True,
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "softmax", same_dim: bool = True,
                 msg_encoder: str = "identity",
                 **kwargs) -> None:
        super(PHMConvSoftmax, self).__init__(aggr=None)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.w_init = w_init
        self.c_init = c_init
        self.aggr = aggr
        self.same_dim = same_dim
        self.transform = PHMLinear(in_features=in_features, out_features=out_features, phm_rule=phm_rule,
                                   phm_dim=phm_dim, bias=bias,
                                   w_init=w_init, c_init=c_init, learn_phm=learn_phm)

        # beta parameter for the softmax aggregation; `initial_beta` and `learn_beta` are expected in kwargs (no defaults are provided)
        self.initial_beta = kwargs.get("initial_beta")
        self.learn_beta = kwargs.get("learn_beta")

        self.beta = nn.Parameter(torch.tensor(self.initial_beta), requires_grad=self.learn_beta)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
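The constructor above passes `aggr=None` to `MessagePassing` and registers a learnable `beta`, which suggests that messages are combined by a per-target softmax rather than a fixed reduction. The excerpt does not include that method; the following is only a sketch of what such an `aggregate` override could look like, assuming `torch_geometric.utils.softmax` and `torch_scatter.scatter` are available.

# Hypothetical aggregate() for the softmax-weighted variant above (not part of the excerpt).
from torch_geometric.utils import softmax
from torch_scatter import scatter

    def aggregate(self, inputs: torch.Tensor, index: torch.Tensor,
                  dim_size: Optional[int] = None) -> torch.Tensor:
        # weight each incoming message by a softmax over its target node, scaled by the learnable beta
        alpha = softmax(self.beta * inputs, index, num_nodes=dim_size)
        return scatter(inputs * alpha, index, dim=0, dim_size=dim_size, reduce="sum")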
Example #2
    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True, bias: bool = True,
                 add_self_loops: bool = True,
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "add", same_dim: bool = True,
                 msg_encoder: str = "identity") -> None:
        super(PHMConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.w_init = w_init
        self.c_init = c_init
        self.aggr = aggr
        self.same_dim = same_dim
        self.transform = PHMLinear(in_features=in_features, out_features=out_features, phm_rule=phm_rule,
                                   phm_dim=phm_dim, bias=bias, w_init=w_init, c_init=c_init,
                                   learn_phm=learn_phm)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
Example #3
    def __init__(self, phm_dim: int, in_features: int, out_features: int, learn_phm: bool, init: str,
                 phm_rule, aggregators: List[str], scalers: Optional[List[str]],
                 deg: Optional[torch.Tensor]) -> None:
        super(PNAAggregator, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.aggregators_l = aggregators
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers_l = scalers
        if scalers:
            self.scalers = [SCALERS[scale] for scale in scalers]
            out_trafo_dim = in_features*(len(aggregators) * len(scalers))
            self.deg = deg.to(torch.float)
            self.avg_deg: Dict[str, float] = {
                'lin': self.deg.mean().item(),
                'log': (self.deg + 1).log().mean().item(),
                'exp': self.deg.exp().mean().item(),
            }
        else:
            self.scalers = None
            self.avg_deg = None
            out_trafo_dim = in_features*len(aggregators)

        self.transform = PHMLinear(in_features=out_trafo_dim, out_features=out_features, bias=True,
                                   phm_dim=phm_dim, phm_rule=phm_rule, learn_phm=learn_phm, init=init)

        self.reset_parameters()
Example #4
    def __init__(self, in_features: int, out_features: int,
                 phm_dim: int, phm_rule: Union[None, nn.ParameterList], learn_phm: bool,
                 bias: bool,
                 activation: str, norm: Optional[str],
                 w_init: str, c_init: str,
                 deg: torch.Tensor,
                 aggregators: List[str] = ['mean', 'min', 'max', 'std'],
                 scalers: List[str] = ['identity', 'amplification', 'attenuation'],
                 post_layers: int = 1,
                 msg_encoder: str = "relu",
                 **kwargs):

        super(PHMPNAConvSimple, self).__init__(aggr=None, node_dim=0, **kwargs)

        self.in_features = in_features
        self.out_features = out_features
        self.bias_flag = bias
        self.activation_str = activation
        self.norm = norm
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.w_init = w_init
        self.c_init = c_init
        self.learn_phm = learn_phm
        self.aggregators_l = aggregators
        self.scalers_l = scalers
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]

        self.F_in = in_features
        self.F_out = self.out_features

        self.deg = deg.to(torch.float)
        self.avg_deg: Dict[str, float] = {
            'lin': self.deg.mean().item(),
            'log': (self.deg + 1).log().mean().item(),
            'exp': self.deg.exp().mean().item(),
        }

        in_features = (len(aggregators) * len(scalers)) * self.F_in

        modules = [PHMLinear(in_features=in_features, out_features=self.F_out, bias=self.bias_flag,
                             phm_dim=self.phm_dim, phm_rule=self.phm_rule,
                             w_init=self.w_init, c_init=self.c_init)]
        self.post_layers = post_layers
        for _ in range(post_layers - 1):
            if self.norm:
                modules += [PHMNorm(num_features=self.F_out, phm_dim=self.phm_dim, type="naive-batch-norm")]
            modules += [get_module_activation(self.activation_str)]
            modules += [PHMLinear(in_features=self.F_out, out_features=self.F_out, bias=self.bias_flag,
                                  phm_dim=self.phm_dim, phm_rule=self.phm_rule,
                                  w_init=self.w_init, c_init=self.c_init)]
        self.transform = nn.Sequential(*modules)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
Example #5
class PHMSoftAttentionPooling(nn.Module):
    def __init__(self, embed_dim: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True,
                 bias: bool = True, w_init: str = "phm", c_init: str = "standard",
                 real_trafo: str = "linear"):
        super(PHMSoftAttentionPooling, self).__init__()
        self.embed_dim = embed_dim
        self.phm_dim = phm_dim
        self.w_init = w_init
        self.c_init = c_init
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.real_trafo_type = real_trafo
        self.bias = bias
        self.linear = PHMLinear(in_features=self.embed_dim, out_features=self.embed_dim, phm_dim=self.phm_dim,
                                phm_rule=phm_rule,
                                learn_phm=learn_phm, w_init=w_init, c_init=c_init,
                                bias=bias)
        self.real_trafo = RealTransformer(type=self.real_trafo_type, phm_dim=self.phm_dim,
                                          in_features=self.embed_dim, bias=True)
        self.sigmoid = nn.Sigmoid()
        self.sum_pooling = PHMGlobalSumPooling(phm_dim=self.phm_dim)
        self.reset_parameters()

    def reset_parameters(self):
        self.real_trafo.reset_parameters()
        self.linear.reset_parameters()

    def forward(self, x: torch.Tensor, batch: Batch) -> torch.Tensor:
        out = self.linear(x)  # get logits
        out = self.real_trafo(out)  # "transform" to real-valued
        out = self.sigmoid(out)  # get "probabilities"
        #x = torch.stack([*x.split(split_size=self.embed_dim, dim=-1)], dim=0)
        x = x.reshape(x.size(0), self.phm_dim, self.embed_dim)
        # apply element-wise hadamard product through broadcasting
        out = out.unsqueeze(dim=1)
        x = out * x
        x = x.reshape(x.size(0), self.phm_dim*self.embed_dim)
        x = self.sum_pooling(x, batch=batch)
        return x

    def __repr__(self):
        return "{}(embed_dim={}, phm_dim={}, phm_rule={}, learn_phm={}, " \
               "bias={}, w_init='{}', c_init='{}', real_trafo='{}')".format(self.__class__.__name__,
                                                                            self.embed_dim,
                                                                            self.phm_dim,
                                                                            self.phm_rule,
                                                                            self.learn_phm,
                                                                            self.bias,
                                                                            self.w_init,
                                                                            self.c_init,
                                                                            self.real_trafo_type)
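The forward pass expects node embeddings flattened to `[num_nodes, phm_dim * embed_dim]` together with a node-to-graph assignment vector, gates each hypercomplex component with a real-valued attention score, and sum-pools the result per graph. A minimal usage sketch, assuming the classes above are importable from the project; the sizes, `phm_rule=None`, and the way `batch` is built here are illustrative assumptions.

# Hypothetical usage; feature sizes and phm_rule=None are illustrative only.
import torch

pool = PHMSoftAttentionPooling(embed_dim=64, phm_dim=4, phm_rule=None)
x = torch.randn(10, 4 * 64)                # 10 nodes, phm_dim * embed_dim features per node
batch = torch.zeros(10, dtype=torch.long)  # all 10 nodes belong to graph 0
graph_emb = pool(x, batch)                 # one pooled embedding row per graph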
Example #6
class PNAAggregator(nn.Module):
    """  Principal Neighbourhood Aggregator inherits from nn.Module in case we want to further parametrize. """

    def __init__(self, phm_dim: int, in_features: int, out_features: int, learn_phm: bool, init: str,
                 phm_rule, aggregators: List[str], scalers: Optional[List[str]],
                 deg: Optional[torch.Tensor]) -> None:
        super(PNAAggregator, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.aggregators_l = aggregators
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers_l = scalers
        if scalers:
            self.scalers = [SCALERS[scale] for scale in scalers]
            out_trafo_dim = in_features*(len(aggregators) * len(scalers))
            self.deg = deg.to(torch.float)
            self.avg_deg: Dict[str, float] = {
                'lin': self.deg.mean().item(),
                'log': (self.deg + 1).log().mean().item(),
                'exp': self.deg.exp().mean().item(),
            }
        else:
            self.scalers = None
            self.avg_deg = None
            out_trafo_dim = in_features*len(aggregators)

        self.transform = PHMLinear(in_features=out_trafo_dim, out_features=out_features, bias=True,
                                   phm_dim=phm_dim, phm_rule=phm_rule, learn_phm=learn_phm, init=init)

        self.reset_parameters()

    def reset_parameters(self):
        self.transform.reset_parameters()

    def forward(self, x: torch.Tensor, idx: torch.Tensor, dim_size: Optional[int] = None, dim: int = 0) -> torch.Tensor:
        outs = [aggr(x, idx, dim_size) for aggr in self.aggregators]
        # concatenate the different aggregator results, considering the shape of the hypercomplex components.
        out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)

        if self.scalers is not None:
            deg = degree(idx, dim_size, dtype=x.dtype).view(-1, 1)
            # apply each degree-based scaler to the aggregated output and concatenate the results,
            # again respecting the shape of the hypercomplex components.
            outs = [scaler(out, deg, self.avg_deg) for scaler in self.scalers]
            out = phm_cat(tensors=outs, phm_dim=self.phm_dim, dim=-1)

        out = self.transform(out)
        return out
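The width of the tensor handed to `self.transform` follows directly from the two concatenations in `forward`: every aggregator is applied per hypercomplex component, and each aggregated result is additionally rescaled by every degree scaler, so `out_trafo_dim = in_features * len(aggregators) * len(scalers)`. A quick sanity check of that bookkeeping, with illustrative values:

# Worked example of the dimension bookkeeping (values are illustrative only).
in_features = 64
aggregators = ['mean', 'min', 'max', 'std']
scalers = ['identity', 'amplification', 'attenuation']
out_trafo_dim = in_features * (len(aggregators) * len(scalers))
assert out_trafo_dim == 768  # 4 aggregators x 3 scalers x 64 features per hypercomplex component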
Example #7
    def __init__(self, embed_dim: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True,
                 bias: bool = True, w_init: str = "phm", c_init: str = "standard",
                 real_trafo: str = "linear"):
        super(PHMSoftAttentionPooling, self).__init__()
        self.embed_dim = embed_dim
        self.phm_dim = phm_dim
        self.w_init = w_init
        self.c_init = c_init
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.real_trafo_type = real_trafo
        self.bias = bias
        self.linear = PHMLinear(in_features=self.embed_dim, out_features=self.embed_dim, phm_dim=self.phm_dim,
                                phm_rule=phm_rule,
                                learn_phm=learn_phm, w_init=w_init, c_init=c_init,
                                bias=bias)
        self.real_trafo = RealTransformer(type=self.real_trafo_type, phm_dim=self.phm_dim,
                                          in_features=self.embed_dim, bias=True)
        self.sigmoid = nn.Sigmoid()
        self.sum_pooling = PHMGlobalSumPooling(phm_dim=self.phm_dim)
        self.reset_parameters()
Example #8
    def __init__(self,
                 in_features: int,
                 phm_dim: int,
                 phm_rule: Union[None, nn.ParameterList],
                 hidden_layers: list,
                 out_features: int,
                 activation: str,
                 bias: bool,
                 norm: str,
                 w_init: str,
                 c_init: str,
                 dropout: Union[float, list],
                 learn_phm: bool = True,
                 same_dropout: bool = False,
                 real_trafo: str = "linear") -> None:

        super(PHMDownstreamNet, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.learn_phm = learn_phm
        self.phm_rule = phm_rule
        self.phm_dim = phm_dim
        self.hidden_layers = hidden_layers
        self.activation_str = activation
        self.activation_func = get_module_activation(activation)
        self.w_init = w_init
        self.c_init = c_init
        self.bias = bias
        self.dropout = [dropout] * len(hidden_layers) if isinstance(
            dropout, float) else dropout
        assert len(self.dropout) == len(self.hidden_layers), "dropout list must have the same length " \
                                                             "as the number of hidden layers"
        self.norm_type = norm
        self.same_dropout = same_dropout

        # affine linear layers
        # input -> first hidden layer
        self.affine = [
            PHMLinear(in_features=in_features,
                      phm_dim=self.phm_dim,
                      phm_rule=phm_rule,
                      out_features=self.hidden_layers[0],
                      learn_phm=learn_phm,
                      bias=bias,
                      w_init=w_init,
                      c_init=c_init)
        ]
        # hidden layers
        self.affine += [
            PHMLinear(in_features=self.hidden_layers[i],
                      out_features=self.hidden_layers[i + 1],
                      phm_dim=self.phm_dim,
                      learn_phm=learn_phm,
                      phm_rule=phm_rule,
                      bias=bias,
                      w_init=w_init,
                      c_init=c_init)
            for i in range(len(self.hidden_layers) - 1)
        ]
        # output layer
        self.affine += [
            PHMLinear(in_features=self.hidden_layers[-1],
                      out_features=self.out_features,
                      phm_rule=phm_rule,
                      phm_dim=self.phm_dim,
                      learn_phm=learn_phm,
                      w_init=w_init,
                      c_init=c_init,
                      bias=bias)
        ]

        self.affine = nn.ModuleList(self.affine)

        # transform the output hypercomplex vector to a real-valued vector with the RealTransformer module
        self.real_trafo_type = real_trafo
        self.real_trafo = RealTransformer(type=self.real_trafo_type,
                                          in_features=self.out_features,
                                          phm_dim=self.phm_dim,
                                          bias=True)

        # normalizations
        self.norm_flag = False
        if self.norm_type:
            norm_type = self.norm_type
            self.norm = [
                PHMNorm(num_features=dim, phm_dim=self.phm_dim, type=norm_type)
                for dim in self.hidden_layers
            ]
            self.norm = nn.ModuleList(self.norm)
            self.norm_flag = True

        self.reset_parameters()
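Only the constructor is shown above. A forward pass consistent with the registered submodules would interleave each hidden PHMLinear with the optional PHMNorm, the activation, and dropout, and finish with the output layer followed by the RealTransformer. The sketch below is an assumption about that flow (it ignores `same_dropout`, for instance), not the project's actual implementation.

    # Hypothetical forward(), consistent with the modules registered in __init__ above;
    # the project's real implementation is not part of this excerpt and may differ.
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # hidden blocks: affine -> (norm) -> activation -> dropout
        for i in range(len(self.affine) - 1):
            x = self.affine[i](x)
            if self.norm_flag:
                x = self.norm[i](x)
            x = self.activation_func(x)
            x = nn.functional.dropout(x, p=self.dropout[i], training=self.training)
        # output layer followed by the hypercomplex-to-real transformation
        x = self.affine[-1](x)
        return self.real_trafo(x)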
Example #9
class PHMConv(MessagePassing):
    r"""
    Parametrized Hypercomplex Graphconvolution operator that uses edge-attributes.
    Transformation is a linear layer.
    """

    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True, bias: bool = True,
                 add_self_loops: bool = True,
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "add", same_dim: bool = True,
                 msg_encoder: str = "identity") -> None:
        super(PHMConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.w_init = w_init
        self.c_init = c_init
        self.aggr = aggr
        self.same_dim = same_dim
        self.transform = PHMLinear(in_features=in_features, out_features=out_features, phm_rule=phm_rule,
                                   phm_dim=phm_dim, bias=bias, w_init=w_init, c_init=c_init,
                                   learn_phm=learn_phm)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()

    def reset_parameters(self):
        self.transform.reset_parameters()

    def forward(self, x: torch.Tensor, edge_index: Adj, edge_attr: torch.Tensor, size: Size = None) -> torch.Tensor:

        if self.add_self_loops:
            # keep a copy of the input so it can be added back as a self-loop/residual term after propagation
            x_c = x.clone()
        # propagate messages
        x = self.propagate(edge_index=edge_index, x=x, edge_attr=edge_attr, size=size)

        if self.same_dim:
            x = self.transform(x)
            if self.add_self_loops:
                x += x_c
        else:
            if self.add_self_loops:
                x += x_c
            x = self.transform(x)
        return x

    def message(self, x_j: torch.Tensor, edge_attr: torch.Tensor) -> torch.Tensor:
        assert x_j.size(-1) == edge_attr.size(-1)
        return self.msg_encoder(x_j + edge_attr)

    def __repr__(self):
        return "{}(in_features={}, out_features={}, phm_dim={}, phm_rule={}, " \
               "learn_phm={}, bias={}, add_self_loops={}, " \
               "w_init='{}', c_init='{}', aggr='{}')".format(self.__class__.__name__,
                                                             self.in_features,
                                                             self.out_features,
                                                             self.phm_dim,
                                                             self.phm_rule,
                                                             self.learn_phm,
                                                             self.bias,
                                                             self.add_self_loops,
                                                             self.w_init, self.c_init,
                                                             self.aggr)
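Because `message` adds `edge_attr` to `x_j`, the edge features must have exactly the same width as the node features (the assert enforces this). A minimal usage sketch, assuming `PHMConv` and its dependencies are importable from the project and that node features are stored flattened as `phm_dim * in_features` per node; the sizes and `phm_rule=None` are illustrative assumptions.

# Hypothetical usage; sizes and phm_rule=None are illustrative only.
import torch

conv = PHMConv(in_features=32, out_features=32, phm_dim=4, phm_rule=None, learn_phm=True)
x = torch.randn(6, 4 * 32)                           # 6 nodes, phm_dim * in_features features each
edge_index = torch.tensor([[0, 1, 2], [1, 2, 3]])    # 3 directed edges
edge_attr = torch.randn(edge_index.size(1), 4 * 32)  # must match the node feature width
out = conv(x, edge_index, edge_attr)                 # same width as x when same_dim=True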