Example #1
    def __init__(self, in_features: int, out_features: int,
                 phm_dim: int, phm_rule: Union[None, nn.ParameterList], learn_phm: bool,
                 bias: bool,
                 activation: str, norm: Optional[str],
                 w_init: str, c_init: str,
                 deg: torch.Tensor,
                 aggregators: List[str] = ['mean', 'min', 'max', 'std'],
                 scalers: List[str] = ['identity', 'amplification', 'attenuation'],
                 post_layers: int = 1,
                 msg_encoder: str = "relu",
                 **kwargs):

        super(PHMPNAConvSimple, self).__init__(aggr=None, node_dim=0, **kwargs)

        self.in_features = in_features
        self.out_features = out_features
        self.bias_flag = bias
        self.activation_str = activation
        self.norm = norm
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.w_init = w_init
        self.c_init = c_init
        self.learn_phm = learn_phm
        self.aggregators_l = aggregators
        self.scalers_l = scalers
        self.aggregators = [AGGREGATORS[aggr] for aggr in aggregators]
        self.scalers = [SCALERS[scale] for scale in scalers]

        self.F_in = in_features
        self.F_out = self.out_features

        self.deg = deg.to(torch.float)
        self.avg_deg: Dict[str, float] = {
            'lin': self.deg.mean().item(),
            'log': (self.deg + 1).log().mean().item(),
            'exp': self.deg.exp().mean().item(),
        }

        in_features = (len(aggregators) * len(scalers)) * self.F_in

        modules = [PHMLinear(in_features=in_features, out_features=self.F_out, bias=self.bias_flag,
                             phm_dim=self.phm_dim, phm_rule=self.phm_rule,
                             w_init=self.w_init, c_init=self.c_init)]
        self.post_layers = post_layers
        for _ in range(post_layers - 1):
            if self.norm:
                modules += [PHMNorm(num_features=self.F_out, phm_dim=self.phm_dim, type="naive-batch-norm")]
            modules += [get_module_activation(self.activation_str)]
            modules += [PHMLinear(in_features=self.F_out, out_features=self.F_out, bias=self.bias_flag,
                                  phm_dim=self.phm_dim, phm_rule=self.phm_rule,
                                  w_init=self.w_init, c_init=self.c_init)]
        self.transform = nn.Sequential(*modules)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)


        self.reset_parameters()
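A hedged usage sketch for the constructor above, assuming PHMPNAConvSimple and torch are in scope; the feature sizes, degree tensor, and norm/activation choices are placeholders, and phm_rule=None is used so the layer is not tied to a shared contribution rule.

import torch

# Illustrative values only; `deg` stands in for the node-degree statistics the
# layer uses for its lin/log/exp average-degree scalers.
deg = torch.tensor([1.0, 2.0, 2.0, 3.0])
conv = PHMPNAConvSimple(
    in_features=32, out_features=32,
    phm_dim=4, phm_rule=None, learn_phm=True,
    bias=True, activation="relu", norm="naive-batch-norm",
    w_init="phm", c_init="standard",
    deg=deg,
)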
Example #2
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 add_self_loops: bool = True,
                 init: str = "orthogonal",
                 aggr: Optional[str] = "add",
                 same_dim: bool = True,
                 msg_encoder="identity") -> None:
        super(QGNNConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.init = init
        self.aggr = aggr
        self.transform = QLinear(in_features=in_features,
                                 out_features=out_features,
                                 bias=bias,
                                 init=init)
        self.same_dim = same_dim
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
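A hedged instantiation sketch for QGNNConv, assuming the class is in scope; the feature sizes below are placeholders and the remaining options simply mirror the defaults.

# Illustrative only; feature sizes are assumptions.
conv = QGNNConv(in_features=64, out_features=64,
                bias=True, add_self_loops=True,
                init="orthogonal", aggr="add",
                same_dim=True, msg_encoder="identity")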
Example #3
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 add_self_loops: bool = True,
                 init: str = "orthogonal",
                 aggr: Optional[str] = "softmax",
                 same_dim: bool = True,
                 msg_encoder: str = "identity",
                 **kwargs) -> None:
        super(QGNNConvSoftmax, self).__init__(aggr=None)

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.init = init
        self.aggr = aggr
        self.transform = QLinear(in_features=in_features,
                                 out_features=out_features,
                                 bias=bias,
                                 init=init)
        self.same_dim = same_dim
        self.initial_beta = kwargs.get("initial_beta")
        self.learn_beta = kwargs.get("learn_beta")
        self.beta = nn.Parameter(torch.tensor(self.initial_beta),
                                 requires_grad=self.learn_beta)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
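Note that initial_beta and learn_beta are read from **kwargs without defaults, so torch.tensor(None) would raise if they are omitted; a call therefore has to supply both. A hedged sketch with placeholder values:

# Illustrative only; initial_beta / learn_beta are effectively required here.
conv = QGNNConvSoftmax(in_features=64, out_features=64,
                       bias=True, init="orthogonal",
                       initial_beta=1.0, learn_beta=True)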
Example #4
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 add_self_loops: bool = True,
                 norm: Optional[str] = None,
                 activation: str = "relu",
                 init: str = "orthogonal",
                 aggr: str = "add",
                 msg_encoder="identity") -> None:
        super(QGINEConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.norm = norm
        self.activation_str = activation
        self.init = init
        self.aggr = aggr
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)
        self.transform = QMLP(in_features=in_features,
                              out_features=out_features,
                              factor=1,
                              bias=bias,
                              activation=activation,
                              norm=norm,
                              init=init)

        self.reset_parameters()
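QGINEConv routes its update through a QMLP, so the norm argument must be a value that QMLP accepts. A hedged sketch with placeholder sizes and no normalization:

# Illustrative only; sizes and options are assumptions.
conv = QGINEConv(in_features=64, out_features=64,
                 bias=True, add_self_loops=True,
                 norm=None, activation="relu",
                 init="orthogonal", aggr="add")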
Example #5
    def __init__(self, in_features: int, out_features: int, phm_dim: int,
                 phm_rule: Union[None, nn.ParameterList], learn_phm: bool = True,
                 bias: bool = True, add_self_loops: bool = True,
                 norm: Optional[str] = None, activation: str = "relu",
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "add",
                 msg_encoder: str = "identity") -> None:
        super(PHMGINEConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.learn_phm = learn_phm
        self.phm_rule = phm_rule
        self.norm = norm
        self.activation_str = activation
        self.w_init = w_init
        self.c_init = c_init

        self.aggr = aggr
        self.transform = PHMMLP(in_features=in_features, out_features=out_features,
                                phm_dim=phm_dim, phm_rule=phm_rule,
                                w_init=w_init, c_init=c_init,
                                factor=1, bias=bias, activation=activation, norm=norm)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)


        self.reset_parameters()
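The PHM variant additionally takes phm_dim and an optional shared phm_rule (an nn.ParameterList of contribution matrices, or None, which presumably lets the underlying PHMMLP manage its own). A hedged sketch with placeholder sizes:

# Illustrative only; feature sizes are assumptions and phm_rule is left unset.
conv = PHMGINEConv(in_features=32, out_features=32, phm_dim=4,
                   phm_rule=None, learn_phm=True, bias=True,
                   norm="naive-batch-norm", activation="relu",
                   w_init="phm", c_init="standard")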
Example #6
    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True, bias: bool = True,
                 add_self_loops: bool = True,
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "add", same_dim: bool = True,
                 msg_encoder: str = "identity") -> None:
        super(PHMConv, self).__init__(aggr=aggr)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.w_init = w_init
        self.c_init = c_init
        self.aggr = aggr
        self.same_dim = same_dim
        self.transform = PHMLinear(in_features=in_features, out_features=out_features, phm_rule=phm_rule,
                                   phm_dim=phm_dim, bias=bias, w_init=w_init, c_init=c_init,
                                   learn_phm=learn_phm)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
Example #7
    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 learn_phm: bool = True, bias: bool = True,
                 add_self_loops: bool = True,
                 w_init: str = "phm", c_init: str = "standard",
                 aggr: str = "softmax", same_dim: bool = True,
                 msg_encoder: str = "identity",
                 **kwargs) -> None:
        super(PHMConvSoftmax, self).__init__(aggr=None)

        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.phm_rule = phm_rule
        self.learn_phm = learn_phm
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.w_init = w_init
        self.c_init = c_init
        self.aggr = aggr
        self.same_dim = same_dim
        self.transform = PHMLinear(in_features=in_features, out_features=out_features, phm_rule=phm_rule,
                                   phm_dim=phm_dim, bias=bias,
                                   w_init=w_init, c_init=c_init, learn_phm=learn_phm)

        self.initial_beta = kwargs.get("initial_beta")
        self.learn_beta = kwargs.get("learn_beta")

        self.beta = nn.Parameter(torch.tensor(self.initial_beta), requires_grad=self.learn_beta)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
Example #8
    def __init__(self, in_features: int, out_features: int, phm_dim: int, phm_rule: Union[None, nn.ParameterList],
                 bias: bool = True,
                 learn_phm: bool = True,
                 activation: str = "relu", norm: Union[None, str] = None,
                 w_init: str = "phm", c_init: str = "standard",
                 factor: float = 1, **kwargs) -> None:

        super(PHMMLP, self).__init__()
        assert norm in ["None", None, "naive-batch-norm", "naive-naive-batch-norm"]
        self.in_features = in_features
        self.out_features = out_features
        self.phm_dim = phm_dim
        self.bias_flag = bias
        self.learn_phm = learn_phm
        self.phm_rule = phm_rule
        self.activation_str = activation
        self.linear1 = PHMLinear(in_features=in_features, out_features=int(factor*out_features),
                                 phm_dim=phm_dim, phm_rule=phm_rule, learn_phm=learn_phm, bias=bias,
                                 w_init=w_init, c_init=c_init)
        self.linear2 = PHMLinear(in_features=int(factor*out_features), out_features=out_features,
                                 phm_dim=phm_dim, phm_rule=phm_rule, learn_phm=learn_phm, bias=bias,
                                 w_init=w_init, c_init=c_init)
        self.activation = get_module_activation(activation)
        self.norm_type = norm
        self.factor = factor
        self.w_init = w_init
        self.c_init = c_init
        if norm in ["naive-batch-norm", "naive-naive-batch-norm"]:
            self.norm_flag = True
            self.norm = PHMNorm(num_features=int(factor*out_features), phm_dim=self.phm_dim,
                                type=norm, **kwargs)
        else:
            self.norm_flag = False

        self.reset_parameters()
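PHMMLP is the two-layer block that PHMGINEConv uses as its transform; factor scales the hidden width, and the assertion restricts norm to None/"None" or one of the two naive batch-norm types. A hedged sketch with placeholder sizes:

# Illustrative only; sizes and factor are assumptions.
mlp = PHMMLP(in_features=32, out_features=32, phm_dim=4,
             phm_rule=None, bias=True, learn_phm=True,
             activation="relu", norm="naive-batch-norm",
             w_init="phm", c_init="standard", factor=2)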
Example #9
    def __init__(self,
                 in_features: int,
                 out_features: int,
                 bias: bool = True,
                 add_self_loops: bool = True,
                 norm: Optional[str] = None,
                 activation: str = "relu",
                 init: str = "orthogonal",
                 aggr: str = "softmax",
                 msg_encoder: str = "identity",
                 **kwargs) -> None:
        super(QGINEConvSoftmax, self).__init__(aggr=None)

        self.in_features = in_features
        self.out_features = out_features
        self.bias = bias
        self.add_self_loops = add_self_loops
        self.norm = norm
        self.activation_str = activation
        self.init = init
        self.aggr = aggr
        self.transform = QMLP(in_features=in_features,
                              out_features=out_features,
                              factor=1,
                              bias=bias,
                              activation=activation,
                              norm=norm,
                              init=init)

        self.initial_beta = kwargs.get("initial_beta")
        self.learn_beta = kwargs.get("learn_beta")
        self.beta = nn.Parameter(torch.tensor(self.initial_beta),
                                 requires_grad=self.learn_beta)
        self.msg_encoder_str = msg_encoder
        self.msg_encoder = get_module_activation(activation=msg_encoder)

        self.reset_parameters()
Example #10
    def __init__(self,
                 in_features: int,
                 phm_dim: int,
                 phm_rule: Union[None, nn.ParameterList],
                 hidden_layers: list,
                 out_features: int,
                 activation: str,
                 bias: bool,
                 norm: str,
                 w_init: str,
                 c_init: str,
                 dropout: Union[float, list],
                 learn_phm: bool = True,
                 same_dropout: bool = False,
                 real_trafo: str = "linear") -> None:

        super(PHMDownstreamNet, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.learn_phm = learn_phm
        self.phm_rule = phm_rule
        self.phm_dim = phm_dim
        self.hidden_layers = hidden_layers
        self.activation_str = activation
        self.activation_func = get_module_activation(activation)
        self.w_init = w_init
        self.c_init = c_init
        self.bias = bias
        self.dropout = [dropout] * len(hidden_layers) if isinstance(
            dropout, float) else dropout
        assert len(self.dropout) == len(self.hidden_layers), "dropout list must have the same length " \
                                                             "as the number of hidden layers"
        self.norm_type = norm
        self.same_dropout = same_dropout

        # affine linear layers
        # input -> first hidden layer
        self.affine = [
            PHMLinear(in_features=in_features,
                      phm_dim=self.phm_dim,
                      phm_rule=phm_rule,
                      out_features=self.hidden_layers[0],
                      learn_phm=learn_phm,
                      bias=bias,
                      w_init=w_init,
                      c_init=c_init)
        ]
        # hidden layers
        self.affine += [
            PHMLinear(in_features=self.hidden_layers[i],
                      out_features=self.hidden_layers[i + 1],
                      phm_dim=self.phm_dim,
                      learn_phm=learn_phm,
                      phm_rule=phm_rule,
                      bias=bias,
                      w_init=w_init,
                      c_init=c_init)
            for i in range(len(self.hidden_layers) - 1)
        ]
        # output layer
        self.affine += [
            PHMLinear(in_features=self.hidden_layers[-1],
                      out_features=self.out_features,
                      phm_rule=phm_rule,
                      phm_dim=self.phm_dim,
                      learn_phm=learn_phm,
                      w_init=w_init,
                      c_init=c_init,
                      bias=bias)
        ]

        self.affine = nn.ModuleList(self.affine)

        # transform the output hypercomplex vector to a real vector with the RealTransformer module
        self.real_trafo_type = real_trafo
        self.real_trafo = RealTransformer(type=self.real_trafo_type,
                                          in_features=self.out_features,
                                          phm_dim=self.phm_dim,
                                          bias=True)

        # normalizations
        self.norm_flag = False
        if self.norm_type:
            norm_type = self.norm_type
            self.norm = [
                PHMNorm(num_features=dim, phm_dim=self.phm_dim, type=norm_type)
                for dim in self.hidden_layers
            ]
            self.norm = nn.ModuleList(self.norm)
            self.norm_flag = True

        self.reset_parameters()
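When dropout is given as a list, the constructor asserts that its length matches hidden_layers. A hedged sketch with placeholder widths and rates:

# Illustrative only; widths, dropout rates and the norm choice are assumptions.
net = PHMDownstreamNet(in_features=64, phm_dim=4, phm_rule=None,
                       hidden_layers=[64, 32], out_features=1,
                       activation="relu", bias=True, norm="naive-batch-norm",
                       w_init="phm", c_init="standard",
                       dropout=[0.2, 0.1], learn_phm=True,
                       same_dropout=False, real_trafo="linear")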
Example #11
    def __init__(self,
                 phm_dim: int = 4,
                 learn_phm: bool = True,
                 phm_rule: Union[None, nn.ParameterList] = None,
                 atom_input_dims: Union[int, list] = ATOM_FEAT_DIMS,
                 atom_encoded_dim: int = 128,
                 bond_input_dims: Union[int, list] = BOND_FEAT_DIMS,
                 naive_encoder: bool = False,
                 w_init: str = "phm",
                 c_init: str = "standard",
                 same_dropout: bool = False,
                 mp_layers: list = [128, 196, 256],
                 bias: bool = True,
                 dropout_mpnn: list = [0.0, 0.0, 0.0],
                 norm_mp: Optional[str] = "naive-batch-norm",
                 add_self_loops: bool = True,
                 msg_aggr: str = "add",
                 node_aggr: str = "sum",
                 mlp: bool = False,
                 pooling: str = "softattention",
                 activation: str = "relu",
                 real_trafo: str = "linear",
                 downstream_layers: list = [256, 128],
                 target_dim: int = 1,
                 dropout_dn: Union[list, float] = [0.2, 0.1],
                 norm_dn: Optional[str] = "naive-batch-norm",
                 msg_encoder: str = "identity",
                 **kwargs) -> None:
        super(PHMSkipConnectConcat, self).__init__()
        assert activation.lower() in ["relu", "lrelu", "elu", "selu", "swish"]
        assert len(dropout_mpnn) == len(mp_layers)
        assert pooling in ["globalsum", "softattention"], f"pooling variable '{pooling}' wrong."
        assert norm_mp in [None, "naive-batch-norm", "None", "naive-naive-batch-norm"]
        assert w_init in ["phm", "glorot_uniform", "glorot_normal"], f"w_init variable '{w_init}' wrong."
        assert c_init in ["standard", "random"], f"c_init variable '{c_init}' wrong."

        if msg_aggr == "sum":  # for pytorch_geometrics MessagePassing class.
            msg_aggr = "add"

        self.msg_encoder_str = msg_encoder

        self.phm_rule = phm_rule
        if self.phm_rule is None:
            self.variable_phm = True
        else:
            self.variable_phm = False

        self.phm_dim = phm_dim
        self.learn_phm = learn_phm
        # save input args as attributes
        self.atom_input_dims = atom_input_dims
        self.bond_input_dims = bond_input_dims

        # one hypercomplex number consists of phm_dim components, so divide the feature dims by phm_dim
        atom_encoded_dim = atom_encoded_dim // phm_dim
        mp_layers = [dim // phm_dim for dim in mp_layers]
        downstream_layers = [dim // phm_dim for dim in downstream_layers]

        self.atom_encoded_dim = atom_encoded_dim
        self.naive_encoder = naive_encoder
        self.w_init = w_init
        self.c_init = c_init
        self.same_dropout = same_dropout
        self.mp_layers = mp_layers
        self.bias = bias
        self.dropout_mpnn = dropout_mpnn
        self.norm_mp = None if norm_mp == "None" else norm_mp
        self.add_self_loops = add_self_loops
        self.msg_aggr_type = msg_aggr
        self.node_aggr_type = node_aggr
        self.mlp_mp = mlp
        self.pooling_type = pooling
        self.activation_str = activation
        self.real_trafo_type = real_trafo
        self.downstream_layers = downstream_layers
        self.target_dim = target_dim
        self.dropout_dn = dropout_dn
        self.norm_dn_type = None if norm_dn == "None" else norm_dn

        # define other attributes needed for module
        self.input_dim = atom_encoded_dim
        self.f_act = get_module_activation(self.activation_str)
        # PHM MP layers
        self.convs = nn.ModuleList([None] * len(mp_layers))
        # batch normalization layers
        self.norms = nn.ModuleList([None] * len(mp_layers))

        # atom embedding
        if naive_encoder:
            self.atomencoder = NaivePHMEncoder(out_dim=atom_encoded_dim,
                                               input_dims=atom_input_dims,
                                               phm_dim=phm_dim,
                                               combine="sum")
        else:
            self.atomencoder = PHMEncoder(out_dim=atom_encoded_dim,
                                          input_dims=atom_input_dims,
                                          phm_dim=phm_dim,
                                          combine="sum")

        # bond/edge embeddings
        self.bondencoders = []
        if naive_encoder:
            module = NaivePHMEncoder
        else:
            module = PHMEncoder

        for i in range(len(mp_layers)):
            if i == 0:
                out_dim = self.input_dim
            else:
                out_dim = self.mp_layers[i - 1] + self.input_dim

            self.bondencoders.append(
                module(input_dims=bond_input_dims,
                       out_dim=out_dim,
                       phm_dim=phm_dim,
                       combine="sum"))

        self.bondencoders = nn.ModuleList(self.bondencoders)

        # prepare PHM MP layers and norm layers if applicable
        for i in range(len(mp_layers)):
            if i == 0:
                in_dim = self.input_dim
            else:
                in_dim = self.mp_layers[i - 1] + self.input_dim
            out_dim = self.mp_layers[i]
            self.convs[i] = PHMMessagePassing(in_features=in_dim,
                                              out_features=out_dim,
                                              bias=bias,
                                              phm_dim=phm_dim,
                                              learn_phm=learn_phm,
                                              phm_rule=self.phm_rule,
                                              norm=self.norm_mp,
                                              activation=activation,
                                              w_init=w_init,
                                              c_init=c_init,
                                              aggr=msg_aggr,
                                              mlp=mlp,
                                              add_self_loops=add_self_loops,
                                              same_dim=False,
                                              msg_encoder=msg_encoder,
                                              **kwargs)

            if self.norm_mp:
                self.norms[i] = PHMNorm(num_features=out_dim,
                                        phm_dim=phm_dim,
                                        type=norm_mp)

        if pooling == "globalsum":
            self.pooling = PHMGlobalSumPooling(phm_dim=phm_dim)
        else:
            self.pooling = PHMSoftAttentionPooling(
                embed_dim=self.mp_layers[-1] + self.input_dim,
                phm_dim=phm_dim,
                learn_phm=learn_phm,
                phm_rule=self.phm_rule,
                w_init=self.w_init,
                c_init=self.c_init,
                bias=self.bias,
                real_trafo=self.real_trafo_type)

        # downstream network
        self.downstream = PHMDownstreamNet(
            in_features=self.mp_layers[-1] + self.input_dim,
            hidden_layers=self.downstream_layers,
            out_features=self.target_dim,
            phm_dim=phm_dim,
            learn_phm=learn_phm,
            phm_rule=self.phm_rule,
            activation=self.activation_str,
            bias=self.bias,
            norm=self.norm_dn_type,
            w_init=self.w_init,
            c_init=self.c_init,
            dropout=self.dropout_dn,
            same_dropout=self.same_dropout,
            real_trafo=self.real_trafo_type)
        self.reset_parameters()
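Since every hypercomplex feature carries phm_dim components, atom_encoded_dim, mp_layers and downstream_layers are divided by phm_dim in the constructor, so they should be chosen divisible by phm_dim; any extra **kwargs (e.g. initial_beta/learn_beta for a softmax aggregation) are forwarded to the message-passing layers. A hedged end-to-end sketch that leans on the defaults:

# Illustrative only; relies on the constructor defaults for the encoders and
# layer widths, with placeholder phm_dim and target_dim.
model = PHMSkipConnectConcat(phm_dim=4, target_dim=1,
                             msg_aggr="add", pooling="softattention")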