Example #1
    def __init__(self,
                 units,
                 use_edge_features=False,
                 activation='kgcnn>leaky_relu',
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(AttentionHeadGAT, self).__init__(**kwargs)
        # graph args
        self.use_edge_features = use_edge_features

        # dense args
        self.units = int(units)

        kernel_args = {"use_bias": use_bias, "kernel_regularizer": kernel_regularizer,
                       "activity_regularizer": activity_regularizer, "bias_regularizer": bias_regularizer,
                       "kernel_constraint": kernel_constraint, "bias_constraint": bias_constraint,
                       "kernel_initializer": kernel_initializer, "bias_initializer": bias_initializer}

        self.lay_linear_trafo = Dense(units, activation="linear", **kernel_args, **self._kgcnn_info)
        self.lay_alpha = Dense(1, activation=activation, **kernel_args, **self._kgcnn_info)
        self.lay_gather_in = GatherNodesIngoing(**self._kgcnn_info)
        self.lay_gather_out = GatherNodesOutgoing(**self._kgcnn_info)
        self.lay_concat = Concatenate(axis=-1, **self._kgcnn_info)
        self.lay_pool_attention = PoolingLocalEdgesAttention(**self._kgcnn_info)
        self.lay_final_activ = Activation(activation=activation, **self._kgcnn_info)
Example #2
    def __init__(self, units,
                 cfconv_pool='segment_sum',
                 use_bias=True,
                 activation='kgcnn>shifted_softplus',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize Layer."""
        super(SchNetCFconv, self).__init__(**kwargs)
        self.cfconv_pool = cfconv_pool
        self.units = units
        self.use_bias = use_bias

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer}
        # Layer
        self.lay_dense1 = Dense(units=self.units, activation=activation, use_bias=self.use_bias, **kernel_args,
                                **self._kgcnn_info)
        self.lay_dense2 = Dense(units=self.units, activation='linear', use_bias=self.use_bias, **kernel_args,
                                **self._kgcnn_info)
        self.lay_sum = PoolingLocalEdges(pooling_method=cfconv_pool, **self._kgcnn_info)
        self.gather_n = GatherNodesOutgoing(**self._kgcnn_info)
        self.lay_mult = Multiply(**self._kgcnn_info)
Example #3
    def __init__(self,
                 units,
                 pooling_method='sum',
                 normalize_by_weights=False,
                 activation='kgcnn>leaky_relu',
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(GCN, self).__init__(**kwargs)
        self.normalize_by_weights = normalize_by_weights
        self.pooling_method = pooling_method
        self.units = units

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer, "use_bias": use_bias}
        pool_args = {"pooling_method": pooling_method, "normalize_by_weights": normalize_by_weights}

        # Layers
        self.lay_gather = GatherNodesOutgoing(**self._kgcnn_info)
        self.lay_dense = Dense(units=self.units, activation='linear',
                               input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate,
                               **kernel_args)
        self.lay_pool = PoolingWeightedLocalEdges(**pool_args, **self._kgcnn_info)
        self.lay_act = Activation(activation, ragged_validate=self.ragged_validate,
                                  input_tensor_type=self.input_tensor_type)
Example #4
    def __init__(self,
                 units,
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(ResidualLayer, self).__init__(**kwargs)
        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"
        dense_args = {
            "units": units,
            "activation": activation,
            "use_bias": use_bias,
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }

        self.dense_1 = Dense(**dense_args)
        self.dense_2 = Dense(**dense_args)
        self.add_end = Add()
Example #5
    def __init__(self,
                 units=128,
                 cfconv_pool='sum',
                 use_bias=True,
                 activation='kgcnn>shifted_softplus',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize Layer."""
        super(SchNetInteraction, self).__init__(**kwargs)

        self.cfconv_pool = cfconv_pool
        self.use_bias = use_bias
        self.units = units

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer}
        conv_args = {"units": self.units, "use_bias": use_bias, "activation": activation, "cfconv_pool": cfconv_pool}

        # Layers
        self.lay_cfconv = SchNetCFconv(**conv_args, **kernel_args, **self._kgcnn_info)
        self.lay_dense1 = Dense(units=self.units, activation='linear', use_bias=False,
                                **self._kgcnn_info, **kernel_args)
        self.lay_dense2 = Dense(units=self.units, activation=activation, use_bias=self.use_bias,
                                **self._kgcnn_info, **kernel_args)
        self.lay_dense3 = Dense(units=self.units, activation='linear', use_bias=self.use_bias,
                                **self._kgcnn_info, **kernel_args)
        self.lay_add = Add(**self._kgcnn_info)
Example #6
    def __init__(self,
                 emb_size,
                 out_emb_size,
                 num_dense,
                 num_targets=12,
                 use_bias=True,
                 output_kernel_initializer="zeros",
                 kernel_initializer='kgcnn>glorot_orthogonal',
                 bias_initializer='zeros',
                 activation='kgcnn>swish',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 pooling_method="sum",
                 **kwargs):
        """Initialize layer."""
        super(DimNetOutputBlock, self).__init__(**kwargs)
        self.pooling_method = pooling_method
        self.emb_size = emb_size
        self.out_emb_size = out_emb_size
        self.num_dense = num_dense
        self.num_targets = num_targets
        self.use_bias = use_bias

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_initializer": bias_initializer,
            "bias_regularizer": bias_regularizer,
            "bias_constraint": bias_constraint,
        }

        self.dense_rbf = Dense(emb_size,
                               use_bias=False,
                               kernel_initializer=kernel_initializer,
                               **kernel_args,
                               **self._kgcnn_info)
        self.up_projection = Dense(out_emb_size,
                                   use_bias=False,
                                   kernel_initializer=kernel_initializer,
                                   **kernel_args,
                                   **self._kgcnn_info)
        self.dense_mlp = MLP([out_emb_size] * num_dense,
                             activation=activation,
                             kernel_initializer=kernel_initializer,
                             use_bias=use_bias,
                             **kernel_args,
                             **self._kgcnn_info)
        self.dimnet_mult = Multiply(**self._kgcnn_info)
        self.pool = PoolingLocalEdges(pooling_method=self.pooling_method,
                                      **self._kgcnn_info)
        self.dense_final = Dense(num_targets,
                                 use_bias=False,
                                 kernel_initializer=output_kernel_initializer,
                                 **kernel_args,
                                 **self._kgcnn_info)
Example #7
    def __init__(self,
                 units,
                 use_edge_features=False,
                 activation=None,
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(AttentionHeadGAT, self).__init__(**kwargs)
        # graph args
        self.use_edge_features = use_edge_features

        # dense args
        self.units = int(units)
        if activation is None and "leaky_relu" in kgcnn_custom_act:
            activation = {"class_name": "leaky_relu", "config": {"alpha": 0.2}}
        elif activation is None:
            activation = "relu"

        kernel_args = {
            "use_bias": use_bias,
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        dens_args = {
            "ragged_validate": self.ragged_validate,
            "input_tensor_type": self.input_tensor_type
        }
        dens_args.update(kernel_args)
        gather_args = self._all_kgcnn_info
        pooling_args = self._all_kgcnn_info

        self.lay_linear_trafo = Dense(units, activation="linear", **dens_args)
        self.lay_alpha = Dense(1, activation=activation, **dens_args)
        self.lay_gather_in = GatherNodesIngoing(**gather_args)
        self.lay_gather_out = GatherNodesOutgoing(**gather_args)
        self.lay_concat = Concatenate(axis=-1,
                                      input_tensor_type=self.input_tensor_type)
        self.lay_pool_attention = PoolingLocalEdgesAttention(**pooling_args)
        self.lay_final_activ = Activation(
            activation=activation, input_tensor_type=self.input_tensor_type)
Example #8
    def __init__(self,
                 units,
                 depth=3,
                 pooling_method="sum",
                 activation='kgcnn>leaky_relu',
                 activation_context="elu",
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 recurrent_activation='sigmoid',
                 recurrent_initializer='orthogonal',
                 recurrent_regularizer=None,
                 recurrent_constraint=None,
                 dropout=0.0,
                 recurrent_dropout=0.0,
                 reset_after=True,
                 **kwargs):
        """Initialize layer."""
        super(AttentiveNodePooling, self).__init__(**kwargs)
        self.pooling_method = pooling_method
        self.depth = depth
        # dense args
        self.units = int(units)

        kernel_args = {"use_bias": use_bias, "kernel_regularizer": kernel_regularizer,
                       "activity_regularizer": activity_regularizer, "bias_regularizer": bias_regularizer,
                       "kernel_constraint": kernel_constraint, "bias_constraint": bias_constraint,
                       "kernel_initializer": kernel_initializer, "bias_initializer": bias_initializer}
        gru_args = {"recurrent_activation": recurrent_activation,
                    "use_bias": use_bias, "kernel_initializer": kernel_initializer,
                    "recurrent_initializer": recurrent_initializer, "bias_initializer": bias_initializer,
                    "kernel_regularizer": kernel_regularizer, "recurrent_regularizer": recurrent_regularizer,
                    "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                    "recurrent_constraint": recurrent_constraint, "bias_constraint": bias_constraint,
                    "dropout": dropout, "recurrent_dropout": recurrent_dropout, "reset_after": reset_after}

        self.lay_linear_trafo = Dense(units, activation="linear", **kernel_args, **self._kgcnn_info)
        self.lay_alpha = Dense(1, activation=activation, **kernel_args, **self._kgcnn_info)
        self.lay_gather_s = GatherState(**self._kgcnn_info)
        self.lay_concat = Concatenate(axis=-1, **self._kgcnn_info)
        self.lay_pool_start = PoolingNodes(pooling_method=self.pooling_method, **self._kgcnn_info)
        self.lay_pool_attention = PoolingNodesAttention(**self._kgcnn_info)
        self.lay_final_activ = Activation(activation=activation_context, **self._kgcnn_info)
        self.lay_gru = tf.keras.layers.GRUCell(units=units, activation="tanh", **gru_args)
Example #9
    def __init__(self,
                 units,
                 pooling_method='sum',
                 normalize_by_weights=False,
                 activation=None,
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(GCN, self).__init__(**kwargs)
        self.normalize_by_weights = normalize_by_weights
        self.pooling_method = pooling_method
        self.units = units
        if activation is None and 'leaky_relu' in kgcnn_custom_act:
            activation = {"class_name": "leaky_relu", "config": {"alpha": 0.2}}
        elif activation is None:
            activation = "relu"

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer, "use_bias": use_bias}
        gather_args = self._all_kgcnn_info
        pool_args = {"pooling_method": pooling_method, "normalize_by_weights": normalize_by_weights}
        pool_args.update(self._all_kgcnn_info)

        # Layers
        self.lay_gather = GatherNodesOutgoing(**gather_args)
        self.lay_dense = Dense(units=self.units, activation='linear',
                               input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate,
                               **kernel_args)
        self.lay_pool = PoolingWeightedLocalEdges(**pool_args)
        self.lay_act = Activation(activation, ragged_validate=self.ragged_validate,
                                  input_tensor_type=self.input_tensor_type)
Example #10
    def __init__(self, units,
                 cfconv_pool='segment_sum',
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize Layer."""
        super(SchNetCFconv, self).__init__(**kwargs)
        self.cfconv_pool = cfconv_pool
        self.units = units
        self.use_bias = use_bias

        if activation is None and 'shifted_softplus' in kgcnn_custom_act:
            activation = 'shifted_softplus'
        elif activation is None:
            activation = "selu"

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer}
        pooling_args = {"pooling_method": cfconv_pool}
        pooling_args.update(self._all_kgcnn_info)
        # Layer
        self.lay_dense1 = Dense(units=self.units, activation=activation, use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_dense2 = Dense(units=self.units, activation='linear', use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_sum = PoolingLocalEdges(**pooling_args)
        self.gather_n = GatherNodesOutgoing(**self._all_kgcnn_info)
        self.lay_mult = Multiply(input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate)
Example #11
    def __init__(self, units,
                 use_bias=True,
                 activation='kgcnn>swish',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(ResidualLayer, self).__init__(**kwargs)

        dense_args = {"units": units, "activation": activation, "use_bias": use_bias,
                      "kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                      "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                      "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                      "bias_initializer": bias_initializer}

        self.dense_1 = Dense(**dense_args, **self._kgcnn_info)
        self.dense_2 = Dense(**dense_args, **self._kgcnn_info)
        self.add_end = Add(**self._kgcnn_info)
Example #12
class MEGnetBlock(GraphBaseLayer):
    """Megnet Block.

    Args:
        node_embed (list, optional): List of node embedding dimensions. Defaults to [16, 16, 16].
        edge_embed (list, optional): List of edge embedding dimensions. Defaults to [16, 16, 16].
        env_embed (list, optional): List of environment embedding dimensions. Defaults to [16, 16, 16].
        pooling_method (str): Pooling method information for layer. Default is 'mean'.
        use_bias (bool, optional): Use bias. Defaults to True.
        activation (str): Activation function. Default is 'softplus2' with fall-back 'selu'.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constraints. Default is None.
        bias_constraint: Bias constraints. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """
    def __init__(self,
                 node_embed=None,
                 edge_embed=None,
                 env_embed=None,
                 pooling_method="mean",
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(MEGnetBlock, self).__init__(**kwargs)
        self.pooling_method = pooling_method

        if node_embed is None:
            node_embed = [16, 16, 16]
        if env_embed is None:
            env_embed = [16, 16, 16]
        if edge_embed is None:
            edge_embed = [16, 16, 16]
        self.node_embed = node_embed
        self.edge_embed = edge_embed
        self.env_embed = env_embed
        self.use_bias = use_bias
        if activation is None and 'softplus2' in kgcnn_custom_act:
            activation = 'softplus2'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer,
            "use_bias": use_bias
        }
        mlp_args = {
            "input_tensor_type": self.input_tensor_type,
            "ragged_validate": self.ragged_validate
        }
        mlp_args.update(kernel_args)
        pool_args = {"pooling_method": self.pooling_method}
        pool_args.update(self._all_kgcnn_info)
        gather_args = self._all_kgcnn_info

        # Node
        self.lay_phi_n = Dense(units=self.node_embed[0],
                               activation=activation,
                               **mlp_args)
        self.lay_phi_n_1 = Dense(units=self.node_embed[1],
                                 activation=activation,
                                 **mlp_args)
        self.lay_phi_n_2 = Dense(units=self.node_embed[2],
                                 activation='linear',
                                 **mlp_args)
        self.lay_esum = PoolingLocalEdges(**pool_args)
        self.lay_gather_un = GatherState(**gather_args)
        self.lay_conc_nu = Concatenate(
            axis=-1, input_tensor_type=self.input_tensor_type)
        # Edge
        self.lay_phi_e = Dense(units=self.edge_embed[0],
                               activation=activation,
                               **mlp_args)
        self.lay_phi_e_1 = Dense(units=self.edge_embed[1],
                                 activation=activation,
                                 **mlp_args)
        self.lay_phi_e_2 = Dense(units=self.edge_embed[2],
                                 activation='linear',
                                 **mlp_args)
        self.lay_gather_n = GatherNodes(**gather_args)
        self.lay_gather_ue = GatherState(**gather_args)
        self.lay_conc_enu = Concatenate(
            axis=-1, input_tensor_type=self.input_tensor_type)
        # Environment
        self.lay_usum_e = PoolingGlobalEdges(**pool_args)
        self.lay_usum_n = PoolingNodes(**pool_args)
        self.lay_conc_u = Concatenate(axis=-1, input_tensor_type="tensor")
        self.lay_phi_u = ks.layers.Dense(units=self.env_embed[0],
                                         activation=activation,
                                         **kernel_args)
        self.lay_phi_u_1 = ks.layers.Dense(units=self.env_embed[1],
                                           activation=activation,
                                           **kernel_args)
        self.lay_phi_u_2 = ks.layers.Dense(units=self.env_embed[2],
                                           activation='linear',
                                           **kernel_args)

    def build(self, input_shape):
        """Build layer."""
        super(MEGnetBlock, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: [nodes, edges, edge_index, state]

            - nodes (tf.ragged): Node embeddings of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message embeddings of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)
            - state (tf.tensor): State information for the graph, a single tensor of shape (batch, F)

        Returns:
            node_update, edge_update, state_update: Updated node embeddings of shape (batch, [N], F),
                edge embeddings of shape (batch, [M], F), and state of shape (batch, F).
        """
        # Calculate edge Update
        node_input, edge_input, edge_index_input, env_input = inputs
        e_n = self.lay_gather_n([node_input, edge_index_input])
        e_u = self.lay_gather_ue([env_input, edge_input])
        ec = self.lay_conc_enu([e_n, edge_input, e_u])
        ep = self.lay_phi_e(ec)  # Learning of Update Functions
        ep = self.lay_phi_e_1(ep)  # Learning of Update Functions
        ep = self.lay_phi_e_2(ep)  # Learning of Update Functions
        # Calculate Node update
        vb = self.lay_esum([node_input, ep, edge_index_input])  # Summing for each node connection
        v_u = self.lay_gather_un([env_input, node_input])
        vc = self.lay_conc_nu([vb, node_input, v_u])  # Concatenate node features with new edge updates
        vp = self.lay_phi_n(vc)  # Learning of Update Functions
        vp = self.lay_phi_n_1(vp)  # Learning of Update Functions
        vp = self.lay_phi_n_2(vp)  # Learning of Update Functions
        # Calculate environment update
        es = self.lay_usum_e(ep)
        vs = self.lay_usum_n(vp)
        ub = self.lay_conc_u([es, vs, env_input])
        up = self.lay_phi_u(ub)
        up = self.lay_phi_u_1(up)
        up = self.lay_phi_u_2(up)  # Learning of Update Functions
        return vp, ep, up

    def get_config(self):
        config = super(MEGnetBlock, self).get_config()
        config.update({
            "pooling_method": self.pooling_method,
            "node_embed": self.node_embed,
            "use_bias": self.use_bias,
            "edge_embed": self.edge_embed,
            "env_embed": self.env_embed
        })
        config_dense = self.lay_phi_n.get_config()
        for x in [
                "kernel_regularizer", "activity_regularizer",
                "bias_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer", "activation"
        ]:
            config.update({x: config_dense[x]})
        return config
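Below is a minimal usage sketch for the block above, assuming MEGnetBlock is importable from kgcnn with its default ragged-tensor interface; the toy graph values, feature sizes and variable names are illustrative assumptions, not taken from the source.

import tensorflow as tf

# Hypothetical toy batch of two graphs (2 nodes and 1 node) following the call()
# docstring: nodes (batch, [N], F), edges (batch, [M], F), edge_index (batch, [M], 2),
# and a dense state tensor (batch, F).
nodes = tf.ragged.constant([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6]]], ragged_rank=1)
edges = tf.ragged.constant([[[1.0, 0.0], [0.0, 1.0]], [[1.0, 1.0]]], ragged_rank=1)
edge_index = tf.ragged.constant([[[0, 1], [1, 0]], [[0, 0]]], ragged_rank=1)
state = tf.constant([[0.0, 1.0], [1.0, 0.0]])  # one state vector per graph

megnet_block = MEGnetBlock(node_embed=[16, 16, 16], edge_embed=[16, 16, 16],
                           env_embed=[16, 16, 16], pooling_method="mean")
node_update, edge_update, state_update = megnet_block([nodes, edges, edge_index, state])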
Example #13
    def __init__(self,
                 units=128,
                 cfconv_pool='sum',
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize Layer."""
        super(SchNetInteraction, self).__init__(**kwargs)

        self.cfconv_pool = cfconv_pool
        self.use_bias = use_bias
        self.units = units
        if activation is None and 'shifted_softplus' in kgcnn_custom_act:
            activation = 'shifted_softplus'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        conv_args = {
            "units": self.units,
            "use_bias": use_bias,
            "activation": activation,
            "cfconv_pool": cfconv_pool
        }
        conv_args.update(kernel_args)
        conv_args.update(self._all_kgcnn_info)
        # Layers
        self.lay_cfconv = SchNetCFconv(**conv_args)
        self.lay_dense1 = Dense(units=self.units,
                                activation='linear',
                                use_bias=False,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_dense2 = Dense(units=self.units,
                                activation=activation,
                                use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_dense3 = Dense(units=self.units,
                                activation='linear',
                                use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_add = Add(input_tensor_type=self.input_tensor_type,
                           ragged_validate=self.ragged_validate)
Example #14
    def __init__(self,
                 emb_size,
                 int_emb_size,
                 basis_emb_size,
                 num_before_skip,
                 num_after_skip,
                 use_bias=True,
                 pooling_method="sum",
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='orthogonal',
                 bias_initializer='zeros',
                 **kwargs):
        super(DimNetInteractionPPBlock, self).__init__(**kwargs)
        self.use_bias = use_bias
        self.pooling_method = pooling_method
        self.emb_size = emb_size
        self.int_emb_size = int_emb_size
        self.basis_emb_size = basis_emb_size
        self.num_before_skip = num_before_skip
        self.num_after_skip = num_after_skip
        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        pool_args = {"pooling_method": pooling_method}
        pool_args.update(self._all_kgcnn_info)
        gather_args = self._all_kgcnn_info

        # Transformations of Bessel and spherical basis representations
        self.dense_rbf1 = Dense(basis_emb_size, use_bias=False, **kernel_args)
        self.dense_rbf2 = Dense(emb_size, use_bias=False, **kernel_args)
        self.dense_sbf1 = Dense(basis_emb_size, use_bias=False, **kernel_args)
        self.dense_sbf2 = Dense(int_emb_size, use_bias=False, **kernel_args)

        # Dense transformations of input messages
        self.dense_ji = Dense(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args)
        self.dense_kj = Dense(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args)

        # Embedding projections for interaction triplets
        self.down_projection = Dense(int_emb_size,
                                     activation=activation,
                                     use_bias=False,
                                     **kernel_args)
        self.up_projection = Dense(emb_size,
                                   activation=activation,
                                   use_bias=False,
                                   **kernel_args)

        # Residual layers before skip connection
        self.layers_before_skip = []
        for i in range(num_before_skip):
            self.layers_before_skip.append(
                ResidualLayer(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args))
        self.final_before_skip = Dense(emb_size,
                                       activation=activation,
                                       use_bias=True,
                                       **kernel_args)

        # Residual layers after skip connection
        self.layers_after_skip = []
        for i in range(num_after_skip):
            self.layers_after_skip.append(
                ResidualLayer(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args))

        self.lay_add1 = Add()
        self.lay_add2 = Add()
        self.lay_mult1 = Multiply()
        self.lay_mult2 = Multiply()

        self.lay_gather = GatherNodesOutgoing(**gather_args)  # Are edges here
        self.lay_pool = PoolingLocalEdges(**pool_args)
Example #15
class DimNetInteractionPPBlock(GraphBaseLayer):
    """DimNetInteractionPPBlock as defined by DimNet.

    Args:
        emb_size: Embedding size used for the messages
        int_emb_size (int): Embedding size used for interaction triplets
        basis_emb_size: Embedding size used inside the basis transformation
        num_before_skip: Number of residual layers in interaction block before skip connection
        num_after_skip: Number of residual layers in interaction block after skip connection
        use_bias (bool, optional): Use bias. Defaults to True.
        pooling_method (str): Pooling method information for layer. Default is 'sum'.
        activation (str): Activation function. Default is "swish".
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constraints. Default is None.
        bias_constraint: Bias constraints. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'orthogonal'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
        **kwargs:
    """
    def __init__(self,
                 emb_size,
                 int_emb_size,
                 basis_emb_size,
                 num_before_skip,
                 num_after_skip,
                 use_bias=True,
                 pooling_method="sum",
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='orthogonal',
                 bias_initializer='zeros',
                 **kwargs):
        super(DimNetInteractionPPBlock, self).__init__(**kwargs)
        self.use_bias = use_bias
        self.pooling_method = pooling_method
        self.emb_size = emb_size
        self.int_emb_size = int_emb_size
        self.basis_emb_size = basis_emb_size
        self.num_before_skip = num_before_skip
        self.num_after_skip = num_after_skip
        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        pool_args = {"pooling_method": pooling_method}
        pool_args.update(self._all_kgcnn_info)
        gather_args = self._all_kgcnn_info

        # Transformations of Bessel and spherical basis representations
        self.dense_rbf1 = Dense(basis_emb_size, use_bias=False, **kernel_args)
        self.dense_rbf2 = Dense(emb_size, use_bias=False, **kernel_args)
        self.dense_sbf1 = Dense(basis_emb_size, use_bias=False, **kernel_args)
        self.dense_sbf2 = Dense(int_emb_size, use_bias=False, **kernel_args)

        # Dense transformations of input messages
        self.dense_ji = Dense(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args)
        self.dense_kj = Dense(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args)

        # Embedding projections for interaction triplets
        self.down_projection = Dense(int_emb_size,
                                     activation=activation,
                                     use_bias=False,
                                     **kernel_args)
        self.up_projection = Dense(emb_size,
                                   activation=activation,
                                   use_bias=False,
                                   **kernel_args)

        # Residual layers before skip connection
        self.layers_before_skip = []
        for i in range(num_before_skip):
            self.layers_before_skip.append(
                ResidualLayer(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args))
        self.final_before_skip = Dense(emb_size,
                                       activation=activation,
                                       use_bias=True,
                                       **kernel_args)

        # Residual layers after skip connection
        self.layers_after_skip = []
        for i in range(num_after_skip):
            self.layers_after_skip.append(
                ResidualLayer(emb_size,
                              activation=activation,
                              use_bias=True,
                              **kernel_args))

        self.lay_add1 = Add()
        self.lay_add2 = Add()
        self.lay_mult1 = Multiply()
        self.lay_mult2 = Multiply()

        self.lay_gather = GatherNodesOutgoing(**gather_args)  # Are edges here
        self.lay_pool = PoolingLocalEdges(**pool_args)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: [edges, rbf, sbf, angle_index]

            - edges (tf.ragged): Edge embeddings of shape (batch, [M], F)
            - rbf (tf.ragged): Radial basis features of shape (batch, [M], F)
            - sbf (tf.ragged): Spherical basis features of shape (batch, [K], F)
            - angle_index (tf.ragged): Angle indices between two edges of shape (batch, [K], 2)

        Returns:
            tf.ragged: Updated edge embeddings.
        """
        x, rbf, sbf, id_expand = inputs

        # Initial transformation
        x_ji = self.dense_ji(x)
        x_kj = self.dense_kj(x)

        # Transform via Bessel basis
        rbf = self.dense_rbf1(rbf)
        rbf = self.dense_rbf2(rbf)
        x_kj = self.lay_mult1([x_kj, rbf])

        # Down-project embeddings and generate interaction triplet embeddings
        x_kj = self.down_projection(x_kj)
        x_kj = self.lay_gather([x_kj, id_expand])

        # Transform via 2D spherical basis
        sbf = self.dense_sbf1(sbf)
        sbf = self.dense_sbf2(sbf)
        x_kj = self.lay_mult2([x_kj, sbf])

        # Aggregate interactions and up-project embeddings
        x_kj = self.lay_pool([rbf, x_kj, id_expand])
        x_kj = self.up_projection(x_kj)

        # Transformations before skip connection
        x2 = self.lay_add1([x_ji, x_kj])
        for layer in self.layers_before_skip:
            x2 = layer(x2)
        x2 = self.final_before_skip(x2)

        # Skip connection
        x = self.lay_add2([x, x2])

        # Transformations after skip connection
        for layer in self.layers_after_skip:
            x = layer(x)

        return x

    def get_config(self):
        config = super(DimNetInteractionPPBlock, self).get_config()
        config.update({
            "use_bias": self.use_bias,
            "pooling_method": self.pooling_method,
            "emb_size": self.emb_size,
            "int_emb_size": self.int_emb_size,
            "basis_emb_size": self.basis_emb_size,
            "num_before_skip": self.num_before_skip,
            "num_after_skip": self.num_after_skip
        })
        conf_dense = self.dense_ji.get_config()
        for x in [
                "kernel_regularizer", "activity_regularizer",
                "bias_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer", "activation"
        ]:
            config.update({x: conf_dense[x]})
        return config
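For orientation, a hedged call sketch for the interaction block above, mirroring the input list [edges, rbf, sbf, angle_index] from its call() docstring; the dummy ragged tensors and feature sizes are illustrative assumptions only.

import tensorflow as tf

# Hypothetical single-graph batch with 3 edge messages and 2 angle triplets;
# rbf/sbf feature sizes are arbitrary since they are projected internally.
block = DimNetInteractionPPBlock(emb_size=8, int_emb_size=4, basis_emb_size=2,
                                 num_before_skip=1, num_after_skip=1)
edges = tf.ragged.constant([[[0.1] * 8, [0.2] * 8, [0.3] * 8]], ragged_rank=1)
rbf = tf.ragged.constant([[[0.5] * 6, [0.5] * 6, [0.5] * 6]], ragged_rank=1)
sbf = tf.ragged.constant([[[0.5] * 7, [0.5] * 7]], ragged_rank=1)
angle_index = tf.ragged.constant([[[0, 1], [1, 2]]], ragged_rank=1)
edge_update = block([edges, rbf, sbf, angle_index])  # same shape as `edges`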
Example #16
class GCN(GraphBaseLayer):
    r"""Graph convolution according to Kipf et al.
    
    Computes the graph convolution as $\sigma(A_s(WX+b))$, where $A_s$ is the precomputed, scaled adjacency matrix.
    The scaled adjacency matrix is defined by $A_s = D^{-0.5} (A + I) D^{-0.5}$ with the degree matrix $D$.
    In place of $A_s$, this layer uses edge features (the entries of $A_s$) together with edge indices.
    $A_s$ is considered pre-scaled; the scaling is not done by this layer.
    If no scaled edge features are available, consider using e.g. "segment_mean" or normalize_by_weights to
    obtain behaviour similar to that expected from a pre-scaled adjacency matrix input.
    Edge features must be broadcastable to node features; ideally they have shape (..., 1).
    
    Args:
        units (int): Output dimension/ units of dense layer.
        pooling_method (str): Pooling method for summing edges. Default is 'sum'.
        normalize_by_weights (bool): Normalize the pooled output by the sum of weights. Default is False.
            In this case the edge features are considered weights of dimension (...,1) and are summed for each node.
        activation (str): Activation. Default is {"class_name": "leaky_relu", "config": {"alpha": 0.2}},
            with fall-back "relu".
        use_bias (bool): Use bias. Default is True.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constraints. Default is None.
        bias_constraint: Bias constraints. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """

    def __init__(self,
                 units,
                 pooling_method='sum',
                 normalize_by_weights=False,
                 activation=None,
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(GCN, self).__init__(**kwargs)
        self.normalize_by_weights = normalize_by_weights
        self.pooling_method = pooling_method
        self.units = units
        if activation is None and 'leaky_relu' in kgcnn_custom_act:
            activation = {"class_name": "leaky_relu", "config": {"alpha": 0.2}}
        elif activation is None:
            activation = "relu"

        kernel_args = {"kernel_regularizer": kernel_regularizer, "activity_regularizer": activity_regularizer,
                       "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                       "bias_constraint": bias_constraint, "kernel_initializer": kernel_initializer,
                       "bias_initializer": bias_initializer, "use_bias": use_bias}
        gather_args = self._all_kgcnn_info
        pool_args = {"pooling_method": pooling_method, "normalize_by_weights": normalize_by_weights}
        pool_args.update(self._all_kgcnn_info)

        # Layers
        self.lay_gather = GatherNodesOutgoing(**gather_args)
        self.lay_dense = Dense(units=self.units, activation='linear',
                               input_tensor_type=self.input_tensor_type, ragged_validate=self.ragged_validate,
                               **kernel_args)
        self.lay_pool = PoolingWeightedLocalEdges(**pool_args)
        self.lay_act = Activation(activation, ragged_validate=self.ragged_validate,
                                  input_tensor_type=self.input_tensor_type)

    def build(self, input_shape):
        """Build layer."""
        super(GCN, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: [nodes, edges, edge_index]

            - nodes (tf.ragged): Node embeddings of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message embeddings of shape (batch, [M], F)
            - edge_index (tf.ragged): Edge indices of shape (batch, [M], 2)

        Returns:
            embeddings: Node embeddings of shape (batch, [N], F)
        """
        node, edges, edge_index = inputs
        no = self.lay_dense(node)
        no = self.lay_gather([no, edge_index])
        nu = self.lay_pool([node, no, edge_index, edges])  # Summing for each node connection
        out = self.lay_act(nu)
        return out

    def get_config(self):
        """Update config."""
        config = super(GCN, self).get_config()
        config.update({"normalize_by_weights": self.normalize_by_weights,
                       "pooling_method": self.pooling_method, "units": self.units})
        conf_dense = self.lay_dense.get_config()
        for x in ["kernel_regularizer", "activity_regularizer", "bias_regularizer", "kernel_constraint",
                  "bias_constraint", "kernel_initializer", "bias_initializer", "use_bias"]:
            config.update({x: conf_dense[x]})
        conf_act = self.lay_act.get_config()
        config.update({"activation": conf_act["activation"]})
        return config
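The GCN docstring above assumes pre-scaled edge weights. Here is a small NumPy sketch, under that assumption, of the precomputation it refers to, A_s = D^{-0.5}(A + I)D^{-0.5}, for a toy 3-node graph; the non-zero entries of A_s are what would be passed to the layer as (..., 1) edge features alongside the edge indices.

import numpy as np

# Toy undirected 3-node chain graph: adjacency A, then A_s = D^-0.5 (A + I) D^-0.5.
A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
A_hat = A + np.eye(3)                       # add self-loops
deg = A_hat.sum(axis=1)                     # node degrees including self-loops
D_inv_sqrt = np.diag(deg ** -0.5)
A_s = D_inv_sqrt @ A_hat @ D_inv_sqrt       # symmetrically scaled adjacency

# Edge-list form expected by the layer: indices plus scaled weights of shape (M, 1).
edge_index = np.argwhere(A_hat > 0)                              # shape (M, 2)
edge_weight = A_s[edge_index[:, 0], edge_index[:, 1]][:, None]   # shape (M, 1)
print(edge_index.shape, edge_weight.shape)  # (7, 2) (7, 1)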
Example #17
class ResidualLayer(GraphBaseLayer):
    """Residual Layer as defined by DimNet.

    Args:
        units: Dimension of the kernel.
        use_bias (bool, optional): Use bias. Defaults to True.
        activation (str): Activation function. Default is "swish".
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constraints. Default is None.
        bias_constraint: Bias constraints. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
        **kwargs:
    """
    def __init__(self,
                 units,
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(ResidualLayer, self).__init__(**kwargs)
        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"
        dense_args = {
            "units": units,
            "activation": activation,
            "use_bias": use_bias,
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }

        self.dense_1 = Dense(**dense_args)
        self.dense_2 = Dense(**dense_args)
        self.add_end = Add()

    def build(self, input_shape):
        """Build layer."""
        super(ResidualLayer, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass

        Args:
            inputs (tf.ragged): Node or edge embedding of shape (batch, [N], F)

        Returns:
            embeddings: Node or edge embedding of shape (batch, [N], F)
        """
        x = self.dense_1(inputs)
        x = self.dense_2(x)
        x = self.add_end([inputs, x])
        return x

    def get_config(self):
        config = super(ResidualLayer, self).get_config()
        conf_dense = self.dense_1.get_config()
        for x in [
                "kernel_regularizer", "activity_regularizer",
                "bias_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer", "activation",
                "use_bias", "units"
        ]:
            config.update({x: conf_dense[x]})
        return config
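For illustration only, a plain tf.keras sketch of the residual pattern this layer encapsulates (two Dense layers followed by a skip-connection add), ignoring the ragged/kgcnn specifics above.

import tensorflow as tf

# Standalone sketch of the residual update: y = x + Dense(Dense(x)).
units = 16
dense_1 = tf.keras.layers.Dense(units, activation="selu")
dense_2 = tf.keras.layers.Dense(units, activation="selu")
add_end = tf.keras.layers.Add()

x = tf.random.normal((4, units))            # e.g. 4 embeddings of size `units`
y = add_end([x, dense_2(dense_1(x))])
print(y.shape)                              # (4, 16)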
Example #18
def make_unet(
        # Input
        input_node_shape,
        input_edge_shape,
        input_embedd: dict = None,
        # Output
        output_embedd: dict = None,
        output_mlp: dict = None,
        # Model specific
        hidden_dim=32,
        depth=4,
        k=0.3,
        score_initializer='ones',
        use_bias=True,
        activation='relu',
        is_sorted=False,
        has_unconnected=True,
        use_reconnect=True
):
    """
    Make Graph U-Net.

    Args:
        input_node_shape (list): Shape of node features. If shape is (None,) embedding layer is used.
        input_edge_shape (list): Shape of edge features. If shape is (None,) embedding layer is used.
        input_embedd (dict): Dictionary of embedding parameters used if the input shape is (None,). Default is
            {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
            'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
            'input_tensor_type': 'ragged'}
        output_mlp (dict, optional): Parameters for the MLP output classification/regression. Defaults to
            {"use_bias": [True, False], "units": [25, 1],
            "activation": ['relu', 'sigmoid']}
        output_embedd (dict): Dictionary of embedding parameters for the graph network output. Default is
            {"output_mode": 'graph', "output_type": 'padded'}
        hidden_dim (int): Hidden node feature dimension. Default is 32.
        depth (int): Depth of pooling steps. Default is 4.
        k (float): Pooling ratio. Default is 0.3.
        score_initializer (str): How to initialize score kernel. Default is 'ones'.
        use_bias (bool): Use bias. Default is True.
        activation (str): Activation function used. Default is 'relu'.
        is_sorted (bool, optional): Whether edge_indices are sorted. Defaults to False.
        has_unconnected (bool, optional): Whether the graph has unconnected nodes. Defaults to True.
        use_reconnect (bool): Reconnect nodes after pooling. I.e. adj_matrix=adj_matrix^2. Default is True.

    Returns:
        model (ks.models.Model): Graph U-Net model.
    """
    # Default values update
    model_default = {'input_embedd': {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
                                      'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
                                      'input_tensor_type': 'ragged'},
                     'output_embedd': {"output_mode": 'graph', "output_type": 'padded'},
                     'output_mlp': {"use_bias": [True, False], "units": [25, 1], "activation": ['relu', 'sigmoid']}
                     }

    # Update model args
    input_embedd = update_model_args(model_default['input_embedd'], input_embedd)
    output_embedd = update_model_args(model_default['output_embedd'], output_embedd)
    output_mlp = update_model_args(model_default['output_mlp'], output_mlp)
    pooling_args = {"pooling_method": 'segment_mean', "is_sorted": is_sorted, "has_unconnected": has_unconnected}

    # Make input embedding, if no feature dimension
    node_input, n, edge_input, ed, edge_index_input, _, _ = generate_standard_graph_input(input_node_shape,
                                                                                          input_edge_shape, None,
                                                                                          **input_embedd)
    tens_type = "values_partition"
    node_indexing = "batch"
    n = ChangeTensorType(input_tensor_type="ragged", output_tensor_type=tens_type)(n)
    ed = ChangeTensorType(input_tensor_type="ragged", output_tensor_type=tens_type)(ed)
    edi = ChangeTensorType(input_tensor_type="ragged", output_tensor_type=tens_type)(edge_index_input)
    edi = ChangeIndexing(input_tensor_type=tens_type, to_indexing=node_indexing)([n, edi])  # disjoint

    output_mlp.update({"input_tensor_type": tens_type})
    gather_args = {"input_tensor_type": tens_type, "node_indexing": node_indexing}
    pooling_args.update({"input_tensor_type": tens_type, "node_indexing": node_indexing})

    # Graph lists
    n = Dense(hidden_dim, use_bias=use_bias, activation='linear', input_tensor_type=tens_type)(n)
    in_graph = [n, ed, edi]
    graph_list = [in_graph]
    map_list = []

    # U Down
    i_graph = in_graph
    for i in range(0, depth):

        n, ed, edi = i_graph
        # GCN layer
        eu = GatherNodesOutgoing(**gather_args)([n, edi])
        eu = Dense(hidden_dim, use_bias=use_bias, activation='linear', input_tensor_type=tens_type)(eu)
        nu = PoolingLocalEdges(**pooling_args)([n, eu, edi])  # Summing for each node connection
        n = Activation(activation=activation, input_tensor_type=tens_type)(nu)

        if use_reconnect:
            ed, edi = AdjacencyPower(n=2, node_indexing=node_indexing, input_tensor_type=tens_type)([n, ed, edi])

        # Pooling
        i_graph, i_map = PoolingTopK(k=k, kernel_initializer=score_initializer,
                                     node_indexing=node_indexing, input_tensor_type=tens_type)([n, ed, edi])

        graph_list.append(i_graph)
        map_list.append(i_map)

    # U Up
    ui_graph = i_graph
    for i in range(depth, 0, -1):
        o_graph = graph_list[i - 1]
        i_map = map_list[i - 1]
        ui_graph = UnPoolingTopK(node_indexing=node_indexing, input_tensor_type=tens_type)(o_graph + i_map + ui_graph)

        n, ed, edi = ui_graph
        # skip connection
        n = Add(input_tensor_type=tens_type)([n, o_graph[0]])
        # GCN
        eu = GatherNodesOutgoing(**gather_args)([n, edi])
        eu = Dense(hidden_dim, use_bias=use_bias, activation='linear', input_tensor_type=tens_type)(eu)
        nu = PoolingLocalEdges(**pooling_args)([n, eu, edi])  # Summing for each node connection
        n = Activation(activation=activation, input_tensor_type=tens_type)(nu)

        ui_graph = [n, ed, edi]

    # Output
    n = ui_graph[0]
    if output_embedd["output_mode"] == 'graph':
        out = PoolingNodes(**pooling_args)(n)

        output_mlp.update({"input_tensor_type": "tensor"})
        out = MLP(**output_mlp)(out)
        main_output = ks.layers.Flatten()(out)  # will be dense
    else:  # node embedding
        out = MLP(**output_mlp)(n)
        main_output = ChangeTensorType(input_tensor_type=tens_type, output_tensor_type="tensor")(out)

    model = ks.models.Model(inputs=[node_input, edge_input, edge_index_input], outputs=main_output)

    return model
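A hedged usage sketch for the graph U-Net factory above. The factory name make_unet and all shapes below are assumptions for illustration only.

# Hypothetical call of the U-Net factory above; make_unet and the shapes are assumptions.
model = make_unet(
    input_node_shape=[None],       # (None,) -> node embedding layer is used
    input_edge_shape=[None, 1],    # one edge feature per edge (assumed)
    hidden_dim=32,
    depth=4,
    k=0.3)
model.summary()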
Example #19
0
class AttentiveHeadFP(GraphBaseLayer):
    r"""Computes the attention head for Attentive FP model.
    The attention coefficients are computed by $a_{ij} = \sigma_1( W_1 [h_i || h_j] )$.
    The initial representation $h_i$ and $h_j$ must be calculated beforehand.
    The attention is obtained by $\alpha_ij = softmax_j (a_{ij})$.
    And finally pooled through for context $C_i = \sigma_2(\sum_j \alpha_{ij} W_2 h_j)$.

    If graphs indices were in 'batch' mode, the layer's 'node_indexing' must be set to 'batch'.

    Args:
        units (int): Units for the linear trafo of node features before attention.
        use_edge_features (bool): Append edge features to attention computation. Default is False.
        activation (str): Activation. Default is {"class_name": "kgcnn>leaky_relu", "config": {"alpha": 0.2}}.
        activation_context (str): Activation function for context. Default is "elu".
        use_bias (bool): Use bias. Default is True.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constrains. Default is None.
        bias_constraint: Bias constrains. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """

    def __init__(self,
                 units,
                 use_edge_features=False,
                 activation='kgcnn>leaky_relu',
                 activation_context="elu",
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(AttentiveHeadFP, self).__init__(**kwargs)
        # graph args
        self.use_edge_features = use_edge_features

        # dense args
        self.units = int(units)

        kernel_args = {"use_bias": use_bias, "kernel_regularizer": kernel_regularizer,
                       "activity_regularizer": activity_regularizer, "bias_regularizer": bias_regularizer,
                       "kernel_constraint": kernel_constraint, "bias_constraint": bias_constraint,
                       "kernel_initializer": kernel_initializer, "bias_initializer": bias_initializer}

        self.lay_linear_trafo = Dense(units, activation="linear", **kernel_args, **self._kgcnn_info)
        self.lay_alpha = Dense(1, activation=activation, **kernel_args, **self._kgcnn_info)
        self.lay_gather_in = GatherNodesIngoing(**self._kgcnn_info)
        self.lay_gather_out = GatherNodesOutgoing(**self._kgcnn_info)
        self.lay_concat = Concatenate(axis=-1, **self._kgcnn_info)
        self.lay_pool_attention = PoolingLocalEdgesAttention(**self._kgcnn_info)
        self.lay_final_activ = Activation(activation=activation_context, **self._kgcnn_info)
        if use_edge_features:
            self.lay_fc1 = Dense(units, activation=activation, **kernel_args, **self._kgcnn_info)
            self.lay_fc2 = Dense(units, activation=activation, **kernel_args, **self._kgcnn_info)
            self.lay_concat_edge = Concatenate(axis=-1, **self._kgcnn_info)

    def build(self, input_shape):
        """Build layer."""
        super(AttentiveHeadFP, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs (list): of [node, edges, edge_indices]

            - nodes: Node features of shape (batch, [N], F)
            - edges: Edge or message features of shape (batch, [M], F)
            - edge_index: Edge indices of shape (batch, [M], 2)

        Returns:
            features: Hidden tensor of pooled edge attentions for each node.
        """
        node, edge, edge_index = inputs

        if self.use_edge_features:
            n_in = self.lay_gather_in([node, edge_index])
            n_out = self.lay_gather_out([node, edge_index])
            n_in = self.lay_fc1(n_in)
            n_out = self.lay_concat_edge([n_out, edge])
            n_out = self.lay_fc2(n_out)
        else:
            n_in = self.lay_gather_in([node, edge_index])
            n_out = self.lay_gather_out([node, edge_index])

        wn_out = self.lay_linear_trafo(n_out)
        e_ij = self.lay_concat([n_in, n_out])
        a_ij = self.lay_alpha(e_ij)  # Should be dimension (batch,None,1)
        n_i = self.lay_pool_attention([node, wn_out, a_ij, edge_index])
        out = self.lay_final_activ(n_i)
        return out

    def get_config(self):
        """Update layer config."""
        config = super(AttentiveHeadFP, self).get_config()
        config.update({"use_edge_features": self.use_edge_features,
                       "units": self.units})
        conf_sub = self.lay_alpha.get_config()
        for x in ["kernel_regularizer", "activity_regularizer", "bias_regularizer", "kernel_constraint",
                  "bias_constraint", "kernel_initializer", "bias_initializer", "activation", "use_bias"]:
            config.update({x: conf_sub[x]})
        conf_context = self.lay_final_activ.get_config()
        config.update({"activation_context": conf_context["activation"]})
        return config
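A minimal usage sketch for AttentiveHeadFP, assuming the default ragged input mode of kgcnn layers; feature sizes are illustrative.

import tensorflow as tf

# Toy ragged graph: one graph with three nodes and two directed edges (assumed shapes).
nodes = tf.ragged.constant([[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]], ragged_rank=1)   # (batch, [N], F)
edges = tf.ragged.constant([[[1.0], [0.5]]], ragged_rank=1)                          # (batch, [M], F)
edge_index = tf.ragged.constant([[[0, 1], [1, 0]]], ragged_rank=1, dtype=tf.int64)   # (batch, [M], 2)

# Attention context C_i per node; use_edge_features=True appends edge features to the message.
att = AttentiveHeadFP(units=8, use_edge_features=True)
context = att([nodes, edges, edge_index])  # expected shape (batch, [N], 8)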
Example #20
0
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 use_edge_features=False,
                 node_indexing="sample",
                 is_sorted=False,
                 has_unconnected=True,
                 partition_type="row_length",
                 input_tensor_type="ragged",
                 ragged_validate=False,
                 **kwargs):
        """Initialize layer."""
        super(AttentionHeadGAT, self).__init__(**kwargs)
        # graph args
        self.is_sorted = is_sorted
        self.use_edge_features = use_edge_features
        self.has_unconnected = has_unconnected
        self.node_indexing = node_indexing
        self.partition_type = partition_type
        self.input_tensor_type = input_tensor_type
        self.ragged_validate = ragged_validate
        self._supports_ragged_inputs = True
        self._tensor_input_type_implemented = [
            "ragged", "values_partition", "disjoint", "tensor", "RaggedTensor"
        ]

        self._test_tensor_input = kgcnn_ops_static_test_tensor_input_type(
            self.input_tensor_type, self._tensor_input_type_implemented,
            self.node_indexing)

        # dense args
        self.units = int(units)
        if activation is None and "leaky_relu" in kgcnn_custom_act:
            activation = {"class_name": "leaky_relu", "config": {"alpha": 0.2}}
        elif activation is None:
            activation = "relu"
        self.use_bias = use_bias
        self.ath_activation = tf.keras.activations.get(activation)
        self.ath_kernel_initializer = tf.keras.initializers.get(
            kernel_initializer)
        self.ath_bias_initializer = tf.keras.initializers.get(bias_initializer)
        self.ath_kernel_regularizer = tf.keras.regularizers.get(
            kernel_regularizer)
        self.ath_bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
        self.ath_activity_regularizer = tf.keras.regularizers.get(
            activity_regularizer)
        self.ath_kernel_constraint = tf.keras.constraints.get(
            kernel_constraint)
        self.ath_bias_constraint = tf.keras.constraints.get(bias_constraint)

        kernel_args = {
            "use_bias": use_bias,
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        dens_args = {
            "ragged_validate": self.ragged_validate,
            "input_tensor_type": self.input_tensor_type
        }
        dens_args.update(kernel_args)
        gather_args = {
            "input_tensor_type": self.input_tensor_type,
            "node_indexing": self.node_indexing
        }
        pooling_args = {
            "node_indexing": node_indexing,
            "partition_type": partition_type,
            "has_unconnected": has_unconnected,
            "is_sorted": is_sorted,
            "ragged_validate": self.ragged_validate,
            "input_tensor_type": self.input_tensor_type
        }

        self.lay_linear_trafo = Dense(units, activation="linear", **dens_args)
        self.lay_alpha = Dense(1, activation=activation, **dens_args)
        self.lay_gather_in = GatherNodesIngoing(**gather_args)
        self.lay_gather_out = GatherNodesOutgoing(**gather_args)
        self.lay_concat = Concatenate(axis=-1,
                                      input_tensor_type=self.input_tensor_type)
        self.lay_pool_attention = PoolingLocalEdgesAttention(**pooling_args)
        self.lay_final_activ = Activation(
            activation=activation, input_tensor_type=self.input_tensor_type)
Example #21
0
def make_gat(  # Input
        input_node_shape,
        input_edge_shape,
        input_embedd: dict = None,
        # Output
        output_embedd: dict = None,
        output_mlp: dict = None,
        # Model specific parameter
        depth=3,
        attention_heads_num=5,
        attention_heads_concat=False,
        attention_args: dict = None):
    """
    Generate a graph attention network (GAT) model.

    Args:
        input_node_shape (list): Shape of node features. If shape is (None,) embedding layer is used.
        input_edge_shape (list): Shape of edge features. If shape is (None,) embedding layer is used.
        input_embedd (dict): Dictionary of embedding parameters used if input shape is None. Default is
            {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
            'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
            'input_tensor_type': 'ragged'}.
        output_embedd (dict): Dictionary of embedding parameters of the graph network. Default is
            {"output_mode": 'graph', "output_tensor_type": 'padded'}.
        output_mlp (dict): Dictionary of arguments for the final MLP regression or classification layer. Default is
            {"use_bias": [True, True, False], "units": [25, 10, 1],
            "activation": ['relu', 'relu', 'sigmoid']}.
        depth (int): Number of convolution layers. Default is 3.
        attention_heads_num (int): Number of attention heads. Default is 5.
        attention_heads_concat (bool): Concat attention. Default is False.
        attention_args (dict): Layer arguments for attention layer. Default is
            {"units": 32, 'is_sorted': False, 'has_unconnected': True}
    Returns:
        model (tf.keras.model): GAT model.
    """
    # default values
    model_default = {
        'input_embedd': {
            'input_node_vocab': 95,
            'input_edge_vocab': 5,
            'input_state_vocab': 100,
            'input_node_embedd': 64,
            'input_edge_embedd': 64,
            'input_state_embedd': 64,
            'input_tensor_type': 'ragged'
        },
        'output_embedd': {
            "output_mode": 'graph',
            "output_tensor_type": 'padded'
        },
        'output_mlp': {
            "use_bias": [True, True, False],
            "units": [25, 10, 1],
            "activation": ['relu', 'relu', 'sigmoid']
        },
        'attention_args': {
            "units": 32,
            'is_sorted': False,
            'has_unconnected': True
        }
    }

    # Update default values
    input_embedd = update_model_args(model_default['input_embedd'],
                                     input_embedd)
    output_embedd = update_model_args(model_default['output_embedd'],
                                      output_embedd)
    output_mlp = update_model_args(model_default['output_mlp'], output_mlp)
    attention_args = update_model_args(model_default['attention_args'],
                                       attention_args)
    pooling_nodes_args = {}

    # Make input embedding, if no feature dimension
    node_input, n, edge_input, ed, edge_index_input, _, _ = generate_standard_graph_input(
        input_node_shape, input_edge_shape, None, **input_embedd)

    edi = edge_index_input

    nk = Dense(units=attention_args["units"], activation="linear")(n)
    for i in range(0, depth):
        heads = [
            AttentionHeadGAT(**attention_args)([nk, ed, edi])
            for _ in range(attention_heads_num)
        ]
        if attention_heads_concat:
            nk = Concatenate(axis=-1)(heads)
        else:
            nk = Average()(heads)

    n = nk
    if output_embedd["output_mode"] == 'graph':
        out = PoolingNodes(**pooling_nodes_args)(n)
        output_mlp.update({"input_tensor_type": "tensor"})
        out = MLP(**output_mlp)(out)
        main_output = ks.layers.Flatten()(out)  # will be dense
    else:  # node embedding
        out = MLP(**output_mlp)(n)
        main_output = ChangeTensorType(input_tensor_type="ragged",
                                       output_tensor_type="tensor")(out)

    model = tf.keras.models.Model(
        inputs=[node_input, edge_input, edge_index_input], outputs=main_output)

    return model
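A hedged example call for make_gat; the shapes and feature sizes below are illustrative assumptions.

model = make_gat(
    input_node_shape=[None],       # (None,) -> node embedding layer is used
    input_edge_shape=[None, 10],   # 10 edge features per edge (assumed)
    depth=3,
    attention_heads_num=5,
    attention_heads_concat=False,
    attention_args={"units": 32})
model.summary()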
Example #22
0
def make_megnet(
        # Input
        input_node_shape,
        input_edge_shape,
        input_state_shape,
        input_embedd: dict = None,
        # Output
        output_embedd: dict = None,  # Only graph possible for megnet
        output_mlp: dict = None,
        # Model specs
        meg_block_args: dict = None,
        node_ff_args: dict = None,
        edge_ff_args: dict = None,
        state_ff_args: dict = None,
        set2set_args: dict = None,
        nblocks: int = 3,
        has_ff: bool = True,
        dropout: float = None,
        use_set2set: bool = True,
):
    """
    Get Megnet model.

    Args:
        input_node_shape (list): Shape of node features. If shape is (None,) embedding layer is used.
        input_edge_shape (list): Shape of edge features. If shape is (None,) embedding layer is used.
        input_state_shape (list): Shape of state features. If shape is (,) embedding layer is used.
        input_embedd (dict): Dictionary of embedding parameters used if input shape is None. Default is
            {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
            'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
            'input_tensor_type': 'ragged'}.
        output_embedd (dict): Dictionary of embedding parameters of the graph network. Default is
            {"output_mode": 'graph', "output_tensor_type": 'padded'}.
        output_mlp (dict): Dictionary of MLP arguments for output regression or classification. Default is
            {"use_bias": [True, True, True], "units": [32, 16, 1],
            "activation": ['kgcnn>softplus2', 'kgcnn>softplus2', 'linear']}.
        meg_block_args (dict): Dictionary of MegBlock arguments. Default is
            {'node_embed': [64, 32, 32], 'edge_embed': [64, 32, 32],
            'env_embed': [64, 32, 32], 'activation': 'kgcnn>softplus2', 'is_sorted': False,
            'has_unconnected': True}.
        node_ff_args (dict): Dictionary of feed-forward layer arguments. Default is
            {"units": [64, 32], "activation": "kgcnn>softplus2"}.
        edge_ff_args (dict): Dictionary of feed-forward layer arguments. Default is
            {"units": [64, 32], "activation": "kgcnn>softplus2"}.
        state_ff_args (dict): Dictionary of feed-forward layer arguments. Default is
            {"units": [64, 32], "activation": "kgcnn>softplus2"}.
        set2set_args (dict): Dictionary of Set2Set Layer Arguments. Default is
            {'channels': 16, 'T': 3, "pooling_method": "sum", "init_qstar": "0"}
        nblocks (int): Number of block. Default is 3.
        has_ff (bool): Use a Feed-Forward layer. Default is True.
        dropout (float): Use dropout. Default is None.
        use_set2set (bool): Use set2set. Default is True.

    Returns:
        model (tf.keras.models.Model): MEGnet model.
    """
    # Default arguments if None
    model_default = {'input_embedd': {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
                                      'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
                                      'input_tensor_type': 'ragged'},
                     'output_embedd': {"output_mode": 'graph', "output_tensor_type": 'padded'},
                     'output_mlp': {"use_bias": [True, True, True], "units": [32, 16, 1],
                                    "activation": ['kgcnn>softplus2', 'kgcnn>softplus2', 'linear']},
                     'meg_block_args': {'node_embed': [64, 32, 32], 'edge_embed': [64, 32, 32],
                                        'env_embed': [64, 32, 32], 'activation': 'kgcnn>softplus2', 'is_sorted': False,
                                        'has_unconnected': True},
                     'set2set_args': {'channels': 16, 'T': 3, "pooling_method": "sum", "init_qstar": "0"},
                     'node_ff_args': {"units": [64, 32], "activation": "kgcnn>softplus2"},
                     'edge_ff_args': {"units": [64, 32], "activation": "kgcnn>softplus2"},
                     'state_ff_args': {"units": [64, 32], "activation": "kgcnn>softplus2"}
                     }

    # Update default arguments
    input_embedd = update_model_args(model_default['input_embedd'], input_embedd)
    output_embedd = update_model_args(model_default['output_embedd'], output_embedd)
    output_mlp = update_model_args(model_default['output_mlp'], output_mlp)
    meg_block_args = update_model_args(model_default['meg_block_args'], meg_block_args)
    set2set_args = update_model_args(model_default['set2set_args'], set2set_args)
    node_ff_args = update_model_args(model_default['node_ff_args'], node_ff_args)
    edge_ff_args = update_model_args(model_default['edge_ff_args'], edge_ff_args)
    state_ff_args = update_model_args(model_default['state_ff_args'], state_ff_args)
    state_ff_args.update({"input_tensor_type": "tensor"})

    # Make input embedding, if no feature dimension
    node_input, n, edge_input, ed, edge_index_input, env_input, uenv = generate_standard_graph_input(input_node_shape,
                                                                                                     input_edge_shape,
                                                                                                     input_state_shape,
                                                                                                     **input_embedd)

    edi = edge_index_input

    # starting
    vp = n
    ep = ed
    up = uenv
    vp = MLP(**node_ff_args)(vp)
    ep = MLP(**edge_ff_args)(ep)
    up = MLP(**state_ff_args)(up)
    vp2 = vp
    ep2 = ep
    up2 = up
    for i in range(0, nblocks):
        if has_ff and i > 0:
            vp2 = MLP(**node_ff_args)(vp)
            ep2 = MLP(**edge_ff_args)(ep)
            up2 = MLP(**state_ff_args)(up)

        # MEGnetBlock
        vp2, ep2, up2 = MEGnetBlock(**meg_block_args)(
            [vp2, ep2, edi, up2])

        # skip connection
        if dropout is not None:
            vp2 = Dropout(dropout, name='dropout_atom_%d' % i)(vp2)
            ep2 = Dropout(dropout, name='dropout_bond_%d' % i)(ep2)
            up2 = Dropout(dropout, name='dropout_state_%d' % i)(up2)

        vp = Add()([vp2, vp])
        ep = Add()([ep2, ep])
        up = Add(input_tensor_type="tensor")([up2, up])

    if use_set2set:
        vp = Dense(set2set_args["channels"], activation='linear')(vp)  # to match units
        ep = Dense(set2set_args["channels"], activation='linear')(ep)  # to match units
        vp = Set2Set(**set2set_args)(vp)
        ep = Set2Set(**set2set_args)(ep)
    else:
        vp = PoolingNodes()(vp)
        ep = PoolingGlobalEdges()(ep)

    ep = ks.layers.Flatten()(ep)
    vp = ks.layers.Flatten()(vp)
    final_vec = ks.layers.Concatenate(axis=-1)([vp, ep, up])

    if dropout is not None:
        final_vec = ks.layers.Dropout(dropout, name='dropout_final')(final_vec)

    # final dense layers
    main_output = MLP(**output_mlp, input_tensor_type="tensor")(final_vec)

    model = ks.models.Model(inputs=[node_input, edge_input, edge_index_input, env_input], outputs=main_output)

    return model
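A hedged example call for make_megnet; shapes and feature sizes are illustrative assumptions.

model = make_megnet(
    input_node_shape=[None],       # (None,) -> node embedding layer, e.g. atomic numbers (assumed)
    input_edge_shape=[None, 20],   # e.g. 20 expanded distance features per edge (assumed)
    input_state_shape=[1],         # a single graph state feature (assumed)
    nblocks=3,
    use_set2set=True)
model.summary()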
Example #23
0
class AttentiveNodePooling(GraphBaseLayer):
    r"""Computes the attentive pooling for node embeddings.

    Args:
        units (int): Units for the linear trafo of node features before attention.
        pooling_method (str): Initial pooling before iteration. Default is "sum".
        depth (int): Number of iterations for graph embedding. Default is 3.
        activation (str): Activation. Default is {"class_name": "kgcnn>leaky_relu", "config": {"alpha": 0.2}}.
        activation_context (str): Activation function for context. Default is "elu".
        use_bias (bool): Use bias. Default is True.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constrains. Default is None.
        bias_constraint: Bias constrains. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """

    def __init__(self,
                 units,
                 depth=3,
                 pooling_method="sum",
                 activation='kgcnn>leaky_relu',
                 activation_context="elu",
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 recurrent_activation='sigmoid',
                 recurrent_initializer='orthogonal',
                 recurrent_regularizer=None,
                 recurrent_constraint=None,
                 dropout=0.0,
                 recurrent_dropout=0.0,
                 reset_after=True,
                 **kwargs):
        """Initialize layer."""
        super(AttentiveNodePooling, self).__init__(**kwargs)
        self.pooling_method = pooling_method
        self.depth = depth
        # dense args
        self.units = int(units)

        kernel_args = {"use_bias": use_bias, "kernel_regularizer": kernel_regularizer,
                       "activity_regularizer": activity_regularizer, "bias_regularizer": bias_regularizer,
                       "kernel_constraint": kernel_constraint, "bias_constraint": bias_constraint,
                       "kernel_initializer": kernel_initializer, "bias_initializer": bias_initializer}
        gru_args = {"recurrent_activation": recurrent_activation,
                    "use_bias": use_bias, "kernel_initializer": kernel_initializer,
                    "recurrent_initializer": recurrent_initializer, "bias_initializer": bias_initializer,
                    "kernel_regularizer": kernel_regularizer, "recurrent_regularizer": recurrent_regularizer,
                    "bias_regularizer": bias_regularizer, "kernel_constraint": kernel_constraint,
                    "recurrent_constraint": recurrent_constraint, "bias_constraint": bias_constraint,
                    "dropout": dropout, "recurrent_dropout": recurrent_dropout, "reset_after": reset_after}

        self.lay_linear_trafo = Dense(units, activation="linear", **kernel_args, **self._kgcnn_info)
        self.lay_alpha = Dense(1, activation=activation, **kernel_args, **self._kgcnn_info)
        self.lay_gather_s = GatherState(**self._kgcnn_info)
        self.lay_concat = Concatenate(axis=-1, **self._kgcnn_info)
        self.lay_pool_start = PoolingNodes(pooling_method=self.pooling_method, **self._kgcnn_info)
        self.lay_pool_attention = PoolingNodesAttention(**self._kgcnn_info)
        self.lay_final_activ = Activation(activation=activation_context, **self._kgcnn_info)
        self.lay_gru = tf.keras.layers.GRUCell(units=units, activation="tanh", **gru_args)

    def build(self, input_shape):
        """Build layer."""
        super(AttentiveNodePooling, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: nodes

            - nodes: Node features of shape (batch, [N], F)

        Returns:
            features: Graph embedding of attentively pooled node features of shape (batch, F).
        """
        node = inputs

        h = self.lay_pool_start(node)
        Wn = self.lay_linear_trafo(node)
        for _ in range(self.depth):
            hv = self.lay_gather_s([h, node])
            ev = self.lay_concat([hv, node])
            av = self.lay_alpha(ev)
            cont = self.lay_pool_attention([Wn, av])
            cont = self.lay_final_activ(cont)
            h, _ = self.lay_gru(cont, h, **kwargs)

        out = h
        return out

    def get_config(self):
        """Update layer config."""
        config = super(AttentiveNodePooling, self).get_config()
        config.update({"units": self.units, "depth": self.depth, "pooling_method": self.pooling_method})
        conf_sub = self.lay_alpha.get_config()
        for x in ["kernel_regularizer", "activity_regularizer", "bias_regularizer", "kernel_constraint",
                  "bias_constraint", "kernel_initializer", "bias_initializer", "activation", "use_bias"]:
            config.update({x: conf_sub[x]})
        conf_context = self.lay_final_activ.get_config()
        config.update({"activation_context": conf_context["activation"]})
        conf_gru = self.lay_gru.get_config()
        for x in ["recurrent_activation", "recurrent_initializer", "mrecurrent_regularizer", "recurrent_constraint",
                 "dropout", "recurrent_dropout", "reset_after"]:
            config.update({x: conf_gru[x]})
        return config
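A minimal usage sketch for AttentiveNodePooling, assuming ragged node input whose feature dimension matches units, since the initially pooled state is carried through a GRU cell of that size.

# Toy ragged node tensor with feature dimension equal to units (assumed requirement).
nodes = tf.ragged.constant([[[0.1, 0.2, 0.3, 0.4],
                             [0.5, 0.6, 0.7, 0.8]]], ragged_rank=1)  # (batch, [N], 4)

pool = AttentiveNodePooling(units=4, depth=3)
graph_state = pool(nodes)  # attentively pooled graph embedding, expected shape (batch, 4)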
Example #24
0
class DimNetOutputBlock(GraphBaseLayer):
    """DimNetOutputBlock.

    Args:
        emb_size (int): Embedding size of the edge messages.
        out_emb_size (int): Embedding size of the output up-projection.
        num_dense (int): Number of dense layers in the output MLP.
        num_targets (int): Number of output target dimension. Defaults to 12.
        use_bias (bool, optional): Use bias. Defaults to True.
        kernel_initializer: Initializer for kernels. Default is 'orthogonal'.
        output_kernel_initializer: Initializer for last kernel. Default is 'zeros'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
        activation (str): Activation function. Default is 'swish' with fall-back 'selu'.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constrains. Default is None.
        bias_constraint: Bias constrains. Default is None.
        pooling_method (str): Pooling method information for layer. Default is 'sum'.
    """
    def __init__(self,
                 emb_size,
                 out_emb_size,
                 num_dense,
                 num_targets=12,
                 use_bias=True,
                 output_kernel_initializer="zeros",
                 kernel_initializer='orthogonal',
                 bias_initializer='zeros',
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 pooling_method="sum",
                 **kwargs):
        """Initialize layer."""
        super(DimNetOutputBlock, self).__init__(**kwargs)
        self.pooling_method = pooling_method
        self.emb_size = emb_size
        self.out_emb_size = out_emb_size
        self.num_dense = num_dense
        self.num_targets = num_targets
        self.use_bias = use_bias

        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_initializer": bias_initializer,
            "bias_regularizer": bias_regularizer,
            "bias_constraint": bias_constraint,
        }
        mlp_args = {
            "input_tensor_type": self.input_tensor_type,
            "ragged_validate": self.ragged_validate
        }
        mlp_args.update(kernel_args)
        pool_args = {"pooling_method": self.pooling_method}
        pool_args.update(self._all_kgcnn_info)

        self.dense_rbf = Dense(emb_size,
                               use_bias=False,
                               kernel_initializer=kernel_initializer,
                               **kernel_args)
        self.up_projection = Dense(out_emb_size,
                                   use_bias=False,
                                   kernel_initializer=kernel_initializer,
                                   **kernel_args)
        self.dense_mlp = MLP([out_emb_size] * num_dense,
                             activation=activation,
                             kernel_initializer=kernel_initializer,
                             use_bias=use_bias,
                             **mlp_args)
        self.dimnet_mult = Multiply(input_tensor_type=self.input_tensor_type)
        self.pool = PoolingLocalEdges(**pool_args)
        self.dense_final = Dense(num_targets,
                                 use_bias=False,
                                 kernel_initializer=output_kernel_initializer,
                                 **kernel_args)

    def build(self, input_shape):
        """Build layer."""
        super(DimNetOutputBlock, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs: [nodes, edges, rbf, edge_index]

            - nodes (tf.ragged): Node embeddings of shape (batch, [N], F)
            - edges (tf.ragged): Edge or message embeddings of shape (batch, [M], F)
            - rbf (tf.ragged): Edge distance basis of shape (batch, [M], F)
            - edge_index (tf.ragged): Node indices of shape (batch, [M], 2)

        Returns:
            node_update (tf.ragged): Updated node embeddings.
        """
        # Calculate edge Update
        n_atoms, x, rbf, idnb_i = inputs
        g = self.dense_rbf(rbf)
        x = self.dimnet_mult([g, x])
        x = self.pool([n_atoms, x, idnb_i])
        x = self.up_projection(x)
        x = self.dense_mlp(x)
        x = self.dense_final(x)
        return x

    def get_config(self):
        config = super(DimNetOutputBlock, self).get_config()
        conf_mlp = self.dense_mlp.get_config()
        for x in [
                "kernel_regularizer", "activity_regularizer",
                "bias_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer", "activation"
        ]:
            config.update({x: conf_mlp[x][0]})
        conf_dense_output = self.dense_final.get_config()
        config.update({
            "output_kernel_initializer":
            conf_dense_output["kernel_initializer"]
        })
        config.update({
            "pooling_method": self.pooling_method,
            "use_bias": self.use_bias
        })
        config.update({
            "emb_size": self.emb_size,
            "out_emb_size": self.out_emb_size,
            "num_dense": self.num_dense,
            "num_targets": self.num_targets
        })
        return config
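A hedged usage sketch for DimNetOutputBlock; feature sizes are assumptions, and the edge/message features must match emb_size so they can be multiplied with the transformed radial basis.

# Toy ragged inputs (assumed shapes following the call docstring above).
nodes = tf.ragged.constant([[[0.0], [0.0], [0.0]]], ragged_rank=1)                   # (batch, [N], F), pooling target
messages = tf.ragged.constant([[[0.1] * 8, [0.2] * 8]], ragged_rank=1)               # (batch, [M], emb_size)
rbf = tf.ragged.constant([[[0.3] * 6, [0.4] * 6]], ragged_rank=1)                    # (batch, [M], F_rbf)
edge_index = tf.ragged.constant([[[0, 1], [1, 0]]], ragged_rank=1, dtype=tf.int64)   # (batch, [M], 2)

block = DimNetOutputBlock(emb_size=8, out_emb_size=16, num_dense=2, num_targets=1)
node_out = block([nodes, messages, rbf, edge_index])  # expected shape (batch, [N], 1)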
Example #25
0
    def __init__(self,
                 units,
                 use_bias=True,
                 activation=None,
                 activity_regularizer=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_constraint=None,
                 bias_constraint=None,
                 ragged_validate=False,
                 input_tensor_type="ragged",
                 **kwargs):
        """Initialize MLP as for dense."""
        super(MLP, self).__init__(**kwargs)
        self._supports_ragged_inputs = True
        self.ragged_validate = ragged_validate
        self.input_tensor_type = input_tensor_type
        # Make to one element list
        if isinstance(units, int):
            units = [units]

        if not isinstance(use_bias, list) and not isinstance(use_bias, tuple):
            use_bias = [use_bias for _ in units]
        if not isinstance(activation, list) and not isinstance(
                activation, tuple):
            activation = [activation for _ in units]
        if not isinstance(kernel_regularizer, list) and not isinstance(
                kernel_regularizer, tuple):
            kernel_regularizer = [kernel_regularizer for _ in units]
        if not isinstance(bias_regularizer, list) and not isinstance(
                bias_regularizer, tuple):
            bias_regularizer = [bias_regularizer for _ in units]
        if not isinstance(activity_regularizer, list) and not isinstance(
                activity_regularizer, tuple):
            activity_regularizer = [activity_regularizer for _ in units]
        if not isinstance(kernel_initializer, list) and not isinstance(
                kernel_initializer, tuple):
            kernel_initializer = [kernel_initializer for _ in units]
        if not isinstance(bias_initializer, list) and not isinstance(
                bias_initializer, tuple):
            bias_initializer = [bias_initializer for _ in units]
        if not isinstance(kernel_constraint, list) and not isinstance(
                kernel_constraint, tuple):
            kernel_constraint = [kernel_constraint for _ in units]
        if not isinstance(bias_constraint, list) and not isinstance(
                bias_constraint, tuple):
            bias_constraint = [bias_constraint for _ in units]

        for x in [
                activation, kernel_regularizer, bias_regularizer,
                activity_regularizer, kernel_initializer, bias_initializer,
                kernel_constraint, bias_constraint, use_bias
        ]:
            if len(x) != len(units):
                raise ValueError("Error: Provide matching list of units",
                                 units, "and", x, "or simply a single value.")

        # Serialized props
        self.mlp_units = list(units)
        self.mlp_use_bias = list(use_bias)
        self.mlp_activation = list(
            [tf.keras.activations.get(x) for x in activation])
        self.mlp_kernel_regularizer = list(
            [tf.keras.regularizers.get(x) for x in kernel_regularizer])
        self.mlp_bias_regularizer = list(
            [tf.keras.regularizers.get(x) for x in bias_regularizer])
        self.mlp_activity_regularizer = list(
            [tf.keras.regularizers.get(x) for x in activity_regularizer])
        self.mlp_kernel_initializer = list(
            [tf.keras.initializers.get(x) for x in kernel_initializer])
        self.mlp_bias_initializer = list(
            [tf.keras.initializers.get(x) for x in bias_initializer])
        self.mlp_kernel_constraint = list(
            [tf.keras.constraints.get(x) for x in kernel_constraint])
        self.mlp_bias_constraint = list(
            [tf.keras.constraints.get(x) for x in bias_constraint])

        self.mlp_dense_list = [
            Dense(units=self.mlp_units[i],
                  use_bias=self.mlp_use_bias[i],
                  name=self.name + '_dense_' + str(i),
                  activation=self.mlp_activation[i],
                  activity_regularizer=self.mlp_activity_regularizer[i],
                  kernel_regularizer=self.mlp_kernel_regularizer[i],
                  bias_regularizer=self.mlp_bias_regularizer[i],
                  kernel_initializer=self.mlp_kernel_initializer[i],
                  bias_initializer=self.mlp_bias_initializer[i],
                  kernel_constraint=self.mlp_kernel_constraint[i],
                  bias_constraint=self.mlp_bias_constraint[i],
                  ragged_validate=self.ragged_validate,
                  input_tensor_type=self.input_tensor_type)
            for i in range(len(self.mlp_units))
        ]
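A brief usage note for the MLP wrapper above: a single value for any argument is broadcast to every layer, while a list sets that argument per layer and must match the length of units.

# Three dense layers; activation is given per layer, use_bias is broadcast to all layers.
mlp = MLP(units=[64, 32, 1], activation=['relu', 'relu', 'linear'], use_bias=True)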
Example #26
0
    def __init__(self,
                 emb_size,
                 out_emb_size,
                 num_dense,
                 num_targets=12,
                 use_bias=True,
                 output_kernel_initializer="zeros",
                 kernel_initializer='orthogonal',
                 bias_initializer='zeros',
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 pooling_method="sum",
                 **kwargs):
        """Initialize layer."""
        super(DimNetOutputBlock, self).__init__(**kwargs)
        self.pooling_method = pooling_method
        self.emb_size = emb_size
        self.out_emb_size = out_emb_size
        self.num_dense = num_dense
        self.num_targets = num_targets
        self.use_bias = use_bias

        if activation is None and 'swish' in kgcnn_custom_act:
            activation = 'swish'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_initializer": bias_initializer,
            "bias_regularizer": bias_regularizer,
            "bias_constraint": bias_constraint,
        }
        mlp_args = {
            "input_tensor_type": self.input_tensor_type,
            "ragged_validate": self.ragged_validate
        }
        mlp_args.update(kernel_args)
        pool_args = {"pooling_method": self.pooling_method}
        pool_args.update(self._all_kgcnn_info)

        self.dense_rbf = Dense(emb_size,
                               use_bias=False,
                               kernel_initializer=kernel_initializer,
                               **kernel_args)
        self.up_projection = Dense(out_emb_size,
                                   use_bias=False,
                                   kernel_initializer=kernel_initializer,
                                   **kernel_args)
        self.dense_mlp = MLP([out_emb_size] * num_dense,
                             activation=activation,
                             kernel_initializer=kernel_initializer,
                             use_bias=use_bias,
                             **mlp_args)
        self.dimnet_mult = Multiply(input_tensor_type=self.input_tensor_type)
        self.pool = PoolingLocalEdges(**pool_args)
        self.dense_final = Dense(num_targets,
                                 use_bias=False,
                                 kernel_initializer=output_kernel_initializer,
                                 **kernel_args)
Example #27
0
def make_inorp(  # Input
        input_node_shape,
        input_edge_shape,
        input_state_shape,
        input_embedd: dict = None,
        # Output
        output_embedd: dict = None,
        output_mlp: dict = None,
        # Model specific parameter
        depth=3,
        use_set2set: bool = False,  # not in original paper
        node_mlp_args: dict = None,
        edge_mlp_args: dict = None,
        set2set_args: dict = None,
        pooling_args: dict = None):
    """
    Generate Interaction network.

    Args:
        input_node_shape (list): Shape of node features. If shape is (None,) embedding layer is used.
        input_edge_shape (list): Shape of edge features. If shape is (None,) embedding layer is used.
        input_state_shape (list): Shape of state features. If shape is (,) embedding layer is used.
        input_embedd (dict): Dictionary of embedding parameters used if input shape is None. Default is
            {'input_node_vocab': 95, 'input_edge_vocab': 5, 'input_state_vocab': 100,
            'input_node_embedd': 64, 'input_edge_embedd': 64, 'input_state_embedd': 64,
            'input_tensor_type': 'ragged'}.
        output_embedd (dict): Dictionary of embedding parameters of the graph network. Default is
            {"output_mode": 'graph', "output_tensor_type": 'padded'}.
        output_mlp (dict): Dictionary of arguments for the final MLP regression or classification layer. Default is
            {"use_bias": [True, True, False], "units": [25, 10, 1],
            "activation": ['relu', 'relu', 'sigmoid']}.
        depth (int): Number of convolution layers. Default is 3.
        node_mlp_args (dict): Dictionary of arguments for MLP for node update. Default is
            {"units": [100, 50], "use_bias": True, "activation": ['relu', "linear"]}
        edge_mlp_args (dict): Dictionary of arguments for MLP for interaction update. Default is
            {"units": [100, 100, 100, 100, 50],
            "activation": ['relu', 'relu', 'relu', 'relu', "linear"]}
        use_set2set (bool): Use set2set pooling for graph embedding. Default is False.
        set2set_args (dict): Dictionary of set2set layer arguments. Default is
            {'channels': 32, 'T': 3, "pooling_method": "mean", "init_qstar": "mean"}.
        pooling_args (dict): Dictionary for message pooling arguments. Default is
            {'is_sorted': False, 'has_unconnected': True, 'pooling_method': "segment_mean"}

    Returns:
        model (tf.keras.model): Interaction model.

    """
    # default values
    model_default = {
        'input_embedd': {
            'input_node_vocab': 95,
            'input_edge_vocab': 5,
            'input_state_vocab': 100,
            'input_node_embedd': 64,
            'input_edge_embedd': 64,
            'input_state_embedd': 64,
            'input_tensor_type': 'ragged'
        },
        'output_embedd': {
            "output_mode": 'graph',
            "output_tensor_type": 'padded'
        },
        'output_mlp': {
            "use_bias": [True, True, False],
            "units": [25, 10, 1],
            "activation": ['relu', 'relu', 'sigmoid']
        },
        'set2set_args': {
            'channels': 32,
            'T': 3,
            "pooling_method": "mean",
            "init_qstar": "mean"
        },
        'node_mlp_args': {
            "units": [100, 50],
            "use_bias": True,
            "activation": ['relu', "linear"]
        },
        'edge_mlp_args': {
            "units": [100, 100, 100, 100, 50],
            "activation": ['relu', 'relu', 'relu', 'relu', "linear"]
        },
        'pooling_args': {
            'is_sorted': False,
            'has_unconnected': True,
            'pooling_method': "segment_mean"
        }
    }

    # Update default values
    input_embedd = update_model_args(model_default['input_embedd'],
                                     input_embedd)
    output_embedd = update_model_args(model_default['output_embedd'],
                                      output_embedd)
    output_mlp = update_model_args(model_default['output_mlp'], output_mlp)
    set2set_args = update_model_args(model_default['set2set_args'],
                                     set2set_args)
    node_mlp_args = update_model_args(model_default['node_mlp_args'],
                                      node_mlp_args)
    edge_mlp_args = update_model_args(model_default['edge_mlp_args'],
                                      edge_mlp_args)
    pooling_args = update_model_args(model_default['pooling_args'],
                                     pooling_args)
    gather_args = {"node_indexing": "sample"}

    # Make input embedding, if no feature dimension
    node_input, n, edge_input, ed, edge_index_input, env_input, uenv = generate_standard_graph_input(
        input_node_shape, input_edge_shape, input_state_shape, **input_embedd)

    # Preprocessing
    edi = edge_index_input
    ev = GatherState(**gather_args)([uenv, n])
    # n-Layer Step
    for i in range(0, depth):
        # upd = GatherNodes()([n,edi])
        eu1 = GatherNodesIngoing(**gather_args)([n, edi])
        eu2 = GatherNodesOutgoing(**gather_args)([n, edi])
        upd = Concatenate(axis=-1)([eu2, eu1])
        eu = Concatenate(axis=-1)([upd, ed])

        eu = MLP(**edge_mlp_args)(eu)
        # Pool message
        nu = PoolingLocalEdges(**pooling_args)(
            [n, eu, edi])  # Summing for each node connection
        # Add environment
        nu = Concatenate(axis=-1)(
            [n, nu, ev])  # Concatenate node features with new edge updates

        n = MLP(**node_mlp_args)(nu)

    if output_embedd["output_mode"] == 'graph':
        if use_set2set:
            # output
            outss = Dense(set2set_args["channels"], activation="linear")(n)
            out = Set2Set(**set2set_args)(outss)
        else:
            out = PoolingNodes(**pooling_args)(n)

        output_mlp.update({"input_tensor_type": "tensor"})
        main_output = MLP(**output_mlp)(out)

    else:  # Node labeling
        out = n
        main_output = MLP(**output_mlp)(out)

        main_output = ChangeTensorType(
            input_tensor_type="ragged",
            output_tensor_type="tensor")(main_output)
        # no ragged for distribution atm

    model = ks.models.Model(
        inputs=[node_input, edge_input, edge_index_input, env_input],
        outputs=main_output)

    return model
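A hedged example call for make_inorp; shapes and feature sizes are illustrative assumptions.

model = make_inorp(
    input_node_shape=[None],      # (None,) -> node embedding layer is used
    input_edge_shape=[None, 5],   # 5 edge features per edge (assumed)
    input_state_shape=[1],        # a single graph state feature (assumed)
    depth=3,
    use_set2set=False)
model.summary()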
Example #28
0
    def __init__(self,
                 node_embed=None,
                 edge_embed=None,
                 env_embed=None,
                 pooling_method="mean",
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(MEGnetBlock, self).__init__(**kwargs)
        self.pooling_method = pooling_method

        if node_embed is None:
            node_embed = [16, 16, 16]
        if env_embed is None:
            env_embed = [16, 16, 16]
        if edge_embed is None:
            edge_embed = [16, 16, 16]
        self.node_embed = node_embed
        self.edge_embed = edge_embed
        self.env_embed = env_embed
        self.use_bias = use_bias
        if activation is None and 'softplus2' in kgcnn_custom_act:
            activation = 'softplus2'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer,
            "use_bias": use_bias
        }
        mlp_args = {
            "input_tensor_type": self.input_tensor_type,
            "ragged_validate": self.ragged_validate
        }
        mlp_args.update(kernel_args)
        pool_args = {"pooling_method": self.pooling_method}
        pool_args.update(self._all_kgcnn_info)
        gather_args = self._all_kgcnn_info

        # Node
        self.lay_phi_n = Dense(units=self.node_embed[0],
                               activation=activation,
                               **mlp_args)
        self.lay_phi_n_1 = Dense(units=self.node_embed[1],
                                 activation=activation,
                                 **mlp_args)
        self.lay_phi_n_2 = Dense(units=self.node_embed[2],
                                 activation='linear',
                                 **mlp_args)
        self.lay_esum = PoolingLocalEdges(**pool_args)
        self.lay_gather_un = GatherState(**gather_args)
        self.lay_conc_nu = Concatenate(
            axis=-1, input_tensor_type=self.input_tensor_type)
        # Edge
        self.lay_phi_e = Dense(units=self.edge_embed[0],
                               activation=activation,
                               **mlp_args)
        self.lay_phi_e_1 = Dense(units=self.edge_embed[1],
                                 activation=activation,
                                 **mlp_args)
        self.lay_phi_e_2 = Dense(units=self.edge_embed[2],
                                 activation='linear',
                                 **mlp_args)
        self.lay_gather_n = GatherNodes(**gather_args)
        self.lay_gather_ue = GatherState(**gather_args)
        self.lay_conc_enu = Concatenate(
            axis=-1, input_tensor_type=self.input_tensor_type)
        # Environment
        self.lay_usum_e = PoolingGlobalEdges(**pool_args)
        self.lay_usum_n = PoolingNodes(**pool_args)
        self.lay_conc_u = Concatenate(axis=-1, input_tensor_type="tensor")
        self.lay_phi_u = ks.layers.Dense(units=self.env_embed[0],
                                         activation=activation,
                                         **kernel_args)
        self.lay_phi_u_1 = ks.layers.Dense(units=self.env_embed[1],
                                           activation=activation,
                                           **kernel_args)
        self.lay_phi_u_2 = ks.layers.Dense(units=self.env_embed[2],
                                           activation='linear',
                                           **kernel_args)
Example #29
0
class AttentionHeadGAT(GraphBaseLayer):
    r"""Computes the attention head according to GAT.
    The attention coefficients are computed by $a_{ij} = \sigma( W n_i || W n_j)$,
    optionally by $a_{ij} = \sigma( W n_i || W n_j || e_{ij})$.
    The attention is obtained by $\alpha_ij = softmax_j (a_{ij})$.
    And the messages are pooled by $n_i =  \sum_j \alpha_{ij} e_ij $.
    And finally passed through an activation $h_i = \sigma(\sum_j \alpha_{ij} e_ij)$.

    An edge is defined by index tuple (i,j) with i<-j connection.
    If graphs indices were in 'batch' mode, the layer's 'node_indexing' must be set to 'batch'.

    Args:
        units (int): Units for the linear trafo of node features before attention.
        use_edge_features (bool): Append edge features to attention computation. Default is False.
        activation (str): Activation. Default is {"class_name": "leaky_relu", "config": {"alpha": 0.2}},
            with fall-back "relu".
        use_bias (bool): Use bias. Default is True.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constrains. Default is None.
        bias_constraint: Bias constrains. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """
    def __init__(self,
                 units,
                 use_edge_features=False,
                 activation=None,
                 use_bias=True,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize layer."""
        super(AttentionHeadGAT, self).__init__(**kwargs)
        # graph args
        self.use_edge_features = use_edge_features

        # dense args
        self.units = int(units)
        if activation is None and "leaky_relu" in kgcnn_custom_act:
            activation = {"class_name": "leaky_relu", "config": {"alpha": 0.2}}
        elif activation is None:
            activation = "relu"

        kernel_args = {
            "use_bias": use_bias,
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        dens_args = {
            "ragged_validate": self.ragged_validate,
            "input_tensor_type": self.input_tensor_type
        }
        dens_args.update(kernel_args)
        gather_args = self._all_kgcnn_info
        pooling_args = self._all_kgcnn_info

        self.lay_linear_trafo = Dense(units, activation="linear", **dens_args)
        self.lay_alpha = Dense(1, activation=activation, **dens_args)
        self.lay_gather_in = GatherNodesIngoing(**gather_args)
        self.lay_gather_out = GatherNodesOutgoing(**gather_args)
        self.lay_concat = Concatenate(axis=-1,
                                      input_tensor_type=self.input_tensor_type)
        self.lay_pool_attention = PoolingLocalEdgesAttention(**pooling_args)
        self.lay_final_activ = Activation(
            activation=activation, input_tensor_type=self.input_tensor_type)

    def build(self, input_shape):
        """Build layer."""
        super(AttentionHeadGAT, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass.

        Args:
            inputs (list): of [nodes, edges, edge_index]

            - nodes: Node features of shape (batch, [N], F)
            - edges: Edge or message features of shape (batch, [M], F)
            - edge_index: Edge indices of shape (batch, [M], 2)

        Returns:
            features: Feature tensor of pooled edge attentions for each node.
        """
        node, edge, edge_index = inputs

        n_in = self.lay_gather_in([node, edge_index])
        n_out = self.lay_gather_out([node, edge_index])
        wn_in = self.lay_linear_trafo(n_in)
        wn_out = self.lay_linear_trafo(n_out)
        if self.use_edge_features:
            e_ij = self.lay_concat([wn_in, wn_out, edge])
        else:
            e_ij = self.lay_concat([wn_in, wn_out])
        a_ij = self.lay_alpha(e_ij)  # Attention logits per edge, shape (batch, [M], 1)
        n_i = self.lay_pool_attention([node, wn_out, a_ij, edge_index])
        out = self.lay_final_activ(n_i)
        return out

    def get_config(self):
        """Update layer config."""
        config = super(AttentionHeadGAT, self).get_config()
        config.update({
            "use_edge_features": self.use_edge_features,
            "units": self.units
        })
        conf_sub = self.lay_alpha.get_config()
        for x in [
                "kernel_regularizer", "activity_regularizer",
                "bias_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer", "activation",
                "use_bias"
        ]:
            config.update({x: conf_sub[x]})
        return config
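
# --- Illustrative sketch (not part of the layer above) ----------------------
# The docstring's pooling step, n_i = sum_j alpha_ij * e_ij, written out in
# plain NumPy for a single flattened graph. Per-edge scores a_ij are
# softmax-normalized over all edges that point to the same receiving node i
# (as PoolingLocalEdgesAttention does) and the transformed sender features are
# summed with those weights. The function name, the toy shapes and the (i, j)
# with i <- j index convention are assumptions taken from the docstring above.
import numpy as np

def pool_edges_with_attention(node_count, edge_index, a_ij, wn_out):
    """edge_index: (M, 2); a_ij: (M, 1) attention logits; wn_out: (M, F) transformed senders."""
    receivers = edge_index[:, 0]
    out = np.zeros((node_count, wn_out.shape[-1]))
    for i in range(node_count):
        mask = receivers == i
        if not np.any(mask):
            continue  # a node without incoming edges keeps a zero vector
        scores = a_ij[mask, 0]
        alpha = np.exp(scores - scores.max())
        alpha = alpha / alpha.sum()                            # softmax over edges into node i
        out[i] = (alpha[:, None] * wn_out[mask]).sum(axis=0)   # n_i = sum_j alpha_ij * W h_j
    return out
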
Example #30
0
class SchNetInteraction(GraphBaseLayer):
    """
    SchNet interaction block, which uses the continuous-filter convolution from SchNetCFconv.

    Args:
        units (int): Dimension of node embedding. Default is 128.
        cfconv_pool (str): Pooling method for the SchNetCFconv layer. Default is 'segment_sum'.
        use_bias (bool): Use bias in last layers. Default is True.
        activation (str): Activation function. Default is 'shifted_softplus' with fall-back 'selu'.
        kernel_regularizer: Kernel regularization. Default is None.
        bias_regularizer: Bias regularization. Default is None.
        activity_regularizer: Activity regularization. Default is None.
        kernel_constraint: Kernel constraint. Default is None.
        bias_constraint: Bias constraint. Default is None.
        kernel_initializer: Initializer for kernels. Default is 'glorot_uniform'.
        bias_initializer: Initializer for bias. Default is 'zeros'.
    """
    def __init__(self,
                 units=128,
                 cfconv_pool='segment_sum',
                 use_bias=True,
                 activation=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 **kwargs):
        """Initialize Layer."""
        super(SchNetInteraction, self).__init__(**kwargs)

        self.cfconv_pool = cfconv_pool
        self.use_bias = use_bias
        self.units = units
        if activation is None and 'shifted_softplus' in kgcnn_custom_act:
            activation = 'shifted_softplus'
        elif activation is None:
            activation = "selu"

        kernel_args = {
            "kernel_regularizer": kernel_regularizer,
            "activity_regularizer": activity_regularizer,
            "bias_regularizer": bias_regularizer,
            "kernel_constraint": kernel_constraint,
            "bias_constraint": bias_constraint,
            "kernel_initializer": kernel_initializer,
            "bias_initializer": bias_initializer
        }
        conv_args = {
            "units": self.units,
            "use_bias": use_bias,
            "activation": activation,
            "cfconv_pool": cfconv_pool
        }
        conv_args.update(kernel_args)
        conv_args.update(self._all_kgcnn_info)
        # Layers
        self.lay_cfconv = SchNetCFconv(**conv_args)
        self.lay_dense1 = Dense(units=self.units,
                                activation='linear',
                                use_bias=False,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_dense2 = Dense(units=self.units,
                                activation=activation,
                                use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_dense3 = Dense(units=self.units,
                                activation='linear',
                                use_bias=self.use_bias,
                                input_tensor_type=self.input_tensor_type,
                                ragged_validate=self.ragged_validate,
                                **kernel_args)
        self.lay_add = Add(input_tensor_type=self.input_tensor_type,
                           ragged_validate=self.ragged_validate)

    def build(self, input_shape):
        """Build layer."""
        super(SchNetInteraction, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Forward pass: Calculate node update.

        Args:
            inputs (list): of [nodes, edges, edge_index]

            - nodes: Node embeddings of shape (batch, [N], F)
            - edges: Edge or message embeddings of shape (batch, [M], F)
            - edge_index: Edge indices of shape (batch, [M], 2)

        Returns:
            node_update: Updated node embeddings.
        """
        node, edge, indexlist = inputs
        x = self.lay_dense1(node)
        x = self.lay_cfconv([x, edge, indexlist])
        x = self.lay_dense2(x)
        x = self.lay_dense3(x)
        out = self.lay_add([node, x])
        return out

    def get_config(self):
        """Update layer config."""
        config = super(SchNetInteraction, self).get_config()
        config.update({
            "cfconv_pool": self.cfconv_pool,
            "units": self.units,
            "use_bias": self.use_bias
        })
        conf_dense = self.lay_dense2.get_config()
        for x in [
                "activation", "kernel_regularizer", "bias_regularizer",
                "activity_regularizer", "kernel_constraint", "bias_constraint",
                "kernel_initializer", "bias_initializer"
        ]:
            config.update({x: conf_dense[x]})
        return config
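
# --- Illustrative sketch (not part of the layer above) ----------------------
# The interaction update in plain NumPy for a single flattened graph: nodes are
# linearly mapped, the continuous-filter convolution gates the gathered sender
# features with a per-edge filter and sums them per receiving node, two further
# dense maps follow, and the result is added back as a residual. The
# precomputed edge_filter, the weight matrices w1..w3 and the toy shapes are
# assumptions for the example; biases are omitted for brevity.
import numpy as np

def shifted_softplus(x):
    # shifted softplus in the style used above: softplus(x) - log(2)
    return np.log(0.5 * np.exp(x) + 0.5)

def schnet_interaction_sketch(nodes, edge_filter, edge_index, w1, w2, w3):
    """nodes: (N, F); edge_filter: (M, F); edge_index: (M, 2) with (i, j) meaning i <- j."""
    x = nodes @ w1                            # lay_dense1 (linear)
    receivers, senders = edge_index[:, 0], edge_index[:, 1]
    messages = edge_filter * x[senders]       # gather sender features, multiply by filter
    pooled = np.zeros_like(x)
    np.add.at(pooled, receivers, messages)    # cfconv_pool: sum over incoming edges
    x = shifted_softplus(pooled @ w2)         # lay_dense2 with activation
    x = x @ w3                                # lay_dense3 (linear)
    return nodes + x                          # residual update (lay_add)
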