Example no. 1
 def __init__(self,
              n_node_features: int = 32,
              n_edge_features: int = 32,
              n_global_features: int = 32,
              n_blocks: int = 1,
              is_undirected: bool = True,
              residual_connection: bool = True,
              mode: str = 'regression',
              n_classes: int = 2,
              n_tasks: int = 1,
              **kwargs):
   """
   Parameters
   ----------
   n_node_features: int
     Number of features in a node
   n_edge_features: int
      Number of features in an edge
   n_global_features: int
     Number of global features
   n_blocks: int
      Number of GraphNetwork blocks to use in the update
   is_undirected: bool, optional (default True)
      True when the model is used on undirected graphs, otherwise False
   residual_connection: bool, optional (default True)
     If True, the layer uses a residual connection during training
   n_tasks: int, default 1
     The number of tasks
   mode: str, default 'regression'
     The model type - classification or regression
   n_classes: int, default 2
     The number of classes to predict (used only in classification mode).
   kwargs: Dict
     kwargs supported by TorchModel
   """
   model = MEGNet(n_node_features=n_node_features,
                  n_edge_features=n_edge_features,
                  n_global_features=n_global_features,
                  n_blocks=n_blocks,
                  is_undirected=is_undirected,
                  residual_connection=residual_connection,
                  mode=mode,
                  n_classes=n_classes,
                  n_tasks=n_tasks)
   if mode == 'regression':
     loss: Loss = L2Loss()
     output_types = ['prediction']
    elif mode == 'classification':
      loss = SparseSoftmaxCrossEntropy()
      output_types = ['prediction', 'loss']
    else:
      # Without this guard, an unrecognized mode would leave `loss` and
      # `output_types` undefined and fail later with an UnboundLocalError.
      raise ValueError("mode must be either 'regression' or 'classification'")
   super(MEGNetModel, self).__init__(model,
                                     loss=loss,
                                     output_types=output_types,
                                     **kwargs)
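
A hedged instantiation sketch for the constructor above. It assumes MEGNetModel is importable from deepchem.models and that TorchModel keyword arguments such as batch_size and learning_rate are forwarded through **kwargs; the graph featurization pipeline for MEGNet inputs is not shown.

# Hypothetical usage sketch, not taken from the source above.
from deepchem.models import MEGNetModel

# Regression: the constructor selects L2Loss and a single 'prediction' output.
reg_model = MEGNetModel(n_node_features=32,
                        n_edge_features=32,
                        n_global_features=32,
                        n_blocks=1,
                        mode='regression',
                        n_tasks=1,
                        batch_size=16,        # forwarded to TorchModel
                        learning_rate=0.001)  # forwarded to TorchModel

# Classification: SparseSoftmaxCrossEntropy plus an additional 'loss' output.
clf_model = MEGNetModel(mode='classification', n_classes=2, n_tasks=1)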
Example no. 2
    def __init__(self,
                 in_node_dim: int = 30,
                 hidden_node_dim: int = 32,
                 heads: int = 1,
                 dropout: float = 0.0,
                 num_conv: int = 2,
                 predictor_hidden_feats: int = 64,
                 n_tasks: int = 1,
                 mode: str = 'regression',
                 n_classes: int = 2,
                 **kwargs):
        """
    This class accepts all the keyword arguments from TorchModel.

    Parameters
    ----------
    in_node_dim: int, default 30
      The length of the initial node feature vectors. The 30 is
      based on `MolGraphConvFeaturizer`.
    hidden_node_dim: int, default 32
      The length of the hidden node feature vectors.
    heads: int, default 1
      The number of multi-head-attentions.
    dropout: float, default 0.0
      The dropout probability for each convolutional layer.
    num_conv: int, default 2
      The number of convolutional layers.
    predictor_hidden_feats: int, default 64
      The size for hidden representations in the output MLP predictor, default to 64.
    n_tasks: int, default 1
       The number of tasks, which determines the output size.
    mode: str, default 'regression'
      The model type, 'classification' or 'regression'.
    n_classes: int, default 2
      The number of classes to predict (only used in classification mode).
    kwargs: Dict
      This class accepts all the keyword arguments from TorchModel.
    """
        model = GAT(in_node_dim, hidden_node_dim, heads, dropout, num_conv,
                    predictor_hidden_feats, n_tasks, mode, n_classes)
        if mode == "regression":
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(GATModel, self).__init__(model,
                                       loss=loss,
                                       output_types=output_types,
                                       **kwargs)
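
A hedged usage sketch for this GAT variant. It assumes the class above is in scope as GATModel and that, as the docstring notes, MolGraphConvFeaturizer produces the 30-dimensional node features the default in_node_dim expects.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.MolGraphConvFeaturizer()         # 30 node features by default
X = featurizer.featurize(["CCO", "CCN", "c1ccccc1"])
dataset = dc.data.NumpyDataset(X=X, y=np.random.rand(3, 1))  # toy regression targets

model = GATModel(in_node_dim=30, hidden_node_dim=32, heads=1,
                 num_conv=2, mode='regression', n_tasks=1, batch_size=3)
loss = model.fit(dataset, nb_epoch=1)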
Example no. 3
    def __init__(self,
                 in_node_dim: int = 92,
                 hidden_node_dim: int = 64,
                 in_edge_dim: int = 41,
                 num_conv: int = 3,
                 predictor_hidden_feats: int = 128,
                 n_tasks: int = 1,
                 mode: str = 'regression',
                 n_classes: int = 2,
                 **kwargs):
        """
    This class accepts all the keyword arguments from TorchModel.

    Parameters
    ----------
    in_node_dim: int, default 92
       The length of the initial node feature vectors. The 92 is
       based on the length of the vectors in atom_init.json.
    hidden_node_dim: int, default 64
      The length of the hidden node feature vectors.
    in_edge_dim: int, default 41
      The length of the initial edge feature vectors. The 41 is
      based on default setting of CGCNNFeaturizer.
    num_conv: int, default 3
      The number of convolutional layers.
    predictor_hidden_feats: int, default 128
      The size for hidden representations in the output MLP predictor.
    n_tasks: int, default 1
       The number of tasks, which determines the output size.
    mode: str, default 'regression'
      The model type, 'classification' or 'regression'.
    n_classes: int, default 2
      The number of classes to predict (only used in classification mode).
    kwargs: Dict
      This class accepts all the keyword arguments from TorchModel.
    """
        model = CGCNN(in_node_dim, hidden_node_dim, in_edge_dim, num_conv,
                      predictor_hidden_feats, n_tasks, mode, n_classes)
        if mode == "regression":
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(CGCNNModel, self).__init__(model,
                                         loss=loss,
                                         output_types=output_types,
                                         **kwargs)
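
A hedged usage sketch for the constructor above. It assumes the class is in scope as CGCNNModel, that CGCNNFeaturizer yields the 92-dimensional node and 41-dimensional edge features the defaults expect, and that the MoleculeNet perovskite loader accepts a featurizer argument; the loader is used here only as a plausible source of crystal structures.

# Hypothetical usage sketch, not taken from the source above.
import deepchem as dc

tasks, datasets, transformers = dc.molnet.load_perovskite(
    featurizer=dc.feat.CGCNNFeaturizer())
train, valid, test = datasets

model = CGCNNModel(n_tasks=1, mode='regression', batch_size=32)
model.fit(train, nb_epoch=1)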
Example no. 4
    def __init__(self,
                 n_tasks: int,
                 node_out_feats: int = 64,
                 edge_hidden_feats: int = 128,
                 num_step_message_passing: int = 3,
                 num_step_set2set: int = 6,
                 num_layer_set2set: int = 3,
                 mode: str = 'regression',
                 number_atom_features: int = 30,
                 number_bond_features: int = 11,
                 n_classes: int = 2,
                 self_loop: bool = False,
                 **kwargs):
        """
    Parameters
    ----------
    n_tasks: int
      Number of tasks.
    node_out_feats: int
      The length of the final node representation vectors. Default to 64.
    edge_hidden_feats: int
      The length of the hidden edge representation vectors. Default to 128.
    num_step_message_passing: int
      The number of rounds of message passing. Default to 3.
    num_step_set2set: int
      The number of set2set steps. Default to 6.
    num_layer_set2set: int
      The number of set2set layers. Default to 3.
    mode: str
      The model type, 'classification' or 'regression'. Default to 'regression'.
    number_atom_features: int
      The length of the initial atom feature vectors. Default to 30.
    number_bond_features: int
      The length of the initial bond feature vectors. Default to 11.
    n_classes: int
      The number of classes to predict per task
      (only used when ``mode`` is 'classification'). Default to 2.
    self_loop: bool
      Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
      Generally, an MPNNModel does not require self loops. Default to False.
    kwargs
      This can include any keyword argument of TorchModel.
    """
        model = MPNN(n_tasks=n_tasks,
                     node_out_feats=node_out_feats,
                     edge_hidden_feats=edge_hidden_feats,
                     num_step_message_passing=num_step_message_passing,
                     num_step_set2set=num_step_set2set,
                     num_layer_set2set=num_layer_set2set,
                     mode=mode,
                     number_atom_features=number_atom_features,
                     number_bond_features=number_bond_features,
                     n_classes=n_classes)
        if mode == 'regression':
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(MPNNModel, self).__init__(model,
                                        loss=loss,
                                        output_types=output_types,
                                        **kwargs)

        self._self_loop = self_loop
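
A hedged usage sketch for the constructor above. It assumes the class is in scope as MPNNModel and that MolGraphConvFeaturizer(use_edges=True) supplies the 30 atom and 11 bond features matching number_atom_features and number_bond_features.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)  # 30 node / 11 edge features
X = featurizer.featurize(["CCO", "CCC", "CCN"])
dataset = dc.data.NumpyDataset(X=X, y=np.random.rand(3, 1))

model = MPNNModel(n_tasks=1, mode='regression', batch_size=3)
model.fit(dataset, nb_epoch=1)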
Example no. 5
    def __init__(self,
                 n_tasks: int,
                 graph_conv_layers: list = None,
                 activation=None,
                 residual: bool = True,
                 batchnorm: bool = False,
                 dropout: float = 0.,
                 predictor_hidden_feats: int = 128,
                 predictor_dropout: float = 0.,
                 mode: str = 'regression',
                 number_atom_features=30,
                 n_classes: int = 2,
                 nfeat_name: str = 'x',
                 self_loop: bool = True,
                 **kwargs):
        """
    Parameters
    ----------
    n_tasks: int
      Number of tasks.
    graph_conv_layers: list of int
       Width of channels for GCN layers. graph_conv_layers[i] gives the channel width
       of the i-th GCN layer. If not specified, the default value will be [64, 64].
    activation: callable
      The activation function to apply to the output of each GCN layer.
      By default, no activation function will be applied.
    residual: bool
      Whether to add a residual connection within each GCN layer. Default to True.
    batchnorm: bool
      Whether to apply batch normalization to the output of each GCN layer.
      Default to False.
    dropout: float
      The dropout probability for the output of each GCN layer. Default to 0.
    predictor_hidden_feats: int
      The size for hidden representations in the output MLP predictor. Default to 128.
    predictor_dropout: float
      The dropout probability in the output MLP predictor. Default to 0.
    mode: str
      The model type, 'classification' or 'regression'. Default to 'regression'.
    number_atom_features: int
      The length of the initial atom feature vectors. Default to 30.
    n_classes: int
      The number of classes to predict per task
      (only used when ``mode`` is 'classification'). Default to 2.
    nfeat_name: str
      For an input graph ``g``, the model assumes that it stores node features in
      ``g.ndata[nfeat_name]`` and will retrieve input node features from that.
      Default to 'x'.
    self_loop: bool
      Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
      Default to True.
    kwargs
      This can include any keyword argument of TorchModel.
    """
        model = GCN(n_tasks=n_tasks,
                    graph_conv_layers=graph_conv_layers,
                    activation=activation,
                    residual=residual,
                    batchnorm=batchnorm,
                    dropout=dropout,
                    predictor_hidden_feats=predictor_hidden_feats,
                    predictor_dropout=predictor_dropout,
                    mode=mode,
                    number_atom_features=number_atom_features,
                    n_classes=n_classes,
                    nfeat_name=nfeat_name)
        if mode == 'regression':
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(GCNModel, self).__init__(model,
                                       loss=loss,
                                       output_types=output_types,
                                       **kwargs)

        self._self_loop = self_loop
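
A hedged usage sketch for the constructor above, this time in classification mode. It assumes the class is in scope as GCNModel and that MolGraphConvFeaturizer stores node features under 'x', matching the default nfeat_name.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.MolGraphConvFeaturizer()
X = featurizer.featurize(["CCO", "CCC", "c1ccccc1"])
y = np.array([[0], [1], [0]])  # toy binary labels, one task
dataset = dc.data.NumpyDataset(X=X, y=y)

model = GCNModel(n_tasks=1, mode='classification', n_classes=2,
                 graph_conv_layers=[64, 64], batch_size=3)
model.fit(dataset, nb_epoch=1)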
Example no. 6
    def __init__(self,
                 n_tasks: int,
                 graph_attention_layers: list = None,
                 n_attention_heads: int = 8,
                 agg_modes: list = None,
                 activation=F.elu,
                 residual: bool = True,
                 dropout: float = 0.,
                 alpha: float = 0.2,
                 predictor_hidden_feats: int = 128,
                 predictor_dropout: float = 0.,
                 mode: str = 'regression',
                 number_atom_features: int = 30,
                 n_classes: int = 2,
                 self_loop: bool = True,
                 **kwargs):
        """
    Parameters
    ----------
    n_tasks: int
      Number of tasks.
    graph_attention_layers: list of int
       Width of channels per attention head for GAT layers. graph_attention_layers[i]
       gives the channel width per attention head for the i-th GAT layer. If
      both ``graph_attention_layers`` and ``agg_modes`` are specified, they should have
      equal length. If not specified, the default value will be [8, 8].
    n_attention_heads: int
      Number of attention heads in each GAT layer.
    agg_modes: list of str
      The way to aggregate multi-head attention results for each GAT layer, which can be
      either 'flatten' for concatenating all-head results or 'mean' for averaging all-head
      results. ``agg_modes[i]`` gives the way to aggregate multi-head attention results for
      the i-th GAT layer. If both ``graph_attention_layers`` and ``agg_modes`` are
      specified, they should have equal length. If not specified, the model will flatten
      multi-head results for intermediate GAT layers and compute mean of multi-head results
      for the last GAT layer.
    activation: activation function or None
      The activation function to apply to the aggregated multi-head results for each GAT
      layer. If not specified, the default value will be ELU.
    residual: bool
      Whether to add a residual connection within each GAT layer. Default to True.
    dropout: float
      The dropout probability within each GAT layer. Default to 0.
    alpha: float
      A hyperparameter in LeakyReLU, which is the slope for negative values. Default to 0.2.
    predictor_hidden_feats: int
      The size for hidden representations in the output MLP predictor. Default to 128.
    predictor_dropout: float
      The dropout probability in the output MLP predictor. Default to 0.
    mode: str
      The model type, 'classification' or 'regression'. Default to 'regression'.
    number_atom_features: int
      The length of the initial atom feature vectors. Default to 30.
    n_classes: int
      The number of classes to predict per task
      (only used when ``mode`` is 'classification'). Default to 2.
    self_loop: bool
      Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
      When input graphs have isolated nodes, self loops allow preserving the original feature
      of them in message passing. Default to True.
    kwargs
      This can include any keyword argument of TorchModel.
    """
        model = GAT(n_tasks=n_tasks,
                    graph_attention_layers=graph_attention_layers,
                    n_attention_heads=n_attention_heads,
                    agg_modes=agg_modes,
                    activation=activation,
                    residual=residual,
                    dropout=dropout,
                    alpha=alpha,
                    predictor_hidden_feats=predictor_hidden_feats,
                    predictor_dropout=predictor_dropout,
                    mode=mode,
                    number_atom_features=number_atom_features,
                    n_classes=n_classes)
        if mode == 'regression':
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(GATModel, self).__init__(model,
                                       loss=loss,
                                       output_types=output_types,
                                       **kwargs)

        self._self_loop = self_loop
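
A hedged usage sketch for the constructor above. It assumes the class is in scope as GATModel (this DGL-based variant, not the one from Example no. 2) and that MolGraphConvFeaturizer supplies the default 30 atom features.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.MolGraphConvFeaturizer()
X = featurizer.featurize(["CCO", "CCN", "CCC"])
dataset = dc.data.NumpyDataset(X=X, y=np.random.rand(3, 1))

model = GATModel(n_tasks=1, graph_attention_layers=[8, 8], n_attention_heads=8,
                 mode='regression', batch_size=3)
model.fit(dataset, nb_epoch=1)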
Example no. 7
 def __init__(self,
              n_tasks: int,
              number_atom_features: int = 94,
              number_bond_features: int = 42,
              mode: str = 'regression',
              n_classes: int = 2,
              output_node_features: int = 256,
              hidden_features: int = 32,
              num_layers: int = 5,
              num_heads: int = 1,
              dropout: float = 0.1,
              pool_mode: str = 'sum',
              **kwargs):
     """
 Parameters
 ----------
 n_tasks: int
   Number of tasks.
 number_atom_features : int
   Size for the input node features. Default to 94.
 number_bond_features : int
   Size for the input edge features. Default to 42.
 mode: str
   The model type, 'classification' or 'regression'. Default to 'regression'.
 n_classes: int
   The number of classes to predict per task
   (only used when ``mode`` is 'classification'). Default to 2.
 output_node_features : int
   Size for the output node features in PAGTN layers. Default to 256.
 hidden_features : int
   Size for the hidden node features in PAGTN layers. Default to 32.
 num_layers: int
   Number of graph neural network layers, i.e. number of rounds of message passing.
    Default to 5.
 num_heads : int
   Number of attention heads. Default to 1.
 dropout: float
    Dropout probability. Default to 0.1.
  pool_mode : 'max' or 'mean' or 'sum'
    How to pool node representations into a graph representation: elementwise
    maximum, mean or sum. Default to 'sum'.
 kwargs
   This can include any keyword argument of TorchModel.
 """
     model = Pagtn(n_tasks=n_tasks,
                   number_atom_features=number_atom_features,
                   number_bond_features=number_bond_features,
                   mode=mode,
                   n_classes=n_classes,
                   output_node_features=output_node_features,
                   hidden_features=hidden_features,
                   num_layers=num_layers,
                   num_heads=num_heads,
                   dropout=dropout,
                   pool_mode=pool_mode)
     if mode == 'regression':
         loss: Loss = L2Loss()
         output_types = ['prediction']
     else:
         loss = SparseSoftmaxCrossEntropy()
         output_types = ['prediction', 'loss']
     super(PagtnModel, self).__init__(model,
                                      loss=loss,
                                      output_types=output_types,
                                      **kwargs)
 def create_loss(self):
     return SparseSoftmaxCrossEntropy()
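
A hedged usage sketch for the constructor above. It assumes the class is in scope as PagtnModel and that PagtnMolGraphFeaturizer supplies the 94 atom and 42 bond features matching the defaults.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.PagtnMolGraphFeaturizer()
X = featurizer.featurize(["CCO", "CCC"])
dataset = dc.data.NumpyDataset(X=X, y=np.random.rand(2, 1))

model = PagtnModel(n_tasks=1, mode='regression', pool_mode='sum', batch_size=2)
model.fit(dataset, nb_epoch=1)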
Example no. 9
    def __init__(self,
                 n_tasks: int,
                 num_layers: int = 2,
                 num_timesteps: int = 2,
                 graph_feat_size: int = 200,
                 dropout: float = 0.,
                 mode: str = 'regression',
                 number_atom_features: int = 30,
                 number_bond_features: int = 11,
                 n_classes: int = 2,
                 nfeat_name: str = 'x',
                 efeat_name: str = 'edge_attr',
                 self_loop: bool = True,
                 **kwargs):
        """
    Parameters
    ----------
    n_tasks: int
      Number of tasks.
    num_layers: int
      Number of graph neural network layers, i.e. number of rounds of message passing.
      Default to 2.
    num_timesteps: int
      Number of time steps for updating graph representations with a GRU. Default to 2.
    graph_feat_size: int
      Size for graph representations. Default to 200.
    dropout: float
      Dropout probability. Default to 0.
    mode: str
      The model type, 'classification' or 'regression'. Default to 'regression'.
    number_atom_features: int
      The length of the initial atom feature vectors. Default to 30.
    number_bond_features: int
      The length of the initial bond feature vectors. Default to 11.
    n_classes: int
      The number of classes to predict per task
      (only used when ``mode`` is 'classification'). Default to 2.
    nfeat_name: str
      For an input graph ``g``, the model assumes that it stores node features in
      ``g.ndata[nfeat_name]`` and will retrieve input node features from that.
      Default to 'x'.
    efeat_name: str
      For an input graph ``g``, the model assumes that it stores edge features in
      ``g.edata[efeat_name]`` and will retrieve input edge features from that.
      Default to 'edge_attr'.
    self_loop: bool
      Whether to add self loops for the nodes, i.e. edges from nodes to themselves.
      Default to True.
    kwargs
      This can include any keyword argument of TorchModel.
    """
        model = AttentiveFP(n_tasks=n_tasks,
                            num_layers=num_layers,
                            num_timesteps=num_timesteps,
                            graph_feat_size=graph_feat_size,
                            dropout=dropout,
                            mode=mode,
                            number_atom_features=number_atom_features,
                            number_bond_features=number_bond_features,
                            n_classes=n_classes,
                            nfeat_name=nfeat_name,
                            efeat_name=efeat_name)
        if mode == 'regression':
            loss: Loss = L2Loss()
            output_types = ['prediction']
        else:
            loss = SparseSoftmaxCrossEntropy()
            output_types = ['prediction', 'loss']
        super(AttentiveFPModel, self).__init__(model,
                                               loss=loss,
                                               output_types=output_types,
                                               **kwargs)

        self._self_loop = self_loop
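
A hedged usage sketch for the constructor above. It assumes the class is in scope as AttentiveFPModel and that MolGraphConvFeaturizer(use_edges=True) supplies the 30 atom and 11 bond features the defaults expect.

# Hypothetical usage sketch, not taken from the source above.
import numpy as np
import deepchem as dc

featurizer = dc.feat.MolGraphConvFeaturizer(use_edges=True)
X = featurizer.featurize(["CCO", "CCN", "c1ccccc1"])
dataset = dc.data.NumpyDataset(X=X, y=np.random.rand(3, 1))

model = AttentiveFPModel(n_tasks=1, num_layers=2, num_timesteps=2,
                         graph_feat_size=200, mode='regression', batch_size=3)
model.fit(dataset, nb_epoch=1)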