Example #1
    def __init__(self,
                 output_dim,
                 input_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 **kwargs):

        warnings.warn(
            "The dc.nn.LSTMStep is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by dc.models.tensorgraph.layers.LSTMStep",
            DeprecationWarning)

        super(LSTMStep, self).__init__(**kwargs)

        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        # No other forget biases supported right now.
        assert forget_bias_init == "one"
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.input_dim = input_dim
Example #2
    def __init__(self,
                 n_test,
                 n_support,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 **kwargs):
        """
    Unlike the AttnLSTM model, which only modifies the test vectors additively,
    this model performs an additive update to both the test and support vectors,
    using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/Linear/etc.)
    """
        super(ResiLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
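
A minimal construction sketch for the layer above, using the parameters documented in the docstring. The import path follows the dc.nn naming used in the deprecation warning of Example #31, and the argument values are illustrative assumptions, not values taken from this snippet.

import deepchem as dc

# Illustrative only: assumes a DeepChem 1.x install where dc.nn.ResiLSTMEmbedding exists.
# n_test / n_support are the test- and support-set sizes; max_depth is the number
# of LSTM embedding (refinement) steps applied to both sets.
embedding = dc.nn.ResiLSTMEmbedding(
    n_test=10, n_support=25, max_depth=3,
    init='glorot_uniform', activation='linear')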
Example #3
  def __init__(self,
               n_test,
               n_support,
               n_feat,
               max_depth,
               init='glorot_uniform',
               activation='linear',
               dropout=None,
               **kwargs):
    """
    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of features per atom
    max_depth: int
      Number of "processing steps" used by sequence-to-sequence for sets model.
    init: str, optional
      Type of initialization of weights
    activation: str, optional
      Activation for layers.
    dropout: float, optional
      Dropout probability
    """
    super(AttnLSTMEmbedding, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.max_depth = max_depth
    self.n_test = n_test
    self.n_support = n_support
    self.n_feat = n_feat
Example #4
    def __init__(self,
                 nb_filter,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
    Parameters
    ----------
    nb_filter: int
      Number of convolutional filters.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied after convolution.
    dropout: float, optional
      Dropout probability.
    max_deg: int, optional
      Maximum degree of atoms in molecules.
    min_deg: int, optional
      Minimum degree of atoms in molecules.
    """
        super(GraphConv, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        # TODO(rbharath): It's not clear where nb_affine comes from.
        # Is there a solid explanation here?
        self.nb_affine = 2 * max_deg + (1 - min_deg)
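
The TODO above asks where nb_affine comes from. One plausible accounting, offered here as an assumption rather than something stated in the snippet: one self-atom transform per degree in [min_deg, max_deg], plus one neighbor-sum transform per nonzero degree (degree-0 atoms have no neighbors), which gives (max_deg - min_deg + 1) + max_deg = 2 * max_deg + (1 - min_deg). A quick consistency check with the defaults:

# Hypothetical decomposition of nb_affine; it merely reproduces the formula above.
max_deg, min_deg = 10, 0
self_transforms = max_deg - min_deg + 1   # one per degree, including degree 0
neighbor_transforms = max_deg             # one per nonzero degree
assert self_transforms + neighbor_transforms == 2 * max_deg + (1 - min_deg) == 21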
Example #5
    def __init__(self,
                 batch_size,
                 n_input=128,
                 gaussian_expand=False,
                 init='glorot_uniform',
                 activation='tanh',
                 epsilon=1e-3,
                 momentum=0.99,
                 **kwargs):
        """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: boolean, optional
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied

    """
        self.n_input = n_input
        self.batch_size = batch_size
        self.gaussian_expand = gaussian_expand
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.epsilon = epsilon
        self.momentum = momentum
        super(WeaveGather, self).__init__(**kwargs)
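
gaussian_expand is documented as expanding each dimension of the atomic features by a gaussian histogram. As a toy illustration of that general idea (this is not the exact membership function DeepChem uses, which is not shown in this snippet), a scalar feature can be replaced by its memberships under a few fixed gaussians:

import numpy as np

def gaussian_histogram_toy(x, centers=(-1.0, 0.0, 1.0), width=0.5):
  # Illustrative only: one gaussian membership value per center.
  x = np.asarray(x, dtype=float)
  centers = np.asarray(centers, dtype=float)
  return np.exp(-(x[..., None] - centers) ** 2 / (2 * width ** 2))

print(gaussian_histogram_toy([0.2, -0.7]).shape)  # (2, 3): each feature becomes 3 memberships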
Example #6
    def __init__(self,
                 n_test,
                 n_support,
                 n_feat,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 **kwargs):
        """
    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of features per atom
    max_depth: int
      Number of "processing steps" used by sequence-to-sequence for sets model.
    init: str, optional
      Type of initialization of weights
    activation: str, optional
      Activation for layers.
    dropout: float, optional
      Dropout probability
    """
        super(AttnLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
        self.n_feat = n_feat
Example #7
  def __init__(self, output_dim,
               init='glorot_uniform', inner_init='orthogonal',
               forget_bias_init='one', activation='tanh', 
               inner_activation='hard_sigmoid', **kwargs):

    super(LSTMStep, self).__init__(**kwargs)

    self.output_dim = output_dim

    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    # No other forget biases supported right now.
    assert forget_bias_init == "one"
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
Example #8
  def __init__(self, nb_filter, init='glorot_uniform', activation='linear',
               dropout=None, max_deg=10, min_deg=0, **kwargs):
    """
    Parameters
    ----------
    nb_filter: int
      Number of convolutional filters.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied after convolution.
    dropout: float, optional
      Dropout probability.
    max_deg: int, optional
      Maximum degree of atoms in molecules.
    min_deg: int, optional
      Minimum degree of atoms in molecules.
    """
    super(GraphConv, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.nb_filter = nb_filter  # Save number of filters
    self.dropout = dropout  # Save dropout params
    self.max_deg = max_deg
    self.min_deg = min_deg
    # TODO(rbharath): It's not clear where nb_affine comes from.
    # Is there a solid explanation here?
    self.nb_affine = 2 * max_deg + (1 - min_deg)
Example #9
  def __init__(self, n_test, n_support, max_depth, init='glorot_uniform',
               activation='linear', **kwargs):
    """
    Unlike the AttnLSTM model, which only modifies the test vectors additively,
    this model performs an additive update to both the test and support vectors,
    using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/Linear/etc.)
    """
    super(ResiLSTMEmbedding, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.max_depth = max_depth
    self.n_test = n_test
    self.n_support = n_support
Example #10
  def __init__(self,
               batch_size,
               n_atom_input_feat=50,
               n_output=128,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_atom_input_feat: int, optional
      Number of features for each atom in input.
    n_output: int, optional
      Number of output features for each atom (concatenated)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied

    """
    self.batch_size = batch_size
    self.n_atom_input_feat = n_atom_input_feat
    self.n_output = n_output
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    super(WeaveConcat, self).__init__(**kwargs)
Example #11
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 activation=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim='2+')]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(Dense, self).__init__(**kwargs)
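
One small behavioural note on the constructor above, shown as a stand-alone sketch: the `if self.input_dim:` guard is a truthiness test, so input_dim=0 is treated the same as input_dim=None and no 'input_shape' is injected into kwargs.

def inject_input_shape(input_dim, **kwargs):
  # Mirrors the guard in the constructor above.
  if input_dim:
    kwargs['input_shape'] = (input_dim,)
  return kwargs

print(inject_input_shape(128))   # {'input_shape': (128,)}
print(inject_input_shape(None))  # {}
print(inject_input_shape(0))     # {} -- 0 is falsy, same as None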
Example #12
  def __init__(self,
               output_dim,
               input_dim,
               init='glorot_uniform',
               activation="relu",
               bias=True,
               **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.bias = bias

    input_shape = (self.input_dim,)
    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.W = self.add_weight(
        (self.input_dim, self.output_dim),
        initializer=self.init,
        name='{}_W'.format(self.name))
    self.b = self.add_weight(
        (self.output_dim,), initializer='zero', name='{}_b'.format(self.name))
Example #13
  def __init__(self,
               batch_size,
               n_input=128,
               gaussian_expand=False,
               init='glorot_uniform',
               activation='tanh',
               epsilon=1e-3,
               momentum=0.99,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    gaussian_expand: boolean, optional
      Whether to expand each dimension of atomic features by gaussian histogram

    """
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.epsilon = epsilon
    self.momentum = momentum
    super(WeaveGather, self).__init__(**kwargs)
Example #14
    def __init__(self,
                 n_embedding=30,
                 n_distance=100,
                 n_hidden=60,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      granularity of distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
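
A minimal construction sketch using the documented defaults. The import path follows the dc.nn naming used in the deprecation warning of Example #15; the parameter values are the defaults shown above.

import deepchem as dc

# Illustrative only: assumes a DeepChem 1.x install exposing dc.nn.DTNNStep.
# n_embedding features per atom, n_distance bins in the distance matrix,
# n_hidden units in the interaction hidden layer.
step = dc.nn.DTNNStep(n_embedding=30, n_distance=100, n_hidden=60,
                      init='glorot_uniform', activation='tanh')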
Example #15
    def __init__(self,
                 n_embedding=30,
                 n_distance=100,
                 n_hidden=60,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      granularity of distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        warnings.warn(
            "The dc.nn.DTNNStep is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DTNNStep", DeprecationWarning)
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
Example #16
    def __init__(self,
                 n_embedding=30,
                 n_outputs=100,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_outputs: int, optional
      Number of features for each molecule(output)
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        self.n_embedding = n_embedding
        self.n_outputs = n_outputs
        self.layer_sizes = layer_sizes
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
Example #17
    def __init__(self,
                 max_atoms,
                 n_atom_input_feat=75,
                 n_pair_input_feat=14,
                 n_atom_output_feat=50,
                 n_pair_output_feat=50,
                 n_hidden_AA=50,
                 n_hidden_PA=50,
                 n_hidden_AP=50,
                 n_hidden_PP=50,
                 update_pair=True,
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 **kwargs):
        """
    Parameters
    ----------
    max_atoms: int
      Maximum number of atoms in a molecule, should be defined based on dataset
    n_atom_input_feat: int, optional
      Number of features for each atom in input.
    n_pair_input_feat: int, optional
      Number of features for each pair of atoms in input.
    n_atom_output_feat: int, optional
      Number of features for each atom in output.
    n_pair_output_feat: int, optional
      Number of features for each pair of atoms in output.
    n_hidden_XX: int, optional
      Number of units (convolution depths) in the corresponding hidden layer
    update_pair: bool, optional
      Whether to compute updated pair features;
      can be turned off for the last layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here

    """
        super(WeaveLayer, self).__init__(**kwargs)
        self.max_atoms = max_atoms
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.update_pair = update_pair  # last weave layer does not need to update
        self.n_hidden_AA = n_hidden_AA
        self.n_hidden_PA = n_hidden_PA
        self.n_hidden_AP = n_hidden_AP
        self.n_hidden_PP = n_hidden_PP
        self.n_hidden_A = n_hidden_AA + n_hidden_PA
        self.n_hidden_P = n_hidden_AP + n_hidden_PP

        self.n_atom_input_feat = n_atom_input_feat
        self.n_pair_input_feat = n_pair_input_feat
        self.n_atom_output_feat = n_atom_output_feat
        self.n_pair_output_feat = n_pair_output_feat
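
With the defaults shown above, the concatenated hidden widths computed in the constructor work out as follows (plain arithmetic on the values in the snippet):

n_hidden_AA = n_hidden_PA = n_hidden_AP = n_hidden_PP = 50  # defaults above
n_hidden_A = n_hidden_AA + n_hidden_PA  # atom branch: atom->atom plus pair->atom
n_hidden_P = n_hidden_AP + n_hidden_PP  # pair branch: atom->pair plus pair->pair
assert (n_hidden_A, n_hidden_P) == (100, 100)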
Example #18
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 **kwargs):

        super(LSTMStep, self).__init__(**kwargs)

        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        # No other forget biases supported right now.
        assert forget_bias_init == "one"
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
Example #19
    def __init__(self, batch_size, activation='linear', **kwargs):
        """
    Parameters
    ----------
    batch_size: int
      Number of elements in batch of data.
    """
        super(GraphGather, self).__init__(**kwargs)

        self.activation = activations.get(activation)  # Get activations
        self.batch_size = batch_size
Example #20
  def __init__(self, batch_size, activation='linear', **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      Number of elements in batch of data.
    """
    super(GraphGather, self).__init__(**kwargs)

    self.activation = activations.get(activation)  # Get activations
    self.batch_size = batch_size
Example #21
  def __init__(self,
               n_embedding=20,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    self.n_embedding = n_embedding
    self.layer_sizes = layer_sizes
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNGather, self).__init__(**kwargs)
Example #22
    def __init__(self,
                 n_embedding=20,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        self.n_embedding = n_embedding
        self.layer_sizes = layer_sizes
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
Example #23
    def __init__(self,
                 max_atoms,
                 out_channels,
                 atom_number_cases=[1, 6, 7, 8],
                 init='glorot_uniform',
                 activation='relu',
                 **kwargs):
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_atoms = max_atoms
        self.out_channels = out_channels
        self.atom_number_cases = atom_number_cases

        super(AtomicDifferentiatedDense, self).__init__(**kwargs)
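
The default atom_number_cases=[1, 6, 7, 8] are the atomic numbers of hydrogen, carbon, nitrogen and oxygen; a create_tensor such as the one in Example #34 builds one weight slice per case. A small self-contained illustration of the resulting weight shape (the channel sizes are hypothetical, not taken from the snippet):

atom_number_cases = [1, 6, 7, 8]           # H, C, N, O
elements = {1: 'H', 6: 'C', 7: 'N', 8: 'O'}
print([elements[z] for z in atom_number_cases])  # ['H', 'C', 'N', 'O']

in_channels, out_channels = 32, 64         # hypothetical feature widths
W_shape = (len(atom_number_cases), in_channels, out_channels)
print(W_shape)                             # one (in_channels, out_channels) slice per element case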
Example #24
  def __init__(self,
               max_atoms,
               n_atom_input_feat=75,
               n_pair_input_feat=14,
               n_atom_output_feat=50,
               n_pair_output_feat=50,
               n_hidden_AA=50,
               n_hidden_PA=50,
               n_hidden_AP=50,
               n_hidden_PP=50,
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               **kwargs):
    """
    Parameters
    ----------
    n_atom_input_feat: int
      Number of features for each atom in input.
    n_pair_input_feat: int
      Number of features for each pair of atoms in input.
    n_atom_output_feat: int
      Number of features for each atom in output.
    n_pair_output_feat: int
      Number of features for each pair of atoms in output.
    n_hidden_XX: int
      Number of units (convolution depths) in the corresponding hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here

    """
    super(WeaveLayer, self).__init__(**kwargs)
    self.max_atoms = max_atoms
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.n_hidden_AA = n_hidden_AA
    self.n_hidden_PA = n_hidden_PA
    self.n_hidden_AP = n_hidden_AP
    self.n_hidden_PP = n_hidden_PP
    self.n_hidden_A = n_hidden_AA + n_hidden_PA
    self.n_hidden_P = n_hidden_AP + n_hidden_PP

    self.n_atom_input_feat = n_atom_input_feat
    self.n_pair_input_feat = n_pair_input_feat
    self.n_atom_output_feat = n_atom_output_feat
    self.n_pair_output_feat = n_pair_output_feat
Example #25
  def __init__(self,
               n_embedding=20,
               n_distance=100,
               n_hidden=20,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    self.n_embedding = n_embedding
    self.n_distance = n_distance
    self.n_hidden = n_hidden
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNStep, self).__init__(**kwargs)
Example #26
    def __init__(self,
                 n_embedding=20,
                 n_distance=100,
                 n_hidden=20,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
Example #27
    def __init__(self,
                 n_graph_feat=30,
                 n_atom_feat=75,
                 max_atoms=50,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 batch_size=64,
                 **kwargs):
        """
    Parameters
    ----------
    n_graph_feat: int, optional
      Number of features for each node (and the whole graph).
    n_atom_feat: int, optional
      Number of features listed per atom.
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here
    batch_size: int, optional
      number of molecules in a batch
    """
        warnings.warn(
            "The dc.nn.DAGLayer is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DAGLayer", DeprecationWarning)
        super(DAGLayer, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.batch_size = batch_size
        self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
        # number of inputs each step
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_graph_feat
        self.n_atom_feat = n_atom_feat
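
The per-step input width computed above combines an atom's own features with graph features for up to max_atoms - 1 other atoms. With the defaults shown, that works out to:

n_atom_feat, n_graph_feat, max_atoms = 75, 30, 50  # defaults above
n_inputs = n_atom_feat + (max_atoms - 1) * n_graph_feat
assert n_inputs == 75 + 49 * 30 == 1545            # inputs consumed at each DAG step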
Example #28
    def __init__(self, batch_size, activation='linear', **kwargs):
        """
    Parameters
    ----------
    batch_size: int
      Number of elements in batch of data.
    """
        warnings.warn(
            "The dc.nn.GraphGather is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by dc.models.tensorgraph.layers.GraphGather",
            DeprecationWarning)
        super(GraphGather, self).__init__(**kwargs)

        self.activation = activations.get(activation)  # Get activations
        self.batch_size = batch_size
Example #29
    def __init__(self,
                 nb_filter,
                 n_atom_features,
                 batch_size,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
        Parameters
        ----------
        nb_filter: int
          Number of convolutional filters.
        n_atom_features: int
          Number of features listed per atom.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied after convolution.
        dropout: float, optional
          Dropout probability.
        max_deg: int, optional
          Maximum degree of atoms in molecules.
        min_deg: int, optional
          Minimum degree of atoms in molecules.
        """
        warnings.warn("The dc.nn.GraphConv is "
                      "deprecated. Will be removed in DeepChem 1.4. "
                      "Will be replaced by dc.models.tensorgraph.layers.GraphConv",
                      DeprecationWarning)
        super(Gather1, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        self.batch_size = batch_size
        # Is there a solid explanation here?
        self.nb_affine = max_deg + (1 - min_deg)
        self.n_atom_features = n_atom_features
Example #30
  def __init__(self,
               output_dim,
               input_dim,
               init='glorot_uniform',
               activation="relu",
               bias=True,
               **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.bias = bias

    input_shape = (self.input_dim,)
    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
    self.input_dim = input_dim
Example #31
    def __init__(self,
                 n_test,
                 n_support,
                 n_feat,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 **kwargs):
        """
    Unlike the AttnLSTM model, which only modifies the test vectors additively,
    this model performs an additive update to both the test and support vectors,
    using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of input atom features
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/Linear/etc.)
    """
        warnings.warn(
            "The dc.nn.ResiLSTMEmbedding is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.layers.IterRefLSTM", DeprecationWarning)
        super(ResiLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
        self.n_feat = n_feat
Example #32
    def __init__(self,
                 n_graph_feat=30,
                 n_atom_features=75,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 max_atoms=50,
                 **kwargs):
        """
    Parameters
    ----------
    n_graph_feat: int
      Number of features for each node (and the whole graph).
    n_atom_features: int
      Number of features listed per atom.
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    """
        super(DAGLayer, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.n_inputs = n_atom_features + (self.max_atoms - 1) * n_graph_feat
        # number of inputs each step
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_graph_feat
        self.n_atom_features = n_atom_features
Example #33
  def __init__(self,
               n_graph_feat=30,
               n_atom_features=75,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               max_atoms=50,
               **kwargs):
    """
    Parameters
    ----------
    n_graph_feat: int
      Number of features for each node (and the whole graph).
    n_atom_features: int
      Number of features listed per atom.
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    """
    super(DAGLayer, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.n_inputs = n_atom_features + (self.max_atoms - 1) * n_graph_feat
    # number of inputs each step
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_graph_feat
    self.n_atom_features = n_atom_features
Example #34
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate Radial Symmetry Function """
        init_fn = initializations.get(self.init)  # Set weight initialization
        activation_fn = activations.get(self.activation)
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        inputs = in_layers[0].out_tensor
        atom_numbers = in_layers[1].out_tensor
        in_channels = inputs.get_shape().as_list()[-1]
        self.W = init_fn(
            [len(self.atom_number_cases), in_channels, self.out_channels])

        self.b = model_ops.zeros(
            (len(self.atom_number_cases), self.out_channels))
        outputs = []
        for i, atom_case in enumerate(self.atom_number_cases):
            # optimization to allow for tensorcontraction/broadcasted mmul
            # using a reshape trick. Note that the np and tf matmul behavior
            # differs when dealing with broadcasts

            a = inputs  # (i,j,k)
            b = self.W[i, :, :]  # (k, l)

            ai = tf.shape(a)[0]
            aj = tf.shape(a)[1]
            ak = tf.shape(a)[2]
            bl = tf.shape(b)[1]

            output = activation_fn(
                tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b),
                           [ai, aj, bl]) + self.b[i, :])

            mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
            output = tf.reshape(output * tf.expand_dims(mask, 2),
                                (-1, self.max_atoms, self.out_channels))
            outputs.append(output)
        self.out_tensor = tf.add_n(outputs)
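
The comment above refers to a reshape trick for the broadcasted matmul. A NumPy sketch of the same idea, with illustrative shapes rather than DeepChem code: collapsing the leading dimensions, doing one 2-D matmul, and restoring the shape matches multiplying each (atoms, channels) slice separately. The last lines mirror the per-atom-type mask built from atom_numbers - atom_case.

import numpy as np

i, j, k, l = 4, 6, 8, 5                  # illustrative sizes
a = np.random.rand(i, j, k)              # inputs: (molecules, atoms, channels)
b = np.random.rand(k, l)                 # one atom-type weight slice, W[i, :, :]

# Reshape trick: (i*j, k) @ (k, l) -> (i*j, l) -> (i, j, l)
out = (a.reshape(i * j, k) @ b).reshape(i, j, l)
assert np.allclose(out, np.einsum('ijk,kl->ijl', a, b))

# Per-atom-type mask, as in 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool)):
atom_numbers = np.array([[1, 6, 6, 8, 0, 0]])  # padded atomic numbers for one molecule
atom_case = 6                                  # carbon
mask = 1.0 - (atom_numbers - atom_case).astype(bool).astype(float)
print(mask)                                    # 1.0 only where the atom is carbon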
Example #35
  def __init__(self, output_dim, init='glorot_uniform',
               activation=None,
               W_regularizer=None, b_regularizer=None, activity_regularizer=None,
               W_constraint=None, b_constraint=None,
               bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    self.input_spec = [InputSpec(ndim='2+')]

    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
Example #36
    def __init__(self,
                 n_graph_feat=30,
                 n_outputs=30,
                 max_atoms=50,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 **kwargs):
        """
    Parameters
    ----------
    n_graph_feat: int, optional
      Number of features for each atom
    n_outputs: int, optional
      Number of features for each molecule.
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    layer_sizes: list of int, optional
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported
    """
        super(DAGGather, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_outputs
Example #37
    def __init__(self,
                 n_embedding=30,
                 n_outputs=100,
                 layer_sizes=[100],
                 output_activation=True,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    n_outputs: int, optional
      Number of final summed outputs
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        warnings.warn(
            "The dc.nn.DTNNGather is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DTNNGather",
            DeprecationWarning)
        self.n_embedding = n_embedding
        self.layer_sizes = layer_sizes
        self.n_outputs = n_outputs
        self.output_activation = output_activation
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
Example #38
    def __init__(self,
                 nb_filter,
                 n_atom_features,
                 batch_size,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
        Parameters
        ----------
        nb_filter: int
          Number of convolutional filters.
        n_atom_features: int
          Number of features listed per atom.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied after convolution.
        dropout: float, optional
          Dropout probability.
        max_deg: int, optional
          Maximum degree of atoms in molecules.
        min_deg: int, optional
          Minimum degree of atoms in molecules.
        """
        warnings.warn("The dc.nn.GraphConv is "
                      "deprecated. Will be removed in DeepChem 1.4. "
                      "Will be replaced by dc.models.tensorgraph.layers.GraphConv",
                      DeprecationWarning)
        super(GraphConv_and_gather, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        self.batch_size = batch_size
        # Is there a solid explanation here?
        self.nb_affine = 3 * max_deg + (2 - min_deg)
        self.n_atom_features = n_atom_features
        n_atom_features = self.n_atom_features

        self.beta_init = initializations.get('zero')
        self.gamma_init = initializations.get('one')
        self.epsilon = 1e-5
        self.momentum = 0.99
        self.gamma_regularizer = regularizers.get(None)
        self.beta_regularizer = regularizers.get(None)


        # Generate the nb_affine weights and biases
        self.W_list = [
            self.init([n_atom_features, self.nb_filter])
            for k in range(self.nb_affine)
            ]
        self.b_list = [
            model_ops.zeros(shape=[
                self.nb_filter,
            ]) for k in range(self.nb_affine)
            ]

        self.trainable_weights = self.W_list + self.b_list
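
Plain arithmetic on the defaults above: nb_affine = 3 * 10 + (2 - 0) = 32, so the constructor builds 32 weight matrices of shape (n_atom_features, nb_filter) and 32 bias vectors of length nb_filter. With hypothetical sizes for the two required arguments, the parameter count is:

max_deg, min_deg = 10, 0                       # defaults above
nb_affine = 3 * max_deg + (2 - min_deg)
assert nb_affine == 32                         # number of (W, b) pairs created above

n_atom_features, nb_filter = 75, 64            # hypothetical; neither has a default in the signature
n_params = nb_affine * (n_atom_features * nb_filter + nb_filter)
print(n_params)                                # 155648 trainable scalars in W_list + b_list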