Example #1
    def __init__(self,
                 output_dim,
                 input_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 **kwargs):

        warnings.warn(
            "The dc.nn.LSTMStep is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by dc.models.tensorgraph.layers.LSTMStep",
            DeprecationWarning)

        super(LSTMStep, self).__init__(**kwargs)

        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        # No other forget biases supported right now.
        assert forget_bias_init == "one"
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.input_dim = input_dim
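As a usage sketch for this constructor (values are illustrative; it assumes the layer is still importable as dc.nn.LSTMStep in a pre-1.4 DeepChem, per the deprecation warning above, and follows the signature in Example #1, which still takes input_dim):

import deepchem as dc

# Illustrative dimensions only; the init/activation strings are resolved by
# initializations.get and activations.get inside __init__.
lstm = dc.nn.LSTMStep(
    output_dim=64,
    input_dim=128,
    init='glorot_uniform',
    inner_init='orthogonal',
    forget_bias_init='one',  # the assert in __init__ only accepts 'one'
    activation='tanh',
    inner_activation='hard_sigmoid')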
Example #2
 def __init__(self, epsilon=1e-3, mode=0, axis=-1, momentum=0.99,
              beta_init='zero', gamma_init='one',
              gamma_regularizer=None, beta_regularizer=None, **kwargs):
   self.beta_init = initializations.get(beta_init)
   self.gamma_init = initializations.get(gamma_init)
   self.epsilon = epsilon
   self.mode = mode
   self.axis = axis
   self.momentum = momentum
   self.gamma_regularizer = regularizers.get(gamma_regularizer)
   self.beta_regularizer = regularizers.get(beta_regularizer)
   if self.mode == 0:
     self.uses_learning_phase = True
   super(BatchNormalization, self).__init__(**kwargs)
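Every snippet here relies on the same pattern: initializations.get, activations.get and regularizers.get map a string name (or pass through a callable) to the corresponding function. A minimal sketch of such a lookup, written in a Keras-1.x style and not taken from the DeepChem source:

def get(identifier, registry):
  """Hypothetical resolver: turn a name or callable into a callable."""
  if identifier is None or callable(identifier):
    return identifier
  if isinstance(identifier, str):
    if identifier not in registry:
      raise ValueError('Unknown identifier: %s' % identifier)
    return registry[identifier]
  raise TypeError('Could not interpret identifier: %r' % (identifier,))

# e.g. activations.get('tanh') would resolve to the tanh function through a
# registry lookup of this kind.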
Example #3
    def __init__(self,
                 n_embedding=30,
                 n_outputs=100,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_outputs: int, optional
      Number of features for each molecule (output)
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        self.n_embedding = n_embedding
        self.n_outputs = n_outputs
        self.layer_sizes = layer_sizes
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
Example #4
  def __init__(self,
               n_test,
               n_support,
               n_feat,
               max_depth,
               init='glorot_uniform',
               activation='linear',
               dropout=None,
               **kwargs):
    """
    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of features per atom
    max_depth: int
      Number of "processing steps" used by sequence-to-sequence for sets model.
    init: str, optional
      Type of initialization of weights
    activation: str, optional
      Activation for layers.
    dropout: float, optional
      Dropout probability
    """
    super(AttnLSTMEmbedding, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.max_depth = max_depth
    self.n_test = n_test
    self.n_support = n_support
    self.n_feat = n_feat
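A hedged instantiation sketch (sizes are illustrative; it assumes AttnLSTMEmbedding is exposed in the same dc.nn namespace as the other deprecated layers shown here, which the snippet itself does not state):

import deepchem as dc

# n_feat must match the per-atom feature width of the support and test sets.
attn_embed = dc.nn.AttnLSTMEmbedding(
    n_test=10,
    n_support=20,
    n_feat=75,
    max_depth=3,
    init='glorot_uniform',
    activation='linear')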
Example #5
    def __init__(self,
                 n_test,
                 n_support,
                 n_feat,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 **kwargs):
        """
    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of features per atom
    max_depth: int
      Number of "processing steps" used by sequence-to-sequence for sets model.
    init: str, optional
      Type of initialization of weights
    activation: str, optional
      Activation for layers.
    dropout: float, optional
      Dropout probability
    """
        super(AttnLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
        self.n_feat = n_feat
Example #6
    def __init__(self,
                 n_test,
                 n_support,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 **kwargs):
        """
    Unlike the AttnLSTM model which only modifies the test vectors additively,
    this model allows for an additive update to be performed to both test and
    support using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/linear/etc.)
    """
        super(ResiLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
Example #7
    def __init__(self,
                 nb_filter,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
    Parameters
    ----------
    nb_filter: int
      Number of convolutional filters.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied after convolution.
    dropout: float, optional
      Dropout probability.
    max_deg: int, optional
      Maximum degree of atoms in molecules.
    min_deg: int, optional
      Minimum degree of atoms in molecules.
    """
        super(GraphConv, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        # TODO(rbharath): It's not clear where nb_affine comes from.
        # Is there a solid explanation here?
        self.nb_affine = 2 * max_deg + (1 - min_deg)
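On the TODO above: with the defaults max_deg=10 and min_deg=0 the value is nb_affine = 2 * 10 + (1 - 0) = 21. A one-line check of that arithmetic:

# Reproduces the nb_affine computation from GraphConv.__init__ at the defaults.
max_deg, min_deg = 10, 0
assert 2 * max_deg + (1 - min_deg) == 21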
Example #8
    def __init__(self,
                 n_embedding=30,
                 n_distance=100,
                 n_hidden=60,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      granularity of distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
Example #9
    def __init__(self,
                 n_embedding=30,
                 periodic_table_length=30,
                 init='glorot_uniform',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    periodic_table_length: int, optional
      Length of embedding, 83=Bi
    init: str, optional
      Weight initialization for filters.
    """

        warnings.warn(
            "The dc.nn.DTNNEmbedding is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DTNNEmbedding",
            DeprecationWarning)
        self.n_embedding = n_embedding
        self.periodic_table_length = periodic_table_length
        self.init = initializations.get(init)  # Set weight initialization

        super(DTNNEmbedding, self).__init__(**kwargs)
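A usage sketch (illustrative, per the deprecation warning above naming dc.nn.DTNNEmbedding; note that this variant defaults periodic_table_length to 30, unlike the 83 used in Examples #22, #23 and #39, so pass it explicitly when heavier elements can occur):

import deepchem as dc

# 83 covers elements up to Bi, matching the docstring's "83=Bi" note.
embedding = dc.nn.DTNNEmbedding(
    n_embedding=30, periodic_table_length=83, init='glorot_uniform')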
Example #10
  def __init__(self,
               batch_size,
               n_input=128,
               gaussian_expand=False,
               init='glorot_uniform',
               activation='tanh',
               epsilon=1e-3,
               momentum=0.99,
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    gaussian_expand: boolean, optional
      Whether to expand each dimension of atomic features by gaussian histogram

    """
    self.n_input = n_input
    self.batch_size = batch_size
    self.gaussian_expand = gaussian_expand
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.epsilon = epsilon
    self.momentum = momentum
    super(WeaveGather, self).__init__(**kwargs)
Example #11
    def __init__(self,
                 batch_size,
                 n_input=128,
                 gaussian_expand=False,
                 init='glorot_uniform',
                 activation='tanh',
                 epsilon=1e-3,
                 momentum=0.99,
                 **kwargs):
        """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_input: int, optional
      number of features for each input molecule
    gaussian_expand: boolean, optional
      Whether to expand each dimension of atomic features by gaussian histogram
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied

    """
        self.n_input = n_input
        self.batch_size = batch_size
        self.gaussian_expand = gaussian_expand
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.epsilon = epsilon
        self.momentum = momentum
        super(WeaveGather, self).__init__(**kwargs)
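An instantiation sketch (batch size is illustrative; it assumes WeaveGather sits alongside the other weave layers in dc.nn, which is not stated in the snippet):

import deepchem as dc

# batch_size is the only required argument; the rest fall back to the defaults above.
gather = dc.nn.WeaveGather(
    batch_size=64,
    n_input=128,
    gaussian_expand=True,
    init='glorot_uniform',
    activation='tanh')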
Example #12
  def __init__(self, n_test, n_support, max_depth, init='glorot_uniform',
               activation='linear', **kwargs):
    """
    Unlike the AttnLSTM model which only modifies the test vectors additively,
    this model allows for an additive update to be performed to both test and
    support using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/linear/etc.)
    """
    super(ResiLSTMEmbedding, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.max_depth = max_depth
    self.n_test = n_test
    self.n_support = n_support
Example #13
  def __init__(self, output_dim,
               init='glorot_uniform', inner_init='orthogonal',
               forget_bias_init='one', activation='tanh', 
               inner_activation='hard_sigmoid', **kwargs):

    super(LSTMStep, self).__init__(**kwargs)

    self.output_dim = output_dim

    self.init = initializations.get(init)
    self.inner_init = initializations.get(inner_init)
    # No other forget biases supported right now.
    assert forget_bias_init == "one"
    self.forget_bias_init = initializations.get(forget_bias_init)
    self.activation = activations.get(activation)
    self.inner_activation = activations.get(inner_activation)
Example #14
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 activation=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim='2+')]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(Dense, self).__init__(**kwargs)
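A usage sketch for this Keras-1.x-style Dense constructor (illustrative values; it assumes Dense is imported from whatever module defines the snippet above):

# Passing input_dim makes __init__ set kwargs['input_shape'] = (input_dim,)
# before calling the base Layer, so this can be the first layer of a model.
dense = Dense(
    output_dim=256,
    init='glorot_uniform',
    activation='relu',
    bias=True,
    input_dim=1024)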
Example #15
  def __init__(self, nb_filter, init='glorot_uniform', activation='linear',
               dropout=None, max_deg=10, min_deg=0, **kwargs):
    """
    Parameters
    ----------
    nb_filter: int
      Number of convolutional filters.
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied after convolution.
    dropout: float, optional
      Dropout probability.
    max_deg: int, optional
      Maximum degree of atoms in molecules.
    min_deg: int, optional
      Minimum degree of atoms in molecules.
    """
    super(GraphConv, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.nb_filter = nb_filter  # Save number of filters
    self.dropout = dropout  # Save dropout params
    self.max_deg = max_deg
    self.min_deg = min_deg
    # TODO(rbharath): It's not clear where nb_affine comes from.
    # Is there a solid explanation here?
    self.nb_affine = 2 * max_deg + (1 - min_deg)
Example #16
  def add_weight(self, shape, initializer, name=None,
                 trainable=True,
                 regularizer=None,
                 constraint=None):
    """Adds a weight variable to the layer.

    Parameters
    ----------
    shape: The shape tuple of the weight.
    initializer: An Initializer instance (callable).
    trainable: A boolean, whether the weight should
      be trained via backprop or not (assuming
      that the layer itself is also trainable).
    regularizer: An optional Regularizer instance.
    """
    initializer = initializations.get(initializer)
    weight = initializer(shape, name=name)
    if regularizer is not None:
      self.add_loss(regularizer(weight))
    if constraint is not None:
      self.constraints[weight] = constraint
    if trainable:
      self._trainable_weights.append(weight)
    else:
      self._non_trainable_weights.append(weight)
    return weight
Example #17
    def add_weight(self,
                   shape,
                   initializer,
                   name=None,
                   trainable=True,
                   regularizer=None,
                   constraint=None):
        """Adds a weight variable to the layer.

    Parameters
    ----------
    shape: The shape tuple of the weight.
    initializer: An Initializer instance (callable).
    trainable: A boolean, whether the weight should
      be trained via backprop or not (assuming
      that the layer itself is also trainable).
    regularizer: An optional Regularizer instance.
    """
        initializer = initializations.get(initializer)
        weight = initializer(shape, name=name)
        if regularizer is not None:
            self.add_loss(regularizer(weight))
        if constraint is not None:
            self.constraints[weight] = constraint
        if trainable:
            self._trainable_weights.append(weight)
        else:
            self._non_trainable_weights.append(weight)
        return weight
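add_weight resolves its initializer argument through initializations.get and then calls it as initializer(shape, name=name); Example #19 below uses exactly this pattern from a constructor. A self-contained sketch of that call shape with a numpy stand-in for the Glorot-uniform initializer (not the DeepChem implementation):

import numpy as np

def glorot_uniform(shape, name=None):
  """Hypothetical stand-in for initializations.get('glorot_uniform')."""
  limit = np.sqrt(6.0 / (shape[0] + shape[-1]))
  return np.random.uniform(-limit, limit, size=shape)

# Same call shape as `weight = initializer(shape, name=name)` in add_weight.
W = glorot_uniform((128, 64), name='W')
assert W.shape == (128, 64)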
Example #18
  def __init__(self,
               batch_size,
               n_atom_input_feat=50,
               n_output=128,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    """
    Parameters
    ----------
    batch_size: int
      number of molecules in a batch
    n_atom_input_feat: int, optional
      Number of features for each atom in input.
    n_output: int, optional
      Number of output features for each atom (concatenated)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied

    """
    self.batch_size = batch_size
    self.n_atom_input_feat = n_atom_input_feat
    self.n_output = n_output
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    super(WeaveConcat, self).__init__(**kwargs)
Example #19
  def __init__(self,
               output_dim,
               input_dim,
               init='glorot_uniform',
               activation="relu",
               bias=True,
               **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.bias = bias

    input_shape = (self.input_dim,)
    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.W = self.add_weight(
        (self.input_dim, self.output_dim),
        initializer=self.init,
        name='{}_W'.format(self.name))
    self.b = self.add_weight(
        (self.output_dim,), initializer='zero', name='{}_b'.format(self.name))
Example #20
    def __init__(self,
                 n_embedding=30,
                 n_distance=100,
                 n_hidden=60,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    n_distance: int, optional
      granularity of distance matrix
    n_hidden: int, optional
      Number of nodes in hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    """
        warnings.warn(
            "The dc.nn.DTNNStep is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DTNNStep", DeprecationWarning)
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
Example #21
    def __init__(self,
                 max_atoms,
                 n_atom_input_feat=75,
                 n_pair_input_feat=14,
                 n_atom_output_feat=50,
                 n_pair_output_feat=50,
                 n_hidden_AA=50,
                 n_hidden_PA=50,
                 n_hidden_AP=50,
                 n_hidden_PP=50,
                 update_pair=True,
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 **kwargs):
        """
    Parameters
    ----------
    max_atoms: int
      Maximum number of atoms in a molecule, should be defined based on dataset
    n_atom_input_feat: int, optional
      Number of features for each atom in input.
    n_pair_input_feat: int, optional
      Number of features for each pair of atoms in input.
    n_atom_output_feat: int, optional
      Number of features for each atom in output.
    n_pair_output_feat: int, optional
      Number of features for each pair of atoms in output.
    n_hidden_XX: int, optional
      Number of units (convolution depths) in the corresponding hidden layer
    update_pair: bool, optional
      Whether to compute pair features; can be turned off for the last layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here

    """
        super(WeaveLayer, self).__init__(**kwargs)
        self.max_atoms = max_atoms
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.update_pair = update_pair  # last weave layer does not need to update
        self.n_hidden_AA = n_hidden_AA
        self.n_hidden_PA = n_hidden_PA
        self.n_hidden_AP = n_hidden_AP
        self.n_hidden_PP = n_hidden_PP
        self.n_hidden_A = n_hidden_AA + n_hidden_PA
        self.n_hidden_P = n_hidden_AP + n_hidden_PP

        self.n_atom_input_feat = n_atom_input_feat
        self.n_pair_input_feat = n_pair_input_feat
        self.n_atom_output_feat = n_atom_output_feat
        self.n_pair_output_feat = n_pair_output_feat
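With the defaults above, the concatenated hidden widths come out to n_hidden_A = 50 + 50 = 100 and n_hidden_P = 50 + 50 = 100. An instantiation sketch (max_atoms is illustrative and dataset-dependent; it assumes WeaveLayer is reachable as dc.nn.WeaveLayer like the other deprecated layers here):

import deepchem as dc

weave = dc.nn.WeaveLayer(
    max_atoms=60,            # dataset-dependent upper bound on atoms per molecule
    n_atom_input_feat=75,
    n_pair_input_feat=14,
    update_pair=True)        # set False for the last weave layer
assert weave.n_hidden_A == 100 and weave.n_hidden_P == 100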
Example #22
  def __init__(self,
               n_embedding=20,
               periodic_table_length=83,
               init='glorot_uniform',
               **kwargs):
    self.n_embedding = n_embedding
    self.periodic_table_length = periodic_table_length
    self.init = initializations.get(init)  # Set weight initialization

    super(DTNNEmbedding, self).__init__(**kwargs)
Example #23
    def __init__(self,
                 n_embedding=20,
                 periodic_table_length=83,
                 init='glorot_uniform',
                 **kwargs):
        self.n_embedding = n_embedding
        self.periodic_table_length = periodic_table_length
        self.init = initializations.get(init)  # Set weight initialization

        super(DTNNEmbedding, self).__init__(**kwargs)
Example #24
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 forget_bias_init='one',
                 activation='tanh',
                 inner_activation='hard_sigmoid',
                 **kwargs):

        super(LSTMStep, self).__init__(**kwargs)

        self.output_dim = output_dim

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        # No other forget biases supported right now.
        assert forget_bias_init == "one"
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
Example #25
  def __init__(self,
               n_embedding=20,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    self.n_embedding = n_embedding
    self.layer_sizes = layer_sizes
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNGather, self).__init__(**kwargs)
Example #26
 def __init__(self,
              epsilon=1e-3,
              mode=0,
              axis=-1,
              momentum=0.99,
              beta_init='zero',
              gamma_init='one',
              gamma_regularizer=None,
              beta_regularizer=None,
              **kwargs):
     self.beta_init = initializations.get(beta_init)
     self.gamma_init = initializations.get(gamma_init)
     self.epsilon = epsilon
     self.mode = mode
     self.axis = axis
     self.momentum = momentum
     self.gamma_regularizer = regularizers.get(gamma_regularizer)
     self.beta_regularizer = regularizers.get(beta_regularizer)
     if self.mode == 0:
         self.uses_learning_phase = True
     super(BatchNormalization, self).__init__(**kwargs)
Example #27
    def __init__(self,
                 n_embedding=20,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        self.n_embedding = n_embedding
        self.layer_sizes = layer_sizes
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNGather, self).__init__(**kwargs)
Example #28
  def __init__(self, n_hidden=100, init='glorot_uniform'):
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    Wz = self.init([n_hidden, n_hidden])
    Wr = self.init([n_hidden, n_hidden])
    Wh = self.init([n_hidden, n_hidden])
    Uz = self.init([n_hidden, n_hidden])
    Ur = self.init([n_hidden, n_hidden])
    Uh = self.init([n_hidden, n_hidden])
    bz = model_ops.zeros(shape=(n_hidden, ))
    br = model_ops.zeros(shape=(n_hidden, ))
    bh = model_ops.zeros(shape=(n_hidden, ))
    self.trainable_weights = [Wz, Wr, Wh, Uz, Ur, Uh, bz, br, bh]
Example #29
  def __init__(self,
               pair_features,
               n_pair_features=8,
               n_hidden=100,
               init='glorot_uniform'):
    self.n_pair_features = n_pair_features
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    W = self.init([n_pair_features, n_hidden * n_hidden])
    b = model_ops.zeros(shape=(n_hidden * n_hidden, ))
    self.A = tf.nn.xw_plus_b(pair_features, W, b)
    self.A = tf.reshape(self.A, (-1, n_hidden, n_hidden))
    self.trainable_weights = [W, b]
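For the pair-feature weight above: W has shape (n_pair_features, n_hidden * n_hidden), so with the defaults (8, 100) it is an 8 x 10000 matrix, and after tf.nn.xw_plus_b plus the reshape, self.A holds one n_hidden x n_hidden matrix per input pair. A shape-only check of that arithmetic with numpy stand-ins (not the TensorFlow code):

import numpy as np

n_pair_features, n_hidden, n_pairs = 8, 100, 5
pair_features = np.zeros((n_pairs, n_pair_features))
W = np.zeros((n_pair_features, n_hidden * n_hidden))
b = np.zeros(n_hidden * n_hidden)

A = pair_features.dot(W) + b            # same result shape as tf.nn.xw_plus_b
A = A.reshape(-1, n_hidden, n_hidden)   # one 100x100 matrix per pair
assert A.shape == (n_pairs, n_hidden, n_hidden)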
Example #30
  def __init__(self,
               n_embedding=20,
               n_distance=100,
               n_hidden=20,
               init='glorot_uniform',
               activation='tanh',
               **kwargs):
    self.n_embedding = n_embedding
    self.n_distance = n_distance
    self.n_hidden = n_hidden
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations

    super(DTNNStep, self).__init__(**kwargs)
Example #31
  def __init__(self,
               max_atoms,
               n_atom_input_feat=75,
               n_pair_input_feat=14,
               n_atom_output_feat=50,
               n_pair_output_feat=50,
               n_hidden_AA=50,
               n_hidden_PA=50,
               n_hidden_AP=50,
               n_hidden_PP=50,
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               **kwargs):
    """
    Parameters
    ----------
    n_atom_input_feat: int
      Number of features for each atom in input.
    n_pair_input_feat: int
      Number of features for each pair of atoms in input.
    n_atom_output_feat: int
      Number of features for each atom in output.
    n_pair_output_feat: int
      Number of features for each pair of atoms in output.
    n_hidden_XX: int
      Number of units (convolution depths) in the corresponding hidden layer
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here

    """
    super(WeaveLayer, self).__init__(**kwargs)
    self.max_atoms = max_atoms
    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.n_hidden_AA = n_hidden_AA
    self.n_hidden_PA = n_hidden_PA
    self.n_hidden_AP = n_hidden_AP
    self.n_hidden_PP = n_hidden_PP
    self.n_hidden_A = n_hidden_AA + n_hidden_PA
    self.n_hidden_P = n_hidden_AP + n_hidden_PP

    self.n_atom_input_feat = n_atom_input_feat
    self.n_pair_input_feat = n_pair_input_feat
    self.n_atom_output_feat = n_atom_output_feat
    self.n_pair_output_feat = n_pair_output_feat
Example #32
    def __init__(self,
                 max_atoms,
                 out_channels,
                 atom_number_cases=[1, 6, 7, 8],
                 init='glorot_uniform',
                 activation='relu',
                 **kwargs):
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_atoms = max_atoms
        self.out_channels = out_channels
        self.atom_number_cases = atom_number_cases

        super(AtomicDifferentiatedDense, self).__init__(**kwargs)
Example #33
    def __init__(self,
                 n_embedding=20,
                 n_distance=100,
                 n_hidden=20,
                 init='glorot_uniform',
                 activation='tanh',
                 **kwargs):
        self.n_embedding = n_embedding
        self.n_distance = n_distance
        self.n_hidden = n_hidden
        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations

        super(DTNNStep, self).__init__(**kwargs)
Example #34
  def add_weight(self, shape, initializer, regularizer=None, name=None):
    """Adds a weight variable to the layer.

    Parameters
    ----------
    shape: The shape tuple of the weight.
    initializer: An Initializer instance (callable).
    regularizer: An optional Regularizer instance.
    """
    initializer = initializations.get(initializer)
    weight = initializer(shape, name=name)
    if regularizer is not None:
      self.add_loss(regularizer(weight))

    return weight
Example #35
    def __init__(self,
                 n_graph_feat=30,
                 n_atom_feat=75,
                 max_atoms=50,
                 layer_sizes=[100],
                 init='glorot_uniform',
                 activation='relu',
                 dropout=None,
                 batch_size=64,
                 **kwargs):
        """
    Parameters
    ----------
    n_graph_feat: int, optional
      Number of features for each node (and the whole graph).
    n_atom_feat: int, optional
      Number of features listed per atom.
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here
    batch_size: int, optional
      number of molecules in a batch
    """
        warnings.warn(
            "The dc.nn.DAGLayer is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.graph_layers.DAGLayer", DeprecationWarning)
        super(DAGLayer, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.layer_sizes = layer_sizes
        self.dropout = dropout
        self.max_atoms = max_atoms
        self.batch_size = batch_size
        self.n_inputs = n_atom_feat + (self.max_atoms - 1) * n_graph_feat
        # number of inputs each step
        self.n_graph_feat = n_graph_feat
        self.n_outputs = n_graph_feat
        self.n_atom_feat = n_atom_feat
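With the defaults shown (n_atom_feat=75, max_atoms=50, n_graph_feat=30) the per-step input width is n_inputs = 75 + (50 - 1) * 30 = 1545. A one-line check of that arithmetic:

# Reproduces the n_inputs computation from DAGLayer.__init__ at the defaults.
n_atom_feat, max_atoms, n_graph_feat = 75, 50, 30
assert n_atom_feat + (max_atoms - 1) * n_graph_feat == 1545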
Example #36
  def __init__(self, M, batch_size, n_hidden=100, init='orthogonal', **kwargs):
    """
        Parameters
        ----------
        M: int
          Number of LSTM steps
        batch_size: int
          Number of samples in a batch (all batches must have the same size)
        n_hidden: int, optional
          number of hidden units in the passing phase
        """

    self.M = M
    self.batch_size = batch_size
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    super(SetGather, self).__init__(**kwargs)
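An instantiation sketch (sizes are illustrative; it assumes SetGather lives next to the other layers in dc.nn, which the snippet does not state):

import deepchem as dc

# M is the number of LSTM steps; every batch must contain exactly batch_size samples.
set_gather = dc.nn.SetGather(M=10, batch_size=64, n_hidden=100, init='orthogonal')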
Example #37
    def __init__(self,
                 nb_filter,
                 n_atom_features,
                 batch_size,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
        Parameters
        ----------
        nb_filter: int
          Number of convolutional filters.
        n_atom_features: int
          Number of features listed per atom.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied after convolution.
        dropout: float, optional
          Dropout probability.
        max_deg: int, optional
          Maximum degree of atoms in molecules.
        min_deg: int, optional
          Minimum degree of atoms in molecules.
        """
        warnings.warn("The dc.nn.GraphConv is "
                      "deprecated. Will be removed in DeepChem 1.4. "
                      "Will be replaced by dc.models.tensorgraph.layers.GraphConv",
                      DeprecationWarning)
        super(Gather1, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        self.batch_size = batch_size
        # TODO(rbharath): It's not clear where nb_affine comes from.
        # Is there a solid explanation here?
        self.nb_affine = max_deg + (1 - min_deg)
        self.n_atom_features = n_atom_features
Example #38
  def __init__(self,
               output_dim,
               input_dim,
               init='glorot_uniform',
               activation="relu",
               bias=True,
               **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.bias = bias

    input_shape = (self.input_dim,)
    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)
    self.input_dim = input_dim
Example #39
    def __init__(self,
                 n_embedding=30,
                 periodic_table_length=83,
                 init='glorot_uniform',
                 **kwargs):
        """
    Parameters
    ----------
    n_embedding: int, optional
      Number of features for each atom
    periodic_table_length: int, optional
      Length of embedding, 83=Bi
    init: str, optional
      Weight initialization for filters.
    """
        self.n_embedding = n_embedding
        self.periodic_table_length = periodic_table_length
        self.init = initializations.get(init)  # Set weight initialization

        super(DTNNEmbedding, self).__init__(**kwargs)
Example #40
    def __init__(self,
                 n_test,
                 n_support,
                 n_feat,
                 max_depth,
                 init='glorot_uniform',
                 activation='linear',
                 **kwargs):
        """
    Unlike the AttnLSTM model which only modifies the test vectors additively,
    this model allows for an additive update to be performed to both test and
    support using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    n_feat: int
      Number of input atom features
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLU/linear/etc.)
    """
        warnings.warn(
            "The dc.nn.ResiLSTMEmbedding is "
            "deprecated. Will be removed in DeepChem 1.4. "
            "Will be replaced by "
            "dc.models.tensorgraph.layers.IterRefLSTM", DeprecationWarning)
        super(ResiLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
        self.n_feat = n_feat
Example #41
  def __init__(self,
               n_graph_feat=30,
               n_atom_features=75,
               layer_sizes=[100],
               init='glorot_uniform',
               activation='relu',
               dropout=None,
               max_atoms=50,
               **kwargs):
    """
    Parameters
    ----------
    n_graph_feat: int
      Number of features for each node (and the whole graph).
    n_atom_features: int
      Number of features listed per atom.
    layer_sizes: list of int, optional (default=[100])
      Structure of hidden layer(s)
    init: str, optional
      Weight initialization for filters.
    activation: str, optional
      Activation function applied
    dropout: float, optional
      Dropout probability, not supported here
    max_atoms: int, optional
      Maximum number of atoms in molecules.
    """
    super(DAGLayer, self).__init__(**kwargs)

    self.init = initializations.get(init)  # Set weight initialization
    self.activation = activations.get(activation)  # Get activations
    self.layer_sizes = layer_sizes
    self.dropout = dropout
    self.max_atoms = max_atoms
    self.n_inputs = n_atom_features + (self.max_atoms - 1) * n_graph_feat
    # number of inputs each step
    self.n_graph_feat = n_graph_feat
    self.n_outputs = n_graph_feat
    self.n_atom_features = n_atom_features
Example #42
  def __init__(self, output_dim, init='glorot_uniform',
               activation=None,
               W_regularizer=None, b_regularizer=None, activity_regularizer=None,
               W_constraint=None, b_constraint=None,
               bias=True, input_dim=None, **kwargs):
    self.init = initializations.get(init)
    self.activation = activations.get(activation)
    self.output_dim = output_dim
    self.input_dim = input_dim

    self.W_regularizer = regularizers.get(W_regularizer)
    self.b_regularizer = regularizers.get(b_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.W_constraint = constraints.get(W_constraint)
    self.b_constraint = constraints.get(b_constraint)

    self.bias = bias
    self.input_spec = [InputSpec(ndim='2+')]

    if self.input_dim:
      kwargs['input_shape'] = (self.input_dim,)
    super(Dense, self).__init__(**kwargs)