Example #1
    def build(self, input_shape):
        """Initializes trainable weights."""
        x_input_shape, xp_input_shape = input_shape  # Unpack

        n_feat = xp_input_shape[1]

        self.lstm = LSTMStep(n_feat)
        self.q_init = model_ops.zeros([self.n_test, n_feat])
        self.r_init = model_ops.zeros([self.n_test, n_feat])
        self.states_init = self.lstm.get_initial_states([self.n_test, n_feat])

        self.trainable_weights = [self.q_init, self.r_init]
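Note: model_ops.zeros is used throughout these examples to create zero-initialized trainable tensors. As a rough mental model (an assumption, not DeepChem's actual implementation), it behaves like keras.backend.zeros, returning a TensorFlow variable filled with zeros:

import tensorflow as tf

def zeros(shape, dtype=tf.float32, name=None):
  # Sketch of a model_ops.zeros-style helper: a variable initialized to
  # all zeros, suitable for collecting into trainable_weights.
  return tf.Variable(tf.zeros(shape, dtype=dtype), name=name)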
Example #2
  def __init__(self, n_hidden=100, init='glorot_uniform'):
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    Wz = self.init([n_hidden, n_hidden])
    Wr = self.init([n_hidden, n_hidden])
    Wh = self.init([n_hidden, n_hidden])
    Uz = self.init([n_hidden, n_hidden])
    Ur = self.init([n_hidden, n_hidden])
    Uh = self.init([n_hidden, n_hidden])
    bz = model_ops.zeros(shape=(n_hidden,))
    br = model_ops.zeros(shape=(n_hidden,))
    bh = model_ops.zeros(shape=(n_hidden,))
    self.trainable_weights = [Wz, Wr, Wh, Uz, Ur, Uh, bz, br, bh]
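Note: the nine tensors above are the standard parameters of a GRU cell. A minimal sketch of the step they would drive, assuming sigmoid gates and a tanh candidate (x is the input, h the previous hidden state; this is the textbook GRU wiring, not necessarily this class's exact step):

import tensorflow as tf

def gru_step(x, h, Wz, Wr, Wh, Uz, Ur, Uh, bz, br, bh):
  z = tf.sigmoid(tf.matmul(x, Wz) + tf.matmul(h, Uz) + bz)  # update gate
  r = tf.sigmoid(tf.matmul(x, Wr) + tf.matmul(h, Ur) + br)  # reset gate
  h_cand = tf.tanh(tf.matmul(x, Wh) + tf.matmul(r * h, Uh) + bh)
  return z * h + (1 - z) * h_cand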
Example #3
    def build(self):
        self.W_cf = self.init([self.n_embedding, self.n_hidden])
        self.W_df = self.init([self.n_distance, self.n_hidden])
        self.W_fc = self.init([self.n_hidden, self.n_embedding])
        self.b_cf = model_ops.zeros(shape=[
            self.n_hidden,
        ])
        self.b_df = model_ops.zeros(shape=[
            self.n_hidden,
        ])

        self.trainable_weights = [
            self.W_cf, self.W_df, self.W_fc, self.b_cf, self.b_df
        ]
Example #4
  def call(self, x_xp, mask=None):
    """Execute this layer on input tensors.

    Parameters
    ----------
    x_xp: list
      List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
      Xp should be of shape (n_support, n_feat) where n_test is the size of
      the test set, n_support that of the support set, and n_feat is the number
      of per-atom features.

    Returns
    -------
    list
      Returns two tensors of same shape as input. Namely the output shape will
      be [(n_test, n_feat), (n_support, n_feat)]
    """
    # x is test set, xp is support set.
    x, xp = x_xp

    ## Initializes trainable weights.
    n_feat = self.n_feat

    self.lstm = LSTMStep(n_feat, 2 * n_feat)
    self.q_init = model_ops.zeros([self.n_test, n_feat])
    self.r_init = model_ops.zeros([self.n_test, n_feat])
    self.states_init = self.lstm.get_initial_states([self.n_test, n_feat])

    self.trainable_weights = [self.q_init, self.r_init]

    ### Performs computations

    # Get initializations
    q = self.q_init
    # r = self.r_init
    states = self.states_init

    for d in range(self.max_depth):
      # Process using attention
      # Eqn (4), appendix A.1 of Matching Networks paper
      e = cos(x + q, xp)
      a = tf.nn.softmax(e)
      r = model_ops.dot(a, xp)

      # Generate new attention states
      y = model_ops.concatenate([q, r], axis=1)
      q, states = self.lstm([y] + states)  #+ self.lstm.get_constants(x)

    return [x + q, xp]
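Note: cos is not defined in this snippet. Read against Eqn (4) in appendix A.1 of the Matching Networks paper, it is a pairwise cosine similarity between test and support rows; a minimal sketch under that assumption:

import tensorflow as tf

def cos(x, xp):
  # x: (n_test, n_feat), xp: (n_support, n_feat) -> (n_test, n_support)
  x_norm = tf.nn.l2_normalize(x, axis=1)
  xp_norm = tf.nn.l2_normalize(xp, axis=1)
  return tf.matmul(x_norm, xp_norm, transpose_b=True)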
Example #5
    def build(self, input_shape):
        """"Construct internal trainable weights.

    This layer expects arguments of form

    [atom_features, deg_slice, membership, deg_adj_list placeholders...]

    input_shape should provide the shapes of each of these tensors.

    Parameters
    ----------
    input_shape: list
      Shapes of incoming tensors
    """

        # Generate the nb_affine weights and biases
        atom_features_shape = input_shape[0]
        n_features = atom_features_shape[1]
        self.W_list = [
            self.init([n_features, self.nb_filter])
            for k in range(self.nb_affine)
        ]
        self.b_list = [
            model_ops.zeros(shape=[
                self.nb_filter,
            ]) for k in range(self.nb_affine)
        ]

        self.trainable_weights = self.W_list + self.b_list
Example #6
    def build(self):
        self.W_list = []
        self.b_list = []
        prev_layer_size = self.n_embedding
        for layer_size in self.layer_sizes:
            self.W_list.append(self.init([prev_layer_size, layer_size]))
            self.b_list.append(model_ops.zeros(shape=[
                layer_size,
            ]))
            prev_layer_size = layer_size
        self.W_list.append(self.init([prev_layer_size, self.n_embedding]))
        self.b_list.append(model_ops.zeros(shape=[
            self.n_embedding,
        ]))

        self.trainable_weights = self.W_list + self.b_list
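Note: W_list/b_list pairs built this way are consumed layer by layer in the forward pass. A hedged sketch of the typical consumption loop (the activation choice here is an assumption):

import tensorflow as tf

def forward(x, W_list, b_list):
  # Apply each affine layer followed by a nonlinearity.
  for W, b in zip(W_list, b_list):
    x = tf.nn.relu(tf.matmul(x, W) + b)
  return x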
Example #7
    def build(self):
        self.W_list = []
        self.b_list = []
        prev_layer_size = self.n_embedding
        for i, layer_size in enumerate(self.layer_sizes):
            self.W_list.append(self.init([prev_layer_size, layer_size]))
            self.b_list.append(model_ops.zeros(shape=[
                layer_size,
            ]))
            prev_layer_size = layer_size
        self.W_list.append(self.init([prev_layer_size, self.n_outputs]))
        self.b_list.append(model_ops.zeros(shape=[
            self.n_outputs,
        ]))
        prev_layer_size = self.n_outputs

        self.trainable_weights = self.W_list + self.b_list
Example #8
  def build(self):
    if self.gaussian_expand:
      self.W = self.init([self.n_input * 11, self.n_input])
      self.b = model_ops.zeros(shape=[
          self.n_input,
      ])
      # Collect weights in a list; self.W + self.b would broadcast-add
      # the two tensors instead of registering them as trainable weights.
      self.trainable_weights = [self.W, self.b]
    else:
      self.trainable_weights = None
Example #9
    def build(self):
        """"Construct internal trainable weights.
    """

        self.W_list = []
        self.b_list = []
        prev_layer_size = self.n_graph_feat
        for layer_size in self.layer_sizes:
            self.W_list.append(self.init([prev_layer_size, layer_size]))
            self.b_list.append(model_ops.zeros(shape=[
                layer_size,
            ]))
            prev_layer_size = layer_size
        self.W_list.append(self.init([prev_layer_size, self.n_outputs]))
        self.b_list.append(model_ops.zeros(shape=[
            self.n_outputs,
        ]))

        self.trainable_weights = self.W_list + self.b_list
Example #10
  def build(self):
    """"Construct internal trainable weights.
    """

    self.W = self.init([self.n_atom_input_feat, self.n_output])
    self.b = model_ops.zeros(shape=[
        self.n_output,
    ])

    # Collect weights in a list; self.W + self.b would broadcast-add tensors.
    self.trainable_weights = [self.W, self.b]
Example #11
    def build(self):
        """Builds this layer.
    """
        #_, support_input_shape = input_shape  #Unpack
        #n_feat = support_input_shape[1]
        n_feat = self.n_feat

        # Support set lstm
        self.support_lstm = LSTMStep(n_feat, 2 * n_feat)
        self.q_init = model_ops.zeros([self.n_support, n_feat])
        self.support_states_init = self.support_lstm.get_initial_states(
            [self.n_support, n_feat])

        # Test lstm
        self.test_lstm = LSTMStep(n_feat, 2 * n_feat)
        self.p_init = model_ops.zeros([self.n_test, n_feat])
        self.test_states_init = self.test_lstm.get_initial_states(
            [self.n_test, n_feat])

        self.trainable_weights = []
Example #12
  def __init__(self,
               pair_features,
               n_pair_features=8,
               n_hidden=100,
               init='glorot_uniform'):
    self.n_pair_features = n_pair_features
    self.n_hidden = n_hidden
    self.init = initializations.get(init)
    W = self.init([n_pair_features, n_hidden * n_hidden])
    b = model_ops.zeros(shape=(n_hidden * n_hidden,))
    self.A = tf.nn.xw_plus_b(pair_features, W, b)
    self.A = tf.reshape(self.A, (-1, n_hidden, n_hidden))
    self.trainable_weights = [W, b]
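Note: this maps each row of pair_features to its own (n_hidden, n_hidden) matrix, the edge-network construction from message passing neural networks. A sketch of how self.A could then produce one message per pair (names and shapes here are illustrative assumptions):

import tensorflow as tf

def pair_messages(A, neighbor_feats):
  # A: (n_pairs, n_hidden, n_hidden); neighbor_feats: (n_pairs, n_hidden).
  # Batched matrix-vector product -> (n_pairs, n_hidden) messages.
  return tf.squeeze(tf.matmul(A, tf.expand_dims(neighbor_feats, 2)), axis=2)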
Example #13
    def build(self, input_shape):
        """Builds this layer.

    Parameters
    ----------
    input_shape: tuple
      Tuple of ((n_test, n_feat), (n_support, n_feat))
    """
        _, support_input_shape = input_shape  #Unpack
        n_feat = support_input_shape[1]

        # Support set lstm
        self.support_lstm = LSTMStep(n_feat)
        self.q_init = model_ops.zeros([self.n_support, n_feat])
        self.support_states_init = self.support_lstm.get_initial_states(
            [self.n_support, n_feat])

        # Test lstm
        self.test_lstm = LSTMStep(n_feat)
        self.p_init = model_ops.zeros([self.n_test, n_feat])
        self.test_states_init = self.test_lstm.get_initial_states(
            [self.n_test, n_feat])

        self.trainable_weights = []
Example #14
  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    inputs = in_layers[0].out_tensor
    atom_numbers = in_layers[1].out_tensor
    in_channels = inputs.get_shape().as_list()[-1]
    self.W = self.init(
        [len(self.atom_number_cases), in_channels, self.out_channels])
    self.b = model_ops.zeros((len(self.atom_number_cases), self.out_channels))
    outputs = []
    for i, atom_case in enumerate(self.atom_number_cases):
      output = self.activation(
          tf.tensordot(inputs, self.W[i, :, :], [[2], [0]]) + self.b[i, :])
      mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
      output = tf.reshape(output * tf.expand_dims(mask, 2), (-1, self.max_atoms,
                                                             self.out_channels))
      outputs.append(output)
    self.out_tensor = tf.add_n(outputs)
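Note: the mask line keeps exactly the atoms whose atomic number equals atom_case: the subtraction is nonzero (hence True when cast to bool) for mismatches, so 1 - to_float(...) is 1 only on matches. A quick illustrative check:

import tensorflow as tf

atom_numbers = tf.constant([6, 1, 8, 6])  # e.g. C, H, O, C
atom_case = 6                             # carbon
mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
# mask evaluates to [1., 0., 0., 1.]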
Example #15
    def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
        """ Generate Radial Symmetry Function """
        init_fn = initializations.get(self.init)  # Set weight initialization
        activation_fn = activations.get(self.activation)
        if in_layers is None:
            in_layers = self.in_layers
        in_layers = convert_to_layers(in_layers)

        inputs = in_layers[0].out_tensor
        atom_numbers = in_layers[1].out_tensor
        in_channels = inputs.get_shape().as_list()[-1]
        self.W = init_fn(
            [len(self.atom_number_cases), in_channels, self.out_channels])

        self.b = model_ops.zeros(
            (len(self.atom_number_cases), self.out_channels))
        outputs = []
        for i, atom_case in enumerate(self.atom_number_cases):
            # Optimization to allow for tensor contraction / broadcasted
            # matmul using a reshape trick. Note that the np and tf matmul
            # behavior differs when dealing with broadcasts.

            a = inputs  # (i,j,k)
            b = self.W[i, :, :]  # (k, l)

            ai = tf.shape(a)[0]
            aj = tf.shape(a)[1]
            ak = tf.shape(a)[2]
            bl = tf.shape(b)[1]

            output = activation_fn(
                tf.reshape(tf.matmul(tf.reshape(a, [ai * aj, ak]), b),
                           [ai, aj, bl]) + self.b[i, :])

            mask = 1 - tf.to_float(tf.cast(atom_numbers - atom_case, tf.bool))
            output = tf.reshape(output * tf.expand_dims(mask, 2),
                                (-1, self.max_atoms, self.out_channels))
            outputs.append(output)
        self.out_tensor = tf.add_n(outputs)
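Note: the reshape trick above computes the same contraction as the tf.tensordot call in the previous example. A small equivalence sketch with illustrative shapes:

import numpy as np
import tensorflow as tf

a = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)  # (i, j, k)
b = tf.constant(np.random.rand(4, 5), dtype=tf.float32)     # (k, l)

out_tensordot = tf.tensordot(a, b, [[2], [0]])                   # (2, 3, 5)
out_reshape = tf.reshape(tf.matmul(tf.reshape(a, [6, 4]), b), [2, 3, 5])
# Both results agree up to floating-point error.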
Example #16
    def build(self):
        """ Construct internal trainable weights.

        TODO(rbharath): Need to make this not set instance variables to
        follow style in other layers.
        """

        self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])
        self.b_AA = model_ops.zeros(shape=[
            self.n_hidden_AA,
        ])

        self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])
        self.b_PA = model_ops.zeros(shape=[
            self.n_hidden_PA,
        ])

        self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])
        self.b_A = model_ops.zeros(shape=[
            self.n_atom_output_feat,
        ])

        self.trainable_weights = [
            self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A
        ]
        if self.update_pair:
            self.W_AP = self.init(
                [self.n_atom_input_feat * 2, self.n_hidden_AP])
            self.b_AP = model_ops.zeros(shape=[
                self.n_hidden_AP,
            ])

            self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])
            self.b_PP = model_ops.zeros(shape=[
                self.n_hidden_PP,
            ])

            self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])
            self.b_P = model_ops.zeros(shape=[
                self.n_pair_output_feat,
            ])

            self.trainable_weights.extend([
                self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P
            ])
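Note: a sketch of how the atom-to-atom (AA) and pair-to-atom (PA) weights above are typically wired in a weave-style atom update; the concatenation and activation are assumptions for illustration, pair features must already be pooled per atom, and the shapes only line up if n_hidden_A = n_hidden_AA + n_hidden_PA:

import tensorflow as tf

def weave_atom_update(atom_feats, pooled_pair_feats, W_AA, b_AA, W_PA, b_PA,
                      W_A, b_A, activation=tf.nn.relu):
  AA = activation(tf.matmul(atom_feats, W_AA) + b_AA)
  PA = activation(tf.matmul(pooled_pair_feats, W_PA) + b_PA)
  return activation(tf.matmul(tf.concat([AA, PA], axis=1), W_A) + b_A)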
Example #17
  def build(self):
    """"Construct internal trainable weights.

    n_atom_features should provide the number of features per atom. 

    Parameters
    ----------
    n_atom_features: int 
      Number of features provied per atom. 
    """
    n_atom_features = self.n_atom_features

    # Generate the nb_affine weights and biases
    self.W_list = [
        self.init([n_atom_features, self.nb_filter])
        for k in range(self.nb_affine)
    ]
    self.b_list = [
        model_ops.zeros(shape=[
            self.nb_filter,
        ]) for k in range(self.nb_affine)
    ]

    self.trainable_weights = self.W_list + self.b_list
Example #18
  def build(self, input_shape):
    """"Construct internal trainable weights.

    This layer expects arguments of form

    [atom_features, deg_slice, membership, deg_adj_list placeholders...]

    input_shape should provide the shapes of each of these tensors.

    Parameters
    ----------
    input_shape: list
      Shapes of incoming tensors
    """
      
    # Generate the nb_affine weights and biases
    atom_features_shape = input_shape[0]
    n_features = atom_features_shape[1]
    self.W_list = [self.init([n_features, self.nb_filter]) 
                   for k in range(self.nb_affine)]
    self.b_list = [model_ops.zeros(shape=[self.nb_filter,])
                   for k in range(self.nb_affine)]

    self.trainable_weights = self.W_list + self.b_list
Example #19
    def build(self):
        """ Construct internal trainable weights.
    """

        self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])
        self.b_AA = model_ops.zeros(shape=[
            self.n_hidden_AA,
        ])

        self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])
        self.b_PA = model_ops.zeros(shape=[
            self.n_hidden_PA,
        ])

        self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])
        self.b_A = model_ops.zeros(shape=[
            self.n_atom_output_feat,
        ])

        self.trainable_weights = [
            self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A
        ]
        if self.update_pair:
            self.W_AP = self.init(
                [self.n_atom_input_feat * 2, self.n_hidden_AP])
            self.b_AP = model_ops.zeros(shape=[
                self.n_hidden_AP,
            ])

            self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])
            self.b_PP = model_ops.zeros(shape=[
                self.n_hidden_PP,
            ])

            self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])
            self.b_P = model_ops.zeros(shape=[
                self.n_pair_output_feat,
            ])

            self.trainable_weights.extend([
                self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P
            ])
Example #20
  def build(self):
    """"Construct internal trainable weights.
    """

    self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])
    self.b_AA = model_ops.zeros(shape=[
        self.n_hidden_AA,
    ])

    self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])
    self.b_PA = model_ops.zeros(shape=[
        self.n_hidden_PA,
    ])

    self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])
    self.b_A = model_ops.zeros(shape=[
        self.n_atom_output_feat,
    ])

    self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])
    self.b_AP = model_ops.zeros(shape=[
        self.n_hidden_AP,
    ])

    self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])
    self.b_PP = model_ops.zeros(shape=[
        self.n_hidden_PP,
    ])

    self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])
    self.b_P = model_ops.zeros(shape=[
        self.n_pair_output_feat,
    ])

    self.trainable_weights = [
        self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A,
        self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P
    ]
Example #21
  def get_initial_states(self, input_shape):
    return [model_ops.zeros(input_shape), model_ops.zeros(input_shape)]
Example #22
  def __call__(self, *parents):
    #   parents = [atom_features, deg_slice, membership, deg_adj_list placeholders...]
    in_channels = parents[0].out_tensor.get_shape()[-1].value

    # Generate the nb_affine weights and biases
    self.W_list = [
        initializations.glorot_uniform([in_channels, self.out_channel])
        for k in range(self.num_deg)
    ]
    self.b_list = [
        model_ops.zeros(shape=[
            self.out_channel,
        ]) for k in range(self.num_deg)
    ]

    # Extract atom_features
    atom_features = parents[0].out_tensor

    # Extract graph topology
    deg_slice = parents[1].out_tensor
    deg_adj_lists = [x.out_tensor for x in parents[3:]]

    # Perform the mol conv
    # atom_features = graph_conv(atom_features, deg_adj_lists, deg_slice,
    #                            self.max_deg, self.min_deg, self.W_list,
    #                            self.b_list)

    W = iter(self.W_list)
    b = iter(self.b_list)

    # Sum all neighbors using adjacency matrix
    deg_summed = self.sum_neigh(atom_features, deg_adj_lists)

    # Get collection of modified atom features
    new_rel_atoms_collection = (self.max_degree + 1 - self.min_degree) * [None]

    for deg in range(1, self.max_degree + 1):
      # Obtain relevant atoms for this degree
      rel_atoms = deg_summed[deg - 1]

      # Get self atoms
      begin = tf.stack([deg_slice[deg - self.min_degree, 0], 0])
      size = tf.stack([deg_slice[deg - self.min_degree, 1], -1])
      self_atoms = tf.slice(atom_features, begin, size)

      # Apply hidden affine to relevant atoms and append
      rel_out = tf.matmul(rel_atoms, next(W)) + next(b)
      self_out = tf.matmul(self_atoms, next(W)) + next(b)
      out = rel_out + self_out

      new_rel_atoms_collection[deg - self.min_degree] = out

    # Determine the min_deg=0 case
    if self.min_degree == 0:
      deg = 0

      begin = tf.stack([deg_slice[deg - self.min_degree, 0], 0])
      size = tf.stack([deg_slice[deg - self.min_degree, 1], -1])
      self_atoms = tf.slice(atom_features, begin, size)

      # Only use the self layer
      out = tf.matmul(self_atoms, next(W)) + next(b)

      new_rel_atoms_collection[deg - self.min_degree] = out

    # Combine all atoms back into the list
    atom_features = tf.concat(axis=0, values=new_rel_atoms_collection)

    if self.activation_fn is not None:
      atom_features = self.activation_fn(atom_features)

    self.out_tensor = atom_features
    return atom_features
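Note: the slicing in this example assumes atoms are sorted by degree and that deg_slice holds one (start, count) row per degree, so tf.slice can pull out the contiguous block of atoms with a given degree. A sketch under that assumption:

import tensorflow as tf

def atoms_of_degree(atom_features, deg_slice, deg, min_degree):
  # deg_slice[d] = (start, count) for atoms of degree d + min_degree,
  # with atom_features sorted so each degree occupies one contiguous block.
  begin = tf.stack([deg_slice[deg - min_degree, 0], 0])
  size = tf.stack([deg_slice[deg - min_degree, 1], -1])
  return tf.slice(atom_features, begin, size)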
Example #23
    def _create_tensor(self):
        #   self.in_layers = [atom_features, deg_slice, membership, deg_adj_list placeholders...]
        in_channels = self.in_layers[0].out_tensor.get_shape()[-1].value

        # Generate the nb_affine weights and biases
        self.W_list = [
            initializations.glorot_uniform([in_channels, self.out_channel])
            for k in range(self.num_deg)
        ]
        self.b_list = [
            model_ops.zeros(shape=[
                self.out_channel,
            ]) for k in range(self.num_deg)
        ]

        # Extract atom_features
        atom_features = self.in_layers[0].out_tensor

        # Extract graph topology
        deg_slice = self.in_layers[1].out_tensor
        deg_adj_lists = [x.out_tensor for x in self.in_layers[3:]]

        # Perform the mol conv
        # atom_features = graph_conv(atom_features, deg_adj_lists, deg_slice,
        #                            self.max_deg, self.min_deg, self.W_list,
        #                            self.b_list)

        W = iter(self.W_list)
        b = iter(self.b_list)

        # Sum all neighbors using adjacency matrix
        deg_summed = self.sum_neigh(atom_features, deg_adj_lists)

        # Get collection of modified atom features
        new_rel_atoms_collection = (self.max_degree + 1 -
                                    self.min_degree) * [None]

        for deg in range(1, self.max_degree + 1):
            # Obtain relevant atoms for this degree
            rel_atoms = deg_summed[deg - 1]

            # Get self atoms
            begin = tf.stack([deg_slice[deg - self.min_degree, 0], 0])
            size = tf.stack([deg_slice[deg - self.min_degree, 1], -1])
            self_atoms = tf.slice(atom_features, begin, size)

            # Apply hidden affine to relevant atoms and append
            rel_out = tf.matmul(rel_atoms, next(W)) + next(b)
            self_out = tf.matmul(self_atoms, next(W)) + next(b)
            out = rel_out + self_out

            new_rel_atoms_collection[deg - self.min_degree] = out

        # Determine the min_deg=0 case
        if self.min_degree == 0:
            deg = 0

            begin = tf.stack([deg_slice[deg - self.min_degree, 0], 0])
            size = tf.stack([deg_slice[deg - self.min_degree, 1], -1])
            self_atoms = tf.slice(atom_features, begin, size)

            # Only use the self layer
            out = tf.matmul(self_atoms, next(W)) + next(b)

            new_rel_atoms_collection[deg - self.min_degree] = out

        # Combine all atoms back into the list
        atom_features = tf.concat(axis=0, values=new_rel_atoms_collection)

        if self.activation_fn is not None:
            atom_features = self.activation_fn(atom_features)

        self.out_tensor = atom_features
        return atom_features
Example #24
    def __init__(self,
                 nb_filter,
                 n_atom_features,
                 batch_size,
                 init='glorot_uniform',
                 activation='linear',
                 dropout=None,
                 max_deg=10,
                 min_deg=0,
                 **kwargs):
        """
        Parameters
        ----------
        nb_filter: int
          Number of convolutional filters.
        n_atom_features: int
          Number of features listed per atom.
        init: str, optional
          Weight initialization for filters.
        activation: str, optional
          Activation function applied after convolution.
        dropout: float, optional
          Dropout probability.
        max_deg: int, optional
          Maximum degree of atoms in molecules.
        min_deg: int, optional
          Minimum degree of atoms in molecules.
        """
        warnings.warn("The dc.nn.GraphConv is "
                      "deprecated. Will be removed in DeepChem 1.4. "
                      "Will be replaced by dc.models.tensorgraph.layers.GraphConv",
                      DeprecationWarning)
        super(GraphConv_and_gather, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.nb_filter = nb_filter  # Save number of filters
        self.dropout = dropout  # Save dropout params
        self.max_deg = max_deg
        self.min_deg = min_deg
        self.batch_size = batch_size
        # Is there a solid explanation here?
        self.nb_affine = 3 * max_deg + (2 - min_deg)
        self.n_atom_features = n_atom_features

        self.beta_init = initializations.get('zero')
        self.gamma_init = initializations.get('one')
        self.epsilon = 1e-5
        self.momentum = 0.99
        self.gamma_regularizer = regularizers.get(None)
        self.beta_regularizer = regularizers.get(None)

        # Generate the nb_affine weights and biases
        self.W_list = [
            self.init([n_atom_features, self.nb_filter])
            for k in range(self.nb_affine)
        ]
        self.b_list = [
            model_ops.zeros(shape=[
                self.nb_filter,
            ]) for k in range(self.nb_affine)
        ]

        self.trainable_weights = self.W_list + self.b_list