Example #1
    def call(self, argument, mask=None):
        """Execute this layer on input tensors.

        Parameters
        ----------
        argument: list
          List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
          Xp should be of shape (n_support, n_feat), where n_test is the size of
          the test set, n_support that of the support set, and n_feat is the
          number of per-atom features.

        Returns
        -------
        list
          Returns two tensors of the same shape as the input. Namely, the output
          shape will be [(n_test, n_feat), (n_support, n_feat)].
        """
        self.build()
        x, xp = argument

        # Get initializations
        p = self.p_init
        q = self.q_init
        # Rename support
        z = xp
        states = self.support_states_init
        x_states = self.test_states_init

        for d in range(self.max_depth):
            # Process support xp using attention
            e = cos(z + q, xp)
            a = tf.nn.softmax(e)
            # Get linear combination of support set
            r = model_ops.dot(a, xp)

            # It is not yet clear whether updating z here (before the test-side
            # attention) or after the LSTM updates works better.
            #z = r

            # Process test x using attention
            x_e = cos(x + p, z)
            x_a = tf.nn.softmax(x_e)
            s = model_ops.dot(x_a, z)

            # Generate new support attention states
            qr = model_ops.concatenate([q, r], axis=1)
            q, states = self.support_lstm([qr] + states)

            # Generate new test attention states
            ps = model_ops.concatenate([p, s], axis=1)
            p, x_states = self.test_lstm([ps] + x_states)

            # Update the support representation for the next iteration
            z = r

        #return [x+p, z+q]
        return [x + p, xp + q]
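
Written out (my notation, read directly off the code above; X is the test batch and X' = Xp the support set), each iteration of the loop in Examples #1 and #2 computes:

\begin{aligned}
r_d &= \mathrm{softmax}\!\left(\cos(z_d + q_d,\; X')\right) X' \\
s_d &= \mathrm{softmax}\!\left(\cos(X + p_d,\; z_d)\right) z_d \\
q_{d+1} &= \mathrm{LSTM}_{\mathrm{support}}\!\left([q_d;\, r_d]\right) \\
p_{d+1} &= \mathrm{LSTM}_{\mathrm{test}}\!\left([p_d;\, s_d]\right) \\
z_{d+1} &= r_d
\end{aligned}

with the layer finally returning (X + p, X' + q).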
Example #2
  def call(self, argument, mask=None):
    """Execute this layer on input tensors.

    Parameters
    ----------
    argument: list
      List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
      Xp should be of shape (n_support, n_feat), where n_test is the size of
      the test set, n_support that of the support set, and n_feat is the number
      of per-atom features.

    Returns
    -------
    list
      Returns two tensors of the same shape as the input. Namely, the output
      shape will be [(n_test, n_feat), (n_support, n_feat)].
    """
    self.build()
    x, xp = argument

    # Get initializations
    p = self.p_init
    q = self.q_init
    # Rename support
    z = xp
    states = self.support_states_init
    x_states = self.test_states_init

    for d in range(self.max_depth):
      # Process support xp using attention
      e = cos(z + q, xp)
      a = tf.nn.softmax(e)
      # Get linear combination of support set
      r = model_ops.dot(a, xp)

      # It is not yet clear whether updating z here (before the test-side
      # attention) or after the LSTM updates works better.
      #z = r

      # Process test x using attention
      x_e = cos(x + p, z)
      x_a = tf.nn.softmax(x_e)
      s = model_ops.dot(x_a, z)

      # Generate new support attention states
      qr = model_ops.concatenate([q, r], axis=1)
      q, states = self.support_lstm([qr] + states)

      # Generate new test attention states
      ps = model_ops.concatenate([p, s], axis=1)
      p, x_states = self.test_lstm([ps] + x_states)

      # Update the support representation for the next iteration
      z = r

    #return [x+p, z+q]
    return [x + p, xp + q]
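
A minimal NumPy sketch of one refinement step from Examples #1 and #2 follows. The shapes of p and q and the row-wise cosine are my assumptions for illustration, not DeepChem's model_ops API (note that the cos() actually used, shown in Examples #8 and #9, normalizes globally rather than per row):

import numpy as np

def softmax(e):
  e = e - e.max(axis=1, keepdims=True)  # shift for numerical stability
  w = np.exp(e)
  return w / w.sum(axis=1, keepdims=True)

def rowwise_cos(a, b):
  # (n_a, f) x (n_b, f) -> (n_a, n_b) cosine similarities
  a_n = a / (np.linalg.norm(a, axis=1, keepdims=True) + 1e-7)
  b_n = b / (np.linalg.norm(b, axis=1, keepdims=True) + 1e-7)
  return a_n @ b_n.T

n_test, n_support, n_feat = 4, 6, 8
x = np.random.randn(n_test, n_feat)      # test embeddings
xp = np.random.randn(n_support, n_feat)  # support embeddings
p = np.zeros((n_test, n_feat))           # test attention state
q = np.zeros((n_support, n_feat))        # support attention state
z = xp

# Support side: attend over the support set itself
r = softmax(rowwise_cos(z + q, xp)) @ xp   # (n_support, n_feat)
# Test side: attend over the current support representation
s = softmax(rowwise_cos(x + p, z)) @ z     # (n_test, n_feat)
# q and p would then be updated by the two LSTMs from [q; r] and [p; s].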
Example #3
  def call(self, x_states, mask=None):
    x, h_tm1, c_tm1 = x_states # Unpack

    # Gate pre-activations; adapted from the Keras LSTM implementation
    z = model_ops.dot(x, self.W) + model_ops.dot(h_tm1, self.U) + self.b

    z0 = z[:, :self.output_dim]
    z1 = z[:, self.output_dim: 2 * self.output_dim]
    z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
    z3 = z[:, 3 * self.output_dim:]

    i = self.inner_activation(z0)
    f = self.inner_activation(z1)
    c = f * c_tm1 + i * self.activation(z2)
    o = self.inner_activation(z3)

    h = o * self.activation(c)

    return o, [h, c]  # note: returns the output-gate activation, with [h, c] as the new states
Example #4
    def call(self, x_states, mask=None):
        x, h_tm1, c_tm1 = x_states  # Unpack

        # Gate pre-activations; adapted from the Keras LSTM implementation
        z = model_ops.dot(x, self.W) + model_ops.dot(h_tm1, self.U) + self.b

        z0 = z[:, :self.output_dim]
        z1 = z[:, self.output_dim:2 * self.output_dim]
        z2 = z[:, 2 * self.output_dim:3 * self.output_dim]
        z3 = z[:, 3 * self.output_dim:]

        i = self.inner_activation(z0)
        f = self.inner_activation(z1)
        c = f * c_tm1 + i * self.activation(z2)
        o = self.inner_activation(z3)

        h = o * self.activation(c)

        return o, [h, c]
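
As a shape check for the gate slicing in Examples #3 and #4: the pre-activation z has width 4 * output_dim and is split into input, forget, candidate-cell, and output quarters. The toy sizes below are illustrative only:

import numpy as np

batch, output_dim = 2, 3
z = np.arange(batch * 4 * output_dim, dtype=float).reshape(batch, 4 * output_dim)

z0 = z[:, :output_dim]                    # input gate pre-activation
z1 = z[:, output_dim:2 * output_dim]      # forget gate pre-activation
z2 = z[:, 2 * output_dim:3 * output_dim]  # candidate cell pre-activation
z3 = z[:, 3 * output_dim:]                # output gate pre-activation

assert z0.shape == z1.shape == z2.shape == z3.shape == (batch, output_dim)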
Example #5
  def __call__(self, x):
    # Note: W and b are re-created on every call; applying this layer to more
    # than one input will allocate fresh weights each time.
    self.W = self.add_weight(
        (self.input_dim, self.output_dim),
        initializer=self.init,
        name='{}_W'.format(self.name))
    self.b = self.add_weight(
        (self.output_dim,), initializer='zero', name='{}_b'.format(self.name))

    output = model_ops.dot(x, self.W)
    if self.bias:
      output += self.b
    return output
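
A build-once variant of Example #5, as a sketch. It assumes the same add_weight helper and attributes exist on the class; the _built flag is my addition and guards against re-creating the weights when the layer is applied more than once:

  def __call__(self, x):
    if not getattr(self, '_built', False):  # create weights only on first call
      self.W = self.add_weight(
          (self.input_dim, self.output_dim),
          initializer=self.init,
          name='{}_W'.format(self.name))
      self.b = self.add_weight(
          (self.output_dim,), initializer='zero', name='{}_b'.format(self.name))
      self._built = True
    output = model_ops.dot(x, self.W)
    if self.bias:
      output += self.b
    return output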
Example #6
  def call(self, x_xp, mask=None):
    """Execute this layer on input tensors.

    Parameters
    ----------
    x_xp: list
      List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
      Xp should be of shape (n_support, n_feat), where n_test is the size of
      the test set, n_support that of the support set, and n_feat is the number
      of per-atom features.

    Returns
    -------
    list
      Returns two tensors of the same shape as the input. Namely, the output
      shape will be [(n_test, n_feat), (n_support, n_feat)].
    """
    # x is test set, xp is support set.
    x, xp = x_xp

    ## Initializes trainable weights.
    n_feat = self.n_feat

    self.lstm = LSTMStep(n_feat, 2 * n_feat)
    self.q_init = model_ops.zeros([self.n_test, n_feat])
    self.r_init = model_ops.zeros([self.n_test, n_feat])
    self.states_init = self.lstm.get_initial_states([self.n_test, n_feat])

    self.trainable_weights = [self.q_init, self.r_init]

    ### Performs computations

    # Get initializations
    q = self.q_init
    #r = self.r_init
    states = self.states_init

    for d in range(self.max_depth):
      # Process using attention
      # Eqn (4), appendix A.1 of Matching Networks paper
      e = cos(x + q, xp)
      a = tf.nn.softmax(e)
      r = model_ops.dot(a, xp)

      # Generate new attention states
      y = model_ops.concatenate([q, r], axis=1)
      q, states = self.lstm([y] + states)  #+ self.lstm.get_constants(x)

    return [x + q, xp]
Example #7
    def call(self, x_xp, mask=None):
        """Execute this layer on input tensors.

        Parameters
        ----------
        x_xp: list
          List of two tensors (X, Xp). X should be of shape (n_test, n_feat) and
          Xp should be of shape (n_support, n_feat), where n_test is the size of
          the test set, n_support that of the support set, and n_feat is the
          number of per-atom features.

        Returns
        -------
        list
          Returns two tensors of the same shape as the input. Namely, the output
          shape will be [(n_test, n_feat), (n_support, n_feat)].
        """
        # x is test set, xp is support set.
        x, xp = x_xp

        ## Initializes trainable weights.
        n_feat = self.n_feat

        self.lstm = LSTMStep(n_feat, 2 * n_feat)
        self.q_init = model_ops.zeros([self.n_test, n_feat])
        self.r_init = model_ops.zeros([self.n_test, n_feat])
        self.states_init = self.lstm.get_initial_states([self.n_test, n_feat])

        self.trainable_weights = [self.q_init, self.r_init]

        ### Performs computations

        # Get initializations
        q = self.q_init
        #r = self.r_init
        states = self.states_init

        for d in range(self.max_depth):
            # Process using attention
            # Eqn (4), appendix A.1 of Matching Networks paper
            e = cos(x + q, xp)
            a = tf.nn.softmax(e)
            r = model_ops.dot(a, xp)

            # Generate new attention states
            y = model_ops.concatenate([q, r], axis=1)
            q, states = self.lstm([y] + states)  #+ self.lstm.get_constants(x)

        return [x + q, xp]
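
For reference, the recurrence this loop is modeled on, from appendix A.1 of the Matching Networks paper (Vinyals et al., 2016), is roughly the following (up to notation; note the paper uses dot-product attention where the code above uses cosine similarity):

\begin{aligned}
\hat{h}_k,\, c_k &= \mathrm{LSTM}\!\left(f'(\hat{x}),\; [h_{k-1};\, r_{k-1}],\; c_{k-1}\right) \\
h_k &= \hat{h}_k + f'(\hat{x}) \\
r_{k-1} &= \sum_{i=1}^{|S|} a\!\left(h_{k-1}, g(x_i)\right) g(x_i) \\
a\!\left(h_{k-1}, g(x_i)\right) &= \mathrm{softmax}\!\left(h_{k-1}^{\top} g(x_i)\right)
\end{aligned}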
Example #8
def cos(x, y):
  # Note: the denominator sums over all entries of x and y, so this is a
  # globally scaled dot product rather than a strict row-wise cosine similarity.
  denom = (
      model_ops.sqrt(model_ops.sum(tf.square(x)) *
                     model_ops.sum(tf.square(y))) + model_ops.epsilon())
  return model_ops.dot(x, tf.transpose(y)) / denom
Example #9
def cos(x, y):
    denom = (model_ops.sqrt(
        model_ops.sum(tf.square(x)) * model_ops.sum(tf.square(y))) +
             model_ops.epsilon())
    return model_ops.dot(x, tf.transpose(y)) / denom
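
A quick check of the normalization in Examples #8 and #9, using NumPy stand-ins for model_ops and tf (illustrative only): the whole similarity matrix is divided by a single global scalar, not by per-pair norms.

import numpy as np

def cos_global(x, y, eps=1e-7):
  denom = np.sqrt(np.sum(np.square(x)) * np.sum(np.square(y))) + eps
  return (x @ y.T) / denom

x = np.random.randn(4, 8)
y = np.random.randn(6, 8)
out = cos_global(x, y)
assert out.shape == (4, 6)
# A row-wise cosine similarity would instead divide entry (i, j) by
# ||x_i|| * ||y_j||, bounding each value to [-1, 1].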
Example #10
  def call(self, x):
    output = model_ops.dot(x, self.W)
    if self.bias:
      output += self.b
    return self.activation(output)
Example #11
    def call(self, x):
        output = model_ops.dot(x, self.W)
        if self.bias:
            output += self.b
        return self.activation(output)
Example #12
  def __call__(self, x):
    output = model_ops.dot(x, self.W)
    if self.bias:
      output += self.b
    return output
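
The forward pass in Examples #10 through #12 is just an affine map plus an optional bias and activation. A NumPy equivalent (function name and defaults are hypothetical):

import numpy as np

def dense_forward(x, W, b=None, activation=np.tanh):
  output = x @ W          # affine map: (batch, in) x (in, out) -> (batch, out)
  if b is not None:
    output += b           # optional bias
  return activation(output)

x = np.random.randn(5, 8)
W = np.random.randn(8, 3)
b = np.zeros(3)
assert dense_forward(x, W, b).shape == (5, 3)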