Example #1
    def get_scores(self):
        """Adds tensor operations for computing scores.

        Computes the class prediction yhat (eqn (1) of the Matching Networks
        paper) for test compounds.
        """
        # Get featurization for test
        # Shape (n_test, n_feat)
        test_feat = self.model.get_test_output()
        # Get featurization for support
        # Shape (n_support, n_feat)
        support_feat = self.model.get_support_output()

        # Computes the inner part c() of the kernel (the inset equation in
        # section 2.1.1 of the Matching Networks paper). Cosine similarity
        # normalizes the feature vectors before taking inner products.
        g = model_ops.cosine_distances(test_feat, support_feat)
        # Note that gram matrix g has shape (n_test, n_support)

        # attention corresponds to a(xhat, x_i) in eqn (1) of the Matching
        # Networks paper (https://arxiv.org/pdf/1606.04080v1.pdf).
        # Computes softmax across axis 1 (normalizing over the support set for
        # each test entry) to get the attention vector.
        # Shape (n_test, n_support)
        attention = tf.nn.softmax(g)
        # Weighted sum of support labels
        # Shape (n_support, 1)
        support_labels = tf.expand_dims(self.support_label_placeholder, 1)
        # pred is yhat in eqn (1) of Matching Networks.
        # Shape (n_test, n_support) x (n_support, 1) = (n_test, 1)
        pred = tf.matmul(attention, support_labels)
        return pred
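
For intuition, eqn (1) fits in a few lines of NumPy. The sketch below is illustrative, not DeepChem code: it assumes the kernel g is the cosine-similarity Gram matrix between the two embedding sets, and all names are hypothetical.

import numpy as np

def matching_net_predict(test_feat, support_feat, support_labels):
    # Cosine-similarity Gram matrix: normalize rows, take inner products.
    test_norm = test_feat / np.linalg.norm(test_feat, axis=1, keepdims=True)
    sup_norm = support_feat / np.linalg.norm(support_feat, axis=1, keepdims=True)
    g = test_norm @ sup_norm.T  # Shape (n_test, n_support)

    # Softmax over the support axis gives the attention a(xhat, x_i);
    # each row sums to 1.
    e = np.exp(g - g.max(axis=1, keepdims=True))
    attention = e / e.sum(axis=1, keepdims=True)

    # yhat = sum_i a(xhat, x_i) * y_i, eqn (1) of Matching Networks.
    return attention @ support_labels  # Shape (n_test,)

rng = np.random.default_rng(0)
print(matching_net_predict(
    rng.normal(size=(4, 8)), rng.normal(size=(6, 8)),
    np.array([0., 1., 1., 0., 1., 0.])))  # 4 values in (0, 1)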
Example #2
    def get_scores(self):
        """Adds tensor operations for computing scores.

        Computes the class prediction yhat (eqn (1) of the Matching Networks
        paper) for test compounds.
        """
        # Get featurization for test
        # Shape (n_test, n_feat)
        test_feat = self.model.get_test_output()
        # Get featurization for support
        # Shape (n_support, n_feat)
        support_feat = self.model.get_support_output()

        # Computes the inner part c() of the kernel (the inset equation in
        # section 2.1.1 of the Matching Networks paper). Cosine similarity
        # normalizes the feature vectors before taking inner products.
        if self.similarity == 'cosine':
            g = model_ops.cosine_distances(test_feat, support_feat)
        else:
            raise ValueError("Only cosine similarity is supported.")
        # TODO(rbharath): euclidean kernel is broken!
        # elif self.similarity == 'euclidean':
        #     g = model_ops.euclidean_distance(test_feat, support_feat)
        # Note that gram matrix g has shape (n_test, n_support)

        # attention corresponds to a(xhat, x_i) in eqn (1) of the Matching
        # Networks paper (https://arxiv.org/pdf/1606.04080v1.pdf).
        # Computes softmax across axis 1 (normalizing over the support set for
        # each test entry) to get the attention vector.
        # Shape (n_test, n_support)
        attention = tf.nn.softmax(g)

        # Weighted sum of support labels
        # Shape (n_support, 1)
        support_labels = tf.expand_dims(self.support_label_placeholder, 1)
        # pred is yhat in eqn (1) of Matching Networks.
        # Shape squeeze((n_test, n_support) x (n_support, 1)) = (n_test,)
        pred = tf.squeeze(tf.matmul(attention, support_labels), [1])

        # Clip softmax probabilities to range [epsilon, 1-epsilon]
        # Shape (n_test,)
        pred = tf.clip_by_value(pred, 1e-7, 1. - 1e-7)

        # Convert to logit space using inverse sigmoid (logit) function
        # logit function: log(pred) - log(1-pred)
        # Used to invoke tf.nn.sigmoid_cross_entropy_with_logits
        # in Cross Entropy calculation.
        # Shape (n_test,)
        scores = tf.log(pred) - tf.log(
            tf.constant(1., dtype=tf.float32) - pred)

        return pred, scores
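
The last two steps exist because tf.nn.sigmoid_cross_entropy_with_logits expects logits, not probabilities: clipping keeps log() finite at the endpoints, and the logit function inverts the implicit sigmoid. A minimal NumPy sketch of that round trip (the function names here are hypothetical):

import numpy as np

def to_logits(pred, eps=1e-7):
    # Clip so log() never sees exactly 0 or 1.
    pred = np.clip(pred, eps, 1.0 - eps)
    # logit(p) = log(p) - log(1 - p), the inverse of the sigmoid.
    return np.log(pred) - np.log(1.0 - pred)

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

p = np.array([0.25, 0.5, 0.9])
scores = to_logits(p)
print(np.allclose(sigmoid(scores), p))  # True: sigmoid undoes the logit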