Example #1
    def testLiftedStruct(self):
        num_data = 10
        feat_dim = 6
        margin = 1.0
        num_classes = 4

        embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
        labels = np.random.randint(0, num_classes,
                                   size=num_data).astype(np.float32)
        # Reshape labels to compute adjacency matrix.
        # pylint: disable=E1136
        labels_reshaped = np.reshape(labels, (labels.shape[0], 1))
        # pylint: enable=E1136

        # Compute the loss in NP
        # pylint: disable=E1111
        adjacency = np.equal(labels_reshaped, labels_reshaped.T)
        # pylint: enable=E1111
        pdist_matrix = pairwise_distance_np(embedding)
        loss_np = 0.0
        num_constraints = 0.0
        for i in range(num_data):
            for j in range(num_data):
                if adjacency[i][j] > 0.0 and i != j:
                    d_pos = pdist_matrix[i][j]
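                    # Gather (margin - distance) terms for every negative
                    # of both anchors i and j.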
                    negs = []
                    for k in range(num_data):
                        if not adjacency[i][k]:
                            negs.append(margin - pdist_matrix[i][k])
                    for l in range(num_data):
                        if not adjacency[j][l]:
                            negs.append(margin - pdist_matrix[j][l])

                    negs = np.array(negs)
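                    # Numerically stable log-sum-exp of the negative terms
                    # (a "soft" maximum).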
                    max_elem = np.max(negs)
                    negs -= max_elem
                    negs = np.exp(negs)
                    soft_maximum = np.log(np.sum(negs)) + max_elem

                    num_constraints += 1.0
                    this_loss = max(soft_maximum + d_pos, 0)
                    loss_np += this_loss * this_loss

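        # Mean over all constraints, with the extra 1/2 factor from the
        # lifted structured loss definition.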
        loss_np = loss_np / num_constraints / 2.0

        # Compute the loss in TF.
        y_true = tf.constant(labels)
        y_pred = tf.constant(embedding)
        cce_obj = lifted.LiftedStructLoss()
        loss = cce_obj(y_true, y_pred)
        self.assertAlmostEqual(self.evaluate(loss), loss_np, 3)
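Example #1 calls a pairwise_distance_np helper that the snippet does not define. A minimal NumPy sketch of such a helper, assuming it returns the full matrix of Euclidean distances between all pairs of embeddings, could look like this:

import numpy as np


def pairwise_distance_np(feature, squared=False):
    """Pairwise Euclidean distance matrix for a [num_data, feat_dim] batch."""
    num_data = feature.shape[0]
    dists = np.zeros((num_data, num_data), dtype=feature.dtype)
    for i in range(num_data):
        for j in range(num_data):
            diff = feature[i] - feature[j]
            squared_dist = np.dot(diff, diff)
            dists[i, j] = squared_dist if squared else np.sqrt(squared_dist)
    return dists
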
Example #2
def test_lifted_struct(dtype):
    num_data = 10
    feat_dim = 6
    margin = 1.0
    num_classes = 4

    embedding = np.random.rand(num_data, feat_dim).astype(np.float32)
    labels = np.random.randint(0, num_classes, size=num_data).astype(np.float32)

    # Compute the loss in NP
    loss_np = lifted_struct_loss_np(labels, embedding, margin)

    # Compute the loss in TF.
    y_true = tf.constant(labels)
    y_pred = tf.constant(embedding, dtype=dtype)
    cce_obj = lifted.LiftedStructLoss()
    loss = cce_obj(y_true, y_pred)
    test_utils.assert_allclose_according_to_type(loss.numpy(), loss_np)
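Example #2 delegates the reference computation to lifted_struct_loss_np, which is also not shown. A sketch that packages the per-pair loop from Example #1 into a function, reusing the pairwise_distance_np sketch above, might look like this:

import numpy as np


def lifted_struct_loss_np(labels, embedding, margin):
    """Reference NumPy version of the lifted structured loss (see Example #1)."""
    num_data = labels.shape[0]
    adjacency = np.equal(labels.reshape(num_data, 1), labels.reshape(1, num_data))
    pdist_matrix = pairwise_distance_np(embedding)

    loss_np = 0.0
    num_constraints = 0.0
    for i in range(num_data):
        for j in range(num_data):
            if adjacency[i][j] and i != j:
                d_pos = pdist_matrix[i][j]
                # (margin - distance) for every negative of both anchors.
                negs = [margin - pdist_matrix[i][k]
                        for k in range(num_data) if not adjacency[i][k]]
                negs += [margin - pdist_matrix[j][k]
                         for k in range(num_data) if not adjacency[j][k]]
                negs = np.array(negs)
                # Numerically stable log-sum-exp (soft maximum).
                max_elem = np.max(negs)
                soft_maximum = np.log(np.sum(np.exp(negs - max_elem))) + max_elem

                num_constraints += 1.0
                this_loss = max(soft_maximum + d_pos, 0)
                loss_np += this_loss * this_loss
    return loss_np / num_constraints / 2.0
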
Example #3
    def test_serialization(self):
        loss = lifted.LiftedStructLoss()
        new_loss = tf.keras.losses.deserialize(tf.keras.losses.serialize(loss))
Example #4
def test_serialization():
    loss = lifted.LiftedStructLoss()
    tf.keras.losses.deserialize(tf.keras.losses.serialize(loss))
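
Beyond the tests, the loss is used like any other Keras loss. A hedged usage sketch, assuming these examples come from TensorFlow Addons, where the class is exposed as tfa.losses.LiftedStructLoss and expects integer class ids as y_true and embedding vectors as y_pred:

import tensorflow as tf
import tensorflow_addons as tfa  # assumed provider of LiftedStructLoss

# Hypothetical embedding network: the final layer is linear so the model
# outputs raw embedding vectors for the loss to compare.
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(6,)),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(8),
])
model.compile(optimizer="adam", loss=tfa.losses.LiftedStructLoss(margin=1.0))

# y holds integer class ids; positive/negative pairs are formed per batch.
x = tf.random.uniform((32, 6))
y = tf.random.uniform((32,), maxval=4, dtype=tf.int32)
model.fit(x, y, epochs=1, verbose=0)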