Example #1
    def testGradientReversalOp(self):
        with tf.Graph().as_default():
            with self.test_session():
                # Test that in forward prop, gradient reversal op acts as the
                # identity operation.
                examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
                output = grl_ops.gradient_reversal(examples)
                expected_output = examples
                self.assertAllEqual(output.eval(), expected_output.eval())

                # Test that shape inference works as expected.
                self.assertAllEqual(output.get_shape(),
                                    expected_output.get_shape())

                # Test that in backward prop, gradient reversal op multiplies
                # gradients by -1.
                examples = tf.constant([[1.0]])
                w = tf.get_variable(name='w', shape=[1, 1])
                b = tf.get_variable(name='b', shape=[1])
                init_op = tf.global_variables_initializer()
                init_op.run()
                features = tf.nn.xw_plus_b(examples, w, b)
                # Construct two outputs: features layer passes directly to output1, but
                # features layer passes through a gradient reversal layer before
                # reaching output2.
                output1 = features
                output2 = grl_ops.gradient_reversal(features)
                gold = tf.constant([1.0])
                loss1 = gold - output1
                loss2 = gold - output2
                opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
                grads_and_vars_1 = opt.compute_gradients(
                    loss1, tf.trainable_variables())
                grads_and_vars_2 = opt.compute_gradients(
                    loss2, tf.trainable_variables())
                self.assertAllEqual(len(grads_and_vars_1),
                                    len(grads_and_vars_2))
                for i in range(len(grads_and_vars_1)):
                    g1 = grads_and_vars_1[i][0]
                    g2 = grads_and_vars_2[i][0]
                    # Verify that gradients of loss1 are the negative of gradients of
                    # loss2.
                    self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
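
The op exercised above comes from grl_ops as a custom kernel; the test only pins down its contract: identity in the forward pass, gradients multiplied by -1 in the backward pass. A minimal pure-TensorFlow sketch with the same contract, written with tf.custom_gradient (TF 1.x) for illustration only:

import tensorflow as tf

@tf.custom_gradient
def gradient_reversal(x):
  # Forward pass: behave as the identity operation.
  def grad(dy):
    # Backward pass: flip the sign of the incoming gradient.
    return -dy
  return tf.identity(x), grad
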
Example #2
  def testGradientReversalOp(self):
    with tf.Graph().as_default():
      with self.test_session():
        # Test that in forward prop, gradient reversal op acts as the
        # identity operation.
        examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
        output = grl_ops.gradient_reversal(examples)
        expected_output = examples
        self.assertAllEqual(output.eval(), expected_output.eval())

        # Test that shape inference works as expected.
        self.assertAllEqual(output.get_shape(), expected_output.get_shape())

        # Test that in backward prop, gradient reversal op multiplies
        # gradients by -1.
        examples = tf.constant([[1.0]])
        w = tf.get_variable(name='w', shape=[1, 1])
        b = tf.get_variable(name='b', shape=[1])
        init_op = tf.global_variables_initializer()
        init_op.run()
        features = tf.nn.xw_plus_b(examples, w, b)
        # Construct two outputs: features layer passes directly to output1, but
        # features layer passes through a gradient reversal layer before
        # reaching output2.
        output1 = features
        output2 = grl_ops.gradient_reversal(features)
        gold = tf.constant([1.0])
        loss1 = gold - output1
        loss2 = gold - output2
        opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        grads_and_vars_1 = opt.compute_gradients(loss1,
                                                 tf.trainable_variables())
        grads_and_vars_2 = opt.compute_gradients(loss2,
                                                 tf.trainable_variables())
        self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
        for i in range(len(grads_and_vars_1)):
          g1 = grads_and_vars_1[i][0]
          g2 = grads_and_vars_2[i][0]
          # Verify that gradients of loss1 are the negative of gradients of
          # loss2.
          self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
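
The same forward/backward contract can be checked directly with tf.gradients instead of going through an optimizer; a short sketch, assuming grl_ops is importable as in the tests above:

with tf.Graph().as_default(), tf.Session() as sess:
  x = tf.constant([1.0, 2.0, 3.0])
  y = grl_ops.gradient_reversal(x)
  # d(sum(x))/dx would be [1, 1, 1]; the reversal flips the sign.
  dx, = tf.gradients(tf.reduce_sum(y), x)
  print(sess.run(dx))  # [-1. -1. -1.]
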
Example #3
def dann_loss(source_samples, target_samples, weight, scope=None):
    """Adds the domain adversarial (DANN) loss.

    Args:
      source_samples: a tensor of shape [num_samples, num_features].
      target_samples: a tensor of shape [num_samples, num_features].
      weight: the weight of the loss.
      scope: optional name scope for summary tags.

    Returns:
      a scalar tensor representing the domain adversarial loss value.
    """
    with tf.variable_scope('dann'):
        batch_size = tf.shape(source_samples)[0]
        samples = tf.concat(axis=0, values=[source_samples, target_samples])
        samples = slim.flatten(samples)

        domain_selection_mask = tf.concat(
            axis=0,
            values=[tf.zeros((batch_size, 1)),
                    tf.ones((batch_size, 1))])

        # Perform the gradient reversal and be careful with the shape.
        grl = grl_ops.gradient_reversal(samples)
        grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

        grl = slim.fully_connected(grl, 100, scope='fc1')
        logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2')

        domain_predictions = tf.sigmoid(logits)

    domain_loss = tf.losses.log_loss(domain_selection_mask,
                                     domain_predictions,
                                     weights=weight)

    domain_accuracy = utils.accuracy(tf.round(domain_predictions),
                                     domain_selection_mask)

    assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
    with tf.control_dependencies([assert_op]):
        tag_loss = 'losses/Domain Loss'
        tag_accuracy = 'losses/Domain Accuracy'
        if scope:
            tag_loss = scope + tag_loss
            tag_accuracy = scope + tag_accuracy

        tf.summary.scalar(tag_loss, domain_loss)
        tf.summary.scalar(tag_accuracy, domain_accuracy)

    return domain_loss
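
A rough usage sketch for dann_loss; shared_encoder, classifier and the input tensors below are placeholders, not part of the source. Because of the gradient reversal inside dann_loss, minimizing the combined objective trains the fc1/fc2 domain classifier to separate the domains while pushing the shared features toward domain invariance:

# Placeholder encoder and classifier; names are illustrative only.
source_features = shared_encoder(source_images)
target_features = shared_encoder(target_images, reuse=True)

task_loss = tf.losses.softmax_cross_entropy(
    onehot_labels=source_labels, logits=classifier(source_features))
adversarial_loss = dann_loss(source_features, target_features, weight=0.1)

total_loss = task_loss + adversarial_loss
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)
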
Example #4
def dann_loss(source_samples, target_samples, weight, scope=None):
  """Adds the domain adversarial (DANN) loss.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the loss.
    scope: optional name scope for summary tags.

  Returns:
    a scalar tensor representing the domain adversarial loss value.
  """
  with tf.variable_scope('dann'):
    batch_size = tf.shape(source_samples)[0]
    samples = tf.concat(axis=0, values=[source_samples, target_samples])
    samples = slim.flatten(samples)

    domain_selection_mask = tf.concat(
        axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))])

    # Perform the gradient reversal and be careful with the shape.
    grl = grl_ops.gradient_reversal(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

    grl = slim.fully_connected(grl, 100, scope='fc1')
    logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2')

    domain_predictions = tf.sigmoid(logits)

  domain_loss = tf.losses.log_loss(
      domain_selection_mask, domain_predictions, weights=weight)

  domain_accuracy = utils.accuracy(
      tf.round(domain_predictions), domain_selection_mask)

  assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
  with tf.control_dependencies([assert_op]):
    tag_loss = 'losses/domain_loss'
    tag_accuracy = 'losses/domain_accuracy'
    if scope:
      tag_loss = scope + tag_loss
      tag_accuracy = scope + tag_accuracy

    tf.summary.scalar(tag_loss, domain_loss)
    tf.summary.scalar(tag_accuracy, domain_accuracy)

  return domain_loss
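
Note that tf.losses.log_loss registers the weighted domain loss in the default LOSSES collection, so a pipeline that builds its objective with tf.losses.get_total_loss() picks it up automatically; the returned tensor is then mainly useful for logging or for assembling the objective by hand. A sketch, reusing the placeholder feature tensors from above:

domain_loss = dann_loss(source_features, target_features, weight=0.1)
# get_total_loss() sums everything in the LOSSES collection,
# including the weighted domain loss added by dann_loss.
total_loss = tf.losses.get_total_loss()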