Example #1
  def testIndexedSlicesGradientInCondInWhileLoop(self):
    with ops.Graph().as_default():
      embedding_matrix = tf.get_variable(
          "embedding_matrix", [5, 5],
          initializer=tf.random_normal_initializer())

      def Cond(it, _):
        return it < 5
      def Body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        cost = tf.cond(tf.equal(it, 3),
                       lambda: tf.square(cost),
                       lambda: cost + tf.reduce_sum(embedding))
        return it + 1, cost
      _, cost = control_flow_ops.while_loop(
          Cond, Body, [tf.constant(0), tf.constant(0.0)])

      # The gradient of the loop cost w.r.t. the embedding matrix arrives as
      # tf.IndexedSlices; segment_sum collapses repeated indices so the
      # result can be compared entry-wise with the static version below.
      dynamic_grads = tf.gradients(cost, [embedding_matrix])[0]
      dynamic_grads = tf.segment_sum(dynamic_grads.values,
                                     dynamic_grads.indices)

      # Statically unrolled equivalent of the loop: the lookup is summed in
      # at it = 0, 1, 2, the running cost is squared at it == 3, and the
      # lookup is added once more at it == 4.
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      static = tf.square(
          tf.reduce_sum(embedding) +
          tf.reduce_sum(embedding) +
          tf.reduce_sum(embedding)) + tf.reduce_sum(embedding)
      static_grads = tf.gradients(static, [embedding_matrix])[0]
      static_grads = tf.segment_sum(static_grads.values, static_grads.indices)

      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))
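
All five examples in this section are test methods excerpted from TensorFlow 1.x test classes, so they rely on names imported by the surrounding files. A rough preamble for running them standalone is sketched below; the module paths are an assumption based on the TF 1.x source layout, and the *OptimizerWrapper classes used in the later examples are helpers defined in the tensorflow.contrib.constrained_optimization test files themselves, not public API.

# Hypothetical preamble for running these snippets under TensorFlow 1.x.
# Exact module paths may differ across 1.x releases.
import numpy as np
import tensorflow as tf

from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import momentum
# test_util.ConstantMinimizationProblem comes from the contrib
# constrained-optimization test utilities (path assumed):
from tensorflow.contrib.constrained_optimization.python import test_util
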
Example #2
  def test_multiplicative_swap_regret_optimizer(self):
    """Tests that the stochastic matrices update as expected."""
    minimization_problem = test_util.ConstantMinimizationProblem(
        np.array([0.6, -0.1, 0.4]))
    optimizer = MultiplicativeSwapRegretOptimizerWrapper(
        gradient_descent.GradientDescentOptimizer(1.0),
        initial_multiplier_radius=0.8)
    train_op = optimizer.minimize_constrained(minimization_problem)

    # Calculated using a numpy+python implementation of the algorithm.
    expected_matrices = [
        np.array([[0.4, 0.4, 0.4, 0.4], [0.2, 0.2, 0.2, 0.2],
                  [0.2, 0.2, 0.2, 0.2], [0.2, 0.2, 0.2, 0.2]]),
        np.array([[0.36999014, 0.38528351, 0.38528351, 0.38528351],
                  [0.23517483, 0.21720297, 0.21720297, 0.21720297],
                  [0.17774131, 0.18882719, 0.18882719, 0.18882719],
                  [0.21709373, 0.20868632, 0.20868632, 0.20868632]]),
        np.array([[0.33972109, 0.36811863, 0.37118462, 0.36906575],
                  [0.27114826, 0.23738228, 0.23376693, 0.23626491],
                  [0.15712313, 0.17641793, 0.17858959, 0.17708679],
                  [0.23200752, 0.21808115, 0.21645886, 0.21758255]]),
    ]

    matrices = []
    with self.test_session() as session:
      session.run(standard_ops.global_variables_initializer())
      while len(matrices) < len(expected_matrices):
        matrices.append(session.run(optimizer.stochastic_matrix))
        session.run(train_op)

    for expected, actual in zip(expected_matrices, matrices):
      self.assertAllClose(expected, actual, rtol=0, atol=1e-6)
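
A quick check on where the first expected matrix comes from: with initial_multiplier_radius=0.8 and four rows (the objective plus three constraints), the values are consistent with each column being initialized as a mixture of the pure-objective vertex and the uniform distribution. This is an inference from the numbers above, not the library's code; in swap-regret schemes of this kind the multipliers are then read off from a fixed point of the column-stochastic matrix.

# Sanity check of the initial stochastic matrix (inferred, not library code).
import numpy as np

m = 4                      # objective row + 3 constraints
radius = 0.8               # initial_multiplier_radius
e0 = np.zeros(m)
e0[0] = 1.0                # all mass on the objective row
column = (1.0 - radius) * e0 + radius * np.ones(m) / m
print(np.tile(column[:, None], (1, m)))
# [[0.4 0.4 0.4 0.4]
#  [0.2 0.2 0.2 0.2]
#  [0.2 0.2 0.2 0.2]
#  [0.2 0.2 0.2 0.2]]
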
Example #3
  def test_additive_swap_regret_optimizer(self):
    """Tests that the stochastic matrices update as expected."""
    minimization_problem = test_util.ConstantMinimizationProblem(
        np.array([0.6, -0.1, 0.4]))
    optimizer = AdditiveSwapRegretOptimizerWrapper(
        gradient_descent.GradientDescentOptimizer(1.0))
    train_op = optimizer.minimize_constrained(minimization_problem)

    # Calculated using a numpy+python implementation of the algorithm.
    expected_matrices = [
        np.array([[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0],
                  [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]),
        np.array([[0.66666667, 1.0, 1.0, 1.0], [0.26666667, 0.0, 0.0, 0.0],
                  [0.0, 0.0, 0.0, 0.0], [0.06666667, 0.0, 0.0, 0.0]]),
        np.array([[0.41666667, 0.93333333, 1.0, 0.98333333],
                  [0.46666667, 0.05333333, 0.0, 0.01333333],
                  [0.0, 0.0, 0.0, 0.0],
                  [0.11666667, 0.01333333, 0.0, 0.00333333]]),
    ]

    matrices = []
    with self.test_session() as session:
      session.run(standard_ops.global_variables_initializer())
      while len(matrices) < len(expected_matrices):
        matrices.append(session.run(optimizer.stochastic_matrix))
        session.run(train_op)

    for expected, actual in zip(expected_matrices, matrices):
      self.assertAllClose(expected, actual, rtol=0, atol=1e-6)
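
The additive variant starts from the matrix with every column at the pure-objective vertex, and its second expected matrix can be reproduced by hand: add the constant constraint violations (0.6, -0.1, 0.4), scaled by the learning rate of 1.0, to the first column and project it back onto the probability simplex. Only the first column moves because the state distribution is initially concentrated on index 0. This reconstruction is inferred from the expected values, not taken from the optimizer's source:

import numpy as np

def project_onto_simplex(v):
  # Euclidean projection of v onto {x : x >= 0, sum(x) == 1}.
  u = np.sort(v)[::-1]
  css = np.cumsum(u)
  rho = np.nonzero(u * np.arange(1, len(v) + 1) > (css - 1.0))[0][-1]
  tau = (css[rho] - 1.0) / (rho + 1.0)
  return np.maximum(v - tau, 0.0)

column = np.array([1.0, 0.0, 0.0, 0.0])       # first column of the matrix
violations = np.array([0.0, 0.6, -0.1, 0.4])  # zero for the objective row
print(project_onto_simplex(column + 1.0 * violations))
# -> [0.66666667 0.26666667 0.         0.06666667]
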
Example #4
  def test_additive_external_regret_optimizer(self):
    """Tests that the Lagrange multipliers update as expected."""
    minimization_problem = test_util.ConstantMinimizationProblem(
        np.array([0.6, -0.1, 0.4]))
    optimizer = AdditiveExternalRegretOptimizerWrapper(
        gradient_descent.GradientDescentOptimizer(1.0),
        maximum_multiplier_radius=1.0)
    train_op = optimizer.minimize_constrained(minimization_problem)

    expected_multipliers = [
        np.array([0.0, 0.0, 0.0]),
        np.array([0.6, 0.0, 0.4]),
        np.array([0.7, 0.0, 0.3]),
        np.array([0.8, 0.0, 0.2]),
        np.array([0.9, 0.0, 0.1]),
        np.array([1.0, 0.0, 0.0]),
        np.array([1.0, 0.0, 0.0]),
    ]

    multipliers = []
    with self.test_session() as session:
      session.run(standard_ops.global_variables_initializer())
      while len(multipliers) < len(expected_multipliers):
        multipliers.append(session.run(optimizer.lagrange_multipliers))
        session.run(train_op)

    for expected, actual in zip(expected_multipliers, multipliers):
      self.assertAllClose(expected, actual, rtol=0, atol=1e-6)
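
The external-regret multipliers follow the same pattern one level down: the trajectory above matches plain projected gradient ascent, adding the constant violations each step and projecting back onto the set {lambda >= 0, sum(lambda) <= maximum_multiplier_radius}. Again an inference from the expected values rather than the library's code:

import numpy as np

def project_onto_capped_simplex(v, radius=1.0):
  # Euclidean projection onto {x : x >= 0, sum(x) <= radius}.
  clipped = np.maximum(v, 0.0)
  if clipped.sum() <= radius:
    return clipped
  # Otherwise the result lies on the simplex of the given radius.
  u = np.sort(v)[::-1]
  css = np.cumsum(u)
  rho = np.nonzero(u * np.arange(1, len(v) + 1) > (css - radius))[0][-1]
  tau = (css[rho] - radius) / (rho + 1.0)
  return np.maximum(v - tau, 0.0)

multipliers = np.zeros(3)
violations = np.array([0.6, -0.1, 0.4])
for _ in range(6):
  multipliers = project_onto_capped_simplex(multipliers + 1.0 * violations)
  print(multipliers)
# Reproduces the six post-initialization values above:
# [0.6, 0, 0.4], [0.7, 0, 0.3], ..., [1.0, 0, 0].
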
Example #5
  def testIndexedSlicesGradient(self):
    with ops.Graph().as_default():
      embedding_matrix = tf.get_variable(
          "embedding_matrix", [5, 5],
          initializer=tf.random_normal_initializer())
      def Cond(it, _):
        return it < 5
      def Body(it, cost):
        # The + 0.0 makes the lookup happen on a tensor computed inside the
        # loop rather than directly on the variable.
        embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
        cost += tf.reduce_sum(embedding)
        return it + 1, cost
      _, cost = control_flow_ops.while_loop(
          Cond, Body, [tf.constant(0), tf.constant(0.0)])
      optimizer = momentum.MomentumOptimizer(0.1, 0.9)
      train_op = optimizer.minimize(cost)
      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
          sess.run([train_op])
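
For reference on what this exercises: tf.gradients of an embedding_lookup comes back as a tf.IndexedSlices (values plus indices), not a dense tensor, and the momentum optimizer has to consume that representation even when it is accumulated through a while_loop. A minimal illustration outside any loop, assuming the TF 1.x preamble sketched after Example #1:

# Gradients of a lookup/gather are IndexedSlices, not dense tensors.
with ops.Graph().as_default():
  params = tf.get_variable("params", [5, 5])
  loss = tf.reduce_sum(embedding_ops.embedding_lookup(params, [0, 2]))
  grad = tf.gradients(loss, [params])[0]
  print(isinstance(grad, tf.IndexedSlices))  # True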