Code example #1
    def testNoWeightedExamples(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        # Zeroed out example weights.
        example_weights = [0.0, 0.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
            lr.minimize().run()
            self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
            with self.assertRaisesOpError(
                    'No examples found or all examples have zero weight.'):
                lr.approximate_duality_gap().eval()
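
Note: with all weights initialized to zero, every logistic logit is 0, so every prediction is sigmoid(0) = 0.5 regardless of the example's features; that is what both assertAllClose checks above verify. A one-line plain-Python check:

import math

# sigmoid of a zero logit: all-zero weights give a 0.5 prediction for any example
print(1.0 / (1.0 + math.exp(-0.0)))  # 0.5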
Code example #2
  def testNoWeightedExamples(self):
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    # Zeroed out example weights.
    example_weights = [0.0, 0.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
      lr.minimize().run()
      self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
      with self.assertRaisesOpError(
          'No examples found or all examples have zero weight.'):
        lr.approximate_duality_gap().eval()
Code example #3
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      for _ in xrange(20):
        lr.minimize().run()

      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                          predictions.eval(),
                          rtol=0.005)
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
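
A quick closed-form check of the "2/3 of label" comment above (a sketch under two assumptions not stated in the test: each example activates two one-hot features that share a single weight w by symmetry, so the prediction is 2*w, and the regularizer carries the usual 1/2 factor, i.e. (L2/2) * ||w||^2, which contributes L2 * w**2 per example here):

def squared_loss_prediction(label, l2=1.0):
    # Minimize (label - 2*w)**2 / 2 + l2 * w**2 over w:
    #   -2 * (label - 2*w) + 2 * l2 * w = 0  =>  w = label / (2 + l2)
    w = label / (2.0 + l2)
    return 2.0 * w  # the model's prediction for this example

print(squared_loss_prediction(-10.0))  # -6.666... = -20/3
print(squared_loss_prediction(14.0))   #  9.333... =  28/3

With l2 = 1 the prediction is 2 * label / 3, matching the asserted [-20/3, 28/3].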
Code example #4
File: sdca_ops_test.py  Project: apollos/tensorflow
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 2/3 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
            self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
            # Approximate gap should be very close to 0.0. (In fact, because the gap
            # is only approximate, it is likely that upon convergence the duality gap
            # can have a tiny negative value).
            self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
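
The atol-only assertion on the duality gap above is deliberate: it accepts a converged gap that is slightly negative. A plain-Python analogue of the acceptance rule (assuming the usual numpy-style |actual - expected| <= atol + rtol * |expected| criterion behind assertAllClose):

def all_close(expected, actual, rtol=1e-6, atol=1e-6):
    # numpy-style closeness test: absolute tolerance plus a term relative
    # to the expected value (zero here, so only atol matters).
    return abs(actual - expected) <= atol + rtol * abs(expected)

print(all_close(0.0, -0.004, atol=1e-2))  # True: a tiny negative gap still passes
print(all_close(0.0, -0.02, atol=1e-2))   # False: too far below zero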
Code example #5
File: sdca_ops_test.py  Project: cartland/tensorflow
    def testSomeUnweightedExamples(self):
        # Set up test data with 4 examples; it should produce the same
        # results as testSimple.
        example_protos = [
            # Will be used.
            make_example_proto({"age": [0], "gender": [0]}, 0),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 0),
            # Will be used.
            make_example_proto({"age": [1], "gender": [1]}, 1),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 1),
        ]
        example_weights = [1.0, 0.0, 1.0, 0.0]
        with self._single_threaded_test_session():
            # Only use examples 0 and 2
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Code example #6
File: sdca_ops_test.py  Project: cartland/tensorflow
    def testImbalanced(self):
        # Setup test data with 1 positive, and 3 negative examples.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [2], "gender": [0]}, 0),
            make_example_proto({"age": [3], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(3, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
            self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Code example #7
File: sdca_ops_test.py  Project: cartland/tensorflow
    def testSimpleNoL2(self):
        # Same as test above (so comments from above apply) but without an L2.
        # The algorithm should behave as if we have an L2 of 1 in optimization but
        # 0 in regularized_loss.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=0, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            # There is neither L1 nor L2 loss, so regularized and unregularized losses
            # should be exactly the same.
            self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
            self.assertAllClose(0.40244, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Code example #8
    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [3.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.284860, unregularized_loss.eval(), rtol=0.08)
            self.assertAllClose(0.408044, loss.eval(), atol=0.012)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Code example #9
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      for _ in xrange(20):
        lr.minimize().run()

      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                          predictions.eval(),
                          rtol=0.005)
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Code example #10
File: sdca_ops_test.py  Project: cartland/tensorflow
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()
            # The high tolerance in unregularized_loss comparisons is due to the
            # fact that it's possible to trade off unregularized_loss vs.
            # regularization and still have a sum that is quite close to the
            # optimal regularized_loss value.  SDCA's duality gap only ensures that
            # the regularized_loss is within 0.01 of optimal.
            # 0.525457 is the optimal regularized_loss.
            # 0.411608 is the unregularized_loss at that optimum.
            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
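
The constants 0.525457 and 0.411608 quoted in the comment above can be reproduced without TensorFlow. This sketch assumes the regularized objective is the mean logistic loss plus l2 * ||w||^2 / (2 * num_examples); by the symmetry of the two examples the four weights collapse to a single scalar u (the label-1 weights are -u), so the objective becomes R(u) = log(1 + exp(2*u)) + u**2:

import math

def logistic_optimum():
    u = 0.0
    for _ in range(200):  # plain gradient descent; R(u) is smooth and convex
        grad = 2.0 / (1.0 + math.exp(-2.0 * u)) + 2.0 * u
        u -= 0.1 * grad
    unregularized = math.log(1.0 + math.exp(2.0 * u))
    return unregularized, unregularized + u * u

print(logistic_optimum())  # ~(0.411608, 0.525457)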
Code example #11
  def testSparseRandom(self):
    dim = 20
    num_examples = 1000
    # Number of non-zero features per example.
    non_zeros = 10
    # Setup test data.
    with self._single_threaded_test_session():
      examples, variables = make_random_examples_and_variables_dicts(
          num_examples, dim, non_zeros)
      options = dict(
          symmetric_l2_regularization=.1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          adaptive=False,
          loss_type='logistic_loss')

      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      for _ in range(4):
        train_op.run()
      lr.update_weights(train_op).run()
      # Duality gap is 1.4e-5.
      # It would be 0.01 without shuffling and 0.02 with adaptive sampling.
      self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
Code example #12
    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Code example #13
File: sdca_ops_test.py  Project: apollos/tensorflow
    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Code example #14
  def testInstancesOfOneClassOnly(self):
    # Setup test data with 1 positive (ignored), and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [0]}, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
      self.assertAllClose(0.525457, loss.eval(), atol=0.01)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 0], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Code example #15
  def testImbalancedWithExampleWeights(self):
    # Setup test data with 1 positive, and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [3.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.284860, unregularized_loss.eval(), rtol=0.08)
      self.assertAllClose(0.408044, loss.eval(), atol=0.012)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 1], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Code example #16
File: sdca_ops_test.py  Project: apollos/tensorflow
    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Code example #17
File: sdca_ops_test.py  Project: instadeep/Mobile-ai
    def testSomeUnweightedExamples(self):
        # Set up test data with 4 examples; it should produce the same
        # results as testSimple.
        example_protos = [
            # Will be used.
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            # Will be ignored.
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 0),
            # Will be used.
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
            # Will be ignored.
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),
        ]
        example_weights = [1.0, 0.0, 1.0, 0.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                # Only use examples 0 and 2
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.411608,
                                    unregularized_loss.eval(),
                                    atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Code example #18
  def testDistributedSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          examples = make_example_dict(example_protos, example_weights)
          variables = make_variable_dict(1, 1)
          options = dict(
              symmetric_l2_regularization=1,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          tf.initialize_all_variables().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def Minimize():
            with self._single_threaded_test_session():
              for _ in range(_MAX_ITERATIONS):
                train_op.run()

          threads = []
          for _ in range(num_loss_partitions):
            threads.append(Thread(target=Minimize))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run()

          # The high tolerance in unregularized_loss comparisons is due to the
          # fact that it's possible to trade off unregularized_loss vs.
          # regularization and still have a sum that is quite close to the
          # optimal regularized_loss value.  SDCA's duality gap only ensures
          # that the regularized_loss is within 0.01 of optimal.
          # 0.525457 is the optimal regularized_loss.
          # 0.411608 is the unregularized_loss at that optimum.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
          self.assertAllClose(0.525457, loss.eval(), atol=0.01)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
Code example #19
  def testDistributedSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          examples = make_example_dict(example_protos, example_weights)
          variables = make_variable_dict(1, 1)
          options = dict(
              symmetric_l2_regularization=1,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          tf.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def Minimize():
            with self._single_threaded_test_session():
              for _ in range(_MAX_ITERATIONS):
                train_op.run()

          threads = []
          for _ in range(num_loss_partitions):
            threads.append(Thread(target=Minimize))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run()

          # The high tolerance in unregularized_loss comparisons is due to the
          # fact that it's possible to trade off unregularized_loss vs.
          # regularization and still have a sum that is quite close to the
          # optimal regularized_loss value.  SDCA's duality gap only ensures
          # that the regularized_loss is within 0.01 of optimal.
          # 0.525457 is the optimal regularized_loss.
          # 0.411608 is the unregularized_loss at that optimum.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
          self.assertAllClose(0.525457, loss.eval(), atol=0.01)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
Code example #20
File: sdca_ops_test.py  Project: instadeep/Mobile-ai
    def testImbalanced(self):
        # Setup test data with 1 positive, and 3 negative examples.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [2],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [3],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(3, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.226487 + 0.102902,
                                    unregularized_loss.eval(),
                                    atol=0.08)
                self.assertAllClose(0.328394 + 0.131364,
                                    loss.eval(),
                                    atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Code example #21
File: sdca_ops_test.py  Project: instadeep/Mobile-ai
    def testSimpleNoL2(self):
        # Same as test above (so comments from above apply) but without an L2.
        # The algorithm should behave as if we have an L2 of 1 in optimization but
        # 0 in regularized_loss.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=0,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                self.assertAllClose(0.693147, unregularized_loss.eval())
                self.assertAllClose(0.693147, loss.eval())
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                # There is neither L1 nor L2 loss, so regularized and unregularized
                # losses should be exactly the same.
                self.assertAllClose(0.40244,
                                    unregularized_loss.eval(),
                                    atol=0.01)
                self.assertAllClose(0.40244, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Code example #22
File: sdca_ops_test.py  Project: Immexxx/tensorflow
  def testImbalanced(self):
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [2],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [3],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(3, 1)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()

        self.assertAllClose(
            0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Code example #23
  def testPartitionedPrimals(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, partitioned=True)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Code example #24
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 2),
    ]
    example_weights = [100.0, 100.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='poisson_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 1 for each example.
      predictions = model.predictions(examples)
      self.assertAllClose([1.0, 1.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      approximate_duality_gap = model.approximate_duality_gap()
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to:
      # w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
      # w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
      # This gives an unregularized loss of .3167 and .3366 with regularization.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
      self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
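
The argmin constants in the comment above (-1.96487 and 0.345708, with predictions exp(2*w)) can be verified numerically. A sketch that takes the comment's per-weight-pair objective f(w) = 100 * (exp(2*w) - 2*w*label) + w**2 at face value; its stationarity condition is 100 * (exp(2*w) - label) + w = 0:

import math

def poisson_argmin(label):
    # Newton's method on g(w) = 100 * (exp(2*w) - label) + w; g is strictly
    # increasing, so its root is the unique minimizer of f above.
    w = 0.0
    for _ in range(50):
        g = 100.0 * (math.exp(2.0 * w) - label) + w
        g_prime = 200.0 * math.exp(2.0 * w) + 1.0
        w -= g / g_prime
    return w

for label in (0.0, 2.0):
    w = poisson_argmin(label)
    print(label, w, math.exp(2.0 * w))
# 0.0 -> w = -1.96487, prediction 0.0196
# 2.0 -> w =  0.345708, prediction 1.9965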
Code example #25
    def testSimpleLogistic(self):
        # Setup test data
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            for _ in xrange(5):
                lr.minimize().run()
            # The high tolerance in unregularized_loss comparisons is due to the
            # fact that it's possible to trade off unregularized_loss vs.
            # regularization and still have a sum that is quite close to the
            # optimal regularized_loss value.  SDCA's duality gap only ensures that
            # the regularized_loss is within 0.01 of optimal.
            # 0.525457 is the optimal regularized_loss.
            # 0.411608 is the unregularized_loss at that optimum.
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Code example #26
    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               num_table_shards=num_shards,
                               loss_type='logistic_loss')

                lr = SdcaModel(examples, variables, options)
                variables_lib.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()

                self.assertAllClose(0.284860,
                                    unregularized_loss.eval(),
                                    atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=2e-2,
                                    atol=1e-2)
Code example #27
    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               num_table_shards=num_shards,
                               loss_type='logistic_loss')

                lr = SdcaModel(examples, variables, options)
                variables_lib.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()
                self.assertAllClose(0.411608,
                                    unregularized_loss.eval(),
                                    atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Code example #28
    def testSimpleLogisticNoL2(self):
        # Same as test above (so comments from above apply) but without an L2.
        # The algorithm should behave as if we have an L2 of 1 in optimization but
        # 0 in regularized_loss.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=0,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
            self.assertAllClose(0.371705, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Code example #29
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, -10.0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='squared_loss')

            lr = SdcaModel(examples, variables, options)
            variables_lib.global_variables_initializer().run()
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()
            lr.update_weights(train_op).run()

            # Predictions should be 2/3 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
            self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                                predictions.eval(),
                                rtol=0.005)
            # Approximate gap should be very close to 0.0. (In fact, because the gap
            # is only approximate, it is likely that upon convergence the duality gap
            # can have a tiny negative value).
            self.assertAllClose(0.0,
                                lr.approximate_duality_gap().eval(),
                                atol=1e-2)
Code example #30
  def testSimpleNoL2(self):
    # Same as test above (so comments from above apply) but without an L2.
    # The algorithm should behave as if we have an L2 of 1 in optimization but
    # 0 in regularized_loss.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=0,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      self.assertAllClose(0.693147, unregularized_loss.eval())
      self.assertAllClose(0.693147, loss.eval())
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
      self.assertAllClose(0.371705, loss.eval(), atol=0.01)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 1], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Code example #31
  def testDistributedSimple(self):
    # Distributed SDCA may not converge if workers concurrently update the
    # same example. In this test the examples are partitioned across workers.
    # The examples are the same for all workers, just the example_ids are
    # different.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    examples = make_example_dict(example_protos, example_weights)
    example_ids = array_ops.placeholder(
        dtypes.string, shape=(len(example_weights),))
    examples['example_ids'] = example_ids
    variables = make_variable_dict(1, 1)
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          options = dict(
              # Keep the same solution as for TestSimple: since the number of
              # examples is multiplied by num_loss_partitions, multiply also
              # L2 by the same value.
              symmetric_l2_regularization=num_loss_partitions,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          variables_lib.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def minimize(worker_id):
            with self._single_threaded_test_session():
              feed_dict = {example_ids: [
                  str(i + worker_id*len(example_weights)) for i in range(
                      len(example_weights))]}
              for _ in range(_MAX_ITERATIONS):
                train_op.run(feed_dict=feed_dict)  # pylint: disable=cell-var-from-loop

          threads = []
          for worker_id in range(num_loss_partitions):
            threads.append(threading.Thread(target=minimize, args=(worker_id,)))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run(feed_dict={
              example_ids: [str(i) for i in range(len(example_weights))]})

          # Test only the unregularized loss because the optimal value of the
          # regularized loss depends on num_loss_partitions.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
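
The example_ids placeholder is what implements the "partitioned across workers" comment above: worker k feeds the string ids k*n .. k*n + n - 1, so (the test relies on SDCA keying its per-example dual state by these ids) no two workers ever update the same dual variable. The same scheme in isolation:

def worker_example_ids(worker_id, num_examples):
    # Disjoint string ids per worker, mirroring the feed_dict built in
    # minimize() above.
    return [str(i + worker_id * num_examples) for i in range(num_examples)]

print(worker_example_ids(0, 2))  # ['0', '1']
print(worker_example_ids(1, 2))  # ['2', '3']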