Example #1
 def testInstancesOfOneClassOnly(self):
   # Set up test data with 1 positive (ignored) and 1 negative example.
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [0]}, 1),  # Shares gender with the instance above.
   ]
   example_weights = [1.0, 0.0]  # Second example "omitted" from training.
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.25,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss')
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     unregularized_loss = lr.unregularized_loss(examples)
     loss = lr.regularized_loss(examples)
     prediction = lr.predictions(examples)
     lr.minimize().run()
     self.assertAllClose(0.395226,
                         unregularized_loss.eval(),
                         rtol=3e-2,
                         atol=3e-2)
     self.assertAllClose(0.460781, loss.eval(), rtol=3e-2, atol=3e-2)
     predicted_labels = tf.cast(
         tf.greater_equal(prediction,
                          tf.ones_like(prediction) * 0.5), tf.float32)
     self.assertAllEqual([0, 0], predicted_labels.eval())
Example #2
  def testHingeDenseFeaturesWeightedExamples(self):
    with self._single_threaded_test_session():
      examples = make_dense_examples_dict(
          dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
          weights=[3.0, 1.0],
          labels=[1.0, 0.0])
      variables = make_dense_variable_dict(2, 2)
      options = dict(symmetric_l2_regularization=1.0,
                     symmetric_l1_regularization=0,
                     loss_type='hinge_loss')
      tf.initialize_all_variables().run()
      model = SdcaModel(CONTAINER, examples, variables, options)
      predictions = model.predictions(examples)
      binary_predictions = get_binary_predictions_for_hinge(predictions)
      model.minimize().run()

      # Point (1.0, 0.5) has a higher weight than (1.0, -0.5), so the model
      # will try to increase the margin from (1.0, 0.5). Due to regularization,
      # (1.0, -0.5) will be within the margin. For these points and example
      # weights, the optimal weights are w_1 ~= 0.4 and w_2 ~= 1.2, which give
      # an L2 loss of 0.25 * 1.6 = 0.4. The binary predictions will be correct,
      # but the boundary will be much closer to the 2nd point than the first.
      self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
      self.assertAllClose([1.0, 0.0], binary_predictions.eval(), atol=0.05)
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.6, regularized_loss.eval(), atol=0.02)
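
The numbers asserted above can be sanity-checked outside the test. Below is a minimal NumPy sketch, assuming the weighted hinge objective described in the comment and this version's L2 term of l2 * ||w||^2 / (total example weight), evaluated at the near-optimal weights w ~= (0.4, 1.2):

import numpy as np

w = np.array([0.4, 1.2])                 # near-optimal weights from the comment
x = np.array([[1.0, 0.5], [1.0, -0.5]])  # the two data points
y = np.array([1.0, -1.0])                # labels recoded as +/-1
ex_w = np.array([3.0, 1.0])              # example weights

p = x.dot(w)                             # predictions: [1.0, -0.2]
hinge = np.maximum(0.0, 1.0 - y * p)     # per-example hinge loss: [0.0, 0.8]
unreg = ex_w.dot(hinge) / ex_w.sum()     # weighted mean: 0.2
l2 = 1.0 * w.dot(w) / ex_w.sum()         # 1.6 / 4 = 0.4
print(p, unreg, unreg + l2)              # [1.0, -0.2], 0.2, 0.6

Example #6 below asserts a regularized loss of 0.4 instead of 0.6 for the same data, apparently because that version's L2 term carries an extra 0.5 factor (0.5 * 1.6 / 4 = 0.2).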
Example #3
    def testInstancesOfOneClassOnly(self):
        # Set up test data with 1 positive (ignored) and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Example #4
 def testSimple(self):
     # Set up test data
     example_protos = [
         make_example_proto({
             'age': [0],
             'gender': [0]
         }, 0),
         make_example_proto({
             'age': [1],
             'gender': [1]
         }, 1),
     ]
     example_weights = [1.0, 1.0]
     with self.test_session(use_gpu=False):
         examples = make_example_dict(example_protos, example_weights)
         variables = make_variable_dict(examples, 1, 1)
         options = dict(symmetric_l2_regularization=0.5,
                        symmetric_l1_regularization=0,
                        loss_type='logistic_loss',
                        prior=0.0)
         tf.initialize_all_variables().run()
         lr = SdcaModel(examples, variables, options)
         unregularized_loss = lr.unregularized_loss(examples)
         loss = lr.regularized_loss(examples)
         prediction = lr.predictions(examples)
         self.assertAllClose(0.693147, unregularized_loss.eval())
         self.assertAllClose(0.693147, loss.eval())
         lr.minimize().run()
         self.assertAllClose(0.395226, unregularized_loss.eval())
         self.assertAllClose(0.657446, loss.eval())
         predicted_labels = tf.cast(
             tf.greater_equal(prediction,
                              tf.ones_like(prediction) * 0.5), tf.float32)
         self.assertAllEqual([0, 1], predicted_labels.eval())
Example #5
 def testImbalancedWithExampleWeights(self):
     # Set up test data with 1 positive and 1 negative example; the negative
     # example has weight 3.
     example_protos = [
         make_example_proto({
             'age': [0],
             'gender': [0]
         }, 0),
         make_example_proto({
             'age': [1],
             'gender': [1]
         }, 1),
     ]
     example_weights = [3.0, 1.0]
     with self.test_session(use_gpu=False):
         examples = make_example_dict(example_protos, example_weights)
         variables = make_variable_dict(examples, 1, 1)
         options = dict(symmetric_l2_regularization=0.25,
                        symmetric_l1_regularization=0,
                        loss_type='logistic_loss')
         tf.initialize_all_variables().run()
         lr = SdcaModel(examples, variables, options)
         unregularized_loss = lr.unregularized_loss(examples)
         loss = lr.regularized_loss(examples)
         prediction = lr.predictions(examples)
         lr.minimize().run()
         self.assertAllClose(0.266189,
                             unregularized_loss.eval(),
                             rtol=3e-2,
                             atol=3e-2)
         self.assertAllClose(0.571912, loss.eval(), rtol=3e-2, atol=3e-2)
         predicted_labels = tf.cast(
             tf.greater_equal(prediction,
                              tf.ones_like(prediction) * 0.5), tf.float32)
         self.assertAllEqual([0, 1], predicted_labels.eval())
Example #6
    def testDenseFeaturesWeightedExamples(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]], weights=[3.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)
            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Point (1.0, 0.5) has a higher weight than (1.0, -0.5), so the
            # model will try to increase the margin from (1.0, 0.5). Due to
            # regularization, (1.0, -0.5) will be within the margin. For these
            # points and example weights, the optimal weights are w_1 ~= 0.4
            # and w_2 ~= 1.2, which give an L2 loss of 0.5 * 0.25 * 1.6 = 0.2
            # (the 0.25 factor normalizes by the total example weight of 4).
            # The binary predictions will be correct, but the boundary will be
            # much closer to the 2nd point than the first one.
            self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Example #7
    def testImbalancedWithExampleWeights(self):
        # Set up test data with 1 positive and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [3.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.284860, unregularized_loss.eval(), rtol=0.08)
            self.assertAllClose(0.408044, loss.eval(), atol=0.012)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Example #8
 def testImbalancedWithExampleWeights(self):
   # Set up test data with 1 positive and 1 negative example; the negative
   # example has weight 3.
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   example_weights = [3.0, 1.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.25,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss')
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     unregularized_loss = lr.unregularized_loss(examples)
     loss = lr.regularized_loss(examples)
     prediction = lr.predictions(examples)
     lr.minimize().run()
     self.assertAllClose(0.266189, unregularized_loss.eval(),
                         rtol=3e-2, atol=3e-2)
     self.assertAllClose(0.571912, loss.eval(), rtol=3e-2, atol=3e-2)
     predicted_labels = tf.cast(
         tf.greater_equal(prediction,
                          tf.ones_like(prediction) * 0.5), tf.float32)
     self.assertAllEqual([0, 1], predicted_labels.eval())
Example #9
 def testSimpleLogistic(self):
   # Set up test data
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   example_weights = [1.0, 1.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.5,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss',
                    prior=0.0)
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     unregularized_loss = lr.unregularized_loss(examples)
     loss = lr.regularized_loss(examples)
     prediction = lr.predictions(examples)
     self.assertAllClose(0.693147, unregularized_loss.eval())
     self.assertAllClose(0.693147, loss.eval())
     lr.minimize().run()
     self.assertAllClose(0.395226, unregularized_loss.eval(),
                         rtol=3e-2, atol=3e-2)
     self.assertAllClose(0.657446, loss.eval(),
                         rtol=3e-2, atol=3e-2)
     predicted_labels = tf.cast(
         tf.greater_equal(prediction,
                          tf.ones_like(prediction) * 0.5), tf.float32)
     self.assertAllEqual([0, 1], predicted_labels.eval())
Example #10
  def testImbalancedWithExampleWeights(self):
    # Set up test data with 1 positive and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [3.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.284860, unregularized_loss.eval(), rtol=0.08)
      self.assertAllClose(0.408044, loss.eval(), atol=0.012)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 1], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Example #11
 def testImbalanced(self):
     # Set up test data with 1 positive and 3 negative examples.
     example_protos = [
         make_example_proto({"age": [0], "gender": [0]}, 0),
         make_example_proto({"age": [2], "gender": [0]}, 0),
         make_example_proto({"age": [3], "gender": [0]}, 0),
         make_example_proto({"age": [1], "gender": [1]}, 1),
     ]
     example_weights = [1.0, 1.0, 1.0, 1.0]
     with self._single_threaded_test_session():
         examples = make_example_dict(example_protos, example_weights)
         variables = make_variable_dict(3, 1)
         options = dict(
             symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss", prior=-1.09861
         )
         tf.initialize_all_variables().run()
         lr = SdcaModel(CONTAINER, examples, variables, options)
         unregularized_loss = lr.unregularized_loss(examples)
         loss = lr.regularized_loss(examples)
         prediction = lr.predictions(examples)
         lr.minimize().run()
         self.assertAllClose(0.331710, unregularized_loss.eval(), rtol=3e-2, atol=3e-2)
         self.assertAllClose(0.591295, loss.eval(), rtol=3e-2, atol=3e-2)
         predicted_labels = tf.cast(tf.greater_equal(prediction, tf.ones_like(prediction) * 0.5), tf.float32)
         self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
Example #12
    def testSimple(self):
        # Set up test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()
            # The high tolerance in unregularized_loss comparisons is due to the
            # fact that it's possible to trade off unregularized_loss vs.
            # regularization and still have a sum that is quite close to the
            # optimal regularized_loss value.  SDCA's duality gap only ensures that
            # the regularized_loss is within 0.01 of optimal.
            # 0.525457 is the optimal regularized_loss.
            # 0.411608 is the unregularized_loss at that optimum.
            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
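
The "optimal regularized_loss" of 0.525457 quoted in the comment can be reproduced numerically. The sketch below is a rough cross-check, not part of the test, and assumes the objective is the mean logistic loss plus (l2 / 2) * ||w||^2 normalized by the total example weight:

import numpy as np
from scipy.optimize import minimize

def objective(w):
    # The sparse features expand to 4 weights: example 1 (label 0) activates
    # (w[0], w[2]) via age=0/gender=0; example 2 (label 1) activates
    # (w[1], w[3]) via age=1/gender=1.
    logits = np.array([w[0] + w[2], w[1] + w[3]])
    signs = np.array([-1.0, 1.0])        # labels 0/1 recoded as -/+1
    log_loss = np.mean(np.log1p(np.exp(-signs * logits)))
    l2 = 0.5 * 1.0 * w.dot(w) / 2.0      # l2=1, total example weight is 2
    return log_loss + l2

res = minimize(objective, np.zeros(4))
print(res.fun)                           # ~0.525457, the optimal regularized loss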
Example #13
    def testImbalanced(self):
        # Set up test data with 1 positive and 3 negative examples.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [2], "gender": [0]}, 0),
            make_example_proto({"age": [3], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(3, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
            self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example #14
    def testImbalancedWithExampleWeights(self):
        # Set up test data with 1 positive and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Example #15
    def testDenseFeaturesPerfectlySeparable(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[1.0, 1.0], [1.0, -1.0]], weights=[1.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())

            # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by the x-axis
            # (that is, the SVM's functional margin is >= 1), so the
            # unregularized loss is ~0.0. The only loss is due to L2
            # regularization. For these data points, it turns out that
            # w_1 ~= 0.0 and w_2 ~= 1.0, which means the L2 loss is ~0.25.
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
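
A quick arithmetic check of the comment's numbers, outside the test and under the assumed L2 convention of 0.5 * l2 * ||w||^2 / (total example weight):

import numpy as np

w = np.array([0.0, 1.0])                    # near-optimal weights from the comment
x = np.array([[1.0, 1.0], [1.0, -1.0]])
y = np.array([1.0, -1.0])

p = x.dot(w)                                # [1.0, -1.0]: functional margin is 1
unreg = np.maximum(0.0, 1.0 - y * p).mean() # 0.0: both points sit on the margin
l2 = 0.5 * 1.0 * w.dot(w) / 2.0             # 0.25
print(p, unreg, unreg + l2)                 # [1.0, -1.0], 0.0, 0.25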
Example #16
    def testInstancesOfOneClassOnly(self):
        # Set up test data with 1 positive (ignored) and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example #17
    def testSimpleNoL2(self):
        # Same as the test above (so the comments above apply), but without
        # L2. The algorithm should behave as if it had an L2 of 1 during
        # optimization but 0 in regularized_loss.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=0, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            # There is neither L1 nor L2 loss, so regularized and unregularized losses
            # should be exactly the same.
            self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
            self.assertAllClose(0.40244, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example #18
    def testSomeUnweightedExamples(self):
        # Set up test data with 4 examples; it should produce the same
        # results as testSimple.
        example_protos = [
            # Will be used.
            make_example_proto({"age": [0], "gender": [0]}, 0),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 0),
            # Will be used.
            make_example_proto({"age": [1], "gender": [1]}, 1),
            # Will be ignored.
            make_example_proto({"age": [1], "gender": [0]}, 1),
        ]
        example_weights = [1.0, 0.0, 1.0, 0.0]
        with self._single_threaded_test_session():
            # Only use examples 0 and 2
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
            self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example #19
  def testInstancesOfOneClassOnly(self):
    # Set up test data with 1 positive (ignored) and 1 negative example.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [0]}, 1),  # Shares gender with the instance above.
    ]
    example_weights = [1.0, 0.0]  # Second example "omitted" from training.
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.12)
      self.assertAllClose(0.525457, loss.eval(), atol=0.01)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 0], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Example #20
 def testSomeUnweightedExamples(self):
     # Set up test data with 4 examples; it should produce the same
     # results as testSimple.
     example_protos = [
         # Will be used.
         make_example_proto({"age": [0], "gender": [0]}, 0),
         # Will be ignored.
         make_example_proto({"age": [1], "gender": [0]}, 0),
         # Will be used.
         make_example_proto({"age": [1], "gender": [1]}, 1),
         # Will be ignored.
         make_example_proto({"age": [1], "gender": [0]}, 1),
     ]
     example_weights = [1.0, 0.0, 1.0, 0.0]
     with self._single_threaded_test_session():
         # Only use examples 0 and 2
         examples = make_example_dict(example_protos, example_weights)
         variables = make_variable_dict(1, 1)
         options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="logistic_loss")
         tf.initialize_all_variables().run()
         lr = SdcaModel(CONTAINER, examples, variables, options)
         unregularized_loss = lr.unregularized_loss(examples)
         loss = lr.regularized_loss(examples)
         prediction = lr.predictions(examples)
         lr.minimize().run()
         self.assertAllClose(0.395226, unregularized_loss.eval(), rtol=3e-2, atol=3e-2)
         self.assertAllClose(0.657446, loss.eval(), rtol=3e-2, atol=3e-2)
         predicted_labels = tf.cast(tf.greater_equal(prediction, tf.ones_like(prediction) * 0.5), tf.float32)
         self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
Example #21
    def testDenseFeaturesWeightedExamples(self):
        with self._single_threaded_test_session():
            examples = make_dense_examples_dict(
                dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
                weights=[3.0, 1.0],
                labels=[1.0, 0.0])
            variables = make_dense_variable_dict(2)
            options = dict(symmetric_l2_regularization=1.0,
                           symmetric_l1_regularization=0,
                           loss_type='hinge_loss')
            model = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)
            train_op = model.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            # Point (1.0, 0.5) has a higher weight than (1.0, -0.5), so the
            # model will try to increase the margin from (1.0, 0.5). Due to
            # regularization, (1.0, -0.5) will be within the margin. For these
            # points and example weights, the optimal weights are w_1 ~= 0.4
            # and w_2 ~= 1.2, which give an L2 loss of 0.5 * 0.25 * 1.6 = 0.2
            # (the 0.25 factor normalizes by the total example weight of 4).
            # The binary predictions will be correct, but the boundary will be
            # much closer to the 2nd point than the first one.
            self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Example #22
    def testDenseFeaturesSeparableWithinMargins(self):
        with self._single_threaded_test_session():
            examples = make_dense_examples_dict(
                dense_feature_values=[[1.0, 1.0], [0.5, -0.5]],
                weights=[1.0, 1.0],
                labels=[1.0, 0.0])
            variables = make_dense_variable_dict(2)
            options = dict(symmetric_l2_regularization=1.0,
                           symmetric_l1_regularization=0,
                           loss_type='hinge_loss')
            model = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            # (1.0, 0.5) and (1.0, -0.5) are separable by the x-axis, but the
            # data points are within the margins, so there is unregularized
            # loss (1/2 per example). For these data points, the optimal
            # weights are w_1 ~= 0.0 and w_2 ~= 1.0, which gives an L2 loss
            # of ~0.25.
            self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Example #23
    def testDenseFeaturesPerfectlySeparable(self):
        with self._single_threaded_test_session():
            examples = make_dense_examples_dict(
                dense_feature_values=[[1.0, 1.0], [1.0, -1.0]],
                weights=[1.0, 1.0],
                labels=[1.0, 0.0])
            variables = make_dense_variable_dict(2)
            options = dict(symmetric_l2_regularization=1.0,
                           symmetric_l1_regularization=0,
                           loss_type='hinge_loss')
            model = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in xrange(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())

            # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by the x-axis
            # (that is, the SVM's functional margin is >= 1), so the
            # unregularized loss is ~0.0. The only loss is due to L2
            # regularization. For these data points, it turns out that
            # w_1 ~= 0.0 and w_2 ~= 1.0, which means the L2 loss is ~0.25.
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
Example #24
    def testDenseFeaturesSeparableWithinMargins(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]], weights=[1.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # (1.0, 0.5) and (1.0, -0.5) are separable by the x-axis, but the
            # data points are within the margins, so there is unregularized
            # loss (1/2 per example). For these data points, the optimal
            # weights are w_1 ~= 0.0 and w_2 ~= 1.0, which gives an L2 loss
            # of ~0.25.
            self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Example #25
    def testSomeUnweightedExamples(self):
        # Set up test data with 4 examples; it should produce the same
        # results as testSimple.
        example_protos = [
            # Will be used.
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            # Will be ignored.
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 0),
            # Will be used.
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
            # Will be ignored.
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),
        ]
        example_weights = [1.0, 0.0, 1.0, 0.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                # Only use examples 0 and 2
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.411608,
                                    unregularized_loss.eval(),
                                    atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Example #26
  def testDistributedSimple(self):
    # Set up test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          examples = make_example_dict(example_protos, example_weights)
          variables = make_variable_dict(1, 1)
          options = dict(
              symmetric_l2_regularization=1,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          tf.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def Minimize():
            with self._single_threaded_test_session():
              for _ in range(_MAX_ITERATIONS):
                train_op.run()

          threads = []
          for _ in range(num_loss_partitions):
            threads.append(Thread(target=Minimize))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run()

          # The high tolerance in unregularized_loss comparisons is due to the
          # fact that it's possible to trade off unregularized_loss vs.
          # regularization and still have a sum that is quite close to the
          # optimal regularized_loss value.  SDCA's duality gap only ensures
          # that the regularized_loss is within 0.01 of optimal.
          # 0.525457 is the optimal regularized_loss.
          # 0.411608 is the unregularized_loss at that optimum.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
          self.assertAllClose(0.525457, loss.eval(), atol=0.01)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
Example #27
  def testDistributedSimple(self):
    # Set up test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          examples = make_example_dict(example_protos, example_weights)
          variables = make_variable_dict(1, 1)
          options = dict(
              symmetric_l2_regularization=1,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          tf.initialize_all_variables().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def Minimize():
            with self._single_threaded_test_session():
              for _ in range(_MAX_ITERATIONS):
                train_op.run()

          threads = []
          for _ in range(num_loss_partitions):
            threads.append(Thread(target=Minimize))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run()

          # The high tolerance in unregularized_loss comparisons is due to the
          # fact that it's possible to trade off unregularized_loss vs.
          # regularization and still have a sum that is quite close to the
          # optimal regularized_loss value.  SDCA's duality gap only ensures
          # that the regularized_loss is within 0.01 of optimal.
          # 0.525457 is the optimal regularized_loss.
          # 0.411608 is the unregularized_loss at that optimum.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
          self.assertAllClose(0.525457, loss.eval(), atol=0.01)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertTrue(lr.approximate_duality_gap().eval() < 0.02)
Example #28
    def testImbalanced(self):
        # Set up test data with 1 positive and 3 negative examples.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [2],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [3],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(3, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                self.assertAllClose(0.226487 + 0.102902,
                                    unregularized_loss.eval(),
                                    atol=0.08)
                self.assertAllClose(0.328394 + 0.131364,
                                    loss.eval(),
                                    atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Example #29
    def testSimpleNoL2(self):
        # Same as the test above (so the comments above apply), but without
        # L2. The algorithm should behave as if it had an L2 of 1 during
        # optimization but 0 in regularized_loss.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=0,
                               symmetric_l1_regularization=0,
                               loss_type='logistic_loss')

                lr = SdcaModel(CONTAINER,
                               examples,
                               variables,
                               options,
                               num_table_shards=num_shards)
                tf.initialize_all_variables().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                self.assertAllClose(0.693147, unregularized_loss.eval())
                self.assertAllClose(0.693147, loss.eval())
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()

                # There is neither L1 nor L2 loss, so regularized and unregularized
                # losses should be exactly the same.
                self.assertAllClose(0.40244,
                                    unregularized_loss.eval(),
                                    atol=0.01)
                self.assertAllClose(0.40244, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Example #30
  def testImbalanced(self):
    # Set up test data with 1 positive and 3 negative examples.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [2],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [3],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(3, 1)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()

        self.assertAllClose(
            0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
        self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Example #31
  def testSimple(self):
    # Set up test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only the unregularized loss: 0.5 * (1 + 1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # After minimization, the model perfectly separates the data points.
      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0
      # while minimizing \|\vec{w}\|_2 gives w1=w3=1/2 and w2=w4=-1/2. This
      # gives 0.0 unregularized loss and 0.25 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllEqual([-1.0, 1.0], predictions.eval())
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.0, unregularized_loss.eval())
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
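
The closed form in the comment is the minimum-norm solution of the two margin equations; it can be checked outside the test with a pseudoinverse, again assuming an L2 term of 0.5 * ||w||^2 / (total example weight):

import numpy as np

# Row 1: example 1 activates (w1, w3); row 2: example 2 activates (w2, w4).
A = np.array([[1.0, 0.0, 1.0, 0.0],
              [0.0, 1.0, 0.0, 1.0]])
b = np.array([1.0, -1.0])
w = np.linalg.pinv(A).dot(b)   # minimum-norm solution: [0.5, -0.5, 0.5, -0.5]
l2 = 0.5 * w.dot(w) / 2.0      # 0.25, matching the asserted L2 loss
print(w, l2)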
Example #32
  def testPartitionedPrimals(self):
    # Set up test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1, partitioned=True)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        unregularized_loss = lr.unregularized_loss(examples)
        loss = lr.regularized_loss(examples)
        predictions = lr.predictions(examples)
        self.assertAllClose(0.693147, unregularized_loss.eval())
        self.assertAllClose(0.693147, loss.eval())
        train_op = lr.minimize()
        for _ in range(_MAX_ITERATIONS):
          train_op.run()
        lr.update_weights(train_op).run()
        # The high tolerance in unregularized_loss comparisons is due to the
        # fact that it's possible to trade off unregularized_loss vs.
        # regularization and still have a sum that is quite close to the
        # optimal regularized_loss value.  SDCA's duality gap only ensures that
        # the regularized_loss is within 0.01 of optimal.
        # 0.525457 is the optimal regularized_loss.
        # 0.411608 is the unregularized_loss at that optimum.
        self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
        self.assertAllClose(0.525457, loss.eval(), atol=0.01)
        predicted_labels = get_binary_predictions_for_logistic(predictions)
        self.assertAllEqual([0, 1], predicted_labels.eval())
        self.assertAllClose(
            0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Example #33
  def testSimple(self):
    # Set up test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 2),
    ]
    example_weights = [100.0, 100.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='poisson_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 1 for each example.
      predictions = model.predictions(examples)
      self.assertAllClose([1.0, 1.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      approximate_duality_gap = model.approximate_duality_gap()
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
      # (say w3 and w4). The minimization leads to:
      # w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
      # w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
      # This gives an unregularized loss of 0.3167 and a regularized loss of 0.3366.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
      self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
      self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
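
The per-weight argmins in the comment can be verified directly with a scalar minimizer; a small sketch (not part of the test file) that simply re-minimizes the two expressions quoted above:

import math
from scipy.optimize import minimize_scalar

def per_weight_objective(w, label):
  # From the comment: 100 * (exp(2*w) - 2*w*label) + w**2.
  return 100.0 * (math.exp(2.0 * w) - 2.0 * w * label) + w * w

w_neg = minimize_scalar(lambda w: per_weight_objective(w, 0.0),
                        bounds=(-5.0, 5.0), method='bounded').x
w_pos = minimize_scalar(lambda w: per_weight_objective(w, 2.0),
                        bounds=(-5.0, 5.0), method='bounded').x
print(w_neg, math.exp(2.0 * w_neg))  # ~-1.96487, prediction ~0.0196
print(w_pos, math.exp(2.0 * w_pos))  # ~0.345708, prediction ~1.9965
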
Example #34
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='hinge_loss')
      model = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # After minimization, the model perfectly separates the data points. There
      # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
      # and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 while
      # minimizing \|\vec{w}\|_2 gives w1=w3=1/2 and w2=w4=-1/2. This yields 0.0
      # unregularized loss and 0.25 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllEqual([-1.0, 1.0], predictions.eval())
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.0, unregularized_loss.eval())
      self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
Example #35
  def testSomeUnweightedExamples(self):
    # Setup test data with 4 examples; this should produce the same
    # results as testSimple.
    example_protos = [
        # Will be used.
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 0),
        # Will be used.
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
        # Will be ignored.
        make_example_proto({
            'age': [1],
            'gender': [0]
        }, 1),
    ]
    example_weights = [1.0, 0.0, 1.0, 0.0]
    with self.test_session(use_gpu=False):
      # Only use examples 0 and 2.
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(examples, 1, 1)
      options = dict(symmetric_l2_regularization=0.25,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')
      tf.initialize_all_variables().run()
      lr = SdcaModel(examples, variables, options)
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      prediction = lr.predictions(examples)
      lr.minimize().run()
      self.assertAllClose(0.395226, unregularized_loss.eval())
      self.assertAllClose(0.526336, loss.eval())
      predicted_labels = tf.cast(
          tf.greater_equal(prediction,
                           tf.ones_like(prediction) * 0.5), tf.float32)
      self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
Example #36
    def testSimpleLogistic(self):
        # Setup test data
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            for _ in xrange(5):
                lr.minimize().run()
            # The high tolerance in the unregularized_loss comparison reflects the
            # fact that it is possible to trade off unregularized_loss against
            # regularization and still obtain a sum quite close to the optimal
            # regularized_loss value.  SDCA's duality gap only guarantees that
            # the regularized_loss is within 0.01 of optimal.
            # 0.525457 is the optimal regularized_loss.
            # 0.411608 is the unregularized_loss at that optimum.
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
            self.assertAllClose(0.525457, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Example #37
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1.0,
                           symmetric_l1_regularization=0,
                           loss_type='smooth_hinge_loss')
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()

            # Before minimization, the weights default to zero. There is no loss due
            # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
            predictions = model.predictions(examples)
            self.assertAllClose([0.0, 0.0], predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(1.0, unregularized_loss.eval())
            self.assertAllClose(1.0, regularized_loss.eval())

            # After minimization, the model perfectly separates the data points. There
            # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
            # and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
            # an unregularized hinge loss of 0.33 and a 0.11 L2 loss.
            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            binary_predictions = get_binary_predictions_for_hinge(predictions)
            self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
            self.assertAllEqual([0, 1], binary_predictions.eval())
            self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
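
The w1=w3=1/3 solution also follows from a one-dimensional reduction: with all four weights of magnitude a, each example sits at margin 2a. The sketch below assumes the standard quadratic smoothing (1 - u)**2 / 2 for margins u in (0, 1) with unit smoothing parameter, which is an assumption rather than the library's exact formula, plus the L2 normalization inferred earlier; note the test's reported unregularized loss (0.33) matches the plain hinge value 1 - 2a at the resulting optimum:

from scipy.optimize import minimize_scalar

def smooth_hinge_objective(a):
  # Margin of both examples is u = 2a; for 0 < u < 1 the quadratically
  # smoothed hinge is (1 - u)**2 / 2. L2 term, as inferred earlier:
  # (l2/2) * ||w||^2 / num_examples = a**2 for l2 = 1 and ||w||^2 = 4*a**2.
  u = 2.0 * a
  return (1.0 - u) ** 2 / 2.0 + a * a

a = minimize_scalar(smooth_hinge_objective, bounds=(0.0, 0.5),
                    method='bounded').x
print(a)              # ~1/3
print(1.0 - 2.0 * a)  # ~0.33, the plain hinge loss the test reports
print(a * a)          # ~0.11, the L2 contribution (0.33 + 0.11 = 0.44)
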
Example #38
    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               num_table_shards=num_shards,
                               loss_type='logistic_loss')

                lr = SdcaModel(examples, variables, options)
                variables_lib.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()

                self.assertAllClose(0.284860,
                                    unregularized_loss.eval(),
                                    atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=2e-2,
                                    atol=1e-2)
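
The asserted losses can again be cross-checked numerically. With example weights 3.0 and 1.0, the sketch below (not part of the test file) assumes both the losses and the L2 term are divided by the total example weight (3 + 1 = 4), the same normalization inferred for the unweighted tests; its unregularized value at the optimum matches the asserted 0.284860, and the regularized optimum (~0.418) lies within the test's atol of the asserted 0.408044:

import numpy as np
from scipy.optimize import minimize

def objective(v, l2=1.0):
  a, b = v  # By symmetry: w_age1 = w_gender1 = a and w_age0 = w_gender0 = b.
  # Example 1 (weight 3.0, label 0) has score 2b; example 2 (weight 1.0,
  # label 1) has score 2a. Everything is divided by the total weight 4.
  unreg = 3.0 * np.log1p(np.exp(2.0 * b)) + np.log1p(np.exp(-2.0 * a))
  return (unreg + (l2 / 2.0) * (2.0 * a**2 + 2.0 * b**2)) / 4.0

res = minimize(objective, x0=np.zeros(2))
a, b = res.x
unreg = (3.0 * np.log1p(np.exp(2.0 * b)) + np.log1p(np.exp(-2.0 * a))) / 4.0
print(unreg)    # ~0.2849
print(res.fun)  # ~0.418
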
Example #39
  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto({'age': [0],
                            'gender': [0]}, 0),
        make_example_proto({'age': [1],
                            'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(
          symmetric_l2_regularization=1.0,
          symmetric_l1_regularization=0,
          loss_type='smooth_hinge_loss')
      model = SdcaModel(examples, variables, options)
      tf.initialize_all_variables().run()

      # Before minimization, the weights default to zero. There is no loss due
      # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
      predictions = model.predictions(examples)
      self.assertAllClose([0.0, 0.0], predictions.eval())
      unregularized_loss = model.unregularized_loss(examples)
      regularized_loss = model.regularized_loss(examples)
      self.assertAllClose(1.0, unregularized_loss.eval())
      self.assertAllClose(1.0, regularized_loss.eval())

      # After minimization, the model perfectly separates the data points. There
      # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
      # and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
      # an unregularized hinge loss of 0.33 and a 0.11 L2 loss.
      train_op = model.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()
      model.update_weights(train_op).run()

      binary_predictions = get_binary_predictions_for_hinge(predictions)
      self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
      self.assertAllEqual([0, 1], binary_predictions.eval())
      self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
      self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
Example #40
    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [0]
            }, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(symmetric_l2_regularization=1,
                               symmetric_l1_regularization=0,
                               num_table_shards=num_shards,
                               loss_type='logistic_loss')

                lr = SdcaModel(examples, variables, options)
                variables_lib.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()
                self.assertAllClose(0.411608,
                                    unregularized_loss.eval(),
                                    atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(
                    predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01,
                                    lr.approximate_duality_gap().eval(),
                                    rtol=1e-2,
                                    atol=1e-2)
Example #41
  def testSomeUnweightedExamples(self):
    # Setup test data with 4 examples; this should produce the same
    # results as testSimple.
    example_protos = [
        # Will be used.
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        # Will be ignored.
        make_example_proto(
            {'age': [1],
             'gender': [0]}, 0),
        # Will be used.
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
        # Will be ignored.
        make_example_proto(
            {'age': [1],
             'gender': [0]}, 1),
    ]
    example_weights = [1.0, 0.0, 1.0, 0.0]
    with self.test_session(use_gpu=False):
      # Only use examples 0 and 2.
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(examples, 1, 1)
      options = dict(symmetric_l2_regularization=0.25,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')
      tf.initialize_all_variables().run()
      lr = SdcaModel(examples, variables, options)
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      prediction = lr.predictions(examples)
      lr.minimize().run()
      self.assertAllClose(0.395226, unregularized_loss.eval())
      self.assertAllClose(0.526336, loss.eval())
      predicted_labels = tf.cast(
          tf.greater_equal(prediction,
                           tf.ones_like(prediction) * 0.5), tf.float32)
      self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
Example #42
    def testSimpleLogisticNoL2(self):
        # Same as test above (so comments from above apply) but without an L2.
        # The algorithm should behave as if we have an L2 of 1 in optimization but
        # 0 in regularized_loss.
        example_protos = [
            make_example_proto({
                'age': [0],
                'gender': [0]
            }, 0),
            make_example_proto({
                'age': [1],
                'gender': [1]
            }, 1),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=0,
                           symmetric_l1_regularization=0,
                           loss_type='logistic_loss')

            lr = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()
            unregularized_loss = lr.unregularized_loss(examples)
            loss = lr.regularized_loss(examples)
            predictions = lr.predictions(examples)
            self.assertAllClose(0.693147, unregularized_loss.eval())
            self.assertAllClose(0.693147, loss.eval())
            for _ in xrange(5):
                lr.minimize().run()
            self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
            self.assertAllClose(0.371705, loss.eval(), atol=0.01)
            predicted_labels = get_binary_predictions_for_logistic(predictions)
            self.assertAllEqual([0, 1], predicted_labels.eval())
            self.assertAllClose(0.01,
                                lr.approximate_duality_gap().eval(),
                                rtol=1e-2,
                                atol=1e-2)
Example #43
  def testImbalanced(self):
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [2],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [3],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    config = tf.ConfigProto(inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)
    with self.test_session(use_gpu=False, config=config):
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(3, 1)
      options = dict(symmetric_l2_regularization=0.25,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss',
                     prior=-1.09861)
      tf.initialize_all_variables().run()
      lr = SdcaModel(CONTAINER, examples, variables, options)
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      prediction = lr.predictions(examples)
      lr.minimize().run()
      self.assertAllClose(0.331710, unregularized_loss.eval(),
                          rtol=3e-2, atol=3e-2)
      self.assertAllClose(0.591295, loss.eval(), rtol=3e-2, atol=3e-2)
      predicted_labels = tf.cast(
          tf.greater_equal(prediction,
                           tf.ones_like(prediction) * 0.5), tf.float32)
      self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
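
The prior=-1.09861 in the options is, to good precision, the log-odds of the positive class for this 1-positive/3-negative dataset; a quick check of that (assumed) interpretation:

import math
# 1 positive out of 4 examples: log-odds = log(p / (1 - p)) with p = 1/4.
print(math.log((1.0 / 4.0) / (3.0 / 4.0)))  # -1.09861..., the 'prior' above
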
Example #44
  def testSimpleNoL2(self):
    # Same as test above (so comments from above apply) but without an L2.
    # The algorithm should behave as if we have an L2 of 1 in optimization but
    # 0 in regularized_loss.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=0,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      predictions = lr.predictions(examples)
      self.assertAllClose(0.693147, unregularized_loss.eval())
      self.assertAllClose(0.693147, loss.eval())
      for _ in xrange(5):
        lr.minimize().run()
      self.assertAllClose(0.411608, unregularized_loss.eval(), rtol=0.11)
      self.assertAllClose(0.371705, loss.eval(), atol=0.01)
      predicted_labels = get_binary_predictions_for_logistic(predictions)
      self.assertAllEqual([0, 1], predicted_labels.eval())
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Example #45
  def testImbalanced(self):
    # Setup test data with 1 positive, and 3 negative examples.
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [2],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [3],
             'gender': [0]}, 0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 1),
    ]
    example_weights = [1.0, 1.0, 1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(3, 1)
      options = dict(symmetric_l2_regularization=0.25,
                     symmetric_l1_regularization=0,
                     loss_type='logistic_loss',
                     prior=-1.09861)
      tf.initialize_all_variables().run()
      lr = SdcaModel(CONTAINER, examples, variables, options)
      unregularized_loss = lr.unregularized_loss(examples)
      loss = lr.regularized_loss(examples)
      prediction = lr.predictions(examples)
      lr.minimize().run()
      self.assertAllClose(0.331710, unregularized_loss.eval(),
                          rtol=3e-2, atol=3e-2)
      self.assertAllClose(0.591295, loss.eval(), rtol=3e-2, atol=3e-2)
      predicted_labels = tf.cast(
          tf.greater_equal(prediction,
                           tf.ones_like(prediction) * 0.5), tf.float32)
      self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
Example #46
    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [1.0, 1.0]
        with self.test_session(use_gpu=False):
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(CONTAINER, examples, variables, options)
            tf.initialize_all_variables().run()

            # Before minimization, the weights default to zero. There is no loss due
            # to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
            predictions = model.predictions(examples)
            self.assertAllClose([0.0, 0.0], predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(1.0, unregularized_loss.eval())
            self.assertAllClose(1.0, regularized_loss.eval())

            # After minimization, the model perfectly separates the data points. There
            # are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
            # and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 while
            # minimizing \|\vec{w}\|_2 gives w1=w3=1/2 and w2=w4=-1/2. This yields 0.0
            # unregularized loss and 0.25 L2 loss.
            for _ in xrange(5):
                model.minimize().run()

            binary_predictions = get_binary_predictions_for_hinge(predictions)
            self.assertAllEqual([-1.0, 1.0], predictions.eval())
            self.assertAllEqual([0.0, 1.0], binary_predictions.eval())
            self.assertAllClose(0.0, unregularized_loss.eval())
            self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
Example #47
  def testDistributedSimple(self):
    # Distributed SDCA may not converge if the workers concurrently update the
    # same example. In this test the examples are partitioned across workers.
    # The examples are the same for all workers; only the example_ids are
    # different.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    examples = make_example_dict(example_protos, example_weights)
    example_ids = array_ops.placeholder(
        dtypes.string, shape=(len(example_weights),))
    examples['example_ids'] = example_ids
    variables = make_variable_dict(1, 1)
    for num_shards in _SHARD_NUMBERS:
      for num_loss_partitions in _NUM_LOSS_PARTITIONS:
        with self._single_threaded_test_session():
          options = dict(
              # Keep the same solution as for testSimple: since the number of
              # examples is multiplied by num_loss_partitions, also multiply
              # L2 by the same value.
              symmetric_l2_regularization=num_loss_partitions,
              symmetric_l1_regularization=0,
              loss_type='logistic_loss',
              num_table_shards=num_shards,
              num_loss_partitions=num_loss_partitions)

          lr = SdcaModel(examples, variables, options)
          variables_lib.global_variables_initializer().run()
          unregularized_loss = lr.unregularized_loss(examples)
          loss = lr.regularized_loss(examples)
          predictions = lr.predictions(examples)
          self.assertAllClose(0.693147, unregularized_loss.eval())
          self.assertAllClose(0.693147, loss.eval())

          train_op = lr.minimize()

          def minimize(worker_id):
            with self._single_threaded_test_session():
              feed_dict = {example_ids: [
                  str(i + worker_id*len(example_weights)) for i in range(
                      len(example_weights))]}
              for _ in range(_MAX_ITERATIONS):
                train_op.run(feed_dict=feed_dict)  # pylint: disable=cell-var-from-loop

          threads = []
          for worker_id in range(num_loss_partitions):
            threads.append(threading.Thread(target=minimize, args=(worker_id,)))
            threads[-1].start()

          for t in threads:
            t.join()
          lr.update_weights(train_op).run(feed_dict={
              example_ids: [str(i) for i in range(len(example_weights))]})

          # Test only the unregularized loss because the optimal value of the
          # regularized loss depends on num_loss_partitions.
          self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
          predicted_labels = get_binary_predictions_for_logistic(predictions)
          self.assertAllEqual([0, 1], predicted_labels.eval())
          self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
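
The L2 scaling in the options above can be checked with the same one-dimensional reduction used for the plain logistic testSimple: with P loss partitions each feeding an identical copy of the two examples, the summed loss and the total example weight both scale by P, so multiplying l2 by P leaves the normalized objective, and therefore the solution, unchanged. A sketch under the normalization assumed earlier:

import numpy as np
from scipy.optimize import minimize_scalar

def objective(a, num_loss_partitions):
  # p identical copies of the two symmetric logistic examples, with l2 = p.
  p = num_loss_partitions
  unreg = 2.0 * p * np.log1p(np.exp(-2.0 * a))
  l2_term = (p / 2.0) * (4.0 * a * a)
  return (unreg + l2_term) / (2.0 * p)

for p in (1, 2, 4):
  res = minimize_scalar(lambda a: objective(a, p),
                        bounds=(0.0, 5.0), method='bounded')
  print(p, res.x, res.fun)  # same argmin (~0.337) and value (~0.5255) for every p
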