Code example #1
    def testTrainFnNonChiefWithCentering(self):
        """Tests the train function running on worker with bias centering."""
        with self.test_session():
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=False,
                num_ps_replicas=0,
                center_bias=True,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(
                loss=math_ops.reduce_mean(
                    _squared_loss(labels, weights, predictions)),
                predictions_dict=predictions_dict,
                labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # Regardless of how many times the train op is run, a non-chief worker
            # can only accumulate stats so the tree ensemble never changes.
            for _ in range(5):
                train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 0)
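Note: these snippets call a module-level helper _squared_loss that is defined
elsewhere in gbdt_batch_test.py. A minimal sketch consistent with how it is
called here (per-example squared error; the weights argument is accepted but
unused, and math_ops.reduce_mean is applied by the caller) might be:

    def _squared_loss(labels, unused_weights, predictions):
        # One loss value per example: squared error summed over the output
        # dimension. (Sketch only; the real helper may differ.)
        return math_ops.reduce_sum(
            math_ops.squared_difference(predictions, labels), 1, keepdims=True)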
Code example #2
    def testWithExistingEnsembleAndShrinkage(self):
        with self.test_session():
            # Add shrinkage config.
            learning_rate = 0.0001
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            # Add 5 trees with some weights.
            for i in range(0, 5):
                tree = tree_ensemble.trees.add()
                _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
                tree_ensemble.tree_weights.append(i + 1)
                meta = tree_ensemble.tree_metadata.add()
                meta.num_tree_weight_updates = 1
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")

            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([4, 7], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.2, 0.8], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3], [[], []],
                        learning_rate=learning_rate)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # The weights of the previous trees stay the same; the new (last)
            # tree is added with the shrinkage weight.
            self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
                                output_ensemble.tree_weights)

            # Check that the number of updates is 1 for every tree (i.e., no
            # old tree weight was adjusted).
            for i in range(0, 6):
                self.assertEqual(
                    1,
                    output_ensemble.tree_metadata[i].num_tree_weight_updates)

            # Ensure feature importance was aggregated correctly.
            self.assertAllEqual([5, 9], feature_usage_counts.eval())
            self.assertArrayNear(
                [0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
                feature_gains.eval(), 1e-6)
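Note: _append_to_leaf is another test helper the snippets rely on but do not
show. Based on how leaves are read back elsewhere in these examples
(leaf.sparse_vector.index / leaf.sparse_vector.value), a plausible sketch is:

    def _append_to_leaf(leaf, class_id, weight):
        # Leaves store per-class contributions as a sparse vector.
        leaf.sparse_vector.index.append(class_id)
        leaf.sparse_vector.value.append(weight)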
Code example #3
  def testWithExistingEnsembleAndShrinkage(self):
    with self.test_session():
      # Add shrinkage config.
      learning_rate = 0.0001
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 5 trees with some weights.
      for i in range(0, 5):
        tree = tree_ensemble.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble.tree_weights.append(i + 1)
        meta = tree_ensemble.tree_metadata.add()
        meta.num_tree_weight_updates = 1
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")

      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([4, 7], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.2, 0.8], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.global_variables_initializer().run()

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [[], []],
              learning_rate=learning_rate)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # The weights of the previous trees stay the same; the new (last) tree is
      # added with the shrinkage weight.
      self.assertAllClose([1.0, 2.0, 3.0, 4.0, 5.0, learning_rate],
                          output_ensemble.tree_weights)

      # Check that the number of updates is 1 for every tree (i.e., no old tree
      # weight was adjusted).
      for i in range(0, 6):
        self.assertEqual(
            1, output_ensemble.tree_metadata[i].num_tree_weight_updates)

      # Ensure feature importance was aggregated correctly.
      self.assertAllEqual([5, 9], feature_usage_counts.eval())
      self.assertArrayNear(
          [0.2 + 0.5 * learning_rate, 0.8 + 0.3 * learning_rate],
          feature_gains.eval(), 1e-6)
Code example #4
File: gbdt_batch.py  Project: zxypat/tensorflow
      def _refresh_local_ensemble_fn():
        # Serialize the model from parameter server after reading all inputs.
        with ops.control_dependencies(input_deps):
          (ensemble_stamp, serialized_model) = (
              model_ops.tree_ensemble_serialize(self._ensemble_handle))

        # Update local ensemble with the serialized model from parameter server.
        with ops.control_dependencies([create_op]):
          return model_ops.tree_ensemble_deserialize(
              local_ensemble_handle,
              stamp_token=ensemble_stamp,
              tree_ensemble_config=serialized_model), ensemble_stamp
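Note: in gbdt_batch.py this function is one branch of a conditional refresh:
the worker deserializes the chief's ensemble only when its local stamp is
stale. A hedged sketch of the wiring (local_stamp and the exact cond plumbing
are assumptions here, not shown in the snippet):

    # Refresh the local ensemble copy only if the global stamp has moved on.
    refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
        math_ops.not_equal(ensemble_stamp, local_stamp),
        _refresh_local_ensemble_fn,
        lambda: (control_flow_ops.no_op(), ensemble_stamp))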
Code example #5
    def testWithExistingEnsemble(self):
        with self.test_session():
            # Create existing tree ensemble.
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=self._tree_ensemble.SerializeToString(),
                name="existing")
            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([0, 4, 1], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.0, 0.3, 0.05], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()
            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2, 0],
                        feature_gains, [0.02, 0.1, 0.0], [[], []],
                        learning_rate=1)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # Output.
            self.assertEqual(3, len(output_ensemble.trees))
            self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[2])

            self.assertAllEqual([1.0, 1.0, 1.0], output_ensemble.tree_weights)

            self.assertEqual(
                2, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertEqual(
                3, output_ensemble.tree_metadata[1].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[2].num_tree_weight_updates)
            self.assertAllEqual([1, 6, 1], feature_usage_counts.eval())
            self.assertArrayNear([0.02, 0.4, 0.05], feature_gains.eval(), 1e-6)
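Note: the ensemble-optimizer tests reference self._tree_to_add and
self._ensemble_to_add fixtures built in the test's setUp, which these snippets
omit. A hypothetical sketch consistent with their use (the real fixture's leaf
payload may differ):

    def setUp(self):
        # A one-tree ensemble to merge into existing ensembles; _tree_to_add
        # aliases the tree inside it so assertProtoEquals can compare them.
        self._ensemble_to_add = tree_config_pb2.DecisionTreeEnsembleConfig()
        self._tree_to_add = self._ensemble_to_add.trees.add()
        _append_to_leaf(self._tree_to_add.nodes.add().leaf, 0, 0.1)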
Code example #6
  def testWithExistingEnsemble(self):
    with self.test_session():
      # Create existing tree ensemble.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=self._tree_ensemble.SerializeToString(),
          name="existing")
      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([0, 4, 1], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.3, 0.05], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.global_variables_initializer().run()
      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
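      # Positional arguments to add_trees_to_ensemble, as exercised across
      # these tests: the ensemble handle, the serialized ensemble to merge in,
      # the usage-count variable and its per-feature deltas (added as-is), the
      # gains variable and its per-feature deltas (scaled by learning_rate),
      # and a dropout-info pair [dropped tree ids, their original weights].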
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2, 0],
              feature_gains, [0.02, 0.1, 0.0], [[], []],
              learning_rate=1)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # Output.
      self.assertEqual(3, len(output_ensemble.trees))
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[2])

      self.assertAllEqual([1.0, 1.0, 1.0], output_ensemble.tree_weights)

      self.assertEqual(2,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertEqual(3,
                       output_ensemble.tree_metadata[1].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[2].num_tree_weight_updates)
      self.assertAllEqual([1, 6, 1], feature_usage_counts.eval())
      self.assertArrayNear([0.02, 0.4, 0.05], feature_gains.eval(), 1e-6)
Code example #7
    def testWithEmptyEnsembleAndShrinkage(self):
        with self.test_session():
            # Add shrinkage config.
            learning_rate = 0.0001
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")

            # Create zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([0, 0], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=np.array([0.0, 0.0], np.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3], [[], []],
                        learning_rate=learning_rate)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # New tree is added with shrinkage weight.
            self.assertAllClose([learning_rate], output_ensemble.tree_weights)
            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertAllEqual([1, 2], feature_usage_counts.eval())
            self.assertArrayNear([0.5 * learning_rate, 0.3 * learning_rate],
                                 feature_gains.eval(), 1e-6)
Code example #8
    def testWithEmptyEnsemble(self):
        with self.test_session():
            # Create an empty ensemble.
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="empty")

            # Create zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=array_ops.zeros([1], dtypes.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(
                initial_value=array_ops.zeros([1], dtypes.float32),
                name="feature_gains",
                trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()

            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [2],
                        feature_gains, [0.4], [[]],
                        learning_rate=1.0)
            ]):
                result = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle)[1]

            # Output.
            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            output_ensemble.ParseFromString(result.eval())
            self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[0])
            self.assertEqual(1, len(output_ensemble.trees))

            self.assertAllEqual([1.0], output_ensemble.tree_weights)

            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)

            self.assertAllEqual([2], feature_usage_counts.eval())
            self.assertArrayNear([0.4], feature_gains.eval(), 1e-6)
Code example #9
  def testWithEmptyEnsembleAndShrinkage(self):
    with self.test_session():
      # Add shrinkage config.
      learning_rate = 0.0001
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")

      # Create zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([0, 0], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.0], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.global_variables_initializer().run()

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [[], []],
              learning_rate=learning_rate)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())

      # New tree is added with shrinkage weight.
      self.assertAllClose([learning_rate], output_ensemble.tree_weights)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertAllEqual([1, 2], feature_usage_counts.eval())
      self.assertArrayNear([0.5 * learning_rate, 0.3 * learning_rate],
                           feature_gains.eval(), 1e-6)
Code example #10
  def testWithEmptyEnsemble(self):
    with self.test_session():
      # Create an empty ensemble.
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="empty")

      # Create zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=array_ops.zeros([1], dtypes.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=array_ops.zeros([1], dtypes.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.global_variables_initializer().run()

      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [2],
              feature_gains, [0.4], [[]],
              learning_rate=1.0)
      ]):
        result = model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1]

      # Output.
      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      output_ensemble.ParseFromString(result.eval())
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[0])
      self.assertEqual(1, len(output_ensemble.trees))

      self.assertAllEqual([1.0], output_ensemble.tree_weights)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)

      self.assertAllEqual([2], feature_usage_counts.eval())
      self.assertArrayNear([0.4], feature_gains.eval(), 1e-6)
Code example #11
  def testSerialization(self):
    with ops.Graph().as_default() as graph:
      with self.session(graph):
        tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
        # Bias tree only for second class.
        tree1 = tree_ensemble_config.trees.add()
        _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)

        tree_ensemble_config.tree_weights.append(1.0)

        # Depth 2 tree.
        tree2 = tree_ensemble_config.trees.add()
        tree_ensemble_config.tree_weights.append(1.0)
        _set_float_split(tree2.nodes.add()
                         .sparse_float_binary_split_default_right.split, 1, 4.0,
                         1, 2)
        _set_float_split(tree2.nodes.add().dense_float_binary_split, 0, 9.0, 3,
                         4)
        _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
        _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
        _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)

        tree_ensemble_handle = model_ops.tree_ensemble_variable(
            stamp_token=7,
            tree_ensemble_config=tree_ensemble_config.SerializeToString(),
            name="saver_tree")
        stamp_token, serialized_config = model_ops.tree_ensemble_serialize(
            tree_ensemble_handle)
        resources.initialize_resources(resources.shared_resources()).run()
        self.assertEqual(stamp_token.eval(), 7)
        serialized_config = serialized_config.eval()

    with ops.Graph().as_default() as graph:
      with self.session(graph):
        tree_ensemble_handle2 = model_ops.tree_ensemble_variable(
            stamp_token=9,
            tree_ensemble_config=serialized_config,
            name="saver_tree2")
        resources.initialize_resources(resources.shared_resources()).run()

        # Prepare learner config.
        learner_config = learner_pb2.LearnerConfig()
        learner_config.num_classes = 3

        result, _ = prediction_ops.gradient_trees_prediction(
            tree_ensemble_handle2,
            self._seed, [self._dense_float_tensor], [
                self._sparse_float_indices1, self._sparse_float_indices2
            ], [self._sparse_float_values1, self._sparse_float_values2],
            [self._sparse_float_shape1,
             self._sparse_float_shape2], [self._sparse_int_indices1],
            [self._sparse_int_values1], [self._sparse_int_shape1],
            learner_config=learner_config.SerializeToString(),
            apply_dropout=False,
            apply_averaging=False,
            center_bias=False,
            reduce_dim=True)

        # Re-serialize tree.
        stamp_token2, serialized_config2 = model_ops.tree_ensemble_serialize(
            tree_ensemble_handle2)

        # The first example gets the class-1 bias of -0.2 from the first tree
        # plus the leaf-2 payload of 0.5 (its sparse feature is missing), hence
        # [0.5, -0.2]; the second example gets the same class-1 bias of -0.2
        # plus the leaf-3 payload of 1.2 for class 1, hence [0.0, 1.0].
        self.assertEqual(stamp_token2.eval(), 9)

        # Class 2 does not have a score in any leaf => it gets score 0.
        self.assertEqual(serialized_config2.eval(), serialized_config)
        self.assertAllClose(result.eval(), [[0.5, -0.2], [0, 1.0]])
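Note: _set_float_split is a third helper not shown in these snippets. Given
the (split, feature_column, threshold, left_id, right_id) call sites above and
the split fields spelled out in the expected_tree protos below, a plausible
sketch is:

    def _set_float_split(split, feature_col, threshold, left_id, right_id):
        # Configure a binary split on a float feature column. (Sketch only.)
        split.feature_column = feature_col
        split.threshold = threshold
        split.left_id = left_id
        split.right_id = right_id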
Code example #12
File: gbdt_batch_test.py  Project: chdinh/tensorflow
  def testTrainFnMulticlassTreePerClass(self):
    """Tests the GBDT train for multiclass tree per class strategy."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the tree-per-class multiclass strategy.
      learner_config.multi_class_strategy = (
          learner_pb2.LearnerConfig.TREE_PER_CLASS)
      learner_config.num_classes = 5
      learner_config.regularization.l1 = 0
      # To make the matrix invertible.
      learner_config.regularization.l2 = 1e-5
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {
          "dense_float": array_ops.constant(
              [[1.0], [1.5], [2.0]], dtypes.float32),
      }

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      batch_size = 3
      predictions = array_ops.constant(
          [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
           [0.0, 0.0, 0.0, 2.0, 1.2]],
          dtype=dtypes.float32)

      labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
      weights = array_ops.ones([batch_size, 1], dtypes.float32)

      partition_ids = array_ops.zeros([batch_size], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          # With 13 trees and 5 classes, this should result in a tree built
          # for class 3 (see the leaf assertions below).
          "num_trees": 13,
      }

      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              losses.per_example_maxent_loss(
                  labels,
                  weights,
                  predictions,
                  num_classes=learner_config.num_classes)[0]),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [1])
      self.assertEqual(stamp_token.eval(), 2)

      # One node for the split plus two child nodes.
      self.assertEqual(3, len(output.trees[0].nodes))

      # Leaves will have a sparse vector for class 3.
      self.assertEqual(1,
                       len(output.trees[0].nodes[1].leaf.sparse_vector.index))
      self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
      self.assertAlmostEqual(
          -1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])

      self.assertEqual(1,
                       len(output.trees[0].nodes[2].leaf.sparse_vector.index))
      self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
      self.assertAlmostEqual(
          0.893284678459, output.trees[0].nodes[2].leaf.sparse_vector.value[0])
Code example #13
File: gbdt_batch_test.py  Project: chdinh/tensorflow
  def testTrainFnMulticlassDiagonalHessian(self):
    """Tests the GBDT train for multiclass diagonal hessian."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 1
      # Use the diagonal hessian multiclass strategy.
      learner_config.multi_class_strategy = (
          learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
      learner_config.num_classes = 5
      learner_config.regularization.l1 = 0
      # To make the matrix invertible.
      learner_config.regularization.l2 = 1e-5
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      batch_size = 3
      features = {}
      features["dense_float"] = array_ops.constant(
          [0.3, 1.5, 1.1], dtype=dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
           [0.0, 0.0, 0.0, 0.0, 1.2]],
          dtype=dtypes.float32)

      labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
      weights = array_ops.ones([batch_size, 1], dtypes.float32)

      partition_ids = array_ops.zeros([batch_size], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 0,
      }

      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              losses.per_example_maxent_loss(
                  labels,
                  weights,
                  predictions,
                  num_classes=learner_config.num_classes)[0]),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      # We get 3 nodes: one split node and two leaves.
      self.assertEqual(len(output.trees[0].nodes), 3)
      self.assertAllClose(output.tree_weights, [1])
      self.assertEqual(stamp_token.eval(), 2)

      # Leaves should have a dense vector of size 5.
      expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
      expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
      self.assertArrayNear(expected_leaf_1,
                           output.trees[0].nodes[1].leaf.vector.value, 1e-3)
      self.assertArrayNear(expected_leaf_2,
                           output.trees[0].nodes[2].leaf.vector.value, 1e-3)
Code example #14
File: gbdt_batch_test.py  Project: chdinh/tensorflow
  def testTrainFnNonChiefWithCentering(self):
    """Tests the train function running on worker with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 0)
Code example #15
File: gbdt_batch_test.py  Project: chdinh/tensorflow
  def testTrainFnChiefWithBiasCentering(self):
    """Tests the train function running on chief with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect bias to be centered.
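      # For this squared loss the centered bias equals
      # mean(labels - predictions) = (1 + 0 + 1 - 1) / 4 = 0.25, which is the
      # leaf value asserted below.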
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
      self.assertEqual(len(output.trees), 1)
      self.assertAllEqual(output.tree_weights, [1.0])
      self.assertProtoEquals(expected_tree, output.trees[0])
      self.assertEqual(stamp_token.eval(), 1)
Code example #16
File: gbdt_batch_test.py  Project: chdinh/tensorflow
  def testTrainFnChiefScalingNumberOfExamples(self):
    """Tests the train function running on chief without bias centering."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
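      # Require 2^layer examples before growing layer `layer`, so each deeper
      # layer needs twice as many accumulated examples as the previous one.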
      num_examples_fn = (
          lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=num_examples_fn,
          learner_config=learner_config,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }

      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEqual(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
Code example #17
    def testSerialization(self):
        with ops.Graph().as_default() as graph:
            with self.test_session(graph):
                tree_ensemble_config = (
                    tree_config_pb2.DecisionTreeEnsembleConfig())
                # Bias tree only for second class.
                tree1 = tree_ensemble_config.trees.add()
                _append_to_leaf(tree1.nodes.add().leaf, 1, -0.2)

                tree_ensemble_config.tree_weights.append(1.0)

                # Depth 2 tree.
                tree2 = tree_ensemble_config.trees.add()
                tree_ensemble_config.tree_weights.append(1.0)
                _set_float_split(
                    tree2.nodes.add().sparse_float_binary_split_default_right.
                    split, 1, 4.0, 1, 2)
                _set_float_split(tree2.nodes.add().dense_float_binary_split, 0,
                                 9.0, 3, 4)
                _append_to_leaf(tree2.nodes.add().leaf, 0, 0.5)
                _append_to_leaf(tree2.nodes.add().leaf, 1, 1.2)
                _append_to_leaf(tree2.nodes.add().leaf, 0, -0.9)

                tree_ensemble_handle = model_ops.tree_ensemble_variable(
                    stamp_token=7,
                    tree_ensemble_config=tree_ensemble_config.SerializeToString(),
                    name="saver_tree")
                stamp_token, serialized_config = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle)
                resources.initialize_resources(
                    resources.shared_resources()).run()
                self.assertEqual(stamp_token.eval(), 7)
                serialized_config = serialized_config.eval()

        with ops.Graph().as_default() as graph:
            with self.test_session(graph):
                tree_ensemble_handle2 = model_ops.tree_ensemble_variable(
                    stamp_token=9,
                    tree_ensemble_config=serialized_config,
                    name="saver_tree2")
                resources.initialize_resources(
                    resources.shared_resources()).run()

                # Prepare learner config.
                learner_config = learner_pb2.LearnerConfig()
                learner_config.num_classes = 3

                result, _, _ = prediction_ops.gradient_trees_prediction(
                    tree_ensemble_handle2,
                    self._seed, [self._dense_float_tensor],
                    [self._sparse_float_indices1, self._sparse_float_indices2],
                    [self._sparse_float_values1, self._sparse_float_values2],
                    [self._sparse_float_shape1, self._sparse_float_shape2],
                    [self._sparse_int_indices1], [self._sparse_int_values1],
                    [self._sparse_int_shape1],
                    learner_config=learner_config.SerializeToString(),
                    apply_dropout=False,
                    apply_averaging=False,
                    center_bias=False,
                    reduce_dim=True)

                # Re-serialize tree.
                stamp_token2, serialized_config2 = model_ops.tree_ensemble_serialize(
                    tree_ensemble_handle2)

                # The first example gets the class-1 bias of -0.2 from the
                # first tree plus the leaf-2 payload of 0.5 (its sparse feature
                # is missing), hence [0.5, -0.2]; the second example gets the
                # same class-1 bias of -0.2 plus the leaf-3 payload of 1.2 for
                # class 1, hence [0.0, 1.0].
                self.assertEqual(stamp_token2.eval(), 9)

                # Class 2 does not have a score in any leaf => it gets score 0.
                self.assertEqual(serialized_config2.eval(), serialized_config)
                self.assertAllClose(result.eval(), [[0.5, -0.2], [0, 1.0]])
Code example #18
    def testTrainFnChiefScalingNumberOfExamples(self):
        """Tests the train function running on chief without bias centering."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            num_examples_fn = (lambda layer: math_ops.pow(
                math_ops.cast(2, dtypes.int64), layer) * 1)
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=num_examples_fn,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 12,
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(
                loss=math_ops.reduce_mean(
                    _squared_loss(labels, weights, predictions)),
                predictions_dict=predictions_dict,
                labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])

            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [0.1])
            self.assertEqual(stamp_token.eval(), 2)
            expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
            self.assertProtoEquals(expected_tree, output.trees[0])
Code example #19
  def testWithExistingEnsembleAndDropout(self):
    with self.test_session():
      tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      # Add 10 trees with some weights.
      for i in range(0, 10):
        tree = tree_ensemble.trees.add()
        _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
        tree_ensemble.tree_weights.append(i + 1)
        meta = tree_ensemble.tree_metadata.add()
        meta.num_tree_weight_updates = 1
      tree_ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0,
          tree_ensemble_config=tree_ensemble.SerializeToString(),
          name="existing")
      # Create non-zero feature importance.
      feature_usage_counts = variables.Variable(
          initial_value=np.array([2, 3], np.int64),
          name="feature_usage_counts",
          trainable=False)
      feature_gains = variables.Variable(
          initial_value=np.array([0.0, 0.3], np.float32),
          name="feature_gains",
          trainable=False)

      resources.initialize_resources(resources.shared_resources()).run()
      variables.global_variables_initializer().run()

      dropped = [1, 6, 8]
      dropped_original_weights = [2.0, 7.0, 9.0]

      output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
      with ops.control_dependencies([
          ensemble_optimizer_ops.add_trees_to_ensemble(
              tree_ensemble_handle,
              self._ensemble_to_add.SerializeToString(),
              feature_usage_counts, [1, 2],
              feature_gains, [0.5, 0.3], [dropped, dropped_original_weights],
              learning_rate=0.1)
      ]):
        output_ensemble.ParseFromString(
            model_ops.tree_ensemble_serialize(tree_ensemble_handle)[1].eval())
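
      # Dropout arithmetic implied by the assertions below: with k = 3 dropped
      # trees, each dropped tree's weight is rescaled by k / (k + 1) = 0.75
      # (2 -> 1.5, 7 -> 5.25, 9 -> 6.75), and the new tree enters with weight
      # sum(dropped original weights) / (k + 1) = 18 / 4 = 4.5.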

      # Output.
      self.assertEqual(11, len(output_ensemble.trees))
      self.assertProtoEquals(self._tree_to_add, output_ensemble.trees[10])
      self.assertAllClose(4.5, output_ensemble.tree_weights[10])

      self.assertAllClose([1., 1.5, 3., 4., 5., 6., 5.25, 8., 6.75, 10., 4.5],
                          output_ensemble.tree_weights)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[0].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[1].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[2].num_tree_weight_updates)

      self.assertEqual(1,
                       output_ensemble.tree_metadata[3].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[4].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[5].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[6].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[7].num_tree_weight_updates)
      self.assertEqual(2,
                       output_ensemble.tree_metadata[8].num_tree_weight_updates)
      self.assertEqual(1,
                       output_ensemble.tree_metadata[9].num_tree_weight_updates)
      self.assertEqual(
          1, output_ensemble.tree_metadata[10].num_tree_weight_updates)
      self.assertAllEqual([3, 5], feature_usage_counts.eval())
      self.assertArrayNear([0.05, 0.33], feature_gains.eval(), 1e-6)
Code example #20
  def testTrainFnChiefFeatureSelectionReachedLimitIncrementAttemptedLayer(self):
    """Tests the train function running on chief with feature selection."""
    with self.test_session() as sess:
      tree_ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      tree = tree_ensemble_config.trees.add()

      _set_float_split(tree.nodes.add()
                       .sparse_float_binary_split_default_right.split, 2, 4.0,
                       1, 2)
      _append_to_leaf(tree.nodes.add().leaf, 0, 0.5)
      _append_to_leaf(tree.nodes.add().leaf, 1, 1.2)
      tree_ensemble_config.tree_weights.append(1.0)
      metadata = tree_ensemble_config.tree_metadata.add()
      metadata.is_finalized = False
      metadata.num_layers_grown = 1
      tree_ensemble_config = tree_ensemble_config.SerializeToString()
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config=tree_ensemble_config,
          name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.max_number_of_unique_feature_columns = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      # Both features will be disabled since the feature selection limit is
      # already reached.
      features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
      features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
                                                     dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions":
              predictions,
          "predictions_no_dropout":
              predictions,
          "partition_ids":
              partition_ids,
          "ensemble_stamp":
              ensemble_stamp,
          "num_trees":
              12,
          # The number of used handlers has already reached the limit of 1,
          # so both handlers will be disabled.
          "num_used_handlers":
              array_ops.constant(1, dtype=dtypes.int64),
          "used_handlers_mask":
              array_ops.constant([False, False], dtype=dtypes.bool),
      }

      labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 1)
      self.assertEqual(output.growing_metadata.num_layers_attempted, 1)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      # Make sure the trees are not modified, but the num_layers_attempted is
      # incremented so that eventually the training stops.
      self.assertEqual(len(output.trees), 1)
      self.assertEqual(len(output.trees[0].nodes), 3)

      self.assertEqual(output.growing_metadata.num_layers_attempted, 2)
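
The point of this test is that a worker which cannot grow a layer (the unique-feature-column limit is reached and no handler is enabled) must still count the attempt, or training would never hit its stopping condition. A minimal schematic of that bookkeeping, with hypothetical names rather than the actual gbdt_batch internals:

# Schematic only: illustrates the invariant the test asserts, not the
# real implementation.
def attempt_layer(num_used_handlers, max_unique_columns, used_handlers_mask,
                  num_layers_attempted):
    limit_reached = num_used_handlers >= max_unique_columns
    # No handler may propose splits, so the ensemble stays unchanged...
    can_grow = any(used_handlers_mask) or not limit_reached
    # ...but the attempt is still recorded so training eventually stops.
    return can_grow, num_layers_attempted + 1

can_grow, attempts = attempt_layer(1, 1, [False, False], 1)
assert not can_grow and attempts == 2  # mirrors the assertions above
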
Code Example #21
    def testTrainFnMulticlassTreePerClass(self):
        """Tests the GBDT train for multiclass tree per class strategy."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 1
            # Use the tree-per-class multiclass strategy.
            learner_config.multi_class_strategy = (
                learner_pb2.LearnerConfig.TREE_PER_CLASS)
            learner_config.num_classes = 5
            learner_config.regularization.l1 = 0
            # To make the matrix invertible.
            learner_config.regularization.l2 = 1e-5
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {
                "dense_float":
                array_ops.constant([[1.0], [1.5], [2.0]], dtypes.float32),
            }

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            batch_size = 3
            predictions = array_ops.constant(
                [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
                 [0.0, 0.0, 0.0, 2.0, 1.2]],
                dtype=dtypes.float32)

            labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
            weights = array_ops.ones([batch_size, 1], dtypes.float32)

            partition_ids = array_ops.zeros([batch_size], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                # This should result in a tree built for class 3 (13 % 5).
                "num_trees": 13,
            }

            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                losses.per_example_maxent_loss(
                    labels,
                    weights,
                    predictions,
                    num_classes=learner_config.num_classes)[0]),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])
            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [1])
            self.assertEqual(stamp_token.eval(), 2)

            # One node for a split, two children nodes.
            self.assertEqual(3, len(output.trees[0].nodes))

            # Leaves will have a sparse vector for class 3.
            self.assertEqual(
                1, len(output.trees[0].nodes[1].leaf.sparse_vector.index))
            self.assertEqual(
                3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
            self.assertAlmostEqual(
                -1.13134455681,
                output.trees[0].nodes[1].leaf.sparse_vector.value[0])

            self.assertEqual(
                1, len(output.trees[0].nodes[2].leaf.sparse_vector.index))
            self.assertEqual(
                3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
            self.assertAlmostEqual(
                0.893284678459,
                output.trees[0].nodes[2].leaf.sparse_vector.value[0])
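
The class the new tree targets is consistent with cycling over classes by tree count: with "num_trees" of 13 and 5 classes, 13 % 5 = 3, matching the sparse_vector index asserted above. A sketch under that assumption (the modulo rule is inferred from the assertions, not quoted from the implementation):

num_trees = 13   # from predictions_dict above
num_classes = 5  # from learner_config above
target_class = num_trees % num_classes
assert target_class == 3  # leaves carry a sparse vector with index 3
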
Code Example #22
    def testTrainFnMulticlassDiagonalHessian(self):
        """Tests the GBDT train for multiclass diagonal hessian."""
        with self.test_session() as sess:
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")

            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 1
            # Use the diagonal hessian multiclass strategy.
            learner_config.multi_class_strategy = (
                learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
            learner_config.num_classes = 5
            learner_config.regularization.l1 = 0
            # To make the matrix invertible.
            learner_config.regularization.l2 = 1e-5
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            batch_size = 3
            features = {}
            features["dense_float"] = array_ops.ones([batch_size, 1],
                                                     dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=False,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant(
                [[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
                 [0.0, 0.0, 0.0, 0.0, 1.2]],
                dtype=dtypes.float32)

            labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
            weights = array_ops.ones([batch_size, 1], dtypes.float32)

            partition_ids = array_ops.zeros([batch_size], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 0,
            }

            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                losses.per_example_maxent_loss(
                    labels,
                    weights,
                    predictions,
                    num_classes=learner_config.num_classes)[0]),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect no splits to be chosen because the quantile
            # buckets will not be ready.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 0)
            self.assertEqual(len(output.tree_weights), 0)
            self.assertEqual(stamp_token.eval(), 1)

            # Update the stamp to be able to run a second time.
            sess.run([ensemble_stamp.assign_add(1)])
            # On second run, expect a trivial split to be chosen to basically
            # predict the average.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            self.assertEqual(len(output.trees), 1)
            self.assertAllClose(output.tree_weights, [1])
            self.assertEqual(stamp_token.eval(), 2)

            # Leaf should have a dense vector of size 5.
            expected = [
                -1.26767396927, -1.13043296337, 4.58542203903, 1.81428349018,
                -2.43038392067
            ]
            for i in range(learner_config.num_classes):
                self.assertAlmostEqual(
                    expected[i], output.trees[0].nodes[1].leaf.vector.value[i])
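
The expected leaf vector can be reproduced with a single Newton step per class, assuming gradient g_c = p_c - y_c and diagonal hessian h_c = p_c * (1 - p_c) summed over examples, with leaf_c = -sum(g_c) / (sum(h_c) + l2). This sketch matches the expected values above to a few decimal places; it is a reconstruction of the arithmetic, not the library's code path:

import numpy as np

def diagonal_hessian_leaf(logits, labels, num_classes, l2=1e-5):
    probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    onehot = np.eye(num_classes)[labels]
    g = (probs - onehot).sum(axis=0)          # per-class gradient sums
    h = (probs * (1.0 - probs)).sum(axis=0)   # per-class diagonal hessian sums
    return -g / (h + l2)

logits = np.array([[0.0, -1.0, 0.5, 1.2, 3.1],
                   [1.0, 0.0, 0.8, 0.3, 1.0],
                   [0.0, 0.0, 0.0, 0.0, 1.2]])
leaf = diagonal_hessian_leaf(logits, np.array([2, 2, 3]), 5)
expected = [-1.26767396927, -1.13043296337, 4.58542203903, 1.81428349018,
            -2.43038392067]
assert np.allclose(leaf, expected, atol=1e-2)
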
Code Example #23
    def testTrainFnChiefWithBiasCentering(self):
        """Tests the train function running on chief with bias centering."""
        with self.test_session():
            ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
            learner_config = learner_pb2.LearnerConfig()
            learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
            learner_config.num_classes = 2
            learner_config.regularization.l1 = 0
            learner_config.regularization.l2 = 0
            learner_config.constraints.max_tree_depth = 1
            learner_config.constraints.min_node_weight = 0
            features = {}
            features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)

            gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
                is_chief=True,
                num_ps_replicas=0,
                center_bias=True,
                ensemble_handle=ensemble_handle,
                examples_per_layer=1,
                learner_config=learner_config,
                features=features)

            predictions = array_ops.constant([[0.0], [1.0], [0.0], [2.0]],
                                             dtype=dtypes.float32)
            partition_ids = array_ops.zeros([4], dtypes.int32)
            ensemble_stamp = variables.Variable(initial_value=0,
                                                name="ensemble_stamp",
                                                trainable=False,
                                                dtype=dtypes.int64)

            predictions_dict = {
                "predictions": predictions,
                "predictions_no_dropout": predictions,
                "partition_ids": partition_ids,
                "ensemble_stamp": ensemble_stamp,
                "num_trees": 12,
            }

            labels = array_ops.ones([4, 1], dtypes.float32)
            weights = array_ops.ones([4, 1], dtypes.float32)
            # Create train op.
            train_op = gbdt_model.train(loss=math_ops.reduce_mean(
                _squared_loss(labels, weights, predictions)),
                                        predictions_dict=predictions_dict,
                                        labels=labels)
            variables.global_variables_initializer().run()
            resources.initialize_resources(resources.shared_resources()).run()

            # On first run, expect bias to be centered.
            train_op.run()
            stamp_token, serialized = model_ops.tree_ensemble_serialize(
                ensemble_handle)
            output = tree_config_pb2.DecisionTreeEnsembleConfig()
            output.ParseFromString(serialized.eval())
            expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
            self.assertEqual(len(output.trees), 1)
            self.assertAllEqual(output.tree_weights, [1.0])
            self.assertProtoEquals(expected_tree, output.trees[0])
            self.assertEqual(stamp_token.eval(), 1)
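
The 0.25 bias leaf follows directly from one Newton step on the squared loss, assuming g = prediction - label and h = 1 per example, so bias = -mean(g) / mean(h):

import numpy as np

# Sketch of the bias-centering arithmetic behind the expected 0.25 leaf.
predictions = np.array([0.0, 1.0, 0.0, 2.0])
labels = np.ones(4)
g = predictions - labels            # [-1, 0, -1, 1]
h = np.ones(4)
bias = -g.mean() / h.mean()
assert np.isclose(bias, 0.25)
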
Code Example #24
  def testTrainFnChiefFeatureSelectionWithGoodSplits(self):
    """Tests the train function running on chief with feature selection."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.max_number_of_unique_feature_columns = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float_0"] = array_ops.ones([4, 1], dtypes.float32)
      # Feature 1 is predictive and is in our selected features so it will be
      # used even when we're at the limit.
      features["dense_float_1"] = array_ops.constant([0, 0, 1, 1],
                                                     dtypes.float32)

      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1,
          features=features)

      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)

      predictions_dict = {
          "predictions":
              predictions,
          "predictions_no_dropout":
              predictions,
          "partition_ids":
              partition_ids,
          "ensemble_stamp":
              ensemble_stamp,
          "num_trees":
              12,
          "num_used_handlers":
              array_ops.constant(1, dtype=dtypes.int64),
          "used_handlers_mask":
              array_ops.constant([False, True], dtype=dtypes.bool),
      }

      labels = array_ops.constant([0, 0, 1, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()

      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEqual(len(output.trees), 0)
      self.assertEqual(len(output.tree_weights), 0)
      self.assertEqual(stamp_token.eval(), 1)

      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])

      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())

      self.assertEqual(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEqual(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              feature_column: 1
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0.5
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: -0.5
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
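
The tree weight of 0.1 is simply the configured fixed learning rate applied to the new tree. The 0.5 gain and the {0.0, -0.5} leaves also fall out of standard gradient-boosting arithmetic, assuming _squared_loss is (p - y)**2 so g = 2 * (p - y) and h = 2 per example (which side of the split ends up "left" after quantile bucketing is an implementation detail this sketch does not model):

import numpy as np

g = 2 * (np.array([0.0, 1.0, 0.0, 2.0]) - np.array([0.0, 0.0, 1.0, 1.0]))
h = 2 * np.ones(4)
lo, hi = [0, 1], [2, 3]  # dense_float_1 == 0 vs. dense_float_1 == 1
Gl, Hl, Gr, Hr = g[lo].sum(), h[lo].sum(), g[hi].sum(), h[hi].sum()
gain = Gl**2 / Hl + Gr**2 / Hr - g.sum()**2 / h.sum()
assert np.isclose(gain, 0.5)                   # the node_metadata gain above
assert np.allclose(sorted([-Gl / Hl, -Gr / Hr]), [-0.5, 0.0])  # the two leaves
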
Code Example #25
    def testWithExistingEnsembleAndDropout(self):
        with self.test_session():
            tree_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            # Add 10 trees with some weights.
            for i in range(0, 10):
                tree = tree_ensemble.trees.add()
                _append_to_leaf(tree.nodes.add().leaf, 0, -0.4)
                tree_ensemble.tree_weights.append(i + 1)
                meta = tree_ensemble.tree_metadata.add()
                meta.num_tree_weight_updates = 1
            tree_ensemble_handle = model_ops.tree_ensemble_variable(
                stamp_token=0,
                tree_ensemble_config=tree_ensemble.SerializeToString(),
                name="existing")
            # Create non-zero feature importance.
            feature_usage_counts = variables.Variable(
                initial_value=np.array([2, 3], np.int64),
                name="feature_usage_counts",
                trainable=False)
            feature_gains = variables.Variable(initial_value=np.array(
                [0.0, 0.3], np.float32),
                                               name="feature_gains",
                                               trainable=False)

            resources.initialize_resources(resources.shared_resources()).run()
            variables.global_variables_initializer().run()

            dropped = [1, 6, 8]
            dropped_original_weights = [2.0, 7.0, 9.0]

            output_ensemble = tree_config_pb2.DecisionTreeEnsembleConfig()
            with ops.control_dependencies([
                    ensemble_optimizer_ops.add_trees_to_ensemble(
                        tree_ensemble_handle,
                        self._ensemble_to_add.SerializeToString(),
                        feature_usage_counts, [1, 2],
                        feature_gains, [0.5, 0.3],
                        [dropped, dropped_original_weights],
                        learning_rate=0.1)
            ]):
                output_ensemble.ParseFromString(
                    model_ops.tree_ensemble_serialize(tree_ensemble_handle)
                    [1].eval())

            # Output.
            self.assertEqual(11, len(output_ensemble.trees))
            self.assertProtoEquals(self._tree_to_add,
                                   output_ensemble.trees[10])
            self.assertAllClose(4.5, output_ensemble.tree_weights[10])

            self.assertAllClose(
                [1., 1.5, 3., 4., 5., 6., 5.25, 8., 6.75, 10., 4.5],
                output_ensemble.tree_weights)

            self.assertEqual(
                1, output_ensemble.tree_metadata[0].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[1].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[2].num_tree_weight_updates)

            self.assertEqual(
                1, output_ensemble.tree_metadata[3].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[4].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[5].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[6].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[7].num_tree_weight_updates)
            self.assertEqual(
                2, output_ensemble.tree_metadata[8].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[9].num_tree_weight_updates)
            self.assertEqual(
                1, output_ensemble.tree_metadata[10].num_tree_weight_updates)
            self.assertAllEqual([3, 5], feature_usage_counts.eval())
            self.assertArrayNear([0.05, 0.33], feature_gains.eval(), 1e-6)
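
The expected weights encode a DART-style update: each dropped tree is rescaled by k / (k + 1) for k dropped trees, and the new tree receives the dropped weight mass divided by k + 1. A sketch under that assumption (inferred from the expected values, not quoted from the op's implementation):

import numpy as np

dropped_original_weights = np.array([2.0, 7.0, 9.0])  # trees 1, 6, 8
k = len(dropped_original_weights)
print(dropped_original_weights * k / (k + 1))    # [1.5, 5.25, 6.75] as asserted
print(dropped_original_weights.sum() / (k + 1))  # 4.5, the new tree's weight
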