Code Example #1
    def testEmpty(self):
        with self.cached_session() as sess:
            gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
            hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
            partition_ids = [0, 0, 0, 1]
            indices = constant_op.constant_v1([],
                                              dtype=dtypes.int64,
                                              shape=[0, 2])
            values = constant_op.constant_v1([], dtype=dtypes.int64)

            gradient_shape = tensor_shape.TensorShape([])
            hessian_shape = tensor_shape.TensorShape([])
            class_id = -1

            split_handler = categorical_split_handler.EqualitySplitHandler(
                l1_regularization=0.1,
                l2_regularization=1,
                tree_complexity_regularization=0,
                min_node_weight=0,
                sparse_int_column=sparse_tensor.SparseTensor(
                    indices, values, [4, 1]),
                feature_column_group_id=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
                init_stamp_token=0)
            resources.initialize_resources(resources.shared_resources()).run()

            empty_gradients, empty_hessians = get_empty_tensors(
                gradient_shape, hessian_shape)
            example_weights = array_ops.ones([4, 1], dtypes.float32)

            update_1 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            with ops.control_dependencies([update_1]):
                are_splits_ready, partitions, gains, splits = (
                    split_handler.make_splits(0, 1, class_id))
                are_splits_ready, partitions, gains, splits = (sess.run(
                    [are_splits_ready, partitions, gains, splits]))
        self.assertTrue(are_splits_ready)
        self.assertEqual(len(partitions), 0)
        self.assertEqual(len(gains), 0)
        self.assertEqual(len(splits), 0)
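
The tests in this section call a module-level helper `get_empty_tensors` that is not shown in this excerpt. A plausible reconstruction, mirroring the empty-gradient preparation in the `train` method of Code Example #2 (stats shapes prefixed with a batch dimension of 1), would be:

    def get_empty_tensors(gradient_shape, hessian_shape):
        # Prefix each stats shape with a batch dimension of 1, as in
        # Code Example #2's empty_grad_shape / empty_hess_shape.
        empty_grad_shape = [1] + gradient_shape.as_list()
        empty_hess_shape = [1] + hessian_shape.as_list()
        empty_gradients = constant_op.constant_v1(
            [], dtype=dtypes.float32, shape=empty_grad_shape)
        empty_hessians = constant_op.constant_v1(
            [], dtype=dtypes.float32, shape=empty_hess_shape)
        return empty_gradients, empty_hessians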
Code Example #2
    def train(self, loss, predictions_dict, labels):
        """Grows a new tree and adds it to the ensemble.

    Args:
      loss: A scalar tensor representing average loss of examples.
      predictions_dict: Dictionary of Rank 2 `Tensor` representing information
          about predictions per example.
      labels: Rank 2 `Tensor` representing labels per example.

    Returns:
      An op that adds a new tree to the ensemble.

    Raises:
      ValueError: if inputs are not valid.
    """
        # Get the worker device from input dependencies.
        input_deps = (self._dense_floats + self._sparse_float_indices +
                      self._sparse_int_indices)
        worker_device = input_deps[0].device

        # Get tensors relevant for training and form the loss.
        predictions = predictions_dict[PREDICTIONS]
        partition_ids = predictions_dict[PARTITION_IDS]
        ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
        gradients = gradients_impl.gradients(loss,
                                             predictions,
                                             name="Gradients",
                                             colocate_gradients_with_ops=False,
                                             gate_gradients=0,
                                             aggregation_method=None)[0]
        strategy = self._learner_config.multi_class_strategy

        class_id = -1
        # Handle different multiclass strategies.
        if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
            # We build one vs rest trees.
            gradient_shape = tensor_shape.scalar()
            hessian_shape = tensor_shape.scalar()

            if self._logits_dimension == 1:
                # We have only 1 score; gradients has shape [batch, 1].
                hessians = gradients_impl.gradients(
                    gradients,
                    predictions,
                    name="Hessian",
                    colocate_gradients_with_ops=False,
                    gate_gradients=0,
                    aggregation_method=None)[0]

                squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
                squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
            else:
                hessian_list = self._diagonal_hessian(gradients, predictions)
                # Assemble hessian list into a tensor.
                hessians = array_ops.stack(hessian_list, axis=1)

                # Choose the class for which the tree is built (one vs rest).
                class_id = math_ops.to_int32(
                    predictions_dict[NUM_TREES_ATTEMPTED] %
                    self._logits_dimension)

                # Use class id tensor to get the column with that index from gradients
                # and hessians.
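                # (_get_column_by_index is not shown in this excerpt; it is
                # assumed to gather column class_id from the
                # [batch, num_classes] tensor, yielding one value per example.)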
                squeezed_gradients = array_ops.squeeze(
                    _get_column_by_index(gradients, class_id))
                squeezed_hessians = array_ops.squeeze(
                    _get_column_by_index(hessians, class_id))
        else:
            # Other multiclass strategies.
            gradient_shape = tensor_shape.TensorShape([self._logits_dimension])

            if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
                hessian_shape = tensor_shape.TensorShape(
                    [self._logits_dimension, self._logits_dimension])
                hessian_list = self._full_hessian(gradients, predictions)
            else:
                # Diagonal hessian strategy.
                hessian_shape = tensor_shape.TensorShape(
                    [self._logits_dimension])
                hessian_list = self._diagonal_hessian(gradients, predictions)

            squeezed_gradients = gradients
            hessians = array_ops.stack(hessian_list, axis=1)
            squeezed_hessians = hessians

        # Get the weights for each example for quantile calculation.
        weights = self._get_weights(hessian_shape, squeezed_hessians)

        regularization_config = self._learner_config.regularization
        min_node_weight = self._learner_config.constraints.min_node_weight
        # Create all handlers ensuring resources are evenly allocated across PS.
        fc_name_idx = 0
        handlers = []
        init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
        with ops.device(self._get_replica_device_setter(worker_device)):
            # Create handlers for dense float columns
            for dense_float_column_idx in range(len(self._dense_floats)):
                fc_name = self._fc_names[fc_name_idx]
                handlers.append(
                    ordinal_split_handler.DenseSplitHandler(
                        l1_regularization=regularization_config.l1,
                        l2_regularization=regularization_config.l2,
                        tree_complexity_regularization=(
                            regularization_config.tree_complexity),
                        min_node_weight=min_node_weight,
                        feature_column_group_id=dense_float_column_idx,
                        epsilon=0.01,
                        num_quantiles=100,
                        dense_float_column=self._dense_floats[
                            dense_float_column_idx],
                        name=fc_name,
                        gradient_shape=gradient_shape,
                        hessian_shape=hessian_shape,
                        multiclass_strategy=strategy,
                        init_stamp_token=init_stamp_token))
                fc_name_idx += 1

            # Create handlers for sparse float columns.
            for sparse_float_column_idx in range(
                    len(self._sparse_float_indices)):
                fc_name = self._fc_names[fc_name_idx]
                handlers.append(
                    ordinal_split_handler.SparseSplitHandler(
                        l1_regularization=regularization_config.l1,
                        l2_regularization=regularization_config.l2,
                        tree_complexity_regularization=(
                            regularization_config.tree_complexity),
                        min_node_weight=min_node_weight,
                        feature_column_group_id=sparse_float_column_idx,
                        epsilon=0.01,
                        num_quantiles=100,
                        sparse_float_column=sparse_tensor.SparseTensor(
                            self._sparse_float_indices[sparse_float_column_idx],
                            self._sparse_float_values[sparse_float_column_idx],
                            self._sparse_float_shapes[sparse_float_column_idx]),
                        name=fc_name,
                        gradient_shape=gradient_shape,
                        hessian_shape=hessian_shape,
                        multiclass_strategy=strategy,
                        init_stamp_token=init_stamp_token))
                fc_name_idx += 1

            # Create handlers for sparse int columns.
            for sparse_int_column_idx in range(len(self._sparse_int_indices)):
                fc_name = self._fc_names[fc_name_idx]
                handlers.append(
                    categorical_split_handler.EqualitySplitHandler(
                        l1_regularization=regularization_config.l1,
                        l2_regularization=regularization_config.l2,
                        tree_complexity_regularization=(
                            regularization_config.tree_complexity),
                        min_node_weight=min_node_weight,
                        feature_column_group_id=sparse_int_column_idx,
                        sparse_int_column=sparse_tensor.SparseTensor(
                            self._sparse_int_indices[sparse_int_column_idx],
                            self._sparse_int_values[sparse_int_column_idx],
                            self._sparse_int_shapes[sparse_int_column_idx]),
                        name=fc_name,
                        gradient_shape=gradient_shape,
                        hessian_shape=hessian_shape,
                        multiclass_strategy=strategy,
                        init_stamp_token=init_stamp_token))
                fc_name_idx += 1

            # Create steps accumulator.
            steps_accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.scalar(),
                hessian_shape=tensor_shape.scalar(),
                name="StepsAccumulator")

            # Create bias stats accumulator.
            bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                name="BiasAccumulator")

            # Create ensemble stats variables.
            num_layer_examples = variables.Variable(
                initial_value=array_ops.zeros([], dtypes.int64),
                name="num_layer_examples",
                trainable=False)
            num_layer_steps = variables.Variable(
                initial_value=array_ops.zeros([], dtypes.int64),
                name="num_layer_steps",
                trainable=False)
            num_layers = variables.Variable(
                initial_value=array_ops.zeros([], dtypes.int64),
                name="num_layers",
                trainable=False)
            active_tree = variables.Variable(
                initial_value=array_ops.zeros([], dtypes.int64),
                name="active_tree",
                trainable=False)
            active_layer = variables.Variable(
                initial_value=array_ops.zeros([], dtypes.int64),
                name="active_layer",
                trainable=False)

        # Create ensemble stats summaries.
        summary.scalar("layer_stats/num_examples", num_layer_examples)
        summary.scalar("layer_stats/num_steps", num_layer_steps)
        summary.scalar("ensemble_stats/active_tree", active_tree)
        summary.scalar("ensemble_stats/active_layer", active_layer)

        # Update bias stats.
        stats_update_ops = []
        continue_centering = variables.Variable(
            initial_value=self._center_bias,
            name="continue_centering",
            trainable=False)
        stats_update_ops.append(
            control_flow_ops.cond(
                continue_centering,
                self._make_update_bias_stats_fn(ensemble_stamp, predictions,
                                                gradients,
                                                bias_stats_accumulator),
                control_flow_ops.no_op))

        # Update handler stats.
        handler_reads = {}
        for handler in handlers:
            handler_reads[handler] = handler.scheduled_reads()

        handler_results = batch_ops_utils.run_handler_scheduled_ops(
            handler_reads, ensemble_stamp, worker_device)
        per_handler_updates = {}
        # Each handler tracks two values: whether it is active for the current
        # layer, and whether it will be active for the next layer.
        subsampling_type = self._learner_config.WhichOneof("feature_fraction")
        if subsampling_type == "feature_fraction_per_level":
            seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
            active_handlers_current_layer = stateless.stateless_random_uniform(
                shape=[len(handlers)], seed=[seed, 1])
            active_handlers_next_layer = stateless.stateless_random_uniform(
                shape=[len(handlers)], seed=[seed + 1, 1])
            active_handlers = array_ops.stack(
                [active_handlers_current_layer, active_handlers_next_layer],
                axis=1)
            active_handlers = (active_handlers <
                               self._learner_config.feature_fraction_per_level)
        elif subsampling_type == "feature_fraction_per_tree":
            seed = predictions_dict[NUM_TREES_ATTEMPTED]
            active_handlers_current_layer = stateless.stateless_random_uniform(
                shape=[len(handlers)], seed=[seed, 2])
            active_handlers_current_layer = (
                active_handlers_current_layer <
                self._learner_config.feature_fraction_per_tree)
            active_handlers = array_ops.stack(
                [active_handlers_current_layer,
                 array_ops.ones([len(handlers)], dtype=dtypes.bool)],
                axis=1)
        else:
            active_handlers = array_ops.ones([len(handlers), 2],
                                             dtype=dtypes.bool)

        # Prepare empty gradients and hessians when handlers are not ready.
        empty_hess_shape = [1] + hessian_shape.as_list()
        empty_grad_shape = [1] + gradient_shape.as_list()

        empty_gradients = constant_op.constant([],
                                               dtype=dtypes.float32,
                                               shape=empty_grad_shape)
        empty_hessians = constant_op.constant([],
                                              dtype=dtypes.float32,
                                              shape=empty_hess_shape)

        for handler_idx in range(len(handlers)):
            handler = handlers[handler_idx]
            is_active = active_handlers[handler_idx]
            updates, scheduled_updates = handler.update_stats(
                ensemble_stamp, partition_ids, squeezed_gradients,
                squeezed_hessians, empty_gradients, empty_hessians, weights,
                is_active, handler_results[handler])
            stats_update_ops.append(updates)
            per_handler_updates[handler] = scheduled_updates

        update_results = batch_ops_utils.run_handler_scheduled_ops(
            per_handler_updates, ensemble_stamp, worker_device)
        for update in update_results.values():
            stats_update_ops += update
        # Accumulate a step after updating stats.
        batch_size = math_ops.cast(array_ops.shape(labels)[0], dtypes.float32)
        with ops.control_dependencies(stats_update_ops):
            add_step_op = steps_accumulator.add(ensemble_stamp, [0], [[0, 0]],
                                                [batch_size], [1.0])

        # Determine learning rate.
        learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
            "tuner")
        if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
            tuner = getattr(self._learner_config.learning_rate_tuner,
                            learning_rate_tuner)
            learning_rate = tuner.learning_rate
        else:
            # TODO (nponomareva, soroush) do the line search. id:498 gh:499
            raise ValueError("Line search learning rate is not yet supported.")

        # After adding the step, decide if further processing is needed.
        ensemble_update_ops = [add_step_op]
        with ops.control_dependencies([add_step_op]):
            if self._is_chief:
                dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]

                # Get accumulated steps and examples for the current layer.
                _, _, _, _, acc_examples, acc_steps = (
                    steps_accumulator.serialize())
                acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
                acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
                ensemble_update_ops.append(
                    num_layer_examples.assign(acc_examples))
                ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
                # Determine whether we need to update tree ensemble.
                examples_per_layer = self._examples_per_layer
                if callable(examples_per_layer):
                    examples_per_layer = examples_per_layer(active_layer)
                ensemble_update_ops.append(
                    control_flow_ops.cond(
                        acc_examples >= examples_per_layer,
                        self._make_update_ensemble_fn(
                            ensemble_stamp, steps_accumulator,
                            bias_stats_accumulator, continue_centering,
                            learning_rate, handlers, num_layers, active_tree,
                            active_layer, dropout_seed, class_id),
                        control_flow_ops.no_op))

        # Calculate the loss to be reported.
        # Note: the loss is computed from predictions that include dropout, so
        # its value can fluctuate sharply across steps when the dropout ratio
        # is high; refer to eval_loss instead when judging convergence.
        return control_flow_ops.group(*ensemble_update_ops)
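
The feature_fraction_per_level branch above draws one uniform value per handler for the current layer and one for the next layer, then thresholds both against the configured fraction. A minimal NumPy sketch of that masking logic (the seeds and sizes here are illustrative stand-ins for the stateless_random_uniform calls, not the real ops):

    import numpy as np

    num_handlers = 5
    feature_fraction_per_level = 0.6
    # Stand-ins for stateless_random_uniform with seeds [seed, 1] and [seed + 1, 1].
    current_layer = np.random.RandomState(7).uniform(size=num_handlers)
    next_layer = np.random.RandomState(8).uniform(size=num_handlers)
    # Column 0: active for the current layer; column 1: active for the next one.
    active_handlers = np.stack([current_layer, next_layer], axis=1)
    is_active = active_handlers < feature_fraction_per_level  # bool [num_handlers, 2]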
Code Example #3
    def testGenerateFeatureSplitCandidates(self):
        with self.test_session() as sess:
            # The data looks like the following:
            # Example |  Gradients    | Partition | Feature ID     |
            # i0      |  (0.2, 0.12)  | 0         | 1,2            |
            # i1      |  (-0.5, 0.07) | 0         |                |
            # i2      |  (1.2, 0.2)   | 0         | 2              |
            # i3      |  (4.0, 0.13)  | 1         | 1              |
            gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
            hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
            partition_ids = [0, 0, 0, 1]
            indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
            values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)

            gradient_shape = tensor_shape.scalar()
            hessian_shape = tensor_shape.scalar()
            class_id = -1

            split_handler = categorical_split_handler.EqualitySplitHandler(
                l1_regularization=0.1,
                l2_regularization=1,
                tree_complexity_regularization=0,
                min_node_weight=0,
                sparse_int_column=sparse_tensor.SparseTensor(
                    indices, values, [4, 1]),
                feature_column_group_id=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
                init_stamp_token=0)
            resources.initialize_resources(resources.shared_resources()).run()

            empty_gradients, empty_hessians = get_empty_tensors(
                gradient_shape, hessian_shape)
            example_weights = array_ops.ones([4, 1], dtypes.float32)

            update_1 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            update_2 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))

            with ops.control_dependencies([update_1, update_2]):
                are_splits_ready, partitions, gains, splits = (
                    split_handler.make_splits(0, 1, class_id))
                are_splits_ready, partitions, gains, splits = (sess.run(
                    [are_splits_ready, partitions, gains, splits]))
        self.assertTrue(are_splits_ready)
        self.assertAllEqual([0, 1], partitions)

        # Check the split on partition 0.
        # -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
        expected_left_weight = -0.9848484848484846

        # (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
        expected_left_gain = 1.2803030303030298

        # -(-0.5 + 0.1) / (0.07 + 1)
        expected_right_weight = 0.37383177570093457

        # (-0.5 + 0.1) ** 2 / (0.07 + 1)
        expected_right_gain = 0.14953271028037385

        # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
        expected_bias_gain = 0.46043165467625885

        split_info = split_info_pb2.SplitInfo()
        split_info.ParseFromString(splits[0])
        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split

        self.assertEqual(0, split_node.feature_column)

        self.assertEqual(2, split_node.feature_id)

        self.assertAllClose(
            expected_left_gain + expected_right_gain - expected_bias_gain,
            gains[0], 0.00001)

        self.assertAllClose([expected_left_weight], left_child.value, 0.00001)

        self.assertAllClose([expected_right_weight], right_child.value,
                            0.00001)

        # Check the split on partition 1.
        # (-4 + 0.1) / (0.13 + 1)
        expected_left_weight = -3.4513274336283186
        # (-4 + 0.1) ** 2 / (0.13 + 1)
        expected_left_gain = 13.460176991150442
        expected_right_weight = 0
        expected_right_gain = 0
        # (-4 + 0.1) ** 2 / (0.13 + 1)
        expected_bias_gain = 13.460176991150442

        # Verify the candidate for partition 1; there is only one active
        # feature here, so zero gain is expected.
        split_info = split_info_pb2.SplitInfo()
        split_info.ParseFromString(splits[1])
        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split
        self.assertAllClose(0.0, gains[1], 0.00001)

        self.assertAllClose([expected_left_weight], left_child.value, 0.00001)

        self.assertAllClose([expected_right_weight], right_child.value,
                            0.00001)

        self.assertEqual(0, split_node.feature_column)

        self.assertEqual(1, split_node.feature_id)
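
The expected values in the comments follow the usual closed forms for a single-logit leaf, with the summed gradient soft-thresholded by l1: weight = -shrink(G) / (H + l2) and gain = shrink(G)**2 / (H + l2). A plain-Python check that reproduces the constants above (assuming that form; this is not the handler's actual kernel):

    l1, l2 = 0.1, 1.0

    def shrink(g_sum):
        # Soft-threshold the summed gradient toward zero by l1.
        return max(g_sum - l1, 0.0) if g_sum > 0 else min(g_sum + l1, 0.0)

    def weight_and_gain(g_sum, h_sum):
        g = shrink(g_sum)
        return -g / (h_sum + l2), g * g / (h_sum + l2)

    # Partition 0, split on feature id 2: left = {i0, i2}, right = {i1}.
    print(weight_and_gain(0.2 + 1.2, 0.12 + 0.2))  # (-0.98484..., 1.28030...)
    print(weight_and_gain(-0.5, 0.07))             # (0.37383..., 0.14953...)
    # Bias gain over the whole partition.
    print(weight_and_gain(0.2 - 0.5 + 1.2, 0.12 + 0.07 + 0.2)[1])  # 0.46043...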
Code Example #4
    def testGenerateFeatureSplitCandidatesMulticlass(self):
        with self.test_session() as sess:
            # Batch size is 4, with 2 gradients per instance.
            gradients = array_ops.constant(
                [[0.2, 0.1], [-0.5, 0.2], [1.2, 3.4], [4.0, -3.5]],
                shape=[4, 2])
            # One 2x2 hessian matrix per instance.
            hessian_0 = [[0.12, 0.02], [0.3, 0.11]]
            hessian_1 = [[0.07, -0.2], [-0.5, 0.2]]
            hessian_2 = [[0.2, -0.23], [-0.8, 0.9]]
            hessian_3 = [[0.13, -0.3], [-1.5, 2.2]]
            hessians = array_ops.constant(
                [hessian_0, hessian_1, hessian_2, hessian_3])

            partition_ids = array_ops.constant([0, 0, 0, 1],
                                               dtype=dtypes.int32)
            indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
            values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)

            gradient_shape = tensor_shape.TensorShape([2])
            hessian_shape = tensor_shape.TensorShape([2, 2])
            class_id = -1

            split_handler = categorical_split_handler.EqualitySplitHandler(
                l1_regularization=0.1,
                l2_regularization=1,
                tree_complexity_regularization=0,
                min_node_weight=0,
                sparse_int_column=sparse_tensor.SparseTensor(
                    indices, values, [4, 1]),
                feature_column_group_id=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN,
                init_stamp_token=0)
            resources.initialize_resources(resources.shared_resources()).run()

            empty_gradients, empty_hessians = get_empty_tensors(
                gradient_shape, hessian_shape)
            example_weights = array_ops.ones([4, 1], dtypes.float32)

            update_1 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            with ops.control_dependencies([update_1]):
                are_splits_ready, partitions, gains, splits = (
                    split_handler.make_splits(0, 1, class_id))
                are_splits_ready, partitions, gains, splits = (sess.run(
                    [are_splits_ready, partitions, gains, splits]))
        self.assertTrue(are_splits_ready)
        self.assertAllEqual([0, 1], partitions)

        split_info = split_info_pb2.SplitInfo()
        split_info.ParseFromString(splits[0])

        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split
        # Each leaf holds a 2-element vector.
        self.assertEqual(2, len(left_child.value))
        self.assertEqual(2, len(right_child.value))
        self.assertEqual(1, split_node.feature_id)

        split_info.ParseFromString(splits[1])
        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split
        self.assertEqual(2, len(left_child.value))
        self.assertEqual(0, len(right_child.value))
        self.assertEqual(1, split_node.feature_id)
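
With FULL_HESSIAN the per-leaf weight is a vector with one entry per logit, which is why this test only checks vector lengths. For intuition, a rough NumPy sketch of the Newton step w = -(H + l2*I)^{-1} g on summed per-leaf stats (it ignores the l1 term and other implementation details, and the leaf membership is hypothetical):

    import numpy as np

    l2 = 1.0
    # A hypothetical leaf that accumulated examples i0 and i2.
    g = np.array([0.2, 0.1]) + np.array([1.2, 3.4])    # summed 2-vector gradient
    H = (np.array([[0.12, 0.02], [0.3, 0.11]]) +
         np.array([[0.2, -0.23], [-0.8, 0.9]]))        # summed 2x2 hessian
    w = -np.linalg.solve(H + l2 * np.eye(2), g)        # 2-element leaf weight
    print(w)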
Code Example #5
    def testGenerateFeatureSplitCandidatesSumReduction(self):
        with self.test_session() as sess:
            # The data looks like the following:
            # Example |  Gradients    | Partition | Feature ID     |
            # i0      |  (0.2, 0.12)  | 0         | 1,2            |
            # i1      |  (-0.5, 0.07) | 0         |                |
            # i2      |  (1.2, 0.2)   | 0         | 2              |
            # i3      |  (4.0, 0.13)  | 1         | 1              |
            gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
            hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
            partition_ids = [0, 0, 0, 1]
            indices = [[0, 0], [0, 1], [2, 0], [3, 0]]
            values = array_ops.constant([1, 2, 2, 1], dtype=dtypes.int64)

            gradient_shape = tensor_shape.scalar()
            hessian_shape = tensor_shape.scalar()
            class_id = -1

            split_handler = categorical_split_handler.EqualitySplitHandler(
                l1_regularization=0.1,
                l2_regularization=1,
                tree_complexity_regularization=0,
                min_node_weight=0,
                sparse_int_column=sparse_tensor.SparseTensor(
                    indices, values, [4, 1]),
                feature_column_group_id=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
                init_stamp_token=0,
                loss_uses_sum_reduction=True)
            resources.initialize_resources(resources.shared_resources()).run()

            empty_gradients, empty_hessians = get_empty_tensors(
                gradient_shape, hessian_shape)
            example_weights = array_ops.ones([4, 1], dtypes.float32)

            update_1 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            update_2 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            with ops.control_dependencies([update_1, update_2]):
                are_splits_ready, partitions, gains, splits = (
                    split_handler.make_splits(0, 1, class_id))
                are_splits_ready, partitions, gains, splits = (sess.run(
                    [are_splits_ready, partitions, gains, splits]))
        self.assertTrue(are_splits_ready)
        self.assertAllEqual([0, 1], partitions)

        # Check the split on partition 0.
        # -(0.4 + 2.4 - 0.1) / (0.24 + 0.4 + 1)
        expected_left_weight = -1.6463414634146338

        # (0.4 + 2.4 - 0.1) ** 2 / (0.24 + 0.4 + 1)
        expected_left_gain = 4.445121951219511

        # -(-1 + 0.1) / (0.14 + 1)
        expected_right_weight = 0.789473684211

        # (-1 + 0.1) ** 2 / (0.14 + 1)
        expected_right_gain = 0.710526315789

        # (0.4 + -1 + 2.4 - 0.1) ** 2 / (0.24 + 0.14 + 0.4 + 1)
        expected_bias_gain = 1.6235955056179772

        split_info = split_info_pb2.SplitInfo()
        split_info.ParseFromString(splits[0])
        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split

        self.assertEqual(0, split_node.feature_column)

        self.assertEqual(2, split_node.feature_id)

        self.assertAllClose(
            expected_left_gain + expected_right_gain - expected_bias_gain,
            gains[0], 0.00001)

        self.assertAllClose([expected_left_weight], left_child.value, 0.00001)

        self.assertAllClose([expected_right_weight], right_child.value,
                            0.00001)

        # Check the split on partition 1.
        # (-8 + 0.1) / (0.26 + 1)
        expected_left_weight = -6.26984126984
        # (-8 + 0.1) ** 2 / (0.26 + 1)
        expected_left_gain = 49.5317460317
        expected_right_weight = 0
        expected_right_gain = 0
        # (-8 + 0.1) ** 2 / (0.26 + 1)
        expected_bias_gain = 49.5317460317

        # Verify the candidate for partition 1; there is only one active
        # feature here, so zero gain is expected.
        split_info = split_info_pb2.SplitInfo()
        split_info.ParseFromString(splits[1])
        left_child = split_info.left_child.vector
        right_child = split_info.right_child.vector
        split_node = split_info.split_node.categorical_id_binary_split
        self.assertAllClose(0.0, gains[1], 0.00001)

        self.assertAllClose([expected_left_weight], left_child.value, 0.00001)

        self.assertAllClose([expected_right_weight], right_child.value,
                            0.00001)

        self.assertEqual(0, split_node.feature_column)

        self.assertEqual(1, split_node.feature_id)
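
Relative to testGenerateFeatureSplitCandidates, every G and H in the comments above is doubled: with loss_uses_sum_reduction=True the stats from the two identical update_stats_sync calls contribute additively, while l1 and l2 stay fixed. A quick arithmetic check of the first expected weight:

    l1, l2 = 0.1, 1.0
    g_left = 2 * (0.2 + 1.2)   # doubled gradient sum for the left child
    h_left = 2 * (0.12 + 0.2)  # doubled hessian sum for the left child
    print(-(g_left - l1) / (h_left + l2))  # -1.64634..., the expected_left_weight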
Code Example #6
    def testObliviousFeatureSplitGeneration(self):
        with self.cached_session() as sess:
            # The data looks like the following:
            # Example |  Gradients    | Partition | Feature ID     |
            # i0      |  (0.2, 0.12)  | 1         | 1              |
            # i1      |  (-0.5, 0.07) | 1         | 2              |
            # i2      |  (1.2, 0.2)   | 1         | 1              |
            # i3      |  (4.0, 0.13)  | 2         | 2              |
            gradients = array_ops.constant([0.2, -0.5, 1.2, 4.0])
            hessians = array_ops.constant([0.12, 0.07, 0.2, 0.13])
            partition_ids = [1, 1, 1, 2]
            indices = [[0, 0], [1, 0], [2, 0], [3, 0]]
            values = array_ops.constant([1, 2, 1, 2], dtype=dtypes.int64)

            gradient_shape = tensor_shape.TensorShape([])
            hessian_shape = tensor_shape.TensorShape([])
            class_id = -1

            split_handler = categorical_split_handler.EqualitySplitHandler(
                l1_regularization=0.1,
                l2_regularization=1,
                tree_complexity_regularization=0,
                min_node_weight=0,
                sparse_int_column=sparse_tensor.SparseTensor(
                    indices, values, [4, 1]),
                feature_column_group_id=0,
                gradient_shape=gradient_shape,
                hessian_shape=hessian_shape,
                multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS,
                init_stamp_token=0,
                weak_learner_type=(
                    learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE))
            resources.initialize_resources(resources.shared_resources()).run()

            empty_gradients, empty_hessians = get_empty_tensors(
                gradient_shape, hessian_shape)
            example_weights = array_ops.ones([4, 1], dtypes.float32)

            update_1 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))
            update_2 = split_handler.update_stats_sync(
                0,
                partition_ids,
                gradients,
                hessians,
                empty_gradients,
                empty_hessians,
                example_weights,
                is_active=array_ops.constant([True, True]))

            with ops.control_dependencies([update_1, update_2]):
                are_splits_ready, partitions, gains, splits = (
                    split_handler.make_splits(0, 1, class_id))
                are_splits_ready, partitions, gains, splits = (sess.run(
                    [are_splits_ready, partitions, gains, splits]))
        self.assertTrue(are_splits_ready)
        self.assertAllEqual([1, 2], partitions)

        # For partition 1.
        # -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
        expected_left_weight1 = -0.9848484848484846
        # (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
        expected_left_gain1 = 1.2803030303030298

        # -(-0.5 + 0.1) / (0.07 + 1)
        expected_right_weight1 = 0.37383177570093457

        # (-0.5 + 0.1) ** 2 / (0.07 + 1)
        expected_right_gain1 = 0.14953271028037385

        # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
        expected_bias_gain1 = 0.46043165467625885

        split_info = split_info_pb2.ObliviousSplitInfo()
        split_info.ParseFromString(splits[0])
        # Children of partition 1.
        left_child = split_info.children[0].vector
        right_child = split_info.children[1].vector
        split_node = split_info.split_node.oblivious_categorical_id_binary_split

        self.assertEqual(0, split_node.feature_column)
        self.assertEqual(1, split_node.feature_id)
        self.assertAllClose([expected_left_weight1], left_child.value, 0.00001)
        self.assertAllClose([expected_right_weight1], right_child.value,
                            0.00001)

        # For partition 2.
        expected_left_weight2 = 0
        expected_left_gain2 = 0
        # -(4 - 0.1) / (0.13 + 1)
        expected_right_weight2 = -3.4513274336283186
        # (4 - 0.1) ** 2 / (0.13 + 1)
        expected_right_gain2 = 13.460176991150442
        # (4 - 0.1) ** 2 / (0.13 + 1)
        expected_bias_gain2 = 13.460176991150442

        # Children of partition 2.
        left_child = split_info.children[2].vector
        right_child = split_info.children[3].vector
        self.assertAllClose([expected_left_weight2], left_child.value, 0.00001)
        self.assertAllClose([expected_right_weight2], right_child.value,
                            0.00001)

        self.assertAllClose(
            expected_left_gain1 + expected_right_gain1 - expected_bias_gain1 +
            expected_left_gain2 + expected_right_gain2 - expected_bias_gain2,
            gains[0], 0.00001)
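
Because the oblivious handler picks one shared (feature_column, feature_id) split for all partitions, splits[0] carries the children of both partitions and gains[0] is the sum of the per-partition gain deltas. A plain-Python check using the same soft-thresholded forms as in the sketch after Code Example #3:

    l1, l2 = 0.1, 1.0

    def gain(g_sum, h_sum):
        g = max(g_sum - l1, 0.0) if g_sum > 0 else min(g_sum + l1, 0.0)
        return g * g / (h_sum + l2)

    # Partition 1 splits {i0, i2} vs {i1}; partition 2 splits {} vs {i3}.
    delta_p1 = gain(1.4, 0.32) + gain(-0.5, 0.07) - gain(0.9, 0.39)
    delta_p2 = gain(0.0, 0.0) + gain(4.0, 0.13) - gain(4.0, 0.13)
    print(delta_p1 + delta_p2)  # ~0.96940, matching gains[0]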