def testMultidimensionalAccumulator(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([]),
                hessian_shape=tensor_shape.TensorShape([]))
            with ops.control_dependencies([accumulator.initializer]):
                op1 = accumulator.add(stamp_token=0,
                                      partition_ids=[1, 2, 1],
                                      feature_ids=[[2, 2], [3, 0], [2, 2]],
                                      gradients=[0.1, 0.3, 0.8],
                                      hessians=[0.2, 0.4, -9])
                op2 = accumulator.add(stamp_token=0,
                                      partition_ids=[2, 1],
                                      feature_ids=[[3, 1], [2, 2]],
                                      gradients=[0.1, 1],
                                      hessians=[0.2, -1])

            with ops.control_dependencies([op1, op2]):
                num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
                    stamp_token=0, next_stamp_token=1)
                num_updates, partition, bucket_ids, grads, hessians = sess.run(
                    [num_updates, partition, bucket_ids, grads, hessians])

            result = _AccumulatorResultToDict(partition, bucket_ids, grads,
                                              hessians)
            self.assertEqual(num_updates, 2)
            self.assertEqual(len(result), 3)
            # Key is partition, bucket, dimension.
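            # Stats for the duplicate key (1, 2, 2) are summed across op1 and
            # op2: gradients 0.1 + 0.8 + 1 = 1.9, hessians 0.2 - 9 - 1 = -9.8.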
            self.assertAllClose(result[(1, 2, 2)], [1.9, -9.8])
            self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
            self.assertAllClose(result[(2, 3, 1)], [0.1, 0.2])

    def testDropStaleUpdate(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([]),
                hessian_shape=tensor_shape.TensorShape([]))
            with ops.control_dependencies([accumulator.initializer]):
                op1 = accumulator.add(stamp_token=0,
                                      partition_ids=[1, 2],
                                      feature_ids=[[2, 0], [3, 0]],
                                      gradients=[0.1, 0.3],
                                      hessians=[0.2, 0.4])
                op2 = accumulator.add(stamp_token=-1,
                                      partition_ids=[1],
                                      feature_ids=[[2, 0]],
                                      gradients=[0.1],
                                      hessians=[0.2])
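                # op2 uses stamp_token=-1, which does not match the
                # accumulator's current stamp (0), so its update is dropped.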

            with ops.control_dependencies([op1, op2]):
                num_updates, partition, feature, grads, hessians = accumulator.flush(
                    stamp_token=0, next_stamp_token=1)
                num_updates, partition, feature, grads, hessians = sess.run(
                    [num_updates, partition, feature, grads, hessians])

            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(num_updates, 1)
            self.assertEqual(len(result), 2)
            self.assertAllClose(result[(1, 2, 0)], [0.1, 0.2])
            self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])

    def testDeserialize(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([]),
                hessian_shape=tensor_shape.TensorShape([]))
            with ops.control_dependencies([accumulator.initializer]):
                # These will be discarded by the deserialize call below.
                op1 = accumulator.add(stamp_token=0,
                                      partition_ids=[1, 2],
                                      feature_ids=[[2, 0], [3, 1]],
                                      gradients=[0.1, 0.3],
                                      hessians=[0.2, 0.4])

            with ops.control_dependencies([op1]):
                deserialize = (accumulator.saveable.deserialize(
                    stamp_token=2,
                    num_updates=3,
                    partition_ids=[3, 4],
                    feature_ids=[[5, 0], [6, 2]],
                    gradients=[0.4, 0.5],
                    hessians=[0.6, 0.7]))
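            # The deserialize call replaces the accumulator's contents and
            # advances its stamp to 2, so only the deserialized stats remain.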
            with ops.control_dependencies([deserialize]):
                num_updates, partition, feature, grads, hessians = accumulator.flush(
                    stamp_token=2, next_stamp_token=3)
                num_updates, partition, feature, grads, hessians = sess.run(
                    [num_updates, partition, feature, grads, hessians])

            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(num_updates, 3)
            self.assertEqual(len(result), 2)
            self.assertAllClose(result[(3, 5, 0)], [0.4, 0.6])
            self.assertAllClose(result[(4, 6, 2)], [0.5, 0.7])

    def testMakeSummary(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([2]),
                hessian_shape=tensor_shape.TensorShape([2, 2]))
            partition, feature, grads, hessians = accumulator._make_summary(
                partition_ids=[1, 2, 1],
                feature_ids=[[2, 0], [3, 2], [2, 0]],
                # Two values for each gradient.
                gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],
                # A 2x2 matrix for each hessian.
                hessians=[[[0.01, 0.02], [0.03, 0.04]],
                          [[0.05, 0.06], [0.07, 0.08]],
                          [[0.011, 0.022], [0.033, 0.044]]])
            partition, feature, grads, hessians = sess.run(
                [partition, feature, grads, hessians])

            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(len(result), 2)
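            # Duplicate entries for key (1, 2, 0) are summed element-wise:
            # gradients [0.1, 0.1] + [0.10, 0.11] = [0.20, 0.21].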
            self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])
            self.assertAllClose(result[(1, 2, 0)][1],
                                [[0.021, 0.042], [0.063, 0.084]])
            self.assertAllClose(result[(2, 3, 2)][0], [0.2, 0.2])
            self.assertAllClose(result[(2, 3, 2)][1],
                                [[0.05, 0.06], [0.07, 0.08]])
Code example #5
    def __init__(self,
                 l1_regularization,
                 l2_regularization,
                 tree_complexity_regularization,
                 min_node_weight,
                 feature_column_group_id,
                 epsilon,
                 num_quantiles,
                 gradient_shape,
                 hessian_shape,
                 multiclass_strategy,
                 init_stamp_token=0,
                 loss_uses_sum_reduction=False,
                 name=None):
        """Initialize the internal state for this split handler.

    Args:
      l1_regularization: L1 regularization applied for this split handler.
      l2_regularization: L2 regularization applied for this split handler.
      tree_complexity_regularization: Tree complexity regularization applied
          for this split handler.
      min_node_weight: Minimum sum of weights of examples in each partition to
          be considered for splitting.
      feature_column_group_id: Feature column group index.
      epsilon: A float, the error bound for quantile computation.
      num_quantiles: An int, the number of buckets to create from the histogram.
      gradient_shape: A TensorShape, containing shape of gradients.
      hessian_shape: A TensorShape, containing shape of hessians.
      multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A scalar tensor containing the initial stamp token for
          the stamped objects.
      loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
          SUM or MEAN reduction was used for the loss.
      name: An optional handler name.
    """
        super(InequalitySplitHandler, self).__init__(
            name=name,
            l1_regularization=l1_regularization,
            l2_regularization=l2_regularization,
            tree_complexity_regularization=tree_complexity_regularization,
            min_node_weight=min_node_weight,
            feature_column_group_id=feature_column_group_id,
            gradient_shape=gradient_shape,
            hessian_shape=hessian_shape,
            multiclass_strategy=multiclass_strategy,
            loss_uses_sum_reduction=loss_uses_sum_reduction)
        self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
            init_stamp_token,
            gradient_shape,
            hessian_shape,
            name="StatsAccumulator/{}".format(self._name))
        # Allocate both stats accumulator and quantile accumulator on the same
        # device so that we can build splits with fewer RPCs.
        with ops.colocate_with(self._stats_accumulator.resource_handle):
            self._quantile_accumulator = quantile_ops.QuantileAccumulator(
                init_stamp_token,
                epsilon=epsilon,
                num_quantiles=num_quantiles,
                name="QuantileAccumulator/{}".format(self._name))

    def __init__(
            self,
            sparse_int_column,
            l1_regularization,
            l2_regularization,
            tree_complexity_regularization,
            min_node_weight,
            feature_column_group_id,
            gradient_shape,
            hessian_shape,
            multiclass_strategy,
            init_stamp_token=0,
            loss_uses_sum_reduction=False,
            weak_learner_type=learner_pb2.LearnerConfig.NORMAL_DECISION_TREE,
            name=None):
        """Initialize the internal state for this split handler.

    Args:
      sparse_int_column: A `SparseTensor` column with int64 values associated
        with this handler.
      l1_regularization: L1 regularization applied for this split handler.
      l2_regularization: L2 regularization applied for this split handler.
      tree_complexity_regularization: Tree complexity regularization applied
          for this split handler.
      min_node_weight: Minimum sum of weights of examples in each partition to
          be considered for splitting.
      feature_column_group_id: Feature column group index.
      gradient_shape: A TensorShape, containing shape of gradients.
      hessian_shape: A TensorShape, containing shape of hessians.
      multiclass_strategy: Strategy describing how to treat multiclass problems.
      init_stamp_token: A scalar tensor containing the initial stamp token for
          the stamped objects.
      loss_uses_sum_reduction: A scalar boolean tensor that specifies whether
          SUM or MEAN reduction was used for the loss.
      weak_learner_type: Specifies the type of weak learner to use.
      name: An optional handler name.
    """
        super(EqualitySplitHandler, self).__init__(
            l1_regularization=l1_regularization,
            l2_regularization=l2_regularization,
            tree_complexity_regularization=tree_complexity_regularization,
            min_node_weight=min_node_weight,
            feature_column_group_id=feature_column_group_id,
            gradient_shape=gradient_shape,
            hessian_shape=hessian_shape,
            multiclass_strategy=multiclass_strategy,
            loss_uses_sum_reduction=loss_uses_sum_reduction,
            name=name)
        self._stats_accumulator = stats_accumulator_ops.StatsAccumulator(
            init_stamp_token,
            gradient_shape,
            hessian_shape,
            name="StatsAccumulator/{}".format(self._name))
        self._sparse_int_column = sparse_int_column
        self._weak_learner_type = weak_learner_type
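
    # A minimal construction sketch (hypothetical values, not from the original
    # source; assumes sparse_column is a SparseTensor of int64 categorical
    # values built elsewhere):
    #
    #   handler = EqualitySplitHandler(
    #       sparse_int_column=sparse_column,
    #       l1_regularization=0.0,
    #       l2_regularization=1.0,
    #       tree_complexity_regularization=0.0,
    #       min_node_weight=0.0,
    #       feature_column_group_id=0,
    #       gradient_shape=tensor_shape.TensorShape([]),
    #       hessian_shape=tensor_shape.TensorShape([]),
    #       multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS)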

    def testSerialize(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([2]),
                hessian_shape=tensor_shape.TensorShape([2, 2]))
            with ops.control_dependencies([accumulator.initializer]):
                op1 = accumulator.add(
                    stamp_token=0,
                    partition_ids=[1, 2],
                    feature_ids=[[2, 0], [3, 0]],
                    # Two values for each gradient.
                    gradients=[[0.1, 0.1], [0.2, 0.2]],
                    # A 2x2 matrix for each hessian.
                    hessians=[[[0.01, 0.02], [0.03, 0.04]],
                              [[0.05, 0.06], [0.07, 0.08]]])

            with ops.control_dependencies([op1]):
                (stamp_token, num_updates_1, partition_1, feature_1, grads_1,
                 hessians_1) = accumulator.saveable.serialize()
            # Make sure that the accumulator hasn't changed during serialization.
            with ops.control_dependencies([stamp_token]):
                num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
                    accumulator.flush(stamp_token=0, next_stamp_token=1))
                (stamp_token, num_updates_1, partition_1, feature_1, grads_1,
                 hessians_1, num_updates_2, partition_2, feature_2, grads_2,
                 hessians_2) = sess.run([
                     stamp_token, num_updates_1, partition_1, feature_1,
                     grads_1, hessians_1, num_updates_2, partition_2,
                     feature_2, grads_2, hessians_2
                 ])

            result_1 = _AccumulatorResultToDict(partition_1, feature_1,
                                                grads_1, hessians_1)
            result_2 = _AccumulatorResultToDict(partition_2, feature_2,
                                                grads_2, hessians_2)

            self.assertEqual(num_updates_1, 1)
            self.assertEqual(num_updates_2, 1)
            self.assertEqual(len(result_1), 2)
            self.assertAllClose(result_1[(1, 2, 0)][0], [0.1, 0.1])
            self.assertAllClose(result_1[(1, 2, 0)][1],
                                [[0.01, 0.02], [0.03, 0.04]])
            self.assertAllClose(result_1[(2, 3, 0)][0], [0.2, 0.2])
            self.assertAllClose(result_1[(2, 3, 0)][1],
                                [[0.05, 0.06], [0.07, 0.08]])

            self.assertAllEqual(result_1[(1, 2, 0)][0], result_2[(1, 2, 0)][0])
            self.assertAllEqual(result_1[(1, 2, 0)][1], result_2[(1, 2, 0)][1])
            self.assertAllEqual(result_1[(2, 3, 0)][0], result_2[(2, 3, 0)][0])
            self.assertAllEqual(result_1[(2, 3, 0)][1], result_2[(2, 3, 0)][1])

    def testDeserialize(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([2]),
                hessian_shape=tensor_shape.TensorShape([2, 2]))
            with ops.control_dependencies([accumulator.initializer]):
                # These will be discarded by the deserialize call below.
                op1 = accumulator.add(
                    stamp_token=0,
                    partition_ids=[1, 2],
                    feature_ids=[[2, 0], [3, 0]],
                    # Two values for each gradient.
                    gradients=[[0.1, 0.1], [0.2, 0.2]],
                    # A 2x2 matrix for each hessian.
                    hessians=[[[0.01, 0.02], [0.03, 0.04]],
                              [[0.05, 0.06], [0.07, 0.08]]])

            with ops.control_dependencies([op1]):
                deserialize = accumulator.saveable.deserialize(
                    stamp_token=2,
                    num_updates=3,
                    partition_ids=[3, 4],
                    feature_ids=[[4, 0], [5, 0]],
                    # Two values for each gradient.
                    gradients=[[0.3, 0.3], [0.5, 0.5]],
                    # A 2x2 matrix for each hessian.
                    hessians=[[[0.03, 0.04], [0.05, 0.06]],
                              [[0.07, 0.08], [0.09, 0.10]]])
            with ops.control_dependencies([deserialize]):
                num_updates, partition, feature, grads, hessians = accumulator.flush(
                    stamp_token=2, next_stamp_token=3)
                num_updates, partition, feature, grads, hessians = sess.run(
                    [num_updates, partition, feature, grads, hessians])

            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(num_updates, 3)
            self.assertEqual(len(result), 2)
            self.assertAllClose(result[(3, 4, 0)][0], [0.3, 0.3])
            self.assertAllClose(result[(3, 4, 0)][1],
                                [[0.03, 0.04], [0.05, 0.06]])
            self.assertAllClose(result[(4, 5, 0)][0], [0.5, 0.5])
            self.assertAllClose(result[(4, 5, 0)][1],
                                [[0.07, 0.08], [0.09, 0.10]])

    def testMakeSummary(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([]),
                hessian_shape=tensor_shape.TensorShape([]))
            partition, feature, grads, hessians = accumulator._make_summary(
                partition_ids=[1, 2, 1],
                feature_ids=[[2, 0], [3, 1], [2, 0]],
                gradients=[0.1, 0.3, 0.1],
                hessians=[0.2, 0.4, 0.2])
            partition, feature, grads, hessians = sess.run(
                [partition, feature, grads, hessians])
            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(len(result), 2)
            self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])
            self.assertAllClose(result[(2, 3, 1)], [0.3, 0.4])

    def testDropStaleUpdate(self):
        with self.cached_session() as sess:
            accumulator = stats_accumulator_ops.StatsAccumulator(
                stamp_token=0,
                gradient_shape=tensor_shape.TensorShape([2]),
                hessian_shape=tensor_shape.TensorShape([2, 2]))
            with ops.control_dependencies([accumulator.initializer]):
                op1 = accumulator.add(
                    stamp_token=0,
                    partition_ids=[1, 2],
                    feature_ids=[[2, 5], [3, 0]],
                    # Two values for each gradient.
                    gradients=[[0.1, 0.1], [0.2, 0.2]],
                    # A 2x2 matrix for each hessian.
                    hessians=[[[0.01, 0.02], [0.03, 0.04]],
                              [[0.05, 0.06], [0.07, 0.08]]])
                op2 = accumulator.add(stamp_token=-1,
                                      partition_ids=[1],
                                      feature_ids=[[2, 5]],
                                      gradients=[[0.10, 0.11]],
                                      hessians=[[[0.011, 0.022],
                                                 [0.033, 0.044]]])
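                # As above, stamp_token=-1 is stale relative to the
                # accumulator's stamp (0), so op2's update is silently dropped
                # and only op1's stats are flushed.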

            with ops.control_dependencies([op1, op2]):
                num_updates, partition, feature, grads, hessians = accumulator.flush(
                    stamp_token=0, next_stamp_token=1)
                num_updates, partition, feature, grads, hessians = sess.run(
                    [num_updates, partition, feature, grads, hessians])

            result = _AccumulatorResultToDict(partition, feature, grads,
                                              hessians)
            self.assertEqual(num_updates, 1)
            self.assertEqual(len(result), 2)
            self.assertAllClose(result[(1, 2, 5)][0], [0.1, 0.1])
            self.assertAllClose(result[(1, 2, 5)][1],
                                [[0.01, 0.02], [0.03, 0.04]])
            self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])
            self.assertAllClose(result[(2, 3, 0)][1],
                                [[0.05, 0.06], [0.07, 0.08]])