Code Example #1
  def test_callable_returns_invalid(self):

    def _optimizer_fn():
      return (1, 2, 3)

    with self.assertRaisesRegexp(
        ValueError,
        'The given object is not a tf.keras.optimizers.Optimizer instance'):
      optimizers.get_optimizer_instance_v2(_optimizer_fn)
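The test above checks that a callable which does not return an optimizer is rejected. As context for this and the other tests on this page, here is a simplified, self-contained sketch of the string / instance / callable dispatch that optimizers.get_optimizer_instance_v2 performs. It is not the estimator library's implementation; the helper name and the name-to-class mapping are illustrative only.

import tensorflow as tf

# Illustrative name-to-class mapping; the real table lives inside the
# estimator's optimizers module.
_NAME_TO_OPTIMIZER = {
    'Adagrad': tf.keras.optimizers.Adagrad,
    'Adam': tf.keras.optimizers.Adam,
    'Ftrl': tf.keras.optimizers.Ftrl,
    'RMSProp': tf.keras.optimizers.RMSprop,
    'SGD': tf.keras.optimizers.SGD,
}


def get_optimizer_sketch(opt, learning_rate=None):
  """Hypothetical stand-in showing the string / instance / callable cases."""
  if isinstance(opt, str):
    if opt not in _NAME_TO_OPTIMIZER:
      raise ValueError('Unsupported optimizer name: %s' % opt)
    if learning_rate is None:
      return _NAME_TO_OPTIMIZER[opt]()
    return _NAME_TO_OPTIMIZER[opt](learning_rate=learning_rate)
  if isinstance(opt, tf.keras.optimizers.Optimizer):
    return opt
  if callable(opt):
    opt = opt()  # A zero-argument callable must return an optimizer instance.
  if not isinstance(opt, tf.keras.optimizers.Optimizer):
    raise ValueError(
        'The given object is not a tf.keras.optimizers.Optimizer instance')
  return opt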
Code Example #2
File: rnn.py  Project: tirkarthi/estimator
def _get_rnn_estimator_spec(
    features, labels, mode, head, rnn_model, optimizer, return_sequences):
  """Computes `EstimatorSpec` from logits to use in estimator model function.

  Args:
    features: dict of `Tensor` and `SparseTensor` objects returned from
      `input_fn`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `Head` instance.
    rnn_model: A Keras model that computes RNN logits from features.
    optimizer: String, `tf.keras.optimizers.Optimizer` object, or callable that
      creates the optimizer to use for training. If not specified, will use the
      Adagrad optimizer with a default learning rate of 0.05 and gradient clip
      norm of 5.0.
    return_sequences: A boolean indicating whether to return the last output
      in the output sequence, or the full sequence.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If mode or optimizer is invalid, or features has the wrong type.
  """
  training = (mode == model_fn.ModeKeys.TRAIN)
  # In TRAIN mode, create the optimizer and assign the global_step variable to
  # optimizer.iterations so that global_step is incremented correctly, since
  # Hooks rely on the global step as their step counter; otherwise skip
  # optimizer initialization and set it to None.
  if training:
    # If user does not provide an optimizer instance, use the optimizer
    # specified by the string with default learning rate and gradient clipping.
    if isinstance(optimizer, six.string_types):
      optimizer = optimizers.get_optimizer_instance_v2(
          optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
      optimizer.clipnorm = _DEFAULT_CLIP_NORM
    else:
      optimizer = optimizers.get_optimizer_instance_v2(optimizer)
    optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()
  else:
    optimizer = None

  logits = rnn_model(features, training)

  if return_sequences and head.input_sequence_mask_key not in features:
    features[head.input_sequence_mask_key] = logits._keras_mask  # pylint: disable=protected-access

  return head.create_estimator_spec(
      features=features,
      mode=mode,
      labels=labels,
      optimizer=optimizer,
      logits=logits,
      update_ops=rnn_model.updates,
      trainable_variables=rnn_model.trainable_variables)
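A note on the string branch above: when the optimizer is given by name, the defaults described in the docstring apply (Adagrad with learning rate 0.05 and gradient clip norm 5.0). A minimal sketch of that branch in isolation; the constant values are taken from the docstring rather than from rnn.py itself:

import tensorflow as tf

_DEFAULT_LEARNING_RATE = 0.05  # per the docstring above
_DEFAULT_CLIP_NORM = 5.0       # per the docstring above

optimizer = tf.keras.optimizers.Adagrad(learning_rate=_DEFAULT_LEARNING_RATE)
optimizer.clipnorm = _DEFAULT_CLIP_NORM  # clip each gradient to norm <= 5.0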
Code Example #3
def _baseline_model_fn_v2(
        features,
        labels,
        mode,
        head,
        optimizer,
        weight_column=None,
        config=None,
        loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE):
    """Model_fn for baseline models.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `train`).
    labels: `Tensor` of labels that are compatible with the `Head` instance.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `Head` instance.
    optimizer: String, `tf.Optimizer` object, or callable that creates the
      optimizer to use for training. If not specified, will use `FtrlOptimizer`
      with a default learning rate of 0.3.
    weight_column: A string or a `NumericColumn` created by
      `tf.feature_column.numeric_column` defining feature column representing
      weights. It will be multiplied by the loss of the example.
    config: `RunConfig` object to configure the runtime settings.
    loss_reduction: One of `tf.keras.losses.Reduction` except `NONE`. Describes
      how to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.

  Raises:
    KeyError: If weight column is specified but not present.
    ValueError: If features is an empty dictionary.

  Returns:
    An `EstimatorSpec` instance.
  """
    del config  # Unused.

    trainable_variables, logits = _baseline_model_fn_builder_v2(
        features, head.logits_dimension, weight_column)

    # In TRAIN mode, create the optimizer and assign the global_step variable
    # to optimizer.iterations so that global_step is incremented correctly,
    # since Hooks rely on the global step as their step counter.
    if mode == ModeKeys.TRAIN:
        opt = optimizers.get_optimizer_instance_v2(
            optimizer, learning_rate=_LEARNING_RATE)
        opt.iterations = tf.compat.v1.train.get_or_create_global_step()

    def train_op_fn(loss):
        # Scale loss by number of replicas.
        if loss_reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
            num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
            if num_replicas > 1:
                loss *= (1. / num_replicas)
        return opt.get_updates(loss, trainable_variables)[0]

    return head.create_estimator_spec(features=features,
                                      mode=mode,
                                      logits=logits,
                                      labels=labels,
                                      train_op_fn=train_op_fn)
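The 1/num_replicas factor in train_op_fn exists because, under SUM_OVER_BATCH_SIZE, each replica's loss is already averaged over its local batch; summing the per-replica gradients across replicas would otherwise over-count by the number of replicas. A tiny numeric sketch with illustrative values:

num_replicas = 4
per_replica_losses = [0.8, 1.2, 1.0, 0.6]  # each is a local-batch mean
scaled = [loss / num_replicas for loss in per_replica_losses]
print(sum(scaled))  # 0.9, the mean over the full global batch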
Code Example #4
 def test_ftrl(self):
   with self.cached_session():
     opt = optimizers.get_optimizer_instance_v2('Ftrl', learning_rate=0.1)
     self.assertIsInstance(opt.learning_rate, tf.Variable)
     self.evaluate(tf.compat.v1.initializers.global_variables())
     self.assertIsInstance(opt, tf.keras.optimizers.Ftrl)
     self.assertAlmostEqual(0.1, self.evaluate(opt.learning_rate))
Code Example #5
 def test_rmsprop(self):
   with self.cached_session():
     opt = optimizers.get_optimizer_instance_v2('RMSProp', learning_rate=0.1)
     self.assertIsInstance(opt.learning_rate, tf.Variable)
     self.evaluate(tf.compat.v1.initializers.global_variables())
     self.assertIsInstance(opt, rmsprop.RMSProp)
     self.assertAlmostEqual(0.1, self.evaluate(opt.learning_rate))
Code Example #6
  def test_callable(self):

    def _optimizer_fn():
      return _TestOptimizerV2()

    opt = optimizers.get_optimizer_instance_v2(_optimizer_fn)
    self.assertIsInstance(opt, _TestOptimizerV2)
Code Example #7
 def test_adam_but_no_learning_rate(self):
   with self.cached_session():
     opt = optimizers.get_optimizer_instance_v2('Adam')
     self.assertIsInstance(opt.learning_rate, tf.Variable)
     self.evaluate(tf.compat.v1.initializers.global_variables())
     self.assertIsInstance(opt, adam.Adam)
     self.assertAlmostEqual(0.001, self.evaluate(opt.learning_rate))
Code Example #8
 def test_sgd(self):
   with self.cached_session():
     opt = optimizers.get_optimizer_instance_v2('SGD', learning_rate=0.1)
     self.assertIsInstance(opt.learning_rate, tf.Variable)
     self.evaluate(tf.compat.v1.initializers.global_variables())
     self.assertIsInstance(opt, gradient_descent.SGD)
     self.assertAlmostEqual(0.1, self.evaluate(opt.learning_rate))
Code Example #9
 def test_adam(self):
     with self.cached_session():
         opt = optimizers.get_optimizer_instance_v2('Adam',
                                                    learning_rate=0.1)
         self.assertIsInstance(opt.learning_rate, variables.Variable)
         self.evaluate(variables.global_variables_initializer())
         self.assertIsInstance(opt, adam.Adam)
         self.assertAlmostEqual(0.1, self.evaluate(opt.learning_rate))
Code Example #10
 def test_adagrad_but_no_learning_rate(self):
   with self.cached_session():
     opt = optimizers.get_optimizer_instance_v2('Adagrad')
     # The creation of variables in optimizer_v2 is deferred to when it's
     # called, so we need to manually create it here. Same for all other tests.
     self.assertIsInstance(opt.learning_rate, tf.Variable)
     self.evaluate(tf.compat.v1.initializers.global_variables())
     self.assertIsInstance(opt, adagrad.Adagrad)
     self.assertAlmostEqual(0.001, self.evaluate(opt.learning_rate))
Code Example #11
 def test_sgd(self):
     opt = optimizers.get_optimizer_instance_v2('SGD', learning_rate=0.1)
     self.assertIsInstance(opt, gradient_descent.GradientDescentOptimizer)
     self.assertAlmostEqual(0.1, opt._learning_rate)
Code Example #12
 def test_rmsprop(self):
     opt = optimizers.get_optimizer_instance_v2('RMSProp',
                                                learning_rate=0.1)
     self.assertIsInstance(opt, rmsprop.RMSPropOptimizer)
     self.assertAlmostEqual(0.1, opt._learning_rate)
Code Example #13
 def test_ftrl(self):
     opt = optimizers.get_optimizer_instance_v2('Ftrl', learning_rate=0.1)
     self.assertIsInstance(opt, ftrl.FtrlOptimizer)
     self.assertAlmostEqual(0.1, opt._learning_rate)
Code Example #14
 def test_adam(self):
     opt = optimizers.get_optimizer_instance_v2('Adam', learning_rate=0.1)
     self.assertIsInstance(opt, adam.AdamOptimizer)
     self.assertAlmostEqual(0.1, opt._lr)
Code Example #15
 def test_lambda(self):
     opt = optimizers.get_optimizer_instance_v2(lambda: _TestOptimizerV2())  # pylint: disable=unnecessary-lambda
     self.assertIsInstance(opt, _TestOptimizerV2)
Code Example #16
def _dnn_linear_combined_model_fn_v2(features,
                                     labels,
                                     mode,
                                     head,
                                     linear_feature_columns=None,
                                     linear_optimizer='Ftrl',
                                     dnn_feature_columns=None,
                                     dnn_optimizer='Adagrad',
                                     dnn_hidden_units=None,
                                     dnn_activation_fn=nn.relu,
                                     dnn_dropout=None,
                                     config=None,
                                     batch_norm=False,
                                     linear_sparse_combiner='sum'):
    """Deep Neural Net and Linear combined model_fn.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `Head` instance.
    linear_feature_columns: An iterable containing all the feature columns used
      by the Linear model.
    linear_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the Linear model. Defaults to the Ftrl
      optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      the DNN model.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN model. Defaults to the Adagrad
      optimizer.
    dnn_hidden_units: List of hidden units per DNN layer.
    dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
      will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability we will drop out a given DNN
      coordinate.
    config: `RunConfig` object to configure the runtime settings.
    batch_norm: Whether to use batch normalization after each hidden layer.
    linear_sparse_combiner: A string specifying how to reduce the linear model
      if a categorical column is multivalent.  One of "mean", "sqrtn", and
      "sum".

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If both `linear_feature_columns` and `dnn_feature_columns`
      are empty at the same time, or `input_layer_partitioner` is missing,
      or features has the wrong type.
  """
    if not isinstance(features, dict):
        raise ValueError('features should be a dictionary of `Tensor`s. '
                         'Given type: {}'.format(type(features)))
    if not linear_feature_columns and not dnn_feature_columns:
        raise ValueError(
            'Either linear_feature_columns or dnn_feature_columns must be defined.'
        )

    del config

    # Build DNN Logits.
    if not dnn_feature_columns:
        dnn_logits = None
    else:
        dnn_optimizer = optimizers.get_optimizer_instance_v2(
            dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
        _check_no_sync_replicas_optimizer(dnn_optimizer)
        if not dnn_hidden_units:
            raise ValueError(
                'dnn_hidden_units must be defined when dnn_feature_columns is '
                'specified.')

        dnn_logits, dnn_trainable_variables, dnn_update_ops = (
            dnn._dnn_model_fn_builder_v2(  # pylint: disable=protected-access
                units=head.logits_dimension,
                hidden_units=dnn_hidden_units,
                feature_columns=dnn_feature_columns,
                activation_fn=dnn_activation_fn,
                dropout=dnn_dropout,
                batch_norm=batch_norm,
                features=features,
                mode=mode,
                optimizer=dnn_optimizer))

    linear_parent_scope = 'linear'

    if not linear_feature_columns:
        linear_logits = None
    else:
        linear_optimizer = optimizers.get_optimizer_instance(
            linear_optimizer,
            learning_rate=_linear_learning_rate(len(linear_feature_columns)))
        _check_no_sync_replicas_optimizer(linear_optimizer)
        with variable_scope.variable_scope(
                linear_parent_scope,
                values=tuple(six.itervalues(features))) as scope:
            linear_absolute_scope = scope.name
            logit_fn = linear.linear_logit_fn_builder_v2(
                units=head.logits_dimension,
                feature_columns=linear_feature_columns,
                sparse_combiner=linear_sparse_combiner)
            linear_logits = logit_fn(features=features)
            _add_layer_summary(linear_logits, scope.name)

    # Combine logits and build full model.
    if dnn_logits is not None and linear_logits is not None:
        logits = dnn_logits + linear_logits
    elif dnn_logits is not None:
        logits = dnn_logits
    else:
        logits = linear_logits

    def _train_op_fn(loss):
        """Returns the op to optimize the loss."""
        train_ops = []
        global_step = training_util.get_global_step()
        if dnn_logits is not None:
            train_ops.append(
                dnn_optimizer.get_updates(loss, dnn_trainable_variables))
            if dnn_update_ops is not None:
                train_ops.append(dnn_update_ops)
            # TODO(yhliang): For DNN only case with optimizer V2.
            # Can be removed after optimizer V2 is used in Linear part
            if linear_logits is None:
                return control_flow_ops.group(*train_ops)
        if linear_logits is not None:
            train_ops.append(
                linear_optimizer.minimize(
                    loss,
                    var_list=ops.get_collection(
                        ops.GraphKeys.TRAINABLE_VARIABLES,
                        scope=linear_absolute_scope)))
            # As linear uses optimizer v1, it still relies on global_step if it's
            # Linear Only model
            if dnn_logits is None:
                train_op = control_flow_ops.group(*train_ops)
                with ops.control_dependencies([train_op]):
                    return state_ops.assign_add(global_step, 1).op
        train_op = control_flow_ops.group(*train_ops)
        return train_op

    return head.create_estimator_spec(features=features,
                                      mode=mode,
                                      labels=labels,
                                      train_op_fn=_train_op_fn,
                                      logits=logits)
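In the linear-only branch of _train_op_fn above, the v1 optimizer's minimize() call is not given a global_step, so the step counter is bumped explicitly under a control dependency. A minimal graph-mode sketch of that pattern, with a toy variable and loss standing in for the linear model:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

global_step = tf.compat.v1.train.get_or_create_global_step()
w = tf.compat.v1.get_variable('w_demo', initializer=1.0)
loss = tf.square(w)
train_op = tf.compat.v1.train.FtrlOptimizer(learning_rate=0.1).minimize(loss)

# minimize() was not given global_step, so increment it manually once the
# training op has run, mirroring the assign_add in the code above.
with tf.control_dependencies([train_op]):
  step_op = tf.compat.v1.assign_add(global_step, 1).op

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  sess.run(step_op)
  print(sess.run(global_step))  # 1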
Code Example #17
 def test_object(self):
     opt = optimizers.get_optimizer_instance_v2(_TestOptimizerV2())
     self.assertIsInstance(opt, _TestOptimizerV2)
Code Example #18
 def test_object_invalid(self):
     with self.assertRaisesRegexp(
             ValueError, 'The given object is not an Optimizer instance'):
         optimizers.get_optimizer_instance_v2((1, 2, 3))
Code Example #19
File: dnn.py  Project: shaosimon/estimator
def dnn_model_fn_v2(features,
                    labels,
                    mode,
                    head,
                    hidden_units,
                    feature_columns,
                    optimizer='Adagrad',
                    activation_fn=tf.nn.relu,
                    dropout=None,
                    config=None,
                    use_tpu=False,
                    batch_norm=False):
  """Deep Neural Net model_fn v2.

  This function differs from _dnn_model_fn_v1 in how it handles the optimizer
  when a string optimizer name is passed.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `base_head.Head` instance.
    hidden_units: Iterable of integer number of hidden units per layer.
    feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
    optimizer: String, `tf.keras.optimizers.Optimizer` object, or callable that
      creates the optimizer to use for training. If not specified, will use the
      Adagrad optimizer. If a string is given, the optimizer's default learning
      rate is used; if that optimizer has no default learning rate, a fixed
      learning rate of 0.05 is used.
    activation_fn: Activation function applied to each layer.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    config: `RunConfig` object to configure the runtime settings.
    use_tpu: Whether to make a DNN model able to run on TPU. Will make function
      return a `_TPUEstimatorSpec` instance and disable variable partitioning.
    batch_norm: Whether to use batch normalization after each hidden layer.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If features has the wrong type.
  """
  _validate_features(features)

  del config

  logits, trainable_variables, update_ops = _dnn_model_fn_builder_v2(
      units=head.logits_dimension,
      hidden_units=hidden_units,
      feature_columns=feature_columns,
      activation_fn=activation_fn,
      dropout=dropout,
      batch_norm=batch_norm,
      features=features,
      mode=mode)

  # In TRAIN mode, create the optimizer and assign the global_step variable to
  # optimizer.iterations so that global_step is incremented correctly, since
  # Hooks rely on the global step as their step counter.
  if mode == ModeKeys.TRAIN:
    optimizer = optimizers.get_optimizer_instance_v2(optimizer)
    optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()

  # Create EstimatorSpec.
  if use_tpu:
    estimator_spec_fn = head._create_tpu_estimator_spec  # pylint: disable=protected-access
  else:
    estimator_spec_fn = head.create_estimator_spec  # pylint: disable=protected-access

  return estimator_spec_fn(
      features=features,
      mode=mode,
      labels=labels,
      optimizer=optimizer,
      logits=logits,
      trainable_variables=trainable_variables,
      update_ops=update_ops)
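The learning-rate rule described in the docstring (a string-named optimizer keeps its own default, and 0.05 is only substituted when there is no default) matches what the tests elsewhere on this page check, e.g. Adam defaulting to 0.001. A small illustrative check, independent of the estimator code:

import tensorflow as tf

adam = tf.keras.optimizers.Adam()  # no learning rate passed
print(adam.get_config()['learning_rate'])  # 0.001, the optimizer's own default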
Code Example #20
 def test_unsupported_name(self):
     with self.assertRaisesRegexp(
             ValueError, 'Unsupported optimizer name: unsupported_name'):
         optimizers.get_optimizer_instance_v2('unsupported_name',
                                              learning_rate=0.1)
Code Example #21
  def model_fn(features, labels, mode, config):
    """model_fn for the custom estimator."""
    del config
    input_tensors = tfl.estimators.transform_features(features, feature_columns)
    inputs = {
        key: tf.keras.layers.Input(shape=(1,), name=key)
        for key in input_tensors
    }

    lattice_sizes = [3, 2, 2, 2]
    lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing']
    lattice_input = tf.keras.layers.Concatenate(axis=1)([
        tfl.layers.PWLCalibration(
            input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),
            # The output range of the calibrator should be the input range of
            # the following lattice dimension.
            output_min=0.0,
            output_max=lattice_sizes[0] - 1.0,
            monotonicity='increasing',
        )(inputs['age']),
        tfl.layers.CategoricalCalibration(
            # Number of categories including any missing/default category.
            num_buckets=2,
            output_min=0.0,
            output_max=lattice_sizes[1] - 1.0,
        )(inputs['sex']),
        tfl.layers.PWLCalibration(
            input_keypoints=[0.0, 1.0, 2.0, 3.0],
            output_min=0.0,
            output_max=lattice_sizes[2] - 1.0,
            # You can specify TFL regularizers as tuple
            # ('regularizer name', l1, l2).
            kernel_regularizer=('hessian', 0.0, 1e-4),
            monotonicity='increasing',
        )(inputs['ca']),
        tfl.layers.CategoricalCalibration(
            num_buckets=3,
            output_min=0.0,
            output_max=lattice_sizes[1] - 1.0,
            # Categorical monotonicity can be partial order.
            # (i, j) indicates that we must have output(i) <= output(j).
            # Make sure to set the lattice monotonicity to 1 for this dimension.
            monotonicities=[(0, 1), (0, 2)],
        )(inputs['thal']),
    ])
    output = tfl.layers.Lattice(
        lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)(
            lattice_input)

    training = (mode == tf.estimator.ModeKeys.TRAIN)
    model = tf.keras.Model(inputs=inputs, outputs=output)
    logits = model(input_tensors, training=training)

    if training:
      optimizer = optimizers.get_optimizer_instance_v2('Adam',
                                                       FLAGS.learning_rate)
    else:
      optimizer = None

    head = binary_class_head.BinaryClassHead()
    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        optimizer=optimizer,
        logits=logits,
        trainable_variables=model.trainable_variables,
        update_ops=model.updates)
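The range convention described in the comments above (each calibrator's output range must cover [0, lattice_size - 1] for its lattice dimension) can be seen with a single calibrator in isolation. A minimal sketch that assumes tensorflow_lattice is importable as tfl; the keypoints and sizes mirror the example:

import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl

lattice_sizes = [3, 2, 2, 2]
calibrator = tfl.layers.PWLCalibration(
    input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),
    output_min=0.0,
    output_max=lattice_sizes[0] - 1.0,  # first lattice dimension has size 3
    monotonicity='increasing')

ages = tf.constant([[15.0], [40.0], [90.0]])
print(calibrator(ages))  # calibrated values all lie in [0.0, 2.0]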
Code Example #22
def dnn_linear_combined_model(
        features,
        mode,
        linear_feature_columns=None,
        linear_optimizer='Ftrl',
        dnn_feature_columns=None,
        dnn_optimizer='Adagrad',
        dnn_hidden_units=None,
        dnn_activation_fn=nn.relu,
        dnn_dropout=None,
        config=None,
        batch_norm=False,
        linear_sparse_combiner='sum',
        loss_reduction=losses.ReductionV2.SUM_OVER_BATCH_SIZE):
    """Deep Neural Net and Linear combined model_fn.
  Args:
    features: dict of `Tensor`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    linear_feature_columns: An iterable containing all the feature columns used
      by the Linear model.
    linear_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the Linear model. Defaults to the Ftrl
      optimizer.
    dnn_feature_columns: An iterable containing all the feature columns used by
      the DNN model.
    dnn_optimizer: string, `Optimizer` object, or callable that defines the
      optimizer to use for training the DNN model. Defaults to the Adagrad
      optimizer.
    dnn_hidden_units: List of hidden units per DNN layer.
    dnn_activation_fn: Activation function applied to each DNN layer. If `None`,
      will use `tf.nn.relu`.
    dnn_dropout: When not `None`, the probability we will drop out a given DNN
      coordinate.
    config: `RunConfig` object to configure the runtime settings.
    batch_norm: Whether to use batch normalization after each hidden layer.
    linear_sparse_combiner: A string specifying how to reduce the linear model
      if a categorical column is multivalent.  One of "mean", "sqrtn", and
      "sum".
    loss_reduction: One of `tf.keras.losses.Reduction` except `NONE`. Describes
      how to reduce training loss over batch. Defaults to `SUM_OVER_BATCH_SIZE`.
  Returns:
    The combined logits `Tensor`.
  Raises:
    ValueError: If both `linear_feature_columns` and `dnn_feature_columns`
      are empty at the same time, or `input_layer_partitioner` is missing,
      or features has the wrong type.
  """
    if not isinstance(features, dict):
        raise ValueError('features should be a dictionary of `Tensor`s. '
                         'Given type: {}'.format(type(features)))
    if not linear_feature_columns and not dnn_feature_columns:
        raise ValueError(
            'Either linear_feature_columns or dnn_feature_columns must be defined.'
        )

    del config

    head = regression_head.RegressionHead(label_dimension=1,
                                          weight_column=None,
                                          loss_reduction=loss_reduction)

    # Build DNN Logits.
    if not dnn_feature_columns:
        dnn_logits = None
    else:
        if mode == ModeKeys.TRAIN:
            dnn_optimizer = optimizers.get_optimizer_instance_v2(
                dnn_optimizer, learning_rate=_DNN_LEARNING_RATE)
            _check_no_sync_replicas_optimizer(dnn_optimizer)

        if not dnn_hidden_units:
            raise ValueError(
                'dnn_hidden_units must be defined when dnn_feature_columns is '
                'specified.')
        dnn_logits, dnn_trainable_variables, dnn_update_ops = (
            dnn._dnn_model_fn_builder_v2(  # pylint: disable=protected-access
                units=head.logits_dimension,
                hidden_units=dnn_hidden_units,
                feature_columns=dnn_feature_columns,
                activation_fn=dnn_activation_fn,
                dropout=dnn_dropout,
                batch_norm=batch_norm,
                features=features,
                mode=mode))

    if not linear_feature_columns:
        linear_logits = None
    else:
        if mode == ModeKeys.TRAIN:
            linear_optimizer = optimizers.get_optimizer_instance_v2(
                linear_optimizer,
                learning_rate=_linear_learning_rate(
                    len(linear_feature_columns)))
            _check_no_sync_replicas_optimizer(linear_optimizer)

        linear_logits, linear_trainable_variables = (
            linear._linear_model_fn_builder_v2(  # pylint: disable=protected-access
                units=head.logits_dimension,
                feature_columns=linear_feature_columns,
                sparse_combiner=linear_sparse_combiner,
                features=features))
        _add_layer_summary(linear_logits, 'linear')

    # Combine logits and build full model.
    if dnn_logits is not None and linear_logits is not None:
        logits = dnn_logits + linear_logits
    elif dnn_logits is not None:
        logits = dnn_logits
    else:
        logits = linear_logits

    def _train_op_fn(loss):
        """Returns the op to optimize the loss."""
        train_ops = []
        # Scale loss by number of replicas.
        if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
            loss = losses_utils.scale_loss_for_distribution(loss)

        if dnn_logits is not None:
            train_ops.extend(
                dnn_optimizer.get_updates(loss, dnn_trainable_variables))
            if dnn_update_ops is not None:
                train_ops.extend(dnn_update_ops)
        if linear_logits is not None:
            train_ops.extend(
                linear_optimizer.get_updates(loss, linear_trainable_variables))
        train_op = control_flow_ops.group(*train_ops)
        return train_op

    # In TRAIN mode, assign the global_step variable to optimizer.iterations so
    # that global_step is incremented correctly, since Hooks rely on the global
    # step as their step counter. Note that only one model's optimizer needs
    # this assignment.
    if mode == ModeKeys.TRAIN:
        if dnn_logits is not None:
            dnn_optimizer.iterations = training_util.get_or_create_global_step(
            )
        else:
            linear_optimizer.iterations = training_util.get_or_create_global_step(
            )

    return logits
Code Example #23
 def test_adagrad_but_no_learning_rate(self):
     opt = optimizers.get_optimizer_instance_v2('Adagrad')
     self.assertIsInstance(opt, adagrad.AdagradOptimizer)
     self.assertAlmostEqual(0.05, opt._learning_rate)
Code Example #24
File: dnn.py  Project: AhmedRafat19/estimator
def _dnn_model_fn_v2(features,
                     labels,
                     mode,
                     head,
                     hidden_units,
                     feature_columns,
                     optimizer='Adagrad',
                     activation_fn=nn.relu,
                     dropout=None,
                     input_layer_partitioner=None,
                     config=None,
                     use_tpu=False,
                     batch_norm=False):
  """Deep Neural Net model_fn v2.

  This function differs from _dnn_model_fn_v1 in how it handles the optimizer
  when a string optimizer name is passed.

  Args:
    features: dict of `Tensor`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
      `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    head: A `base_head.Head` instance.
    hidden_units: Iterable of integer number of hidden units per layer.
    feature_columns: Iterable of `feature_column._FeatureColumn` model inputs.
    optimizer: String, `tf.Optimizer` object, or callable that creates the
      optimizer to use for training. If not specified, will use the Adagrad
      optimizer. If a string is given, the optimizer's default learning rate
      is used; if that optimizer has no default learning rate, a fixed
      learning rate of 0.05 is used.
    activation_fn: Activation function applied to each layer.
    dropout: When not `None`, the probability we will drop out a given
      coordinate.
    input_layer_partitioner: Partitioner for input layer. Defaults to
      `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    config: `RunConfig` object to configure the runtime settings.
    use_tpu: Whether to make a DNN model able to run on TPU. Will make function
      return a `_TPUEstimatorSpec` instance and disable variable partitioning.
    batch_norm: Whether to use batch normalization after each hidden layer.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If features has the wrong type.
  """
  optimizer = optimizers.get_optimizer_instance_v2(optimizer)

  return _dnn_model_fn_core(
      features,
      labels,
      mode,
      head,
      hidden_units,
      feature_columns,
      optimizer=optimizer,
      activation_fn=activation_fn,
      dropout=dropout,
      input_layer_partitioner=input_layer_partitioner,
      use_tpu=use_tpu,
      batch_norm=batch_norm)
Code Example #25
 def test_adam_but_no_learning_rate(self):
     opt = optimizers.get_optimizer_instance_v2('Adam')
     self.assertIsInstance(opt, adam.AdamOptimizer)
     self.assertAlmostEqual(0.001, opt._lr)