Example #1
def model_fn(features, labels, mode, params):
    del params  # unused
    with variable_scope.variable_scope('m', reuse=variable_scope.AUTO_REUSE):
        w = variable_scope.get_variable('W', shape=[1000, 10])
    logits = math_ops.matmul(features, w)
    loss = losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    if mode == model_fn_lib.ModeKeys.TRAIN:
        optimizer = training.RMSPropOptimizer(learning_rate=0.01)
        optimizer = tpu_optimizer.CrossShardOptimizer(optimizer)
        train_op = optimizer.minimize(loss, training.get_global_step())
        return tpu_estimator.TPUEstimatorSpec(
            mode=model_fn_lib.ModeKeys.TRAIN,
            loss=loss,
            train_op=train_op,
        )
    elif mode == model_fn_lib.ModeKeys.EVAL:

        def metric_fn(labels, logits):
            labels = math_ops.cast(labels, dtypes.int64)
            logging.info('labels=%s logits=%s', labels, logits)
            return {
                'recall@1': metrics_lib.recall_at_k(labels, logits, 1),
                'recall@5': metrics_lib.recall_at_k(labels, logits, 5),
            }

        # `loss` was already computed above; just package the metric_fn.
        eval_metrics = (metric_fn, [labels, logits])
        return tpu_estimator.TPUEstimatorSpec(mode=model_fn_lib.ModeKeys.EVAL,
                                              loss=loss,
                                              eval_metrics=eval_metrics)
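A sketch of wiring a model_fn like this into TPUEstimator. The RunConfig
values, input_fn, and batch sizes below are assumptions for illustration,
not part of the example above (tpu_config is the sibling module of
tpu_estimator in tf.contrib.tpu):

run_config = tpu_config.RunConfig(
    master='',                               # assumed: local/test master
    model_dir='/tmp/tpu_model',              # assumed path
    tpu_config=tpu_config.TPUConfig(iterations_per_loop=100))
est = tpu_estimator.TPUEstimator(
    model_fn=model_fn,
    config=run_config,
    use_tpu=True,
    train_batch_size=128,
    eval_batch_size=128)
est.train(input_fn=train_input_fn, max_steps=1000)  # train_input_fn assumed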
Example #2
    def _ModelFn(features, labels, mode):
      if is_training:
        logits_out = self._BuildGraph(features)
      else:
        graph_def = self._GetGraphDef(use_trt, batch_size, model_dir)
        logits_out = importer.import_graph_def(
            graph_def,
            input_map={INPUT_NODE_NAME: features},
            return_elements=[OUTPUT_NODE_NAME + ':0'],
            name='')[0]

      loss = losses.sparse_softmax_cross_entropy(
          labels=labels, logits=logits_out)
      summary.scalar('loss', loss)

      classes_out = math_ops.argmax(logits_out, axis=1, name='classes_out')
      accuracy = metrics.accuracy(
          labels=labels, predictions=classes_out, name='acc_op')
      summary.scalar('accuracy', accuracy[1])

      if mode == ModeKeys.EVAL:
        return EstimatorSpec(
            mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
      elif mode == ModeKeys.TRAIN:
        optimizer = AdamOptimizer(learning_rate=1e-2)
        train_op = optimizer.minimize(loss, global_step=get_global_step())
        return EstimatorSpec(mode, loss=loss, train_op=train_op)
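For reference, a standalone sketch of the import_graph_def pattern used in
the eval branch above. The path and node names are assumptions; graph_pb2
and gfile follow the same internal-module import style as these snippets:

graph_def = graph_pb2.GraphDef()
with gfile.GFile('/tmp/frozen_graph.pb', 'rb') as f:   # assumed path
  graph_def.ParseFromString(f.read())
features = array_ops.placeholder(dtypes.float32, shape=[None, 28, 28, 1])
logits = importer.import_graph_def(
    graph_def,
    input_map={'input:0': features},     # assumed input node name
    return_elements=['logits:0'],        # assumed output tensor name
    name='')[0]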
Example #3
def model_fn():
  """MNIST model with synthetic input."""
  data_format = 'channels_last'
  input_shape = [28, 28, 1]
  l = keras.layers
  max_pool = l.MaxPooling2D((2, 2), (2, 2),
                            padding='same',
                            data_format=data_format)
  model = keras.Sequential([
      l.Reshape(target_shape=input_shape, input_shape=(28 * 28,)),
      l.Conv2D(
          32,
          5,
          padding='same',
          data_format=data_format,
          activation=nn.relu), max_pool,
      l.Conv2D(
          64,
          5,
          padding='same',
          data_format=data_format,
          activation=nn.relu), max_pool,
      l.Flatten(),
      l.Dense(1024, activation=nn.relu),
      l.Dropout(0.4),
      l.Dense(10)
  ])
  image = random_ops.random_uniform([2, 28, 28])
  label = random_ops.random_uniform([2, 1], maxval=10, dtype=dtypes.int32)
  logits = model(image, training=True)
  loss = losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
  optimizer = adam.AdamOptimizer(learning_rate=1e-4)
  train_op = optimizer.minimize(loss,
                                training_util.get_or_create_global_step())
  return train_op
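A minimal sketch of driving this train_op in a TF1 session (the step count
is arbitrary; `session` and `variables` are the usual internal aliases):

train_op = model_fn()
with session.Session() as sess:
  sess.run(variables.global_variables_initializer())
  for _ in range(10):   # arbitrary number of steps
    sess.run(train_op)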
Example #4
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode, features  # Unused for this head.
   label_ids = self._label_ids(_check_and_reshape_dense_labels(labels, 1))
   unweighted_loss = losses.sparse_softmax_cross_entropy(
       labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
   # Restore the squeezed dim, so unweighted_loss matches the weights shape.
   return LossAndLabels(
       unweighted_loss=array_ops.expand_dims(unweighted_loss, axis=(1,)),
       processed_labels=label_ids)
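The Reduction.NONE plus expand_dims dance keeps one loss per example in a
[batch_size, 1] column so it lines up with the weights shape. Illustrative
values (assumed):

logits = constant_op.constant([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])      # [2, 3]
label_ids = constant_op.constant([0, 1], dtype=dtypes.int64)           # [2]
per_example = losses.sparse_softmax_cross_entropy(
    labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)  # [2]
column = array_ops.expand_dims(per_example, axis=1)                    # [2, 1]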
Example #5
def body(_, i):
  i += 1
  x, yt = it.get_next()          # `it` is an iterator from the enclosing scope
  dense = layers.Dense(nclass)   # `nclass` also comes from the enclosing scope
  y = dense(x)
  loss = losses.sparse_softmax_cross_entropy(yt, y)
  opt = adam.AdamOptimizer()
  train_op = opt.minimize(loss, var_list=dense.trainable_weights)
  with ops.control_dependencies([train_op]):
    loss = array_ops.identity(loss)
  return loss, i
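This body is meant for a while_loop; a hedged sketch of the wiring follows
(n_steps is an arbitrary assumption, and control_flow_ops/constant_op are
the usual internal aliases). Note that creating a layer, and therefore
variables, inside the loop body generally only works under a functional
while loop (for example inside a defun/tf.function), which is presumably
the context this fragment comes from:

n_steps = 5   # arbitrary assumption
loss, i = control_flow_ops.while_loop(
    lambda loss, i: i < n_steps,     # cond: run for n_steps iterations
    body,
    [constant_op.constant(0.0),      # initial loss
     constant_op.constant(0)])       # initial i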
Example #6
def model_fn(features, labels, mode, params):
  """The model_fn argument for creating an Estimator."""
  model = Model(params["data_format"])
  image = features
  if isinstance(image, dict):
    image = features["image"]

  if mode == estimator.ModeKeys.PREDICT:
    logits = model(image, training=False)
    predictions = {
        "classes": math_ops.argmax(logits, axis=1),
        "probabilities": nn.softmax(logits),
    }
    return estimator.EstimatorSpec(
        mode=estimator.ModeKeys.PREDICT,
        predictions=predictions,
        export_outputs={
            "classify": estimator.export.PredictOutput(predictions)
        })

  elif mode == estimator.ModeKeys.TRAIN:
    optimizer = train.AdamOptimizer(learning_rate=1e-4)

    logits = model(image, training=True)
    loss = losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    return estimator.EstimatorSpec(
        mode=estimator.ModeKeys.TRAIN,
        loss=loss,
        train_op=optimizer.minimize(loss, train.get_or_create_global_step()))

  elif mode == estimator.ModeKeys.EVAL:
    logits = model(image, training=False)
    loss = losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    return estimator.EstimatorSpec(
        mode=estimator.ModeKeys.EVAL,
        loss=loss,
        eval_metric_ops={
            "accuracy":
                ops.metrics.accuracy(
                    labels=labels, predictions=math_ops.argmax(logits, axis=1)),
        })
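A sketch of constructing and running an Estimator around this model_fn.
The model_dir, input_fns, and step count are assumptions:

classifier = estimator.Estimator(
    model_fn=model_fn,
    model_dir='/tmp/mnist_model',             # assumed path
    params={'data_format': 'channels_last'})
classifier.train(input_fn=train_input_fn, steps=1000)       # train_input_fn assumed
eval_results = classifier.evaluate(input_fn=eval_input_fn)  # eval_input_fn assumed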
Example #7
 def create_loss(self, features, mode, logits, labels):
   """See `Head`."""
   del mode  # Unused for this head.
   label_ids = self._label_ids(_check_and_reshape_dense_labels(labels, 1))
   unweighted_loss = losses.sparse_softmax_cross_entropy(
       labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
   # Restore the squeezed dim, so unweighted_loss matches the weights shape.
   unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
   weights = _weights(features, self._weight_column)
   weighted_sum_loss = losses.compute_weighted_loss(
       unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
   # _weights() can return 1.
   example_weight_sum = math_ops.reduce_sum(
       weights * array_ops.ones_like(unweighted_loss))
   return LossSpec(
       weighted_sum_loss=weighted_sum_loss,
       example_weight_sum=example_weight_sum,
       processed_labels=label_ids)
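The `weights * ones_like(unweighted_loss)` product is a broadcasting trick:
when _weights() falls back to the scalar 1., it expands to one weight per
example, so the reduce_sum counts examples. Illustrative values (assumed):

unweighted = constant_op.constant([[0.3], [0.7], [0.1]])   # [3, 1] losses
weights = 1.                                               # scalar fallback
example_weight_sum = math_ops.reduce_sum(
    weights * array_ops.ones_like(unweighted))             # -> 3.0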
Example #8
 def _unweighted_loss_and_weights(self, logits, label_ids, features):
   """Computes loss spec."""
   if self._loss_fn:
     unweighted_loss = base_head.call_loss_fn(
         loss_fn=self._loss_fn,
         labels=label_ids,
         logits=logits,
         features=features,
         expected_loss_dim=1)
   else:
     unweighted_loss = losses.sparse_softmax_cross_entropy(
         labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
     # Restore the squeezed dim, so unweighted_loss matches the weights shape.
     unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=-1)
   weights = base_head.get_weights_and_check_match_logits(
       features=features,
       weight_column=self._weight_column,
       logits=logits)
   return unweighted_loss, weights
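A sketch of a custom loss_fn compatible with the branch above. The exact
contract of base_head.call_loss_fn is inferred here, so treat this as an
assumption: the function receives labels and logits and must return a
per-example loss with a trailing dimension of 1 (expected_loss_dim=1):

def my_loss_fn(labels, logits):
  per_example = nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits)
  return array_ops.expand_dims(per_example, axis=-1)   # [batch_size, 1]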
Example #9
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='multi_class_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')

        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '':
                    export_output.ClassificationOutput(
                        scores=probabilities,
                        # `ClassificationOutput` requires string classes.
                        classes=export_output_classes)
            })

      # Eval.
      label_ids = self._label_ids(_check_labels(labels, 1))

      unweighted_loss = losses.sparse_softmax_cross_entropy(
          labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
      # Restore the squeezed dim, so unweighted_loss matches the weights shape.
      unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = math_ops.to_float(weights, name='weights')
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn cannot be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
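Example #10 below is a near-identical revision of the same head; the only
differences are that labels and weights pass through _maybe_expand_dim
before validation.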
Example #10
  def create_estimator_spec(
      self, features, mode, logits, labels=None, train_op_fn=None):
    """See `Head`."""
    with variable_scope.variable_scope(
        None,
        default_name='multi_class_head',
        values=(tuple(six.itervalues(features)) + (labels, logits))):
      logits = _check_logits(logits, self.logits_dimension)

      # Predict.
      pred_keys = prediction_keys.PredictionKeys
      with ops.name_scope(None, 'predictions', (logits,)):
        # class_ids's shape is [batch_size]
        class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASS_IDS)
        class_ids = array_ops.expand_dims(class_ids, axis=(1,))
        if self._label_vocabulary:
          table = lookup_ops.index_to_string_table_from_tensor(
              vocabulary_list=self._label_vocabulary,
              name='class_string_lookup')
          classes = table.lookup(class_ids)
        else:
          classes = string_ops.as_string(class_ids, name='str_classes')

        probabilities = nn.softmax(logits, name=pred_keys.PROBABILITIES)
        predictions = {
            pred_keys.LOGITS: logits,
            pred_keys.PROBABILITIES: probabilities,
            # Expand to [batch_size, 1]
            pred_keys.CLASS_IDS: class_ids,
            pred_keys.CLASSES: classes,
        }
      if mode == model_fn.ModeKeys.PREDICT:
        batch_size = array_ops.shape(probabilities)[0]
        export_class_list = self._label_vocabulary
        if not export_class_list:
          export_class_list = string_ops.as_string(
              math_ops.range(self._n_classes))
        export_output_classes = array_ops.tile(
            input=array_ops.expand_dims(input=export_class_list, axis=0),
            multiples=[batch_size, 1])
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.PREDICT,
            predictions=predictions,
            export_outputs={
                '':
                    export_output.ClassificationOutput(
                        scores=probabilities,
                        # `ClassificationOutput` requires string classes.
                        classes=export_output_classes)
            })

      # Eval.
      label_ids = self._label_ids(_check_labels(_maybe_expand_dim(labels), 1))

      unweighted_loss = losses.sparse_softmax_cross_entropy(
          labels=label_ids, logits=logits, reduction=losses.Reduction.NONE)
      # Restore the squeezed dim, so unweighted_loss matches the weights shape.
      unweighted_loss = array_ops.expand_dims(unweighted_loss, axis=(1,))
      weights = (
          1. if (self._weight_feature_key is None) else
          features[self._weight_feature_key])
      weights = _maybe_expand_dim(math_ops.to_float(weights, name='weights'))
      training_loss = losses.compute_weighted_loss(
          unweighted_loss, weights=weights, reduction=losses.Reduction.SUM)
      if mode == model_fn.ModeKeys.EVAL:
        return model_fn.EstimatorSpec(
            mode=model_fn.ModeKeys.EVAL,
            predictions=predictions,
            loss=training_loss,
            eval_metric_ops=self._eval_metric_ops(
                labels=label_ids,
                probabilities=probabilities,
                logits=logits,
                class_ids=class_ids,
                unweighted_loss=unweighted_loss,
                weights=weights))

      # Train.
      if train_op_fn is None:
        raise ValueError('train_op_fn cannot be None.')
      logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS, training_loss)
      logging_ops.scalar_summary(
          metric_keys.MetricKeys.LOSS_MEAN,
          losses.compute_weighted_loss(
              unweighted_loss, weights=weights,
              reduction=losses.Reduction.MEAN))
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.TRAIN,
          predictions=predictions,
          loss=training_loss,
          train_op=train_op_fn(training_loss))
Example #11
    def create_estimator_spec(self,
                              features,
                              mode,
                              logits,
                              labels=None,
                              train_op_fn=None):
        """See `Head`."""
        with variable_scope.variable_scope(
                None,
                default_name='multi_class_head',
                values=(tuple(six.itervalues(features)) + (labels, logits))):
            logits = _check_logits(logits, self.logits_dimension)

            # Predict.
            pred_keys = prediction_keys.PredictionKeys
            with ops.name_scope(None, 'predictions', (logits, )):
                # class_ids's shape is [batch_size]
                class_ids = math_ops.argmax(logits, 1, name=pred_keys.CLASSES)
                probabilities = nn.softmax(logits,
                                           name=pred_keys.PROBABILITIES)
                predictions = {
                    pred_keys.LOGITS:
                    logits,
                    pred_keys.PROBABILITIES:
                    probabilities,
                    # Expand to [batch_size, 1]
                    pred_keys.CLASSES:
                    array_ops.expand_dims(class_ids, axis=(1, ))
                }
            if mode == model_fn.ModeKeys.PREDICT:
                batch_size = array_ops.shape(probabilities)[0]
                output_classes = array_ops.tile(input=array_ops.expand_dims(
                    input=math_ops.range(self._n_classes), axis=0),
                                                multiples=[batch_size, 1])
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.PREDICT,
                    predictions=predictions,
                    export_outputs={
                        '':
                        export_output.ClassificationOutput(
                            scores=probabilities,
                            # `ClassificationOutput` requires string classes.
                            # TODO(xiejw): Support label_keys or label_column
                            classes=string_ops.as_string(output_classes,
                                                         name='str_classes'))
                    })

            # Eval.
            labels = _check_labels(labels, 1)
            # Check that we got integer for classification.
            if not labels.dtype.is_integer:
                raise ValueError('Labels dtype should be integer. '
                                 'Instead got %s.' % labels.dtype)
            assert_less = check_ops.assert_less(
                labels,
                ops.convert_to_tensor(self._n_classes, dtype=labels.dtype),
                message='Label IDs must be < n_classes')
            assert_greater = check_ops.assert_non_negative(
                labels, message='Label IDs must be >= 0')
            with ops.control_dependencies((assert_less, assert_greater)):
                labels = array_ops.identity(labels)

            unweighted_loss = losses.sparse_softmax_cross_entropy(
                labels=labels, logits=logits, reduction=losses.Reduction.NONE)
            # Restore the squeezed dim, so unweighted_loss matches the weights shape.
            unweighted_loss = array_ops.expand_dims(unweighted_loss,
                                                    axis=(1, ))
            weights = (1. if (self._weight_feature_key is None) else
                       features[self._weight_feature_key])
            weights = math_ops.to_float(weights, name='weights')
            training_loss = losses.compute_weighted_loss(
                unweighted_loss,
                weights=weights,
                reduction=losses.Reduction.SUM)
            if mode == model_fn.ModeKeys.EVAL:
                return model_fn.EstimatorSpec(
                    mode=model_fn.ModeKeys.EVAL,
                    predictions=predictions,
                    loss=training_loss,
                    eval_metric_ops=self._eval_metric_ops(
                        labels=labels,
                        probabilities=probabilities,
                        logits=logits,
                        class_ids=class_ids,
                        unweighted_loss=unweighted_loss,
                        weights=weights))

            # Train.
            if train_op_fn is None:
                raise ValueError('train_op_fn cannot be None.')
            logging_ops.scalar_summary(metric_keys.MetricKeys.LOSS,
                                       training_loss)
            logging_ops.scalar_summary(
                metric_keys.MetricKeys.LOSS_MEAN,
                losses.compute_weighted_loss(unweighted_loss,
                                             weights=weights,
                                             reduction=losses.Reduction.MEAN))
            return model_fn.EstimatorSpec(mode=model_fn.ModeKeys.TRAIN,
                                          predictions=predictions,
                                          loss=training_loss,
                                          train_op=train_op_fn(training_loss))
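The label-validation pattern above generalizes to any sparse-label loss; a
standalone sketch (the placeholder shape and n_classes value are assumed):

n_classes = 10
labels = array_ops.placeholder(dtypes.int64, shape=[None])
assert_less = check_ops.assert_less(
    labels,
    ops.convert_to_tensor(n_classes, dtype=labels.dtype),
    message='Label IDs must be < n_classes')
assert_non_negative = check_ops.assert_non_negative(
    labels, message='Label IDs must be >= 0')
with ops.control_dependencies((assert_less, assert_non_negative)):
  labels = array_ops.identity(labels)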