Example #1
  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops."""
    features, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    _assert_float32(features)
    _assert_float32(labels)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)

    probabilities = graph_builder.inference_graph(features, data_spec=spec)

    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))

    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}

    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)

    return result
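For context, here is a minimal standalone sketch of the one-hot step used above (TF 1.x API; the label values and num_classes are illustrative): integer labels are squeezed to a rank-1 tensor and expanded to a one-hot matrix with num_classes columns.

import tensorflow as tf

num_classes = 3
labels = tf.constant([[0], [2], [1]])  # integer class ids, shape (batch, 1)
one_hot = tf.one_hot(tf.squeeze(tf.to_int64(labels)), num_classes, 1, 0)

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[1 0 0]
    #  [0 0 1]
    #  [0 1 0]]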
Example #2
  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    _assert_float32(features)
    _assert_float32(labels)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss
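The train op above couples the forest's training graph with a global-step increment. Below is a minimal standalone sketch of that grouping pattern (TF 1.x API; the variable, loss, and optimizer are illustrative stand-ins, not part of tensor_forest).

import tensorflow as tf

w = tf.Variable(0.0)
loss = tf.square(w - 3.0)  # stand-in training loss
global_step = tf.train.get_or_create_global_step()
optimize = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Group the parameter update with a global-step increment so that a single
# session run advances both.
train_op = tf.group(optimize, tf.assign_add(global_step, 1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(global_step))  # 1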
Example #3
  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops."""
    graph_builder = self.graph_builder_class(
        self.params,
        device_assigner=self.device_assigner,
        training=False,
        **self.construction_args)
    features, spec = data_ops.ParseDataTensorOrDict(features)
    return graph_builder.inference_graph(features, data_spec=spec)
Example #4
  def _get_predict_ops(self, features):
    """Method that builds model graph and returns prediction ops."""
    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)
    features, keys, spec = data_ops.ParseDataTensorOrDict(features)
    _assert_float32(features)
    output_dict = {
        'probabilities': graph_builder.inference_graph(features,
                                                       data_spec=spec)}
    if keys is not None:
      output_dict['keys'] = keys
    return output_dict
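For reference, a minimal sketch (TF 1.x; values illustrative) of how hard class predictions can be derived from such a 'probabilities' tensor, mirroring the argmax step in Example #5 below:

import tensorflow as tf

probabilities = tf.constant([[0.1, 0.7, 0.2],
                             [0.6, 0.3, 0.1]])
predicted_class = tf.argmax(probabilities, 1)

with tf.Session() as sess:
    print(sess.run(predicted_class))  # [1 0]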
Example #5
    def _model_fn(features, labels):
        """Function that returns predictions, training loss, and training op."""
        weights = None
        keys = None
        if weights_name and weights_name in features:
            weights = features.pop(weights_name)
        if keys_name and keys_name in features:
            keys = features.pop(keys_name)
        processed_features, spec = data_ops.ParseDataTensorOrDict(features)
        _assert_float32(processed_features)
        if labels is not None:
            labels = data_ops.ParseLabelTensorOrDict(labels)
            _assert_float32(labels)

        graph_builder = graph_builder_class(params,
                                            device_assigner=device_assigner)
        inference = {
            eval_metrics.INFERENCE_PROB_NAME:
            graph_builder.inference_graph(processed_features, data_spec=spec)
        }
        if not params.regression:
            inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
                inference[eval_metrics.INFERENCE_PROB_NAME], 1)
        if keys is not None:
            inference[KEYS_NAME] = keys

        # labels might be None if we're doing prediction (which brings up the
        # question of why we force everything to adhere to a single model_fn).
        training_loss = None
        training_graph = None
        if labels is not None:
            training_loss = graph_builder.training_loss(processed_features,
                                                        labels,
                                                        data_spec=spec,
                                                        name=LOSS_NAME)
            training_graph = control_flow_ops.group(
                graph_builder.training_graph(processed_features,
                                             labels,
                                             data_spec=spec,
                                             input_weights=weights),
                state_ops.assign_add(contrib_framework.get_global_step(), 1))
        # Put weights back in
        if weights is not None:
            features[weights_name] = weights
        return (inference, training_loss, training_graph)
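A model_fn like this one is typically handed to the (now long-removed) tf.contrib.learn Estimator, which calls it to build the graph for fitting, evaluation, and prediction. A hedged sketch of that wiring, assuming the TF 1.x contrib.learn API and an already-built _model_fn closure like the one above; the model_dir path and the input_fn names are illustrative.

from tensorflow.contrib import learn

# _model_fn is assumed to be the closure built above (it captures params,
# graph_builder_class, device_assigner, and the weights/keys column names).
estimator = learn.Estimator(model_fn=_model_fn, model_dir='/tmp/forest_model')

# Training and prediction then go through the usual Estimator entry points,
# e.g. estimator.fit(input_fn=train_input_fn, steps=100) and
# estimator.predict(input_fn=predict_input_fn), where train_input_fn and
# predict_input_fn are user-supplied functions returning feature/label tensors.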