Example #1
 def testGetInputSpecsWithSignatures(self, save_as_keras, signature_name):
     model = self.createModelWithSingleInput(save_as_keras)
     self.assertEqual(
         {
             'input':
             tf.TensorSpec(name='input', shape=(None, 1), dtype=tf.string),
         }, model_util.get_input_specs(model, signature_name))
Example #2
 def testGetInputSpecsWithSignatures(self, save_as_keras, signature_name):
     export_path = self.createModelWithSingleInput(save_as_keras)
     if save_as_keras:
         model = tf.keras.models.load_model(export_path)
     else:
         model = tf.compat.v1.saved_model.load_v2(export_path)
     self.assertEqual(
         {
             'input':
             tf.TensorSpec(name='input', shape=(None, 1), dtype=tf.string),
         }, model_util.get_input_specs(model, signature_name))
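Examples #1 and #2 differ only in that #2 reloads the model from the export path before querying its specs. Both receive save_as_keras and signature_name as test parameters; a minimal sketch of how such a test might be parameterized, assuming absl's parameterized framework (the case names and values below are illustrative, not the actual tfma test parameters):

 from absl.testing import parameterized
 import tensorflow as tf

 class ModelUtilTest(tf.test.TestCase, parameterized.TestCase):

     @parameterized.named_parameters(
         ('save_as_keras', True, None),
         ('save_as_saved_model', False, 'serving_default'),
     )
     def testGetInputSpecsWithSignatures(self, save_as_keras, signature_name):
         ...  # body as in Example #1 or #2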
Example #3
 def testGetInputSpecsWithKerasModel(self):
     model = self.createModelWithMultipleInputs(True)
     # Some versions of TF set the TensorSpec.name and others do not. Since we
     # don't care about the name here, clear it from the output for testing.
     specs = model_util.get_input_specs(model)
     for k, v in specs.items():
         if isinstance(v, tf.TensorSpec):
             specs[k] = tf.TensorSpec(shape=v.shape, dtype=v.dtype)
     self.assertEqual(
         {
             'input_1':
             tf.TensorSpec(shape=(None, 2), dtype=tf.int64),
             'input_2':
             tf.SparseTensorSpec(shape=(None, 1), dtype=tf.float32),
             'input_3':
             tf.RaggedTensorSpec(shape=(None, None), dtype=tf.float32),
         }, specs)
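For reference, on a Keras model get_input_specs simply reflects the model's declared input layers. A minimal sketch with a single dense input, assuming the tensorflow_model_analysis import path; this is not the actual createModelWithMultipleInputs helper:

 import tensorflow as tf
 from tensorflow_model_analysis import model_util

 # One dense int64 input; a Lambda cast lets the Dense layer consume it.
 inputs = tf.keras.Input(shape=(2,), name='input_1', dtype=tf.int64)
 x = tf.keras.layers.Lambda(lambda t: tf.cast(t, tf.float32))(inputs)
 model = tf.keras.Model(inputs=inputs, outputs=tf.keras.layers.Dense(1)(x))

 specs = model_util.get_input_specs(model)
 # -> {'input_1': tf.TensorSpec(shape=(None, 2), dtype=tf.int64, ...)}

Sparse and ragged inputs like input_2 and input_3 above would be declared by passing sparse=True or ragged=True to tf.keras.Input, which is what produces the SparseTensorSpec and RaggedTensorSpec entries in the assertion.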
Example #4
 def _update_state(
         self, accumulator: tf_metric_accumulators.TFMetricsAccumulator):
     serialized_examples = None
     labels = {}
     example_weights = {}
     for i, output_name in enumerate(self._output_names):
         e, l, w = accumulator.get_inputs(i)
         if i == 0:
             serialized_examples = e
         if not output_name and len(self._output_names) > 1:
             # The empty output_name for multi-output models is not used for inputs.
             continue
         labels[output_name] = np.array(l)
         weights = np.array(w)
         # TF v1 will not squeeze the weights, so we must do it manually.
         if weights.shape[-1] == 1:
             weights = weights.squeeze(axis=-1)
         example_weights[output_name] = weights
     if len(self._output_names) == 1:
         # Single-output models don't use dicts.
         labels = next(iter(labels.values()))
         example_weights = next(iter(example_weights.values()))
     record_batch = self._decoder.DecodeBatch(serialized_examples)
     input_specs = model_util.get_input_specs(self._model,
                                              signature_name=None)
     inputs = model_util.get_inputs(record_batch, input_specs,
                                    self._tensor_adapter)
     if inputs is None:
         raise ValueError('unable to prepare inputs for evaluation: '
                          'input_specs={}, record_batch={}'.format(
                              input_specs, record_batch))
     self._model.evaluate(x=inputs,
                          y=labels,
                          batch_size=record_batch.num_rows,
                          verbose=0,
                          sample_weight=example_weights)
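The manual squeeze above exists because TF v1 can emit per-example weights with a trailing singleton dimension, while model.evaluate expects rank-1 sample weights. A standalone illustration of just that step:

 import numpy as np

 weights = np.array([[0.5], [1.0], [2.0]])  # shape (3, 1), as TF v1 may emit
 if weights.shape[-1] == 1:
     weights = weights.squeeze(axis=-1)
 assert weights.shape == (3,)  # rank-1, as model.evaluate's sample_weight expects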
Example #5
 def _batch_reducible_process(
         self, batched_extract: types.Extracts) -> List[types.Extracts]:
     result = copy.copy(batched_extract)
     record_batch = batched_extract[constants.ARROW_RECORD_BATCH_KEY]
     serialized_examples = batched_extract[constants.INPUT_KEY]
     predictions = [None] * record_batch.num_rows
     for spec in self._eval_config.model_specs:
         # To maintain consistency with settings where a single model is used,
         # always use '' as the model name regardless of whether a name is passed.
         model_name = spec.name if len(
             self._eval_config.model_specs) > 1 else ''
         if model_name not in self._loaded_models:
             raise ValueError(
                 'loaded model for "{}" not found: eval_config={}'.format(
                     spec.name, self._eval_config))
         model = self._loaded_models[model_name]
         signature_name = spec.signature_name
         input_specs = model_util.get_input_specs(model,
                                                  signature_name) or {}
         # If tensor_adapter and input_specs exist, then filter the inputs by
         # input names (unlike estimators, keras does not accept unknown inputs).
         # However, avoid getting the tensors if we appear to be feeding serialized
         # examples to the model.
         if (self._tensor_adapter and input_specs
                 and not (len(input_specs) == 1 and next(
                     iter(input_specs.values())).dtype == tf.string
                          and model_util.find_input_name_in_features(
                              set(self._tensor_adapter.TypeSpecs().keys()),
                              next(iter(input_specs.keys()))) is None)):
             inputs = model_util.filter_tensors_by_input_names(
                 self._tensor_adapter.ToBatchTensors(record_batch),
                 list(input_specs.keys()))
         else:
             inputs = None
         if not inputs:
             # Assume serialized examples
             assert serialized_examples is not None, 'Raw examples not found.'
             inputs = serialized_examples
             # If a signature name was not provided, default to the serving
             # signature, since parsing will normally be done outside the model.
             if not signature_name:
                 signature_name = model_util.get_default_signature_name(
                     model)
         signature = model_util.get_callable(model, signature_name)
         if signature is None:
             raise ValueError(
                 'PredictExtractor V2 requires a keras model or a serving model. '
                 'If using EvalSavedModel then you must use PredictExtractor V1.'
             )
         if isinstance(inputs, dict):
             if signature is model:
                 outputs = signature(inputs)
             else:
                 outputs = signature(**inputs)
         else:
             outputs = signature(tf.constant(inputs, dtype=tf.string))
         for i in range(record_batch.num_rows):
             if isinstance(outputs, dict):
                 output = {k: v[i].numpy() for k, v in outputs.items()}
                 # Keras and regression serving models return a dict of predictions
                 # even for single-outputs. Convert these to a single tensor for
                 # compatibility with the labels (and model.predict API).
                 if len(output) == 1:
                     output = list(output.values())[0]
             else:
                 output = np.asarray(outputs)[i]
             # If only one model, the predictions are stored without using a dict
             if len(self._eval_config.model_specs) == 1:
                 predictions[i] = output
             else:
                 if predictions[i] is None:
                     predictions[i] = {}
                 predictions[i][spec.name] = output  # pytype: disable=unsupported-operands
     result[constants.PREDICTIONS_KEY] = predictions
     return [result]
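When the inputs fall through to serialized examples, the signature call near the end reduces to invoking a loaded serving function on a string tensor. A minimal sketch, with a hypothetical export path and placeholder example bytes:

 import tensorflow as tf

 model = tf.saved_model.load('/tmp/exported_model')  # hypothetical path
 infer = model.signatures['serving_default']
 # Signature inputs are keyword arguments; the key name ('examples' here) is
 # an assumption and depends on how the model was exported.
 serialized = tf.constant([b'...'], dtype=tf.string)  # placeholder tf.Example bytes
 outputs = infer(examples=serialized)
 # outputs is a dict of batched tensors, sliced per row as in the loop above.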