def test_create_score_calibration_file_fails_with_negative_scale(self):
        """A calibration file carrying a negative scale value must be rejected."""
        bad_file = test_utils.create_calibration_file(
            self.get_temp_dir(), content="-1.0,0.2,0.1")

        expected_message = (
            "Expected scale to be a non-negative value, but got -1.0.")
        with self.assertRaisesRegex(ValueError, expected_message):
            metadata_info.ScoreCalibrationMd(
                _metadata_fb.ScoreTransformationType.LOG,
                self._DEFAULT_VALUE, bad_file)
 def test_create_for_inference_score_calibration_should_succeed(self):
    """create_for_inference with score calibration yields valid metadata."""
    calibration = metadata_info.ScoreCalibrationMd(
        _metadata_fb.ScoreTransformationType.INVERSE_LOGISTIC,
        _SCORE_CALIBRATION_DEFAULT_SCORE, self._score_file)

    writer = object_detector.MetadataWriter.create_for_inference(
        test_utils.load_file(_MODEL_COCO), [_NORM_MEAN], [_NORM_STD],
        [self._label_file], calibration)

    self._validate_metadata(writer, _JSON_FOR_SCORE_CALIBRATION)
    self._validate_populated_model(writer)
    def test_create_metadata_should_succeed(self):
        """Score-calibration process-unit metadata must match the golden JSON."""
        calibration_md = metadata_info.ScoreCalibrationMd(
            _metadata_fb.ScoreTransformationType.LOG, self._DEFAULT_VALUE,
            self._SCORE_CALIBRATION_FILE)

        generated_json = _metadata.convert_to_json(
            _create_dummy_model_metadata_with_process_uint(
                calibration_md.create_metadata()))

        self.assertEqual(
            generated_json,
            test_utils.load_file(self._EXPECTED_TENSOR_JSON, "r"))
# --- Beispiel #4 ("Beispiel" = German for "example"; separator from the
# snippet-collection source, original score line: 0) ---
  def test_create_for_inference_should_succeed_dynaamic_input_shape_model(self):
    """A dynamic-input-shape model should still produce the golden metadata.

    NOTE(review): the "dynaamic" typo in the method name is kept on purpose —
    renaming would change the test id visible to the runner.
    """
    calibration = metadata_info.ScoreCalibrationMd(
        _metadata_fb.ScoreTransformationType.LOG,
        _DEFAULT_SCORE_CALIBRATION_VALUE, _SCORE_CALIBRATION_FILE)
    writer = audio_classifier.MetadataWriter.create_for_inference(
        test_utils.load_file(_DYNAMIC_INPUT_SIZE_MODEL), _SAMPLE_RATE,
        _CHANNELS, [_LABEL_FILE], calibration)

    self.assertEqual(writer.get_metadata_json(),
                     test_utils.load_file(_JSON_FOR_INFERENCE_DYNAMIC, "r"))
    def test_create_score_calibration_file_fails_with_less_colunms(self):
        """A calibration line with only two parameters must be rejected.

        NOTE(review): the "colunms" typo in the method name is kept — renaming
        would change the test id visible to the runner.
        """
        bad_file = test_utils.create_calibration_file(
            self.get_temp_dir(), content="1.0,0.2")

        expected_message = (
            "Expected empty lines or 3 or 4 parameters per line in score" +
            " calibration file, but got 2.")
        with self.assertRaisesRegex(ValueError, expected_message):
            metadata_info.ScoreCalibrationMd(
                _metadata_fb.ScoreTransformationType.LOG, self._DEFAULT_VALUE,
                bad_file)
  def test_create_from_metadata_info_succeeds_for_multihead(self):
    """Multi-head writer should reorder heads to match the model's outputs."""
    calibration_file1 = test_utils.create_calibration_file(
        self.get_temp_dir(), "score_cali_1.txt")
    calibration_file2 = test_utils.create_calibration_file(
        self.get_temp_dir(), "score_cali_2.txt")

    def make_head(head_name, en_file, cn_file, cali_file, tensor_name):
      # One classification head with two label locales and score calibration.
      return metadata_info.ClassificationTensorMd(
          name=head_name,
          label_files=[
              metadata_info.LabelFileMd(en_file),
              metadata_info.LabelFileMd(cn_file),
          ],
          score_calibration_md=metadata_info.ScoreCalibrationMd(
              _metadata_fb.ScoreTransformationType.LOG,
              _DEFAULT_SCORE_CALIBRATION_VALUE, cali_file),
          tensor_name=tensor_name)

    general_md = metadata_info.GeneralMd(name="AudioClassifier")
    input_md = metadata_info.InputAudioTensorMd(
        name="audio_clip", sample_rate=_SAMPLE_RATE, channels=_CHANNELS)
    # The model's output tensors are Identity, Identity_1; the heads are
    # supplied in the opposite order to test that MetadataWriter corrects it.
    head1 = make_head("head1", "labels_en_1.txt", "labels_cn_1.txt",
                      calibration_file1, "Identity_1")
    head2 = make_head("head2", "labels_en_2.txt", "labels_cn_2.txt",
                      calibration_file2, "Identity")

    writer = (
        audio_classifier.MetadataWriter.create_from_metadata_info_for_multihead(
            test_utils.load_file(_MULTIHEAD_MODEL), general_md, input_md,
            [head1, head2]))

    self.assertEqual(writer.get_metadata_json(),
                     test_utils.load_file(_JSON_MULTIHEAD, "r"))
    def test_create_for_inference_should_succeed(self, model_file,
                                                 golden_json):
        """Writer built via create_for_inference must emit the golden JSON."""
        calibration = metadata_info.ScoreCalibrationMd(
            _metadata_fb.ScoreTransformationType.LOG,
            _DEFAULT_SCORE_CALIBRATION_VALUE, _SCORE_CALIBRATION_FILE)
        writer = image_classifier.MetadataWriter.create_for_inference(
            test_utils.load_file(model_file), [_NORM_MEAN], [_NORM_STD],
            [_LABEL_FILE], calibration)

        self.assertEqual(writer.get_metadata_json(),
                         test_utils.load_file(golden_json, "r"))
    def add_classification_output(
            self,
            labels: Labels,
            score_calibration: Optional[ScoreCalibration] = None,
            name=_OUTPUT_CLASSIFICATION_NAME,
            description=_OUTPUT_CLASSIFICATION_DESCRIPTION):
        """Marks the model's next output tensor as a classification head.

    Example usage:
    writer.add_classification_output(
      Labels()
        .add(['cat', 'dog'], 'en')
        .add(['chat', 'chien'], 'fr')
        .add(['/m/011l78', '/m/031d23'], use_as_category_name=True))

    Args:
      labels: an instance of the Labels helper class.
      score_calibration: an instance of the ScoreCalibration helper class.
      name: Metadata name of the tensor. Note that this is different from the
        tensor name in the flatbuffer.
      description: human readable description of what the tensor does.

    Returns:
      The current Writer instance to allow chained operation.
    """
        # Export calibration parameters to an associated file only when a
        # calibration object was supplied.
        calibration_md = None
        if score_calibration:
            calibration_md = metadata_info.ScoreCalibrationMd(
                score_transformation_type=score_calibration.transformation_type,
                default_score=score_calibration.default_score,
                file_path=self._export_calibration_file(
                    'score_calibration.txt', score_calibration.parameters))

        # The new head's index is the current number of registered outputs.
        tensor_index = len(self._output_mds)

        label_files = [
            metadata_info.LabelFileMd(
                self._export_labels(item.filename, item.names),
                locale=item.locale)
            for item in labels._labels  # pylint: disable=protected-access
        ]

        self._output_mds.append(
            metadata_info.ClassificationTensorMd(
                name=name,
                description=description,
                label_files=label_files,
                tensor_type=self._output_tensor_type(tensor_index),
                score_calibration_md=calibration_md,
            ))
        return self
# --- Beispiel #9 ("Beispiel" = German for "example"; separator from the
# snippet-collection source, original score line: 0) ---
  def test_create_for_inference_fails_with_wrong_channels(self, wrong_channels):
    """A non-positive channel count must raise a ValueError with a clear message."""
    with self.assertRaises(ValueError) as raised:
      audio_classifier.MetadataWriter.create_for_inference(
          test_utils.load_file(_DYNAMIC_INPUT_SIZE_MODEL), _SAMPLE_RATE,
          wrong_channels, [_LABEL_FILE],
          metadata_info.ScoreCalibrationMd(
              _metadata_fb.ScoreTransformationType.LOG,
              _DEFAULT_SCORE_CALIBRATION_VALUE, _SCORE_CALIBRATION_FILE))

    expected = "channels should be positive, but got {}.".format(wrong_channels)
    self.assertEqual(expected, str(raised.exception))
  def test_create_for_inference_fails_with_wrong_sample_rate(
      self, wrong_sample_rate):
    """A non-positive sample rate must raise a ValueError with a clear message."""
    with self.assertRaises(ValueError) as raised:
      audio_classifier.MetadataWriter.create_for_inference(
          test_utils.load_file(_DYNAMIC_INPUT_SIZE_MODEL), wrong_sample_rate,
          _CHANNELS, [_LABEL_FILE],
          metadata_info.ScoreCalibrationMd(
              _metadata_fb.ScoreTransformationType.LOG,
              _DEFAULT_SCORE_CALIBRATION_VALUE,
              test_utils.create_calibration_file(self.get_temp_dir())))

    expected = "sample_rate should be positive, but got {}.".format(
        wrong_sample_rate)
    self.assertEqual(expected, str(raised.exception))
  def test_create_for_inference_dummy_score_calibration_should_succeed(self):
    """Dummy score calibration should yield the expected metadata and model."""
    calibration = metadata_info.ScoreCalibrationMd(
        _metadata_fb.ScoreTransformationType.INVERSE_LOGISTIC,
        _DUMMY_SCORE_CALIBRATION_DEFAULT_SCORE, self._dummy_score_file)
    writer = object_detector.MetadataWriter.create_for_inference(
        test_utils.load_file(_MODEL_COCO), [_NORM_MEAN], [_NORM_STD],
        [self._label_file], calibration)

    self._validate_metadata(writer, _JSON_FOR_DUMMY_SCORE_CALIBRATION)
    self._validate_populated_model(writer)

    # Compare the populated model's metadata against the golden model's.
    actual_dict = json.loads(writer.get_metadata_json())
    displayer = metadata.MetadataDisplayer.with_model_buffer(
        test_utils.load_file(_EXPECTED_DUMMY_MODEL))
    golden_dict = json.loads(displayer.get_metadata_json())
    # NOTE(review): assertDictContainsSubset is deprecated in recent unittest
    # releases; kept for behavior parity. Argument order is (subset, superset).
    self.assertDictContainsSubset(actual_dict, golden_dict)
    def test_create_score_calibration_file_md_should_succeed(self):
        """Calibration file metadata should serialize to the golden JSON."""
        calibration_md = metadata_info.ScoreCalibrationMd(
            _metadata_fb.ScoreTransformationType.LOG, self._DEFAULT_VALUE,
            self._SCORE_CALIBRATION_FILE)
        file_metadata = (
            calibration_md.create_score_calibration_file_md().create_metadata())

        # Wrap the file metadata in a minimal model-metadata flatbuffer so it
        # can be rendered through the JSON converter.
        model_metadata = _metadata_fb.ModelMetadataT()
        model_metadata.associatedFiles = [file_metadata]
        builder = flatbuffers.Builder(0)
        builder.Finish(model_metadata.Pack(builder),
                       _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
        metadata_json = _metadata.convert_to_json(bytes(builder.Output()))

        self.assertEqual(
            metadata_json,
            test_utils.load_file(self._EXPECTED_MODEL_META_JSON, "r"))
    def test_create_metadata_should_succeed(self, tensor_type, golden_json):
        """Classification tensor metadata should match the golden JSON."""
        label_files = [
            metadata_info.LabelFileMd(file_path=self._LABEL_FILE_EN,
                                      locale="en"),
            metadata_info.LabelFileMd(file_path=self._LABEL_FILE_CN,
                                      locale="cn"),
        ]
        calibration_md = metadata_info.ScoreCalibrationMd(
            _metadata_fb.ScoreTransformationType.IDENTITY,
            self._CALIBRATION_DEFAULT_SCORE, self._SCORE_CALIBRATION_FILE)

        # Local name fixed from the original's "tesnor_md" typo.
        tensor_md = metadata_info.ClassificationTensorMd(
            name=self._NAME,
            description=self._DESCRIPTION,
            label_files=label_files,
            tensor_type=tensor_type,
            score_calibration_md=calibration_md)

        metadata_json = _metadata.convert_to_json(
            _create_dummy_model_metadata_with_tensor(
                tensor_md.create_metadata()))
        self.assertEqual(metadata_json,
                         test_utils.load_file(golden_json, "r"))