def test_import_model_evaluation_with_explanation_overriding(self, mock_api):
    """--explanation must override --metrics_explanation in the API request."""
    # Two distinct explanation payloads written to separate temp files.
    metrics_explanation_file = self.create_tempfile().full_path
    with open(metrics_explanation_file, 'w') as out:
      out.write(EXPLANATION_1)

    override_explanation_file = self.create_tempfile().full_path
    with open(override_explanation_file, 'w') as out:
      out.write(EXPLANATION_2)

    main([
        '--metrics', self.metrics_path, '--metrics_explanation',
        metrics_explanation_file, '--explanation', override_explanation_file,
        '--problem_type', 'classification', '--model_name', self._model_name,
        '--gcp_resources', self._gcp_resources
    ])

    # Only EXPLANATION_2 (the override) may appear in the imported evaluation.
    expected_attributions = to_value(
        json.loads(EXPLANATION_2)['explanation']['attributions'][0]
        ['featureAttributions'])
    expected_metrics = to_value(
        json.loads(METRICS)['slicedMetrics'][0]['metrics']['regression'])
    mock_api.assert_called_with(
        mock.ANY,
        parent=self._model_name,
        model_evaluation={
            'metrics': expected_metrics,
            'metrics_schema_uri': SCHEMA_URI,
            'model_explanation': {
                'mean_attributions': [{
                    'feature_attributions': expected_attributions
                }]
            },
        })
# --- Example #2 (scraped snippet boundary; original marker: "Пример #2", score 0) ---
    def test_import(self, mock_api, mock_auth):
        """Imports metrics alone, then metrics together with an explanation."""
        fake_creds = mock.Mock(spec=google.auth.credentials.Credentials)
        fake_creds.token = 'token'
        mock_auth.return_value = [fake_creds, 'project']

        metrics_file = self.create_tempfile().full_path
        with open(metrics_file, 'w') as out:
            out.write(METRICS)

        # Same expected metrics payload in both assertions below.
        expected_metrics = to_value(
            json.loads(METRICS)['slicedMetrics'][0]['metrics']['regression'])

        # First run: metrics only.
        main([
            '--metrics', metrics_file, '--metrics_schema_uri', SCHEMA_URI,
            '--model_name', MODEL_NAME
        ])
        mock_api.assert_called_with(
            mock.ANY,
            parent=MODEL_NAME,
            model_evaluation={
                'metrics': expected_metrics,
                'metrics_schema_uri': SCHEMA_URI,
            })

        # Second run: same metrics plus an explanation file.
        explanation_file = self.create_tempfile().full_path
        with open(explanation_file, 'w') as out:
            out.write(EXPLANATION)

        main([
            '--metrics', metrics_file, '--metrics_schema_uri', SCHEMA_URI,
            '--model_name', MODEL_NAME, '--explanation', explanation_file
        ])
        expected_attributions = to_value(
            json.loads(EXPLANATION)['explanation']['attributions'][0]
            ['featureAttributions'])
        mock_api.assert_called_with(
            mock.ANY,
            parent=MODEL_NAME,
            model_evaluation={
                'metrics': expected_metrics,
                'metrics_schema_uri': SCHEMA_URI,
                'model_explanation': {
                    'mean_attributions': [{
                        'feature_attributions': expected_attributions
                    }]
                },
            })
 def test_import_model_evaluation(self, mock_api):
   """Plain import: metrics file plus problem type, no explanations."""
   argv = [
       '--metrics', self.metrics_path,
       '--problem_type', 'classification',
       '--model_name', self._model_name,
       '--gcp_resources', self._gcp_resources,
   ]
   main(argv)

   expected_metrics = to_value(
       json.loads(METRICS)['slicedMetrics'][0]['metrics']['regression'])
   mock_api.assert_called_with(
       mock.ANY,
       parent=self._model_name,
       model_evaluation={
           'metrics': expected_metrics,
           'metrics_schema_uri': SCHEMA_URI,
       })
 def test_import_model_evaluation_empty_explanation_with_empty_explanation_override(
     self, mock_api):
   with self.assertRaises(SystemExit):
     main([
         '--metrics',
         self.metrics_path,
         '--problem_type',
         'classification',
         '--model_name',
         self._model_name,
         '--gcp_resources',
         self._gcp_resources,
         '--metrics_explanation',
         "{{$.inputs.artifacts['metrics'].metadata['explanation_gcs_path']}}",
         '--explanation',
         "{{$.inputs.artifacts['explanation'].metadata['explanation_gcs_path']}}",
     ])
  def test_import_model_evaluation_contains_explanation_with_empty_explanation_override(
      self, mock_api):
    explanation_path = self.create_tempfile().full_path
    with open(explanation_path, 'w') as f:
      f.write(EXPLANATION_1)

    with self.assertRaises(SystemExit):
      main([
          '--metrics',
          self.metrics_path,
          '--problem_type',
          'classification',
          '--model_name',
          self._model_name,
          '--gcp_resources',
          self._gcp_resources,
          '--metrics_explanation',
          explanation_path,
          '--explanation',
          "{{$.inputs.artifacts['explanation'].metadata['explanation_gcs_path']}}",
      ])
  def test_import_model_evaluation_gcp_resources(self, mock_api):
    import_model_evaluation_response = mock.Mock()
    mock_api.return_value = import_model_evaluation_response
    import_model_evaluation_response.name = self._model_name

    main([
        '--metrics', self.metrics_path, '--problem_type', 'classification',
        '--model_name', self._model_name, '--gcp_resources', self._gcp_resources
    ])

    with open(self._gcp_resources) as f:
      serialized_gcp_resources = f.read()

      # Instantiate GCPResources Proto
      model_evaluation_resources = json_format.Parse(serialized_gcp_resources,
                                                     GcpResources())

      self.assertLen(model_evaluation_resources.resources, 1)
      model_evaluation_name = model_evaluation_resources.resources[
          0].resource_uri[len(self._model_evaluation_uri_prefix):]
      self.assertEqual(model_evaluation_name, self._model_name)
  def test_import_model_evaluation_empty_explanation(self, mock_api):
    import_model_evaluation_response = mock.Mock()
    mock_api.return_value = import_model_evaluation_response
    import_model_evaluation_response.name = self._model_name

    main([
        '--metrics', self.metrics_path, '--problem_type', 'classification',
        '--model_name', self._model_name, '--gcp_resources',
        self._gcp_resources, '--metrics_explanation',
        "{{$.inputs.artifacts['metrics'].metadata['explanation_gcs_path']}}"
    ])

    mock_api.assert_called_with(
        mock.ANY,
        parent=self._model_name,
        model_evaluation={
            'metrics':
                to_value(
                    json.loads(METRICS)['slicedMetrics'][0]['metrics']
                    ['regression']),
            'metrics_schema_uri':
                SCHEMA_URI,
        })