Example #1
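All of the test methods below exercise the Cloud AI Platform remote-prediction path of run_inference.RunInferenceImpl by faking the Google API client: http.RequestMockBuilder cans the response for the 'ml.projects.predict' method, http.HttpMock serves the discovery document from self._discovery_testdata_dir, and googleapiclient.discovery.build is patched so the pipeline receives the mocked resource instead of a real one. The snippets are methods lifted out of their test class, so they rely on imports that are not shown; the list below is a reconstruction, and exact module paths may vary between tfx_bsl and TensorFlow versions.

# Assumed imports for the snippets on this page (a best guess, not the
# original file header; verify against your tfx_bsl version).
from unittest import mock

import apache_beam as beam
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from google.protobuf import text_format
from googleapiclient import discovery
from googleapiclient import http
from six.moves import http_client  # or: import http.client as http_client
import tensorflow as tf
from tensorflow_serving.apis import prediction_log_pb2
from tfx_bsl.beam import run_inference
from tfx_bsl.public.proto import model_spec_pb2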
    def test_exception_raised_when_response_body_contains_error_entry(self):
        error_msg = 'Base64 decode failed.'
        builder = http.RequestMockBuilder({
            'ml.projects.predict':
            (None, self._make_response_body(error_msg, successful=False))
        })
        resource = discovery.build('ml',
                                   'v1',
                                   http=http.HttpMock(
                                       self._discovery_testdata_dir,
                                       {'status': http_client.OK}),
                                   requestBuilder=builder)
        # Patch discovery.build so RunInference uses the mocked CAIP resource.
        with mock.patch('googleapiclient.discovery.build') as response_mock:
            response_mock.side_effect = lambda service, version: resource
            inference_spec_type = model_spec_pb2.InferenceSpecType(
                ai_platform_prediction_model_spec=model_spec_pb2.AIPlatformPredictionModelSpec(
                    project_id='test-project',
                    model_name='test-model',
                ))

            try:
                self._set_up_pipeline(inference_spec_type)
                self._run_inference_with_beam()
            except ValueError as exc:
                actual_error_msg = str(exc)
                self.assertTrue(actual_error_msg.startswith(error_msg))
            else:
                self.fail('Test was expected to throw ValueError exception')
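The _make_response_body helper is not part of these snippets. Judging from how it is called (a payload plus a successful flag) and from the JSON shapes the AI Platform online-prediction API uses, a minimal sketch could look like the following; the real helper in the test class may differ.

import json

def _make_response_body(self, content, successful):
    # Sketch only (assumed): successful predictions come back as
    # {"predictions": [...]}, failures as {"error": "<message>"}.
    if successful:
        response = {'predictions': content}
    else:
        response = {'error': content}
    return json.dumps(response)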
Example #2
    def test_model_predict(self):
        predictions = [{'output_1': [0.901], 'output_2': [0.997]}]
        builder = http.RequestMockBuilder({
            'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
        })
        resource = discovery.build('ml',
                                   'v1',
                                   http=http.HttpMock(
                                       self._discovery_testdata_dir,
                                       {'status': http_client.OK}),
                                   requestBuilder=builder)
        with mock.patch('googleapiclient.discovery.build') as response_mock:
            response_mock.side_effect = lambda service, version: resource
            inference_spec_type = model_spec_pb2.InferenceSpecType(
                ai_platform_prediction_model_spec=model_spec_pb2.AIPlatformPredictionModelSpec(
                    project_id='test-project',
                    model_name='test-model',
                ))

            prediction_log = prediction_log_pb2.PredictionLog()
            prediction_log.predict_log.response.outputs['output_1'].CopyFrom(
                tf.make_tensor_proto(values=[0.901],
                                     dtype=tf.double,
                                     shape=(1, 1)))
            prediction_log.predict_log.response.outputs['output_2'].CopyFrom(
                tf.make_tensor_proto(values=[0.997],
                                     dtype=tf.double,
                                     shape=(1, 1)))

            self._set_up_pipeline(inference_spec_type)
            assert_that(self.pcoll, equal_to([prediction_log]))
            self._run_inference_with_beam()
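_set_up_pipeline, _run_inference_with_beam and _make_beam_pipeline are further class helpers that the snippets assume. Judging from test_can_format_requests below, which builds the same pipeline inline, they presumably look roughly like this; treat it as a sketch, not the actual source.

def _set_up_pipeline(self, inference_spec_type, keyed_input=False):
    # Sketch (assumed): feed the canned examples through RunInferenceImpl.
    examples = self._predict_examples
    if keyed_input:
        # Assumption: the keyed variant pairs every example with a string key.
        examples = [(str(i), example) for i, example in enumerate(examples)]
    self.pipeline = self._make_beam_pipeline()
    self.pcoll = (
        self.pipeline
        | 'CreateExamples' >> beam.Create(examples)
        | 'RunInference' >> run_inference.RunInferenceImpl(inference_spec_type))

def _run_inference_with_beam(self):
    # Sketch (assumed): run the pipeline and block until it finishes, which
    # also triggers the assert_that checks registered on self.pcoll.
    self.pipeline.run().wait_until_finish()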
Example #3
  def test_can_format_requests(self):
    predictions = [{
        'output_1': [0.901],
        'output_2': [0.997]
    }] * len(self._predict_examples)
    builder = http.RequestMockBuilder({
        'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
    })
    resource = discovery.build(
        'ml',
        'v1',
        http=http.HttpMock(self._discovery_testdata_dir,
                           {'status': http_client.OK}),
        requestBuilder=builder)
    with mock.patch('googleapiclient.discovery.build') as response_mock:
      response_mock.side_effect = lambda service, version: resource
      inference_spec_type = model_spec_pb2.InferenceSpecType(
          ai_platform_prediction_model_spec=model_spec_pb2.AIPlatformPredictionModelSpec(
              project_id='test-project',
              model_name='test-model',
          ))

      example = text_format.Parse(
          """
        features {
          feature { key: "x_bytes" value { bytes_list { value: ["ASa8asdf"] }}}
          feature { key: "x" value { bytes_list { value: "JLK7ljk3" }}}
          feature { key: "y" value { int64_list { value: [1, 2] }}}
          feature { key: "z" value { float_list { value: [4.5, 5, 5.5] }}}
        }
        """, tf.train.Example())

      self.pipeline = self._make_beam_pipeline()
      self.pcoll = (
          self.pipeline
          | 'CreateExamples' >> beam.Create([example])
          | 'RunInference'
          >> run_inference.RunInferenceImpl(inference_spec_type))
      self._run_inference_with_beam()
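test_can_format_requests only verifies that the request payload can be built and sent without raising; it does not assert on the JSON itself. For orientation, under the usual AI Platform JSON conventions (feature keys ending in "_bytes" encoded as {"b64": ...} objects, single-element lists unwrapped), the parsed example above might be formatted into an instance roughly like the dict below. This is an assumption about the formatting logic, not output captured from the test.

# Hypothetical JSON instance for the example above (assumed, not verified).
instance = {
    'x_bytes': {'b64': 'QVNhOGFzZGY='},  # base64.b64encode(b'ASa8asdf')
    'x': 'JLK7ljk3',
    'y': [1, 2],
    'z': [4.5, 5.0, 5.5],
}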
Example #4
  def test_model_predict(self, keyed_input: bool):
    predictions = [{
        'output_1': [0.901],
        'output_2': [0.997]
    }] * len(self._predict_examples)
    builder = http.RequestMockBuilder({
        'ml.projects.predict':
            (None, self._make_response_body(predictions, successful=True))
    })
    resource = discovery.build(
        'ml',
        'v1',
        http=http.HttpMock(self._discovery_testdata_dir,
                           {'status': http_client.OK}),
        requestBuilder=builder)
    with mock.patch('googleapiclient.discovery.build') as response_mock:
      response_mock.side_effect = lambda service, version: resource
      inference_spec_type = model_spec_pb2.InferenceSpecType(
          ai_platform_prediction_model_spec=model_spec_pb2.AIPlatformPredictionModelSpec(
              project_id='test-project',
              model_name='test-model',
          ))
      expected = []
      for example in self._predict_examples:
        prediction_log = prediction_log_pb2.PredictionLog()
        predict_log = prediction_log.predict_log
        input_tensor_proto = predict_log.request.inputs['inputs']
        input_tensor_proto.dtype = tf.string.as_datatype_enum
        input_tensor_proto.tensor_shape.dim.add().size = 1
        input_tensor_proto.string_val.append(example.SerializeToString())
        predict_log.response.outputs['output_1'].CopyFrom(
            tf.make_tensor_proto(values=[0.901], dtype=tf.double, shape=(1, 1)))
        predict_log.response.outputs['output_2'].CopyFrom(
            tf.make_tensor_proto(values=[0.997], dtype=tf.double, shape=(1, 1)))
        expected.append(prediction_log)

      self._set_up_pipeline(inference_spec_type, keyed_input)
      assert_that(self.pcoll, equal_to(expected))
      self._run_inference_with_beam()
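The keyed_input: bool argument suggests this variant of test_model_predict is driven by a parameterized-test decorator (for example absl.testing.parameterized), so the same assertions run once with plain tf.train.Example elements and once with key/example pairs. A keyed run would feed elements shaped roughly like this, matching the _set_up_pipeline sketch above (assumed, not taken from the test file):

# Hypothetical keyed input: (key, tf.train.Example) tuples.
keyed_examples = [(str(i), example)
                  for i, example in enumerate(self._predict_examples)]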