Example #1
0
 def infer_impl(self):
     """Runs inference on all examples whose indices are marked as updated.

     Builds one ServingBundle per configured model (a second bundle is added
     when comparison-model config is present), runs inference for each, and
     returns the predictions, label vocabulary, and extra model outputs.
     """
     indices_to_infer = sorted(self.updated_example_indices)
     examples_to_infer = [self.json_to_proto(self.examples[i])
                          for i in indices_to_infer]

     # Primary model bundle, built from the base config keys.
     bundles = [inference_utils.ServingBundle(
         self.config.get('inference_address'),
         self.config.get('model_name'), self.config.get('model_type'),
         self.config.get('model_version'),
         self.config.get('model_signature'),
         self.config.get('uses_predict_api'),
         self.config.get('predict_input_tensor'),
         self.config.get('predict_output_tensor'),
         self.estimator_and_spec.get('estimator'),
         self.estimator_and_spec.get('feature_spec'),
         self.custom_predict_fn)]

     # A comparison model is configured when any of its identifying pieces
     # of config are present; add its bundle too.
     if ('inference_address_2' in self.config
             or self.compare_estimator_and_spec.get('estimator')
             or self.compare_custom_predict_fn):
         bundles.append(inference_utils.ServingBundle(
             self.config.get('inference_address_2'),
             self.config.get('model_name_2'),
             self.config.get('model_type'),
             self.config.get('model_version_2'),
             self.config.get('model_signature_2'),
             self.config.get('uses_predict_api'),
             self.config.get('predict_input_tensor'),
             self.config.get('predict_output_tensor'),
             self.compare_estimator_and_spec.get('estimator'),
             self.compare_estimator_and_spec.get('feature_spec'),
             self.compare_custom_predict_fn))

     infer_objs = []
     extra_output_objs = []
     for bundle in bundles:
         predictions, extra_output = (
             inference_utils.run_inference_for_inference_results(
                 examples_to_infer, bundle))
         infer_objs.append(predictions)
         extra_output_objs.append(extra_output)

     # All pending examples have now been inferred.
     self.updated_example_indices = set()
     return {
         'inferences': {
             'indices': indices_to_infer,
             'results': infer_objs
         },
         'label_vocab': self.config.get('label_vocab'),
         'extra_outputs': extra_output_objs
     }
Example #2
0
    def _infer_mutants_impl(self, feature_name, example_index,
                            inference_addresses, model_names, model_type,
                            model_versions, model_signatures, use_predict,
                            predict_input_tensor, predict_output_tensor, x_min,
                            x_max, feature_index_pattern, custom_predict_fn):
        """Helper for generating PD plots for a feature."""
        # example_index of -1 means "use every loaded example"; otherwise
        # restrict to the single requested example.
        if example_index == -1:
            examples = self.examples
        else:
            examples = [self.examples[example_index]]

        # One serving bundle per model being queried.
        serving_bundles = [
            inference_utils.ServingBundle(
                inference_addresses[i],
                model_names[i],
                model_type,
                model_versions[i],
                model_signatures[i],
                use_predict,
                predict_input_tensor,
                predict_output_tensor,
                custom_predict_fn=custom_predict_fn)
            for i in xrange(len(inference_addresses))
        ]

        viz_params = inference_utils.VizParams(
            x_min, x_max, self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
            feature_index_pattern)
        return inference_utils.mutant_charts_for_feature(
            examples, feature_name, serving_bundles, viz_params)
Example #3
0
    def _infer(self, request):
        """Returns JSON for the `vz-line-chart`s for a feature.

    Args:
      request: A request that should contain 'inference_address', 'model_name',
        'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.

    Returns:
      A list of JSON objects, one for each chart.
    """
        label_vocab = inference_utils.get_label_vocab(
            request.args.get('label_vocab_path'))

        try:
            # This endpoint only accepts GET.
            if request.method != 'GET':
                logger.error('%s requests are forbidden.', request.method)
                return http_util.Respond(request,
                                         {'error': 'invalid non-GET request'},
                                         'application/json',
                                         code=405)

            (inference_addresses, model_names, model_versions,
             model_signatures) = self._parse_request_arguments(request)

            # Only the examples whose contents changed since the last run
            # need to be inferred.
            indices_to_infer = sorted(self.updated_example_indices)
            examples_to_infer = [self.examples[i] for i in indices_to_infer]

            # Query each requested model in turn, collecting predictions.
            infer_objs = []
            for model_num in xrange(len(inference_addresses)):
                serving_bundle = inference_utils.ServingBundle(
                    inference_addresses[model_num],
                    model_names[model_num],
                    request.args.get('model_type'),
                    model_versions[model_num],
                    model_signatures[model_num],
                    request.args.get('use_predict') == 'true',
                    request.args.get('predict_input_tensor'),
                    request.args.get('predict_output_tensor'),
                    custom_predict_fn=self.custom_predict_fn)
                predictions, _ = (
                    inference_utils.run_inference_for_inference_results(
                        examples_to_infer, serving_bundle))
                infer_objs.append(predictions)

            self.updated_example_indices = set()
            resp = {'indices': indices_to_infer, 'results': infer_objs}
            return http_util.Respond(request, {
                'inferences': json.dumps(resp),
                'vocab': json.dumps(label_vocab)
            }, 'application/json')
        except common_utils.InvalidUserInputError as e:
            return http_util.Respond(request, {'error': e.message},
                                     'application/json',
                                     code=400)
        except AbortionError as e:
            return http_util.Respond(request, {'error': e.details},
                                     'application/json',
                                     code=400)
Example #4
0
 def infer_mutants_impl(self, info):
     """Performs mutant inference on specified examples."""
     example_index = int(info['example_index'])
     feature_name = info['feature_name']
     # -1 selects every loaded example; otherwise a single example.
     if example_index == -1:
         chosen = self.examples
     else:
         chosen = [self.examples[example_index]]
     examples = [self.json_to_proto(ex) for ex in chosen]
     # Scan a prefix of the loaded examples to establish value ranges.
     scan_examples = [self.json_to_proto(ex) for ex in self.examples[0:50]]

     # Primary model bundle from the base config keys.
     serving_bundles = [
         inference_utils.ServingBundle(
             self.config.get('inference_address'),
             self.config.get('model_name'), self.config.get('model_type'),
             self.config.get('model_version'),
             self.config.get('model_signature'),
             self.config.get('uses_predict_api'),
             self.config.get('predict_input_tensor'),
             self.config.get('predict_output_tensor'),
             self.estimator_and_spec.get('estimator'),
             self.estimator_and_spec.get('feature_spec'),
             self.custom_predict_fn)
     ]
     # Add a second bundle when a comparison model is configured.
     if ('inference_address_2' in self.config
             or self.compare_estimator_and_spec.get('estimator')
             or self.compare_custom_predict_fn):
         serving_bundles.append(
             inference_utils.ServingBundle(
                 self.config.get('inference_address_2'),
                 self.config.get('model_name_2'),
                 self.config.get('model_type'),
                 self.config.get('model_version_2'),
                 self.config.get('model_signature_2'),
                 self.config.get('uses_predict_api'),
                 self.config.get('predict_input_tensor'),
                 self.config.get('predict_output_tensor'),
                 self.compare_estimator_and_spec.get('estimator'),
                 self.compare_estimator_and_spec.get('feature_spec'),
                 self.compare_custom_predict_fn))

     viz_params = inference_utils.VizParams(info['x_min'], info['x_max'],
                                            scan_examples, 10,
                                            info['feature_index_pattern'])
     # Flag mutant inference as in-flight while the charts are computed.
     self.running_mutant_infer = True
     charts = inference_utils.mutant_charts_for_feature(
         examples, feature_name, serving_bundles, viz_params)
     self.running_mutant_infer = False
     return charts
    def test_convert_predict_response_regression(self):
        """Test converting a PredictResponse to a RegressionResponse."""
        # Build a fake PredictResponse with a 2-element 'scores' tensor.
        response = predict_pb2.PredictResponse()
        scores_tensor = response.outputs['scores']
        scores_tensor.tensor_shape.dim.add().size = 2
        scores_tensor.float_val.extend([0.1, 0.2])

        bundle = inference_utils.ServingBundle('', '', 'regression', '', '',
                                               True, '', 'scores')
        converted = common_utils.convert_predict_response(response, bundle)

        # Each score becomes one regression value, in order.
        for i, expected in enumerate([0.1, 0.2]):
            self.assertAlmostEqual(expected,
                                   converted.result.regressions[i].value)
    def test_convert_predict_response_classification(self):
        """Test converting a PredictResponse to a ClassificationResponse."""
        # Build a fake PredictResponse with a 3x2 'probabilities' tensor.
        predict = predict_pb2.PredictResponse()
        output = predict.outputs['probabilities']
        dim = output.tensor_shape.dim.add()
        dim.size = 3
        dim = output.tensor_shape.dim.add()
        dim.size = 2
        output.float_val.extend([1., 0., .9, .1, .8, .2])

        bundle = inference_utils.ServingBundle('', '', 'classification', '',
                                               '', True, '', 'probabilities')
        converted = common_utils.convert_predict_response(predict, bundle)

        # Each row of the 3x2 tensor becomes one classification, with one
        # (label, score) class entry per column; labels are the stringified
        # column indices.  Table-driven to avoid repeating 12 assertions.
        expected = [
            [("0", 1.), ("1", 0.)],
            [("0", .9), ("1", .1)],
            [("0", .8), ("1", .2)],
        ]
        for i, expected_classes in enumerate(expected):
            for j, (label, score) in enumerate(expected_classes):
                actual = converted.result.classifications[i].classes[j]
                self.assertEqual(label, actual.label)
                self.assertAlmostEqual(score, actual.score)
    def test_mutant_charts_for_feature(
            self, mock_call_servo, mock_make_json_formatted_for_single_chart):
        """Checks chart type and chart count for several feature kinds."""
        example = self.make_and_write_fake_example()
        serving_bundles = [
            inference_utils.ServingBundle('', '', 'classification', '', '',
                                          False, '', '')
        ]
        num_mutants = 10
        viz_params = inference_utils.VizParams(x_min=1,
                                               x_max=10,
                                               examples=[example],
                                               num_mutants=num_mutants,
                                               feature_index_pattern=None)

        # NOTE: the original rebound the injected mock parameters to local
        # lambdas here; those assignments only shadowed the local names and
        # had no effect on the patched functions, so they were removed.
        cases = [
            ('repeated_float', 'numeric', 4),
            ('repeated_int', 'numeric', 2),
            ('single_int', 'numeric', 1),
            ('non_numeric', 'categorical', 3),
        ]
        for feature_name, expected_type, expected_count in cases:
            charts = inference_utils.mutant_charts_for_feature(
                [example], feature_name, serving_bundles, viz_params)
            self.assertEqual(expected_type, charts['chartType'])
            self.assertEqual(expected_count, len(charts['data']))
    def test_mutant_charts_for_feature_with_feature_index_pattern(
            self, mock_call_servo, mock_make_json_formatted_for_single_chart):
        """Checks that a feature index pattern yields one chart per index."""
        example = self.make_and_write_fake_example()
        serving_bundles = [
            inference_utils.ServingBundle('', '', 'classification', '', '',
                                          False, '', '')
        ]
        num_mutants = 10
        viz_params = inference_utils.VizParams(x_min=1,
                                               x_max=10,
                                               examples=[example],
                                               num_mutants=num_mutants,
                                               feature_index_pattern='0 , 2-3')

        # NOTE: the original rebound the injected mock parameters to local
        # lambdas here; those assignments only shadowed the local names and
        # had no effect on the patched functions, so they were removed.
        # Each feature should return 3 charts (one per index in the pattern)
        # even though all fields from the index pattern don't exist for the
        # example.
        for feature_name in ('repeated_float', 'repeated_int', 'single_int'):
            charts = inference_utils.mutant_charts_for_feature(
                [example], feature_name, serving_bundles, viz_params)
            self.assertEqual('numeric', charts['chartType'])
            self.assertEqual(3, len(charts['data']))
Example #9
0
    def _infer(self, request):
        """Returns JSON for the `vz-line-chart`s for a feature.

    Args:
      request: A request that should contain 'inference_address', 'model_name',
        'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.

    Returns:
      A list of JSON objects, one for each chart.
    """
        # Pagination: results are returned in slices of MAX_EXAMPLES_TO_SEND
        # examples, selected by 'start_example'.  Inference runs only on the
        # first request (start_example == 0) and the results are cached on
        # self.infer_objs / self.extra_outputs for later slices.
        start_example = (int(request.args.get('start_example'))
                         if request.args.get('start_example') else 0)
        if not start_example:
            label_vocab = inference_utils.get_label_vocab(
                request.args.get('label_vocab_path'))
            try:
                # This endpoint only accepts GET.
                if request.method != 'GET':
                    logger.error('%s requests are forbidden.', request.method)
                    return http_util.Respond(request,
                                             'invalid non-GET request',
                                             'application/json',
                                             code=405)

                (inference_addresses, model_names, model_versions,
                 model_signatures) = self._parse_request_arguments(request)

                # Only examples flagged as updated are re-inferred; state is
                # stored on self so paginated follow-up requests can slice it
                # without re-running inference.
                self.indices_to_infer = sorted(self.updated_example_indices)
                examples_to_infer = [
                    self.examples[index] for index in self.indices_to_infer
                ]
                self.infer_objs = []
                self.extra_outputs = []
                # One inference pass per requested model.
                for model_num in xrange(len(inference_addresses)):
                    serving_bundle = inference_utils.ServingBundle(
                        inference_addresses[model_num],
                        model_names[model_num],
                        request.args.get('model_type'),
                        model_versions[model_num],
                        model_signatures[model_num],
                        request.args.get('use_predict') == 'true',
                        request.args.get('predict_input_tensor'),
                        request.args.get('predict_output_tensor'),
                        custom_predict_fn=self.custom_predict_fn)
                    (predictions, extra_output
                     ) = inference_utils.run_inference_for_inference_results(
                         examples_to_infer, serving_bundle)
                    self.infer_objs.append(predictions)
                    self.extra_outputs.append(extra_output)
                self.updated_example_indices = set()
            except AbortionError as e:
                # NOTE(review): the GET check above logs via `logger` but
                # these handlers use module-level `logging` — presumably both
                # are available in this module; confirm which is intended.
                logging.error(str(e))
                return http_util.Respond(request,
                                         e.details,
                                         'application/json',
                                         code=400)
            except Exception as e:
                logging.error(str(e))
                return http_util.Respond(request,
                                         str(e),
                                         'application/json',
                                         code=400)

        # Split results from start_example to + max_examples
        # Send next start_example if necessary
        end_example = start_example + MAX_EXAMPLES_TO_SEND

        def get_inferences_resp():
            """Returns the cached inference results sliced to this page."""
            # Deep-copy so slicing does not mutate the cached results.
            sliced_infer_objs = [
                copy.deepcopy(infer_obj) for infer_obj in self.infer_objs
            ]
            if request.args.get('model_type') == 'classification':
                for obj in sliced_infer_objs:
                    obj['classificationResult']['classifications'][:] = obj[
                        'classificationResult']['classifications'][
                            start_example:end_example]
            else:
                for obj in sliced_infer_objs:
                    obj['regressionResult']['regressions'][:] = obj[
                        'regressionResult']['regressions'][
                            start_example:end_example]
            return {
                'indices': self.indices_to_infer[start_example:end_example],
                'results': sliced_infer_objs
            }

        def get_extra_outputs_resp():
            """Returns the cached extra outputs sliced to this page."""
            sliced_extra_objs = [
                copy.deepcopy(infer_obj) for infer_obj in self.extra_outputs
            ]
            for obj in sliced_extra_objs:
                if obj is not None:
                    for key in obj:
                        obj[key][:] = obj[key][start_example:end_example]
            return sliced_extra_objs

        try:
            inferences_resp = get_inferences_resp()
            extra_outputs_resp = get_extra_outputs_resp()
            resp = {
                'inferences': json.dumps(inferences_resp),
                'extraOutputs': json.dumps(extra_outputs_resp)
            }
            # A 'next' value of -1 signals the client this is the last page.
            if end_example >= len(self.examples):
                end_example = -1
            # label_vocab is computed only on the first page; it is defined
            # exactly when start_example == 0, so this guard is safe.
            if start_example == 0:
                resp['vocab'] = json.dumps(label_vocab)
            resp['next'] = end_example
            return http_util.Respond(request, resp, 'application/json')
        except Exception as e:
            logging.error(e)
            return http_util.Respond(request,
                                     str(e),
                                     'application/json',
                                     code=400)