# These snippets assume the following imports; the module paths below are the
# usual locations for these helpers, but treat them as assumptions:
import os

import six
import tensorflow_model_analysis as tfma
from absl import logging
from google.protobuf import json_format
from tensorboard.backend import http_util
from tensorflow_model_analysis.view import widget_view
from tensorflow_model_analysis.writers import metrics_and_plots_serialization


def testConvertEvalResultToUIInputWithSlicingColumn(self):
  eval_result = self._makeEvalResult()
  result = widget_view.convert_slicing_metrics_to_ui_input(
      eval_result.slicing_metrics, slicing_column='slice')
  self.assertEqual(result, [{
      'slice': 'Overall',
      'sliceValue': 'Overall',
      'metrics': {
          'metrics2': {'double_value': {'value': 0.5}},
          'metrics1': {'double_value': {'value': 0.5}},
      },
  }, {
      'slice': u'slice:1',
      'sliceValue': u'1',
      'metrics': {
          'metrics2': {'double_value': {'value': 0.5}},
          'metrics1': {'double_value': {'value': 0.5}},
      },
  }])
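# The tests in this section assume a _makeEvalResult fixture that is not shown.
# Below is a minimal sketch of what it might look like, inferred from the
# expected values above; the exact nesting (output name, then class key, then
# metric name) and the namedtuple stand-in for tfma.EvalResult are assumptions,
# not the project's real helper.
import collections

_EvalResult = collections.namedtuple('EvalResult', ['slicing_metrics'])


def _makeEvalResult(self, slices=((), (('slice', 1),))):
  metrics = {
      '': {  # Output name (empty for single-output models).
          '': {  # Class key (empty for non-multi-class models).
              'metrics1': {'double_value': {'value': 0.5}},
              'metrics2': {'double_value': {'value': 0.5}},
          }
      }
  }
  # slicing_metrics is a list of (slice_key, metrics) pairs; the empty
  # slice key () renders as 'Overall'.
  return _EvalResult(slicing_metrics=[(s, metrics) for s in slices])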
def _get_evaluation_result_from_remote_path(self, request):
  """Fetches evaluation results from serialized metrics at a remote path."""
  evaluation_output_path = request.args.get('evaluation_output_path')
  try:
    evaluation_output_path = six.ensure_text(evaluation_output_path)
  except (UnicodeDecodeError, AttributeError):
    pass
  try:
    metrics = (
        metrics_and_plots_serialization.load_and_deserialize_metrics(
            path=evaluation_output_path))
    data = widget_view.convert_slicing_metrics_to_ui_input(metrics)
  except (KeyError, json_format.ParseError) as error:
    logging.info('Error while fetching evaluation data, %s', error)
    data = []
  return http_util.Respond(request, data, content_type='application/json')
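# Hedged usage sketch for the handler above: a client fetches results by
# passing the evaluation output path as a query parameter. The route prefix
# and plugin name here are assumptions; see the get_plugin_apps sketch at the
# end of this section for how such a route might be registered:
#
#   GET /data/plugin/fairness_indicators/get_evaluation_result_from_remote_path
#       ?evaluation_output_path=/path/to/metrics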
def _get_evaluation_result_from_remote_path(self, request):
  """Variant of the handler above that loads a full tfma.EvalResult."""
  evaluation_output_path = request.args.get('evaluation_output_path')
  try:
    evaluation_output_path = six.ensure_text(evaluation_output_path)
  except (UnicodeDecodeError, AttributeError):
    pass
  try:
    eval_result = tfma.load_eval_result(
        os.path.dirname(evaluation_output_path),
        output_file_format=self._get_output_file_format(
            evaluation_output_path))
    data = widget_view.convert_slicing_metrics_to_ui_input(
        eval_result.slicing_metrics)
  except (KeyError, json_format.ParseError) as error:
    logging.info('Error while fetching evaluation data, %s', error)
    data = []
  return http_util.Respond(request, data, content_type='application/json')
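# _get_output_file_format is referenced above but not shown. One plausible
# implementation, assuming the format is encoded in the file extension
# (e.g. 'metrics.tfrecord' -> 'tfrecord'); the real helper may differ:
def _get_output_file_format(self, evaluation_output_path):
  file_format = os.path.splitext(evaluation_output_path)[1]
  return file_format[1:] if file_format else ''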
def testConvertEvalResultToUIInputForCrossSliceKeyType(self):
  eval_result = self._makeEvalResult(
      slices=(((), (('slice_1', 1), ('slice_2', 2))),
              ((('slice_1', 5), ('slice_2', 6)),
               (('slice_1', 3), ('slice_2', 4)))))
  result = widget_view.convert_slicing_metrics_to_ui_input(
      eval_result.slicing_metrics)
  self.assertEqual(result, [{
      'slice': 'Overall__XX__slice_1_X_slice_2:1_X_2',
      'sliceValue': 'Overall__XX__1_X_2',
      'metrics': {
          'metrics2': {'double_value': {'value': 0.5}},
          'metrics1': {'double_value': {'value': 0.5}},
      },
  }, {
      'slice': 'slice_1_X_slice_2:5_X_6__XX__slice_1_X_slice_2:3_X_4',
      'sliceValue': '5_X_6__XX__3_X_4',
      'metrics': {
          'metrics2': {'double_value': {'value': 0.5}},
          'metrics1': {'double_value': {'value': 0.5}},
      },
  }])
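# Illustrative (hypothetical) helper showing how the expected cross-slice
# strings in the test above are composed: each side of the '__XX__' separator
# is a stringified slice key, with column names and values joined by '_X_':
def _cross_slice_string(baseline_key, comparison_key):
  def _stringify(slice_key):
    if not slice_key:
      return 'Overall'
    columns = '_X_'.join(column for column, _ in slice_key)
    values = '_X_'.join(str(value) for _, value in slice_key)
    return '%s:%s' % (columns, values)
  return '__XX__'.join([_stringify(baseline_key), _stringify(comparison_key)])

# For example, _cross_slice_string((), (('slice_1', 1), ('slice_2', 2)))
# returns 'Overall__XX__slice_1_X_slice_2:1_X_2', matching the test above.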
def _get_evaluation_result(self, request):
  """Fetches evaluation results for a run from the multiplexer's tensors."""
  run = request.args.get('run')
  try:
    run = six.ensure_text(run)
  except (UnicodeDecodeError, AttributeError):
    pass
  data = []
  try:
    eval_result_output_dir = six.ensure_text(
        self._multiplexer.Tensors(run, FairnessIndicatorsPlugin.plugin_name)
        [0].tensor_proto.string_val[0])
    eval_result = tfma.load_eval_result(output_path=eval_result_output_dir)
    # TODO(b/141283811): Allow users to choose different model output names
    # and class keys in case of multi-output and multi-class model.
    data = widget_view.convert_slicing_metrics_to_ui_input(
        eval_result.slicing_metrics)
  except (KeyError, json_format.ParseError) as error:
    logging.info('Error while fetching evaluation data, %s', error)
  return http_util.Respond(request, data, content_type='application/json')
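# Hedged sketch of how a TensorBoard plugin typically exposes these handlers
# via get_plugin_apps; the exact route names here are assumptions, not
# necessarily the plugin's real routes:
def get_plugin_apps(self):
  return {
      '/get_evaluation_result': self._get_evaluation_result,
      '/get_evaluation_result_from_remote_path':
          self._get_evaluation_result_from_remote_path,
  }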