def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory containing the serialized metrics and plots files.
    model_name: Optional model name used to select metrics when the output
      contains results for multiple models.

  Returns:
    An EvalResult wrapping the deserialized slicing metrics, plots, and the
    eval config loaded from `output_path`.
  """
  metrics_path = os.path.join(output_path, _METRICS_OUTPUT_FILE)
  plots_path = os.path.join(output_path, _PLOTS_OUTPUT_FILE)
  # Deserialize the per-slice metrics and plots protos written at eval time.
  loaded_metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=metrics_path, model_name=model_name))
  loaded_plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=plots_path))
  return EvalResult(
      slicing_metrics=loaded_metrics,
      plots=loaded_plots,
      config=load_eval_config(output_path))
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory the evaluation run wrote its outputs to.
    model_name: Optional model name used to pick the matching output data
      spec and to filter the deserialized metrics.

  Returns:
    An EvalResult wrapping the deserialized slicing metrics, plots, and the
    eval config loaded from `output_path`.
  """
  # The eval config determines where this run's metrics/plots files live.
  eval_config = load_eval_config(output_path)
  output_spec = _get_output_data_spec(eval_config, model_name)
  loaded_metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=output_filename(output_spec, constants.METRICS_KEY),
          model_name=model_name))
  loaded_plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=output_filename(output_spec, constants.PLOTS_KEY)))
  return EvalResult(
      slicing_metrics=loaded_metrics,
      plots=loaded_plots,
      config=eval_config)
def load_eval_result(output_path: Text) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory containing the serialized metrics and plots files.

  Returns:
    An EvalResult whose slicing metrics and plots have been converted from
    proto maps into plain dictionaries, plus the eval config loaded from
    `output_path`.
  """
  raw_metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, _METRICS_OUTPUT_FILE)))
  raw_plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, _PLOTS_OUTPUT_FILE)))
  # Convert every (slice_key, proto_map) pair into (slice_key, dict) so the
  # visualization layer can consume plain Python structures.
  slicing_metrics = [(slice_key, _convert_proto_map_to_dict(metrics_data))
                     for slice_key, metrics_data in raw_metrics]
  plots = [(slice_key, _convert_proto_map_to_dict(plot_data))
           for slice_key, plot_data in raw_plots]
  return EvalResult(
      slicing_metrics=slicing_metrics,
      plots=plots,
      config=load_eval_config(output_path))
def _get_evaluation_result_from_remote_path(self, request):
  """Serves evaluation metrics loaded from a caller-supplied output path.

  Reads the `evaluation_output_path` request argument, deserializes the
  metrics stored there, converts them into the UI input format, and responds
  with JSON. On any fetch/parse failure an empty list is returned instead of
  an error response (best-effort behavior).

  Args:
    request: The incoming HTTP request; its args carry the output path.

  Returns:
    An HTTP response whose body is the UI-ready metrics data as JSON.
  """
  evaluation_output_path = request.args.get('evaluation_output_path')
  # Normalize bytes → text; a missing/undecodable path is handled below.
  try:
    evaluation_output_path = six.ensure_text(evaluation_output_path)
  except (UnicodeDecodeError, AttributeError):
    pass
  try:
    loaded = metrics_and_plots_serialization.load_and_deserialize_metrics(
        path=evaluation_output_path)
    data = widget_view.convert_slicing_metrics_to_ui_input(loaded)
  except (KeyError, json_format.ParseError) as error:
    # Deliberately best-effort: log and fall back to an empty payload.
    logging.info('Error while fetching evaluation data, %s', error)
    data = []
  return http_util.Respond(request, data, content_type='application/json')
def load_eval_result(output_path: Text,
                     model_name: Optional[Text] = None) -> EvalResult:
  """Creates an EvalResult object for use with the visualization functions.

  Args:
    output_path: Directory containing the serialized eval run outputs.
    model_name: Optional model name; selects which model's location and
      metrics to load when the run covered multiple models.

  Returns:
    An EvalResult with the deserialized slicing metrics and plots plus the
    eval config, data location, file format, and model location recorded for
    the run.
  """
  eval_config, data_location, file_format, model_locations = (
      _load_eval_run(output_path))
  loaded_metrics = (
      metrics_and_plots_serialization.load_and_deserialize_metrics(
          path=os.path.join(output_path, constants.METRICS_KEY),
          model_name=model_name))
  loaded_plots = (
      metrics_and_plots_serialization.load_and_deserialize_plots(
          path=os.path.join(output_path, constants.PLOTS_KEY)))
  # NOTE(review): with no model name the first recorded location is used —
  # arbitrary for multi-model runs, but that is the existing contract.
  if model_name is not None:
    model_location = model_locations[model_name]
  else:
    model_location = list(model_locations.values())[0]
  return EvalResult(
      slicing_metrics=loaded_metrics,
      plots=loaded_plots,
      config=eval_config,
      data_location=data_location,
      file_format=file_format,
      model_location=model_location)