import os
from pathlib import Path

# Util, MLUtil, StringHelper, MLApplicationState and MLApplicationHTMLBuilder
# are immuneML-internal helpers, assumed to be imported in the enclosing module.


def make_html_map(state: MLApplicationState, base_path: Path) -> dict:
    """Collects the values that get substituted into the HTML report template."""
    return {
        "css_style": Util.get_css_content(MLApplicationHTMLBuilder.CSS_PATH),
        "hp_setting": state.hp_setting.get_key(),
        "immuneML_version": MLUtil.get_immuneML_version(),
        "label": state.label_config.get_labels_by_name()[0],
        "dataset_name": state.dataset.name,
        "dataset_type": StringHelper.camel_case_to_word_string(type(state.dataset).__name__),
        "example_count": state.dataset.get_example_count(),
        # e.g. a RepertoireDataset with 100 examples becomes "100 repertoires"
        "dataset_size": f"{state.dataset.get_example_count()} "
                        f"{type(state.dataset).__name__.replace('Dataset', 's').lower()}",
        # one entry per label; str(...)[1:-1] strips the list brackets, leaving
        # a comma-separated string of the label's possible values
        "labels": [{"name": label_name,
                    "values": str(state.label_config.get_label_values(label_name))[1:-1]}
                   for label_name in state.label_config.get_labels_by_name()],
        "predictions": Util.get_table_string_from_csv(state.predictions_path),
        # link relative to the report root so the generated HTML stays portable
        "predictions_download_link": os.path.relpath(state.predictions_path, base_path)
    }
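
The returned map is a flat key-to-value dict that gets substituted into an HTML template. A minimal sketch of how such a map could be rendered, using the stdlib string.Template as a stand-in for immuneML's own template machinery (the template text and the render_report helper are hypothetical, not part of immuneML):

from pathlib import Path
from string import Template

# Hypothetical template fragment referencing a few of the map's keys.
REPORT_TEMPLATE = Template(
    "<html><head><style>$css_style</style></head><body>"
    "<h1>$dataset_name ($dataset_type)</h1>"
    "<p>immuneML $immuneML_version, $dataset_size</p>"
    '<a href="$predictions_download_link">Download predictions</a>'
    "</body></html>"
)

def render_report(html_map: dict, result_path: Path) -> None:
    # safe_substitute ignores map keys the template does not reference,
    # e.g. the nested "labels" list, which a plain Template cannot expand
    result_path.write_text(REPORT_TEMPLATE.safe_substitute(html_map))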
Example 2
    @staticmethod
    def _make_assessment_pages(state: TrainMLModelState, base_path: Path, label: str):
        assessment_list = []

        for i, assessment_state in enumerate(state.assessment_states):

            # one dict per outer cross-validation (assessment) split
            assessment_item = {"css_style": Util.get_css_content(HPHTMLBuilder.CSS_PATH),
                               "optimization_metric": state.optimization_metric.name.lower(),
                               "split_index": assessment_state.split_index + 1,  # 1-based for display
                               "hp_settings": [],
                               "has_reports": len(state.assessment.reports.model_reports) + len(state.assessment.reports.encoding_reports) > 0,
                               "train_data_reports": Util.to_dict_recursive(assessment_state.train_val_data_reports, base_path),
                               "test_data_reports": Util.to_dict_recursive(assessment_state.test_data_reports, base_path),
                               "show_data_reports": len(assessment_state.train_val_data_reports) > 0 or len(assessment_state.test_data_reports) > 0}

            # metadata tables exist only for dataset types that carry a metadata file;
            # the template checks the *_metadata_path keys before rendering the tables
            if hasattr(assessment_state.train_val_dataset, "metadata_file") and assessment_state.train_val_dataset.metadata_file is not None:
                assessment_item["train_metadata_path"] = os.path.relpath(str(assessment_state.train_val_dataset.metadata_file), str(base_path))
                assessment_item["train_metadata"] = Util.get_table_string_from_csv(assessment_state.train_val_dataset.metadata_file)
            else:
                assessment_item["train_metadata_path"] = None

            if hasattr(assessment_state.test_dataset, "metadata_file") and assessment_state.test_dataset.metadata_file is not None:
                assessment_item["test_metadata_path"] = os.path.relpath(str(assessment_state.test_dataset.metadata_file), str(base_path))
                assessment_item["test_metadata"] = Util.get_table_string_from_csv(assessment_state.test_dataset.metadata_file)
            else:
                assessment_item["test_metadata_path"] = None

            assessment_item["label"] = label
            # the optimal setting is fixed per label and split, so compute it once outside the loop
            optimal = str(assessment_state.label_states[label].optimal_hp_setting.get_key())
            for hp_setting, item in assessment_state.label_states[label].assessment_items.items():
                reports_path = HPHTMLBuilder._make_assessment_reports(state, i, hp_setting, assessment_state, label, base_path)
                assessment_item["hp_settings"].append({
                    "optimal": str(hp_setting) == optimal,
                    "hp_setting": str(hp_setting),
                    "optimization_metric_val": HPHTMLBuilder._print_metric(item.performance, state.optimization_metric),
                    "reports_path": reports_path
                })
            # non-optimal settings are only worth listing when there is more than one
            assessment_item["show_non_optimal"] = len(assessment_item["hp_settings"]) > 1

            assessment_item["selection_path"] = HPHTMLBuilder._make_selection_split_path(i, label, state.name)
            assessment_item["performances_per_metric"] = HPHTMLBuilder._extract_assessment_performances_per_metric(state, assessment_state, label)

            assessment_list.append(assessment_item)

        return assessment_list
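
Each entry in the returned list describes one outer cross-validation split. A small sketch of how a caller might summarize that structure on the console, relying only on the keys built above (the print_assessment_summary name is hypothetical, not an immuneML function):

def print_assessment_summary(assessment_list: list) -> None:
    # one block per CV split, flagging the optimal hyperparameter setting
    for item in assessment_list:
        print(f"split {item['split_index']} ({item['optimization_metric']}, label: {item['label']})")
        for setting in item["hp_settings"]:
            marker = "*" if setting["optimal"] else " "
            print(f"  {marker} {setting['hp_setting']}: {setting['optimization_metric_val']}")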