Example #1
 def plot_cross_validation_and_upload_results(self) -> Path:
     """
     Aggregates the metrics from all cross validation splits, uploads the results to the
     parent run, and returns the folder that holds the aggregated results.
     """
     from InnerEye.ML.visualizers.plot_cross_validation import crossval_config_from_model_config, \
         plot_cross_validation, unroll_aggregate_metrics
     # perform aggregation as cross val splits are now ready
     plot_crossval_config = crossval_config_from_model_config(
         self.model_config)
     plot_crossval_config.run_recovery_id = PARENT_RUN_CONTEXT.tags[
         RUN_RECOVERY_ID_KEY_NAME]
     plot_crossval_config.outputs_directory = self.model_config.outputs_folder
     plot_crossval_config.settings_yaml_file = self.yaml_config_file
     cross_val_results_root = plot_cross_validation(plot_crossval_config)
     if self.post_cross_validation_hook:
         self.post_cross_validation_hook(self.model_config,
                                         cross_val_results_root)
     # upload results to the parent run's outputs. Normally, we use blobxfer for that, but here we want
     # to ensure that the files are visible inside the AzureML UI.
     PARENT_RUN_CONTEXT.upload_folder(name=CROSSVAL_RESULTS_FOLDER,
                                      path=str(cross_val_results_root))
     if self.model_config.is_scalar_model:
         try:
             aggregates = pd.read_csv(cross_val_results_root /
                                      METRICS_AGGREGATES_FILE)
             unrolled_aggregate_metrics = unroll_aggregate_metrics(
                 aggregates)
             for m in unrolled_aggregate_metrics:
                 PARENT_RUN_CONTEXT.log(m.metric_name, m.metric_value)
         except Exception as ex:
             print_exception(
                 ex,
                 "Unable to log metrics to Hyperdrive parent run.",
                 logger_fn=logging.warning)
     return cross_val_results_root
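
The method above calls an optional post_cross_validation_hook with the model config and the folder of aggregated results. A minimal sketch of such a hook is shown below, assuming it only needs to inspect the results folder; the function name and the logging behaviour are illustrative, not part of InnerEye.

import logging
from pathlib import Path


def example_post_cross_validation_hook(model_config, cross_val_results_root: Path) -> None:
    # Hypothetical hook: list the artifacts that plot_cross_validation produced,
    # using only the standard library.
    for file in sorted(cross_val_results_root.glob("*")):
        if file.is_file():
            logging.info("Cross-validation artifact: %s (%d bytes)",
                         file.name, file.stat().st_size)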
Example #2
 def log_to_azure(self, label: str, metric: float) -> None:
     """
     Logs a metric as a key/value pair to AzureML.
     """
     if not is_offline_run_context(RUN_CONTEXT):
         metric_name = self.logging_prefix + label
         RUN_CONTEXT.log(metric_name, metric)
         # When running in a cross validation setting, log all metrics to the hyperdrive parent run too,
         # so that we can easily overlay graphs across runs.
         if self.log_to_parent_run and PARENT_RUN_CONTEXT:
             if self.cross_validation_split_index > DEFAULT_CROSS_VALIDATION_SPLIT_INDEX:
                 PARENT_RUN_CONTEXT.log(f"{metric_name}_Split{self.cross_validation_split_index}",
                                        metric)