def model_train(config: ModelConfigBase, checkpoint_handler: CheckpointHandler) -> ModelTrainingResults:
    """
    The main training loop. It creates the model, dataset, optimizer, and learning rate scheduler, then proceeds
    to train the model. If a checkpoint was specified, then it loads the checkpoint before resuming training.

    :param config: The arguments which specify all required information.
    :param checkpoint_handler: Checkpoint handler object to find checkpoint paths for model initialization.
    :raises TypeError: If the arguments are of the wrong type.
    :raises ValueError: When there are issues loading a previous checkpoint.
    """
    # Save the dataset files for later use in cross validation analysis
    config.write_dataset_files()

    # Set the random seed for all libraries
    ml_util.set_random_seed(config.get_effective_random_seed(), "Patch visualization")
    # Visualize how patches are sampled for segmentation models. This changes the random generator, but we don't
    # want training to depend on how many patients we visualized, and hence set the random seed again right after.
    with logging_section("Visualizing the effect of sampling random crops for training"):
        visualize_random_crops_for_dataset(config)
    ml_util.set_random_seed(config.get_effective_random_seed(), "Model training")

    logging.debug("Creating the PyTorch model.")

    # Create the train loader and validation loader to load images from the dataset
    data_loaders = config.create_data_loaders()

    # Get the path to the checkpoint to recover from
    checkpoint_path = checkpoint_handler.get_recovery_path_train()
    models_and_optimizer = ModelAndInfo(config=config,
                                        model_execution_mode=ModelExecutionMode.TRAIN,
                                        checkpoint_path=checkpoint_path)

    # Create the main model.
    # If continuing from a previous run at a specific epoch, then load the previous model.
    model_loaded = models_and_optimizer.try_create_model_and_load_from_checkpoint()
    if not model_loaded:
        raise ValueError(f"There was no checkpoint file available for the model for given start_epoch "
                         f"{config.start_epoch}")

    # Print out a detailed breakdown of layers, memory consumption and time.
    generate_and_print_model_summary(config, models_and_optimizer.model)

    # Move model to GPU and adjust for multiple GPUs
    models_and_optimizer.adjust_model_for_gpus()

    # Create the mean teacher model and move to GPU
    if config.compute_mean_teacher_model:
        mean_teacher_model_loaded = models_and_optimizer.try_create_mean_teacher_model_load_from_checkpoint_and_adjust()
        if not mean_teacher_model_loaded:
            raise ValueError("There was no checkpoint file available for the mean teacher model "
                             f"for given start_epoch {config.start_epoch}")

    # Create optimizer
    models_and_optimizer.create_optimizer()
    if checkpoint_handler.should_load_optimizer_checkpoint():
        optimizer_loaded = models_and_optimizer.try_load_checkpoint_for_optimizer()
        if not optimizer_loaded:
            raise ValueError(f"There was no checkpoint file available for the optimizer for given start_epoch "
                             f"{config.start_epoch}")

    # Create checkpoint directory for this run if it doesn't already exist
    logging.info(f"Models are saved at {config.checkpoint_folder}")
    if not config.checkpoint_folder.is_dir():
        config.checkpoint_folder.mkdir()

    # Create the SummaryWriters for Tensorboard
    writers = create_summary_writers(config)
    config.create_dataframe_loggers()

    # Create LR scheduler
    l_rate_scheduler = SchedulerWithWarmUp(config, models_and_optimizer.optimizer)

    # Training loop
    logging.info("Starting training")
    train_results_per_epoch, val_results_per_epoch, learning_rates_per_epoch = [], [], []

    resource_monitor = None
    if config.monitoring_interval_seconds > 0:
        # Initialize and start GPU monitoring
        diagnostics_events = config.logs_folder / "diagnostics"
        logging.info(f"Starting resource monitor, outputting to {diagnostics_events}")
        resource_monitor = ResourceMonitor(interval_seconds=config.monitoring_interval_seconds,
                                           tensorboard_folder=diagnostics_events)
        resource_monitor.start()

    gradient_scaler = GradScaler() if config.use_gpu and config.use_mixed_precision else None
    optimal_temperature_scale_values = []
    for epoch in config.get_train_epochs():
        logging.info(f"Starting epoch {epoch}")
        save_epoch = config.should_save_epoch(epoch) and models_and_optimizer.optimizer is not None

        # Store the learning rates used for each epoch
        epoch_lrs = l_rate_scheduler.get_last_lr()
        learning_rates_per_epoch.append(epoch_lrs)

        train_val_params: TrainValidateParameters = \
            TrainValidateParameters(data_loader=data_loaders[ModelExecutionMode.TRAIN],
                                    model=models_and_optimizer.model,
                                    mean_teacher_model=models_and_optimizer.mean_teacher_model,
                                    epoch=epoch,
                                    optimizer=models_and_optimizer.optimizer,
                                    gradient_scaler=gradient_scaler,
                                    epoch_learning_rate=epoch_lrs,
                                    summary_writers=writers,
                                    dataframe_loggers=config.metrics_data_frame_loggers,
                                    in_training_mode=True)
        training_steps = create_model_training_steps(config, train_val_params)
        train_epoch_results = train_or_validate_epoch(training_steps)
        train_results_per_epoch.append(train_epoch_results.metrics)

        metrics.validate_and_store_model_parameters(writers.train, epoch, models_and_optimizer.model)

        # Run without adjusting weights on the validation set
        train_val_params.in_training_mode = False
        train_val_params.data_loader = data_loaders[ModelExecutionMode.VAL]
        # If temperature scaling is enabled, do not save validation metrics for the checkpoint epochs,
        # as these will be re-computed after performing temperature scaling on the validation set.
        if isinstance(config, SequenceModelBase):
            train_val_params.save_metrics = not (save_epoch and config.temperature_scaling_config)

        training_steps = create_model_training_steps(config, train_val_params)
        val_epoch_results = train_or_validate_epoch(training_steps)
        val_results_per_epoch.append(val_epoch_results.metrics)

        if config.is_segmentation_model:
            metrics.store_epoch_stats_for_segmentation(config.outputs_folder,
                                                       epoch,
                                                       epoch_lrs,
                                                       train_epoch_results.metrics,
                                                       val_epoch_results.metrics)

        if save_epoch:
            # Perform temperature scaling if required
            if isinstance(config, SequenceModelBase) and config.temperature_scaling_config:
                optimal_temperature, scaled_val_results = \
                    temperature_scaling_steps(config, train_val_params, val_epoch_results)
                optimal_temperature_scale_values.append(optimal_temperature)
                # Overwrite the metrics for the epoch with the metrics from the temperature scaled model
                val_results_per_epoch[-1] = scaled_val_results.metrics

            models_and_optimizer.save_checkpoint(epoch)

        # Updating the learning rate should happen at the end of the training loop, so that the
        # initial learning rate will be used for the very first epoch.
        l_rate_scheduler.step()

    model_training_results = ModelTrainingResults(
        train_results_per_epoch=train_results_per_epoch,
        val_results_per_epoch=val_results_per_epoch,
        learning_rates_per_epoch=learning_rates_per_epoch,
        optimal_temperature_scale_values_per_checkpoint_epoch=optimal_temperature_scale_values)

    logging.info("Finished training")

    # Since we have trained the model further, let the checkpoint_handler object know so it can handle
    # checkpoints correctly.
    checkpoint_handler.additional_training_done()

    # Upload the visualization directory to the AML run context, so that it can be viewed in the Azure UI.
    if config.max_batch_grad_cam > 0 and config.visualization_folder.exists():
        RUN_CONTEXT.upload_folder(name=VISUALIZATION_FOLDER, path=str(config.visualization_folder))

    writers.close_all()
    config.metrics_data_frame_loggers.close_all()

    if resource_monitor:
        # Stop the resource monitoring process and log the aggregate utilization
        logging.info("Shutting down the resource monitor process. Aggregate resource utilization:")
        for name, value in resource_monitor.read_aggregate_metrics():
            logging.info(f"{name}: {value}")
            if not is_offline_run_context(RUN_CONTEXT):
                RUN_CONTEXT.log(name, value)
        resource_monitor.kill()

    return model_training_results
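

# Illustrative sketch only, not part of the original module: a minimal plain-PyTorch version of the
# scheduler-stepping order used in model_train above. The learning rate is read at the start of each epoch
# via get_last_lr() and the scheduler is stepped only after the epoch has finished, so the very first epoch
# trains with the initial learning rate. The toy model, optimizer and scheduler settings below are
# assumptions chosen purely for illustration.
def _example_scheduler_stepping_order(num_epochs: int = 3) -> None:
    import torch
    from torch import nn

    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
    for epoch in range(num_epochs):
        epoch_lr = scheduler.get_last_lr()  # the learning rate this epoch will train with
        print(f"Epoch {epoch}: lr={epoch_lr}")
        # ... run training and validation for the epoch here ...
        optimizer.step()  # stands in for the per-batch parameter updates performed during the epoch
        scheduler.step()  # update the learning rate only once the epoch has completed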
def test_register_and_score_model(is_ensemble: bool,
                                  dataset_expected_spacing_xyz: Any,
                                  model_outside_package: bool,
                                  test_output_dirs: OutputFolderForTests) -> None:
    """
    End-to-end test which ensures the scoring pipeline is functioning as expected by performing the following:
    1) Registering a pre-trained model to AML
    2) Checking that a model zip from the registered model can be created successfully
    3) Calling the scoring pipeline to check inference can be run from the published model successfully
    """
    # We are creating checkpoints on the fly in this test, writing a randomly initialized model.
    set_random_seed(0)
    # Get an existing config as template
    loader = get_model_loader("Tests.ML.configs" if model_outside_package else None)
    config: SegmentationModelBase = loader.create_model_config_from_name(
        model_name="BasicModel2EpochsOutsidePackage" if model_outside_package else "BasicModel2Epochs")
    config.dataset_expected_spacing_xyz = dataset_expected_spacing_xyz
    config.set_output_to(test_output_dirs.root_dir)
    checkpoints_absolute = []
    model_and_info = ModelAndInfo(config=config, model_execution_mode=ModelExecutionMode.TRAIN)
    model_and_info.create_model()
    model_and_info.create_optimizer()
    checkpoints_absolute.append(model_and_info.save_checkpoint(epoch=10))
    if is_ensemble:
        checkpoints_absolute.append(model_and_info.save_checkpoint(epoch=20))
    checkpoints_relative = [f.relative_to(config.checkpoint_folder) for f in checkpoints_absolute]
    azureml_model = None
    # Simulate a project root: We can't derive that from the repository root because that might point
    # into Python's package folder
    project_root = Path(__file__).parent.parent
    # Double-check that we are at the right place, by testing for a file that would quite certainly not be found
    # somewhere else
    assert (project_root / fixed_paths.SCORE_SCRIPT).is_file()
    try:
        azure_config = get_default_azure_config()
        if model_outside_package:
            azure_config.extra_code_directory = "Tests"  # contains BasicModel2EpochsOutsidePackage
        deployment_hook = lambda cfg, azure_cfg, mdl, is_ens: (Path(cfg.model_name), azure_cfg.docker_shm_size)
        ml_runner = MLRunner(config, azure_config, project_root=project_root,
                             model_deployment_hook=deployment_hook)
        registration_result = ml_runner.register_segmentation_model(
            model_description="",
            checkpoint_paths=checkpoints_absolute,
            model_proc=ModelProcessing.DEFAULT)
        assert registration_result is not None
        azureml_model, deployment_result = registration_result
        assert azureml_model is not None
        assert deployment_result == (Path(config.model_name), azure_config.docker_shm_size)
        # Download the registered model and test that we can run the score pipeline on it
        model_root = Path(azureml_model.download(str(test_output_dirs.root_dir)))
        # The model needs to contain score.py at the root, the (merged) environment definition,
        # and the inference config.
        expected_files = [
            *fixed_paths.SCRIPTS_AT_ROOT,
            fixed_paths.ENVIRONMENT_YAML_FILE_NAME,
            fixed_paths.MODEL_INFERENCE_JSON_FILE_NAME,
            "InnerEye/ML/runner.py",
        ]
        # All checkpoints go into their own folder
        expected_files.extend(str(Path(CHECKPOINT_FOLDER) / c) for c in checkpoints_relative)
        for expected_file in expected_files:
            assert (model_root / expected_file).is_file(), f"File {expected_file} missing"

        # Create a dummy datastore to store the image data
        test_datastore = test_output_dirs.root_dir / "test_datastore"
        # Move test data into the data folder to simulate an actual run
        train_and_test_data_dir = full_ml_test_data_path("train_and_test_data")
        img_files = ["id1_channel1.nii.gz", "id1_channel2.nii.gz"]
        data_root = test_datastore / fixed_paths.DEFAULT_DATA_FOLDER
        data_root.mkdir(parents=True)
        for f in img_files:
            shutil.copy(str(train_and_test_data_dir / f), str(data_root))

        # Run the score pipeline as a separate process
        python_executable = sys.executable
        return_code1, stdout1 = SubprocessConfig(process=python_executable,
                                                 args=["--version"]).spawn_and_monitor_subprocess()
        assert return_code1 == 0
        print(f"Executing Python version {stdout1[0]}")
        return_code, stdout2 = SubprocessConfig(
            process=python_executable,
            args=[str(model_root / fixed_paths.SCORE_SCRIPT),
                  f"--data_folder={str(data_root)}",
                  f"--image_files={img_files[0]},{img_files[1]}",
                  "--use_gpu=False"]).spawn_and_monitor_subprocess()

        # Check that the process completed as expected
        assert return_code == 0, f"Subprocess failed with return code {return_code}. Stdout: {os.linesep.join(stdout2)}"
        expected_segmentation_path = Path(model_root) / DEFAULT_RESULT_IMAGE_NAME
        assert expected_segmentation_path.exists(), f"Result file not found: {expected_segmentation_path}"

        # Sanity check the resulting segmentation
        expected_shape = get_nifti_shape(train_and_test_data_dir / img_files[0])
        image_header = get_unit_image_header()
        assert_nifti_content(str(expected_segmentation_path), expected_shape, image_header, [3], np.ubyte)
    finally:
        # Delete the registered model
        if azureml_model:
            azureml_model.delete()
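

# Illustrative sketch only, not part of the original test: the scoring subprocess above can equivalently be
# launched with the standard library's subprocess module instead of the repository's SubprocessConfig helper.
# The flag names mirror those used in the test; model_root, data_root and image_files are placeholders that
# the caller would have to supply.
def _example_run_score_script(model_root: Path, data_root: Path, image_files: list) -> int:
    import subprocess
    completed = subprocess.run(
        [sys.executable,
         str(model_root / fixed_paths.SCORE_SCRIPT),
         f"--data_folder={data_root}",
         f"--image_files={','.join(image_files)}",
         "--use_gpu=False"],
        capture_output=True,
        text=True)
    print(completed.stdout)
    return completed.returncode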