def get_model(project_dir, train_seq, hparams, logger, args):
    """
    Build (or resume) the tf.keras model described by hparams['build'].

    With args.continue_training set, model_initializer loads the best
    previously stored model from [project_dir]/models. When training from
    scratch and hparams["build"]["biased_output_layer"] is truthy, the final
    conv layer's biases are initialized so a zero input yields class
    probabilities matching the training-set class frequencies.

    Args:
        project_dir: Path to a mpunet project folder
        train_seq:   mpunet.sequences object wrapping the training data
        hparams:     mpunet YAMLHParams object
        logger:      mpunet logging object
        args:        argparse namespace (continue_training flag is read)

    Returns:
        The initialized (possibly resumed) model ready for fitting.
    """
    from mpunet.models import model_initializer

    # Predictions must come out flattened for the fitting pipeline.
    hparams["build"]['flatten_output'] = True

    model = model_initializer(hparams=hparams,
                              continue_training=args.continue_training,
                              project_dir=project_dir,
                              logger=logger)

    # Only bias the output layer on fresh models; resumed weights keep theirs.
    fresh_model = not args.continue_training
    wants_bias = bool(hparams["build"].get("biased_output_layer"))
    if fresh_model and wants_bias:
        from mpunet.utils.utils import set_bias_weights_on_all_outputs
        set_bias_weights_on_all_outputs(model, train_seq, hparams, logger)

    return model
def entry_func(args=None):
    """
    Script entry point: run 3D patch-based prediction (and optional
    evaluation) with a trained mpunet model.

    Loads hyperparameters from [project_dir]/train_hparams.yaml, restores the
    best stored model weights, predicts every image in the test set (or the
    single file given with -f), saves NIfTI outputs under out_dir/nii_files
    and, unless running in predict-only mode, computes and stores per-image
    dice scores.

    Args:
        args: Optional list of command line arguments; defaults to sys.argv.

    Side effects:
        Creates/overwrites out_dir, writes result CSVs and NIfTI files,
        selects/waits for a GPU, prints progress to stdout.
    """
    # Get command line arguments
    args = vars(get_argparser().parse_args(args))
    base_dir = os.path.abspath(args["project_dir"])
    _file = args["f"]
    label = args["l"]
    N_extra = args["extra"]
    try:
        N_extra = int(N_extra)
    except ValueError:
        # Non-numeric values (e.g. "max") are passed through unchanged
        pass

    # Get settings from YAML file
    from mpunet.hyperparameters import YAMLHParams
    hparams = YAMLHParams(os.path.join(base_dir, "train_hparams.yaml"))

    # Set strides
    hparams["fit"]["strides"] = args["strides"]

    if not _file:
        try:
            # Data specified from command line?
            data_dir = os.path.abspath(args["data_dir"])

            # Set with default sub dirs
            hparams["test_data"] = {"base_dir": data_dir,
                                    "img_subdir": "images",
                                    "label_subdir": "labels"}
        except (AttributeError, TypeError):
            # No/invalid --data_dir: fall back to the project's test_data
            data_dir = hparams["test_data"]["base_dir"]
    else:
        data_dir = False
    out_dir = os.path.abspath(args["out_dir"])
    overwrite = args["overwrite"]
    predict_mode = args["no_eval"]
    save_only_pred = args["save_only_pred"]

    # Check if valid dir structures
    validate_folders(base_dir, data_dir, out_dir, overwrite)

    # Import all needed modules (folder is valid at this point)
    import numpy as np
    from mpunet.image import ImagePairLoader, ImagePair
    from mpunet.utils import get_best_model, create_folders, \
        pred_to_class, await_and_set_free_gpu, set_gpu
    from mpunet.utils.fusion import predict_3D_patches, \
        predict_3D_patches_binary, pred_3D_iso
    from mpunet.logging import init_result_dict_3D, save_all_3D
    from mpunet.evaluate import dice_all
    from mpunet.bin.predict import save_nii_files

    # Fetch GPU(s)
    num_GPUs = args["num_GPUs"]
    force_gpu = args["force_GPU"]
    # Wait for free GPU
    if force_gpu == -1:
        await_and_set_free_gpu(N=num_GPUs, sleep_seconds=240)
    else:
        set_gpu(force_gpu)

    # Read settings from the project hyperparameter file
    n_classes = hparams["build"]["n_classes"]
    mode = hparams["fit"]["intrp_style"]

    # Set ImagePairLoader object
    if not _file:
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            **hparams["test_data"])
    else:
        # Single-file mode: no label file implies predict-only
        predict_mode = not bool(label)
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            initialize_empty=True)
        image_pair_loader.add_image(ImagePair(_file, label))
    all_images = {image.identifier: image
                  for image in image_pair_loader.images}

    # Set scaler and bg values
    image_pair_loader.set_scaler_and_bg_values(
        bg_value=hparams.get_from_anywhere('bg_value'),
        scaler=hparams.get_from_anywhere('scaler'),
        compute_now=False)

    # Init LazyQueue and get its sequencer
    from mpunet.sequences.utils import get_sequence
    seq = get_sequence(data_queue=image_pair_loader,
                       is_validation=True,
                       **hparams["fit"], **hparams["build"])

    # Define UNet model and restore the best stored weights
    from mpunet.models import model_initializer
    hparams["build"]["batch_size"] = 1
    unet = model_initializer(hparams, False, base_dir)
    model_path = get_best_model(os.path.join(base_dir, "model"))
    unet.load_weights(model_path)

    # Evaluate?
    if not predict_mode:
        # Prepare dictionary to store results in pd df
        results, detailed_res = init_result_dict_3D(all_images, n_classes)

        # Save to check correct format
        save_all_3D(results, detailed_res, out_dir)

    # Define result paths
    nii_res_dir = os.path.join(out_dir, "nii_files")
    create_folders(nii_res_dir)

    image_ids = sorted(all_images)
    for n_image, image_id in enumerate(image_ids):
        print("\n[*] Running on: %s" % image_id)

        with seq.image_pair_queue.get_image_by_id(image_id) as image_pair:
            if mode.lower() == "iso_live_3d":
                pred = pred_3D_iso(model=unet, sequence=seq,
                                   image=image_pair, extra_boxes=N_extra,
                                   min_coverage=None)
            else:
                # Predict on volume using model
                if n_classes > 1:
                    pred = predict_3D_patches(model=unet, patches=seq,
                                              image=image_pair,
                                              N_extra=N_extra)
                else:
                    pred = predict_3D_patches_binary(model=unet, patches=seq,
                                                     image_id=image_id,
                                                     N_extra=N_extra)

            # BUG FIX: the class map 'p' was previously computed only inside
            # the evaluation branch below, so save_nii_files(p, ...) raised
            # NameError whenever predict_mode was True. Compute it always.
            p = pred_to_class(pred, img_dims=3, has_batch_dim=False)

            if not predict_mode:
                # Get labels for the current image
                y = image_pair.labels

                # Calculate dice score
                print("Mean dice: ", end="", flush=True)
                dices = dice_all(y, p, n_classes=n_classes, ignore_zero=True)
                mean_dice = dices[~np.isnan(dices)].mean()
                print("Dices: ", dices)
                print("%s (n=%i)" % (mean_dice, len(dices)))

                # Add to results
                results[image_id] = [mean_dice]
                detailed_res[image_id] = dices

                # Overwrite with so-far results
                save_all_3D(results, detailed_res, out_dir)

            # Save results
            save_nii_files(p, image_pair, nii_res_dir, save_only_pred)

    if not predict_mode:
        # Write final results
        save_all_3D(results, detailed_res, out_dir)