def set_gpu_vis(args):
    """Configure which GPU(s) are visible to this process.

    If ``args.force_GPU`` is set, exactly that device string is made
    visible; otherwise the call blocks until ``args.num_GPUs`` free
    GPUs are found and claimed.

    Args:
        args: Parsed CLI namespace with ``force_GPU`` and ``num_GPUs``.

    Returns:
        int: The number of GPUs made visible.
    """
    forced = args.force_GPU
    if forced:
        # Pin visibility to the explicitly requested device string
        from mpunet.utils import set_gpu
        set_gpu(forced)
        return len(forced.split(","))
    # No explicit devices: poll until enough free GPUs become available
    from mpunet.utils import await_and_set_free_gpu
    await_and_set_free_gpu(N=args.num_GPUs, sleep_seconds=120)
    return args.num_GPUs
def entry_func(args=None):
    """Run one sub-experiment per cross-validation split.

    Distributes free GPU sets (or a fixed number of CPU-only jobs) across
    worker processes, optionally monitoring for newly freed GPUs over time.

    Args:
        args: Optional list of CLI argument strings; defaults to sys.argv.
    """
    # Get parser
    parser = vars(get_parser().parse_args(args))

    # Get parser arguments
    cv_dir = os.path.abspath(parser["CV_dir"])
    out_dir = os.path.abspath(parser["out_dir"])
    create_folders(out_dir)
    await_PID = parser["wait_for"]
    run_split = parser["run_on_split"]
    start_from = parser["start_from"] or 0
    num_jobs = parser["num_jobs"] or 1

    # GPU settings
    num_GPUs = parser["num_GPUs"]
    force_GPU = parser["force_GPU"]
    ignore_GPU = parser["ignore_GPU"]
    monitor_GPUs_every = parser["monitor_GPUs_every"]

    # User input assertions
    _assert_force_and_ignore_gpus(force_GPU, ignore_GPU)
    if run_split:
        _assert_run_split(start_from, monitor_GPUs_every, num_jobs)

    # Wait for PID to terminate before starting?
    if await_PID:
        from mpunet.utils import await_PIDs
        await_PIDs(await_PID)

    # Get file paths
    script = os.path.abspath(parser["script_prototype"])
    hparams = os.path.abspath(parser["hparams_prototype"])
    no_hparams = parser["no_hparams"]

    # Get list of folders of CV data to run on
    cv_folders = get_CV_folders(cv_dir)
    if run_split is not None:
        if run_split < 0 or run_split >= len(cv_folders):
            raise ValueError("--run_on_split should be in range [0-{}], "
                             "got {}".format(len(cv_folders) - 1, run_split))
        cv_folders = [cv_folders[run_split]]
        log_appendix = "_split{}".format(run_split)
    else:
        log_appendix = ""

    # Get a logger object
    logger = Logger(base_path="./",
                    active_file="output" + log_appendix,
                    print_calling_method=False,
                    overwrite_existing=True)

    if force_GPU:
        # Only these GPUs fill be chosen from
        from mpunet.utils import set_gpu
        set_gpu(force_GPU)
    if num_GPUs:
        # Get GPU sets (up to the number of splits)
        gpu_sets = get_free_GPU_sets(num_GPUs, ignore_GPU)[:len(cv_folders)]
    elif not num_jobs or num_jobs < 0:
        raise ValueError("Should specify a number of jobs to run in parallel "
                         "with the --num_jobs flag when using 0 GPUs pr. "
                         "process (--num_GPUs=0 was set).")
    else:
        # BUGFIX: use the defaulted 'num_jobs' local here. The raw
        # parser["num_jobs"] may be None (no --num_jobs flag given), and
        # ["''"] * None raises TypeError.
        gpu_sets = ["''"] * num_jobs

    # Get process pool, lock and GPU queue objects
    lock = Lock()
    gpu_queue = Queue()
    for gpu in gpu_sets:
        gpu_queue.put(gpu)

    procs = []
    if monitor_GPUs_every is not None and monitor_GPUs_every:
        logger("\nOBS: Monitoring GPU pool every %i seconds\n" %
               monitor_GPUs_every)
        # Start a process monitoring new GPU availability over time
        stop_event = Event()
        t = Process(target=monitor_GPUs,
                    args=(monitor_GPUs_every, gpu_queue, num_GPUs,
                          ignore_GPU, gpu_sets, stop_event))
        t.start()
        procs.append(t)
    else:
        stop_event = None
    try:
        for cv_folder in cv_folders[start_from:]:
            # Blocks until a GPU set is available in the pool
            gpus = gpu_queue.get()
            t = Process(target=run_sub_experiment,
                        args=(cv_folder, out_dir, script, hparams,
                              no_hparams, gpus, gpu_queue, lock, logger))
            t.start()
            procs.append(t)
            # Reap any workers that have already finished
            for t in procs:
                if not t.is_alive():
                    t.join()
    except KeyboardInterrupt:
        for t in procs:
            t.terminate()
    # Signal the monitor process (if any) to stop, then wait for all workers
    if stop_event is not None:
        stop_event.set()
    for t in procs:
        t.join()
def entry_func(args=None):
    """Train a fusion model combining per-view predictions of a trained U-Net.

    Loads the project's hyperparameters, views and best model weights,
    assembles a (possibly training-augmented) validation image set, and
    trains a FusionModel, saving its weights under model/fusion_weights/.

    Args:
        args: Optional list of CLI argument strings; defaults to sys.argv.
    """
    # Project base path
    args = vars(get_argparser().parse_args(args))
    basedir = os.path.abspath(args["project_dir"])
    overwrite = args["overwrite"]
    continue_training = args["continue_training"]
    eval_prob = args["eval_prob"]
    await_PID = args["wait_for"]
    dice_weight = args["dice_weight"]
    print("Fitting fusion model for project-folder: %s" % basedir)

    # Minimum images in validation set before also using training images
    min_val_images = 15

    # Fusion model training params
    epochs = args['epochs']
    fm_batch_size = args["batch_size"]

    # Early stopping params
    early_stopping = args["early_stopping"]

    # Wait for another PID to terminate before starting?
    if await_PID:
        from mpunet.utils import await_PIDs
        await_PIDs(await_PID)

    # Fetch GPU(s)
    num_GPUs = args["num_GPUs"]
    force_gpu = args["force_GPU"]
    # Wait for free GPU unless a specific device string was forced
    if not force_gpu:
        await_and_set_free_gpu(N=num_GPUs, sleep_seconds=120)
    else:
        set_gpu(force_gpu)

    # Get logger
    logger = Logger(base_path=basedir,
                    active_file="train_fusion",
                    overwrite_existing=overwrite)

    # Get YAML hyperparameters
    hparams = YAMLHParams(os.path.join(basedir, "train_hparams.yaml"))

    # Get some key settings
    n_classes = hparams["build"]["n_classes"]
    if hparams["build"]["out_activation"] == "linear":
        # Trained with logit targets? Replace with a probability activation
        # so fused outputs are class probabilities.
        hparams["build"][
            "out_activation"] = "softmax" if n_classes > 1 else "sigmoid"

    # Get views; views.npz is assumed to store them under key 'arr_0'
    views = np.load("%s/views.npz" % basedir)["arr_0"]
    # Remove 'views' from fit params so it is not passed on downstream
    del hparams["fit"]["views"]

    # Get weights and set fusion (output) path
    weights = get_best_model("%s/model" % basedir)
    weights_name = os.path.splitext(os.path.split(weights)[-1])[0]
    fusion_weights = "%s/model/fusion_weights/" \
                     "%s_fusion_weights.h5" % (basedir, weights_name)
    create_folders(os.path.split(fusion_weights)[0])

    # Log a few things
    log(logger, hparams, views, weights, fusion_weights)

    # Check if exists already...
    if not overwrite and os.path.exists(fusion_weights):
        from sys import exit
        print("\n[*] A fusion weights file already exists at '%s'."
              "\n Use the --overwrite flag to overwrite."
              % fusion_weights)
        exit(0)

    # Load validation data
    images = ImagePairLoader(**hparams["val_data"], logger=logger)
    is_validation = {m.identifier: True for m in images}

    # Define random sets of images to train on simul. (cant be all due
    # to memory constraints)
    image_IDs = [m.identifier for m in images]

    if len(images) < min_val_images:
        # Too few validation images: pick N random training images to
        # reach min_val_images in total
        diff = min_val_images - len(images)
        logger("Adding %i training images to set" % diff)

        # Load the training data and pick diff images
        # (sampling with replacement only if diff exceeds the train set size)
        train = ImagePairLoader(**hparams["train_data"], logger=logger)
        indx = np.random.choice(np.arange(len(train)), diff,
                                replace=diff > len(train))

        # Add the images to the image set
        train_add = [train[i] for i in indx]
        for m in train_add:
            is_validation[m.identifier] = False
            image_IDs.append(m.identifier)
        images.add_images(train_add)

    # Append IDs so that length % sub_size == 0
    sub_size = args["images_per_round"]
    rest = int(sub_size * np.ceil(len(image_IDs) / sub_size)) - len(image_IDs)
    if rest:
        image_IDs += list(np.random.choice(image_IDs, rest, replace=False))

    # Shuffle and split into rounds of sub_size images each
    random.shuffle(image_IDs)
    sets = [
        set(s) for s in np.array_split(image_IDs, len(image_IDs) / sub_size)
    ]
    # Sanity check: every image ID must appear in at least one round
    assert (contains_all_images(sets, image_IDs))

    # Define fusion model (named 'org' to store reference to original model if
    # multi gpu model is created below)
    fusion_model = FusionModel(n_inputs=len(views), n_classes=n_classes,
                               weight=dice_weight, logger=logger,
                               verbose=False)
    if continue_training:
        # Resume from previously saved fusion weights
        fusion_model.load_weights(fusion_weights)
        print("\n[OBS] CONTINUED TRAINING FROM:\n", fusion_weights)

    # TF imported late, after GPU visibility has been configured above
    import tensorflow as tf
    with tf.distribute.MirroredStrategy().scope():
        # Define model
        unet = init_model(hparams["build"], logger)
        print("\n[*] Loading weights: %s\n" % weights)
        # by_name=True: match layers by name, tolerating architecture diffs
        unet.load_weights(weights, by_name=True)

        # Compile the model
        logger("Compiling...")
        metrics = [
            "sparse_categorical_accuracy",
            sparse_fg_precision,
            sparse_fg_recall
        ]
        fusion_model.compile(optimizer=Adam(lr=1e-3),
                             loss=fusion_model.loss,
                             metrics=metrics)
        fusion_model._log()

    try:
        _run_fusion_training(sets, logger, hparams, min_val_images,
                             is_validation, views, n_classes, unet,
                             fusion_model, early_stopping, fm_batch_size,
                             epochs, eval_prob, fusion_weights)
    except KeyboardInterrupt:
        # Allow manual interruption; weights are still saved in finally
        pass
    finally:
        if not os.path.exists(os.path.split(fusion_weights)[0]):
            os.mkdir(os.path.split(fusion_weights)[0])

        # Save fusion model weights
        # OBS: Must be original model if multi-gpu is performed!
        fusion_model.save_weights(fusion_weights)
def entry_func(args=None):
    """Predict (and optionally evaluate) 3D segmentations with a trained model.

    Runs patch/iso-based 3D prediction over a test set or a single file,
    saves NIfTI results, and — unless --no_eval was given — computes and
    stores per-image dice scores.

    Args:
        args: Optional list of CLI argument strings; defaults to sys.argv.
    """
    # Get command line arguments
    args = vars(get_argparser().parse_args(args))
    base_dir = os.path.abspath(args["project_dir"])
    _file = args["f"]
    label = args["l"]
    N_extra = args["extra"]
    try:
        N_extra = int(N_extra)
    except ValueError:
        # Non-integer value is kept as-is (interpreted downstream)
        pass

    # Get settings from YAML file
    from mpunet.hyperparameters import YAMLHParams
    hparams = YAMLHParams(os.path.join(base_dir, "train_hparams.yaml"))

    # Set strides
    hparams["fit"]["strides"] = args["strides"]

    if not _file:
        try:
            # Data specified from command line?
            data_dir = os.path.abspath(args["data_dir"])

            # Set with default sub dirs
            hparams["test_data"] = {"base_dir": data_dir,
                                    "img_subdir": "images",
                                    "label_subdir": "labels"}
        except (AttributeError, TypeError):
            # No --data_dir given; fall back to the project's test_data config
            data_dir = hparams["test_data"]["base_dir"]
    else:
        data_dir = False
    out_dir = os.path.abspath(args["out_dir"])
    overwrite = args["overwrite"]
    predict_mode = args["no_eval"]
    save_only_pred = args["save_only_pred"]

    # Check if valid dir structures
    validate_folders(base_dir, data_dir, out_dir, overwrite)

    # Import all needed modules (folder is valid at this point)
    import numpy as np
    from mpunet.image import ImagePairLoader, ImagePair
    from mpunet.utils import get_best_model, create_folders, \
        pred_to_class, await_and_set_free_gpu, set_gpu
    from mpunet.utils.fusion import predict_3D_patches, \
        predict_3D_patches_binary, pred_3D_iso
    from mpunet.logging import init_result_dict_3D, save_all_3D
    from mpunet.evaluate import dice_all
    from mpunet.bin.predict import save_nii_files

    # Fetch GPU(s)
    num_GPUs = args["num_GPUs"]
    force_gpu = args["force_GPU"]
    # Wait for free GPU
    # NOTE(review): -1 is presumably the argparser default for --force_GPU;
    # other entry points test `if not force_gpu` instead — confirm the
    # default before changing this sentinel.
    if force_gpu == -1:
        await_and_set_free_gpu(N=num_GPUs, sleep_seconds=240)
    else:
        set_gpu(force_gpu)

    # Read settings from the project hyperparameter file
    dim = hparams["build"]["dim"]
    n_classes = hparams["build"]["n_classes"]
    mode = hparams["fit"]["intrp_style"]

    # Set ImagePairLoader object
    if not _file:
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            **hparams["test_data"])
    else:
        # Single-file mode: evaluate only if a label file was also given
        predict_mode = not bool(label)
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            initialize_empty=True)
        image_pair_loader.add_image(ImagePair(_file, label))
    all_images = {
        image.identifier: image for image in image_pair_loader.images
    }

    # Set scaler and bg values
    image_pair_loader.set_scaler_and_bg_values(
        bg_value=hparams.get_from_anywhere('bg_value'),
        scaler=hparams.get_from_anywhere('scaler'),
        compute_now=False)

    # Init LazyQueue and get its sequencer
    from mpunet.sequences.utils import get_sequence
    seq = get_sequence(data_queue=image_pair_loader,
                       is_validation=True,
                       **hparams["fit"],
                       **hparams["build"])

    """ Define UNet model """
    from mpunet.models import model_initializer
    hparams["build"]["batch_size"] = 1
    unet = model_initializer(hparams, False, base_dir)
    model_path = get_best_model(base_dir + "/model")
    unet.load_weights(model_path)

    # Evaluate?
    if not predict_mode:
        # Prepare dictionary to store results in pd df
        results, detailed_res = init_result_dict_3D(all_images, n_classes)

        # Save to check correct format
        save_all_3D(results, detailed_res, out_dir)

    # Define result paths
    nii_res_dir = os.path.join(out_dir, "nii_files")
    create_folders(nii_res_dir)

    image_ids = sorted(all_images)
    for n_image, image_id in enumerate(image_ids):
        print("\n[*] Running on: %s" % image_id)
        with seq.image_pair_queue.get_image_by_id(image_id) as image_pair:
            if mode.lower() == "iso_live_3d":
                pred = pred_3D_iso(model=unet, sequence=seq,
                                   image=image_pair, extra_boxes=N_extra,
                                   min_coverage=None)
            else:
                # Predict on volume using model
                if n_classes > 1:
                    pred = predict_3D_patches(model=unet, patches=seq,
                                              image=image_pair,
                                              N_extra=N_extra)
                else:
                    pred = predict_3D_patches_binary(model=unet,
                                                     patches=seq,
                                                     image_id=image_id,
                                                     N_extra=N_extra)

            # BUGFIX: compute the class map unconditionally. Previously 'p'
            # was only assigned inside the evaluation branch below, so running
            # with --no_eval raised NameError at save_nii_files.
            p = pred_to_class(pred, img_dims=3, has_batch_dim=False)

            if not predict_mode:
                # Get labels for the current image
                y = image_pair.labels

                # Calculate dice score
                print("Mean dice: ", end="", flush=True)
                dices = dice_all(y, p, n_classes=n_classes,
                                 ignore_zero=True)
                mean_dice = dices[~np.isnan(dices)].mean()
                print("Dices: ", dices)
                print("%s (n=%i)" % (mean_dice, len(dices)))

                # Add to results
                results[image_id] = [mean_dice]
                detailed_res[image_id] = dices

                # Overwrite with so-far results
                save_all_3D(results, detailed_res, out_dir)

            # Save results
            save_nii_files(p, image_pair, nii_res_dir, save_only_pred)

    if not predict_mode:
        # Write final results
        save_all_3D(results, detailed_res, out_dir)
def run(args):
    """Launch one sub-experiment per cross-validation fold.

    Workers draw GPU sets from a shared queue; a monitor process may
    replenish the queue as devices become free.

    Args:
        args: Parsed CLI namespace (CV_dir, out_dir, num_GPUs, ...).
    """
    cv_dir = os.path.abspath(args.CV_dir)

    # Collect the CV fold folders and validate the CLI arguments against them
    cv_folders = get_CV_folders(cv_dir)
    assert_args(args, n_splits=len(cv_folders))
    out_dir = os.path.abspath(args.out_dir)
    hparams_dir = os.path.abspath(args.hparams_prototype_dir)
    prepare_hparams_dir(hparams_dir)
    create_folders(out_dir)

    # Optionally block until another process has terminated first
    if args.wait_for:
        from mpunet.utils import await_PIDs
        await_PIDs(args.wait_for)

    # Restrict to a single fold when requested
    log_appendix = ""
    if args.run_on_split is not None:
        cv_folders = [cv_folders[args.run_on_split]]
        log_appendix = "_split{}".format(args.run_on_split)

    # Get a logger object
    logger = Logger(base_path="./",
                    active_file="output" + log_appendix,
                    print_calling_method=False,
                    overwrite_existing=True)

    if args.force_GPU:
        # Restrict the pool of selectable devices to the forced set
        from mpunet.utils import set_gpu
        set_gpu(args.force_GPU)
    if args.num_GPUs:
        # One GPU set per job, capped at the number of folds
        gpu_sets = get_free_GPU_sets(args.num_GPUs,
                                     args.ignore_GPU)[:len(cv_folders)]
    elif not args.num_jobs or args.num_jobs < 0:
        raise ValueError("Should specify a number of jobs to run in parallel "
                         "with the --num_jobs flag when using 0 GPUs pr. "
                         "process (--num_GPUs=0 was set).")
    else:
        # CPU-only mode: hand each job an empty GPU specification
        gpu_sets = ["''"] * args.num_jobs

    # Shared lock plus a queue acting as the pool of available GPU sets
    lock = Lock()
    gpu_queue = Queue()
    for gpu_set in gpu_sets:
        gpu_queue.put(gpu_set)

    # Path to the training script prototype
    script = os.path.abspath(args.script_prototype)

    # Possibly start a process that replenishes the GPU pool over time
    running_processes, stop_event = start_gpu_monitor_process(
        args, gpu_queue, gpu_sets, logger)

    try:
        for fold_folder in cv_folders[args.start_from:]:
            # Blocks until a GPU set is available in the pool
            gpus = gpu_queue.get()
            proc = Process(target=run_sub_experiment,
                           args=(fold_folder, out_dir, script, hparams_dir,
                                 args.no_hparams, gpus, gpu_queue, lock,
                                 logger))
            proc.start()
            running_processes.append(proc)
            # Reap any workers that have already finished
            for proc in running_processes:
                if not proc.is_alive():
                    proc.join()
    except KeyboardInterrupt:
        for proc in running_processes:
            proc.terminate()
    # Signal the monitor (if any) to stop, then wait for all processes
    if stop_event is not None:
        stop_event.set()
    for proc in running_processes:
        proc.join()