Example #1
    def __init__(self, base_path, print_to_screen=True, active_file=None,
                 overwrite_existing=False, append_existing=False,
                 print_calling_method=True, no_sub_folder=False,
                 log_prefix=""):
        self.base_path = os.path.abspath(base_path)
        if not no_sub_folder:
            self.path = os.path.join(self.base_path, "logs")
        else:
            self.path = self.base_path
        create_folders([self.path])
        if overwrite_existing and append_existing:
            raise ValueError("Cannot set both 'overwrite_existing' and "
                             "'append_existing' to True.")
        self.overwrite_existing = overwrite_existing
        self.append_existing = append_existing
        self._enabled = True

        # Keep a reference to the built-in print function
        # (if 'print' is overwritten globally, Logger still maintains a
        # reference to the true print function; the 'builtins' module is used
        # since '__builtins__' may be either a module or a dict)
        import builtins
        self.print_f = builtins.print

        # Print options
        self.separator = "-" * 80
        self.print_to_screen = print_to_screen
        self.print_calling_method = print_calling_method

        # Set paths to log files
        self.log_files = {}
        self.currently_logging = {}
        self.prefix = "" if log_prefix is None else str(log_prefix)
        self.active_log_file = active_file or "log"

        # For using the logger from multiple threads
        self.lock = Lock()
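
Usage note: a minimal sketch of constructing this Logger, assuming the full class (of which the __init__ above is part) is importable from mpunet.logging; how 'active_file' maps to a file name on disk is an assumption:

from mpunet.logging import Logger

# A "logs" sub-folder is created under base_path unless no_sub_folder=True
logger = Logger(base_path="./my_project",
                active_file="train",      # assumed to name the active log file
                overwrite_existing=True)

# The two exclusive flags cannot be combined:
# Logger(base_path=".", overwrite_existing=True, append_existing=True)
# raises ValueError: "Cannot set both 'overwrite_existing' and
# 'append_existing' to True."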
Example #2
def run_sub_experiment(split_dir, out_dir, script, hparams, no_hparams, GPUs,
                       GPU_queue, lock, logger):

    # Create sub-directory
    split = os.path.split(split_dir)[-1]
    out_dir = os.path.join(out_dir, split)
    out_hparams = os.path.join(out_dir, "train_hparams.yaml")
    create_folders(out_dir)

    # Get list of commands
    commands = parse_script(script, GPUs)

    # Move hparams and script files into folder
    if not no_hparams:
        copy_yaml_and_set_data_dirs(in_path=hparams,
                                    out_path=out_hparams,
                                    data_dir=split_dir)

    # Change directory and file permissions
    os.chdir(out_dir)

    # Log
    with lock:
        s = "[*] Running experiment: %s" % split
        logger("\n%s\n%s" % ("-" * len(s), s))
        logger("Data dir:", split_dir)
        logger("Out dir:", out_dir)
        logger("Using GPUs:", GPUs)
        logger("\nRunning commands:")
        for i, command in enumerate(commands):
            logger(" %i) %s" % (i + 1, " ".join(command)))
        logger("-" * len(s))

    # Run the commands
    run_next_command = True
    for command in commands:
        if not run_next_command:
            break
        with lock:
            logger("[%s - STARTING] %s" % (split, " ".join(command)))
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        _, err = p.communicate()
        rc = p.returncode
        with lock:
            if rc != 0:
                logger("[%s - ERROR - Exit code %i] %s" %
                       (split, rc, " ".join(command)))
                logger("\n----- START error message -----\n%s\n"
                       "----- END error message -----\n" % err.decode("utf-8"))
                run_next_command = False
            else:
                logger("[%s - FINISHED] %s" % (split, " ".join(command)))

    # Add the GPUs back into the queue
    GPU_queue.put(GPUs)
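
The function above relies on a shared GPU pool: a multiprocessing.Queue holds GPU sets, a worker borrows one set for the lifetime of its experiment and puts it back when done, and a Lock serializes the interleaved log output. A self-contained sketch of that pattern (here the worker performs both queue operations for brevity; in the entry functions below the parent calls get() before spawning):

from multiprocessing import Lock, Process, Queue


def worker(name, gpu_queue, lock):
    gpus = gpu_queue.get()        # block until a GPU set is free
    try:
        with lock:                # serialize log output across workers
            print("[%s] running on GPUs %s" % (name, gpus))
    finally:
        gpu_queue.put(gpus)       # return the GPU set to the pool


if __name__ == "__main__":
    gpu_queue, lock = Queue(), Lock()
    for gpu_set in ("0", "1"):    # pool of two single-GPU sets
        gpu_queue.put(gpu_set)
    procs = [Process(target=worker, args=("split_%i" % i, gpu_queue, lock))
             for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()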
Example #3
def entry_func(args=None):
    parser = get_parser()
    args = vars(parser.parse_args(args))

    # Get arguments
    project_folder = os.path.abspath(args["project_folder"])
    copy_weights = args["copy_weights"]
    weights = os.path.abspath(args["weights_file"]
                              or get_best_model(project_folder + "/model"))
    out_dir = os.path.join(project_folder, args["out_dir"])

    # Get main hyperparameter file and check for correct model type
    hparams = YAMLHParams(project_folder + "/train_hparams.yaml", no_log=True)
    tasks = hparams.get("tasks", False)
    if not tasks:
        print("[ERROR] Project must be of type 'MultiTask'.")
        sys.exit(1)

    # Branch out each sub-task
    create_folders(out_dir)
    for name, hparams_file in zip(tasks["task_names"], tasks["hparam_files"]):
        print("\n[*] Branching task %s" % name)
        # Reload the hparams in each iteration as we overwrite fields each time
        hparams = YAMLHParams(project_folder + "/train_hparams.yaml",
                              no_log=False)
        # Get task specific parameters
        task_hparams = YAMLHParams(project_folder + "/%s" % hparams_file)
        branch(task_name=name,
               out_dir=os.path.join(out_dir, name),
               task_hparams=task_hparams,
               task_hparams_file=hparams_file,
               shared_hparams=hparams,
               weights=weights,
               copy_weights=copy_weights,
               views_file=os.path.join(project_folder, "views.npz"))
Example #4
def branch(task_name, out_dir, task_hparams, task_hparams_file, shared_hparams,
           weights, copy_weights, views_file):
    create_folders(out_dir)

    # Set the task fields to only the current task under 'build'
    shared_hparams.set_value("build",
                             "task_names",
                             value='["%s"]' % task_name,
                             overwrite=True)
    shared_hparams.delete_group("tasks")  # No longer necessary

    # Create a map defining where each task specific parameter should go
    mapping = {
        "task_specifics/n_classes": "build/n_classes",
        "task_specifics/n_channels": "build/n_channels",
        "task_specifics/dim": ["build/dim", "fit/dim"],
        "task_specifics/out_activation": "build/out_activation",
        "task_specifics/real_space_span": "fit/real_space_span"
    }

    for source, targets in mapping.items():
        in_key1, in_key2 = source.split("/")
        value = task_hparams[in_key1][in_key2]

        # Set the value at all targets
        targets = [targets] if isinstance(targets, str) else targets
        for target in targets:
            out_key1, out_key2 = target.split("/")
            shared_hparams.set_value(out_key1, out_key2, value, overwrite=True)

    # Add all data folders
    data_folders = ("train_data", "val_data", "test_data", "aug_data")
    for df in data_folders:
        yaml = task_hparams.get_group(df)
        shared_hparams.add_group(yaml_string=yaml)

    # Save the updated parameters to a new location
    shared_hparams.save_current(os.path.join(out_dir, "train_hparams.yaml"))

    # Add weights to folder
    weights_folder = os.path.join(out_dir, "model")
    create_folders(weights_folder)
    out_weights_name = os.path.split(weights)[1]
    out_weights_path = os.path.join(weights_folder, out_weights_name)
    func = copy if copy_weights else os.symlink
    if copy_weights:
        print("Copying weights...")
    else:
        print("Symlinking weights...")
    func(weights, out_weights_path)

    # Add views (check for existence for future compatibility with 3D models)
    if os.path.exists(views_file):
        func(views_file, os.path.join(out_dir, "views.npz"))
Example #5
def save_all_3D(results, detailed_res, out_dir):
    txt_res_dir = os.path.join(out_dir, "txt")
    csv_res_dir = os.path.join(out_dir, "csv")

    # Create folders
    create_folders([txt_res_dir, csv_res_dir])

    # Save main text/csv results files
    results_to_txt(results, txt_res_dir, transpose=True)
    results_to_csv(results, csv_res_dir, transpose=True)
    results_to_txt(detailed_res, txt_res_dir, fname="detailed")
    results_to_csv(detailed_res, csv_res_dir, fname="detailed")
Example #6
def run_on_split(split_path, test_split, train_val_data, n_val, args):
    """
    Add the train/val/test files of a single split to the split directories

    Depending on the arguments parsed (--copy, --file_list etc.) either copies,
    symlinks or creates a LIST_OF_FILES.txt file of absolute paths in each
    split sub-directory.

    Args:
        split_path:      (string) Path to the split directory
        test_split:      (list)   List of paths pointing to split test data
        train_val_data:  (list)   List of paths pointing to the remaining data
        n_val:           (int)    Number of samples in 'train_val_data' to use
                                  for validation - rest is used for training.
        args:            (tuple)  Parsed arguments, see argparser.
    """
    # Define train, val and test sub-dirs
    train_path = os.path.join(split_path, "train")
    val_path = os.path.join(split_path, "val") if n_val else None
    test_path = os.path.join(split_path, "test")

    # Create folders if not existing
    create_folders([train_path, val_path, test_path])

    # Copy or symlink?
    if args.copy:
        from shutil import copyfile
        move_func = copyfile
    elif args.file_list:
        move_func = _add_to_file_list_fallback
    else:
        move_func = os.symlink

    # Extract validation data from the remaining
    random.shuffle(train_val_data)
    validation = train_val_data[:n_val]
    training = train_val_data[n_val:]

    # Add training
    train_records = add_files(training, train_path, move_func)
    # Add test data
    test_records = add_files(test_split, test_path, move_func)
    if n_val:
        # Add validation
        val_records = add_files(validation, val_path, move_func)
    else:
        val_records = 0
    return train_records, val_records, test_records
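
All three strategies above share the (source, destination) call signature, which is what lets add_files treat them interchangeably. A sketch with a hypothetical stand-in for _add_to_file_list_fallback (the real helper is project-specific; the docstring above says it records absolute paths in a LIST_OF_FILES.txt file):

import os
from shutil import copyfile


def add_to_file_list(src, dst):
    # Hypothetical stand-in: append the absolute source path to a
    # LIST_OF_FILES.txt next to the would-be destination
    list_file = os.path.join(os.path.dirname(dst), "LIST_OF_FILES.txt")
    with open(list_file, "a") as f:
        f.write(os.path.abspath(src) + "\n")


move_func = copyfile          # --copy
move_func = add_to_file_list  # --file_list (stand-in)
move_func = os.symlink        # default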
Example #7
def save_all(results, pc_results, out_dir):

    # Get output paths
    txt_res_dir = os.path.join(out_dir, "txt")
    csv_res_dir = os.path.join(out_dir, "csv")

    # Create folders
    create_folders([txt_res_dir, csv_res_dir])

    # Save main text/csv results files
    results_to_txt(results, txt_res_dir)
    results_to_csv(results, csv_res_dir)

    # Write detailed results
    for view in pc_results:
        r = pc_results[view]
        view_str = str(view).replace("[", "").replace("]", "").strip()
        view_str = view_str.replace(" ", "_")
        results_to_txt(r, txt_res_dir, fname=view_str)
        results_to_csv(r, csv_res_dir, fname=view_str)
Example #8
def entry_func(args=None):
    # Get parser
    parser = vars(get_parser().parse_args(args))

    # Get parser arguments
    cv_dir = os.path.abspath(parser["CV_dir"])
    out_dir = os.path.abspath(parser["out_dir"])
    create_folders(out_dir)
    await_PID = parser["wait_for"]
    run_split = parser["run_on_split"]
    start_from = parser["start_from"] or 0
    num_jobs = parser["num_jobs"] or 1

    # GPU settings
    num_GPUs = parser["num_GPUs"]
    force_GPU = parser["force_GPU"]
    ignore_GPU = parser["ignore_GPU"]
    monitor_GPUs_every = parser["monitor_GPUs_every"]

    # User input assertions
    _assert_force_and_ignore_gpus(force_GPU, ignore_GPU)
    if run_split:
        _assert_run_split(start_from, monitor_GPUs_every, num_jobs)

    # Wait for PID?
    if await_PID:
        from mpunet.utils import await_PIDs
        await_PIDs(await_PID)

    # Get file paths
    script = os.path.abspath(parser["script_prototype"])
    hparams = os.path.abspath(parser["hparams_prototype"])
    no_hparams = parser["no_hparams"]

    # Get list of folders of CV data to run on
    cv_folders = get_CV_folders(cv_dir)
    if run_split is not None:
        if run_split < 0 or run_split >= len(cv_folders):
            raise ValueError("--run_on_split should be in range [0-{}], "
                             "got {}".format(len(cv_folders) - 1, run_split))
        cv_folders = [cv_folders[run_split]]
        log_appendix = "_split{}".format(run_split)
    else:
        log_appendix = ""

    # Get a logger object
    logger = Logger(base_path="./",
                    active_file="output" + log_appendix,
                    print_calling_method=False,
                    overwrite_existing=True)

    if force_GPU:
        # Only these GPUs will be chosen from
        from mpunet.utils import set_gpu
        set_gpu(force_GPU)
    if num_GPUs:
        # Get GPU sets (up to the number of splits)
        gpu_sets = get_free_GPU_sets(num_GPUs, ignore_GPU)[:len(cv_folders)]
    elif not num_jobs or num_jobs < 0:
        raise ValueError("Should specify a number of jobs to run in parallel "
                         "with the --num_jobs flag when using 0 GPUs per "
                         "process (--num_GPUs=0 was set).")
    else:
        gpu_sets = ["''"] * num_jobs

    # Get process pool, lock and GPU queue objects
    lock = Lock()
    gpu_queue = Queue()
    for gpu in gpu_sets:
        gpu_queue.put(gpu)

    procs = []
    if monitor_GPUs_every:
        logger("\nOBS: Monitoring GPU pool every %i seconds\n" %
               monitor_GPUs_every)
        # Start a process monitoring new GPU availability over time
        stop_event = Event()
        t = Process(target=monitor_GPUs,
                    args=(monitor_GPUs_every, gpu_queue, num_GPUs, ignore_GPU,
                          gpu_sets, stop_event))
        t.start()
        procs.append(t)
    else:
        stop_event = None
    try:
        for cv_folder in cv_folders[start_from:]:
            gpus = gpu_queue.get()
            t = Process(target=run_sub_experiment,
                        args=(cv_folder, out_dir, script, hparams, no_hparams,
                              gpus, gpu_queue, lock, logger))
            t.start()
            procs.append(t)
            for t in procs:
                if not t.is_alive():
                    t.join()
    except KeyboardInterrupt:
        for t in procs:
            t.terminate()
    if stop_event is not None:
        stop_event.set()
    for t in procs:
        t.join()
Example #9
def entry_func(args=None):

    # Project base path
    args = vars(get_argparser().parse_args(args))
    basedir = os.path.abspath(args["project_dir"])
    overwrite = args["overwrite"]
    continue_training = args["continue_training"]
    eval_prob = args["eval_prob"]
    await_PID = args["wait_for"]
    dice_weight = args["dice_weight"]
    print("Fitting fusion model for project-folder: %s" % basedir)

    # Minimum images in validation set before also using training images
    min_val_images = 15

    # Fusion model training params
    epochs = args['epochs']
    fm_batch_size = args["batch_size"]

    # Early stopping params
    early_stopping = args["early_stopping"]

    # Wait for PID?
    if await_PID:
        from mpunet.utils import await_PIDs
        await_PIDs(await_PID)

    # Fetch GPU(s)
    num_GPUs = args["num_GPUs"]
    force_gpu = args["force_GPU"]
    # Wait for free GPU
    if not force_gpu:
        await_and_set_free_gpu(N=num_GPUs, sleep_seconds=120)
    else:
        set_gpu(force_gpu)

    # Get logger
    logger = Logger(base_path=basedir,
                    active_file="train_fusion",
                    overwrite_existing=overwrite)

    # Get YAML hyperparameters
    hparams = YAMLHParams(os.path.join(basedir, "train_hparams.yaml"))

    # Get some key settings
    n_classes = hparams["build"]["n_classes"]

    if hparams["build"]["out_activation"] == "linear":
        # Trained with logit targets?
        hparams["build"][
            "out_activation"] = "softmax" if n_classes > 1 else "sigmoid"

    # Get views
    views = np.load("%s/views.npz" % basedir)["arr_0"]
    del hparams["fit"]["views"]

    # Get weights and set fusion (output) path
    weights = get_best_model("%s/model" % basedir)
    weights_name = os.path.splitext(os.path.split(weights)[-1])[0]
    fusion_weights = "%s/model/fusion_weights/" \
                     "%s_fusion_weights.h5" % (basedir, weights_name)
    create_folders(os.path.split(fusion_weights)[0])

    # Log a few things
    log(logger, hparams, views, weights, fusion_weights)

    # Check if exists already...
    if not overwrite and os.path.exists(fusion_weights):
        from sys import exit
        print("\n[*] A fusion weights file already exists at '%s'."
              "\n    Use the --overwrite flag to overwrite." % fusion_weights)
        exit(0)

    # Load validation data
    images = ImagePairLoader(**hparams["val_data"], logger=logger)
    is_validation = {m.identifier: True for m in images}

    # Define random sets of images to train on simultaneously (cannot use
    # them all at once due to memory constraints)
    image_IDs = [m.identifier for m in images]

    if len(images) < min_val_images:
        # Pick N random training images
        diff = min_val_images - len(images)
        logger("Adding %i training images to set" % diff)

        # Load the training data and pick diff images
        train = ImagePairLoader(**hparams["train_data"], logger=logger)
        indx = np.random.choice(np.arange(len(train)),
                                diff,
                                replace=diff > len(train))

        # Add the images to the image set
        train_add = [train[i] for i in indx]
        for m in train_add:
            is_validation[m.identifier] = False
            image_IDs.append(m.identifier)
        images.add_images(train_add)

    # Pad image_IDs with duplicates so that len(image_IDs) % sub_size == 0
    sub_size = args["images_per_round"]
    rest = int(sub_size * np.ceil(len(image_IDs) / sub_size)) - len(image_IDs)
    if rest:
        image_IDs += list(np.random.choice(image_IDs, rest, replace=False))

    # Shuffle and split
    random.shuffle(image_IDs)
    sets = [
        set(s) for s in np.array_split(image_IDs,
                                       len(image_IDs) // sub_size)
    ]
    assert contains_all_images(sets, image_IDs)

    # Define fusion model (named 'org' to store a reference to the original
    # model if a multi-GPU model is created below)
    fusion_model = FusionModel(n_inputs=len(views),
                               n_classes=n_classes,
                               weight=dice_weight,
                               logger=logger,
                               verbose=False)

    if continue_training:
        fusion_model.load_weights(fusion_weights)
        print("\n[OBS] CONTINUED TRAINING FROM:\n", fusion_weights)

    import tensorflow as tf
    with tf.distribute.MirroredStrategy().scope():
        # Define model
        unet = init_model(hparams["build"], logger)
        print("\n[*] Loading weights: %s\n" % weights)
        unet.load_weights(weights, by_name=True)

    # Compile the model
    logger("Compiling...")
    metrics = [
        "sparse_categorical_accuracy", sparse_fg_precision, sparse_fg_recall
    ]
    fusion_model.compile(optimizer=Adam(lr=1e-3),
                         loss=fusion_model.loss,
                         metrics=metrics)
    fusion_model._log()

    try:
        _run_fusion_training(sets, logger, hparams, min_val_images,
                             is_validation, views, n_classes, unet,
                             fusion_model, early_stopping, fm_batch_size,
                             epochs, eval_prob, fusion_weights)
    except KeyboardInterrupt:
        pass
    finally:
        if not os.path.exists(os.path.split(fusion_weights)[0]):
            os.mkdir(os.path.split(fusion_weights)[0])
        # Save fusion model weights
        # OBS: Must be original model if multi-gpu is performed!
        fusion_model.save_weights(fusion_weights)
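
A worked sketch of the padding-and-splitting step above: the ID list is padded with duplicate IDs until its length divides sub_size, then split into equally sized training rounds:

import numpy as np

image_IDs = ["im_%i" % i for i in range(10)]
sub_size = 4
rest = int(sub_size * np.ceil(len(image_IDs) / sub_size)) - len(image_IDs)
# ceil(10/4) * 4 = 12, so rest = 2 duplicate IDs are appended
image_IDs += list(np.random.choice(image_IDs, rest, replace=False))
sets = np.array_split(image_IDs, len(image_IDs) // sub_size)
print([len(s) for s in sets])  # -> [4, 4, 4]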
Example #10
def entry_func(args=None):

    # Get command line arguments
    args = vars(get_argparser().parse_args(args))
    base_dir = os.path.abspath(args["project_dir"])
    _file = args["f"]
    label = args["l"]
    N_extra = args["extra"]
    try:
        N_extra = int(N_extra)
    except ValueError:
        pass

    # Get settings from YAML file
    from mpunet.hyperparameters import YAMLHParams
    hparams = YAMLHParams(os.path.join(base_dir, "train_hparams.yaml"))

    # Set strides
    hparams["fit"]["strides"] = args["strides"]

    if not _file:
        try:
            # Data specified from command line?
            data_dir = os.path.abspath(args["data_dir"])

            # Set with default sub dirs
            hparams["test_data"] = {
                "base_dir": data_dir,
                "img_subdir": "images",
                "label_subdir": "labels"
            }
        except (AttributeError, TypeError):
            data_dir = hparams["test_data"]["base_dir"]
    else:
        data_dir = False
    out_dir = os.path.abspath(args["out_dir"])
    overwrite = args["overwrite"]
    predict_mode = args["no_eval"]
    save_only_pred = args["save_only_pred"]

    # Check if valid dir structures
    validate_folders(base_dir, data_dir, out_dir, overwrite)

    # Import all needed modules (folder is valid at this point)
    import numpy as np
    from mpunet.image import ImagePairLoader, ImagePair
    from mpunet.utils import (get_best_model, create_folders, pred_to_class,
                              await_and_set_free_gpu, set_gpu)
    from mpunet.utils.fusion import (predict_3D_patches,
                                     predict_3D_patches_binary, pred_3D_iso)
    from mpunet.logging import init_result_dict_3D, save_all_3D
    from mpunet.evaluate import dice_all
    from mpunet.bin.predict import save_nii_files

    # Fetch GPU(s)
    num_GPUs = args["num_GPUs"]
    force_gpu = args["force_GPU"]
    # Wait for free GPU
    if force_gpu == -1:
        await_and_set_free_gpu(N=num_GPUs, sleep_seconds=240)
    else:
        set_gpu(force_gpu)

    # Read settings from the project hyperparameter file
    dim = hparams["build"]["dim"]
    n_classes = hparams["build"]["n_classes"]
    mode = hparams["fit"]["intrp_style"]

    # Set ImagePairLoader object
    if not _file:
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            **hparams["test_data"])
    else:
        predict_mode = not bool(label)
        image_pair_loader = ImagePairLoader(predict_mode=predict_mode,
                                            initialize_empty=True)
        image_pair_loader.add_image(ImagePair(_file, label))
    all_images = {
        image.identifier: image
        for image in image_pair_loader.images
    }

    # Set scaler and bg values
    image_pair_loader.set_scaler_and_bg_values(
        bg_value=hparams.get_from_anywhere('bg_value'),
        scaler=hparams.get_from_anywhere('scaler'),
        compute_now=False)

    # Init LazyQueue and get its sequencer
    from mpunet.sequences.utils import get_sequence
    seq = get_sequence(data_queue=image_pair_loader,
                       is_validation=True,
                       **hparams["fit"],
                       **hparams["build"])
    """ Define UNet model """
    from mpunet.models import model_initializer
    hparams["build"]["batch_size"] = 1
    unet = model_initializer(hparams, False, base_dir)
    model_path = get_best_model(base_dir + "/model")
    unet.load_weights(model_path)

    # Evaluate?
    if not predict_mode:
        # Prepare dictionary to store results in pd df
        results, detailed_res = init_result_dict_3D(all_images, n_classes)

        # Save to check correct format
        save_all_3D(results, detailed_res, out_dir)

    # Define result paths
    nii_res_dir = os.path.join(out_dir, "nii_files")
    create_folders(nii_res_dir)

    image_ids = sorted(all_images)
    for n_image, image_id in enumerate(image_ids):
        print("\n[*] Running on: %s" % image_id)

        with seq.image_pair_queue.get_image_by_id(image_id) as image_pair:
            if mode.lower() == "iso_live_3d":
                pred = pred_3D_iso(model=unet,
                                   sequence=seq,
                                   image=image_pair,
                                   extra_boxes=N_extra,
                                   min_coverage=None)
            else:
                # Predict on volume using model
                if n_classes > 1:
                    pred = predict_3D_patches(model=unet,
                                              patches=seq,
                                              image=image_pair,
                                              N_extra=N_extra)
                else:
                    pred = predict_3D_patches_binary(model=unet,
                                                     patches=seq,
                                                     image_id=image_id,
                                                     N_extra=N_extra)

            if not predict_mode:
                # Get patches for the current image
                y = image_pair.labels

                # Calculate dice score
                print("Mean dice: ", end="", flush=True)
                p = pred_to_class(pred, img_dims=3, has_batch_dim=False)
                dices = dice_all(y, p, n_classes=n_classes, ignore_zero=True)
                mean_dice = dices[~np.isnan(dices)].mean()
                print("Dices: ", dices)
                print("%s (n=%i)" % (mean_dice, len(dices)))

                # Add to results
                results[image_id] = [mean_dice]
                detailed_res[image_id] = dices

                # Overwrite with so-far results
                save_all_3D(results, detailed_res, out_dir)

                # Save results
                save_nii_files(p, image_pair, nii_res_dir, save_only_pred)

    if not predict_mode:
        # Write final results
        save_all_3D(results, detailed_res, out_dir)
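
The mean dice above is NaN-masked: per-class dice scores that come back undefined (presumably when a class is absent from both prediction and label) are NaN in the array returned by dice_all and are excluded from the mean, e.g.:

import numpy as np

dices = np.array([0.92, np.nan, 0.85, 0.78])  # per-class dice, one undefined
mean_dice = dices[~np.isnan(dices)].mean()    # -> 0.85
print("%s (n=%i)" % (mean_dice, len(dices)))  # n counts all classes, NaN too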
Example #11
def entry_func(args=None):

    # Get parser
    parser = vars(get_parser().parse_args(args))

    # Get arguments
    data_dir = os.path.abspath(parser["data_dir"])
    n_splits = int(parser["CV"])
    if n_splits > 1:
        out_dir = os.path.join(data_dir, parser["out_dir"], "%i_CV" % n_splits)
    else:
        out_dir = os.path.join(data_dir, parser["out_dir"], "fixed_split")
    im_dir = os.path.join(data_dir, parser["im_sub_dir"])
    lab_dir = os.path.join(data_dir, parser["lab_sub_dir"])

    copy = parser["copy"]
    file_list = parser["file_list"]
    regex = parser["file_regex"]
    val_frac = parser["validation_fraction"]
    test_frac = parser["test_fraction"]
    common_prefix_length = parser["common_prefix_length"]

    if n_splits == 1 and not test_frac:
        raise ValueError("Must specify --test_fraction with --CV=1.")
    if copy and file_list:
        raise ValueError("Only one of the --copy and --file_list "
                         "flags may be set.")

    # Assert suitable folders
    assert_dir_structure(data_dir, im_dir, lab_dir, out_dir)

    # Create sub-folders
    create_view_folders(out_dir, n_splits)

    # Get images and pair by subject identifier if common_prefix_length > 0
    images = glob(os.path.join(im_dir, regex))
    images = pair_by_names(images, common_prefix_length)
    print("-----")
    print("Found {} images".format(len(images)))

    # Get validation size
    N_total = len(images)
    if n_splits > 1:
        N_test = N_total // n_splits
    else:
        N_test = int(np.ceil(N_total * test_frac))
    N_val = int(np.ceil(N_total * val_frac))
    if N_val + N_test >= N_total:
        raise ValueError("Too large validation_fraction - "
                         "No training samples left!")
    N_train = N_total - N_test - N_val
    print("Total images:".ljust(40), N_total)
    print("Train images pr. split:".ljust(40), N_train)
    print("Validation images pr. split:".ljust(40), N_val)
    print("Test images pr. split:".ljust(40), N_test)

    # Shuffle and split the images into CV parts
    random.shuffle(images)
    splits = np.array_split(images, n_splits)

    # Symlink / copy files
    for i, split in enumerate(splits):
        print("  Split %i/%i" % (i+1, n_splits), end="\r", flush=True)

        # Set root path to split folder
        if n_splits > 1:
            split_path = os.path.join(out_dir, "split_%i" % i)
        else:
            split_path = out_dir
            # Hacky: force the following code to also work with CV=1.
            # Define a test set and overwrite the current 'split' (which
            # holds all the data, as 'splits' was never split with
            # n_splits=1)
            split = splits[0][:N_test]

            # Overwrite the 'splits' variable with a length-2 list holding
            # the remaining data, which will be used as val+train. The loop
            # still refers to the old 'splits' and thus only executes once.
            splits = [split, splits[0][N_test:]]

        # Define train, val and test sub-dirs
        train_path = os.path.join(split_path, "train")
        train_im_path = os.path.join(train_path, parser["im_sub_dir"])
        train_label_path = os.path.join(train_path, parser["lab_sub_dir"])
        if N_val:
            val_path = os.path.join(split_path, "val")
            val_im_path = os.path.join(val_path, parser["im_sub_dir"])
            val_label_path = os.path.join(val_path, parser["lab_sub_dir"])
        else:
            val_path, val_im_path, val_label_path = (None,) * 3
        test_path = os.path.join(split_path, "test")
        test_im_path = os.path.join(test_path, parser["im_sub_dir"])
        test_label_path = os.path.join(test_path, parser["lab_sub_dir"])

        # Create folders if not existing
        create_folders([train_path, val_path, train_im_path, train_label_path,
                        val_im_path, val_label_path, test_path, test_im_path,
                        test_label_path])

        # Copy or symlink?
        if copy:
            from shutil import copyfile
            move_func = copyfile
        elif file_list:
            move_func = _add_to_file_list_fallback
        else:
            move_func = os.symlink

        # Add test data to test folder
        add_images(split, test_im_path, test_label_path, im_dir, lab_dir, move_func)

        # Join remaining splits into train+val
        remaining = [x for ind, x in enumerate(splits) if ind != i]
        remaining = [item for sublist in remaining for item in sublist]

        # Extract validation data from the remaining
        random.shuffle(remaining)
        validation = remaining[:N_val]
        training = remaining[N_val:]

        # Add
        if validation:
            add_images(validation, val_im_path, val_label_path, im_dir, lab_dir, move_func)
        add_images(training, train_im_path, train_label_path, im_dir, lab_dir, move_func)
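
A worked example of the split-size arithmetic above, e.g. 100 images with 5-fold CV and a 10% validation fraction:

import numpy as np

N_total, n_splits, val_frac = 100, 5, 0.10
N_test = N_total // n_splits              # 20 test images per split
N_val = int(np.ceil(N_total * val_frac))  # 10 validation images
N_train = N_total - N_test - N_val        # 70 training images
assert N_val + N_test < N_total           # otherwise no training samples left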
Example #12
def run(args):
    cv_dir = os.path.abspath(args.CV_dir)
    # Get list of folders of CV data to run on
    cv_folders = get_CV_folders(cv_dir)
    assert_args(args, n_splits=len(cv_folders))
    out_dir = os.path.abspath(args.out_dir)
    hparams_dir = os.path.abspath(args.hparams_prototype_dir)
    prepare_hparams_dir(hparams_dir)
    create_folders(out_dir)

    # Wait for PID?
    if args.wait_for:
        from mpunet.utils import await_PIDs
        await_PIDs(args.wait_for)

    if args.run_on_split is not None:
        cv_folders = [cv_folders[args.run_on_split]]
        log_appendix = "_split{}".format(args.run_on_split)
    else:
        log_appendix = ""

    # Get a logger object
    logger = Logger(base_path="./",
                    active_file="output" + log_appendix,
                    print_calling_method=False,
                    overwrite_existing=True)

    if args.force_GPU:
        # Only these GPUs will be chosen from
        from mpunet.utils import set_gpu
        set_gpu(args.force_GPU)
    if args.num_GPUs:
        # Get GPU sets (up to the number of splits)
        gpu_sets = get_free_GPU_sets(args.num_GPUs,
                                     args.ignore_GPU)[:len(cv_folders)]
    elif not args.num_jobs or args.num_jobs < 0:
        raise ValueError("Should specify a number of jobs to run in parallel "
                         "with the --num_jobs flag when using 0 GPUs per "
                         "process (--num_GPUs=0 was set).")
    else:
        gpu_sets = ["''"] * args.num_jobs

    # Get process pool, lock and GPU queue objects
    lock = Lock()
    gpu_queue = Queue()
    for gpu in gpu_sets:
        gpu_queue.put(gpu)

    # Get file paths
    script = os.path.abspath(args.script_prototype)

    # Get GPU monitor process
    running_processes, stop_event = start_gpu_monitor_process(
        args, gpu_queue, gpu_sets, logger)

    try:
        for cv_folder in cv_folders[args.start_from:]:
            gpus = gpu_queue.get()
            t = Process(target=run_sub_experiment,
                        args=(cv_folder, out_dir, script, hparams_dir,
                              args.no_hparams, gpus, gpu_queue, lock, logger))
            t.start()
            running_processes.append(t)
            for t in running_processes:
                if not t.is_alive():
                    t.join()
    except KeyboardInterrupt:
        for t in running_processes:
            t.terminate()
    if stop_event is not None:
        stop_event.set()
    for t in running_processes:
        t.join()