)
    # NOTE(review): fragment — the enclosing function / parser construction
    # starts before this chunk; the stray ')' above closes an add_argument
    # call whose opening is not visible here.
    # Source experiment directory providing the configs and data_loader.py.
    parser.add_argument(
        '--exp_source',
        type=str,
        default='experiments/toy_exp',
        help=
        'specifies, from which source experiment to load configs and data_loader.'
    )

    args = parser.parse_args()
    # Cross-validation folds to run; None means "all folds" (see below).
    folds = args.folds
    resume_to_checkpoint = args.resume_to_checkpoint

    if args.mode == 'train' or args.mode == 'train_test':

        # Build/load the experiment configuration for this run.
        cf = utils.prep_exp(args.exp_source, args.exp_dir, args.server_env,
                            args.use_stored_settings)
        cf.slurm_job_id = args.slurm_job_id
        # Dynamically import the model and data-loader modules from the
        # source experiment, so each experiment can ship its own code.
        model = utils.import_module('model', cf.model_path)
        data_loader = utils.import_module(
            'dl', os.path.join(args.exp_source, 'data_loader.py'))
        if folds is None:
            # No explicit folds requested: train every CV split.
            folds = range(cf.n_cv_splits)

        for fold in folds:
            # Per-fold output directory and state; created if missing.
            cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold))
            cf.fold = fold
            cf.resume_to_checkpoint = resume_to_checkpoint
            if not os.path.exists(cf.fold_dir):
                os.mkdir(cf.fold_dir)
            logger = utils.get_logger(cf.fold_dir)
            train(logger)
# Beispiel #2 ("Example #2" — separator artifact from the aggregated code listing)
# 0 (score/vote count from the listing)
    # NOTE(review): fragment — the enclosing function / parser construction
    # starts before this chunk.
    parser.add_argument('-d',
                        '--dev',
                        default=False,
                        action='store_true',
                        help="development mode: shorten everything")

    args = parser.parse_args()
    # Ensure the dataset path is rooted under "datasets/" unless the user
    # already passed a path containing that segment.
    args.dataset_name = os.path.join(
        "datasets", args.dataset_name
    ) if not "datasets" in args.dataset_name else args.dataset_name
    folds = args.folds
    # Treat the CLI strings 'None'/'none' as "no resume checkpoint".
    resume = None if args.resume in ['None', 'none'] else args.resume

    if args.mode == 'create_exp':
        # Only create the experiment directory and config, then log and stop.
        cf = utils.prep_exp(args.dataset_name,
                            args.exp_dir,
                            args.server_env,
                            use_stored_settings=False)
        logger = utils.get_logger(cf.exp_dir, cf.server_env, -1)
        logger.info('created experiment directory at {}'.format(args.exp_dir))

    elif args.mode == 'train' or args.mode == 'train_test':
        cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env,
                            args.use_stored_settings)
        if args.dev:
            # Development mode: shrink folds, batch sizes, epoch counts and
            # patient counts so a full train/test cycle finishes quickly.
            folds = [0, 1]
            cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim == 2 else 1, 2, 0, 2
            cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1
            cf.test_n_epochs, cf.max_test_patients = cf.save_n_models, 2
            # cudnn benchmark only for 3D in dev mode — TODO confirm rationale.
            torch.backends.cudnn.benchmark = cf.dim == 3
        else:
            torch.backends.cudnn.benchmark = cf.cuda_benchmark
# Beispiel #3 ("Example #3" — separator artifact from the aggregated code listing)
# 0 (score/vote count from the listing)
def main():
    """CLI entry point for running group prediction with a trained UNet.

    Parses the prediction options, loads the experiment configuration via
    ``prep_exp``, normalises the folds / tta / overwrite arguments, and
    dispatches to ``predict_group``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", '--input_folder',
                        help="Should contain all modalities for each patient in the correct order(same as training).",
                        required=True)
    parser.add_argument('-o', "--output_folder", required=True, help="folder for saving predictions")
    parser.add_argument('-t', '--task_name',
                        help='task name, required.',
                        default=default_plans_identifier, required=True)

    parser.add_argument('-tr', '--unet_trainer', help='UNet trainer class. Default: Trainer', required=False,
                        default='Trainer')
    parser.add_argument('-m', '--model', help="2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. Default: 3d_fullres",
                        default="3d_fullres", required=False)
    parser.add_argument('-p', '--plans_identifier', help='plans ID',
                        default=default_plans_identifier, required=False)

    parser.add_argument('-f', '--folds', nargs='+', default='None',
                        help="folds to use for prediction. Default is None ")
    parser.add_argument('-z', '--save_npz', required=False, action='store_true',
                        help="use this if you want to ensemble")
    parser.add_argument('-l', '--lowres_segmentations', required=False, default='None',
                        help="if model is the highres, need to use -l to specify where the segmentations of the "
                         "corresponding lowres unet are. and required to do a prediction")
    parser.add_argument("--part_id", type=int, required=False, default=0,
                        help="Used to parallelize the prediction of the folder over several GPUs.")
    parser.add_argument("--num_parts", type=int, required=False, default=1,
                        help="Used to parallelize the prediction of the folder over several GPUs.")
    parser.add_argument("--num_threads_preprocessing", required=False, default=6, type=int,
                        help="Determines many background processes will be used for data preprocessing. Default: 6")
    parser.add_argument("--num_threads_nifti_save", required=False, default=2, type=int,
                        help="Determines many background processes will be used for segmentation export. Default: 2")
    parser.add_argument("--tta", required=False, type=int, default=1,
                        help="test time data augmentation. 0: disable; (e.g. speedup of factor 4(2D)/8(3D)).")
    parser.add_argument("--overwrite_existing", required=False, type=int, default=1,
                        help="Set this to 0 if you need to resume a previous prediction. ")
    parser.add_argument('--exp_dir', type=str, default='/path/to/experiment/directory',
                        help='path to experiment dir. will be created if non existent.')
    parser.add_argument('--server_env', default=False, action='store_true',
                        help='change IO settings to deploy models on a cluster.')
    parser.add_argument('--exp_source', type=str, default='experiments/toy_exp',
                        help='specifies, from which source experiment to load configs and data_loader.')

    args = parser.parse_args()

    # Experiment configuration, created/loaded from the source experiment.
    cf = prep_exp(args.exp_source, args.exp_dir, args.server_env, is_training=True, use_stored_settings=True)

    # Folder holding the trained model for this net/task/trainer/plans combo.
    model_folder = join(net_training_out_dir, args.model, args.task_name,
                        args.unet_trainer + "__" + args.plans_identifier)
    print("using model stored in ", model_folder)
    assert isdir(model_folder), "model output folder not found: %s" % model_folder

    # '-l None' (the default) means no lowres segmentations are provided.
    lowres_segmentations = args.lowres_segmentations
    if lowres_segmentations == "None":
        lowres_segmentations = None

    # Normalise --folds: the string 'None' -> None (all folds), the single
    # entry ['all'] is passed through, otherwise fold ids become ints.
    folds = args.folds
    if isinstance(folds, list):
        if folds != ['all']:
            folds = [int(fold_id) for fold_id in folds]
    elif folds == "None":
        folds = None
    else:
        raise ValueError("Unexpected value for argument folds")

    # --tta and --overwrite_existing arrive as 0/1 ints; map them to bools.
    if args.tta not in (0, 1):
        raise ValueError("Unexpected value for tta, Use 1 or 0")
    tta = bool(args.tta)

    if args.overwrite_existing not in (0, 1):
        raise ValueError("Unexpected value for overwrite, Use 1 or 0")
    overwrite = bool(args.overwrite_existing)

    predict_group(cf, model_folder, args.input_folder, args.output_folder, folds,
                  args.save_npz, args.num_threads_preprocessing,
                  args.num_threads_nifti_save, lowres_segmentations, args.part_id,
                  args.num_parts, tta, overwrite_existing=overwrite)
    )
    # NOTE(review): fragment — the enclosing function / parser construction
    # starts before this chunk; the ')' above closes an add_argument call
    # whose opening is not visible here.
    # Source experiment directory providing the configs and data_loader.py.
    parser.add_argument(
        '--exp_source',
        type=str,
        default='experiments/abus_exp/',
        help=
        'specifies, from which source experiment to load configs and data_loader.'
    )

    args = parser.parse_args()
    folds = args.folds
    resume_to_checkpoint = args.resume_to_checkpoint

    if args.mode == 'train':

        cf = utils.prep_exp(args.exp_dir, is_training=True)

        cf.resume_to_checkpoint = resume_to_checkpoint  #default:None

        # Dynamically import the model and data-loader modules from the
        # source experiment.
        model = utils.import_module('model', cf.model_path)
        data_loader = utils.import_module(
            'dl', os.path.join(args.exp_source, 'data_loader.py'))

        # NOTE(review): unlike the sibling driver, folds is not defaulted
        # here — if --folds was omitted (None) this loop would raise a
        # TypeError; verify the argument is required at the parser.
        for fold in folds:
            cf.fold_dir = os.path.join(
                cf.exp_dir, 'fold_{}'.format(fold))  #path to save results
            cf.fold = fold
            if not os.path.exists(cf.fold_dir):
                os.mkdir(cf.fold_dir)
            logger = utils.get_logger(cf.fold_dir)  #loginfo for this fold
def main():
    """CLI entry point for training (and optionally validating) a UNet.

    Parses net/trainer/task/fold plus training flags, resolves the trainer
    class from the default configuration, runs training and/or validation,
    and — for the 3d_lowres net — predicts inputs for the cascade's next
    stage.
    """
    parser = argparse.ArgumentParser()
    # Positional arguments: network variant, trainer class name, task, fold.
    parser.add_argument("net")
    parser.add_argument("net_trainer")
    parser.add_argument("task")
    parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
    parser.add_argument("-val",
                        "--validation_only",
                        help="use this if you want to only run the validation",
                        action="store_true")
    parser.add_argument("-c",
                        "--continue_training",
                        help="use this if you want to continue a training",
                        action="store_true")
    parser.add_argument("-p",
                        help="plans identifier",
                        default=default_plans_identifier,
                        required=False)
    parser.add_argument("-u",
                        "--unpack_data",
                        help="Leave it as 1, development only",
                        required=False,
                        default=1,
                        type=int)
    parser.add_argument(
        "--ndet",
        help=
        "nondeterministic training, it allows cudnn.benchmark which will can increase performance."
        "default training is deterministic.",
        required=False,
        default=False,
        action="store_true")
    parser.add_argument(
        "--npz",
        required=False,
        default=False,
        action="store_true",
        help=
        "if set then UNet will export npz files of predicted segmentations in the vlaidation as well."
    )
    parser.add_argument("--find_lr",
                        required=False,
                        default=False,
                        action="store_true",
                        help="not used, for analysis only.")
    parser.add_argument("--valbest",
                        required=False,
                        default=False,
                        action="store_true",
                        help="hands off. for analysis only.")
    parser.add_argument(
        '--exp_dir',
        type=str,
        default='/path/to/experiment/directory',
        help='path to experiment dir. will be created if non existent.')
    parser.add_argument(
        '--server_env',
        default=False,
        action='store_true',
        help='change IO settings to deploy models on a cluster.')
    parser.add_argument(
        '--exp_source',
        type=str,
        default='experiments/demo_exp',
        help=
        'specifies, from which source experiment to load configs and data_loader.'
    )

    args = parser.parse_args()

    task = args.task
    fold = args.fold
    net = args.net
    net_trainer = args.net_trainer
    validation_only = args.validation_only
    plans_identifier = args.p
    find_lr = args.find_lr
    unpack = args.unpack_data
    # --ndet enables nondeterminism, so determinism is its negation.
    deterministic = not args.ndet
    valbest = args.valbest

    # -u/--unpack_data is a 0/1 int on the CLI; map it to a bool.
    if unpack == 0:
        unpack = False
    elif unpack == 1:
        unpack = True
    else:
        raise ValueError(
            "Unexpected value for -u/--unpack_data: %s. Use 1 or 0." %
            str(unpack))

    # fold is either the literal string 'all' or a numeric fold index.
    if fold == 'all':
        pass
    else:
        fold = int(fold)

    # Experiment configuration from the source experiment.
    cf = prep_exp(args.exp_source,
                  args.exp_dir,
                  args.server_env,
                  is_training=True,
                  use_stored_settings=True)

    plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
        trainer_class = get_default_configuration(net, task, net_trainer, plans_identifier)

    if trainer_class is None:
        raise RuntimeError("Could not find trainer class in training.trainer")

    # The cascade's full-resolution stage needs a cascade-aware trainer.
    if net == "3d_cascade_fullres":
        assert issubclass(trainer_class, CascadeTrainer), "If running 3d_cascade_fullres then your " \
                           "trainer class must be derived from CascadeTrainer."
    else:
        assert issubclass(
            trainer_class,
            Trainer), "net_trainer was found but is not derived from Trainer"

    trainer = trainer_class(cf,
                            plans_file,
                            fold,
                            output_folder=output_folder_name,
                            dataset_directory=dataset_directory,
                            batch_dice=batch_dice,
                            stage=stage,
                            unpack_data=unpack,
                            deterministic=deterministic)

    # Full initialization (incl. training setup) unless validation-only.
    trainer.initialize(not validation_only)

    if find_lr:
        trainer.find_lr()
    else:
        if not validation_only:
            # Training path: optionally resume from the latest checkpoint.
            if args.continue_training:
                trainer.load_latest_checkpoint()
            trainer.do_training()
        elif not valbest:
            # Validation-only with the latest (not best) checkpoint.
            trainer.load_latest_checkpoint(train=False)

        if valbest:
            # Validate the best checkpoint instead; results go to a
            # separate folder so they don't clobber the regular validation.
            trainer.load_best_checkpoint(train=False)
            val_folder = "validation_best_epoch"
        else:
            val_folder = "validation"

        # predict validation
        trainer.validate(save_softmax=args.npz,
                         validation_folder_name=val_folder)

        if net == '3d_lowres':
            # Produce the lowres predictions consumed by the cascade's
            # next stage, using the best checkpoint.
            trainer.load_best_checkpoint(False)
            print("predicting segmentations for the next stage of the cascade")
            predict_next(
                cf, trainer,
                join(dataset_directory,
                     trainer.plans['data_identifier'] + "_stage%d" % 1))
        default='experiments/toy_exp',
        help=
        'specifies, from which source experiment to load configs and data_loader.'
    )
    # NOTE(review): fragment — the enclosing function and the opening of the
    # add_argument call above are outside this chunk, and the trailing
    # assert below is cut off mid-expression.
    parser.add_argument("net_trainer")
    parser.add_argument("task")
    parser.add_argument("fold", type=int)

    args = parser.parse_args()

    trainerclass = args.net_trainer
    task = args.task
    fold = args.fold
    # Experiment configuration from the source experiment.
    cf = prep_exp(args.exp_source,
                  args.exp_dir,
                  args.server_env,
                  is_training=True,
                  use_stored_settings=True)

    plans_file, folder_with_preprocessing_data, output_folder_name, dataset_directory, batch_dice, stage = \
        get_default_configuration("3d_lowres", task)

    # Resolve the trainer class by name within unet.training.net_training.
    trainer_class = recursive_find_trainer(
        [join(unet.__path__[0], "training", "net_training")], trainerclass,
        "unet.training.net_training")

    if trainer_class is None:
        raise RuntimeError(
            "Could not find trainer class in unet.training.net_training")
    else:
        assert issubclass(