# ---- Example 1 ----
    'validation_droupout': False,
    'in_size': in_size,
    'cuda': cuda,
    'max_epochs': max_epochs
}
#'conv_block':[8, 16, 32, 64, 128]

# Resolve the cache directory and derive the result name for this run.
dir_cache = get_cache_dir(root_fs=root_fs)
#load_from_dir = ['{}/{}/'.format(dir_cache, data_name_train), '{}/{}/'.format(dir_cache, data_name_val)]
res_name = '{}_{}'.format(base_name, data_name_train)
# [None] means samples are generated on the fly instead of read from a cache dir.
load_from_dir = [None]

doit = do_training(res_dir, res_name, verbose)

#transforms = get_motion_transform('random_noise_1')
transforms = get_motion_transform('AffFFT_random_noise')

if do_eval:

    # Imports kept local to the eval branch so they are only paid when evaluating.
    from torchio.transforms import CropOrPad, RandomAffine, RescaleIntensity, ApplyMask, RandomBiasField, RandomNoise, \
        Interpolation, RandomAffineFFT

    from utils_file import get_parent_path, gfile, gdir
    from utils import get_ep_iter_from_res_name

    # Base evaluation transform: additive random noise, std drawn from [0.020, 0.2].
    tc = [RandomNoise(std=(0.020, 0.2))]

    if add_affine_rot > 0 or add_affine_zoom > 0:
        if add_affine_zoom == 0: add_affine_zoom = 1  # 0 -> no affine so 1
        # tc.append(RandomAffine(scales=(add_affine_zoom, add_affine_zoom), degrees=(add_affine_rot, add_affine_rot),
        #                        image_interpolation = Interpolation.NEAREST ))
# ---- Example 2 ----
# Cached-sample directories for the train/val sets; samples are loaded from here.
load_from_dir = [
    '{}/{}/'.format(dir_cache, data_name_train),
    '{}/{}/'.format(dir_cache, data_name_val)
]
res_name = '{}_{}'.format(base_name, data_name_train)

doit = do_training(res_dir, res_name, verbose)

if do_save:
    #rr test
    # Saving mode: generate samples on the fly (no cache) and write them out.
    load_from_dir = [None]
    res_dir = res_name = '/data/romain/HCPdata'
    train_csv_file, val_csv_file = res_name + '/healthy_brain_ms_train_hcp400.csv', res_name + '/healthy_brain_ms_val_hcp200.csv'
    doit = do_training(res_dir, res_name, verbose)

    transforms = get_motion_transform()
    # NOTE(review): this overwrites the csv paths assigned just above —
    # presumably intentional (the hard-coded paths look like a leftover test); confirm.
    train_csv_file, val_csv_file = get_train_and_val_csv(data_name_train,
                                                         root_fs=root_fs)
    doit.set_data_loader(train_csv_file=train_csv_file,
                         val_csv_file=val_csv_file,
                         transforms=transforms,
                         batch_size=batch_size,
                         num_workers=num_workers,
                         save_to_dir=load_from_dir[0],
                         replicate_suj=20)
    doit.save_to_dir(
        load_from_dir
    )  # no more use, because it is much faster on cluster with job created by

elif do_eval:
    doit.set_data_loader(batch_size=batch_size,
# ---- Example 3 ----
res_name = '{}_{}'.format(base_name, data_name_train)
# No cached samples: generate on the fly.
load_from_dir = [None]

# CATI data is cropped/padded to a fixed shape using its brain mask.
if 'cati' in data_name_train:
    target_shape, mask_key = (182, 218, 182), 'brain'
    print('adding a CropOrPad {} with mask key {}'.format(
        target_shape, mask_key))
    tc = [
        CropOrPad(target_shape=target_shape, mode='mask', mask_key=mask_key),
    ]
else:
    tc = None

doit = do_training(res_dir, res_name, verbose)

transforms = get_motion_transform('random_noise_1')

if do_eval:
    # Evaluation reuses the training csv as the validation csv.
    val_csv_file = train_csv_file

doit.set_data_loader(train_csv_file=train_csv_file,
                     val_csv_file=val_csv_file,
                     transforms=transforms,
                     batch_size=batch_size,
                     num_workers=num_workers,
                     save_to_dir=load_from_dir[0],
                     replicate_suj=nb_replicate)

doit.set_model(par_model)
if do_eval:
    # When only evaluating, validate on the same loader as training.
    doit.val_dataloader = doit.train_dataloader
# ---- Example 4 ----
        options.seed), options.res_dir
    # NOTE(review): np.int is removed in NumPy >= 1.24 — prefer plain int(...).
    index, nb_sample = np.int(options.index_num), np.int(options.nb_sample)
    plot_volume, keep_all, keep_brain = options.plot_volume, options.keep_all, options.keep_brain
    motion_type = options.motion_type

    import os

    # Output subdirectories for motion parameters and figures.
    resdir_mvt = res_dir + '/mvt_param/'
    resdir_fig = res_dir + '/fig/'
    try:  #on cluster, all jobs may attempt the mkdir at the same time ...
        if not os.path.isdir(resdir_mvt): os.mkdir(resdir_mvt)
        if not os.path.isdir(resdir_fig): os.mkdir(resdir_fig)
    except:  # NOTE(review): bare except hides real errors; os.makedirs(..., exist_ok=True) would be safer.
        pass

    transfo = get_motion_transform(type=motion_type)

    # Seed both torch and numpy so the motion simulation is reproducible.
    torch.manual_seed(seed)
    np.random.seed(seed)

    # Look for mask / tissue-probability-map files next to the input image.
    dir_img = get_parent_path([fin])[0]
    fm = gfile(dir_img, '^mask', {"items": 1})
    fp1 = gfile(dir_img, '^p1', {"items": 1})
    fp2 = gfile(dir_img, '^p2', {"items": 1})
    if len(fm) == 0:  #may be in cat12 subdir (like for HCP)
        fm = gfile(dir_img, '^brain_T1', {"items": 1})
        #dir_cat = gdir(dir_img,'cat12')
        #fm = gfile(dir_cat, '^mask_brain', {"items": 1})
        #fp1 = gfile(dir_cat, '^p1', {"items": 1})
        #fp2 = gfile(dir_cat, '^p2', {"items": 1})
# ---- Example 5 ----
def get_dataset_from_option(options):
    """Build a do_training instance and its data loader from CLI options.

    Data comes either from a directory of pre-computed samples
    (``options.sample_dir``) or from the list of image files in
    ``options.image_in``.  A torchio transform pipeline is assembled from
    the ``add_*`` option flags, and ``name_suffix`` records which
    transforms were applied.

    Returns:
        tuple: ``(doit, name_suffix, target)`` — the configured
        do_training object, the suffix describing applied transforms, and
        the regression target name (None unless loading from samples).
    """

    fin = options.image_in
    dir_sample = options.sample_dir
    add_affine_zoom, add_affine_rot = options.add_affine_zoom, options.add_affine_rot

    batch_size, num_workers = options.batch_size, options.num_workers

    doit = do_training('/tmp/', 'not_use', verbose=True)

    # Assemble the transform list.  Note: no '_' inside the individual
    # name_suffix parts (the suffix is '_'-delimited).
    tc = []
    name_suffix = ''
    # Bug fix: mask_key was only bound inside the add_cut_mask branch, yet it
    # is passed unconditionally to set_data_loader_from_file_list() below,
    # raising NameError when add_cut_mask <= 0 and no sample dir is given.
    mask_key = None
    if options.add_cut_mask > 0:
        target_shape, mask_key = (182, 218, 182), 'brain'
        tc = [
            CropOrPad(target_shape=target_shape, mask_name=mask_key),
        ]
        name_suffix += '_tCropBrain'

    if add_affine_rot > 0 or add_affine_zoom > 0:
        if add_affine_zoom == 0: add_affine_zoom = 1  # 0 -> identity scale
        tc.append(
            RandomAffine(scales=(add_affine_zoom, add_affine_zoom),
                         degrees=(add_affine_rot, add_affine_rot),
                         image_interpolation=Interpolation.NEAREST))
        name_suffix += '_tAffineS{}R{}'.format(add_affine_zoom, add_affine_rot)

    # For HCP data the brain mask must be applied before RescaleIntensity.
    mask_brain = False
    if options.add_mask_brain:
        tc.append(ApplyMask(masking_method='brain'))
        name_suffix += '_tMaskBrain'
        mask_brain = True

    if options.add_rescal_Imax:
        tc.append(RescaleIntensity(percentiles=(0, 99)))
        name_suffix += '_tRescale-0-99'

    if options.add_elastic1:
        tc.append(get_motion_transform(type='elastic1'))
        name_suffix += '_tElastic1'

    if options.add_bias:
        tc.append(RandomBiasField())
        name_suffix += '_tBias'

    if len(name_suffix) == 0:
        name_suffix = '_Raw'

    target = None
    if len(tc) == 0: tc = None

    add_to_load, add_to_load_regexp = None, None

    if len(dir_sample) > 0:
        # Load pre-computed (simulated) samples from dir_sample.
        print('loading from {}'.format(dir_sample))
        if options.add_orig:
            add_to_load, add_to_load_regexp = 'original', 'notused'

        data_name = get_parent_path(dir_sample)[1]
        if mask_brain and 'hcp' in data_name:
            add_to_load_regexp = 'brain_T'
            if add_to_load is None:
                add_to_load = 'brain'
            else:
                add_to_load += 'brain'

        doit.set_data_loader(batch_size=batch_size,
                             num_workers=num_workers,
                             load_from_dir=dir_sample,
                             transforms=tc,
                             add_to_load=add_to_load,
                             add_to_load_regexp=add_to_load_regexp)

        name_suffix = 'On_' + data_name + name_suffix
        target = options.target  #'ssim' # samples are simulations, so a target exists
    else:
        print('working on ')
        for ff in fin:
            print(ff)

        # mask_key may be None here (see default above); previously this call
        # raised NameError when the crop-mask option was off.
        doit.set_data_loader_from_file_list(fin,
                                            transforms=tc,
                                            batch_size=batch_size,
                                            num_workers=num_workers,
                                            mask_key=mask_key,
                                            mask_regex='^mask')

    return doit, name_suffix, target