Example #1
def main(overwrite=False):
    # convert input images into an hdf5 file
    if overwrite or not os.path.exists(config["hdf5_file"]):
        write_data_to_file(config["data_dir"],
                           config["hdf5_file"],
                           image_shape=config["image_shape"],
                           nb_channels=config["nb_channels"])
    hdf5_file_opened = tables.open_file(config["hdf5_file"], "r")

    if not overwrite and os.path.exists(config["model_file"]):
        model = load_old_model(config["model_file"])
    else:
        # instantiate new model
        model = unet_model_3d()

    # get training and testing generators
    train_generator, test_generator, nb_train_samples, nb_test_samples = get_training_and_testing_generators(
        hdf5_file_opened,
        batch_size=config["batch_size"],
        data_split=config["validation_split"],
        overwrite=overwrite)

    # run training
    train_model(model, config["model_file"], train_generator, test_generator,
                nb_train_samples, nb_test_samples)
    hdf5_file_opened.close()
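This snippet relies on a module-level `config` dict that is not shown. A minimal sketch covering just the keys used above might look like the following; the values are illustrative guesses, not from the source:

# Hypothetical config for illustration; keys match the snippet, values are guesses.
config = {
    "data_dir": "data/",               # directory of raw input images
    "hdf5_file": "data/data_file.h5",  # where preprocessed volumes are written
    "image_shape": (144, 144, 144),    # target volume shape
    "nb_channels": 1,                  # input modalities per volume
    "model_file": "model.h5",          # serialized model path
    "batch_size": 1,
    "validation_split": 0.8,           # train/test split passed as data_split
}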
Example #2
def train():
    print('-'*30)
    print('Loading and preprocessing train data...')
    print('-'*30)
    imgs_train, imgs_gtruth_train = load_train_data()
    
    print('-'*30)
    print('Loading and preprocessing validation data...')
    print('-'*30)
    imgs_val, imgs_gtruth_val = load_validatation_data()
    
    print('-'*30)
    print('Creating and compiling model...')
    print('-'*30)

    # create a model
    model = unet_model_3d(input_shape=config["input_shape"],
                          depth=config["depth"],
                          pool_size=config["pool_size"],
                          n_labels=config["n_labels"],
                          initial_learning_rate=config["initial_learning_rate"],
                          deconvolution=config["deconvolution"])

    model.summary()
    
    print('-'*30)
    print('Fitting model...')
    print('-'*30)
    
    print('training starting..')
    log_filename = 'outputs/' + image_type + '_model_train.csv'

    # log per-epoch metrics to a CSV file
    csv_log = callbacks.CSVLogger(log_filename, separator=',', append=True)

    # keep only the weights with the lowest validation loss
    # (alternative name: image_type + "_best_weight_model_{epoch:03d}_{val_loss:.4f}.hdf5")
    checkpoint_filepath = 'outputs/' + 'weights.h5'
    checkpoint = callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_loss',
                                           verbose=1, save_best_only=True, mode='min')

    callbacks_list = [csv_log, checkpoint]
    callbacks_list.append(ReduceLROnPlateau(factor=config["learning_rate_drop"],
                                            patience=config["patience"], verbose=True))
    callbacks_list.append(EarlyStopping(verbose=True, patience=config["early_stop"]))

    hist = model.fit(imgs_train, imgs_gtruth_train,
                     batch_size=config["batch_size"],
                     epochs=config["n_epochs"],
                     verbose=1,
                     validation_data=(imgs_val, imgs_gtruth_val),
                     shuffle=True,
                     callbacks=callbacks_list)

    # save the final model (architecture + weights) as an HDF5 file
    model_name = 'outputs/' + image_type + '_model_last.h5'
    model.save(model_name)
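Since `hist` captures the training history, a short follow-up can persist the per-epoch metrics alongside the CSV log. This is a hypothetical addition, not part of the source:

# Hypothetical follow-up: dump per-epoch metrics (loss, val_loss, ...) to JSON.
import json

with open('outputs/' + image_type + '_history.json', 'w') as f:
    # cast to plain floats in case the backend returns numpy scalars
    json.dump({k: [float(x) for x in v] for k, v in hist.history.items()}, f, indent=2)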
Example #3
def predict(img_dir_name):
    # create a model
    model = unet_model_3d(
        input_shape=config["input_shape"],
        depth=config["depth"],
        pool_size=config["pool_size"],
        n_labels=config["n_labels"],
        initial_learning_rate=config["initial_learning_rate"],
        deconvolution=config["deconvolution"])

    model.summary()

    checkpoint_filepath = 'outputs/' + checkpoint_filename
    model.load_weights(checkpoint_filepath)

    SegmentedVolume = np.zeros((image_rows, image_cols, image_depth))

    img_mask_name = img_dir_name + '_mask.nii.gz'
    img_mask_name = os.path.join(train_imgs_path, img_dir_name, img_mask_name)

    img_mask = nib.load(img_mask_name)
    img_mask_data = img_mask.get_fdata()  # get_data() is deprecated and removed in nibabel 5

    patches_training_imgs_3d, rows, cols, depths = create_slice_testing(
        img_dir_name)
    imgs_valid_predict = model.predict(patches_training_imgs_3d)
    label_final = write_predict(imgs_valid_predict, rows, cols, depths)

    # keep predicted labels only inside the mask; zero them elsewhere
    for i in range(0, SegmentedVolume.shape[0]):
        for j in range(0, SegmentedVolume.shape[1]):
            for k in range(0, SegmentedVolume.shape[2]):
                if img_mask_data.item((i, j, k)) == 1:
                    SegmentedVolume.itemset((i, j, k),
                                            label_final.item((i, j, k)))
                else:
                    label_final.itemset((i, j, k), 0)

    print('done')

    data = SegmentedVolume
    img = nib.Nifti1Image(data, np.eye(4))
    if num_classes == 3:
        img_name = img_dir_name + '_predicted_3class_unet3d.nii.gz'
    else:
        img_name = img_dir_name + '_predicted_4class_unet3d_extract12_hist15.nii.gz'
    nib.save(img, os.path.join('../data_new', write_path, img_name))
    print('-' * 30)
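The element-wise triple loop above is correct but slow on large volumes. Assuming `img_mask_data` and `label_final` have the same shape as `SegmentedVolume`, a vectorized equivalent (a sketch, not from the source) is:

# Vectorized equivalent of the triple loop, same semantics.
mask = img_mask_data == 1
SegmentedVolume = np.where(mask, label_final, 0)  # predicted labels inside the mask, 0 outside
label_final = np.where(mask, label_final, 0)      # zero label_final outside the mask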
Example #4
tst_fns = read_txt(test_file)
val_fns = tst_fns

## loading volumes
ph_vols = np.stack([
    np.load(os.path.join(DATA_DIR, 'phase', vol_fn + '.npy'))
    for vol_fn in tst_fns
])
fl1_vols = np.stack([
    np.load(os.path.join(DATA_DIR, 'fl1', vol_fn + '.npy'))
    for vol_fn in tst_fns
])

## network architecture
model = unet_model_3d(input_shape=(1, None, None, None),
                      n_base_filters=16,
                      pool_size=(2, 2, 2),
                      depth=5)
model_name = '3D_phase'
model_folder = '/data/models_fl/{}'.format(model_name)
best_weight = model_folder + '/best_model.h5'
model.load_weights(best_weight)

## padding
ph_vol = ph_vols[0].transpose([1, 2, 0])
gt_vol = fl1_vols[0].transpose([1, 2, 0])
xl, yl, zl = ph_vol.shape
ix, iy, iz = 0, 0, 0                       # window origin
ex, ey, ez = ix + 256, iy + 256, iz + 32   # window extent: 256 x 256 x 32
all_voi_list = []
nb_voi = 0
iz, ez = 0, 0                              # reset depth indices
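The excerpt stops after initializing the 256×256×32 window indices. A plausible continuation, purely an assumption that reuses `ph_vol`, `all_voi_list`, and `nb_voi` from above, would tile the volume into VOIs for patch-wise inference:

# Assumed continuation: slide a 256x256x32 window over the volume.
# Edge windows are skipped here for simplicity; the original likely pads instead.
for ix in range(0, xl - 255, 256):
    for iy in range(0, yl - 255, 256):
        for iz in range(0, zl - 31, 32):
            voi = ph_vol[ix:ix + 256, iy:iy + 256, iz:iz + 32]
            all_voi_list.append(voi)
            nb_voi += 1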
Example #5
    params = {
        'dim': (nx, ny, TIME, 1),
        'batch_size': batch_size,
        'n_channels': 1,
        'shuffle': True,
        'name': name_save
    }
    generator_train = data_loader.DataGenerator3D(List_id_train, **params)
    generator_test = data_loader.DataGenerator3D(List_id_test, **params)
    # Model
    # Change model parameters here
    net = model.unet_model_3d(input_shape=(nx, ny, TIME, 1),
                              pool_size=(2, 2, 1),
                              n_labels=1,
                              deconvolution=False,
                              depth=4,
                              n_base_filters=16,
                              include_label_wise_dice_coefficients=False,
                              metrics='mse',
                              batch_normalization=True,
                              activation_name="sigmoid")
elif model_name == 'LSTM':
    # Data
    X_train, Y_train = data_loader.path_to_time_batchs(path_train,
                                                       nx,
                                                       ny,
                                                       TIME=TIME,
                                                       type_im=type_im,
                                                       format_im='png',
                                                       code_im1='images',
                                                       code_im2='masks')
    X_train = np.expand_dims(np.array(X_train, dtype=np.float32), 4)
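Neither branch shows the actual training call. For the 3D U-Net branch, assuming `DataGenerator3D` is a Keras `Sequence`, a hedged sketch using the names built above (the epoch count is invented) would be:

# Hypothetical training step; TF2-era model.fit accepts a Sequence directly
# (older Keras versions used fit_generator instead).
net.fit(generator_train,
        validation_data=generator_test,
        epochs=100)  # invented value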
Example #6
## parameters
dim, dep, val_dim, val_dep, scale = args.dim, args.dep, args.val_dim, args.val_dep, args.scale
BATCH_SIZE, EPOCHS, LR, decay = args.batch_size, args.epoch, args.lr, args.decay
levels, filters = args.levels, args.filters
model_name = '3d-set-{}-dim-{}-dep-{}-val_dim-{}-val_dep-{}-bz-{}-lr-{}-level-{}-filters-{}-ep-{}-decay-{}-scale-{}'.format(
    dataset, dim, dep, val_dim, val_dep, BATCH_SIZE, LR, levels, filters,
    EPOCHS, decay, scale)
model_folder = '/data/3d_models/{}/{}'.format(
    dataset, model_name) if args.docker else './3d_models/{}/{}'.format(
        dataset, model_name)
generate_folder(model_folder)

## network architecture
model = unet_model_3d(input_shape=(1, None, None, None),
                      n_base_filters=filters,
                      pool_size=(2, 2, 1),
                      depth=levels)

## build the training/validation datasets and sanity-check their shapes
train_dataset = Dataset(X_dir,
                        Y_dir,
                        trn_fns,
                        scale=scale,
                        hsizes=[dim, dim, dep])
valid_dataset = Dataset(X_dir,
                        Y_dir,
                        tst_fns,
                        scale=scale,
                        hsizes=[val_dim, val_dim, val_dep])
print(train_dataset[0][0].shape, train_dataset[0][1].shape)
print(valid_dataset[0][0].shape, valid_dataset[0][1].shape)
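The helper `generate_folder` used above is not shown in the excerpt; presumably it just creates the output directory tree, along the lines of this assumed definition:

import os

def generate_folder(folder):
    # create the folder (and any missing parents) if it does not already exist
    os.makedirs(folder, exist_ok=True)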