Example #1
0
def validate(batch):
    """Predict 2D U-Net segmentations for partition ``batch`` and save them.

    For every (image, segmentation) path pair in the chosen partition, runs
    the trained U-Net slice-by-slice, reassembles the per-slice predictions
    into a volume, and writes the result as a ``.nii.gz`` file under
    ``cg.seg_data_dir``.

    Parameters
    ----------
    batch : int
        Index of the cross-validation partition to predict on; must not
        be ``None``.

    Raises
    ------
    ValueError
        If ``batch`` is ``None``.
    """
    #===========================================
    dv.section_print('Calculating Image Lists...')

    partition_file_name = 'one_time_frame_4classes'

    # Per-partition lists of image / segmentation file paths produced by
    # the pre-processing step (object arrays of paths, hence allow_pickle).
    imgs_list_tst = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'img_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]
    segs_list_tst = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'seg_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]

    # Fix: test against None with `is`, not `==` (PEP 8 identity check).
    if batch is None:
        raise ValueError('No batch was provided: wrong!')
    imgs_list_tst = imgs_list_tst[batch]
    segs_list_tst = segs_list_tst[batch]

    print(imgs_list_tst.shape)
    #===========================================
    dv.section_print('Loading Saved Weights...')

    # Build the U-NET
    shape = cg.dim + (1, )
    model_inputs = [Input(shape)]
    model_outputs = []
    _, _, unet_output = dvpy.tf_2d.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        layer_name='unet',
        dimension=cg.unetdim,
        unet_depth=cg.unet_depth,
    )(model_inputs[0])
    model_outputs += [unet_output]
    model = Model(inputs=model_inputs, outputs=model_outputs)

    # Load weights; `model_files` is a module-level list expected to hold
    # exactly one .hdf5 weight-file path (see the setup script).
    model.load_weights(model_files[0], by_name=True)

    #===========================================
    dv.section_print('Calculating Predictions...')
    # build data generator (no augmentation at inference time)
    valgen = dv.tf_2d.ImageDataGenerator(
        cg.unetdim,
        input_layer_names=['input_1'],
        output_layer_names=['unet'],
    )

    # predict one patient volume at a time
    for img, seg in zip(imgs_list_tst, segs_list_tst):
        # Path layout assumed: .../<patient_class>/<patient_id>/<subdir>/<file>
        patient_id = os.path.basename(os.path.dirname(os.path.dirname(img)))
        patient_class = os.path.basename(
            os.path.dirname(os.path.dirname(os.path.dirname(img))))
        print(img)
        print(patient_class, patient_id, '\n')

        u_pred = model.predict_generator(
            valgen.flow(
                np.asarray([img]),
                np.asarray([seg]),
                slice_num=cg.slice_num,
                batch_size=cg.slice_num,  # all slices of one volume per step
                relabel_LVOT=cg.relabel_LVOT,
                shuffle=False,  # keep slice order so the volume reassembles
                input_adapter=ut.in_adapt,
                output_adapter=ut.out_adapt,
                shape=cg.dim,
                input_channels=1,
                output_channels=cg.num_classes,
                adapted_already=cg.adapted_already,
            ),
            verbose=1,
            steps=1,
        )

        # save u_net segmentation
        time = ff.find_timeframe(seg, 1, '_')
        u_gt_nii = nb.load(
            os.path.join(cg.seg_data_dir, patient_class, patient_id,
                         'seg-pred-1.5-upsample-retouch',
                         'pred_s_' + str(time) + '.nii.gz')
        )  # load the manual segmentation file for affine matrix
        # Move the slice axis (0) to position 2, then collapse the class
        # probabilities to integer labels.
        u_pred = np.rollaxis(u_pred, 0, 3)
        u_pred = np.argmax(u_pred, axis=-1).astype(np.uint8)
        u_pred = dv.crop_or_pad(u_pred, u_gt_nii.get_fdata().shape)
        u_pred[u_pred == 3] = 4  # use for LVOT only
        u_pred = nb.Nifti1Image(u_pred, u_gt_nii.affine)
        save_file = os.path.join(cg.seg_data_dir, patient_class, patient_id,
                                 'seg-pred-0.625-4classes',
                                 seg_filename + str(time) +
                                 '.nii.gz')  # predicted segmentation file
        os.makedirs(os.path.dirname(save_file), exist_ok=True)
        nb.save(u_pred, save_file)
Example #2
0
# Script-level setup: locate the trained weight file for the requested
# cross-validation batch/epoch, then build the 2D U-Net graph.
# NOTE(review): `view`, `Batch` and `epoch` are not defined in this snippet —
# presumably parsed from the command line earlier; confirm against the caller.
vector = ''  # ignore (unused placeholder)
suffix = ''  # ignore (unused placeholder)
test_set = 'VR_1tf_4class'
print(view, vector, Batch)

# Folder holding this batch's saved model weights.
model_folder = os.path.join(cg.fc_dir, 'models', 'model_batch' + Batch,
                            '2D-UNet-seg')
# Glob pattern for the weight file of the requested epoch; the trailing '-*'
# matches the loss/accuracy suffix embedded in the checkpoint file name.
filename = 'model-' + test_set + '_batch' + Batch + '_s' + suffix + '-' + epoch + '-*'
model_files = ff.find_all_target_files([filename], model_folder)
assert len(model_files) == 1  # exactly one checkpoint must match the pattern
print(model_files)

seg_filename = 'pred_s_'  # define file name of predicted segmentation
###########

dv.section_print('Loading Saved Weights...')

# BUILT U-NET

# Input shape is the configured spatial dims plus one grayscale channel.
shape = cg.dim + (1, )
model_inputs = [Input(shape)]
model_outputs = []
_, _, unet_output = dvpy.tf_2d.get_unet(
    cg.dim,
    cg.num_classes,
    cg.conv_depth,
    layer_name='unet',
    dimension=cg.unetdim,
    unet_depth=cg.unet_depth,
)(model_inputs[0])
Example #3
0
def test_section_print():
    """Smoke test: printing a section header should not raise."""
    header = "Section Header"
    dv.section_print(header)
Example #4
0
def train(batch):
    """Train the 2D U-Net segmentation model, holding out partition ``batch``.

    Loads the per-partition image/segmentation path lists, pops partition
    ``batch`` out as the validation set, builds and compiles the U-Net, and
    fits it with on-the-fly augmentation.

    Parameters
    ----------
    batch : int
        Index of the cross-validation partition to hold out for validation.
        NOTE(review): the ``batch is None`` branch below is unreachable —
        ``imgs_list_trn.pop(batch)`` raises TypeError for ``None`` before it
        is ever checked; confirm whether a train-on-all mode is intended.

    Side effects: writes one ``.hdf5`` weight file per epoch and a CSV
    training log under ``cg.fc_dir``.
    """
    print(cg.dim)
    print('BATCH_SIZE = ', cg.batch_size)

    # define a name of your trial
    test_set = 'VR_1tf_4classes'

    # define partition file
    partition_file_name = 'one_time_frame_4classes'

    # define hdf5 file save folder (hdf5 file is the model weights file)
    print(cg.fc_dir)
    weight_file_save_folder = os.path.join(cg.fc_dir, 'models')

    #===========================================
    dv.section_print('Calculating Image Lists...')

    # obtain image list and segmentation list in training and validation
    # (object arrays of file paths saved by pre-processing, hence allow_pickle)
    imgs_list_trn = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'img_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]
    segs_list_trn = [
        np.load(os.path.join(cg.partition_dir, partition_file_name,
                             'seg_list_' + str(p) + '.npy'),
                allow_pickle=True) for p in range(cg.num_partitions)
    ]

    # Remove the held-out partition from the training lists; it becomes
    # the validation ("tst") set.
    imgs_list_tst = imgs_list_trn.pop(batch)
    segs_list_tst = segs_list_trn.pop(batch)

    # Flatten the remaining partitions into single arrays for training.
    imgs_list_trn = np.concatenate(imgs_list_trn)
    segs_list_trn = np.concatenate(segs_list_trn)

    # Sanity print: counts of train/val images and segmentations.
    len_list = [
        len(imgs_list_trn),
        len(segs_list_trn),
        len(imgs_list_tst),
        len(segs_list_tst)
    ]
    print(len_list, segs_list_trn[0])

    #===========================================
    dv.section_print('Creating and compiling model...')
    # Input shape = configured spatial dims plus one grayscale channel.
    shape = cg.dim + (1, )
    model_inputs = [Input(shape)]
    model_outputs = []
    _, _, unet_output = dvpy.tf_2d.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        layer_name='unet',
        dimension=cg.unetdim,
        unet_depth=cg.unet_depth,
    )(model_inputs[0])
    model_outputs += [unet_output]

    model = Model(inputs=model_inputs, outputs=model_outputs)
    opt = Adam(lr=1e-4)
    # Per-output loss/metric keyed by the U-Net's layer name.
    losses = {'unet': 'categorical_crossentropy'}
    model.compile(optimizer=opt, loss=losses, metrics={
        'unet': 'acc',
    })

    #======================
    dv.section_print('Fitting model...')

    # define the name of each model weight file
    if batch is None:
        model_name = 'model-' + test_set + '_batch_all_s'
        model_fld = 'model_batch_all'
    else:
        model_name = 'model-' + test_set + '_batch' + str(batch) + '_s'
        model_fld = 'model_batch' + str(batch)
    # Checkpoint file name template filled in by keras per epoch.
    filename = model_name + '-{epoch:03d}-{loss:.3f}-{val_loss:.3f}-{val_acc:.4f}.hdf5'
    filepath = os.path.join(weight_file_save_folder, model_fld, '2D-UNet-seg',
                            filename)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)

    # set callbacks
    csv_logger = CSVLogger(
        os.path.join(cg.fc_dir, 'logs', model_name + '_training-log' + '.csv')
    )  # log will automatically record the train_accuracy/loss and validation_accuracy/loss in each epoch
    callbacks = [
        csv_logger,
        ModelCheckpoint(
            filepath,
            monitor='val_loss',
            save_best_only=
            False,  # set True then only save model weight file when "monitor" (Define as val_loss here) gets improved, set False then save every epoch no matter whether the result improves
        ),
        LearningRateScheduler(dv.learning_rate_step_decay2),  # learning decay
    ]

    # Training generator: augmentation enabled via the ranges below.
    datagen = dv.tf_2d.ImageDataGenerator(
        cg.unetdim,  # Dimension of input image
        input_layer_names=['input_1'],
        output_layer_names=['unet'],
        translation_range=cg.
        xy_range,  # randomly shift images vertically (fraction of total height)
        rotation_range=cg.
        rt_range,  # randomly rotate images in the range (degrees, 0 to 180)
        scale_range=cg.zm_range,
        flip=cg.flip,
    )

    datagen_flow = datagen.flow(
        imgs_list_trn,
        segs_list_trn,
        slice_num=cg.slice_num,
        batch_size=cg.batch_size,
        patients_in_one_batch=cg.patients_in_one_batch,
        relabel_LVOT=cg.relabel_LVOT,  # remove it for RV
        shuffle=True,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
        augment=
        True,  # only True in the training process to randomly translate, rotate and scale the image.
        normalize=cg.normalize,
        adapted_already=cg.
        adapted_already,  # True when you already did the image adaption in the pre-processing step.
    )

    # Validation generator: no augmentation arguments.
    valgen = dv.tf_2d.ImageDataGenerator(
        cg.unetdim,
        input_layer_names=['input_1'],
        output_layer_names=['unet'],
    )

    valgen_flow = valgen.flow(
        imgs_list_tst,
        segs_list_tst,
        slice_num=cg.slice_num,
        batch_size=cg.batch_size,
        patients_in_one_batch=1,  # set as 1 in validation
        relabel_LVOT=cg.relabel_LVOT,
        shuffle=True,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
        normalize=cg.normalize,
        adapted_already=cg.adapted_already,
    )

    # Fit the model on the batches generated by datagen.flow().
    # steps = (number of volumes * slices per volume) // batch size.
    model.fit_generator(
        datagen_flow,
        steps_per_epoch=imgs_list_trn.shape[0] * cg.slice_num // cg.batch_size,
        epochs=cg.epochs,
        workers=1,
        validation_data=valgen_flow,
        validation_steps=imgs_list_tst.shape[0] * cg.slice_num //
        cg.batch_size,
        callbacks=callbacks,
        verbose=1,
    )
def train(batch):
    """Train a 3D(?) U-Net on the ED/ES partitions, holding out ``batch``.

    NOTE(review): this redefines ``train`` from the previous example — the
    two are alternative versions pasted into one file, not meant to coexist.

    Parameters
    ----------
    batch : int or None
        Partition index to hold out for validation. When ``None``, trains
        on all partitions and validates on the same data (``batch`` is
        relabelled to the string 'all' for file naming).

    Side effects: saves the best model weights (by val_loss) via
    ``fs.model(batch)``.
    """

    #===========================================
    dv.section_print('Calculating Image Lists...')

    # Per-partition path lists for the 'ED_ES' time frames.
    imgs_list_trn = [
        np.load(fs.img_list(p, 'ED_ES')) for p in range(cg.num_partitions)
    ]
    segs_list_trn = [
        np.load(fs.seg_list(p, 'ED_ES')) for p in range(cg.num_partitions)
    ]

    if batch is None:
        print('No batch was provided: training on all images.')
        batch = 'all'

        imgs_list_trn = np.concatenate(imgs_list_trn)
        segs_list_trn = np.concatenate(segs_list_trn)

        # Validation reuses the full training set (no true hold-out).
        imgs_list_tst = imgs_list_trn
        segs_list_tst = segs_list_trn

    else:
        # Pop the held-out partition; the remainder becomes training data.
        imgs_list_tst = imgs_list_trn.pop(batch)
        segs_list_tst = segs_list_trn.pop(batch)

        imgs_list_trn = np.concatenate(imgs_list_trn)
        segs_list_trn = np.concatenate(segs_list_trn)

    #===========================================
    dv.section_print('Creating and compiling model...')

    # Spatial dims plus one grayscale channel.
    shape = cg.dim + (1, )
    # cg.batch_size = 1
    model_inputs = [Input(shape)]

    _, _, output = unet.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        0,  # Stage
        dimension=len(cg.dim),
        unet_depth=cg.unet_depth,
    )(model_inputs[0])

    model_outputs = [output]
    # Built on CPU — a leftover from the (commented-out) multi-GPU setup
    # below; confirm whether this placement is still intended.
    with tf.device("/cpu:0"):
        models = Model(
            inputs=model_inputs,
            outputs=model_outputs,
        )

#    # https://github.com/avolkov1/keras_experiments/blob/master/examples/mnist/mnist_tfrecord_mgpu.py
#    model = make_parallel(model, get_available_gpus())

    print(cg.batch_size)
    # cbk = MyCbk(models,batch)
    # saved_model="/media/McVeighLab/projects/SNitesh/datasetsall-classes-all-phases-1.5/model_batch_all.hdf5"
    # if(os.path.isfile(saved_model)):
    #   models.load_weights(fs.model(batch), by_name=True)

    # model = multi_gpu_model(models, gpus=2)
    model = models  # single-GPU/CPU path: train the model directly
    opt = Adam(lr=1e-3)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    #===========================================
    dv.section_print('Fitting model...')

    # callbacks = [
    #              LearningRateScheduler(ut.step_decay),cbk
    #             ]
    # Keep only the best checkpoint (lowest val_loss) plus LR decay.
    callbacks = [
        ModelCheckpoint(
            fs.model(batch),
            monitor='val_loss',
            save_best_only=True,
        ),
        LearningRateScheduler(ut.step_decay),
    ]
    # Training Generator
    datagen = ImageDataGenerator(
        3,  # Dimension of input image
        translation_range=cg.
        xy_range,  # randomly shift images vertically (fraction of total height)
        #        rotation_range = 0.0,  # randomly rotate images in the range (degrees, 0 to 180)
        scale_range=cg.zm_range,
        flip=cg.flip,
    )

    datagen_flow = datagen.flow(
        imgs_list_trn,
        segs_list_trn,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
        augment=True,  # augmentation only during training
    )

    # Validation generator: no augmentation.
    valgen = ImageDataGenerator(
        3,  # Dimension of input image
    )
    print(cg.dim)

    valgen_flow = valgen.flow(
        imgs_list_tst,
        segs_list_tst,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
    )
    # file_write=open("model_description","w")
    # print_summary(model, line_length=None, positions=[.33, .75, .87, 1.], print_fn=None)
    # print(model.layers)
    # print(model.inputs)
    # print(model.outputs)
    # print(model.summary(),file=file_write)
    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(
        datagen_flow,
        steps_per_epoch=imgs_list_trn.shape[0] // (cg.batch_size),
        epochs=cg.epochs,
        workers=1,
        validation_data=valgen_flow,
        validation_steps=imgs_list_tst.shape[0] // (cg.batch_size),
        callbacks=callbacks,
        verbose=1,
    )
    # Dump the architecture summary after training completes.
    print_summary(model, line_length=None, positions=None, print_fn=None)
Example #6
0
def train():
    """Train a U-Net with image lists supplied by the project's ``utils``.

    No hold-out argument here: the ``utils`` helper decides the
    train/validation split. Saves the best weights (by val_loss) to
    ``cg.model_name``.
    """

    ut = utils()

    #===========================================
    dv.section_print('Calculating Image Lists...')

    # utils() provides both training and validation path lists.
    (imgs_list_trn, segs_list_trn, imgs_list_tst,
     segs_list_tst) = ut.get_image_lists()

    #===========================================
    dv.section_print('Creating and compiling model...')

    # Spatial dims plus one grayscale channel.
    shape = cg.dim + (1, )

    model_inputs = [Input(shape)]

    _, _, output = dvpy.tf.get_unet(
        cg.dim,
        cg.num_classes,
        cg.conv_depth,
        0,  # Stage
        dimension=len(cg.dim),
        unet_depth=cg.unet_depth,
    )(model_inputs[0])

    model_outputs = [output]

    model = Model(
        inputs=model_inputs,
        outputs=model_outputs,
    )
    opt = Adam(lr=1e-3)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    #===========================================
    dv.section_print('Fitting model...')

    # Keep only the best checkpoint (lowest val_loss) plus LR decay.
    callbacks = [
        ModelCheckpoint(
            cg.model_name,
            monitor='val_loss',
            save_best_only=True,
        ),
        LearningRateScheduler(ut.step_decay),
    ]

    # Training Generator
    datagen = dvpy.tf.ImageDataGenerator(
        3,  # Dimension of input image
        translation_range=cg.
        xy_range,  # randomly shift images vertically (fraction of total height)
        #        rotation_range = 0.0,  # randomly rotate images in the range (degrees, 0 to 180)
        scale_range=cg.zm_range,
        flip=cg.flip,
    )

    datagen_flow = datagen.flow(
        imgs_list_trn,
        segs_list_trn,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
        augment=True,  # augmentation only during training
    )

    # Validation generator: no augmentation.
    valgen = dvpy.tf.ImageDataGenerator(
        3,  # Dimension of input image
    )

    valgen_flow = valgen.flow(
        imgs_list_tst,
        segs_list_tst,
        batch_size=cg.batch_size,
        input_adapter=ut.in_adapt,
        output_adapter=ut.out_adapt,
        shape=cg.dim,
        input_channels=1,
        output_channels=cg.num_classes,
    )

    # Fit the model on the batches generated by datagen.flow().
    # NOTE(review): epochs is hard-coded to 64 here, unlike the other
    # variants which read cg.epochs — confirm this is intentional.
    model.fit_generator(
        datagen_flow,
        steps_per_epoch=imgs_list_trn.shape[0] // cg.batch_size,
        epochs=64,
        workers=1,
        validation_data=valgen_flow,
        validation_steps=imgs_list_tst.shape[0] // cg.batch_size,
        callbacks=callbacks,
        verbose=1,
    )