Example #1
import os
import numpy as np
import nibabel as nib


def predict(img_dir_name):
    # unet_model_type, checkpoint_filename, train_imgs_path and the image_*
    # sizes are module-level configuration values.
    if unet_model_type == 'default':
        model = get_unet_default()
    elif unet_model_type == 'reduced':
        model = get_unet_reduced()
    elif unet_model_type == 'extended':
        model = get_unet_extended()
    else:
        raise ValueError('unknown unet_model_type: %r' % unet_model_type)

    checkpoint_filepath = os.path.join('outputs', checkpoint_filename)
    model.load_weights(checkpoint_filepath)
    model.summary()

    SegmentedVolume = np.zeros((image_rows, image_cols, image_depth))

    img_mask_name = img_dir_name + '_mask.nii.gz'
    img_mask_name = os.path.join(train_imgs_path, img_dir_name, img_mask_name)

    img_mask = nib.load(img_mask_name)
    # get_data() was removed from current nibabel; get_fdata() returns a float array
    img_mask_data = img_mask.get_fdata()

    # for each slice, extract patches and predict
    for iSlice in range(0, 256):
        # crop a fixed window of the slice before checking for foreground
        mask = img_mask_data[2:254, 2:127, iSlice]

        if np.any(mask):
            print('-' * 30)
            print('Slice number: ', iSlice)
            label_predicted, patches_training_imgs_2d, rows, cols = create_slice_testing(
                iSlice, img_dir_name)
            imgs_valid_predict = model.predict(patches_training_imgs_2d)
            label_predicted_filled = write_slice_predict(
                imgs_valid_predict, rows, cols)

            # copy predicted labels into the volume wherever the mask is set
            for i in range(SegmentedVolume.shape[0]):
                for j in range(SegmentedVolume.shape[1]):
                    if img_mask_data[i, j, iSlice] == 1:
                        SegmentedVolume[i, j, iSlice] = label_predicted_filled[i, j]
        print('done')

    # use the mask-restricted volume to write the output NIfTI
    img = nib.Nifti1Image(SegmentedVolume, np.eye(4))
    img_name = (img_dir_name + '_predicted_' + str(num_classes) + 'class_' +
                str(patch_size) + '_' + unet_model_type + '_tuned_8925.nii.gz')
    nib.save(img, os.path.join('../data_new', write_path, img_name))
    print('-' * 30)
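
The nested per-voxel copy in the slice loop can also be written as a single masked assignment. A minimal sketch (not part of the original code), assuming img_mask_data and SegmentedVolume share the same in-plane shape:

# vectorized replacement for the i/j loop, inside the slice loop:
inside = img_mask_data[:, :, iSlice] == 1
SegmentedVolume[inside, iSlice] = label_predicted_filled[inside]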
Example #2
import os
import numpy as np
import imageio


def predict(img_dir_name):
    if unet_model_type == 'default':
        model = get_unet_default()
    elif unet_model_type == 'reduced':
        model = get_unet_reduced()
    elif unet_model_type == 'extended':
        model = get_unet_extended()
    else:
        raise ValueError('unknown unet_model_type: %r' % unet_model_type)
    
    checkpoint_filepath = os.path.join('outputs', checkpoint_filename)
    model.load_weights(checkpoint_filepath)
    model.summary()
    
    SegmentedVolume = np.zeros((image_depth, image_rows, image_cols))

    img_mask_data = np.load('../npy_data/test_mask.npy')
    img_mask_data = img_mask_data[0]

    # extract patches and predict; only slice 28 is evaluated in this example
    for iSlice in [28]:
        mask = img_mask_data[iSlice]

        if np.any(mask):
            print('-' * 30)
            print('Slice number: ', iSlice)
            label_predicted, patches_training_imgs_2d, rows, cols = create_slice_testing(iSlice, img_dir_name)
            imgs_valid_predict = model.predict(patches_training_imgs_2d)
            label_predicted_filled = write_slice_predict(imgs_valid_predict, rows, cols)
            
            # copy predicted labels into the volume wherever the mask is set
            for i in range(SegmentedVolume.shape[1]):
                for j in range(SegmentedVolume.shape[2]):
                    if img_mask_data[iSlice, i, j] == 1:
                        SegmentedVolume[iSlice, i, j] = label_predicted_filled[i, j]
        print('done')

    # use the mask-restricted volume to write the output image
    labels = np.array(SegmentedVolume)
    print(np.unique(labels))
    # scipy.misc.imsave was removed from SciPy; imageio.imwrite is the usual replacement
    imageio.imwrite('28layer_test.bmp', (labels[28] * 80).astype(np.uint8))
    test_label = np.load(img_dir_name + 'test_y.npy')
    test_label = test_label[0, 28]

    dice(labels[28], test_label)
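
dice() is a project helper that is not shown in these examples. A minimal per-label Dice-coefficient sketch (an assumption; the project's own implementation may differ):

import numpy as np

def dice(pred, truth):
    # Dice = 2*|A & B| / (|A| + |B|), reported for each label in the ground truth
    for lab in np.unique(truth):
        a = (pred == lab)
        b = (truth == lab)
        score = 2.0 * np.logical_and(a, b).sum() / (a.sum() + b.sum())
        print('label %s: dice = %.4f' % (lab, score))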
def resume():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)
    imgs_train, imgs_gtruth_train = load_train_data()

    print('-' * 30)
    print('Loading and preprocessing validation data...')
    print('-' * 30)
    imgs_val, imgs_gtruth_val = load_validatation_data()

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    if unet_model_type == 'default':
        model = get_unet_default()
    elif unet_model_type == 'reduced':
        model = get_unet_reduced()
    elif unet_model_type == 'extended':
        model = get_unet_extended()

    checkpoint_filepath_best = 'outputs/' + 'best_4classes_32_reduced_tuned_8915.h5'

    print(checkpoint_filepath_best)

    model.load_weights(checkpoint_filepath_best)
    model.summary()

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    #============================================================================
    print('training starting..')
    log_filename = 'outputs/' + image_type + '_model_train.csv'
    # Callback that streams epoch results to a CSV file (append=True keeps earlier rows)

    csv_log = callbacks.CSVLogger(log_filename, separator=',', append=True)

    early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                             min_delta=0,
                                             patience=10,
                                             verbose=0,
                                             mode='min')

    #checkpoint_filepath = 'outputs/' + image_type +"_best_weight_model_{epoch:03d}_{val_loss:.4f}.hdf5"
    checkpoint_filepath = 'outputs/' + 'weights.h5'

    checkpoint = callbacks.ModelCheckpoint(checkpoint_filepath,
                                           monitor='val_loss',
                                           verbose=1,
                                           save_best_only=True,
                                           mode='min')

    #callbacks_list = [csv_log, checkpoint]
    callbacks_list = [csv_log, early_stopping, checkpoint]

    #============================================================================
    hist = model.fit(imgs_train,
                     imgs_gtruth_train,
                     batch_size=batch_size,
                     epochs=nb_epochs,  # 'nb_epoch' was renamed to 'epochs' in Keras 2
                     verbose=1,
                     validation_data=(imgs_val, imgs_gtruth_val),
                     shuffle=True,
                     callbacks=callbacks_list)

    model_name = 'outputs/' + image_type + '_model_last'
    model.save(model_name)  # saves the full model (architecture + weights) to an HDF5 file
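
When resuming training like this, Keras's fit() also accepts initial_epoch so epoch numbering (and the appended CSV log) continues from the previous run; a hedged sketch, with the starting epoch as a placeholder:

hist = model.fit(imgs_train,
                 imgs_gtruth_train,
                 batch_size=batch_size,
                 epochs=nb_epochs,
                 initial_epoch=50,  # placeholder: set to the last completed epoch
                 validation_data=(imgs_val, imgs_gtruth_val),
                 shuffle=True,
                 callbacks=callbacks_list)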