Example 1
# --- GPU selection and output locations ---
# NOTE(review): `gpu`, `docker` and the hyper-parameters interpolated below
# (d_cnn, g_cnn, fc_layer, ...) are defined outside this fragment
# (presumably parsed from the command line) — confirm before reuse.
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)
output_folder = '/data/results' if docker else './data'
print('Results saved in: ' + output_folder)
# Encode the full hyper-parameter configuration into the model folder name.
DA_model_name = 'mmd-dcnn-{}-gcnn-{}-fc-{}-dbn-{}-gbn-{}-clr-{}-glr-{}-dlr-{}-dw-{}-tw-{}-sw-{}-D-{}-L-{}-bz-{}-itr-{}'.\
 format(d_cnn, g_cnn, fc_layer, d_bn, g_bn, lr, g_lr, d_lr, d_weight, t_weight, s_weight, dataset, nb_trg_labels, batch_size, nb_steps)

noise = 2.0        # std-dev of the additive Gaussian noise applied to the source data below
sig_rate = 0.035   # signal rate forwarded to load_source below
source = os.path.join(output_folder, 'CLB')
target = os.path.join(output_folder, 'FDA')
# Domain-adaptation results live under "<source>-<target>/<model name>".
DA = os.path.join(
    output_folder, '{}-{}'.format(os.path.basename(source),
                                  os.path.basename(target)))
DA_model_folder = os.path.join(DA, DA_model_name)
generate_folder(DA_model_folder)

# Load the source-domain splits, corrupt them with Gaussian noise,
# min-max normalise each split independently, and append a channel axis.
nb_source = 40000
Xs_trn, Xs_val, Xs_tst, ys_trn, ys_val, ys_tst = load_source(train=nb_source,
                                                             sig_rate=sig_rate)

# Additive Gaussian noise; each split uses its own fixed seed so the
# corruption is reproducible (train=2, val=0, test=1).
Xs_trn = np.random.RandomState(2).normal(Xs_trn, noise)
Xs_val = np.random.RandomState(0).normal(Xs_val, noise)
Xs_tst = np.random.RandomState(1).normal(Xs_tst, noise)

# Rescale each split to [0, 1] using its own min/max.
Xs_trn = (Xs_trn - np.min(Xs_trn)) / (np.max(Xs_trn) - np.min(Xs_trn))
Xs_val = (Xs_val - np.min(Xs_val)) / (np.max(Xs_val) - np.min(Xs_val))
Xs_tst = (Xs_tst - np.min(Xs_tst)) / (np.max(Xs_tst) - np.min(Xs_tst))

# Append a singleton channel dimension (N, H, W) -> (N, H, W, 1).
Xs_trn = np.expand_dims(Xs_trn, axis=3)
Xs_val = np.expand_dims(Xs_val, axis=3)
Xs_tst = np.expand_dims(Xs_tst, axis=3)

# Test labels as a column vector.
ys_tst = ys_tst.reshape(-1, 1)
# Build the segmentation network, restore its best checkpoint, and persist
# a ready-to-use copy of the model.
activation = 'softmax'
# Resolve the architecture constructor by name.
net_func = globals()[net_arch]
# Decoder widths halve at every stage: nb_filters, /2, /4, /8, /16.
decoder_filters = tuple(int(nb_filters / 2 ** i) for i in range(5))
model = net_func(backbone,
                 classes=n_classes,
                 encoder_weights=None,
                 activation=activation,
                 decoder_block_type=upsample,
                 feature_version=feature_version,
                 decoder_filters=decoder_filters)
model.summary()
# Restore the best weights found during training, then save the full model.
model.load_weights(best_weight)
model.save(model_folder + '/ready_model.h5')

# Folder that will hold the train/val/test evaluation artefacts.
result_dir = os.path.join(model_folder, 'eval_train_val_test')
generate_folder(result_dir)

# evaluate model
# subsets = ['val', 'train', 'test']
subsets = ['test']
# subset = subsets[2]
# NOTE(review): the loop body is truncated in this excerpt.
for subset in subsets:
	# NOTE(review): this overwrites the loop variable with subsets[0] on
	# every iteration — harmless while subsets == ['test'], but a bug if
	# more subsets are enabled above. Looks like leftover debugging code;
	# confirm and remove.
	subset = subsets[0]
	print('processing subset :{}'.format(subset))
	# NOTE(review): the first test below should be part of the if/elif
	# chain; as written, img_fns stays unset when subset matches none of
	# the cases, causing a NameError later.
	if subset == 'test':
		img_fns = test_fns
	if subset == 'val':
		img_fns = valid_fns
	elif subset == 'train':
		img_fns = train_fns
Example 3
            # NOTE(review): excerpt begins mid-block (inside the FL2 branch).
            # Crop the border by `offset`, then score prediction vs ground truth.
            gt_vol2 = gt_vol2[:, offset:-offset, offset:-offset]
            mse_score2 = np.mean(np.square(pr_vol2 - gt_vol2))
            psnr_score2 = calculate_psnr(pr_vol2, gt_vol2)
            cor_score2 = calculate_pearsonr(pr_vol2, gt_vol2)
            print(pr_vol2.shape, gt_vol2.shape)
            mse2_scores.append(mse_score2)
            psnr2_scores.append(psnr_score2)
            cor2_scores.append(cor_score2)
            print('{}-FL2: psnr {:.4f}, cor {:.4f}, mse {:.4f}\n'.format(
                vol_fn, psnr_score2, cor_score2, mse_score2))

        # save prediction
        # Optionally dump predictions and ground truth as .npy files,
        # per fluorescence-channel selection ('fl1', 'fl2', or both via 'fl12').
        pred_save = args.save
        if pred_save:
            pr_vol_dir = model_folder + '/pred_fl1_fl2'
            generate_folder(pr_vol_dir)
            if fl_ch == 'fl12' or fl_ch == 'fl1':
                np.save(os.path.join(pr_vol_dir, 'Pr1_{}.npy'.format(vol_fn)),
                        pr_vol)
                np.save(os.path.join(pr_vol_dir, 'GT1_{}.npy'.format(vol_fn)),
                        gt_vol)
                print('FL1: {}'.format(pr_vol.shape))
            if fl_ch == 'fl12' or fl_ch == 'fl2':
                np.save(os.path.join(pr_vol_dir, 'Pr2_{}.npy'.format(vol_fn)),
                        pr_vol2)
                np.save(os.path.join(pr_vol_dir, 'GT2_{}.npy'.format(vol_fn)),
                        gt_vol2)
                print('FL2: {}'.format(pr_vol2.shape))

        # save prediction examples
        prediction_dir = model_folder + '/pred_examples'
Example 4
import os
import numpy as np
from skimage import io
from helper_function import generate_folder

# Configuration for down-sampling a dataset by an integer factor.
docker = True
# dataset = 'live_dead'
dataset = 'cell_cycle2'
down_factor = 2

# Dataset paths are only meaningful inside the docker image; empty otherwise.
if docker:
    dataset_dir = '/data/datasets/{}'.format(dataset)
    down_dataset_dir = '/data/datasets/{}/down_x{}'.format(dataset, down_factor)
else:
    dataset_dir = ''
    down_dataset_dir = ''
generate_folder(down_dataset_dir)

subsets = ['train', 'test', 'val']

for subset in subsets:
    # 	subset = subsets[0]
    print('>>>> processing subset {}'.format(subset))
    # 'live_dead' keeps its images in '<subset>_images2'; other datasets use
    # the plain '<subset>_images' layout.
    if dataset == 'live_dead':
        image_folder = dataset_dir + '/{}_images2'.format(subset)
    else:
        image_folder = dataset_dir + '/{}_images'.format(subset)

    mask_folder = dataset_dir + '/{}_masks'.format(subset)

    ## generate subset folders
    down_image_folder = os.path.join(down_dataset_dir,
                                     '{}_images'.format(subset))
    generate_folder(down_image_folder)
    # NOTE(review): the loop body continues past this excerpt — the actual
    # image/mask down-sampling is not shown here.
Example 5
    Xt_val, axis=3), np.expand_dims(Xt_tst, axis=3)
# NOTE(review): the excerpt begins mid-statement above — that line is the
# tail of a tuple assignment adding a channel axis to the target splits.
# Target labels as column vectors.
yt_trn, yt_val, yt_tst = yt_trn.reshape(-1, 1), yt_val.reshape(
    -1, 1), yt_tst.reshape(-1, 1)
# Build a small labelled target subset: nb_trg_labels examples taken from
# each half of Xt_trn (presumably one class per half — confirm against the
# target loader).
Xt_trn_l = np.concatenate([
    Xt_trn[0:nb_trg_labels, :], Xt_trn[nb_target:nb_target + nb_trg_labels, :]
],
                          axis=0)
yt_trn_l = np.concatenate([
    yt_trn[0:nb_trg_labels, :], yt_trn[nb_target:nb_target + nb_trg_labels, :]
],
                          axis=0)
# DA = '/data/results/{}-{}'.format(os.path.basename(source), os.path.basename(target))
DA = os.path.join(
    output_folder, '{}-{}'.format(os.path.basename(source),
                                  os.path.basename(target)))
generate_folder(DA)
base_model_folder = os.path.join(DA, source_model_name)
generate_folder(base_model_folder)
# copy the source weight file to the DA_model_folder
# Folder name encodes every hyper-parameter of this run.
DA_model_name = 'mmd_wd-{0:}-glr-{1:}-dlr-{2:}-bz-{3:}-iter-{4:}-scr-{5:}-shar-{6:}-dis_fc-{7:}-bn-{8:}-tclf-{9:}-sclf-{10:}-tlabels-{11:}-{12:}-cnn-{13:}-dis_bn-{14:}-gcnn-{15:}-smooth-{16:}-drop-{17:}-lr-{18:}-mmd-{19:}'.format(
    dis_param, g_lr, d_lr, batch_size, nb_steps, source_scratch, shared,
    dis_fc, den_bn, trg_clf_param, src_clf_param, nb_trg_labels, dataset,
    dis_cnn, dis_bn, g_cnn, lsmooth, drop, lr, mmd_param)
DA_model_folder = os.path.join(base_model_folder, DA_model_name)
generate_folder(DA_model_folder)
# NOTE(review): shell copy with an unquoted glob; fine for trusted local
# paths, but glob + shutil.copy would avoid the shell entirely.
os.system('cp -f {} {}'.format(source_model_file + '*', DA_model_folder))

# Derive the encoder depth from a 'cnn-<n>-...' source model name,
# defaulting to 4 when the name does not follow that pattern.
if source_model_name.split('-')[0] == 'cnn':
    nb_cnn = int(source_model_name.split('-')[1])
else:
    nb_cnn = 4
    # NOTE(review): concatenation artefact — the append below is indented as
    # if inside the preceding else-branch but belongs to an image-collection
    # loop that is not part of this excerpt.
    gt_masks.append(gt_mask)
images = np.stack(images)
gt_masks = np.stack(gt_masks)

# scale back from args.flu_scale
gt_masks = np.uint8(gt_masks / scale * 255)
pr_masks = pr_masks / scale * 255
# Clip before the uint8 cast so out-of-range predictions do not wrap.
pr_masks = np.uint8(np.clip(pr_masks, 0, 255))

# save prediction examples
plot_fig_file = model_folder + '/pred_examples.png'
nb_images = 8
plot_flu_prediction(plot_fig_file, images, gt_masks, pr_masks, nb_images)
## save prediction results
pred_folder = model_folder + '/pred_fl_only'
generate_folder(pred_folder)
# One output image per sample, named 'p<sample id>'.
for i in range(gt_masks.shape[0]):
    io.imsave(pred_folder + '/p{}'.format(test_dataset.ids[i]),
              pr_masks[i, :, :])
# output_dir = model_folder+'/pred_fl'; generate_folder(output_dir)
# plot_set_prediction(output_dir, images, gt_masks, pr_masks)
# calculate PSNR
mPSNR, psnr_scores = calculate_psnr(gt_masks, pr_masks)
print('PSNR: {:.4f}'.format(mPSNR))

# calculate Pearson correlation coefficient
mPear, pear_scores = calculate_pearsonr(gt_masks, pr_masks)
print('Pearsonr:{:.4f}'.format(mPear))

# NOTE(review): concatenation artefact — the `with open(...)` header below is
# immediately followed by the keyword-argument tail of an unrelated dataset
# constructor call; the body that wrote the metric summary is missing from
# this excerpt, leaving the span syntactically broken as shown.
with open(model_folder + '/{}_metric_summary.txt'.format(subset), 'w+') as f:
    # save PSNR over fluorescent 1 and fluorescent 2
    x_dir,
    y_dir,
    sample_names=sample_names,
    scale=scale,
    augmentation=get_validation_augmentation(val_dim),
    preprocessing=get_preprocessing(preprocess_input),
)

# Sanity-check the dataset sample shapes, run batched inference, persist each
# prediction as a .npy file named after its sample id, and record timing.
print(test_dataset[0][0].shape, test_dataset[0][1].shape)
test_dataloader = Dataloder(test_dataset, batch_size=1, shuffle=False)

# Prediction, timed end to end.
start_time = time.time()
pr_masks = model.predict(test_dataloader)
end_time = time.time()
# Scale predictions back to [0, 255] and drop singleton dimensions.
pr_masks = pr_masks / scale * 255.
pr_masks = pr_masks.squeeze()

# Save the prediction results, one .npy per sample.
inference_dir = model_folder + '/pred_for_all'
generate_folder(inference_dir)
# FIX: loop variable renamed from `id`, which shadowed the builtin.
for i, sample_id in enumerate(test_dataset.ids):
    np.save(inference_dir + '/{}'.format(sample_id), pr_masks[i, :])

# Record the mean per-sample inference time.
with open(model_folder + '/infer_time.txt', 'w+') as f:
    f.write('Inference time:\n')
    f.write('Mean inference time: mean {:.4f}\n'.format(
        (end_time - start_time) / len(test_dataset.ids)))
Example 8
anomaly = args.anomaly  # anomaly setting, expected '2x'/'3x'/'4x' (see below)

os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)

# Results root differs between the docker image and a local run.
if docker:
    output_folder = '/data/results/MRI'
else:
    output_folder = './data/MRI'

## model folder
# Folder name encodes the auto-encoder configuration and split sizes.
model_name = 'AEL{}-{}-cn-{}-fr-{}-ks-{}-bn-{}-lr-{}-stps-{}-bz-{}-tr-{}k-vl-{}-test-{}-l-{}-ano_w-{}-{}'.format(
    version, os.path.basename(output_folder), nb_cnn,
    filters, kernel_size, batch_norm, lr, nb_steps, batch_size,
    int(train / 1000), val, test, loss, ano_weight, anomaly)
model_folder = os.path.join(output_folder, model_name)
generate_folder(model_folder)

#image size
img_size = 256
## load dataset
print_red('Data loading ...')
# Map the anomaly magnification to a dataset version.
# NOTE(review): no else-branch — an unexpected `anomaly` value leaves
# dataset_version undefined and will raise a NameError downstream.
if anomaly == '4x':
    dataset_version = 0
elif anomaly == '2x':
    dataset_version = 4
elif anomaly == '3x':
    dataset_version = 5

# NOTE(review): the call below is truncated in this excerpt — its argument
# list is cut off mid-way by the concatenation.
Xn_trn, Xn_val, Xn_tst, Xa_trn, Xa_tst = load_MRI_anomaly_labels(
    docker=docker,
    train=train,
    # NOTE(review): excerpt resumes mid-loop — `bin_map`, `mask_list`,
    # `map` (shadows the builtin; defined outside this fragment), `pha_img`,
    # `image_indx` and `output_dir` come from code not shown here.
    # Rebuild a ground-truth mask from components with enough support
    # (> 128 positive pixels).
    recon_mask = np.zeros(bin_map.shape)
    for i in range(len(mask_list)):
        if np.sum(mask_list[i]) > 128:
            recon_mask = recon_mask + mask_list[i] * map
    print_green(np.sum(map))
    print_red(np.sum(np.uint8(recon_mask)))
    print_green(np.unique(map))
    print_red(np.unique(np.uint8(recon_mask)))

    # Save images and masks
    # Min-max scale the phase image to [0, 255], then replicate it into a
    # 3-channel image.
    rgb_pha_img = np.uint8(255 * (pha_img - np.min(pha_img)) /
                           (np.max(pha_img) - np.min(pha_img)))
    rgb_pha_img = np.concatenate([
        rgb_pha_img.reshape(rgb_pha_img.shape + (1, )),
        rgb_pha_img.reshape(rgb_pha_img.shape + (1, )),
        rgb_pha_img.reshape(rgb_pha_img.shape + (1, ))
    ],
                                 axis=2)
    # One folder per image index with images/masks/GT/original_GT subfolders.
    data_folder = os.path.join(output_dir, '{:04d}'.format(image_indx))
    new_image_id = '{:04d}'.format(image_indx)
    generate_folder(data_folder + '/images')
    generate_folder(data_folder + '/masks')
    generate_folder(data_folder + '/GT')
    generate_folder(data_folder + '/original_GT')
    io.imsave(data_folder + '/images/{}.png'.format(new_image_id), rgb_pha_img)
    io.imsave(data_folder + '/GT/{}.png'.format(new_image_id),
              np.uint8(recon_mask))
    io.imsave(data_folder + '/original_GT/{}.png'.format(new_image_id), map)
    for i in range(len(mask_list)):
        io.imsave(data_folder + '/masks/mask_{:04d}.png'.format(i),
                  np.uint8(mask_list[i] * map))
Example 10
						# NOTE(review): excerpt starts mid-branch (inside the FL1 case).
						# Crop the border, then score FL1 prediction vs ground truth.
						pr_vol = pr_vol[:,offset:-offset,offset:-offset]
						gt_vol = gt_vol[:,offset:-offset,offset:-offset]
						mse_score = np.mean(np.square(pr_vol-gt_vol))
						psnr_score = calculate_psnr(pr_vol, gt_vol)
						cor_score = calculate_pearsonr(pr_vol, gt_vol)
						print(pr_vol.shape, gt_vol.shape)
						mse_scores.append(mse_score); psnr_scores.append(psnr_score); cor_scores.append(cor_score)
						print('{}-FL1: psnr {:.4f}, cor {:.4f}, mse {:.4f}\n'.format(vol_fn, psnr_score, cor_score, mse_score))
				# Same scoring for the FL2 channel when selected.
				if fl_ch == 'fl12' or fl_ch == 'fl2':
						pr_vol2 = pr_vol2[:,offset:-offset,offset:-offset]
						gt_vol2 = gt_vol2[:,offset:-offset,offset:-offset]
						mse_score2 = np.mean(np.square(pr_vol2-gt_vol2))
						psnr_score2 = calculate_psnr(pr_vol2, gt_vol2)
						cor_score2 = calculate_pearsonr(pr_vol2, gt_vol2)
						print(pr_vol2.shape, gt_vol2.shape)
						mse2_scores.append(mse_score2); psnr2_scores.append(psnr_score2); cor2_scores.append(cor_score2)
						print('{}-FL2: psnr {:.4f}, cor {:.4f}, mse {:.4f}\n'.format(vol_fn, psnr_score2, cor_score2, mse_score2))

				# save prediction
				# Optionally dump predictions/ground truth as .npy files per channel.
				pred_save = args.save
				if pred_save:
						pr_vol_dir = model_folder+'/grant'
						generate_folder(pr_vol_dir)
						if fl_ch == 'fl12' or fl_ch == 'fl1':				
								np.save(os.path.join(pr_vol_dir,'Pr1_{}.npy'.format(vol_fn)), pr_vol)
								np.save(os.path.join(pr_vol_dir,'GT1_{}.npy'.format(vol_fn)), gt_vol)
								print('FL1: {}'.format(pr_vol.shape))
						if fl_ch == 'fl12' or fl_ch == 'fl2':
								np.save(os.path.join(pr_vol_dir,'Pr2_{}.npy'.format(vol_fn)), pr_vol2)
								np.save(os.path.join(pr_vol_dir,'GT2_{}.npy'.format(vol_fn)), gt_vol2)
								print('FL2: {}'.format(pr_vol2.shape))
Example 11
        # NOTE(review): excerpt starts mid-block — the timed prediction call
        # precedes this fragment.
        end_time = time.time()
        print('Before scaling: min {:.4f}, max {:.4f}'.format(
            pr_masks.min(), pr_masks.max()))
        # scale values back to [0,1]
        pr_masks = pr_masks / scale_factor
        print('After scaling: min {:.4f}, max {:.4f}'.format(
            pr_masks.min(), pr_masks.max()))
        # crop and save prediction maps
        # These datasets were padded up to val_dim; crop back to img_dim.
        if dataset == 'bone_marrow' or dataset == 'colorectal':
            offset1, offset2 = int((val_dim - img_dim) / 2), val_dim - int(
                (val_dim - img_dim) / 2)
            pr_masks = pr_masks[:, offset1:offset2, offset1:offset2]
        print('output: {}'.format(pr_masks.shape))

        # save data as numpy data
        generate_folder(pred_valid_dir)
        generate_folder(pred_valid_time_dir)

        for index in range(len(test_dataset)):
            # save data
            npy_file_name = pred_valid_dir + '/{}.npy'.format(
                test_dataset.ids[index].split('.')[0])
            # Log only the first and last samples to keep output short.
            # NOTE(review): raises ZeroDivisionError when len(test_dataset)==1.
            if index % (len(test_dataset) - 1) == 0:
                print('Save prediction to:{}'.format(npy_file_name))
            np.save(npy_file_name, pr_masks[index])

        # load the saved data to check the integrity
        pred_masks = []
        for index in range(len(test_dataset)):
            npy_file_name = pred_valid_dir + '/{}.npy'.format(
                test_dataset.ids[index].split('.')[0])
            # NOTE(review): this integrity-check loop is truncated here.
Example 12
                              axis=0)
    # NOTE(review): the excerpt begins mid-call — the `axis=0)` above closes
    # an np.concatenate(...) started outside this fragment.
    # Pool reconstruction errors: normal set plus the anomaly variants
    # (total_errs additionally includes anom_recon_errs4).
    recon_errs = np.concatenate([
        norm_recon_errs, anom_recon_errs, anom_recon_errs1, anom_recon_errs2,
        anom_recon_errs3
    ],
                                axis=0)
    total_errs = np.concatenate([
        norm_recon_errs, anom_recon_errs, anom_recon_errs1, anom_recon_errs2,
        anom_recon_errs3, anom_recon_errs4
    ],
                                axis=0)
    # 	recon_errs = np.apply_over_axes(np.mean, err_maps, [1,2,3]).flatten()
    # 	print_yellow('AUC: AE {0:.4f} AE(compare) {1:.4f} AE(normalized) {2:.4f} MP: {3:.4f}'.format(AE_auc, AE_auc1, AE_auc_n, MP_auc))
    print(model_name)
    # Persist per-set reconstruction-error statistics as plain text files.
    result_folder = model_folder + '/detection_results'
    generate_folder(result_folder)
    np.savetxt(os.path.join(result_folder, 'norm_stat.txt'), norm_recon_errs)
    np.savetxt(os.path.join(result_folder, 'anom_stat.txt'), anom_recon_errs)
    np.savetxt(os.path.join(result_folder, 'anom_stat1.txt'), anom_recon_errs1)
    np.savetxt(os.path.join(result_folder, 'anom_stat2.txt'), anom_recon_errs2)
    np.savetxt(os.path.join(result_folder, 'anom_stat3.txt'), anom_recon_errs3)
    np.savetxt(os.path.join(result_folder, 'anom_stat4.txt'), anom_recon_errs4)

    ## plot err histogram and recon images
    # Split the pooled errors into five equal-length groups — presumably one
    # per source set, assuming equal set sizes; confirm upstream.
    idx, idx1, idx2, idx3 = int(len(recon_errs) * 1 / 5), int(
        len(recon_errs) * 2 / 5), int(len(recon_errs) * 3 / 5), int(
            len(recon_errs) * 4 / 5)
    err_stat_list = [
        recon_errs[:idx], recon_errs[idx:idx1], recon_errs[idx1:idx2],
        recon_errs[idx2:idx3], recon_errs[idx3:]
    ]