if docker:  # assumed guard: the excerpt starts mid-if (cf. the same pattern in Examples #3 and #4)
    dataset_dir = '../data/minidataset/'
else:
    dataset_dir = './data/Phase_fluo_Cells/0_FUCCI_Timelapse/'

phase_img_folder = dataset_dir + 'f0_phase_cropped/'
fl1_img_folder = dataset_dir + 'f0_fl1_cropped/'
fl2_img_folder = dataset_dir + 'f0_fl2_cropped/'

phase_img_files = sorted(glob.glob(phase_img_folder + '*.tif'))  # sort: glob order is arbitrary
fl1_img_files = sorted(glob.glob(fl1_img_folder + '*.tif'))
fl2_img_files = sorted(glob.glob(fl2_img_folder + '*.tif'))

nb_imgs = 6
for i in range(nb_imgs):
    print_yellow(os.path.basename(phase_img_files[i]))
    print_red(os.path.basename(fl1_img_files[i]))
    print_green(os.path.basename(fl2_img_files[i]))

# plt.ion();
fig = plt.figure(1)

index = 1
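# map the phase filename to its two fluorescence counterparts:
# channel tag ch0 -> ch1/ch2, modality tag mhilbert -> mFL1/mFL2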
pha_file_name = os.path.basename(phase_img_files[index])
fl1_file_name = pha_file_name.replace('ch0', 'ch1').replace('mhilbert', 'mFL1')
fl2_file_name = pha_file_name.replace('ch0', 'ch2').replace('mhilbert', 'mFL2')
pha_img = io.imread(phase_img_folder + pha_file_name)
fl1_img = io.imread(fl1_img_folder + fl1_file_name)
fl2_img = io.imread(fl2_img_folder + fl2_file_name)
print(pha_file_name)
print(fl1_file_name)
print(fl2_file_name)
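## `fig` is created above but the plotting code is cut off in this excerpt;
## a minimal sketch of how the three channels might be displayed (an
## assumption, not the original script's plotting code):
ax1, ax2, ax3 = fig.subplots(1, 3)
ax1.imshow(pha_img, cmap='gray'); ax1.set_title('phase')
ax2.imshow(fl1_img, cmap='Reds'); ax2.set_title('FL1')
ax3.imshow(fl2_img, cmap='Greens'); ax3.set_title('FL2')
fig.savefig('channel_preview.png')  # hypothetical output path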
Example #2
        DA_model_folder, M_loss_list, M_loss_list,
        DA_model_folder + '/MMD_LOSS_{}.png'.format(DA_model_name))
    plot_src_trg_auc_iterations(
        test_auc_list, val_auc_list, src_test_list,
        DA_model_folder + '/AUC_src_{}.png'.format(DA_model_name))
plot_auc_iterations(
    test_auc_list, val_auc_list,
    DA_model_folder + '/AUC_Final_{}.png'.format(DA_model_name))
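# checkpoint the target model whenever the target-domain validation AUC improves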
if best_val_auc < val_target_AUC:
    best_val_auc = val_target_AUC
    target_saver.save(sess, DA_model_folder + '/target_best')
    np.savetxt(os.path.join(DA_model_folder, 'test_stat.txt'),
               test_target_stat)
    np.savetxt(os.path.join(DA_model_folder, 'test_best_auc.txt'),
               [test_target_AUC])
    print_red('Update best:' + DA_model_folder)
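# every 1000 iterations, embed 100 random test samples from each domain and
# plot the source-vs-target feature distributions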
if iteration % 1000 == 0:
    indices = np.random.randint(0, Xs_tst.shape[0], 100)
    source_feat = h_src.eval(session=sess,
                             feed_dict={
                                 xs: Xs_tst[indices],
                                 g_training: False
                             })
    target_feat = h_trg.eval(session=sess,
                             feed_dict={
                                 xt: Xt_tst[indices],
                                 g_training: False
                             })
    plot_feature_pair_dist(
        DA_model_folder +
        '/feat_{}_iter_{}.png'.format(DA_model_name, iteration),
Example #3
if docker:  # assumed guard: the excerpt starts mid-if (same pattern as Example #4)
    output_folder = '/data/results/MRI'
else:
    output_folder = './data/MRI'

## model folder
model_name = 'AEL{}-{}-cn-{}-fr-{}-ks-{}-bn-{}-lr-{}-stps-{}-bz-{}-tr-{}k-vl-{}-test-{}-l-{}-ano_w-{}-{}'.format(
    version, os.path.basename(output_folder), nb_cnn,
    filters, kernel_size, batch_norm, lr, nb_steps, batch_size,
    int(train / 1000), val, test, loss, ano_weight, anomaly)
model_folder = os.path.join(output_folder, model_name)
generate_folder(model_folder)

#image size
img_size = 256
## load dataset
print_red('Data loading ...')
if anomaly == '4x':
    dataset_version = 0
elif anomaly == '2x':
    dataset_version = 4
elif anomaly == '3x':
    dataset_version = 5

Xn_trn, Xn_val, Xn_tst, Xa_trn, Xa_tst = load_MRI_anomaly_labels(
    docker=docker,
    train=train,
    val=val,
    normal=test,
    anomaly=test,
    version=dataset_version)
print_red('Data 0-1 normalization ...')
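## The excerpt ends before the normalization call. normalize_0_1 is a project
## helper (not shown here); a minimal sketch of the presumed behavior, assuming
## simple min-max scaling to [0, 1]:
def normalize_0_1_sketch(X):
    # hypothetical stand-in for the project's normalize_0_1 helper
    X = X.astype(np.float32)
    return (X - X.min()) / (X.max() - X.min() + 1e-8)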
Example #4
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu)

if docker:
	output_folder = '/data/results/MRI'
else:
	output_folder = './data/MRI'

## model folder
model_name = 'AE{}-{}-cn-{}-fr-{}-ks-{}-bn-{}-lr-{}-stps-{}-bz-{}-tr-{}k-vl-{}-test-{}-l-{}'.format(
	version, os.path.basename(output_folder), nb_cnn, filters, kernel_size,
	batch_norm, lr, nb_steps, batch_size, int(train / 1000), val, test, loss)
model_folder = os.path.join(output_folder, model_name)
generate_folder(model_folder)

#image size
img_size = 256
## load dataset
print_red('Data loading ...')
X_SA_trn, X_SA_val, X_SA_tst, X_SP_tst = load_MRI_anomaly(
	docker=docker, train=train, val=val, normal=test, anomaly=test)
print_red('Data 0-1 normalization ...')
X_SA_trn, X_SA_val, X_SA_tst = normalize_0_1(X_SA_trn), normalize_0_1(X_SA_val), normalize_0_1(X_SA_tst)
## Dimension adjust: add a trailing channel axis, (N, H, W) -> (N, H, W, 1)
X_SA_trn, X_SA_val, X_SA_tst, X_SP_tst = (np.expand_dims(X, axis=3)
	for X in (X_SA_trn, X_SA_val, X_SA_tst, X_SP_tst))
print_red('Data ready!')

# create the graph
scope = 'base'
x = tf.placeholder("float", shape=[None, img_size, img_size, 1])
is_training = tf.placeholder_with_default(False, (), 'is_training')
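## TF1-style graph: `x` receives image batches; `is_training` (default False)
## toggles batch-norm training mode inside the autoencoder below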
if version == 1 or version == 2:
	h1, h2, y = auto_encoder(x, nb_cnn=nb_cnn, bn=batch_norm,
		bn_training=is_training, filters=filters,
		kernel_size=[kernel_size, kernel_size], scope_name=scope)
elif version == 3:

Example #5
            for xy_tuple in list(obj_list[i]):
                mask[xy_tuple] = 1
            mask = ndimage.morphology.binary_fill_holes(mask)
            if np.sum(mask) > 128:
                mask_list.append(
                    mask
                )  ## add to the mask list if the mask size is larger than 128 (filtering)

    # Check the mask: 'map' here is a label image from earlier in the script
    # (it shadows the Python builtin); rebuild the labeled mask from the
    # size-filtered components
    bin_map = map > 0
    recon_mask = np.zeros(bin_map.shape)
    for i in range(len(mask_list)):
        if np.sum(mask_list[i]) > 128:
            recon_mask = recon_mask + mask_list[i] * map
    print_green(np.sum(map))
    print_red(np.sum(np.uint8(recon_mask)))
    print_green(np.unique(map))
    print_red(np.unique(np.uint8(recon_mask)))

    # Save images and masks
    # min-max scale the phase image to uint8 and replicate it into 3 channels
    rgb_pha_img = np.uint8(255 * (pha_img - np.min(pha_img)) /
                           (np.max(pha_img) - np.min(pha_img)))
    rgb_pha_img = np.stack([rgb_pha_img] * 3, axis=2)
    data_folder = os.path.join(output_dir, '{:04d}'.format(image_indx))
    new_image_id = '{:04d}'.format(image_indx)
    generate_folder(data_folder + '/images')
Example #6
        kernel_size = int(splits[i + 1])
    elif splits[i] == 'tr':
        train = int(splits[i + 1][:2]) * 1000
    elif splits[i] == 'vl':
        val = int(splits[i + 1])
    elif splits[i] == 'test':
        test = int(splits[i + 1])
    elif splits[i] == 'n' or splits[i] == 'NL':
        noise = float(splits[i + 1])
    elif splits[i] == 'l':
        loss = splits[i + 1]
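## Illustration with hypothetical values: model_name encodes hyperparameters
## as 'key-value' pairs joined by '-', e.g.
##   'AE1-MRI-cn-4-fr-32-ks-3-bn-True-lr-0.0001-stps-100000-bz-50-tr-65k-vl-400-test-300-l-mse'
## so splits = model_name.split('-') lets the loop above recover
## kernel_size = 3, train = 65000, val = 400, test = 300, loss = 'mse'.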

model_folder = os.path.join(output_folder, model_name)

## load data
print_red('Data loading ...')
version1, version2, version3, version4 = 0, 1, 4, 5  # dataset versions (cf. the anomaly-to-version mapping in Example #3)
_, _, _, X_SP_tst = load_MRI_anomaly(docker=docker,
                                     train=train,
                                     val=val,
                                     normal=test,
                                     anomaly=test,
                                     version=version1)
_, _, X_SA_tst, X_SP_tst1 = load_MRI_anomaly(docker=docker,
                                             train=train,
                                             val=val,
                                             normal=test,
                                             anomaly=test,
                                             version=version2)
_, _, _, X_SP_tst2 = load_MRI_anomaly(docker=docker,
                                      train=train,
Example #7
        kernel_size = int(splits[i + 1])
    elif splits[i] == 'tr':
        train = int(splits[i + 1][:2]) * 1000
    elif splits[i] == 'vl':
        val = int(splits[i + 1])
    elif splits[i] == 'test':
        test = int(splits[i + 1])
    elif splits[i] == 'n' or splits[i] == 'NL':
        noise = float(splits[i + 1])
    elif splits[i] == 'l':
        loss = splits[i + 1]

model_folder = os.path.join(output_folder, model_name)
dim = 256
## load data
print_red('Data loading ...')
Xn = load_MRI_anomaly_test(dataset='true')
Xa2 = load_MRI_anomaly_test(dataset='meas_2x')
Xa3 = load_MRI_anomaly_test(dataset='meas_3x')
Xa4 = load_MRI_anomaly_test(dataset='meas_4x')
Xam2 = load_MRI_anomaly_test(dataset='null_mixed_2x')
Xam4 = load_MRI_anomaly_test(dataset='null_mixed_4x')
# normalize every set to [0, 1]
Xn, Xa2, Xa3, Xa4, Xam2, Xam4 = (normalize_0_1(X)
                                 for X in (Xn, Xa2, Xa3, Xa4, Xam2, Xam4))

# create a graph
scope = 'base'
x = tf.placeholder("float", shape=[None, dim, dim, 1])
is_training = tf.placeholder_with_default(False, (), 'is_training')