Example #1
 )  # ;D_loss_list.append(D_loss);sC_loss_list.append(sC_loss)
 np.savetxt(DA_model_folder + '/MMD_loss.txt', M_loss_list)
 # np.savetxt(DA_model_folder+'/G_loss.txt',G_loss_list);
 np.savetxt(DA_model_folder + '/src_clf_loss.txt', sC_loss_list)
 # print and plot results
 print_block(symbol='-', nb_sybl=60)
 print_yellow(os.path.basename(DA_model_folder))
 if nb_trg_labels > 0:
     train_auc_list.append(train_target_AUC)
     tC_loss_list.append(tC_loss)
     np.savetxt(os.path.join(DA_model_folder, 'trg_train_auc.txt'),
                train_auc_list)
     np.savetxt(os.path.join(DA_model_folder, 'trg_clf_loss.txt'),
                tC_loss_list)
     print_green(
         'AUC: T-test {0:.4f}, T-valid {1:.4f}, T-train {2:.4f}, S-test: {3:.4f}'
         .format(test_target_AUC, val_target_AUC, train_target_AUC,
                 test_source_AUC))
     print_yellow(
         'Loss: D:{:.4f}, S:{:.4f}, T:{:.4f}, Iter:{:}'.format(
             M_loss, sC_loss, tC_loss, iteration))
     plot_LOSS(
         DA_model_folder + '/loss_{}.png'.format(DA_model_name),
         M_loss_list, sC_loss_list, tC_loss_list)
     plot_loss(
         DA_model_folder, M_loss_list, M_loss_list,
         DA_model_folder + '/MMD_loss_{}.png'.format(DA_model_name))
     plot_src_trg_AUCs(
         DA_model_folder + '/AUC_src_{}.png'.format(DA_model_name),
         train_auc_list, val_auc_list, test_auc_list, src_test_list)
     plot_AUCs(
          DA_model_folder + '/AUC_trg_{}.png'.format(DA_model_name),
          train_auc_list, val_auc_list, test_auc_list)  # argument list truncated in the source snippet; the target AUC lists are assumed
Example #2
else:
    dataset_dir = './data/Phase_fluo_Cells/0_FUCCI_Timelapse/'

phase_img_folder = dataset_dir + 'f0_phase_cropped/'
fl1_img_folder = dataset_dir + 'f0_fl1_cropped/'
fl2_img_folder = dataset_dir + 'f0_fl2_cropped/'

phase_img_files = glob.glob(phase_img_folder + '*.tif')
fl1_img_files = glob.glob(fl1_img_folder + '*.tif')
fl2_img_files = glob.glob(fl2_img_folder + '*.tif')

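# Sanity check: print the names of the first few matched files from each channel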
nb_imgs = 6
for i in range(nb_imgs):
    print_yellow(os.path.basename(phase_img_files[i]))
    print_red(os.path.basename(fl1_img_files[i]))
    print_green(os.path.basename(fl2_img_files[i]))

# plt.ion();
fig = plt.figure(1)

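# Pick one frame and derive the matching FL1/FL2 file names by swapping the channel tags in the phase file name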
index = 1
pha_file_name = os.path.basename(phase_img_files[index])
fl1_file_name = pha_file_name.replace('ch0', 'ch1').replace('mhilbert', 'mFL1')
fl2_file_name = pha_file_name.replace('ch0', 'ch2').replace('mhilbert', 'mFL2')
pha_img = io.imread(phase_img_folder + pha_file_name)
fl1_img = io.imread(fl1_img_folder + fl1_file_name)
fl2_img = io.imread(fl2_img_folder + fl2_file_name)
print(pha_file_name)
print(fl1_file_name)
print(fl2_file_name)
# fl1_img = io.imread(fl1_img_files[index])
Example #3
                              nb_cnn=nb_cnn,
                              bn=batch_norm,
                              bn_training=is_training,
                              filters=filters,
                              kernel_size=[kernel_size, kernel_size],
                              scope_name=scope)

# create a saver
key_direct = {}
vars_list = tf.global_variables(scope)
key_list = [v.name[:-2] for v in tf.global_variables(scope)]
for key, var in zip(key_list, vars_list):
    key_direct[key] = var
saver = tf.train.Saver(key_direct, max_to_keep=nb_steps)
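# print out the names of the variables captured by the saver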
for v in key_list:
    print_green(v)

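# Per-pixel reconstruction error: squared error (mse), negative Gaussian kernel (correntropy), or absolute error (mae)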
if loss == 'mse':
    err_map = tf.square(y - x)
elif loss == 'correntropy':
    sigma = 0.1
    err_map = -tf.exp(-tf.square(x - y) / sigma)
elif loss == 'mae':
    err_map = tf.abs(y - x)

# loss function
err_mean = tf.reduce_mean(err_map, [1, 2, 3])
cost = tf.reduce_mean(err_mean)
# update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# with tf.control_dependencies(update_ops):
trn_norm_step = tf.train.AdamOptimizer(lr).minimize(cost)  # argument truncated in the source snippet; minimizing the cost defined above is assumed
Example #4
     src_loss_list, src_loss), np.append(src_tst_auc_list, src_auc)
 mmd_loss_list, trg_val_auc_list, trg_tst_auc_list = np.append(
     mmd_loss_list,
     MMD_loss), np.append(trg_val_auc_list, trg_val_auc), np.append(
         trg_tst_auc_list, trg_auc)
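 # Save the loss and AUC histories to text files so training can be inspected offline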
 np.savetxt(DA_model_folder + '/source_train_loss.txt',
            src_loss_list)
 np.savetxt(DA_model_folder + '/source_test_auc.txt',
            src_tst_auc_list)
 np.savetxt(DA_model_folder + '/mmd_train_loss.txt', trg_loss_list)
 np.savetxt(DA_model_folder + '/target_test_auc.txt',
            trg_tst_auc_list)
 np.savetxt(DA_model_folder + '/target_val_auc.txt',
            trg_val_auc_list)
 print_green(
     'LOSS: src-test {0:.4f} mmd {1:.4f}; AUC: T-val {2:.4f} T-test {3:.4f} S-train {4:.4f} S-test {5:.4f}-iter-{6:}'
     .format(src_loss, MMD_loss, trg_val_auc, trg_auc, src_trn_auc,
             src_auc, iteration))
 print(DA_model_name)
 if nb_trg_labels > 0:
     plot_AUC(
         DA_model_folder + '/auc-full_{}.png'.format(DA_model_name),
         trg_trn_auc_list, trg_val_auc_list, trg_tst_auc_list)
     plot_LOSS_mmd(
         DA_model_folder +
         '/loss-full_{}.png'.format(DA_model_name), trg_loss_list,
         src_loss_list, mmd_loss_list)
 plot_auc(DA_model_folder + '/auc_{}.png'.format(DA_model_name),
          trg_val_auc_list, trg_tst_auc_list)
 plot_loss_mmd(
     DA_model_folder + '/loss_{}.png'.format(DA_model_name),
     src_loss_list, mmd_loss_list)

Example #5
for image_indx in range(len(image_ids)):
    # Load the map
    image_id = image_ids[image_indx]
    pha_file_name = os.path.basename(phase_img_files[image_id])
    mask_file_name = 'm_' + pha_file_name.replace('ch0', 'ch4').replace(
        'mhilbert', 'mFL4')
    pha_img = io.imread(phase_img_folder + pha_file_name)
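    # Downsample by a factor of 2 in each dimension (applied to both the phase image and the mask)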
    pha_img = pha_img[::2, ::2]
    map = io.imread(combined_masks + mask_file_name)
    map = map[::2, ::2]
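    # Report progress every 10 images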
    if image_indx % 10 == 0:
        print_yellow('The {}-th image'.format(image_indx))
        print_yellow(mask_files[image_id])
        print_green(combined_masks + mask_file_name)
    mask_list = []

    # Generate the edge masks for all objects
    uni_values = np.unique(map)
    for cls_indx in range(1, len(uni_values)):
        map_layer = map == uni_values[cls_indx]
        mask_ext = np.zeros([map_layer.shape[0] + 2, map_layer.shape[1] + 2])
        shp = map_layer.shape
        mask_ext[1:-1, 1:-1] = map_layer
        masks = np.zeros(map_layer.shape + (9, ))
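        # Collect the padded mask shifted by one pixel in each of the 3x3 neighbourhood directions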
        for i in range(-1, 2):
            for j in range(-1, 2):
                masks[:, :,
                      (i + 1) * 3 + (j + 1)] = mask_ext[i + 1:i + shp[0] + 1,
                                                        j + 1:j + shp[1] + 1]
Example #6
if loss == 'mse':
    err_map = tf.square(y - x)
elif loss == 'mae':
    err_map = tf.abs(y - x)

# tf.keras.backend.clear_session()
# create a saver
vars_list = tf.global_variables(scope)
key_list = [v.name[:-2] for v in tf.global_variables(scope)]
key_direct = {}
for key, var in zip(key_list, vars_list):
    key_direct[key] = var
saver = tf.train.Saver(key_direct, max_to_keep=1)

# print out trainable parameters
for v in key_list:
    print_green(v)


def evaluate(sess, y, x, is_training, err_map, X_tst, batch_size=100):
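    # Run the network over X_tst in mini-batches and collect the reconstructions and error maps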
    y_list, err_map_list = [], []
    i = 0
    while batch_size * i < X_tst.shape[0]:
        batch_x = X_tst[batch_size * i:min(batch_size *
                                           (i + 1), X_tst.shape[0]), :]
        y_recon = y.eval(session=sess,
                         feed_dict={
                             x: batch_x,
                             is_training: False
                         })
        y_list.append(y_recon)
        err_map_list.append(