Esempio n. 1
0
def train(path_weights, src_images_train, tar_images_train):
    """Train a Pix2Pix GAN and persist weights, training history and a plot.

    Side effects:
      - normalizes ``src_images_train`` IN PLACE (mean/std) — the caller's
        array is mutated;
      - writes best weights (by training 'dice') and the final model under
        ``path_weights``;
      - dumps ``history.history`` as JSON under ``path_json``;
      - saves a dice/loss curve image under ``path_plot``.
    """
    # dataset = [src_images_train, tar_images_train]

    # Create the Pix2Pix model (generator + discriminator).
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    # Adam(2e-4, beta_1=0.5) for both networks — the pix2pix paper settings.
    model.compile(discriminator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                                   beta_1=0.5),
                  generator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                               beta_1=0.5),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss,
                  metrics=['accuracy', dice])

    # Normalization of the train set: zero-center and scale to unit std.
    # NOTE(review): mutates the caller's array in place and assumes the
    # array has float dtype and std != 0 — confirm at the call site.
    mean = np.mean(src_images_train)  # mean for data centering
    std = np.std(src_images_train)  # std for data normalization
    src_images_train -= mean
    src_images_train /= std

    # Keep only the weights that achieve the best *training* dice.
    checkpoint = ModelCheckpoint(path_weights +
                                 'gan_lungseg_exp3_100epc_best.hdf5',
                                 monitor='dice',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='max')
    #checkpoint2 = ModelCheckpoint(path_weights+'best_weights_val_gan_512_masked_lung_blur_500epc.hdf5', monitor='val_dice', verbose=1, save_best_only=True,save_weights_only=True, mode='max')

    # Train with 10% of the (shuffled) data held out for validation.
    history = model.fit(src_images_train,
                        tar_images_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=1,
                        shuffle=True,
                        validation_split=0.1,
                        callbacks=[checkpoint])
    #history=model.fit(src_images_train, tar_images_train, batch_size=BATCH_SIZE, epochs=EPOCHS,callbacks=[checkpoint,checkpoint2],validation_data=(src_images_val, tar_images_val))

    # Also save the model as of the *last* epoch (may differ from best).
    model.save(path_weights + 'gan_lungseg_exp3_100epc_last.hdf5')

    # convert the history.history dict to a pandas DataFrame:
    hist_df = pd.DataFrame(history.history)

    # save to json:
    print("Saving history")
    hist_json_file = path_json + 'gan_lungseg_exp3_100epc_history.json'
    with open(hist_json_file, mode='w') as f:
        hist_df.to_json(f)
    print("History saved")

    # Plot train dice, validation dice and generator L1 loss together.
    plt.plot(history.history['dice'])
    plt.plot(history.history['val_dice'])
    plt.plot(history.history['g_l1'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val', 'Loss'], loc='upper left')
    # save plot to file
    plt.savefig(path_plot + 'gan_lungseg_exp3_100epc_plot.png')
Esempio n. 2
0
def main():
    """Generate predictions with a trained Pix2Pix model.

    Restores the checkpoint in ``args.ckpt_dir`` (exits if none exists) and
    runs the generator over every batch of the unlabeled test list, saving a
    side-by-side input/output visualization to ``<out_dir>/result`` and the
    bare generated sample to ``<out_dir>/back``.
    """
    args = parser.parse_args()

    filelist_test = args.test
    result_dir = args.out_dir + '/result'
    ckpt_dir = args.ckpt_dir
    back_dir = args.out_dir + '/back'

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    # BUG FIX: back_dir is used below for savepath2 but was never created,
    # so saving the '(back)' images failed on a fresh output directory.
    if not os.path.exists(back_dir):
        os.makedirs(back_dir)

    batch_size = args.visnum

    database = db.DBreader(filelist_test,
                           batch_size=batch_size,
                           labeled=False,
                           resize=[256, 256],
                           shuffle=False)
    #  databaseo = db.DBreader(filelist_toriginal, batch_size=batch_size, labeled=False, resize=[256, 256], suffle=False)

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)

    saver = tf.train.Saver(tf.global_variables())

    # Inference requires trained weights; bail out if no checkpoint exists.
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sys.exit("There is no trained model")

    total_batch = database.total_batch

    print('Generating...')
    for step in range(total_batch):
        #img_input, img_target = split_images(database.next_batch(), direction)
        #img_target = normalize(databaseo.next_batch())
        img_input = normalize(database.next_batch())

        generated_samples = denormalize(
            model.sample_generator(img_input, batch_size=batch_size))
        #img_target = denormalize(img_target)
        img_input = denormalize(img_input)

        # Concatenate input and prediction side by side (width axis).
        img_for_vis = np.concatenate([img_input, generated_samples], axis=2)
        savepath = result_dir + '/output_' + "_Batch" + str(step).zfill(
            6) + '.png'
        savepath2 = back_dir + '/output_' + "_Batch" + str(step).zfill(
            6) + '(back).png'

        save_visualization(img_for_vis, (batch_size, 1), save_path=savepath)
        save_visualization2(generated_samples, (batch_size, 1),
                            save_path=savepath2)

    print('finished!!')
Esempio n. 3
0
def train(path_weights, src_images_train, tar_images_train):
    """Train a Pix2Pix GAN for fold ``K`` and persist its artifacts.

    Writes the best weights (by training 'dice'), the final model, the
    training history as JSON, and a dice/loss plot, each tagged with the
    current fold number ``K``.
    """

    def _adam():
        # Adam(2e-4, beta_1=0.5) for both networks — the pix2pix settings.
        return tf.keras.optimizers.Adam(2e-4, beta_1=0.5)

    # Build and compile the GAN.
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    model.compile(discriminator_optimizer=_adam(),
                  generator_optimizer=_adam(),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss,
                  metrics=['accuracy', dice])

    # Checkpoint only the weights achieving the best training dice.
    best_weights_path = (path_weights + 'originalGen_gan_ds1_150epc_best_k' +
                         str(K) + '.hdf5')
    checkpoint = ModelCheckpoint(best_weights_path,
                                 monitor='dice',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='max')

    # Train with 10% of the (shuffled) data held out for validation.
    history = model.fit(src_images_train,
                        tar_images_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=1,
                        shuffle=True,
                        validation_split=0.1,
                        callbacks=[checkpoint])

    # Save the state at the last epoch as well (may differ from best).
    last_model_path = (path_weights + 'originalGen_gan_ds1_150epc_last_k' +
                       str(K) + '.hdf5')
    model.save(last_model_path)

    # Persist the Keras history as JSON via a pandas DataFrame.
    print("Saving history")
    hist_json_file = path_json + 'originalGen_gan_ds1_150epc_k' + str(
        K) + '.json'
    with open(hist_json_file, mode='w') as f:
        pd.DataFrame(history.history).to_json(f)
    print("History saved")

    # Plot train dice, validation dice and generator L1 loss together.
    for curve in ('dice', 'val_dice', 'g_l1'):
        plt.plot(history.history[curve])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val', 'Loss'], loc='upper left')
    plt.savefig(path_plot + 'originalGen_gan_ds1_150epc_k' + str(K) + '.png')
Esempio n. 4
0
def train(src_images_train, tar_images_train, src_images_val, tar_images_val):
    """Train a Pix2Pix GAN with an explicit validation set.

    Saves the best weights by training 'dice' and by validation 'val_dice'
    separately, dumps the Keras history to JSON, and writes a dice plot.

    Args:
        src_images_train, tar_images_train: training input/target images.
        src_images_val, tar_images_val: validation input/target images.
    """
    # Create the Pix2Pix model (generator + discriminator).
    # (Removed unused local `dataset = [src_images_train, tar_images_train]`.)
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    # BUG FIX: the checkpoints below monitor 'dice'/'val_dice', but the
    # model was compiled without the dice metric, so Keras could never
    # evaluate the monitored quantity. Compile with the same metrics as
    # the sibling train() functions in this project.
    model.compile(discriminator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                                   beta_1=0.5),
                  generator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                               beta_1=0.5),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss,
                  metrics=['accuracy', dice])

    # Best-on-train and best-on-validation checkpoints, saved separately.
    checkpoint = ModelCheckpoint(path_weights +
                                 'best_weights_train_gan_aumentation.hdf5',
                                 monitor='dice',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='max')
    checkpoint2 = ModelCheckpoint(path_weights +
                                  'best_weights_val_gan_aumentation.hdf5',
                                  monitor='val_dice',
                                  verbose=1,
                                  save_best_only=True,
                                  save_weights_only=True,
                                  mode='max')
    history = model.fit(src_images_train,
                        tar_images_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        callbacks=[checkpoint, checkpoint2],
                        validation_data=(src_images_val, tar_images_val))

    # convert the history.history dict to a pandas DataFrame:
    hist_df = pd.DataFrame(history.history)

    # save to json:
    print("anatiel/tf_pix2pix/saving history")
    hist_json_file = 'anatiel/tf_pix2pix/history.json'
    with open(hist_json_file, mode='w') as f:
        hist_df.to_json(f)
    print("history saved")

    # Plot the training dice curve, save it, and display it.
    plt.plot(history.history['dice'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train'], loc='upper left')
    # save plot to file
    plt.savefig('anatiel/tf_pix2pix/plot_dice.png')
    plt.show()
Esempio n. 5
0
def main():
    """Load a trained Pix2Pix model and run lesion-segmentation predictions.

    Compiles the model with its training-time optimizers/losses, loads the
    weights from ``model_path`` and delegates the prediction over every
    image in ``src_dir`` to ``execExecPredictByUnet``.
    """
    ext = '.nii.gz'
    search_pattern = '*'
    dataset = 'dataset2'  # only referenced by the commented remote paths

    # local
    # (fixed: these literals had an f-prefix but no placeholders)
    main_dir = '/home/anatielsantos/mestrado/datasets/dissertacao/final_tests_dis/teste2_dataset2/Test'
    main_mask_dir = '/home/anatielsantos/mestrado/datasets/dissertacao/final_tests_dis/teste2_dataset2/Test_mask'
    model_path = '/home/anatielsantos/mestrado/datasets/dissertacao/final_tests_dis/gan_test2_exp2_2_150epc_last.hdf5'

    # remote
    # main_dir = f'/data/flavio/anatiel/datasets/dissertacao/{dataset}/image'
    # main_mask_dir = f'/data/flavio/anatiel/datasets/dissertacao/{dataset}/mask'
    # model_path = '/data/flavio/anatiel/models/dissertacao/unet_500epc_last.h5'

    # (fixed: dropped the no-op '{}'.format(...) wrappers)
    src_dir = main_dir
    mask_dir = main_mask_dir
    dst_dir = main_dir + '/GANExp2Tests2Preds'

    nproc = mp.cpu_count()
    print('Num Processadores = ' + str(nproc))

    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    # Compile exactly as in training so load_weights restores cleanly.
    model.compile(discriminator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                                   beta_1=0.5),
                  generator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                               beta_1=0.5),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss)
    model.load_weights(model_path)

    execExecPredictByUnet(src_dir,
                          mask_dir,
                          dst_dir,
                          ext,
                          search_pattern,
                          model,
                          reverse=False,
                          desc='Predicting (GAN)',
                          parallel=False)
Esempio n. 6
0
def eval(input_path, output_path, checkpoint_path, model, gpu):
    """Translate a single face image with a trained network.

    Loads ``input_path``, detects 2D landmarks, renders them to an image,
    stacks image + landmark channels, runs the selected network ('Pix2Pix'
    or ResResNet) on GPU ``gpu``, and saves the result resized back to the
    original dimensions at ``output_path``.

    NOTE(review): the name ``eval`` shadows the builtin; kept unchanged for
    interface compatibility with callers.
    """
    input = Image.open(input_path)
    input = input.convert("RGB")

    # Snap the working size down to a multiple of 128 (presumably a network
    # architecture constraint — TODO confirm).
    w, h = input.size
    w_, h_ = 128 * (w // 128), 128 * (h // 128)

    fa = FaceAlignment(LandmarksType._2D, device="cuda:" + str(gpu))
    # [0]: only the first detected face is used.
    landmarks = fa.get_landmarks_from_image(input_path)[0]
    landmark_img = plot_landmarks(np.array(input), landmarks)

    # NOTE(review): torchvision Resize/CenterCrop take (h, w); (w_, h_) is
    # passed here, which only matches for square inputs — verify ordering.
    transform_forward = transforms.Compose([
        transforms.Resize((w_, h_)),
        transforms.CenterCrop((w_, h_)),
        transforms.ToTensor()
    ])
    transform_backward = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((w, h)),
        transforms.CenterCrop((w, h)),
    ])

    input = transform_forward(input)
    landmark_img = transform_forward(landmark_img)

    # Select the architecture by name; anything other than "Pix2Pix"
    # falls back to ResResNet.
    if model == "Pix2Pix":
        NFNet = Pix2Pix()
    else:
        NFNet = ResResNet()

    checkpoint = torch.load(checkpoint_path)
    NFNet.load_state_dict(checkpoint['my_classifier'])
    NFNet.to(gpu)

    # Stack image and landmark tensors along the channel axis, add a
    # batch dimension, and run the network.
    x = torch.cat((input, landmark_img), 0)
    x = x.unsqueeze(0)
    x = x.to(gpu)
    output = NFNet(x)
    output = output.to("cpu")
    output = transform_backward(output[0])
    output.save(output_path)
def main():
    """Compile a Pix2Pix model, restore trained weights and predict fold 2."""
    ext = '.nii.gz'
    search_pattern = '*'
    dataset = 'dataset1'

    # remote
    main_dir = f'/data/flavio/anatiel/datasets/dissertacao/final_tests/kfold/{dataset}/images_fold_2.npz'
    main_mask_dir = f'/data/flavio/anatiel/datasets/dissertacao/final_tests/kfold/{dataset}/masks_fold_2.npz'
    model_path = '/data/flavio/anatiel/models/dissertacao/unet_150epc_last_k2.h5'

    # local alternatives (disabled)
    # main_dir = f'/home/anatielsantos/mestrado/datasets/dissertacao/bbox/dataset1/images/k0'
    # main_mask_dir = f'/home/anatielsantos/mestrado/datasets/dissertacao/bbox/dataset1/masks/k0'
    # model_path = '/home/anatielsantos/Downloads/models_dissertacao/gan_ds1_150epc_best_k0.hdf5'

    src_dir = main_dir
    mask_dir = main_mask_dir
    dst_dir = main_dir + '/gan_ds1_preds'

    print('Num Processadores = ' + str(mp.cpu_count()))

    # Rebuild the GAN with the training-time optimizers/losses so the
    # saved weights restore cleanly.
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    disc_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    gen_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    model.compile(discriminator_optimizer=disc_opt,
                  generator_optimizer=gen_opt,
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss)
    model.load_weights(model_path)

    # Hand the heavy lifting to the shared prediction driver.
    execExecPredictByUnet(src_dir,
                          mask_dir,
                          dst_dir,
                          ext,
                          search_pattern,
                          model,
                          reverse=False,
                          desc='Predicting (GAN)',
                          parallel=False)
Esempio n. 8
0
def main():
    """Compile a Pix2Pix model, load trained weights and run predictions."""
    ext = '.nii.gz'
    search_pattern = '*'
    dataset = 'test'

    # local alternatives (disabled)
    # main_dir = f'/home/anatielsantos/mestrado/datasets/dissertacao/{dataset}/image'
    # model_path = '/home/anatielsantos/mestrado/models/dissertacao/gan/gan_500epc_best.hdf5'

    # remote
    main_dir = f'/data/flavio/anatiel/datasets/dissertacao/{dataset}/image'
    model_path = '/data/flavio/anatiel/models/dissertacao/gan_500epc_last.hdf5'

    src_dir = main_dir
    dst_dir = main_dir + '/GanPredsLast'

    print('Num Processadores = ' + str(mp.cpu_count()))

    # Rebuild the GAN with the training-time optimizers/losses, then
    # restore the trained weights.
    model = Pix2Pix(img_rows, img_cols, img_depth, img_depth)
    disc_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    gen_opt = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
    model.compile(discriminator_optimizer=disc_opt,
                  generator_optimizer=gen_opt,
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss)
    model.load_weights(model_path)

    # Hand the heavy lifting to the shared prediction driver.
    execExecPredictByGan(src_dir,
                         dst_dir,
                         ext,
                         search_pattern,
                         model,
                         reverse=False,
                         desc='Predicting (Pix2pix)',
                         parallel=False)
def main():
    """Train a Pix2Pix-style model on paired real/simulated image batches.

    Restores the latest checkpoint from ``<out_dir>/checkpoint`` (or
    initializes fresh variables), alternates discriminator/generator steps,
    logs losses and sample images to TensorBoard, and snapshots the model
    every 5000 iterations.
    """
    args = parser.parse_args()
    result_dir = args.out_dir + '/result'
    ckpt_dir = args.out_dir + '/checkpoint'

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    batch_size = args.batch_size

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)

    saver = tf.train.Saver(tf.global_variables())

    # Resume from a checkpoint when one exists, else start from scratch.
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # NOTE(review): the input placeholder is fixed at 472x472 while the
    # target uses WIDTH x HEIGHT — confirm the asymmetry is intentional.
    input_img = tf.placeholder(tf.float32, [batch_size] + [472, 472, 3])
    target_img = tf.placeholder(tf.float32, [batch_size] + [WIDTH, HEIGHT, 3])
    real_dataset = ImageCollector("../../../new_env_dataset", 1, 64,
                                  batch_size)  # Real data
    simul_dataset = ImageCollector("../../../simul_dataset__", 1, 64,
                                   batch_size)

    #########################
    # tensorboard summary   #
    #########################
    dstep_loss = tf.placeholder(tf.float32)
    gstep_loss = tf.placeholder(tf.float32)
    image_shaped_input = tf.reshape(
        tf.image.resize_images(input_img, [WIDTH, HEIGHT]),
        [-1, WIDTH, HEIGHT, 3])
    image_shaped_output = tf.reshape(target_img, [-1, WIDTH, HEIGHT, 3])

    tf.summary.scalar('d_step_loss', dstep_loss)
    tf.summary.scalar('g_step_loss', gstep_loss)

    tf.summary.image(
        'input / output',
        tf.concat([image_shaped_input, image_shaped_output], axis=2), 3)
    summary_merged = tf.summary.merge_all()

    writer = tf.summary.FileWriter(args.out_dir, sess.graph)

    # Start the asynchronous data-loading threads.
    real_dataset.StartLoadData()
    simul_dataset.StartLoadData()

    # FIX (idiom): the local was named `iter`, shadowing the builtin.
    num_iters = 100000
    for i in range(num_iters):
        real_data = real_dataset.getLoadedData()
        simul_data = simul_dataset.getLoadedData()

        rgb_img = real_data[1]
        simul_img = simul_data[1]

        loss_D = model.train_discrim(
            simul_img, rgb_img)  # Train Discriminator and get the loss value
        loss_GAN = model.train_gen(
            simul_img, rgb_img)  # Train Generator and get the loss value

        if i % 100 == 0:
            print('Step: [', i, '/', num_iters, '], D_loss: ', loss_D,
                  ', G_loss_GAN: ', loss_GAN)

        if i % 500 == 0:
            # Periodically log losses and a sample image to TensorBoard.
            generated_samples = model.sample_generator(simul_img,
                                                       batch_size=batch_size)

            summary = sess.run(summary_merged,
                               feed_dict={
                                   dstep_loss: loss_D,
                                   gstep_loss: loss_GAN,
                                   input_img: simul_img,
                                   target_img: generated_samples
                               })
            writer.add_summary(summary, i)

        if i % 5000 == 0:
            saver.save(sess, ckpt_dir + '/model_iter' + str(i))
Esempio n. 10
0
def main():
    """Train a Pix2Pix model epoch by epoch with checkpoint resumption.

    The current epoch is stored in a TF variable so training resumes from
    the checkpointed epoch. Each epoch alternates discriminator/generator
    updates over all batches, logs losses every 100 steps, saves a sample
    visualization every 500 steps, and checkpoints after every epoch.
    """
    # Epoch counter lives in the graph so it is saved/restored with the model.
    global_epoch = tf.Variable(0, trainable=False, name='global_step')
    global_epoch_increase = tf.assign(global_epoch, tf.add(global_epoch, 1))

    args = parser.parse_args()
    direction = args.direction
    filelist_train = args.train
    result_dir = args.out_dir + '/result'
    ckpt_dir = args.out_dir + '/checkpoint'

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    total_epoch = args.epochs
    batch_size = args.batch_size

    # Each sample is a 256x512 image holding the input/target pair side by
    # side; split_images() separates them below.
    database = db.DBreader(filelist_train,
                           batch_size=batch_size,
                           labeled=False,
                           resize=[256, 512])

    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)

    saver = tf.train.Saver(tf.global_variables())

    # Resume from a checkpoint when one exists, else start from scratch.
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    total_batch = database.total_batch

    epoch = sess.run(global_epoch)
    while True:
        if epoch == total_epoch:
            break
        for step in range(total_batch):
            img_input, img_target = split_images(database.next_batch(),
                                                 direction)
            img_target = normalize(img_target)
            img_input = normalize(img_input)

            loss_D = model.train_discrim(
                img_input,
                img_target)  # Train Discriminator and get the loss value
            loss_GAN, loss_L1 = model.train_gen(
                img_input,
                img_target)  # Train Generator and get the loss value

            if step % 100 == 0:
                print('Epoch: [', epoch, '/', total_epoch, '], ', 'Step: [',
                      step, '/', total_batch, '], D_loss: ', loss_D,
                      ', G_loss_GAN: ', loss_GAN, ', G_loss_L1: ', loss_L1)

            if step % 500 == 0:
                # Save an input | generated | target strip for inspection.
                generated_samples = denormalize(
                    model.sample_generator(img_input, batch_size=batch_size))
                img_target = denormalize(img_target)
                img_input = denormalize(img_input)

                img_for_vis = np.concatenate(
                    [img_input, generated_samples, img_target], axis=2)
                savepath = result_dir + '/output_' + 'EP' + str(epoch).zfill(
                    3) + "_Batch" + str(step).zfill(6) + '.jpg'
                save_visualization(img_for_vis, (batch_size, 1),
                                   save_path=savepath)

        # Bump the in-graph epoch counter and checkpoint the whole model.
        epoch = sess.run(global_epoch_increase)
        saver.save(sess, ckpt_dir + '/model_epoch' + str(epoch).zfill(3))
Esempio n. 11
0
# (nothing gets printed in Jupyter, only if you run it standalone)
# Create the TF session and register it as Keras's default session so all
# Keras layers/models below are built into this graph.
sess = tf.Session(config=config)
set_session(
    sess)  # set this TensorFlow session as the default session for Keras

# params

batch_size = 2
timesteps = 2
im_width = im_height = 256
# end params

# Build the training generator over the training file split.
train_files, test_files = get_train_test_files()
train_gen = get_data_gen(files=train_files,
                         timesteps=timesteps,
                         batch_size=batch_size,
                         im_size=(im_width, im_height))
# Recurrent pix2pix: lookback = timesteps - 1 previous frames as context.
gan = Pix2Pix(im_height=im_height, im_width=im_width, lookback=timesteps - 1)
print("Generator Summary")
gan.generator.summary()
print()
print("Discriminator Summary")
gan.discriminator.summary()
print()
print("Combined Summary")
gan.combined.summary()
# Train and periodically save the generator under the given file name.
gan.train(train_gen,
          epochs=600,
          batch_size=batch_size,
          save_interval=200,
          save_file_name="r_p2p_gen_t2.model")
Esempio n. 12
0
def main():
    """Train Pix2Pix on paired datasets and plot per-epoch average losses.

    Reads input images from ``args.train`` and targets from
    ``args.original``, resumes from a checkpoint when available, trains for
    ``args.epochs`` epochs, and at the end saves a loss curve
    ('graph99(50%).png') for the discriminator, GAN and L1 losses.
    """
    # Epoch counter lives in the graph so it is saved/restored with the model.
    global_epoch = tf.Variable(0, trainable=False, name='global_step')
    global_epoch_increase = tf.assign(global_epoch, tf.add(global_epoch, 1))
    args = parser.parse_args()

    filelist_train = args.train
    filelist_original = args.original
    result_dir = args.out_dir + '/result'
    back_dir = args.out_dir + '/back'
    ckpt_dir = args.out_dir + '/checkpoint'

    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    total_epoch = args.epochs
    batch_size = args.batch_size
    # Inputs and targets come from two parallel readers; both must yield
    # batches in the same order for the pairs to line up.
    database = db.DBreader(filelist_train,
                           batch_size=batch_size,
                           labeled=False,
                           resize=[256, 256])
    databaseo = db.DBreader(filelist_original,
                            batch_size=batch_size,
                            labeled=False,
                            resize=[256, 256])
    sess = tf.Session()
    model = Pix2Pix(sess, batch_size)
    saver = tf.train.Saver(tf.global_variables())

    ckpt = tf.train.get_checkpoint_state(ckpt_dir)

    # Resume from a checkpoint when one exists, else start from scratch.
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())
    total_batch = database.total_batch
    epoch = sess.run(global_epoch)
    print(total_batch)
    # Per-epoch mean losses, used for the final plot.
    lossd = []
    lossgan = []
    lossl1 = []

    while True:
        # Per-step losses for the current epoch.
        templossd = []
        templossgan = []
        templossl1 = []

        if epoch == total_epoch:
            # Training done: plot the per-epoch mean losses and exit.
            flg.plot(range(1, len(lossd) + 1), lossd, 'r', label='loss_Dis')
            flg.plot(range(1,
                           len(lossgan) + 1),
                     lossgan,
                     'g',
                     label='loss_Gan')
            flg.plot(range(1, len(lossl1) + 1), lossl1, 'b', label='loss_L1')
            flg.legend(loc='upper right')
            flg.ylabel('loss')
            flg.xlabel('Number of epoch')
            flg.savefig('graph99(50%).png')
            break

        for step in range(total_batch):
            img_target = normalize(databaseo.next_batch())
            img_input = normalize(database.next_batch())

            loss_D = model.train_discrim(
                img_input,
                img_target)  # Train Discriminator and get the loss value
            loss_GAN, loss_L1 = model.train_gen(
                img_input,
                img_target)  # Train Generator and get the loss value
            templossd.append(loss_D)
            templossgan.append(loss_GAN)
            templossl1.append(loss_L1)

            print('Epoch: [', epoch, '/', total_epoch, '], ', 'Step: [', step,
                  '/', total_batch, '], D_loss: ', loss_D, ', G_loss_GAN: ',
                  loss_GAN, ', G_loss_L1: ', loss_L1)

        # if epoch % 5 == 0:
    #         generated_samples = denormalize(model.sample_generator(img_input, batch_size=batch_size))
    #           img_target = denormalize(img_target)
    #          img_input = denormalize(img_input)

    #           img_for_vis = np.concatenate([img_input, generated_samples, img_target], axis=2)
    #            savepath = result_dir + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.png'
    #            savepath2 = back_dir + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '(back).png'

    #            save_visualization(img_for_vis, (batch_size, 1), save_path=savepath)
    #            save_visualization2(generated_samples, (batch_size, 1), save_path=savepath2)

        # Record this epoch's mean losses for the final plot.
        lossd.append(np.mean(templossd))
        lossgan.append(np.mean(templossgan))
        lossl1.append(np.mean(templossl1))

        epoch = sess.run(global_epoch_increase)
        saver.save(sess, ckpt_dir + '/model_epoch' + str(epoch).zfill(3))
Esempio n. 13
0
    image_B = scipy.misc.imresize(image_B, (pix2pix.nW, pix2pix.nH))
    images_B = []
    images_B.append(image_B)
    images_B = np.array(images_B) / 127.5 - 1.
    generates_A = pix2pix.generator.predict(images_B)
    generate_A = generates_A[0]
    generate_A = np.uint8((np.array(generate_A) * 0.5 + 0.5) * 255)
    generate_A = Image.fromarray(generate_A)
    generated_image = Image.new('RGB', (pix2pix.nW, pix2pix.nH))
    generated_image.paste(generate_A, (0, 0, pix2pix.nW, pix2pix.nH))
    generated_image.save(save_path, quality=95)
    pass


def convert_to_gray_single_image(image_path,
                                 save_path,
                                 resize_height=256,
                                 resize_weidth=256):
    """Resize an image and save a grayscale (but 3-channel RGB) copy.

    Args:
        image_path: Path of the source image.
        save_path: Where to write the converted image.
        resize_height: Target height in pixels.
        resize_weidth: Target width in pixels (misspelled name kept for
            backward compatibility with keyword callers).
    """
    img = Image.open(image_path)
    # BUG FIX: Image.ANTIALIAS was deprecated and removed in Pillow 10;
    # LANCZOS is the same resampling filter under its proper name.
    img_color = img.resize((resize_weidth, resize_height), Image.LANCZOS)
    img_gray = img_color.convert('L')
    # Convert back to RGB so downstream consumers still get 3 channels.
    img_gray = img_gray.convert('RGB')
    img_gray.save(save_path, quality=95)


# Build the Pix2Pix model with its default configuration.
gan = Pix2Pix()
#gan.train(epochs=1200, batch_size=4, sample_interval=10, load_pretrained=True)

# Translate a single test image and write the generated output.
# NOTE(review): training above is commented out, so this presumably relies
# on Pix2Pix() loading pretrained weights internally — confirm.
predict_single_image(gan, './images/test_1.jpg',
                     './images/generate_test_1.jpg')
Esempio n. 14
0
def test(src_images_test, path_mask_test, weights_path):
    """Evaluate a trained Pix2Pix generator on a test set.

    Loads images and ground-truth masks, restores the trained weights,
    predicts slice by slice with the generator, prints segmentation
    metrics, and saves the stacked predictions to a .npy file.
    """
    # load model (compiled exactly as in training so weights restore cleanly)
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    model.compile(discriminator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                                   beta_1=0.5),
                  generator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                               beta_1=0.5),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss)

    imgs_test, imgs_maskt = load_images(src_images_test, path_mask_test)

    # # #Normalization of the test set
    # imgs_test = imgs_test.astype('float32')
    # mean = np.mean(imgs_test)  # mean for data centering
    # std = np.std(imgs_test)  # std for data normalization

    # #to float
    # imgs_test = imgs_test.astype('float32')
    # imgs_test -= mean
    # imgs_test /= std
    # imgs_maskt = imgs_maskt.astype('float32')

    # predict
    print('-' * 30)
    print('Loading saved weights...')
    model.load_weights(weights_path)
    print('-' * 30)
    print('Predicting data...')
    # Predict one slice at a time and stack results along the batch axis.
    output = None
    for i in range(imgs_test.shape[0]):
        pred = model.generator(imgs_test[i:i + 1], training=False).numpy()
        if output is None:
            output = pred
        else:
            output = np.concatenate([output, pred], axis=0)

    # calculate metrics
    print('-' * 30)
    print('Calculating metrics...')
    #print("DICE Test: ", dice(tar_images_test, output).numpy())

    # Debug dumps of value ranges/dtypes for predictions and ground truth.
    print(np.amin(imgs_maskt))
    print(np.amax(imgs_maskt))
    print(imgs_maskt.dtype)
    print(np.amin(output))
    print(np.amax(output))
    print(output.dtype)

    # NOTE(review): astype(int) truncates toward zero, so float predictions
    # in (0, 1) all become 0 — confirm the generator outputs values that
    # make this a meaningful binarization (e.g. already thresholded).
    dice, jaccard, sensitivity, specificity, accuracy, auc, prec, fscore = calc_metric(
        output.astype(int), imgs_maskt.astype(int))
    print("DICE: ", dice)
    print("IoU:", jaccard)
    print("Sensitivity: ", sensitivity)
    print("Specificity", specificity)
    print("ACC: ", accuracy)
    print("AUC: ", auc)
    print("Prec: ", prec)
    print("FScore: ", fscore)

    print('-' * 30)
    print('Saving predicted masks to files...')

    # remote
    # np.savez_compressed('/data/flavio/anatiel/datasets/dissertacao/test/image/teste/gan_mask_test_last_teste.npz', output)
    np.save('/data/flavio/anatiel/datasets/dissertacao/gan_preds_last.npy',
            output)

    # local
    # np.savez_compressed('/home/anatielsantos/mestrado/datasets/dissertacao/test/image/GanPredsLast/exam_test.npz', output)
    # np.save('/home/anatielsantos/mestrado/datasets/dissertacao/test/image/GanPredsLast/gan_preds_best.npy', output)

    print('-' * 30)
Esempio n. 15
0
    # test_set  = MyDataset(root_dir='night2day/test/', transform=my_transforms_val)

    ### dataloaders
    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=2, pin_memory=True)

    val_loader = DataLoader(val_set, batch_size=BATCH_SIZE,
                            shuffle=False,
                            num_workers=1, pin_memory=True)

    test_loader = DataLoader(test_set, batch_size=1,
                            shuffle=False,
                            num_workers=1, pin_memory=True)
    ### model creatng
    model = Pix2Pix()
    model = model.to(device)
    print(count_parameters(model))

    ### init weights of model: conv, linear layers to N(0, 0.02), biases to 0
    for m in model.parameters():
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            init.normal_(m.weight.data, 0.0, 0.02)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)

    ### WANDB
    #wandb.init(project='')
    #wandb.watch(model)
Esempio n. 16
0
def train(path_weights, src_images_train, tar_images_train):
    """Train a Pix2Pix GAN for lung segmentation with on-the-fly augmentation.

    Parameters
    ----------
    path_weights : str
        Directory where the best-checkpoint and final weight files are written.
    src_images_train : np.ndarray
        Source (input) images. NOTE: normalized *in place* (zero mean,
        unit std), so the caller's array is mutated.
    tar_images_train : np.ndarray
        Target (ground-truth mask) images.

    Side effects: saves weights, a JSON training history (path_json) and a
    metrics plot (path_plot). Relies on module-level BATCH_SIZE, EPOCHS,
    IMG_HEIGHT/WIDTH, INPUT/OUTPUT_CHANNELS, dice, discriminator_loss,
    generator_loss.
    """
    # creating pix2pix
    model = Pix2Pix(IMG_HEIGHT, IMG_WIDTH, INPUT_CHANNELS, OUTPUT_CHANNELS)
    model.compile(discriminator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                                   beta_1=0.5),
                  generator_optimizer=tf.keras.optimizers.Adam(2e-4,
                                                               beta_1=0.5),
                  discriminator_loss=discriminator_loss,
                  generator_loss=generator_loss,
                  metrics=['accuracy', dice])

    # Normalization of the train set (mean-centering + std scaling, in place)
    mean = np.mean(src_images_train)  # mean for data centering
    std = np.std(src_images_train)  # std for data normalization
    src_images_train -= mean
    src_images_train /= std

    print('Train test split')
    X_train, X_test, y_train, y_test = train_test_split(src_images_train,
                                                        tar_images_train,
                                                        test_size=0.1)

    print('-' * 30)
    print('Data Augmentation Start')
    data_gen_args = dict(shear_range=0.1,
                         rotation_range=20,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.3,
                         fill_mode='constant',
                         horizontal_flip=True,
                         vertical_flip=True,
                         cval=0)
    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)

    seed = 1
    image_datagen.fit(X_train, augment=True, seed=seed)
    mask_datagen.fit(y_train, augment=True, seed=seed)

    # FIX: the same seed must also be passed to flow(); without it the two
    # generators draw independent random transforms and the augmented masks
    # no longer line up with their images.
    image_generator = image_datagen.flow(X_train,
                                         batch_size=BATCH_SIZE,
                                         seed=seed)
    mask_generator = mask_datagen.flow(y_train,
                                       batch_size=BATCH_SIZE,
                                       seed=seed)

    # renamed from `train` so the function's own name is not shadowed
    train_generator = zip(image_generator, mask_generator)

    print('-' * 30)
    print('Data Augmentation End')
    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)

    # checkpoint the best weights by training dice coefficient
    checkpoint = ModelCheckpoint(path_weights +
                                 'gan_lungseg_exp2_100epc_augment_best.hdf5',
                                 monitor='dice',
                                 verbose=1,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 mode='max')

    # FIX: the zipped Keras iterators loop forever, so steps_per_epoch is
    # required; batch_size/shuffle are invalid for generator input and were
    # removed (the generators already batch and shuffle).
    history = model.fit(train_generator,
                        steps_per_epoch=max(1, len(X_train) // BATCH_SIZE),
                        epochs=EPOCHS,
                        verbose=1,
                        validation_data=(X_test, y_test),
                        callbacks=[checkpoint])

    model.save(path_weights + 'gan_lungseg_exp2_100epc_augment_last.hdf5')

    # convert the history.history dict to a pandas DataFrame:
    hist_df = pd.DataFrame(history.history)

    # save to json:
    print("Saving history")
    hist_json_file = path_json + 'gan_lungseg_exp2_100epc_augment_history.json'
    with open(hist_json_file, mode='w') as f:
        hist_df.to_json(f)
    print("History saved")

    # dice / val_dice / generator L1 loss curves
    plt.plot(history.history['dice'])
    plt.plot(history.history['val_dice'])
    plt.plot(history.history['g_l1'])
    plt.title('Model dice coeff')
    plt.ylabel('Dice coeff')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Val', 'Loss'], loc='upper left')
    # save plot to file
    plt.savefig(path_plot + 'gan_lungseg_exp2_100epc_augment_plot.png')
# --- Esempio n. 17 (scraped example separator; score: 0) ---
def main(_):
    """Entry point: build a Pix2Pix model and run training or testing.

    Reads all configuration from the module-level ``FLAGS``; creates the
    checkpoint/sample/test directories, restores any saved checkpoint, then
    dispatches to run_train or run_test.
    """
    print("=" * 100)
    print("FLAGS")
    pp = pprint.PrettyPrinter()
    pp.pprint(flags.FLAGS.__flags)

    # make sub-directories
    # FIX: os.makedirs(..., exist_ok=True) replaces the isdir/mkdir pairs —
    # it also creates missing parents and is free of the check-then-create race.
    for directory in (FLAGS.checkpoint_dir, FLAGS.sample_dir, FLAGS.test_dir):
        os.makedirs(directory, exist_ok=True)

    # Launch Graph
    sess = tf.Session()
    try:
        model = Pix2Pix(sess=sess,
                        gan_name=FLAGS.gan_name,
                        dataset_name=FLAGS.dataset_name,
                        input_size=FLAGS.input_size,
                        input_dim=FLAGS.input_dim,
                        output_size=FLAGS.output_size,
                        output_dim=FLAGS.output_dim,
                        batch_size=FLAGS.batch_size,
                        gen_num_filter=FLAGS.gen_num_filter,
                        disc_num_filter=FLAGS.disc_num_filter,
                        learning_rate=FLAGS.learning_rate,
                        beta1=FLAGS.beta1,
                        l1_lambda=FLAGS.l1_lambda,
                        checkpoint_dir=FLAGS.checkpoint_dir,
                        sample_dir=FLAGS.sample_dir,
                        test_dir=FLAGS.test_dir)

        sess.run(tf.global_variables_initializer())

        # show all variables
        model_vars = tf.trainable_variables()
        slim.model_analyzer.analyze_vars(model_vars, print_info=True)

        # load trained model (flag_checkpoint is True when a checkpoint existed)
        flag_checkpoint, counter = load_checkpoint(model)

        dataset_dir = os.path.join("datasets", FLAGS.dataset_name)
        if FLAGS.train:
            # training dataset dir
            trainset_dir = os.path.join(dataset_dir, "train")
            valset_dir = os.path.join(dataset_dir, "val")
            run_train(model=model,
                      trainset_dir=trainset_dir,
                      valset_dir=valset_dir,
                      sample_size=FLAGS.batch_size,
                      scale_size=FLAGS.scale_size,
                      crop_size=FLAGS.crop_size,
                      flip=FLAGS.flip,
                      training_epochs=FLAGS.epoch,
                      flag_checkpoint=flag_checkpoint,
                      checkpoint_counter=counter)

        else:
            # test dir (fall back to the validation split when absent)
            testset_dir = os.path.join(dataset_dir, "test")
            if not os.path.isdir(testset_dir):
                testset_dir = os.path.join(dataset_dir, "val")

            run_test(model=model, testset_dir=testset_dir)
    finally:
        # FIX: release the TF session's resources even if train/test raises
        sess.close()