# Note: this fragment is truncated above; 'key' is presumably defined in the
# enclosing (omitted) code.
test_generator = test_datagen.flow_from_directory(TEST_DIR,
                                                  MaskGenerator(
                                                      512, 512, key, 3),
                                                  target_size=(512, 512),
                                                  batch_size=BATCH_SIZE,
                                                  seed=None,
                                                  shuffle=False)
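# AugmentingDataGenerator appears to be a customized Keras ImageDataGenerator
# whose flow_from_directory pairs each image with a mask from MaskGenerator;
# as the generator loop further down shows, the test variant yields
# [masked, mask], ori, path for each batch.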

# Instantiate the model
model = PConvUnet()

# Load one of the phase-2 checkpoints; the alternatives are kept commented out.
# rect_2_2 training weights
# model.load("coco_phase2_weights.50-0.36.h5", train_bn=False, lr=0.00005)

# rect_2
model.load("coco_phase2_weights.43-0.34.h5", train_bn=False, lr=0.00005)

# bs_4
# model.load("coco_phase2_weights.37-0.45.h5", train_bn=False, lr=0.00005)

# bs_6
# model.load("coco_phase2_weights.39-0.36.h5", train_bn=False, lr=0.00005)

# Generating samples
output_plot = 'predicted_coco_dataset/predicted_rect_valset/'
try:
    os.makedirs(output_plot)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
# Read the first 501 traces from the SEG-Y file handle 'segy' (opened in the
# omitted code above), normalize the amplitudes to [-1, 1], then map to [0, 1].
segydata_org = []
for i in range(0, 501):
    segydata_org.append(segy.trace[i])
segydata_org = np.array(segydata_org)
segydata = (segydata_org / np.max(abs(segydata_org)) + 1) / 2

mask_generator = MaskGenerator(segydata.shape[0],
                               segydata.shape[1],
                               rand_seed=150)
# mask = mask_generator.fix_generate_mask()
mask = mask_generator.sample()
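# In the partial-convolution convention the mask is 1 where data is valid and
# 0 inside the holes; the next lines zero the section where mask == 0 to
# produce the corrupted input that the network will inpaint.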

masked_data = deepcopy(segydata)
masked_data[mask == 0] = 0

model = PConvUnet(inference_only=True)
model.load(os.path.join(os.getcwd(), 'pconv_model.h5'), train_bn=False)

chunker = DataChunker(128, 128, 30)
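
# A hedged sketch (not from the source) of how the chunked inference might
# proceed, assuming DataChunker mirrors the ImageChunker API from PConv-Keras:
# dimension_preprocess splits the section into overlapping 128x128 chunks and
# dimension_postprocess stitches the per-chunk predictions back together.
# These method names are assumptions.
chunked_data = chunker.dimension_preprocess(deepcopy(masked_data))
chunked_mask = chunker.dimension_preprocess(deepcopy(mask))
pred_chunks = model.predict([chunked_data, chunked_mask])
reconstructed = chunker.dimension_postprocess(pred_chunks, masked_data)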


def plot_datas(data):
    vm = np.percentile(data[0], 99)

    fig = plt.figure(figsize=(10, 10))
    fig.suptitle('', fontsize=20, y=0.96)

    ax = fig.add_subplot(221)
    ax.set_xlabel('Receiver number', fontsize=10, labelpad=6)
    ax.set_ylabel('Time(s)', fontsize=10, labelpad=0)
    ax.tick_params(axis='both', which='major', labelsize=8)
    # The rest of this function is truncated in the source; closing the call
    # and plotting the section with a symmetric clip at the 99th percentile is
    # an assumption.
    ax.tick_params(direction='out')
    ax.imshow(data[0], cmap='gray', vmin=-vm, vmax=vm, aspect='auto')
    plt.show()


# Phase 2 - without batch normalization (fine-tuning)
# Instantiate the model
model = PConvUnet(vgg_weights='vgg16_pytorch2keras.h5')
# Alternative phase-1 checkpoints:
#   coco_phase1weights.45-0.42
#   phase_1_rect_coco_weights.45-0.38.h5
#   coco_phase1weights.45-0.38
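
# Fine-tuning step of the partial-convolution training recipe: BatchNorm
# statistics in the encoder are frozen (train_bn=False) and a smaller learning
# rate is used so that fine-tuning does not disturb them.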
model.load("coco_phase1coco_2017_phase_1_weights.43-1.08.h5",
           train_bn=False,
           lr=0.00005)

FOLDER = './phase_2_coco_2017_data_log/logs/coco_phase2'

# Run training for a certain number of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=1000,
    epochs=50,
    verbose=1,
    callbacks=[
        TensorBoard(log_dir=FOLDER, write_graph=False),
        ModelCheckpoint(FOLDER + '_weights.{epoch:02d}-{loss:.2f}.h5',

# Example #4

        axes[1].imshow(pred_img[i, :, :, :], cmap='gray')
        axes[2].imshow(ori[i, :, :, :], cmap='gray')
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
        plt.savefig(r'/misc/home/u2592/image/img_{i}_{pred_time}.png'.format(
            i=i, pred_time=pred_time))
        plt.close()


"""## Phase 1 - with batch normalization"""

model = PConvUnet(
    vgg_weights='/misc/home/u2592/data/pytorch_to_keras_vgg16.h5')
model.load('/misc/home/u2592/data/phase2/weights.20-0.07.h5',
           train_bn=False,
           lr=0.00005)
FOLDER = r'/misc/home/u2592/data/phase2'
# Run training for a certain number of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=3522,
    validation_data=val_generator,
    validation_steps=499,
    epochs=20,
    verbose=0,
    callbacks=[
        TensorBoard(log_dir=FOLDER, write_graph=False),
        ModelCheckpoint(
            '/misc/home/u2592/data/phase2/weights.{epoch:02d}-{loss:.2f}.h5',
            monitor='val_loss',

# Example #5

        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')

        plt.savefig(output_plot + '/img_{}_{}.png'.format(i, pred_time))
        plt.close()


# Phase 1 - with batch normalization
# Instantiate the model
model = PConvUnet(vgg_weights='vgg16_pytorch2keras.h5')
model.load("coco_2017_weights.10-1.48.h5")

FOLDER = './phase_1_coco_2017_data_log/logs/coco_phase1'
# Run training for a certain number of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=1000,
    epochs=50,
    verbose=1,
    callbacks=[
        TensorBoard(log_dir=FOLDER, write_graph=False),
        ModelCheckpoint(FOLDER +
                        'coco_2017_phase_1_weights.{epoch:02d}-{loss:.2f}.h5',
                        monitor='val_loss',

# Example #6

            yield [masked, mask], ori, path


# Create testing generator
test_datagen = AugmentingDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(TEST_DIR,
                                                  MaskGenerator(512, 512, 3),
                                                  target_size=(512, 512),
                                                  batch_size=BATCH_SIZE,
                                                  seed=42,
                                                  shuffle=False)

# Instantiate the model
model = PConvUnet()
# model.load("coco_phase2_weights.37-0.45.h5", train_bn=False, lr=0.00005)
model.load("coco_phase2_weights.01-0.37.h5", train_bn=False)

# Generating samples
output_plot = './predicted_synth_dataset/pred_samples_bs1'
try:
    os.makedirs(output_plot)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

n = 0

for (masked, mask), ori, path in tqdm(test_generator):
    name = os.path.basename(path)
    print(path)
    # Run predictions for this batch of images
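    # A hedged continuation sketch (not from the source): predict on the batch
    # and save a masked / predicted / original comparison for each sample,
    # mirroring the plotting loops in the other examples. It assumes
    # PConvUnet.predict delegates to the underlying Keras model's predict.
    pred_img = model.predict([masked, mask])
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :])
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
        plt.savefig(output_plot + '/img_{}_{}.png'.format(n, i))
        plt.close()
    n += 1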

# Example #7

        axes[3][3].set_title('Residual')

        if not os.path.exists(path + args.name + '_phase1'):
            os.makedirs(path + args.name + '_phase1')
        plt.savefig(path + args.name + '_phase1/' +
                    'img_{}.png'.format(pred_time))

        plt.close()

    # Load the model
    model = PConvUnet()

    # Loading of checkpoint
    if args.checkpoint:
        if args.stage == 'train':
            model.load(args.checkpoint)
        elif args.stage == 'finetune':
            model.load(args.checkpoint, train_bn=False, lr=0.00005)
    else:
        print('no checkpoint file')
    # Fit model
    model.fit(
        train_generator,
        steps_per_epoch=5000,
        # validation_data=val_generator,
        # validation_steps=1000,
        epochs=2000,
        verbose=0,
        callbacks=[
            TensorBoard(log_dir=os.path.join(args.log_path,
                                             args.name + '_phase1'),