def __init__(self, img_rows=512, img_cols=512, channel=1):
    self.img_rows = img_rows
    self.img_cols = img_cols
    self.channel = channel
    self.D = None  # discriminator
    self.G = None  # generator
    self.gen_input = None
    self.gen_output = None
    self.AM = None  # adversarial model
    self.DM = None  # discriminator model
    self.generator_network = PConvUnet(img_rows, img_cols)
Example #2
batch = np.stack([img for _ in range(BATCH_SIZE)], axis=0)
generator = datagen.flow(x=batch, batch_size=BATCH_SIZE)


[m1, m2], o1 = next(generator)
plot_sample_data(m1[0], m2[0]*255, o1[0])
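# `plot_sample_data` (called above) is not defined in this excerpt. A minimal
# sketch is given below as an assumption, not the original helper: it takes the
# masked image, the mask scaled for display, and the target image, and shows
# them side by side (matplotlib.pyplot is assumed to be imported as plt).
def plot_sample_data(masked, mask, original):
    _, axes = plt.subplots(1, 3, figsize=(15, 5))
    axes[0].imshow(masked)
    axes[0].set_title('Masked Input')
    axes[1].imshow(mask)
    axes[1].set_title('Mask')
    axes[2].imshow(original)
    axes[2].set_title('Original')
    plt.show()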

# Training the inpainting UNet on a single image
'''
   Now that we have a generator, we can start training. The fit()
   function of PConvUnet accepts callbacks, which we can use to
   evaluate and display progress in reconstructing the target image
   from the masked input image.'''
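# A minimal sketch of such a progress callback (an assumption, not code from the
# original notebook). It reuses the `generator` created above and the `model`
# instantiated just below, and assumes model.predict([masked, mask]) forwards to
# the underlying Keras model; the callback would be added to the `callbacks`
# list of fit_generator further down.
from keras.callbacks import LambdaCallback

def plot_progress(epoch, logs):
    # Pull one batch, reconstruct it with the current weights and display it
    (masked_batch, mask_batch), target = next(generator)
    pred = model.predict([masked_batch, mask_batch])
    _, axes = plt.subplots(1, 3, figsize=(15, 5))
    axes[0].imshow(masked_batch[0])
    axes[0].set_title('Masked Input')
    axes[1].imshow(pred[0])
    axes[1].set_title('Reconstruction')
    axes[2].imshow(target[0])
    axes[2].set_title('Target')
    plt.show()

plot_callback = LambdaCallback(on_epoch_end=plot_progress)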

# Instantiate the model (optionally resume from a checkpoint)
model = PConvUnet(vgg_weights='vgg16_pytorch2keras.h5')
# model.load("weights.09-0.67.h5")

model.fit_generator(
    generator,
    steps_per_epoch=2000,
    epochs=10,
    callbacks=[
        TensorBoard(
            log_dir='./coco_2017_data/logs/single_image_test',
            write_graph=False
        ),
        ModelCheckpoint(
            './coco_2017_data/logs/single_image_test/coco_2017_weights.{epoch:02d}-{loss:.2f}.h5',
            monitor='loss',
            save_best_only=True,
        ),
    ]
)

# Imports needed by this fragment (PConvUnet, MaskGenerator and DataChunker
# come from the project's own modules)
import os
from copy import deepcopy

import numpy as np
import segyio

segy = segyio.open(datapath, ignore_geometry=True)
segydata_org = []  # initialize the list before appending traces to it
for i in range(0, 501):
    segydata_org.append(segy.trace[i])
segydata_org = np.array(segydata_org)
segydata = (segydata_org / np.max(abs(segydata_org)) + 1) / 2
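# The rescaling above maps the amplitudes from [-max|x|, +max|x|] to [0, 1]:
# the most negative sample becomes 0.0, a zero sample becomes 0.5, and the peak
# becomes 1.0, matching the [0, 1] range used for the image data elsewhere
# in this file (rescale=1. / 255).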

mask_generator = MaskGenerator(segydata.shape[0],
                               segydata.shape[1],
                               rand_seed=150)
# mask = mask_generator.fix_generate_mask()
mask = mask_generator.sample()

masked_data = deepcopy(segydata)
masked_data[mask == 0] = 0

model = PConvUnet(inference_only=True)
model.load(os.path.join(os.getcwd(), 'pconv_model.h5'), train_bn=False)

chunker = DataChunker(128, 128, 30)
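# No prediction step survives in this fragment. A minimal inference sketch is
# given below under two assumptions: that DataChunker exposes the same
# dimension_preprocess / dimension_postprocess helpers as the ImageChunker in
# the reference PConv-Keras code, and that the network expects an explicit
# single-channel axis on both the data and the mask, with model.predict
# forwarding [data, mask] batches to the underlying Keras model.
data_in = masked_data[..., np.newaxis]             # (traces, samples, 1)
mask_in = mask[..., np.newaxis].astype('float32')  # 1 = keep, 0 = hole
chunked_data = chunker.dimension_preprocess(deepcopy(data_in))
chunked_mask = chunker.dimension_preprocess(deepcopy(mask_in))
pred_chunks = model.predict([chunked_data, chunked_mask])
reconstructed = chunker.dimension_postprocess(pred_chunks, data_in)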


def plot_datas(data):
    vm = np.percentile(data[0], 99)

    fig = plt.figure(figsize=(10, 10))
    fig.suptitle('', fontsize=20, y=0.96)

    ax = fig.add_subplot(221)
    ax.set_xlabel('Receiver number', fontsize=10, labelpad=6)
    ax.set_ylabel('Time(s)', fontsize=10, labelpad=0)
    ax.tick_params(axis='both', which='major', labelsize=8)
            #yield [masked, mask], ori,name
            yield [masked, mask], ori, path
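# The two lines above are the tail of AugmentingDataGenerator.flow_from_directory.
# A minimal sketch of the whole class, following the pattern used in the
# PConv-Keras sample notebooks, is given below; the variant in this file
# additionally yields the image path, and the value written into the masked
# pixels is an assumption.
from copy import deepcopy
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

class AugmentingDataGenerator(ImageDataGenerator):
    def flow_from_directory(self, directory, mask_generator, *args, **kwargs):
        # Plain Keras generator that yields only the images (no labels)
        generator = super().flow_from_directory(directory, class_mode=None,
                                                *args, **kwargs)
        while True:
            ori = next(generator)
            # One random mask per image in the batch
            mask = np.stack([mask_generator.sample()
                             for _ in range(ori.shape[0])], axis=0)
            # White out the masked pixels in a copy of the originals
            masked = deepcopy(ori)
            masked[mask == 0] = 1
            yield [masked, mask], ori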


# Create testing generator
test_datagen = AugmentingDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(TEST_DIR,
                                                  MaskGenerator(
                                                      512, 512, key, 3),
                                                  target_size=(512, 512),
                                                  batch_size=BATCH_SIZE,
                                                  seed=None,
                                                  shuffle=False)

# Instantiate the model
model = PConvUnet()

# rect_2_2 training weights
# model.load("coco_phase2_weights.50-0.36.h5", train_bn=False, lr=0.00005)

# rect_2
model.load("coco_phase2_weights.43-0.34.h5", train_bn=False, lr=0.00005)

# bs_4
# model.load("coco_phase2_weights.37-0.45.h5", train_bn=False, lr=0.00005)

# bs_6
# model.load("coco_phase2_weights.39-0.36.h5", train_bn=False, lr=0.00005)

# Generating samples
output_plot = 'predicted_coco_dataset/predicted_rect_valset/'
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')

        plt.savefig(output_plot + '/img_{}_{}.png'.format(i, pred_time))
        plt.close()


# Phase 1 - with batch normalization
# Instantiate the model
model = PConvUnet(vgg_weights='vgg16_pytorch2keras.h5')
#coco_phase1weights.45-0.42
#phase_1_rect_coco_weights.45-0.38.h5
#coco_phase1weights.45-0.38
model.load("coco_phase1coco_2017_phase_1_weights.43-1.08.h5",
           train_bn=False,
           lr=0.00005)

FOLDER = './phase_2_coco_2017_data_log/logs/coco_phase2'

# Run training for a certain number of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=1000,
Example #6
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :], cmap='gray')
        axes[1].imshow(pred_img[i, :, :, :], cmap='gray')
        axes[2].imshow(ori[i, :, :, :], cmap='gray')
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
        plt.savefig(r'/misc/home/u2592/image/img_{i}_{pred_time}.png'.format(
            i=i, pred_time=pred_time))
        plt.close()


"""## Phase 1 - with batch normalization"""

model = PConvUnet(
    vgg_weights='/misc/home/u2592/data/pytorch_to_keras_vgg16.h5')
model.load('/misc/home/u2592/data/phase2/weights.20-0.07.h5',
           train_bn=False,
           lr=0.00005)
FOLDER = r'/misc/home/u2592/data/phase2'
# Run training for a certain number of epochs
model.fit_generator(
    train_generator,
    steps_per_epoch=3522,
    validation_data=val_generator,
    validation_steps=499,
    epochs=20,
    verbose=0,
    callbacks=[
        TensorBoard(log_dir=FOLDER, write_graph=False),
        ModelCheckpoint(
Example #7
            gc.collect()
            #yield [masked, mask], ori,name
            yield [masked, mask], ori, path


# Create testing generator
test_datagen = AugmentingDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(TEST_DIR,
                                                  MaskGenerator(512, 512, 3),
                                                  target_size=(512, 512),
                                                  batch_size=BATCH_SIZE,
                                                  seed=42,
                                                  shuffle=False)

# Instantiate the model
model = PConvUnet()
#model.load("coco_phase2_weights.37-0.45.h5",train_bn=False,lr=0.00005)
model.load("coco_phase2_weights.01-0.37.h5", train_bn=False)

#Generating samples
output_plot = './predicted_synth_dataset/pred_samples_bs1'
try:
    os.makedirs(output_plot)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise
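# Note: the try/except above needs `import errno`; on Python 3 the same effect
# can be achieved with os.makedirs(output_plot, exist_ok=True).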

n = 0

for (masked, mask), ori, path in tqdm(test_generator):
    name = os.path.basename(path)
Example #8
        axes[2][2].set_title('Original Data')
        axes[2][3].set_title('Residual')
        axes[3][0].set_title('Masked Data')
        axes[3][1].set_title('Predicted Data')
        axes[3][2].set_title('Original Data')
        axes[3][3].set_title('Residual')

        if not os.path.exists(path + args.name + '_phase1'):
            os.makedirs(path + args.name + '_phase1')
        plt.savefig(path + args.name + '_phase1/' +
                    'img_{}.png'.format(pred_time))

        plt.close()

    # Load the model
    model = PConvUnet()

    # Loading of checkpoint
    if args.checkpoint:
        if args.stage == 'train':
            model.load(args.checkpoint)
        elif args.stage == 'finetune':
            model.load(args.checkpoint, train_bn=False, lr=0.00005)
    else:
        print('no checkpoint file')
    # Fit model
    model.fit(
        train_generator,
        steps_per_epoch=5000,
        # validation_data=val_generator,
        # validation_steps=1000,