コード例 #1
0
def img_analysis(original_url, mask_ori_url):
    """Inpaint the masked region of an image with a pretrained PConvUnet.

    Parameters:
        original_url: path/URL of the original image.
        mask_ori_url: path/URL of the mask image (background white, hole black).

    Returns:
        (predicted_image, masked_input): the model's inpainted output and the
        white-filled masked copy that was fed to it.
    """
    print('original_url ::: ', original_url)
    print('mask_ori_url ::: ', mask_ori_url)

    # input image check
    # (the original also loaded the image a second time into an unused `ori`
    # variable; that redundant load has been removed)
    mask = img_load(mask_ori_url)
    masked = img_load(original_url)

    # Invert the mask: input convention is background == white / mask == black,
    # the model wants 1 for "keep" pixels and 0 for the hole.
    mask = np.where(mask == 0, 1, 0)

    # Paint the hole region white (value 1) in the image copy.
    masked[mask == 0] = 1

    # Add a leading batch dimension for the model.
    model_input_masked = masked[np.newaxis, ...]
    model_input_mask = mask[np.newaxis, ...]

    print(model_input_masked.shape, model_input_mask.shape)

    # Build the inference-only model and load the pretrained weights.
    trained_path = '/home/jin/flask/model/pconv_imagenet.26-1.07.h5'
    # vgg_weights = '/home/jin/flask/model/pytorch_to_keras_vgg16.h5'
    vgg_weights = '/home/jin/flask/model/weights.48-0.53.h5'
    model = PConvUnet(vgg_weights=vgg_weights, inference_only=True)
    model.load(trained_path, train_bn=False)

    pred_imgs = model.predict([model_input_masked, model_input_mask])

    return pred_imgs[0], masked
コード例 #2
0
ファイル: test.py プロジェクト: ishay2b/PConv-Keras
class InPantingEngine(object):
    """Inference wrapper around PConvUnet for image inpainting.

    NOTE(review): the (misspelled) class name is kept so existing callers
    are unaffected.
    """

    def __init__(self, model_path=MODAL_PATH):
        self.model_path = model_path
        tf.keras.backend.set_learning_phase(0)  # Ignore dropout at inference
        self.model = PConvUnet(vgg_weights=None, inference_only=True)
        # (a second, redundant set_learning_phase(0) call was removed here)
        self.model.load(self.model_path)

    def preprocess(self, image, mask):
        """Scale image/mask to [0, 1], invert the mask and white out the hole."""
        im = _A(image, dtype=np.float32) / 255.0
        msk = _A(mask, dtype=np.float32) / 255.0
        msk = 1.0 - msk  # model convention: 1 = keep, 0 = hole
        im[msk == 0] = 1
        return im, msk

    def __call__(self, image, mask):
        """Run inpainting on (image, mask) and return a uint8 prediction."""
        im, msk = self.preprocess(image, mask)
        pred_imgs = self.model.predict([[im], [msk]])[0]
        pred_imgs = (pred_imgs * 255).astype(np.uint8)
        return pred_imgs

    def dump_to_pb(self):
        """Export the Keras model as a TF SavedModel under EXPORT_PATH."""
        import shutil
        # The export path contains the name and the version of the model

        # Fetch the Keras session and save the model
        # The signature definition is defined by the input and output tensors
        # And stored with the default serving key
        try:
            # Remove any previous export; simple_save refuses to overwrite.
            shutil.rmtree(EXPORT_PATH)
            print("Deleted previous export path", EXPORT_PATH)
        except OSError:
            # Nothing to delete (or not removable) -- proceed with the export.
            # Narrowed from a bare `except:` so Ctrl-C/SystemExit still work.
            pass
        with tf.keras.backend.get_session() as sess:
            sess.run(tf.global_variables_initializer())
            tf.saved_model.simple_save(
                sess,
                EXPORT_PATH,
                inputs={
                    'inputs_img': self.model.model.inputs[0],
                    "inputs_mask": self.model.model.inputs[1]
                },
                outputs={t.name: t
                         for t in self.model.model.outputs})

    def dump_to_estimator(self):
        """Write the model graph (text and binary) under exported_models/estimator2."""
        self.model_dir = str(ROOT / 'exported_models' / 'estimator2')
        tf.io.write_graph(self.model.model.output.graph,
                          self.model_dir,
                          'saved_model.pbtxt',
                          as_text=True)
        tf.io.write_graph(self.model.model.output.graph,
                          self.model_dir,
                          'saved_model.pb',
                          as_text=False)
コード例 #3
0
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=1. / 255,
    horizontal_flip=True
)
# Training generator: 256x256 image batches drawn from TRAIN_DIR.
train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR, target_size=(256, 256), batch_size=BATCH_SIZE
)

# Create validation generator (fixed seed so validation batches are reproducible)
val_datagen = DataGenerator(rescale=1. / 255)
val_generator = val_datagen.flow_from_directory(
    VAL_DIR, target_size=(256, 256), batch_size=BATCH_SIZE, seed=1
)

# Instantiate the model; weight_filepath is where checkpoints get written
model = PConvUnet(weight_filepath="{}/PConv-Keras/data/model/".format(path_prefix))
# Run training for certain amount of epochs
model.fit(
    train_generator,
    steps_per_epoch=10,
    validation_data=val_generator,
    validation_steps=100,
    epochs=5,
    plot_callback=None,
    callbacks=[
        TensorBoard(log_dir="{}/PConv-Keras/data/model/initial_training".format(path_prefix), write_graph=False)
    ]
)

コード例 #4
0
ファイル: pipeline.py プロジェクト: gledis69/ComVisProject
 def __init__(self, lisa_config, pconv_weights):
     """Build the pipeline: a LISA mask predictor plus an inference-only PConvUnet."""
     self.lisa_predictor = get_obj_shadow_masks_predictor(lisa_config)
     self.pconv_model = PConvUnet(vgg_weights=None, inference_only=True)
     self.pconv_model.load(pconv_weights, train_bn=False)
     # Cached results from the most recently processed image.
     self._internal_state = None
     self._internal_state_image = None
コード例 #5
0
ファイル: train.py プロジェクト: ykiiiiii/CosmoVAE
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i,:,:,:])
        axes[1].imshow(pred_img[i,:,:,:] * 1.)
        axes[2].imshow(ori[i,:,:,:])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
                
        plt.savefig(r'data/logs/test/img_{}_{}.png'.format(i, pred_time))
        plt.close()
test_data = next(test_generator)
# One test batch: ((masked images, masks), original images).
(masked, mask), ori = test_data


# Instantiate the model (VGG weights are used for the perceptual loss)
model = PConvUnet(vgg_weights='./data/pytorch_vgg16.h5')

FOLDER = './data/logs/C2/'

# Run training for certain amount of epochs
model.fit_generator(
    train_generator, 
    steps_per_epoch=500,
    validation_data=val_generator,
    validation_steps=50,
    epochs=50,  
    verbose=0,
    callbacks=[
        TensorBoard(
            log_dir=FOLDER,
            write_graph=True
コード例 #6
0
# Make sure we run from the repository root so relative imports/paths work.
if os.path.basename(os.getcwd()) != 'PConv-Keras':
    os.chdir('..')

from libs.pconv_model import PConvUnet

#Both msk and img should be of order (512,512,3) exactly or else use Image Chunker 
img = '/image.jpg'  #Path of image and mask
msk= '/mask.jpg'

im= Image.open(img)
mk= Image.open(msk)
# Scale both to [0, 1] floats as expected by the model.
mk= np.array(mk)/255
im= np.array(im)/255

mk= mk.reshape(-1,512,512,3)
im= im.reshape(-1,512,512,3)  #The model takes 4D input

model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/PConv-Keras/pconv_imagenet.26-1.07.h5", train_bn=False) #See more about weight in readme
pred_imgs = model.predict([im,mk])

def plot_images(images, s=5):
    """Show every image in *images* side by side on a single matplotlib row."""
    count = len(images)
    _, axes = plt.subplots(1, count, figsize=(s * count, s))
    # subplots() returns a bare Axes (not an array) when there is one column.
    if count == 1:
        axes = [axes]
    for ax, picture in zip(axes, images):
        ax.imshow(picture)
    plt.show()
plot_images(pred_imgs)
import cv2
# predict() returns a batch of float images in [0, 1] (see the *255/uint8
# handling elsewhere in this project); take the first image and rescale to
# 0-255 uint8 so OpenCV writes a valid file -- a 4-D float array would not.
cv2.imwrite('inpainted.jpg', (pred_imgs[0] * 255).astype(np.uint8))
コード例 #7
0
    color_mode='grayscale',
    batch_size=BATCH_SIZE,
    seed=42
)

## ============================================================================
##                              Training
## ============================================================================

## -------------  Phase 1 - with batch normalization - lr 2e-4 -----------------
print('Starting phase 1')
# output path for this phase's checkpoints
FOLDER = PATH+'phase1_GRAY/'

## ********************* Model definition **************************************
# Grayscale variant: uses a VGG re-trained on single-channel input.
model = PConvUnet(vgg_weights=PATH+'vgg_grayscale.h5',
                  gpus=GPUS)

## ************************* Callbacks *****************************************
# Keep only the best weights (by validation loss), weights-only to save space.
checkpoint = ModelCheckpoint(
                            FOLDER+'weights.h5',
                            monitor='val_loss',
                            save_best_only=True,
                            save_weights_only=True)

early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=10,
                               restore_best_weights=True)

# Custom history tracker defined in my_callback (project module).
history = my_callback.Histories()
コード例 #8
0
ファイル: main.py プロジェクト: liminn/PConv-Keras
        # Clear current output and display test images
        for i in range(len(ori)):
            _, axes = plt.subplots(1, 3, figsize=(20, 5))
            axes[0].imshow(masked[i,:,:,:])
            axes[1].imshow(pred_img[i,:,:,:] * 1.)
            axes[2].imshow(ori[i,:,:,:])
            axes[0].set_title('Masked Image')
            axes[1].set_title('Predicted Image')
            axes[2].set_title('Original Image')

            # BUG FIX: the second component must be relative -- os.path.join
            # discards every preceding component when it sees a leading '/',
            # so the original '/img_...' silently ignored `path` entirely.
            plt.savefig(os.path.join(path, 'img_{}_{}.png'.format(i, pred_time)))
            plt.close()

    # Load the model (VGG weights only needed when training with perceptual loss)
    if args.vgg_path:
        model = PConvUnet(vgg_weights=args.vgg_path)
    else:
        model = PConvUnet()

    # Loading of checkpoint
    if args.checkpoint:
        if args.stage == 'train':
            model.load(args.checkpoint)
        elif args.stage == 'finetune':
            # Fine-tuning: freeze batch-norm statistics and lower the LR.
            model.load(args.checkpoint, train_bn=False, lr=0.00005)

    # Fit model
    model.fit_generator(
        train_generator, 
        steps_per_epoch=10000,
        validation_data=val_generator,
コード例 #9
0
    def setup(self):
        """Reset the interactive painting state, bind canvas events and load
        the four pretrained PConvUnet models plus a TF session."""
        # Prediction results of the four models (filled in after inference).
        self.predicted_img1 = None
        self.predicted_img2 = None
        self.predicted_img3 = None
        self.predicted_img4 = None
        # Mouse/stroke tracking state.
        self.old_x = None
        self.old_y = None
        self.start_x = None
        self.start_y = None
        self.end_x = None
        self.end_y = None
        self.eraser_on = False
        self.active_button = self.rect_button
        self.isPainting = False
        # Canvas event bindings for drawing and cursor icon switching.
        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)
        self.c.bind('<Button-1>', self.beginPaint)
        self.c.bind('<Enter>', self.icon2pen)
        self.c.bind('<Leave>', self.icon2mice)
        self.mode = 'poly'
        self.rect_buf = None
        self.line_buf = None
        assert self.mode in ['rect', 'poly']
        self.paint_color = self.MARKER_COLOR
        self.mask_candidate = []
        self.rect_candidate = []
        self.im_h = None
        self.im_w = None
        self.mask = None
        self.result = None
        self.blank = None
        self.line_width = 24

        ##################################################################
        # wny self.model = GMCNNModel()
        # Four independent models so four checkpoints can be compared at once.
        self.model1 = PConvUnet()
        self.model2 = PConvUnet()
        self.model3 = PConvUnet()
        self.model4 = PConvUnet()
        self.reuse = False
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = False
        self.sess = tf.Session(config=sess_config)

        # Placeholders sized to the configured image shape (batch of 1).
        self.input_image_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 3])
        self.input_mask_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 1])

        ###############################################################
        # wny
        # output = self.model.evaluate(self.input_image_tf, self.input_mask_tf, config=self.config, reuse=self.reuse)
        # output = (output + 1) * 127.5
        # output = tf.minimum(tf.maximum(output[:, :, :, ::-1], 0), 255)
        # self.output = tf.cast(output, tf.uint8)

        # # load pretrained model
        # vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # assign_ops = list(map(lambda x: tf.assign(x, tf.contrib.framework.load_variable(config.load_model_dir, x.name)),
        #                       vars_list))
        # self.sess.run(assign_ops)
        #self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\pconv_imagenet.26-1.07.h5", train_bn=False)
        # P1T1
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.07-1.89.h5", train_bn=False)
        # P1T2
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.10-1.74.h5", train_bn=False)
        # P1T3
        # Load four different phase-1 checkpoints (hard-coded local paths).
        self.model1.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
            train_bn=False)
        self.model2.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t167\weights.40-1.13.h5",
            train_bn=False)
        self.model3.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t168\weights.32-0.29.h5",
            train_bn=False)
        self.model4.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
            train_bn=False)
        # wny
        ###############################################################
        print('Model loaded.')
コード例 #10
0
ファイル: pconv.py プロジェクト: herb711/PConv_Keras
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')
                
        plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
        plt.close()


# ## Phase 1 - with batch normalization

# In[5]:  (notebook-export cell marker)


# Instantiate the model (no VGG weights here, so no perceptual loss)
#model = PConvUnet(vgg_weights='./data/logs/pytorch_vgg16.h5')
model = PConvUnet()
#model.load(r"C:\Users\Mathias Felix Gruber\Documents\GitHub\PConv-Keras\data\logs\single_image_test\weights.10-0.89.h5")


# In[6]:


# Output directory for logs/checkpoints of this run.
FOLDER = './data/logs/word'

# Run training for certain amount of epochs
model.fit_generator(
    train_generator, 
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=1000,
    epochs=50,  
コード例 #11
0
# Plot tensors o1..o7 (presumably intermediate layer outputs -- confirm) in a grid.
axes[1][1].imshow(o6[0, :, :, 0], cmap='gray', vmin=0, vmax=1)
axes[1][2].imshow(o7[0, :, :, 0], cmap='gray', vmin=0, vmax=1)
# NOTE(review): axes[1][3] re-plots o7, duplicating axes[1][2] -- possibly a
# copy-paste slip for a missing o8; confirm intent.
axes[1][3].imshow(o7[0, :, :, 0], cmap='gray', vmin=0, vmax=1)
axes[0][0].set_title(f"Shape: {o1.shape}")
axes[0][1].set_title(f"Shape: {o2.shape}")
axes[0][2].set_title(f"Shape: {o3.shape}")
axes[0][3].set_title(f"Shape: {o4.shape}")
axes[1][0].set_title(f"Shape: {o5.shape}")
axes[1][1].set_title(f"Shape: {o6.shape}")
axes[1][2].set_title(f"Shape: {o7.shape}")
axes[1][3].set_title(f"Shape: {o7.shape}")
plt.show()

# Part 3: Implement U-Net architecture
MAX_BATCH_SIZE = int(128)
# Print the layer-by-layer summary of a freshly built PConvUnet.
PConvUnet().summary()
from keras.preprocessing.image import ImageDataGenerator


class DataGenerator(ImageDataGenerator):
    def flow(self, x, *args, **kwargs):
        while True:

            # Get augmentend image samples
            ori = next(super().flow(x, *args, **kwargs))

            # Get masks for each image sample
            mask = np.stack([
                random_mask(ori.shape[1], ori.shape[2])
                for _ in range(ori.shape[0])
            ],
コード例 #12
0
    # Clear current output and display test images
    # Saves one masked/predicted/original triptych per batch image.
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')

        plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
        plt.close()


# Instantiate the model and resume from a previously saved checkpoint.
model = PConvUnet(weight_filepath='data/logs/')
model.load(
    r"C:\Users\MAFG\Documents\Github-Public\PConv-Keras\data\logs\50_weights_2018-06-01-16-41-43.h5"
)

# Run training for certain amount of epochs
model.fit(train_generator,
          steps_per_epoch=10000,
          validation_data=val_generator,
          validation_steps=100,
          epochs=50,
          plot_callback=plot_callback,
          callbacks=[
              TensorBoard(log_dir='../data/logs/initial_training',
                          write_graph=False)
          ])
コード例 #13
0
def inpainting(quiz, debug=True):
    """Fill the green-marked hole in quiz.raw_image with a PConvUnet prediction.

    Parameters:
        quiz: object exposing `raw_image` (treated here as a 256x256 3-channel
            uint8 image -- TODO confirm against the caller) and `bbox`.
        debug: when True, dump intermediate images under temp/.

    Returns:
        The inpainted image, same shape/dtype as quiz.raw_image.
    """

    print('Step 2: 使用你的模型,補全影像\n')
    print('...')

    raw_image = quiz.raw_image.copy()
    bbox = quiz.bbox  # currently unused; kept for interface parity

    # Pixels painted pure green (G high, R and B low) mark the region to fill.
    masked = raw_image
    to_filling = (masked[:, :, 1] > 245) & (masked[:, :, 0] < 10) & (masked[:, :, 2] < 10)
    mask_roi = np.zeros((256, 256, 3), np.uint8)
    mask_roi[to_filling] = 1

    # PConv convention: 1 = keep, 0 = hole.
    mask = 1. - mask_roi

    # Erode the keep-region so the hole grows slightly past the marker edges.
    erosion_size = 11
    erosion_type = 0  # cv2.MORPH_RECT (an unused MORPH_ELLIPSE local was removed)
    element = cv2.getStructuringElement(erosion_type, (2 * erosion_size + 1, 2 * erosion_size + 1),
                                        (erosion_size, erosion_size))
    erosion_mask = cv2.erode(mask, element)

    # Add a leading batch dimension for the model.
    masked_na = np.array([masked])
    mask_na = np.array([erosion_mask])

    model = PConvUnet(weight_filepath='{}/PConv-Keras/data/model/'.format(path_prefix))
    model.load("{}/PConv-Keras/data/model/12_weights_2018-09-26-14-05-28.h5".format(path_prefix))

    pred_img_set = model.predict([masked_na, mask_na])

    # Model output is float in [0, 1]; rescale before splicing into uint8 image.
    pred_img = 255. * pred_img_set[0, :, :, :]

    # Replace only the marked pixels; everything else keeps the original data.
    gen_image = masked.copy()
    gen_image[to_filling] = pred_img[to_filling]

    # BUG FIX: the original re-assigned `debug = True` here, silently ignoring
    # the caller's argument; the parameter is now honoured.
    if debug:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=UserWarning)
            os.makedirs('temp', exist_ok=True)
            cv2.imwrite("temp/raw_image.jpg", raw_image)
            cv2.imwrite("temp/mask.jpg", mask[:, :, 0])
            cv2.imwrite("temp/gen_image.jpg", gen_image)

    print('=====================')

    return gen_image
コード例 #14
0
# NOTE(review): compare_mse/compare_psnr/compare_ssim were deprecated and
# later removed from skimage.measure -- verify the pinned scikit-image version.
from skimage.measure import compare_mse
from skimage.measure import compare_psnr
from skimage.measure import compare_ssim
from libs.util import MaskGenerator
from libs.pconv_model import PConvUnet
from datetime import datetime

# Input images and where the comparison results get written.
original_img_folder = r'D:\PycharmProjects2\PConv-Keras\result_comparison\original_images'
result_img_folder = r'D:\PycharmProjects2\PConv-Keras\result_comparison\original_images_14_result'


def to_gray(image):
    """Return the single-channel grayscale version of a BGR (OpenCV) image."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray


# One model is active; the commented variants were used for side-by-side runs.
model1 = PConvUnet(vgg_weights=None, inference_only=True)
# model2 = PConvUnet(vgg_weights=None, inference_only=True)
#model3 = PConvUnet(vgg_weights=None, inference_only=True)
#model4 = PConvUnet(vgg_weights=None, inference_only=True)
#model5 = PConvUnet(vgg_weights=None, inference_only=True)
#model6 = PConvUnet(vgg_weights=None, inference_only=True)

# model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t172\weights.15-1.18.h5", train_bn=False)
# model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t159\weights.31-1.17.h5", train_bn=False)
# Checkpoint under evaluation (batch-norm frozen for inference).
model1.load(
    r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
    train_bn=False)
# model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t171\weights.31-0.88.h5", train_bn=False)

#model4.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t55\weights.25-1.28.h5", train_bn=False)
#model5.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t44\weights.40-0.18.h5", train_bn=False)
コード例 #15
0
    # Create masked array
    im = np.array(im) / 255
    mask_gen = MaskGenerator(*crop)
    mask = mask_gen._generate_mask()
    im[mask == 0] = 1

    # Store for prediction
    imgs.append(im)
    masks.append(mask)

    # Show image
    ax.imshow(im)
    ax.set_title("{}x{}".format(crop[0], crop[1]))

from libs.pconv_model import PConvUnet
# Inference-only model with ImageNet-pretrained weights, batch-norm frozen.
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/pconv_imagenet.26-1.07.h5", train_bn=False)
# Splits arbitrarily sized images into 512x512 tiles with 30px overlap.
chunker = ImageChunker(512, 512, 30)


def plot_images(images, s=5):
    """Show every image in *images* side by side on a single matplotlib row."""
    count = len(images)
    _, axes = plt.subplots(1, count, figsize=(s * count, s))
    # subplots() returns a bare Axes (not an array) when there is one column.
    if count == 1:
        axes = [axes]
    for ax, picture in zip(axes, images):
        ax.imshow(picture)
    plt.show()


for img, mask in zip(imgs, masks):
    print("Image with size: {}".format(img.shape))
コード例 #16
0
import os
import time
from copy import deepcopy

import cv2
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from skimage.io import imsave

from libs.pconv_model import PConvUnet
from libs.util import random_mask

# Settings
start_time = time.time()
MAX_BATCH_SIZE = 128
# Print the layer-by-layer summary of a freshly built PConvUnet.
PConvUnet().summary()


# # Testing out on single image
# Load image: OpenCV reads BGR, convert to RGB and scale to [0, 1] floats.
img = cv2.imread('./data/building.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (512, 512)) / 255
shape = img.shape
print(f"Shape of image is: {shape}")

# Load mask
mask = random_mask(shape[0], shape[1])

# Image + mask (copy so the original stays pristine)
masked_img = deepcopy(img)
コード例 #17
0
from libs.pconv_model import PConvUnet
from libs.util import random_mask, plot_images

# Load image: OpenCV reads BGR, convert to RGB and scale to [0, 1] floats.
img = cv2.imread('./data/building.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
shape = img.shape
print(f"Shape of image is: {shape}")

# Load mask
mask = random_mask(shape[0], shape[1])

# Image + mask: white out the hole region in a copy of the image.
masked_img = deepcopy(img)
masked_img[mask == 0] = 1

# Resume from a previous checkpoint, batch-norm frozen for inference.
model = PConvUnet(weight_filepath='result/logs/')
model.load(r"result/logs/1_weights_2019-02-21-04-59-53.h5", train_bn=False)

# Run prediction quickly
pred = model.scan_predict((img, mask))

# Show result
plot_images([img, masked_img, pred])
# NOTE(review): 'test_orginal.png' is a typo in the output filename; left
# as-is since downstream tooling may already expect it.
imsave('result/test_orginal.png', img)
imsave('result/test_masked.png', masked_img)
imsave('result/test_pred.png', pred)

print("finish")
# mask = MaskGenerator(512, 512, 3, rand_seed = 122201)._generate_mask()

# if you want to get mask from author's mask set ,then use the following 2 lines and comment the above line.
# mask_generator= MaskGenerator(512, 512, 3, rand_seed=42, filepath='./data/masks/train')
# mask = mask_generator.sample()

# use following 2 line to load a single mask, scaled to [0, 1]:
mask = cv2.imread(r"C:\Users\dell\Desktop\paper2\\figure\Fig6\\r2_mask.png")
mask = mask / 255

# This is to fuse mask to input image,and get a masked image (hole painted white)
im[mask == 0] = 1

# Create a model instance and import pre trained image_net weights provided by the author.
from libs.pconv_model import PConvUnet
model = PConvUnet(vgg_weights=None, inference_only=True)
#model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\pconv_imagenet.26-1.07.h5", train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t39\weights.16-1.13.h5", train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t34\weights.07-1.29.h5", train_bn=False)
model.load(
    r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
    train_bn=False)
# model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t159\weights.31-1.17.h5", train_bn=False)
#output the predicted image (batch of 1; [0] unwraps it)
predicted_img = model.predict([np.expand_dims(im, 0),
                               np.expand_dims(mask, 0)])[0]

# save result (prediction is float [0, 1], rescaled to 0-255; channel swap for OpenCV)
result_name = r"C:\Users\dell\Desktop\paper2\\figure\Fig6\\r2_mapconv.png"
cv2.imwrite(result_name, cv2.cvtColor(predicted_img * 255, cv2.COLOR_BGR2RGB))
# cv2.imwrite("./data/masked_input.png", im*255)
コード例 #19
0
class Paint(object):
    MARKER_COLOR = 'white'

    def __init__(self, config):
        """Build the Tk window: main canvas, four preview panes, button column,
        then delegate state/model initialisation to setup() and start the loop."""
        self.config = config

        self.root = Tk()
        self.root.title("Image Inpainting (V1.0)")
        # self.LabelArea=Label(text="中国", bg="green", font=("Arial", 12), width=10, height=2)
        # self.LabelArea.grid(row=1, column=0, rowspan=1)

        # Main drawing canvas, sized to the configured image shape.
        self.c = Canvas(self.root,
                        bg='white',
                        width=config.img_shapes[1] + 4,
                        height=config.img_shapes[0])
        self.c.grid(row=0, column=0, rowspan=6)
        # self.masked_input = Canvas(self.root, bg='white', width=config.img_shapes[1] + 4, height=config.img_shapes[0])
        # self.masked_input.grid(row=0, column=1, rowspan=8)
        # Four half-size output panes, one per loaded model (2x2 grid).
        self.out1 = Canvas(self.root,
                           bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out1.grid(row=0, column=1, rowspan=3)
        self.out2 = Canvas(self.root,
                           bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out2.grid(row=0, column=2, rowspan=3)
        self.out3 = Canvas(self.root,
                           bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out3.grid(row=3, column=1, rowspan=3)
        self.out4 = Canvas(self.root,
                           bg='white',
                           width=config.img_shapes[1] / 2 + 4,
                           height=config.img_shapes[0] / 2)
        self.out4.grid(row=3, column=2, rowspan=3)

        #self.Label(self.root, text="First").grid(row=0)

        # Button column (rightmost grid column): load / draw modes / actions.
        self.load_button = Button(self.root,
                                  text='load',
                                  command=self.load,
                                  width=12,
                                  height=3)
        self.load_button.grid(row=0, column=3)
        self.rect_button = Button(self.root,
                                  text='rectangle',
                                  command=self.use_rect,
                                  width=12,
                                  height=3)
        self.rect_button.grid(row=1, column=3)
        self.poly_button = Button(self.root,
                                  text='stroke',
                                  command=self.use_poly,
                                  width=12,
                                  height=3)
        self.poly_button.grid(row=2, column=3)
        self.fill_button = Button(self.root,
                                  text='fill',
                                  command=self.fill,
                                  width=12,
                                  height=3)
        self.fill_button.grid(row=3, column=3)
        self.clear_button = Button(self.root,
                                   text='clear',
                                   command=self.clear,
                                   width=12,
                                   height=3)
        self.clear_button.grid(row=4, column=3)
        self.save_button = Button(self.root,
                                  text="save",
                                  command=self.save,
                                  width=12,
                                  height=3)
        self.save_button.grid(row=5, column=3)
        # self.revoke_button = Button(self.root, text='unused1', command=self.revoke, width=12, height=3)
        # self.revoke_button.grid(row=2, column=6)
        # self.exit_button = Button(self.root, text='Exit', command=self.revoke, width=12, height=3)
        # self.exit_button.grid(row=2, column=7)

        self.filename = None
        self.setup()
        self.root.mainloop()

    def setup(self):
        """Reset the interactive painting state, bind canvas events and load
        the four pretrained PConvUnet models plus a TF session."""
        # Prediction results of the four models (filled in after inference).
        self.predicted_img1 = None
        self.predicted_img2 = None
        self.predicted_img3 = None
        self.predicted_img4 = None
        # Mouse/stroke tracking state.
        self.old_x = None
        self.old_y = None
        self.start_x = None
        self.start_y = None
        self.end_x = None
        self.end_y = None
        self.eraser_on = False
        self.active_button = self.rect_button
        self.isPainting = False
        # Canvas event bindings for drawing and cursor icon switching.
        self.c.bind('<B1-Motion>', self.paint)
        self.c.bind('<ButtonRelease-1>', self.reset)
        self.c.bind('<Button-1>', self.beginPaint)
        self.c.bind('<Enter>', self.icon2pen)
        self.c.bind('<Leave>', self.icon2mice)
        self.mode = 'poly'
        self.rect_buf = None
        self.line_buf = None
        assert self.mode in ['rect', 'poly']
        self.paint_color = self.MARKER_COLOR
        self.mask_candidate = []
        self.rect_candidate = []
        self.im_h = None
        self.im_w = None
        self.mask = None
        self.result = None
        self.blank = None
        self.line_width = 24

        ##################################################################
        # wny self.model = GMCNNModel()
        # Four independent models so four checkpoints can be compared at once.
        self.model1 = PConvUnet()
        self.model2 = PConvUnet()
        self.model3 = PConvUnet()
        self.model4 = PConvUnet()
        self.reuse = False
        sess_config = tf.ConfigProto()
        sess_config.gpu_options.allow_growth = False
        self.sess = tf.Session(config=sess_config)

        # Placeholders sized to the configured image shape (batch of 1).
        self.input_image_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 3])
        self.input_mask_tf = tf.placeholder(
            dtype=tf.float32,
            shape=[1, self.config.img_shapes[0], self.config.img_shapes[1], 1])

        ###############################################################
        # wny
        # output = self.model.evaluate(self.input_image_tf, self.input_mask_tf, config=self.config, reuse=self.reuse)
        # output = (output + 1) * 127.5
        # output = tf.minimum(tf.maximum(output[:, :, :, ::-1], 0), 255)
        # self.output = tf.cast(output, tf.uint8)

        # # load pretrained model
        # vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # assign_ops = list(map(lambda x: tf.assign(x, tf.contrib.framework.load_variable(config.load_model_dir, x.name)),
        #                       vars_list))
        # self.sess.run(assign_ops)
        #self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\pconv_imagenet.26-1.07.h5", train_bn=False)
        # P1T1
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.07-1.89.h5", train_bn=False)
        # P1T2
        # self.model.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\weights.10-1.74.h5", train_bn=False)
        # P1T3
        # Load four different phase-1 checkpoints (hard-coded local paths).
        self.model1.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t152\weights.31-1.20.h5",
            train_bn=False)
        self.model2.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t167\weights.40-1.13.h5",
            train_bn=False)
        self.model3.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t168\weights.32-0.29.h5",
            train_bn=False)
        self.model4.load(
            r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t169\weights.13-0.98.h5",
            train_bn=False)
        # wny
        ###############################################################
        print('Model loaded.')

    def checkResp(self):
        assert len(self.mask_candidate) == len(self.rect_candidate)

    def load(self):
        """Ask the user for an image file, display it on the main canvas and
        reset the mask / preview panes for a fresh session."""
        self.filename = tkFileDialog.askopenfilename(
            initialdir='./imgs',
            title="Select file",
            filetypes=(("all files", "*.*"), ("png files", "*.png"),
                       ("jpg files", "*.jpg")))
        # Split "dir/name.ext" into base name (extension stripped) and directory.
        self.filename_ = self.filename.split('/')[-1][:-4]
        self.filepath = '/'.join(self.filename.split('/')[:-1])
        print(self.filename_, self.filepath)
        try:
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C/SystemExit still work;
            # a cancelled dialog or unreadable file is reported and ignored.
            print('do not load image')
        else:
            self.im_w, self.im_h = photo.size
            # Fresh all-zero mask matching the loaded image size.
            self.mask = np.zeros((self.im_h, self.im_w, 3)).astype(np.uint8)
            #self.mask=np.zeros_like(self.image)
            print(photo.size)
            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize(
                (self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)
            # Discard any strokes/rectangles from a previously loaded image.
            self.rect_candidate.clear()
            self.mask_candidate.clear()
            # Blank placeholder shown in the four half-size preview panes.
            if self.blank is None:
                self.blank = Image.open('imgs/blank.png')
            self.blank = self.blank.resize(
                (int(self.im_w / 2), int(self.im_h / 2)))
            self.blank_tk = ImageTk.PhotoImage(image=self.blank)
            self.out1.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out2.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out3.create_image(0, 0, image=self.blank_tk, anchor=NW)
            self.out4.create_image(0, 0, image=self.blank_tk, anchor=NW)

    def save(self):
        """Write the current (inverted) mask and the four model results to disk."""
        if self.mode == 'rect':
            # Rebuild the mask from scratch out of the selected rectangles.
            self.mask[:, :, :] = 0
            for x1, y1, x2, y2 in self.mask_candidate:
                self.mask[y1:y2, x1:x2, :] = 1
        # The mask is stored inverted on disk: white background, black holes.
        mask_path = os.path.join(self.filepath, self.filename_ + '_mask.png')
        cv2.imwrite(mask_path, (1 - self.mask) * 255)
        # One output file per model prediction, converted back to RGB order.
        predictions = (self.predicted_img1, self.predicted_img2,
                       self.predicted_img3, self.predicted_img4)
        suffixes = ('_result1.png', '_result2.png',
                    '_result3.png', '_result4.png')
        for suffix, pred in zip(suffixes, predictions):
            out_path = os.path.join(self.filepath, self.filename_ + suffix)
            cv2.imwrite(out_path, cv2.cvtColor(pred, cv2.COLOR_BGR2RGB))

    def fill(self):
        """Inpaint the masked region with all four loaded models.

        Converts the drawn mask to three channels, whites out the masked
        pixels of the input image, runs each model, saves intermediate and
        result images under ./imgs, and shows the four predictions in the
        output panes at half size.
        """
        if self.mode == 'rect':
            # Rasterize the selected rectangles into the mask.
            for rect in self.mask_candidate:
                self.mask[rect[1]:rect[3], rect[0]:rect[2], :] = 1
        # Broadcast the first mask channel so the mask matches the RGB layout.
        mask_channel = np.zeros_like(self.image)
        mask_channel[:, :, 0] = self.mask[:, :, 0]
        mask_channel[:, :, 1] = self.mask[:, :, 0]
        mask_channel[:, :, 2] = self.mask[:, :, 0]
        self.mask = mask_channel

        image_temp = Image.open(self.filename)
        image_temp = np.array(image_temp) / 255
        # White out the masked pixels of the network input.
        image_temp[self.mask == 1] = 1
        image = np.expand_dims(image_temp, 0)
        # PConv convention: mask value 1 = valid pixel, 0 = hole.
        mask = np.expand_dims(1 - self.mask, 0)

        print(image.shape)
        print(mask.shape)

        # Save the masked input and the mask for inspection.
        image_temp = Image.fromarray(np.uint8(image_temp * 255))
        image_temp.save('./imgs/current_masked_input.png')
        cv2.imwrite('./imgs/current_mask.png', (1 - self.mask) * 255)

        # Run each checkpoint and rescale its prediction back to 0-255.
        self.predicted_img1 = (self.model1.predict([image, mask])[0]) * 255
        self.predicted_img2 = (self.model2.predict([image, mask])[0]) * 255
        self.predicted_img3 = (self.model3.predict([image, mask])[0]) * 255
        self.predicted_img4 = (self.model4.predict([image, mask])[0]) * 255

        cv2.imwrite('./imgs/current_result1.png',
                    cv2.cvtColor(self.predicted_img1, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result2.png',
                    cv2.cvtColor(self.predicted_img2, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result3.png',
                    cv2.cvtColor(self.predicted_img3, cv2.COLOR_BGR2RGB))
        cv2.imwrite('./imgs/current_result4.png',
                    cv2.cvtColor(self.predicted_img4, cv2.COLOR_BGR2RGB))

        photo1 = Image.open('./imgs/current_result1.png')
        photo2 = Image.open('./imgs/current_result2.png')
        photo3 = Image.open('./imgs/current_result3.png')
        photo4 = Image.open('./imgs/current_result4.png')
        half_size = (int(self.im_w / 2), int(self.im_h / 2))
        # BUG FIX: results 3 and 4 were previously resized from
        # displayPhotoResult2 (copy-paste error), so panes 3 and 4 showed
        # model 2's output instead of models 3 and 4.
        self.displayPhotoResult1 = photo1.resize(half_size)
        self.displayPhotoResult2 = photo2.resize(half_size)
        self.displayPhotoResult3 = photo3.resize(half_size)
        self.displayPhotoResult4 = photo4.resize(half_size)
        self.photo_tk_result1 = ImageTk.PhotoImage(
            image=self.displayPhotoResult1)
        self.photo_tk_result2 = ImageTk.PhotoImage(
            image=self.displayPhotoResult2)
        self.photo_tk_result3 = ImageTk.PhotoImage(
            image=self.displayPhotoResult3)
        self.photo_tk_result4 = ImageTk.PhotoImage(
            image=self.displayPhotoResult4)
        self.out1.create_image(0, 0, image=self.photo_tk_result1, anchor=NW)
        self.out2.create_image(0, 0, image=self.photo_tk_result2, anchor=NW)
        self.out3.create_image(0, 0, image=self.photo_tk_result3, anchor=NW)
        self.out4.create_image(0, 0, image=self.photo_tk_result4, anchor=NW)

    def use_rect(self):
        """Switch the canvas into rectangle-mask mode."""
        self.mode = 'rect'
        self.activate_button(self.rect_button)

    def use_poly(self):
        """Switch the canvas into freehand polyline-mask mode."""
        self.mode = 'poly'
        self.activate_button(self.poly_button)

    def revoke(self):
        """Undo the most recent rectangle selection, if any."""
        if len(self.rect_candidate) > 0:
            # Delete the newest rectangle from the canvas, then drop it from
            # both bookkeeping lists.  BUG FIX: the original used
            # remove(list[-1]), which deletes the FIRST equal element — with
            # duplicate mask tuples the wrong entry was undone; pop() always
            # removes the last one.
            self.c.delete(self.rect_candidate[-1])
            self.rect_candidate.pop()
            self.mask_candidate.pop()
        self.checkResp()

    def clear(self):
        """Discard the current mask and any drawn selections.

        In 'poly' mode the original image is reloaded to wipe the drawn
        strokes; in 'rect' mode the rectangle overlays are deleted from the
        canvas and the bookkeeping lists emptied.
        """
        # NOTE(review): this allocates a single-channel mask while load()
        # creates a 3-channel one — fill() only ever reads channel 0, but
        # confirm the shapes are intentionally different.
        self.mask = np.zeros((self.im_h, self.im_w, 1)).astype(np.uint8)
        if self.mode == 'poly':
            # Reload the pristine image to erase freehand strokes.
            photo = Image.open(self.filename)
            self.image = cv2.imread(self.filename)
            self.displayPhoto = photo
            self.displayPhoto = self.displayPhoto.resize(
                (self.im_w, self.im_h))
            self.draw = ImageDraw.Draw(self.displayPhoto)
            self.photo_tk = ImageTk.PhotoImage(image=self.displayPhoto)
            self.c.create_image(0, 0, image=self.photo_tk, anchor=NW)
        else:
            # Rect mode: nothing to clear if no rectangles were drawn.
            if self.rect_candidate is None or len(self.rect_candidate) == 0:
                return
            for item in self.rect_candidate:
                self.c.delete(item)
            self.rect_candidate.clear()
            self.mask_candidate.clear()
            self.checkResp()

    #TODO: reset canvas
    #TODO: undo and redo
    #TODO: draw triangle, rectangle, oval, text

    def activate_button(self, some_button, eraser_mode=False):
        """Visually press `some_button`, release the previous one, and
        remember whether the eraser tool is active."""
        previous = self.active_button
        previous.config(relief=RAISED)
        some_button.config(relief=SUNKEN)
        self.active_button = some_button
        self.eraser_on = eraser_mode

    def beginPaint(self, event):
        """Record the drag origin and flag that a paint stroke has started."""
        self.start_x, self.start_y = event.x, event.y
        self.isPainting = True

    def paint(self, event):
        """Mouse-drag handler: preview a rectangle or draw a freehand stroke.

        In 'rect' mode a preview rectangle follows the cursor (the previous
        preview is deleted each time); in 'poly' mode a line segment is drawn
        on the canvas and rasterized into the mask.
        """
        # NOTE(review): the truthiness tests below treat coordinate 0 as
        # "unset", so a drag whose anchor sits at x==0 or y==0 is skipped —
        # confirm whether these should compare against None instead.
        if self.start_x and self.start_y and self.mode == 'rect':
            # Clamp the drag endpoint to the image bounds.
            self.end_x = max(min(event.x, self.im_w), 0)
            self.end_y = max(min(event.y, self.im_h), 0)
            rect = self.c.create_rectangle(self.start_x,
                                           self.start_y,
                                           self.end_x,
                                           self.end_y,
                                           fill=self.paint_color)
            # Only keep the most recent preview rectangle on the canvas.
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)
            self.rect_buf = rect
        elif self.old_x and self.old_y and self.mode == 'poly':
            # Draw the visible stroke segment...
            line = self.c.create_line(self.old_x,
                                      self.old_y,
                                      event.x,
                                      event.y,
                                      width=self.line_width,
                                      fill=self.paint_color,
                                      capstyle=ROUND,
                                      smooth=True,
                                      splinesteps=36)
            # ...and rasterize the same segment into the mask array.
            cv2.line(self.mask, (self.old_x, self.old_y), (event.x, event.y),
                     (1), self.line_width)
        self.old_x = event.x
        self.old_y = event.y

    def reset(self, event):
        """Mouse-release handler: finalize the rectangle selection.

        Commits the preview rectangle drawn during the drag and records its
        normalized (x1, y1, x2, y2) bounds in `mask_candidate`.
        """
        self.old_x, self.old_y = None, None
        if self.mode == 'rect':
            self.isPainting = False
            # Re-draw the final rectangle and discard the drag preview.
            rect = self.c.create_rectangle(self.start_x,
                                           self.start_y,
                                           self.end_x,
                                           self.end_y,
                                           fill=self.paint_color)
            if self.rect_buf is not None:
                self.c.delete(self.rect_buf)
            self.rect_buf = None
            self.rect_candidate.append(rect)

            # Normalize so (x1, y1) is always the upper-left corner and
            # (x2, y2) the lower-right, regardless of drag direction.
            x1, y1, x2, y2 = min(self.start_x, self.end_x), min(self.start_y, self.end_y),\
                             max(self.start_x, self.end_x), max(self.start_y, self.end_y)
            # up left corner, low right corner
            self.mask_candidate.append((x1, y1, x2, y2))
            print(self.mask_candidate[-1])

    def icon2pen(self, event):
        """Cursor-change hook (enter canvas); intentionally a no-op."""
        pass

    def icon2mice(self, event):
        """Cursor-change hook (leave canvas); intentionally a no-op."""
        pass
コード例 #20
0
## ============================================================================
# Input/output locations.
# NOTE(review): `args`, BATCH_SIZE, and steps_test are defined earlier in the
# original script; they are assumed to be in scope here.
PATH = args.root
save_path = PATH + 'outputs/'
MASK_TEST_DIR = PATH + 'dataset/mask/full/test/'
IM_TEST_DIR = PATH + 'dataset/gt/full/test/'

## ============================================================================
##                              Data generator
## ============================================================================
test_datagen = AugmentingDataGenerator()
# Pairs grayscale 256x256 test images with masks, yielding
# ((masked, mask), original) batches (see the unpacking below).
test_generator = test_datagen.flow_from_directory(IM_TEST_DIR,
                                                  MASK_TEST_DIR,
                                                  target_size=(256, 256),
                                                  color_mode='grayscale',
                                                  batch_size=BATCH_SIZE,
                                                  seed=42)

## ============================================================================
##                           Load model
## ============================================================================
# Inference-only: VGG weights are only needed for the training loss.
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(PATH + 'phase2_gray/weights.h5', train_bn=False)

## ============================================================================
##                           Predict
## ============================================================================
for n in range(steps_test):
    test_data = next(test_generator)
    (masked, mask), ori = test_data
    # plot_callback renders/saves the prediction for this batch.
    plot_callback(model, n, save_path, masked, mask, ori)
コード例 #21
0
def main():
    """Batch-inpaint the MSRA10K test set with a PConv-UNet.

    For every image/mask pair: dilate the mask, downscale the image by
    half, split it into overlapping 512x512 chunks, run the model, stitch
    and upscale the prediction, then composite the generated pixels into
    the masked area only and save the result.
    """
    # Parse command-line options (paths and batch size).
    args = parse_args()

    # Change to the repository root if launched from a subdirectory.
    if os.path.basename(os.getcwd()) != 'PConvInpainting':
        os.chdir('..')

    # SETTINGS
    TEST_FOLDER_IMG = args.img_path
    TEST_FOLDER_MASK = args.mask_path
    OUTPUT_FOLDER = args.out_path
    BATCH_SIZE = args.batch_size

    model = PConvUnet(vgg_weights=None, inference_only=True)
    model.load("pconv_imagenet.h5", train_bn=False)

    fileList = os.listdir(TEST_FOLDER_IMG)

    # Used for chunking up images & stitching them back together.
    chunker = ImageChunker(512, 512, 30)
    kernel = np.ones((7, 7), np.uint8)

    for i in range(0, len(fileList), BATCH_SIZE):
        # Lists for saving images and masks.
        imgs, masks, indices = [], [], []
        # BUG FIX: clamp the inner loop so the final partial batch does not
        # request files past the end of the dataset (previously the loop
        # always ran BATCH_SIZE times and failed on a missing file).
        for j in range(min(BATCH_SIZE, len(fileList) - i)):
            imgName = "MSRA10K_image_{:06d}.jpg".format(i + j)

            imFile = Image.open(TEST_FOLDER_IMG + imgName)
            im = np.array(imFile) / 255  # convert to float

            maskName = imgName.replace(".jpg", ".png")
            maskName = maskName.replace("image", "mask")

            maskFile = Image.open(TEST_FOLDER_MASK + maskName)
            mask = np.array(maskFile)

            # Extend the mask from 1 channel to 3.
            mask3d = np.tile(mask[:, :, None], [1, 1, 3])

            # Dilate the mask to also regenerate a border around the object.
            mask3d = cv2.dilate(mask3d, kernel, iterations=1)
            mask3d = mask3d / 255  # convert to float
            mask3d = 1.0 - mask3d  # framework expects 1 = valid, 0 = hole

            imgs.append(im)
            masks.append(mask3d)
            indices.append(i + j)

            imFile.close()
            maskFile.close()
            print(imgName, maskName)

        for img, mask, index in zip(imgs, masks, indices):
            # Halve the resolution before chunked prediction.
            height, width, depth = img.shape
            imgScale = 0.5
            newX, newY = int(width * imgScale), int(height * imgScale)

            new_img = cv2.resize(img, (newX, newY))
            new_mask = cv2.resize(mask, (newX, newY))

            chunked_images = chunker.dimension_preprocess(deepcopy(new_img))
            chunked_masks = chunker.dimension_preprocess(deepcopy(new_mask))
            pred_imgs = model.predict([chunked_images, chunked_masks])

            # Stitch the chunk predictions back and restore the original size.
            reconstructed_image_resized = chunker.dimension_postprocess(
                pred_imgs, new_img)
            reconstructed_image_original_size = cv2.resize(
                reconstructed_image_resized, (int(width), int(height)))

            # Shrink the valid region slightly so the composite seam falls
            # inside the generated area.
            maskExpanded = cv2.erode(mask, kernel, iterations=3)

            # Apply generated pixels over the masked area only.
            reconstructed_image_final = np.where(
                maskExpanded == 0, reconstructed_image_original_size, img)

            result = Image.fromarray(
                (reconstructed_image_final * 255).astype(np.uint8))
            result.save(OUTPUT_FOLDER +
                        "MSRA10K_image_{:06d}.png".format(index))
コード例 #22
0
ファイル: test.py プロジェクト: ishay2b/PConv-Keras
 def __init__(self, model_path=MODAL_PATH):
     """Create an inference-only PConv-UNet and load `model_path` weights."""
     self.model_path = model_path
     tf.keras.backend.set_learning_phase(0)  # Ignore dropout at inference
     self.model = PConvUnet(vgg_weights=None, inference_only=True)
     # NOTE(review): this second call repeats the one above — presumably a
     # copy-paste leftover; confirm and remove.
     tf.keras.backend.set_learning_phase(0)  # Ignore dropout at inference
     self.model.load(self.model_path)
コード例 #23
0
ファイル: 2-12_training.py プロジェクト: saneatsu/PConv-Keras
# Training data: random crops drawn from three image-size buckets.
train_datagen = DataGenerator(rescale=1. / 255,
                              random_crop_size=(cst.CROP_HEIGHT,
                                                cst.CROP_WIDTH))
train_generator = train_datagen.flow_from_directory(
    directory_small=cst.TRAIN_SMALL_SIZE,
    directory_medium=cst.TRAIN_MEDIUM_SIZE,
    directory_large=cst.TRAIN_LARGE_SIZE,
    batch_size=BATCH_SIZE)

# Validation generator: same crop settings, fixed seed for reproducibility.
val_datagen = DataGenerator(rescale=1. / 255,
                            random_crop_size=(cst.CROP_HEIGHT, cst.CROP_WIDTH))
val_generator = val_datagen.flow_from_directory(
    directory_small=cst.VALID_SMALL_SIZE,
    directory_medium=cst.VALID_MEDIUM_SIZE,
    directory_large=cst.VALID_LARGE_SIZE,
    batch_size=BATCH_SIZE,
    seed=1)

# Checkpoints are written under cst.WEIGHT_PATH by the model itself.
model = PConvUnet(weight_filepath=cst.WEIGHT_PATH)

model.fit(train_generator,
          steps_per_epoch=8000 // BATCH_SIZE,
          validation_data=val_generator,
          validation_steps=8000 // BATCH_SIZE,
          epochs=300,
          plot_callback=None,
          callbacks=[
              TensorBoard(log_dir=cst.TFLOG_PATH, write_graph=False),
          ])
コード例 #24
0
     mask = mask/255.
     mask = 1 - mask
     print ('original max {}'.format(ori.max()))
     #ori = ori / ori.max()
     ori = ori / 255.
     
     masked = deepcopy(ori)
     masked[mask==mask.min()] = 1
     print("mask shape {}".format(masked.shape))
     
     ori = np.expand_dims(ori, axis=0)
     
     mask = np.uint8(np.expand_dims(mask, axis=0))
     masked = np.expand_dims(masked, axis=0)
     
     #mask = np.stack([random_mask_line_for_ct(ori.shape[1],ori.shape[2]) for _ in range(ori.shape[0])], axis=0)
     #masked = deepcopy(ori)
     masked[mask==0]=1
     masks[i,] = mask
     maskeds[i,] = masked
     
 model = PConvUnet(weight_filepath='data/logs_ct_new/')
 model.load("data/logs_ct_new/316_weights_2018-12-11-10-45-45.h5")
 
 pred_img = model.predict([maskeds, masks])
 for i in range(len(img_list)):
     print("saveing" + str(i))
     split = img_list[i]
     splited = split.split('test_')[-1]
     plt.imsave(args.save_name + splited,pred_img[i]*1.)
     
コード例 #25
0
ファイル: realtime.py プロジェクト: DosImpact/focusonme_core
import cv2
import numpy as np

from Sketcher import Sketcher
from libs.util import MaskGenerator, ImageChunker
from libs.pconv_model import PConvUnet

import sys
from copy import deepcopy

print('load model...')
# Inference-only model: VGG weights are not needed outside training loss.
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('pconv_imagenet.h5', train_bn=False)
# model.summary()

img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)

img_masked = img.copy()
# Single-channel mask, same spatial size as the image; starts all-zero.
mask = np.zeros(img.shape[:2], np.uint8)

# Interactive sketching window; the lambda supplies the draw colors
# (white on the image copy, 255 on the mask).
sketcher = Sketcher('image', [img_masked, mask], lambda:
                    ((255, 255, 255), 255))
# Splits large images into 512x512 tiles with 30px overlap for the model.
chunker = ImageChunker(512, 512, 30)

# cv2.imwrite('Sketcher_img_masked.png',img_masked)
# cv2.imwrite('Sketcher_mask.png',mask)
while True:
    key = cv2.waitKey()

    if key == ord('q'):  # quit
コード例 #26
0
            gc.collect()
            yield [masked, mask], ori


# Test generator over 256x256 images; fixed seed for reproducible batches.
test_datagen = DataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(cst.TEST_PATH,
                                                  target_size=(256, 256),
                                                  batch_size=BATCH_SIZE,
                                                  seed=1)

# Pick out an example
test_data = next(test_generator)
(masked, mask), ori = test_data

# Load weights from previous run
model = PConvUnet(weight_filepath='data/model/')
model.load('{}/data/model/weight/3000_weights_2018-09-29-08-46-51.h5'.format(
    cst.MNT_PATH),
           train_bn=False,
           lr=0.00005)

# Figure counter for the comparison plots saved in the loop below.
n = 0
for (masked, mask), ori in tqdm(test_generator):
    # Run predictions for this batch of images
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 2, figsize=(10, 5))
        axes[0].imshow(masked[i, :, :, :])
コード例 #27
0
ファイル: pipeline.py プロジェクト: gledis69/ComVisProject
class LinkedModels:
    """Pipeline: LISA object/shadow segmentation -> PConv inpainting.

    `run_lisa` must be called first to populate the cached mask state;
    `rm_objs` then inpaints the selected objects out of the stored image.
    """

    def __init__(self, lisa_config, pconv_weights):
        self.lisa_predictor = get_obj_shadow_masks_predictor(lisa_config)
        self.pconv_model = PConvUnet(vgg_weights=None, inference_only=True)
        self.pconv_model.load(pconv_weights, train_bn=False)
        # Mask pairs produced by the last `run_lisa` call; None = no state.
        self._internal_state = None
        # Copy of the image the cached masks were computed on.
        self._internal_state_image = None

    def run_lisa(self, image):
        """Run the LISA predictor on `image` and cache its processed masks."""
        lisa_outputs, _ = self.lisa_predictor.run_on_image(image)
        self._internal_state = process_lisa_outputs(lisa_outputs)
        self._internal_state_image = image.copy()
        return self._internal_state

    def rm_objs(self, obj_indx, mask_types, dilation=True):
        """Inpaint away the objects selected by `obj_indx`.

        Args:
            obj_indx: indices into the cached mask list; out-of-range
                entries are silently dropped.
            mask_types: per-object mask variant selector; must be the same
                length as `obj_indx`.
            dilation: whether to dilate the masks before inpainting.

        Raises:
            RuntimeError: if `run_lisa` has not been called yet.
        """
        if self._internal_state is None:
            # RuntimeError (was a bare Exception) still satisfies any
            # caller catching Exception.
            raise RuntimeError(
                'No internal state, call `run_lisa` to set the state')

        assert len(obj_indx) == len(mask_types)

        if self.get_object_no() == 0:
            return self._internal_state_image.copy()

        obj_indx = np.array(obj_indx)
        mask_types = np.array(mask_types)

        # Keep only indices that refer to an existing detected object.
        idx = np.where((obj_indx >= 0) & (obj_indx < self.get_object_no()))[0]

        obj_indx = obj_indx[idx]
        mask_types = mask_types[idx]

        mask_pairs = self._internal_state[obj_indx]
        masks = np.array([
            mask_pairs[i][mask_type] for i, mask_type in enumerate(mask_types)
        ])

        return inpaint(self.pconv_model,
                       self._internal_state_image,
                       masks,
                       dilation=dilation)

    def get_object_no(self):
        """Number of objects detected by the last LISA run (0 if none)."""
        if self._internal_state is not None:
            return len(self._internal_state)
        return 0

    def get_masks(self):
        """A copy of the cached mask state, or None before `run_lisa`."""
        if self._internal_state is not None:
            return self._internal_state.copy()
        return None

    def run_full_pipeline(self,
                          image,
                          rm_objs=(0,),  # immutable defaults avoid the
                          mask_types=(1,),  # shared-mutable-default pitfall
                          dilation=True):
        """Convenience wrapper: segment `image`, then remove `rm_objs`."""
        self.run_lisa(image)
        return self.rm_objs(rm_objs, mask_types, dilation)
コード例 #28
0
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')

        plt.savefig(r'data/custom_test_samples/img_{}_{}.png'.format(
            i, pred_time))
        plt.close()


# Instantiate the model and resume from the most recent checkpoint.
# (Previously a throwaway `PConvUnet()` was constructed and immediately
# overwritten; the redundant — and expensive — build has been removed.)
model = PConvUnet(weight_filepath='data/logs/')
latest_weights = get_latest_weights_file()
print(latest_weights)
model.load(latest_weights)

# Run training for certain amount of epochs
model.fit(train_generator,
          steps_per_epoch=100,
          validation_data=val_generator,
          validation_steps=10,
          epochs=50,
          plot_callback=plot_callback,
          callbacks=[
              TensorBoard(log_dir='../data/logs/initial_training',
                          write_graph=False)
コード例 #29
0
    com_image = np.zeros((400, 400, 3))
    for i in range(400):
        for j in range(400):
            if mask[i, j, 0] == 1:  #vaild pixel
                com_image[i, j, 0] = true[i, j, 0]
                com_image[i, j, 1] = true[i, j, 1]
                com_image[i, j, 2] = true[i, j, 2]
            else:  #hole pixel
                com_image[i, j, 0] = pred[i, j, 0]
                com_image[i, j, 1] = pred[i, j, 1]
                com_image[i, j, 2] = pred[i, j, 2]
    return com_image


# Load the trained inpainting weights selected on the command line.
weight = options.weight
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(weight, train_bn=False)
imid = int(options.id)
# Drop any alpha channel; keep the first three (RGB) channels only.
masks = mpimg.imread('./datasets/mask/data/' + str(imid) +
                     'pict.png')[:, :, :3]
Planck_Image = mpimg.imread('./datasets/test/data/' + str(imid) +
                            'pict.png')[:, :, :3]
# complete_image keeps valid pixels from the input and fills holes with
# the model prediction.
CosmoVae = complete_image(
    model.predict([np.expand_dims(Planck_Image, 0),
                   np.expand_dims(masks, 0)])[0], Planck_Image, masks)
masked = deepcopy(Planck_Image)
masked[masks == 0] = 1
if not os.path.exists('./datasets/predicted/'):
    os.makedirs('./datasets/predicted/')
# NOTE(review): plt.imsave returns None, so `fig` is always None here.
fig = plt.imsave('./datasets/predicted/' + str(imid) + 'pred.png', CosmoVae)
コード例 #30
0
    random_crop_size=(cst.CROP_HEIGHT, cst.CROP_WIDTH))
train_generator = train_datagen.flow_from_directory(
    cst.TRAIN_PATH,
    target_size=(cst.MAX_HEIGHT, cst.MAX_WIDTH),
    batch_size=BATCH_SIZE)

# Validation generator: fixed seed so validation batches are reproducible.
val_datagen = DataGenerator(
    rescale=1./255,
    random_crop_size=(cst.CROP_HEIGHT, cst.CROP_WIDTH))
val_generator = val_datagen.flow_from_directory(
    cst.VAL_PATH,
    target_size=(cst.MAX_HEIGHT, cst.MAX_WIDTH),
    batch_size=BATCH_SIZE,
    seed=1)

model = PConvUnet(weight_filepath=cst.WEIGHT_PATH)

# Resume training from a previous checkpoint.
# NOTE(review): other snippets in this project call model.load(...);
# confirm load_weights is the intended API here.
model.load_weights('/nfs/host/PConv-Keras/data/model/resize-1536x3072/512x512_GPU-2_Batch-04_RecursiveTraining/weight/32_weights_2019-01-10-10-16-11.h5')

model.fit(
    train_generator,
    steps_per_epoch=8000//BATCH_SIZE,
    validation_data=val_generator,
    validation_steps=8000//BATCH_SIZE,
    epochs=300,
    plot_callback=None,
    callbacks=[
        TensorBoard(log_dir=cst.TFLOG_PATH, write_graph=False),
    ])

# $ tensorboard --logdir=/nfs/host/PConv-Keras/data/model/tflogs --port 8082