Example #1
import numpy as np


def inpaint(model, img, masks, dilation=True):
    # Prepare the input image and merge all masks into a single binary mask
    preped_img = prep_image_for_inpainting(img, masks)
    mask = merge_masks(masks)

    # Optionally dilate the mask so the inpainted region covers object borders
    if dilation:
        mask = dilate_mask(mask)

    # Split image and mask into 512x512 chunks (30 px overlap), run the model,
    # then stitch the predicted chunks back into a full-size image
    chunker = ImageChunker(512, 512, 30)
    chunked_images = chunker.dimension_preprocess(preped_img)
    chunked_masks = chunker.dimension_preprocess(prep_mask_for_inpaiting(mask))
    pred_imgs = model.predict([chunked_images, chunked_masks])
    reconstructed_image = chunker.dimension_postprocess(pred_imgs, preped_img)

    # Model output is in [0, 1]; convert back to 8-bit
    return (reconstructed_image * 255).astype(np.uint8)
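A minimal usage sketch for the helper above; the file names are placeholders and the mask format is an assumption based on the other examples on this page:

import cv2
from libs.pconv_model import PConvUnet  # import path as used in Example #6

model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('pconv_imagenet.h5', train_bn=False)

img = cv2.imread('input.jpg', cv2.IMREAD_COLOR)
# assumption: one single-channel uint8 mask, 255 inside the region to remove
masks = [cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)]

result = inpaint(model, img, masks, dilation=True)
cv2.imwrite('inpainted.png', result)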
Example #2
import sys
from copy import deepcopy

import cv2
import numpy as np

print('load model...')
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('pconv_imagenet.h5', train_bn=False)
# model.summary()

img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)

img_masked = img.copy()
mask = np.zeros(img.shape[:2], np.uint8)

sketcher = Sketcher('image', [img_masked, mask], lambda:
                    ((255, 255, 255), 255))
chunker = ImageChunker(512, 512, 30)

# cv2.imwrite('Sketcher_img_masked.png',img_masked)
# cv2.imwrite('Sketcher_mask.png',mask)

while True:
    key = cv2.waitKey()

    if key == ord('q'):  # quit
        break
    if key == ord('r'):  # reset
        print('reset')
        img_masked[:] = img
        mask[:] = 0
        sketcher.show()
    if key == 32:  # hit spacebar to run inpainting
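        # Hedged continuation sketch: the original example is truncated here.
        # The steps below follow Examples #1 and #5 on this page; the exact
        # variable names and the display call are assumptions.
        input_img = img_masked.astype(np.float32) / 255.
        # The PConv framework expects holes to be 0, so invert the sketched
        # mask and expand it to 3 channels
        input_mask = np.stack([1.0 - mask / 255.0] * 3, axis=-1).astype(np.float32)

        chunked_imgs = chunker.dimension_preprocess(deepcopy(input_img))
        chunked_masks = chunker.dimension_preprocess(deepcopy(input_mask))
        pred_imgs = model.predict([chunked_imgs, chunked_masks])
        result = chunker.dimension_postprocess(pred_imgs, input_img)

        cv2.imshow('inpainted', (result * 255).astype(np.uint8))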
Example #3
def main():

    # Parse command-line arguments
    args = parse_args()

    # Change to root path
    if os.path.basename(os.getcwd()) != 'PConvInpainting':
        os.chdir('..')

    # SETTINGS
    TEST_FOLDER_IMG = args.img_path
    TEST_FOLDER_MASK = args.mask_path
    OUTPUT_FOLDER = args.out_path
    BATCH_SIZE = args.batch_size
    #

    model = PConvUnet(vgg_weights=None, inference_only=True)
    model.load("pconv_imagenet.h5", train_bn=False)

    fileList = os.listdir(TEST_FOLDER_IMG)

    # Used for chunking up images & stitching them back together
    chunker = ImageChunker(512, 512, 30)
    kernel = np.ones((7, 7), np.uint8)

    for i in range(0, len(fileList), BATCH_SIZE):
        ####
        # Lists for saving images and masks
        imgs, masks, indices = [], [], []
        for j in range(0, BATCH_SIZE):
            imgName = "MSRA10K_image_{:06d}.jpg".format(i + j)

            imFile = Image.open(TEST_FOLDER_IMG + imgName)
            im = np.array(imFile) / 255  # convert to float

            maskName = imgName.replace(".jpg", ".png")
            maskName = maskName.replace("image", "mask")

            maskFile = Image.open(TEST_FOLDER_MASK + maskName)
            mask = np.array(maskFile)

            # extend from 1 channel to 3
            mask3d = np.tile(mask[:, :, None], [1, 1, 3])

            # dilate mask to process additional border
            mask3d = cv2.dilate(mask3d, kernel, iterations=1)
            mask3d = mask3d / 255  # convert to float
            mask3d = 1.0 - mask3d  # need to invert mask due to framework

            imgs.append(im)
            masks.append(mask3d)
            indices.append(i + j)

            imFile.close()
            maskFile.close()
            print(imgName, maskName)
            ####

        # print("testing....")
        for img, mask, index in zip(imgs, masks, indices):

            # Resize the image and mask before running the model

            height, width, depth = img.shape
            imgScale = 0.5
            newX, newY = int(width * imgScale), int(height * imgScale)

            new_img = cv2.resize(img, (newX, newY))
            new_mask = cv2.resize(mask, (newX, newY))

            chunked_images = chunker.dimension_preprocess(deepcopy(new_img))
            chunked_masks = chunker.dimension_preprocess(deepcopy(new_mask))
            pred_imgs = model.predict([chunked_images, chunked_masks])

            reconstructed_image_resized = chunker.dimension_postprocess(
                pred_imgs, new_img)
            reconstructed_image_original_size = cv2.resize(
                reconstructed_image_resized, (int(width), int(height)))

            # Erode the inverted mask (holes are 0) so the hole region grows slightly
            maskExpanded = cv2.erode(mask, kernel, iterations=3)

            # Apply the generated content over the masked area only; keep the
            # original pixels everywhere else
            reconstructed_image_final = np.where(
                maskExpanded == 0, reconstructed_image_original_size, img)

            result = Image.fromarray(
                (reconstructed_image_final * 255).astype(np.uint8))
            result.save(OUTPUT_FOLDER +
                        "MSRA10K_image_{:06d}.png".format(index))
Example #4
    return parser.parse_args()


# Run script
if __name__ == '__main__':

    # Parse command-line arguments
    args = parse_args()
    # get input and output path for image processing
    input_path = args.input_path
    out_path = args.out_path
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    # get target image size
    tar_rows, tar_cols = np.array(args.size.split('x'), dtype=np.int32)
    chunker = ImageChunker(tar_rows, tar_cols, args.overlap)

    # get image list from input dataset
    img_list = os.listdir(input_path)
    for i, img_name in enumerate(img_list):
        if i < args.sid:
            continue
        if i >= args.eid:
            break
        if i % 1000 == 0:
            print('processing {:d}/{:d} images'.format(i, len(img_list)))
        ori_img = cv2.imread(os.path.join(input_path, img_name))
        rimg = cv2.resize(ori_img, (420, 512))
        # if ori_img.shape[0] != 218 or ori_img.shape[1] != 178:
        #     print('{} with shape {:d}x{:d}'.format(os.path.join(input_path, img_name), ori_img.shape[0], ori_img.shape[1]))
        chunked_images = chunker.dimension_preprocess(rimg)
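        # Hedged continuation sketch: the original example is truncated here.
        # Given out_path above, the preprocessed chunks are presumably written
        # to disk; the output file naming below is an assumption.
        base_name = os.path.splitext(img_name)[0]
        for j, chunk in enumerate(chunked_images):
            cv2.imwrite(os.path.join(out_path, '{}_{:02d}.png'.format(base_name, j)), chunk)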
Example #5
print('load model...')
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load('pconv_imagenet.h5', train_bn=False)
# model.summary()

# sys.argv.append('data/images/04.jpg')  --> lets the script be launched with Run (no command-line argument needed).
img = cv2.imread(sys.argv[1], cv2.IMREAD_COLOR)

img_masked = img.copy()
mask = np.zeros(img.shape[:2], np.uint8)

# The Sketcher class (taken from the OpenCV samples) lets the user paint freely on the image.
sketcher = Sketcher('image', [img_masked, mask], lambda:
                    ((255, 255, 255), 255))
chunker = ImageChunker(512, 512, 30)  # ImageChunker splits the image into chunks and later stitches them back together

while True:
    key = cv2.waitKey()

    if key == ord('q'):  # quit
        break
    if key == ord('r'):  # reset
        print('reset')
        img_masked[:] = img
        mask[:] = 0
        sketcher.show()
    if key == 32:  # hit spacebar to run inpainting
        input_img = img_masked.copy()  # use the masked image as model input
        input_img = input_img.astype(np.float32) / 255.  # scale to [0, 1], matching the training inputs
Example #6
    # Generate a random irregular mask for this crop size and white out the
    # hole pixels (mask == 0) in the image
    mask_gen = MaskGenerator(*crop)
    mask = mask_gen._generate_mask()
    im[mask == 0] = 1

    # Store for prediction
    imgs.append(im)
    masks.append(mask)

    # Show image
    ax.imshow(im)
    ax.set_title("{}x{}".format(crop[0], crop[1]))

from libs.pconv_model import PConvUnet
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/pconv_imagenet.26-1.07.h5", train_bn=False)
chunker = ImageChunker(512, 512, 30)


def plot_images(images, s=5):
    _, axes = plt.subplots(1, len(images), figsize=(s * len(images), s))
    if len(images) == 1:
        axes = [axes]
    for img, ax in zip(images, axes):
        ax.imshow(img)
    plt.show()


for img, mask in zip(imgs, masks):
    print("Image with size: {}".format(img.shape))

    # Process sample
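    # Hedged continuation sketch: the original example is truncated here.
    # Each sample follows the same chunk -> predict -> stitch pattern used in
    # the other examples on this page, then plot_images() defined above shows
    # the input, mask, and reconstruction side by side.
    chunked_images = chunker.dimension_preprocess(img.copy())
    chunked_masks = chunker.dimension_preprocess(mask.copy())
    pred_imgs = model.predict([chunked_images, chunked_masks])
    reconstructed_image = chunker.dimension_postprocess(pred_imgs, img)

    plot_images([img, mask, reconstructed_image])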