def flow_from_directory(self, directory, mask_generator=None, *args, **kwargs):
    """Yield endless batches of ([masked_image, mask], original_image).

    Wraps the parent class's ``flow_from_directory`` (``class_mode`` is
    forced to ``None`` so only raw images come back) and pairs every image
    batch with freshly sampled irregular masks.

    Args:
        directory: Root image folder, forwarded to the parent generator.
        mask_generator: Source of binary masks; defaults to a fresh
            ``MaskGenerator(256, 256)``. The original default of
            ``MaskGenerator(256, 256)`` in the signature was a
            mutable-default-argument bug: it was constructed once at class
            definition time and silently shared by every call.
        *args, **kwargs: Forwarded to the parent ``flow_from_directory``;
            a ``seed`` kwarg, if present, is also forwarded to mask sampling.

    Yields:
        ``[masked_image, mask], original_image`` where ``masked_image`` is
        the batch with mask-zero pixels zeroed out.
    """
    if mask_generator is None:
        mask_generator = MaskGenerator(256, 256)
    generator = super().flow_from_directory(directory, class_mode=None, *args, **kwargs)
    # Reuse the seed handed to the underlying Keras generator so the masks
    # stay reproducible alongside the image augmentation.
    seed = kwargs.get('seed')
    while True:
        original_image = next(generator)
        # One mask per image in the batch (batch size = shape[0]).
        mask = np.stack(
            [mask_generator.sample(seed) for _ in range(original_image.shape[0])],
            axis=0,
        )
        # Copy before multiplying so the clean target is not clobbered.
        masked_image = deepcopy(original_image)
        masked_image *= mask
        yield [masked_image, mask], original_image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from libs.util import MaskGenerator, torch_preprocessing, torch_postprocessing
from libs.DataGenerator import MaskedDataGenerator

# mask generation example
generator = MaskGenerator(256, 256, 3)

# get a single mask (values 0/1, scaled to 0/255 for display)
mask = generator.sample()

# display with opencv
cv2.imshow('mask displayed with opencv', mask * 255)
# Without a waitKey call HighGUI never processes its event queue, so the
# window above would stay blank; one tick is enough to paint it.
cv2.waitKey(1)
print("Check")

# get many masks
masks = np.stack([generator.sample() for _ in range(3)], axis=0)

# display them with matplotlib
fig, axes = plt.subplots(1, 3, figsize=(10, 4))
for idx in range(3):
    axes[idx].imshow(masks[idx] * 255)
    axes[idx].set_title('mask {}'.format(idx))
fig.suptitle('Masks displayed with matplotlib', fontsize=12)
# NOTE(review): no plt.show() in this chunk — presumably it follows below;
# confirm, otherwise the matplotlib figure is never displayed.
# print(masked.shape, ori.shape) gc.collect() yield [masked, mask], ori # Create training generator train_datagen = AugmentingDataGenerator( rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, rescale=1./255, horizontal_flip=True ) train_generator = train_datagen.flow_from_directory( TRAIN_DIR, MaskGenerator(400, 400, 3), target_size=(400, 400), batch_size=BATCH_SIZE ) # Create validation generator val_datagen = AugmentingDataGenerator(rescale=1./255) val_generator = val_datagen.flow_from_directory( VAL_DIR, MaskGenerator(400, 400, 3), target_size=(400, 400), batch_size=BATCH_SIZE, #classes=['val'], ) # Create testing generator
# --- CLI entry: validate arguments and build the data generators ---
args = parse_args()

# Finetuning resumes from phase-1 weights, so a checkpoint is mandatory.
# NOTE(review): ValueError would be the conventional exception type here.
if args.stage == 'finetune' and not args.checkpoint:
    raise AttributeError('If you are finetuning your model, you must supply a checkpoint file')

# Create training generator (augmentation + rescale to [0, 1])
train_datagen = AugmentingDataGenerator(
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    rescale=1./255,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    args.train,
    MaskGenerator(512, 512, 3),  # masks sized to match target_size
    target_size=(512, 512),
    batch_size=args.batch_size
)

# Create validation generator (rescaling only — no augmentation)
val_datagen = AugmentingDataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    args.validation,
    MaskGenerator(512, 512, 3),
    target_size=(512, 512),
    batch_size=args.batch_size,
    classes=['val'],
    seed=42  # fixed seed so validation batches are reproducible across runs
)
import numpy as np
import cv2
from libs.util import MaskGenerator, ImageChunker

# Fixed-seed irregular mask, cropped to a single-channel patch.
# NOTE(review): slice 0:63 yields 63 pixels, not 64 — confirm intended size.
mask = MaskGenerator(128, 128, 3, rand_seed=1222)._generate_mask()
mask = mask[0:63, 0:63, 0]

# import keras.activations as activations
# import tensorflow as tf
# f = np.array([[1, 2, 1],
# [1, 0, 0],
# [-1, 0, 1]])
# img = np.array([
# [2, 3, 7, 4, 6, 2, 9],
# [6, 6, 9, 8, 7, 4, 3],
# [3, 4, 8, 3, 8, 9, 7],
# [7, 8, 3, 6, 6, 3, 4],
# [4, 2, 1, 8, 3, 4, 6],
# [3, 2, 4, 1, 9, 8, 3],
# [4, 5, 3, 9, 2, 1, 4]])
# img=np.random.randint(0,50,(63, 63))  # a matrix of random numbers (alternative synthetic input)

# Hard-coded local test image; adjust the path when reproducing elsewhere.
input_img = cv2.imread(r"C:\Users\dell\Desktop\paper2\\figure\Fig3\\img.jpg")
# img0=np.array(input_img)
img = input_img[:, :, 0]  # channel 0 (blue, in OpenCV's BGR order)
masked_img = img * mask   # zero out the hole (mask == 0) pixels

# Random convolution kernels of three sizes, plus an all-ones mask kernel.
kernel1 = np.random.rand(7, 7)
kernel2 = np.random.rand(5, 5)
kernel3 = np.random.rand(3, 3)
mask_kernel = np.ones([7, 7])
# f=round(f,1)
strde = 1  # convolution stride
# --- CLI entry: validate arguments and build the data generators ---
args = parse_args()

# Finetuning resumes from phase-1 weights, so a checkpoint is mandatory.
if args.stage == 'finetune' and not args.checkpoint:
    raise AttributeError(
        'If you are finetuning your model, you must supply a checkpoint file'
    )

# Create training generator (augmentation + rescale to [0, 1])
train_datagen = AugmentingDataGenerator(rotation_range=10,
                                        width_shift_range=0.1,
                                        height_shift_range=0.1,
                                        rescale=1. / 255,
                                        horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
    args.train,
    # filepath=args.masks: masks are loaded from disk, not procedurally drawn
    MaskGenerator(512, 512, 3, filepath=args.masks),
    target_size=(512, 512),
    batch_size=args.batch_size)

# Create validation generator (rescaling only — no augmentation)
val_datagen = AugmentingDataGenerator(rescale=1. / 255)
# NOTE(review): the closing parenthesis of this call lies beyond this chunk.
val_generator = val_datagen.flow_from_directory(args.validation,
                                                MaskGenerator(
                                                    512, 512, 3,
                                                    filepath=args.masks),
                                                target_size=(512, 512),
                                                batch_size=args.batch_size,
                                                classes=['val']
                                                # seed=42
#model4.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t55\weights.25-1.28.h5", train_bn=False) #model5.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t44\weights.40-0.18.h5", train_bn=False) #model6.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t36\weights.59-0.30.h5", train_bn=False) # model1.load(r"D:\PycharmProjects2\PConv-Keras\data\logs\Thanka_phase1\p1t16\weights.11-1.15.h5", train_bn=False) models = [] models.append(model1) # models.append(model2) #models.append(model3) #models.append(model4) #models.append(model5) #models.append(model6) mse = [0, 0, 0, 0] psnr = [0, 0, 0, 0] ssim = [0, 0, 0, 0] image_num = 0 mask = MaskGenerator(512, 512, 3, rand_seed=4210)._generate_mask() start_time = datetime.now() for filename in os.listdir(original_img_folder): image_num = image_num + 1 image = cv2.imread(os.path.join(original_img_folder, filename)) input_img = image image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) image = np.array(image) / 255 # mask = MaskGenerator(512, 512)._generate_mask() image[mask == 0] = 1 for j in range(0, len(models)): predicted_img = models[j].predict( [np.expand_dims(image, 0), np.expand_dims(mask, 0)])[0] * 255 # if you want to save inpainted result, please use the following 3 lines, else use the 4th line below:
# --- fragment: build randomly-cropped, pre-masked test images ---
# (`crops`, `axes`, `imgs`, `masks`, and SAMPLE_IMAGE are defined above this chunk)
for crop, ax in zip(crops, axes.flatten()):

    # Load image, upscaled so every crop size fits
    im = Image.open(SAMPLE_IMAGE).resize((2048, 2048))

    # Crop image at a random position; crop is (height, width)
    h, w = im.height, im.width
    left = np.random.randint(0, w - crop[1])
    right = left + crop[1]
    upper = np.random.randint(0, h - crop[0])
    lower = upper + crop[0]
    im = im.crop((left, upper, right, lower))

    # Create masked array: rescale to [0, 1], fill hole pixels with 1 (white)
    im = np.array(im) / 255
    mask_gen = MaskGenerator(*crop)
    mask = mask_gen._generate_mask()
    im[mask == 0] = 1

    # Store for prediction
    imgs.append(im)
    masks.append(mask)

    # Show image
    ax.imshow(im)
    ax.set_title("{}x{}".format(crop[0], crop[1]))

from libs.pconv_model import PConvUnet

# Inference-only: skips wiring up the VGG feature-loss network.
model = PConvUnet(vgg_weights=None, inference_only=True)
model.load(r"/content/pconv_imagenet.26-1.07.h5", train_bn=False)
# NOTE(review): third ImageChunker argument is presumably the tile
# overlap in pixels — confirm against libs.util.
chunker = ImageChunker(512, 512, 30)
import os
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from libs.util import MaskGenerator

# Earlier batch experiment, kept for reference:
# for i in range(0, 100):
#     mask = MaskGenerator(512, 512)._generate_mask()
#     # mask = MaskGenerator(256, 256)._generate_mask()
#     # mask = MaskGenerator(512, 512, 3, rand_seed=666)._generate_mask()
#     cv2.imwrite('./temp_mask/mask_temp_' + str(i) + '.png', mask * 255)

# Draw one reproducible 512x512 irregular mask (fixed seed) and save it
# as a black/white PNG; mask values are 0/1, hence the *255 scaling.
generated = MaskGenerator(512, 512, 3, rand_seed=22445)._generate_mask()
output_path = './temp_mask/mask_temp_20200407.png'
cv2.imwrite(output_path, generated * 255)
import cv2
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from libs.util import MaskGenerator

# mask reduction in partial convolutions: example
generator = MaskGenerator(256, 256, 3)

# get mask (HxWx3 array of 0/1 values)
mask = generator.sample()

clusterSize = (2, 2)

fig, axes = plt.subplots(1, 4, figsize=(10, 5))
for step in range(3):
    axes[step].imshow(mask * 255)
    # Grow the valid (==1) region: an interior pixel becomes valid when any
    # channel-0 pixel inside its (2*c0) x (2*c1) neighbourhood equals 1.
    # Vectorized with shifted boolean slices — 16 whole-array ORs instead of
    # the original quadruple Python loop (~10^6 iterations per step), with
    # identical semantics: dk in [-c0, c0) and dm in [-c1, c1) mirror the
    # half-open ranges range(i - c0, i + c0) / range(j - c1, j + c1), and
    # the c0/c1-wide border rows/columns are left untouched, as before.
    c0, c1 = clusterSize
    h, w = mask.shape[0], mask.shape[1]
    valid = (mask[:, :, 0] == 1)
    hit = np.zeros((h, w), dtype=bool)
    for dk in range(-c0, c0):
        for dm in range(-c1, c1):
            hit[c0:h - c0, c1:w - c1] |= valid[c0 + dk:h - c0 + dk, c1 + dm:w - c1 + dm]
    newmask = deepcopy(mask)
    newmask[hit] = (1, 1, 1)
    mask = newmask
axes[3].imshow(mask * 255)
plt.show()