def flow_from_directory(self, directory, *args, **kwargs):
    """Yield inpainting training batches from a directory of images.

    Wraps the parent generator (class_mode forced to None so only raw
    images are produced) and pairs every augmented batch with freshly
    generated random masks.

    Yields:
        ([masked, mask], batch): masked images + masks as model input,
        and the unmasked originals as the reconstruction target.
    """
    source = super().flow_from_directory(directory, class_mode=None, *args, **kwargs)
    while True:
        batch = next(source)

        # One random mask per sample, stacked along the batch axis.
        masks = np.stack(
            [random_mask(batch.shape[1], batch.shape[2]) for _ in range(batch.shape[0])],
            axis=0,
        )

        # Paint the masked-out region white (value 1) on a copy.
        masked_batch = deepcopy(batch)
        masked_batch[masks == 0] = 1

        gc.collect()
        yield [masked_batch, masks], batch
def flow_from_directory(self, directory, *args, **kwargs):
    """Yield cropped inpainting training batches from a directory.

    Wraps the parent generator (class_mode forced to None), builds one
    random mask per augmented sample, random-crops image and mask
    together via self.random_crop, and applies the mask.

    Yields:
        ([masked, croped_mask], croped_ori): masked crops + masks as
        model input, and the unmasked crops as the target.
    """
    generator = super().flow_from_directory(directory, class_mode=None, *args, **kwargs)
    # Data augmentation loop
    while True:
        # Get augmented image samples
        ori = next(generator)
        ori_length = ori.shape[0]

        # Get masks for each image sample.
        # BUG FIX: a stray trailing "1" after np.stack(...) was a
        # syntax error in the original; removed.
        mask = np.stack(
            [random_mask(ori.shape[1], ori.shape[2]) for _ in range(ori_length)],
            axis=0,
        )

        # Crop ori and mask together so they stay aligned
        croped_ori, croped_mask = self.random_crop(ori, mask)

        # Apply masks to all image samples (masked region painted white)
        masked = deepcopy(croped_ori)
        masked[croped_mask == 0] = 1

        gc.collect()
        yield [masked, croped_mask], croped_ori
def flow(self, x, *args, **kwargs):
    """Yield inpainting training batches from an in-memory array.

    Each iteration draws an augmented batch from the parent `flow`
    generator, creates one random mask per sample, and yields the
    masked images alongside the originals.

    NOTE(review): the parent generator is re-created on every loop
    iteration (existing behavior, preserved here).

    Yields:
        ([masked, masks], batch): model inputs and targets.
    """
    while True:
        # Augmented image samples for this step
        batch = next(super().flow(x, *args, **kwargs))

        # Stack a fresh random mask for every sample in the batch
        masks = np.stack(
            [random_mask(batch.shape[1], batch.shape[2]) for _ in range(batch.shape[0])],
            axis=0,
        )

        # Whiten the masked-out pixels on a deep copy
        masked = deepcopy(batch)
        masked[masks == 0] = 1

        gc.collect()
        yield [masked, masks], batch
def flow_from_directory(self, directory_small, directory_medium, directory_large, *args, **kwargs):
    """Yield multi-resolution inpainting batches from three directories.

    Builds one parent generator per image size (small / medium / large),
    picks one of them uniformly at random each step, color-jitters the
    batch with imgaug, masks and random-crops it, and yields the result.
    Batches whose masked image is mostly white are skipped and redrawn.

    Yields:
        ([masked, croped_mask], croped_ori): model inputs and targets.
    """
    target_small = (512, 1024)
    target_medium = (768, 1536)
    target_large = (1536, 3072)

    generators = {
        'small': super().flow_from_directory(
            directory_small, target_size=target_small, class_mode=None, *args, **kwargs),
        'medium': super().flow_from_directory(
            directory_medium, target_size=target_medium, class_mode=None, *args, **kwargs),
        'large': super().flow_from_directory(
            directory_large, target_size=target_large, class_mode=None, *args, **kwargs),
    }

    # Uniform sampling over the three size buckets
    ratio_small = 1 / 3
    ratio_medium = 1 / 3
    ratio_large = 1 / 3

    cnt = 0
    while True:
        # Choose which size generator feeds this step
        img_size_type = np.random.choice(
            ['small', 'medium', 'large'],
            p=[ratio_small, ratio_medium, ratio_large])
        generator = generators[img_size_type]

        # Augmented image samples, rescaled back to 0..255 for imgaug
        ori_img = next(generator) * 255
        ori_length = ori_img.shape[0]

        # Color-tone jitter for data augmentation, then back to 0..1
        seq = iaa.Sequential([
            iaa.Add((-20, 20), per_channel=True),
        ])
        ori = seq.augment_images(ori_img) / 255

        # One random mask per sample
        mask = np.stack(
            [random_mask(ori.shape[1], ori.shape[2]) for _ in range(ori_length)],
            axis=0)

        # Crop image and mask together so they stay aligned
        croped_ori, croped_mask = self.random_crop(ori=ori, mask=mask)

        # Apply the mask (masked-out pixels painted white)
        masked = deepcopy(croped_ori)
        masked[croped_mask == 0] = 1

        # Redo the draw when 50% or more of the image is white
        if self.has_many_white(cnt, masked):
            continue

        gc.collect()
        cnt += 1
        yield [masked, croped_mask], croped_ori
# Quick single-image inpainting prediction script.
# BUG FIX: deepcopy was used below but never imported.
from copy import deepcopy

import cv2
from skimage.io import imsave

# Import modules from libs/ directory
from libs.pconv_model import PConvUnet
from libs.util import random_mask, plot_images

# Load image (OpenCV loads BGR; convert to RGB and normalize to 0..1)
img = cv2.imread('./data/building.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255
shape = img.shape
print(f"Shape of image is: {shape}")

# Load mask
mask = random_mask(shape[0], shape[1])

# Image + mask: paint masked-out pixels white on a copy
masked_img = deepcopy(img)
masked_img[mask == 0] = 1

model = PConvUnet(weight_filepath='result/logs/')
model.load(r"result/logs/1_weights_2019-02-21-04-59-53.h5", train_bn=False)

# Run prediction quickly
pred = model.scan_predict((img, mask))

# Show result
plot_images([img, masked_img, pred])
# NOTE(review): filename typo 'orginal' kept for compatibility with any
# downstream consumers of this output path.
imsave('result/test_orginal.png', img)
imsave('result/test_masked.png', masked_img)
# coding: utf-8

# # Mask Generation with OpenCV
# In the paper they generate irregular masks by using occlusion/dis-occlusion
# between two consecutive frames of videos, as described in
# [this paper](https://lmb.informatik.uni-freiburg.de/Publications/2010/Bro10e/sundaram_eccv10.pdf).
#
# Instead we'll simply be using OpenCV to generate some irregular masks,
# which will hopefully perform just as well. We've implemented this in the
# function `random_mask`, which is located in the `util.py` file in the
# libs directory.

import itertools

import matplotlib.pyplot as plt

from libs.util import random_mask

# Draw a 5x5 grid of freshly generated 500x500 masks.
_, axes = plt.subplots(5, 5, figsize=(20, 20))

# Flatten the 2D axes grid and plot one random mask per cell.
for ax in itertools.chain.from_iterable(axes):
    ax.imshow(random_mask(500, 500) * 255)

print("finish")