def get_images(directory, batch_size=28733, target_size=None):
    """Load one batch of RGB images from ``directory``, scaled to [0, 1].

    Images are read with Keras' ``ImageDataGenerator.flow_from_directory``
    (``class_mode=None``, so the iterator yields images only, no labels).

    Args:
        directory: Path passed to ``flow_from_directory``.
        batch_size: How many images to pull in the single batch
            (default 28733, the value previously hard-coded here).
        target_size: ``(height, width)`` to resize to. When ``None``,
            falls back to the module-level ``image_size_used`` square size.

    Returns:
        float32 ndarray of shape ``(n, height, width, 3)`` with values in [0, 1].
    """
    if target_size is None:
        # NOTE(review): relies on a module-level `image_size_used` being
        # defined elsewhere in this file — confirm.
        target_size = (image_size_used, image_size_used)
    flow = ImageDataGenerator().flow_from_directory(
        directory,
        color_mode='rgb',
        target_size=target_size,
        class_mode=None,
        batch_size=batch_size)
    # `next()` instead of the legacy `.next()`; one batch is all we take.
    batch = next(flow)
    # uint8 pixels -> float32 in [0, 1]. The result is already an ndarray,
    # so the former `np.array(...)` re-wrap was a redundant copy.
    return batch.astype('float32') / 255
def load_real_samples(self, directory='train', batch_size=1858):
    """Load one batch of real training images scaled to [-1, 1].

    TODO: should later change so that we only load one batch into memory
    at a time instead of the whole directory at once.

    Args:
        directory: Image directory for ``flow_from_directory``
            (default ``'train'``, the value previously hard-coded here).
        batch_size: Images per batch (default 1858, previously hard-coded).

    Returns:
        float32 ndarray of shape ``(n, self.img_rows, self.img_cols, 3)``
        with values in [-1, 1].
    """
    flow = ImageDataGenerator().flow_from_directory(
        directory,
        color_mode='rgb',
        target_size=(self.img_rows, self.img_cols),
        class_mode=None,
        batch_size=batch_size)
    # `next()` instead of the legacy `.next()`.
    batch = next(flow)
    # Map uint8 [0, 255] to float32 [-1, 1] — presumably matched to a
    # tanh-output generator; confirm against the model definition.
    return (batch.astype('float32') - 127.5) / 127.5
# ---- tail of a `try:` block: the `try:` header and the preceding GPU
# memory-growth setup (which defines `gpus`) are above this chunk ----
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
    # Memory growth must be set before GPUs have been initialized
    print(e)

# Load a previously trained autoencoder checkpoint from disk.
autoencoder = load_model('prevautoencoders/shrooms_ae_filter6_1.h5')

# Build paired test data: the same clean image plus 20 masked variants.
test_imgs_clean = []
test_imgs_masked = []

# Load exactly one 128x128 RGB image from the "shrooms" directory.
img_to_use = ImageDataGenerator().flow_from_directory("shrooms", color_mode='rgb', target_size = (128, 128), class_mode=None, batch_size=1)
img_to_use = img_to_use.next()
img_to_use = img_to_use.astype('float32')
img_to_use = img_to_use / 255  # scale uint8 pixels to [0, 1]
img_to_use = img_to_use[0]     # drop the batch axis (batch_size=1)

current_mask_size = 25
# Multiplicative mask: 1.0 keeps a pixel, 0.0 blanks it out.
mask = np.ones((128, 128, 3))
for i in range(20):
    # With current_mask_size == 25 the condition 50+25 < 127 always holds,
    # so min_x is always 50 — TODO confirm the fallback branch is intentional.
    min_x = 50 if 50+current_mask_size < 127 else 127-current_mask_size
    x = random.randint(min_x, 127-current_mask_size)
    y = random.randint(1, 127-current_mask_size)
    # Zero out a current_mask_size x current_mask_size square on all channels.
    # NOTE(review): `mask` is never reset inside the loop, so the zeroed
    # squares accumulate — sample i is masked by up to i+1 squares. Confirm
    # this accumulation is intended rather than one fresh square per sample.
    mask[x:x + current_mask_size,y:y + current_mask_size,:] = 0.0
    masked_img = np.multiply(img_to_use, mask)
    test_imgs_clean.append(img_to_use)
    test_imgs_masked.append(masked_img)
test_imgs_clean = np.array(test_imgs_clean)