def test_smart_resize_errors(self):
    """smart_resize raises ValueError on a bad `size` or a low-rank input."""
    error_cases = [
        # (input_shape, size, expected_message_fragment)
        ((20, 20, 2), (10, 5, 3), 'a tuple of 2 integers'),  # size must be 2-tuple
        ((20, 40), (10, 5), 'incorrect rank'),  # no channel axis
    ]
    for input_shape, size, message in error_cases:
        with self.assertRaisesRegex(ValueError, message):
            preprocessing_image.smart_resize(
                np.random.random(input_shape), size=size)
def test_smart_resize(self):
    """smart_resize returns an ndarray with the requested spatial shape."""
    source = np.random.random((20, 40, 3))

    upscaled = preprocessing_image.smart_resize(source, size=(50, 50))
    self.assertIsInstance(upscaled, np.ndarray)
    self.assertListEqual(list(upscaled.shape), [50, 50, 3])

    # Down-scaling, mixed scaling, and shrinking all keep the channel axis.
    for target in [(10, 10), (100, 50), (5, 15)]:
        resized = preprocessing_image.smart_resize(source, size=target)
        self.assertListEqual(list(resized.shape), [target[0], target[1], 3])
def test_smart_resize_tf_dataset(self, size):
    """smart_resize maps cleanly over a tf.data Dataset of images."""
    batch = np.random.random((2, 20, 40, 3))
    dataset = Dataset.from_tensor_slices(batch).map(
        lambda img: preprocessing_image.smart_resize(img, size=size))
    # Every element comes back as a numpy array of the requested shape.
    for sample in dataset.as_numpy_iterator():
        self.assertIsInstance(sample, np.ndarray)
        self.assertListEqual(list(sample.shape), [size[0], size[1], 3])
def load_image(path, image_size, num_channels, interpolation, smart_resize=False):
    """Load an image from a path and resize it.

    Args:
      path: Path to the image file to read.
      image_size: (height, width) to resize the decoded image to.
      num_channels: Number of color channels to decode.
      interpolation: Interpolation method passed to the resize op.
      smart_resize: If True, use the aspect-ratio-preserving crop-and-resize;
        otherwise a plain resize (which may distort the aspect ratio).

    Returns:
      The resized image tensor with a statically known
      (height, width, channels) shape.
    """
    raw = io_ops.read_file(path)
    # expand_animations=False keeps GIFs as a single frame (rank-3 output).
    decoded = image_ops.decode_image(
        raw, channels=num_channels, expand_animations=False)
    if smart_resize:
        resized = keras_image_ops.smart_resize(
            decoded, image_size, interpolation=interpolation)
    else:
        resized = image_ops.resize_images_v2(
            decoded, image_size, method=interpolation)
    # Pin the static shape so downstream graph code sees fixed dimensions.
    resized.set_shape((image_size[0], image_size[1], num_channels))
    return resized
def test_smart_resize_batch(self):
    """Batched input keeps its leading batch axis and center-crops width."""
    images = np.random.random((2, 20, 40, 3))
    resized = preprocessing_image.smart_resize(images, size=(20, 20))
    self.assertListEqual(list(resized.shape), [2, 20, 20, 3])
    # Going 40 -> 20 along width at unchanged height is a pure center crop,
    # so the output must equal the middle slice of the input exactly.
    self.assertAllClose(resized, images[:, :, 10:-10, :])
# Build paired high-/low-resolution face images for super-resolution training.
save_plots = True
target_size = 100

# download dataset from http://vis-www.cs.umass.edu/lfw/lfw.tgz
images = glob.glob(os.path.join('data', 'lfw', '**', '*.jpg'))

hi_res_dataset = []
lo_res_dataset = []

# Low-res edge length is 40% of the target; the degraded image is then
# resized back up so both arrays share the same spatial shape.
ratio = 0.40
low_res_dim = (int(target_size * ratio), int(target_size * ratio))

# for i in tqdm(images[:1000]):
for i in tqdm(images):
    # NOTE(review): a 3-tuple target_size relies on load_img reading only the
    # first two elements — confirm, or pass (target_size, target_size).
    hr_img = image.img_to_array(
        image.load_img(i, target_size=(target_size, target_size, 3)))
    # Degrade then restore resolution to produce the blurry network input.
    lr_img = smart_resize(hr_img, size=low_res_dim)
    lr_img = smart_resize(lr_img, size=(target_size, target_size))
    # Scale pixel values into [0, 1].
    hr_img = hr_img / 255.
    lr_img = lr_img / 255.
    hi_res_dataset.append(hr_img)
    lo_res_dataset.append(lr_img)

hi_res_dataset = np.array(hi_res_dataset)
lo_res_dataset = np.array(lo_res_dataset)

# Hyperparameters
learning_rate = 0.001
n_epochs = 50
batch_size = 256
validation_split = 0.1
activation = 'relu'