def test_edge_to_image_size_vert_horz():
    """``edge_to_image_size`` maps a vert/horz edge length to (height, width)."""
    edge_size = 2
    aspect_ratio = 2.0

    # edge="vert": the given edge is the height; width follows from the ratio.
    assert image_.edge_to_image_size(edge_size, aspect_ratio, edge="vert") == (
        edge_size,
        round(edge_size * aspect_ratio),
    )

    # edge="horz": the given edge is the width; height follows from the ratio.
    assert image_.edge_to_image_size(edge_size, aspect_ratio, edge="horz") == (
        round(edge_size / aspect_ratio),
        edge_size,
    )
def test_calculate_resized_image_size_long():
    """``edge_to_image_size`` with ``edge="long"`` pins the longer image edge.

    NOTE(review): the name says "calculate_resized_image_size" while the
    sibling tests say "edge_to_image_size" — presumably a leftover from a
    rename; confirm before touching.
    """
    edge_size = 2
    edge = "long"

    # aspect_ratio > 1: the width is the long edge, so width == edge_size.
    wide = image_.edge_to_image_size(edge_size, 2.0, edge)
    assert wide == (round(edge_size / 2.0), edge_size)

    # aspect_ratio < 1: the height is the long edge, so height == edge_size.
    tall = image_.edge_to_image_size(edge_size, 0.5, edge)
    assert tall == (edge_size, round(edge_size * 0.5))
def test_edge_to_image_size_short():
    """``edge_to_image_size`` with ``edge="short"`` pins the shorter image edge."""
    edge_size = 2
    edge = "short"

    # aspect_ratio > 1: the height is the short edge, so height == edge_size.
    wide = image_.edge_to_image_size(edge_size, 2.0, edge)
    assert wide == (edge_size, round(edge_size * 2.0))

    # aspect_ratio < 1: the width is the short edge, so width == edge_size.
    tall = image_.edge_to_image_size(edge_size, 0.5, edge)
    assert tall == (round(edge_size / 0.5), edge_size)
def test_iter_resize(self):
    """Iterating an ``ImagePyramid`` resizes the registered operator's tensors.

    After each pyramid level, ``target_guide``, ``target_image`` and
    ``input_guide`` of the operator passed via ``resize_targets`` should have
    that level's image size.
    """

    # Minimal concrete operator: only the resize plumbing is under test, so
    # the representation/score hooks are stubs.
    class TestOperator(ops.PixelComparisonOperator):
        def target_image_to_repr(self, image):
            return image, None

        def input_image_to_repr(self, image, ctx):
            pass

        def calculate_score(self, input_repr, target_repr, ctx):
            pass

    initial_image_size = (5, 4)
    edge_sizes = (2, 4)

    # Seed for reproducibility; the actual pixel values are irrelevant here.
    torch.manual_seed(0)
    target_guide = torch.rand((1, 3, *initial_image_size))
    target_image = torch.rand((1, 3, *initial_image_size))
    input_guide = torch.rand((1, 3, *initial_image_size))

    # Expected image size per pyramid level, derived from each edge size and
    # the aspect ratio of the initial image.
    aspect_ratio = calculate_aspect_ratio(initial_image_size)
    image_sizes = [
        edge_to_image_size(edge_size, aspect_ratio) for edge_size in edge_sizes
    ]

    op = TestOperator()
    op.set_target_guide(target_guide)
    op.set_target_image(target_image)
    op.set_input_guide(input_guide)

    image_pyramid = pyramid.ImagePyramid(edge_sizes, 1, resize_targets=(op,))
    # Advancing the pyramid iterator is what triggers the resizes; the level
    # object itself is not used.
    for pyramid_level, image_size in zip(image_pyramid, image_sizes):
        for attr in ("target_guide", "target_image", "input_guide"):
            actual = extract_image_size(getattr(op, attr))
            desired = image_size
            assert actual == desired
def test_read_image_resize_scalar(test_image_file, test_image_pil):
    """A scalar ``size`` resizes the read image along the default edge."""
    edge_size = 200

    # Expected (height, width) computed from the reference PIL image.
    aspect_ratio = image_.calculate_aspect_ratio(
        (test_image_pil.height, test_image_pil.width)
    )
    height, width = image_.edge_to_image_size(edge_size, aspect_ratio)

    actual = image_.read_image(test_image_file, size=edge_size)
    # PIL's resize takes (width, height), hence the swapped order.
    desired = test_image_pil.resize((width, height))
    pyimagetest.assert_images_almost_equal(actual, desired)
def _resize(
    self,
    image: torch.Tensor,
    aspect_ratio: Optional[float],
    interpolation_mode: str,
) -> torch.Tensor:
    """Resize ``image`` so that ``self.edge`` has length ``self.edge_size``.

    If ``aspect_ratio`` is ``None``, it is extracted from ``image`` itself.
    The result is detached, so no gradients flow through the resize.
    """
    if aspect_ratio is None:
        aspect_ratio = extract_aspect_ratio(image)
    target_size = edge_to_image_size(self.edge_size, aspect_ratio, edge=self.edge)

    with torch.no_grad():
        resized = resize(image, target_size, interpolation=interpolation_mode)
    return resized.detach()
def transform(image):
    """Resize a PIL image so the requested edge matches ``edge_size``.

    ``edge_size`` and ``edge`` come from the enclosing scope. PIL's
    ``image.size`` is (width, height) while the helpers use (height, width),
    hence the ``[::-1]`` reversals on the way in and out.
    """
    height_width = edge_to_image_size(
        edge_size, calculate_aspect_ratio(image.size[::-1]), edge
    )
    return image.resize(height_width[::-1], resample=Image.BILINEAR)