def add_image_transform(self, ctx):
    """Append a stretch to the image transform that scales the shorter
    output axis by the long/short aspect ratio (the longer axis is left
    untouched)."""
    out_w, out_h = ctx.image_transformer.output_size
    landscape = out_h < out_w
    scale_x = out_w / out_h if landscape else 1
    scale_y = 1 if landscape else out_h / out_w
    ctx.image_transformer.mm(mat3.stretch(scale_x, scale_y))
def _transform_image(self, image, inverse=False):
    """Apply this transformer's accumulated affine matrix to a PIL image.

    Args:
        image: PIL image to transform (presumably orig_width x orig_height
            pixels -- TODO confirm against callers).
        inverse: If True, apply the inverse transform instead, producing an
            image of the original size.

    Returns:
        The transformed PIL image.
    """
    # Move principal point to origin
    matrix = torch.DoubleTensor([[1, 0, -self.x0], [0, 1, -self.y0], [0, 0, 1]])
    # Apply transformations
    matrix = self.matrix.mm(matrix)
    # Restore principal point (scaled into destination coordinates)
    ow, oh = self.dest_size.tolist()
    matrix = self._mm(
        mat3.translate(self.x0 * ow / self.orig_width, self.y0 * oh / self.orig_height),
        matrix)
    output_size = self.dest_size.round().int()
    if inverse:
        matrix = matrix.inverse()
        output_size = torch.IntTensor([self.orig_width, self.orig_height])
    # Scale up so multisample anti-aliasing (msaa) can downsample later
    matrix = self._mm(mat3.stretch(self.msaa), matrix)
    # Apply affine image transformation. PIL expects the output->input
    # mapping, hence the inversion here.
    inv_matrix = matrix.inverse().contiguous()
    image = image.transform(tuple(output_size * self.msaa), Image.AFFINE,
                            tuple(inv_matrix[0:2].view(6)), Image.BILINEAR)
    # Scale down to output size.
    if self.msaa != 1:
        # Fix: Image.ANTIALIAS was deprecated and removed in Pillow 10;
        # Image.LANCZOS is the same filter under its proper name.
        image = image.resize(tuple(output_size), Image.LANCZOS)
    return image
def add_image_transform(self, ctx):
    """Record this transform's (sx, sy) stretch on the image transformer."""
    stretch = mat3.stretch(self.sx, self.sy)
    ctx.image_transformer.mm(stretch)
def add_camera_transform(self, ctx):
    """Record this transform's (sx, sy) stretch on the camera transformer."""
    stretch = mat3.stretch(self.sx, self.sy)
    ctx.camera_transformer.mm(stretch)
def add_image_transform(self, ctx):
    """Change the transformer's output size and compensate with a matching
    per-axis stretch so content scales to the new dimensions."""
    transformer = ctx.image_transformer
    prev_size = transformer.dest_size
    next_size = transformer.set_output_size(self.out_width, self.out_height)
    ratio = next_size / prev_size
    transformer.mm(mat3.stretch(ratio[0], ratio[1]))
def zoom(self, sx, sy):
    """Compose a stretch by (sx, sy) into the matrix and accumulate the
    running per-axis scale factors."""
    stretch = mat3.stretch(sx, sy)
    self.mm(stretch)
    self.sx, self.sy = self.sx * sx, self.sy * sy