import numpy as np
from scipy import ndimage
from skimage.transform import AffineTransform, warp


def resize(image, output_shape, order=1, mode='constant', cval=0.):
    """Resize image to match a certain size.

    Performs interpolation to up-size or down-size images. For down-sampling
    N-dimensional images by applying the arithmetic sum or mean, see
    `skimage.measure.local_sum` and `skimage.transform.downscale_local_mean`,
    respectively.

    Parameters
    ----------
    image : ndarray
        Input image.
    output_shape : tuple or ndarray
        Size of the generated output image `(rows, cols[, dim])`. If `dim` is
        not provided, the number of channels is preserved. In case the number
        of input channels does not equal the number of output channels a
        3-dimensional interpolation is applied.

    Returns
    -------
    resized : ndarray
        Resized version of the input.

    Other parameters
    ----------------
    order : int, optional
        The order of the spline interpolation, default is 1. The order has to
        be in the range 0-5. See `skimage.transform.warp` for detail.
    mode : string, optional
        Points outside the boundaries of the input are filled according to the
        given mode ('constant', 'nearest', 'reflect' or 'wrap').
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside the image
        boundaries.

    Examples
    --------
    >>> from skimage import data
    >>> from skimage.transform import resize
    >>> image = data.camera()
    >>> resize(image, (100, 100)).shape
    (100, 100)

    """
    rows, cols = output_shape[0], output_shape[1]
    orig_rows, orig_cols = image.shape[0], image.shape[1]

    row_scale = float(orig_rows) / rows
    col_scale = float(orig_cols) / cols

    # 3-dimensional interpolation
    if len(output_shape) == 3 and (image.ndim == 2
                                   or output_shape[2] != image.shape[2]):
        dim = output_shape[2]
        orig_dim = 1 if image.ndim == 2 else image.shape[2]
        dim_scale = float(orig_dim) / dim

        map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim]
        map_rows = row_scale * (map_rows + 0.5) - 0.5
        map_cols = col_scale * (map_cols + 0.5) - 0.5
        map_dims = dim_scale * (map_dims + 0.5) - 0.5

        coord_map = np.array([map_rows, map_cols, map_dims])

        out = ndimage.map_coordinates(image, coord_map, order=order,
                                      mode=mode, cval=cval)
    else:  # 2-dimensional interpolation
        # 3 control points necessary to estimate exact AffineTransform
        src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
        dst_corners = np.zeros(src_corners.shape, dtype=np.double)
        # take into account that 0th pixel is at position (0.5, 0.5)
        dst_corners[:, 0] = col_scale * (src_corners[:, 0] + 0.5) - 0.5
        dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5

        tform = AffineTransform()
        tform.estimate(src_corners, dst_corners)

        out = warp(image, tform, output_shape=output_shape, order=order,
                   mode=mode, cval=cval)

    return out
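# Minimal usage sketch (not part of the original module): exercises both the
# 2-D AffineTransform/warp branch and the 3-D map_coordinates branch of the
# `resize` copy above. The synthetic 3-channel array and the shapes are
# illustrative only; scikit-image's sample data is assumed to be available.
if __name__ == '__main__':
    from skimage import data

    cam = data.camera()                       # (512, 512) grayscale
    print(resize(cam, (100, 100)).shape)      # (100, 100): 2-D warp branch

    rgb = np.dstack([cam, cam, cam])          # synthetic 3-channel image
    print(resize(rgb, (128, 128)).shape)      # (128, 128, 3): channels preserved

    # A different output channel count triggers the map_coordinates branch.
    print(resize(rgb, (128, 128, 1)).shape)   # (128, 128, 1)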
pad = args.pad
# Bounding box of the input quadrilateral in the source image.
width = in_quad[:, 0].max() - in_quad[:, 0].min()
height = in_quad[:, 1].max() - in_quad[:, 1].min()
# Axis-aligned destination quad, shifted inward by `pad` on every side.
out_quad = array([(0, 0), (width, 0), (width, height), (0, height)]) + pad
# import ipdb; ipdb.set_trace()
metadata = dict(folder=folder, stem=stem)
metadata['polygon'] = f.polygon.points.tolist()

# Mask marking the region of interest: filled interior, distinct outline.
highlight = np.zeros((data.height, data.width), dtype=np.uint8)
f.draw(highlight, fill=255, outline=128)

if use_quad:
    # Estimate the affine map from padded output-quad coordinates back to the
    # source quad; `warp` uses this inverse map to pull pixels from the source.
    P = AffineTransform()
    P.estimate(out_quad, in_quad)
    output = warp(data, P,
                  output_shape=(height + 2 * pad, width + 2 * pad))
    sub_highlight = warp(highlight, P,
                         output_shape=(height + 2 * pad, width + 2 * pad))
    projection_matrix = P.params

    metadata['use_quad'] = True
    metadata['projection'] = projection_matrix.tolist()
    metadata['subimage'] = None
else:
    # import ipdb; ipdb.set_trace()
    data_array = img_as_float(data_array)
import numpy as np
from scipy import ndimage as ndi
from skimage.transform import AffineTransform, warp
# Note: `convert_to_float`, `_to_ndimage_mode` and `_clip_warp_output` are
# private helpers used by the upstream skimage implementation; they must be
# imported from skimage internals or copied alongside this function.


def resize_downsample(image, output_shape, order=1, mode=None, cval=0,
                      clip=True, preserve_range=False, anti_aliasing=True,
                      anti_aliasing_sigma=None):
    """Resize image to match a certain size.

    Performs interpolation to up-size or down-size images. Note that
    anti-aliasing should be enabled when down-sizing images to avoid aliasing
    artifacts. For down-sampling N-dimensional images with an integer factor
    also see `skimage.transform.downscale_local_mean`.

    This code was copied from:
    https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_warps.py#L34
    Commit hash: 94b561e77aa551fa91c52d9140af220885e5181e
    It is duplicated here because the `anti_aliasing` parameter of `resize`
    was only introduced in skimage 0.15, which is still a development version.
    Once 0.15 becomes an official release and is adopted here, this function
    can be deleted and simply imported instead. `downscale_local_mean` was not
    used because it only accepts integer (not float) downscaling factors, so
    the output size could differ considerably from what the user requested.

    Parameters
    ----------
    image : ndarray
        Input image.
    output_shape : tuple or ndarray
        Size of the generated output image `(rows, cols[, ...][, dim])`. If
        `dim` is not provided, the number of channels is preserved. In case
        the number of input channels does not equal the number of output
        channels an n-dimensional interpolation is applied.

    Returns
    -------
    resized : ndarray
        Resized version of the input.

    Other parameters
    ----------------
    order : int, optional
        The order of the spline interpolation, default is 1. The order has to
        be in the range 0-5. See `skimage.transform.warp` for detail.
    mode : {'constant', 'edge', 'symmetric', 'reflect', 'wrap'}, optional
        Points outside the boundaries of the input are filled according to the
        given mode. Modes match the behaviour of `numpy.pad`. The default mode
        is 'constant'.
    cval : float, optional
        Used in conjunction with mode 'constant', the value outside the image
        boundaries.
    clip : bool, optional
        Whether to clip the output to the range of values of the input image.
        This is enabled by default, since higher order interpolation may
        produce values outside the given input range.
    preserve_range : bool, optional
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of `img_as_float`.
    anti_aliasing : bool, optional
        Whether to apply a Gaussian filter to smooth the image prior to
        down-scaling. It is crucial to filter when down-sampling the image to
        avoid aliasing artifacts.
    anti_aliasing_sigma : {float, tuple of floats}, optional
        Standard deviation for Gaussian filtering to avoid aliasing artifacts.
        By default, this value is chosen as (s - 1) / 2 where s is the
        down-scaling factor.

    Notes
    -----
    Modes 'reflect' and 'symmetric' are similar, but differ in whether the
    edge pixels are duplicated during the reflection. As an example, if an
    array has values [0, 1, 2] and was padded to the right by four values
    using symmetric, the result would be [0, 1, 2, 2, 1, 0, 0], while for
    reflect it would be [0, 1, 2, 1, 0, 1, 2].
    Examples
    --------
    >>> from skimage import data
    >>> from skimage.transform import resize
    >>> image = data.camera()
    >>> resize(image, (100, 100), mode='reflect').shape
    (100, 100)

    """
    if mode is None:
        mode = 'constant'

    output_shape = tuple(output_shape)
    output_ndim = len(output_shape)
    input_shape = image.shape
    if output_ndim > image.ndim:
        # append dimensions to input_shape
        input_shape = input_shape + (1, ) * (output_ndim - image.ndim)
        image = np.reshape(image, input_shape)
    elif output_ndim == image.ndim - 1:
        # multichannel case: append shape of last axis
        output_shape = output_shape + (image.shape[-1], )
    elif output_ndim < image.ndim - 1:
        raise ValueError("len(output_shape) cannot be smaller than the image "
                         "dimensions")

    factors = (np.asarray(input_shape, dtype=float) /
               np.asarray(output_shape, dtype=float))

    if anti_aliasing:
        if anti_aliasing_sigma is None:
            anti_aliasing_sigma = np.maximum(0, (factors - 1) / 2)
        else:
            anti_aliasing_sigma = \
                np.atleast_1d(anti_aliasing_sigma) * np.ones_like(factors)
            if np.any(anti_aliasing_sigma < 0):
                raise ValueError("Anti-aliasing standard deviation must be "
                                 "greater than or equal to zero")
        image = ndi.gaussian_filter(image, anti_aliasing_sigma,
                                    cval=cval, mode=mode)

    # 2-dimensional interpolation
    if len(output_shape) == 2 or (len(output_shape) == 3 and
                                  output_shape[2] == input_shape[2]):
        rows = output_shape[0]
        cols = output_shape[1]
        input_rows = input_shape[0]
        input_cols = input_shape[1]
        if rows == 1 and cols == 1:
            tform = AffineTransform(translation=(input_cols / 2.0 - 0.5,
                                                 input_rows / 2.0 - 0.5))
        else:
            # 3 control points necessary to estimate exact AffineTransform
            src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
            dst_corners = np.zeros(src_corners.shape, dtype=np.double)
            # take into account that 0th pixel is at position (0.5, 0.5)
            dst_corners[:, 0] = factors[1] * (src_corners[:, 0] + 0.5) - 0.5
            dst_corners[:, 1] = factors[0] * (src_corners[:, 1] + 0.5) - 0.5

            tform = AffineTransform()
            tform.estimate(src_corners, dst_corners)

        out = warp(image, tform, output_shape=output_shape, order=order,
                   mode=mode, cval=cval, clip=clip,
                   preserve_range=preserve_range)
    else:  # n-dimensional interpolation
        coord_arrays = [factors[i] * (np.arange(d) + 0.5) - 0.5
                        for i, d in enumerate(output_shape)]

        coord_map = np.array(np.meshgrid(*coord_arrays,
                                         sparse=False,
                                         indexing='ij'))

        image = convert_to_float(image, preserve_range)

        ndi_mode = _to_ndimage_mode(mode)
        out = ndi.map_coordinates(image, coord_map, order=order,
                                  mode=ndi_mode, cval=cval)

        _clip_warp_output(image, out, order, mode, cval, clip)

    return out
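# Minimal usage sketch (not part of the copied code): down-size a synthetic
# high-frequency stripe pattern with the default Gaussian pre-filter and with
# the filter effectively disabled, to illustrate the anti-aliasing parameters.
# The array and variable names below are illustrative only.
if __name__ == '__main__':
    stripes = (np.indices((256, 256)).sum(axis=0) % 32 < 16).astype(float)

    # Default: anti_aliasing_sigma = (factor - 1) / 2 = (4 - 1) / 2 = 1.5 per axis.
    smooth = resize_downsample(stripes, (64, 64))

    # sigma = 0 skips the smoothing, so the stripes alias in the output.
    aliased = resize_downsample(stripes, (64, 64), anti_aliasing_sigma=0)

    print(smooth.shape, aliased.shape)   # (64, 64) (64, 64)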