# Imports assumed by the snippets below (the original module-level imports
# are not shown in this excerpt).
import numpy as np
import scipy.ndimage as ndii

from imreg_dft import utils


def transform_img(img, scale=1.0, angle=0.0, tvec=(0, 0), bgval=None, order=1):
    """
    Transform the image by the given scale, rotation and translation.

    Args:
        img (2D or 3D numpy array): What will be transformed.
            If a 3D array is passed, it is treated in a manner in which RGB
            images are supposed to be handled - i.e. assume that coordinates
            are (Y, X, channels).
        scale (float): The scale factor (scale > 1.0 means zooming in)
        angle (float): Degrees of rotation (clockwise)
        tvec (2-tuple): Pixel translation vector, Y and X component.
        bgval (float): Shade of the background (filling during transformations)
            If None is passed, :func:`imreg_dft.utils.get_borderval` with
            radius of 5 is used to get it.
        order (int): Order of approximation (when doing transformations). 1 =
            linear, 3 = cubic etc. Linear works surprisingly well.

    Returns:
        np.ndarray: The transformed img, may have another
        i.e. (bigger) shape than the source.
    """
    if img.ndim == 3:
        # A bloody painful special case of RGB images
        ret = np.empty_like(img)
        for idx in range(img.shape[2]):
            sli = (slice(None), slice(None), idx)
            ret[sli] = transform_img(img[sli], scale, angle, tvec,
                                     bgval, order)
        return ret

    if bgval is None:
        bgval = utils.get_borderval(img, 5)

    # Work on a canvas 20 % larger than the input so the transforms don't
    # clip the image; the shape has to be made of integers.
    bigshape = np.round(np.array(img.shape) * 1.2).astype(int)
    bg = np.zeros(bigshape, img.dtype) + bgval

    dest0 = utils.embed_to(bg, img.copy())
    if scale != 1.0:
        dest0 = ndii.zoom(dest0, scale, order=order, cval=bgval)
    if angle != 0.0:
        dest0 = ndii.rotate(dest0, angle, order=order, cval=bgval)

    if tvec[0] != 0 or tvec[1] != 0:
        dest0 = ndii.shift(dest0, tvec, order=order, cval=bgval)

    # Crop back to the original shape.
    bg = np.zeros_like(img) + bgval
    dest = utils.embed_to(bg, dest0)
    return dest

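# Minimal usage sketch for transform_img; the image and the parameter values
# below are arbitrary examples picked for illustration, not library defaults.
#
#   src = np.random.random((128, 128))
#
#   # Zoom in by 10 %, rotate 30 degrees clockwise and shift by (5, -3) pixels.
#   dst = transform_img(src, scale=1.1, angle=30.0, tvec=(5, -3), order=1)
#
#   # The result is cropped back to the input shape.
#   assert dst.shape == src.shape
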
def _preprocess_extend_single(im, extend, low, high, cut, rcoef, bigshape):
    im = utils.extend_by(im, extend)
    im = utils.imfilter(im, low, high, cut)
    if rcoef != 1:
        im = resample(im, rcoef)

    # Make the shape of images the same
    bg = np.zeros(bigshape, dtype=im.dtype) + utils.get_borderval(im, 5)
    im = utils.embed_to(bg, im)
    return im

def _preprocess_extend(ims, extend, low, high, cut, rcoef):
    ims = [utils.extend_by(img, extend) for img in ims]
    bigshape = np.array([img.shape for img in ims]).max(0)

    ims = filter_images(ims, low, high, cut)
    if rcoef != 1:
        ims = [resample(img, rcoef) for img in ims]
        # Resampling scales the common canvas too; keep the shape integer.
        bigshape = np.round(bigshape * rcoef).astype(int)

    # Make the shape of images the same
    bgs = [np.zeros(bigshape) + utils.get_borderval(img, 5)
           for img in ims]
    ims = [utils.embed_to(bg, img)
           for bg, img in zip(bgs, ims)]
    return ims

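# The helpers above call ``resample`` (and ``filter_images``), which are not
# shown in this excerpt. A minimal, purely hypothetical stand-in for
# ``resample`` is sketched below, assuming it only rescales the image by a
# coefficient; the library's actual implementation may well differ.
import scipy.ndimage as ndi


def resample_sketch(img, coef):
    """Hypothetical stand-in: rescale ``img`` by ``coef`` along both axes."""
    # Linear interpolation keeps the sketch cheap and predictable.
    return ndi.zoom(img, coef, order=1)
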
def testUndo(self):
    what = np.random.random(self.whatshape)
    wheres = [
        (20, 11),
        (21, 12),
        (22, 13),
        (50, 60),
    ]
    for whs in wheres:
        where = np.zeros(whs)
        embd = utils.embed_to(where, what.copy())
        undone = utils.undo_embed(embd, what.shape)

        self.assertEqual(what.shape, undone.shape)
        np.testing.assert_equal(what, undone)

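# For context, the round-trip the test exercises can be pictured roughly as
# below. This is only an assumption about what utils.embed_to and
# utils.undo_embed do (paste the small array into the centre of the big one,
# then crop the centre back out), not the library's actual implementation.
import numpy as np


def embed_to_sketch(where, what):
    # Hypothetical: paste ``what`` into the centre of ``where`` in place.
    starts = [(w - s) // 2 for w, s in zip(where.shape, what.shape)]
    slices = tuple(slice(st, st + s) for st, s in zip(starts, what.shape))
    where[slices] = what
    return where


def undo_embed_sketch(embedded, orig_shape):
    # Hypothetical inverse: crop the central region of ``orig_shape`` back out.
    starts = [(e - s) // 2 for e, s in zip(embedded.shape, orig_shape)]
    slices = tuple(slice(st, st + s) for st, s in zip(starts, orig_shape))
    return embedded[slices].copy()
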
def process_images(ims, opts, tosa=None):
    # lazy import so no imports before run() is really called
    import numpy as np
    import imreg_dft as ird  # needed for transform_img_dict below
    from imreg_dft import utils
    from imreg_dft import imreg

    ims = [utils.extend_by(img, opts["extend"]) for img in ims]
    bigshape = np.array([img.shape for img in ims]).max(0)

    ims = filter_images(ims, opts["low"], opts["high"])
    rcoef = opts["resample"]
    if rcoef != 1:
        ims = [resample(img, rcoef) for img in ims]
        # Resampling scales the common canvas too; keep the shape integer.
        bigshape = np.round(bigshape * rcoef).astype(int)

    # Make the shape of images the same
    ims = [utils.embed_to(np.zeros(bigshape) + utils.get_borderval(img, 5), img)
           for img in ims]

    resdict = imreg.similarity(
        ims[0], ims[1], opts["iters"], opts["order"], opts["constraints"],
        opts["filter_pcorr"], opts["exponent"])

    im2 = resdict.pop("timg")

    # It seems that resampling simply scales the translation
    resdict["tvec"] /= rcoef

    ty, tx = resdict["tvec"]
    resdict["tx"] = tx
    resdict["ty"] = ty
    resdict["imgs"] = ims
    tform = resdict

    if tosa is not None:
        tosa[:] = ird.transform_img_dict(tosa, tform)

    if rcoef != 1:
        ims = [resample(img, 1.0 / rcoef) for img in ims]
        im2 = resample(im2, 1.0 / rcoef)
        resdict["Dt"] /= rcoef

    resdict["unextended"] = [utils.unextend_by(img, opts["extend"])
                             for img in ims + [im2]]

    return resdict

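# A hedged usage sketch for process_images. Every value in ``opts`` below is
# a guess inferred solely from the keys the function reads above; they are
# not documented defaults of the CLI.
#
#   template = np.random.random((100, 100))
#   subject = np.random.random((100, 100))
#
#   opts = dict(
#       extend=0,          # pixels of extension around each image
#       low=0.0,           # filtering thresholds passed to filter_images
#       high=0.7,
#       resample=1,        # resampling coefficient (1 = no resampling)
#       iters=1,           # number of similarity iterations
#       order=1,           # interpolation order
#       constraints=None,  # constraints forwarded to imreg.similarity
#       filter_pcorr=0,    # phase-correlation filtering radius
#       exponent="inf",    # phase-correlation exponent
#   )
#
#   result = process_images([template, subject], opts)
#   print(result["ty"], result["tx"])
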
def transform_img(img, scale=1.0, angle=0.0, tvec=(0, 0),
                  mode="constant", bgval=None, order=1):
    """
    Transform the image by the given scale, rotation and translation.

    Args:
        img (2D or 3D numpy array): What will be transformed.
            If a 3D array is passed, it is treated in a manner in which RGB
            images are supposed to be handled - i.e. assume that coordinates
            are (Y, X, channels).
            Complex images are handled in a way that treats separately
            the real and imaginary parts.
        scale (float): The scale factor (scale > 1.0 means zooming in)
        angle (float): Degrees of rotation (clockwise)
        tvec (2-tuple): Pixel translation vector, Y and X component.
        mode (string): The transformation mode (refer to e.g.
            :func:`scipy.ndimage.shift` and its kwarg ``mode``).
        bgval (float): Shade of the background (filling during transformations)
            If None is passed, :func:`imreg_dft.utils.get_borderval` with
            radius of 5 is used to get it.
        order (int): Order of approximation (when doing transformations). 1 =
            linear, 3 = cubic etc. Linear works surprisingly well.

    Returns:
        np.ndarray: The transformed img, may have another
        i.e. (bigger) shape than the source.
    """
    if img.ndim == 3:
        # A bloody painful special case of RGB images
        ret = np.empty_like(img)
        for idx in range(img.shape[2]):
            sli = (slice(None), slice(None), idx)
            ret[sli] = transform_img(img[sli], scale, angle, tvec,
                                     mode, bgval, order)
        return ret
    elif np.iscomplexobj(img):
        decomposed = np.empty(img.shape + (2,), float)
        decomposed[:, :, 0] = img.real
        decomposed[:, :, 1] = img.imag

        # The bgval makes little sense now, as we decompose the image
        res = transform_img(decomposed, scale, angle, tvec, mode, None, order)

        ret = res[:, :, 0] + 1j * res[:, :, 1]
        return ret

    if bgval is None:
        bgval = utils.get_borderval(img)

    bigshape = np.round(np.array(img.shape) * 1.2).astype(int)
    bg = np.zeros(bigshape, img.dtype) + bgval

    dest0 = utils.embed_to(bg, img.copy())
    # TODO: We have problems with complex numbers
    # that are not supported by zoom(), rotate() or shift()
    if scale != 1.0:
        dest0 = ndii.zoom(dest0, scale, order=order, mode=mode, cval=bgval)
    if angle != 0.0:
        dest0 = ndii.rotate(dest0, angle, order=order, mode=mode, cval=bgval)

    if tvec[0] != 0 or tvec[1] != 0:
        dest0 = ndii.shift(dest0, tvec, order=order, mode=mode, cval=bgval)

    # Crop back to the original shape.
    bg = np.zeros_like(img) + bgval
    dest = utils.embed_to(bg, dest0)
    return dest

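# A short sketch of the complex-image path described in the docstring. The
# data is synthetic and only meant to show that the real and imaginary parts
# are transformed independently and recombined into a complex result.
#
#   cimg = np.random.random((64, 64)) + 1j * np.random.random((64, 64))
#
#   out = transform_img(cimg, scale=1.0, angle=15.0, tvec=(2, 0),
#                       mode="constant")
#
#   assert out.shape == cimg.shape
#   assert np.iscomplexobj(out)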