Example #1
File: warp.py Project: ta-oyama/PCV
def pw_affine(fromim,toim,fp,tp,tri):
    """ 画像の三角形パッチを変形する。
        fromim = 変形する画像
        toim = 画像の合成先
        fp = 基準点(同次座標系)
        tp = 対応店(同次座標系)
        tri = 三角形分割 """
    
    im = toim.copy()
    
    # check if the image is grayscale or color
    is_color = len(fromim.shape) == 3
    
    # create the image to warp into
    im_t = np.zeros(im.shape, 'uint8')
    for t in tri:
        # compute the affine transformation
        H = homography.Haffine_from_points(tp[:,t],fp[:,t])
        if is_color:
            for col in range(fromim.shape[2]):
                im_t[:,:,col] = ndimage.affine_transform(
                    fromim[:,:,col],H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
        else:
            im_t = ndimage.affine_transform(
                fromim,H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
        
        # alpha map for the triangle
        alpha = alpha_for_triangle(tp[:,t].astype('int'),im.shape[0],im.shape[1])
        
        # add the triangle to the image
        im[alpha>0] = im_t[alpha>0]
    
    return im
Example #2
File: warp.py Project: Adon-m/PCV
def pw_affine(fromim,toim,fp,tp,tri):
    """ Warp triangular patches from an image.
        fromim = image to warp 
        toim = destination image
        fp = from points in hom. coordinates
        tp = to points in hom. coordinates
        tri = triangulation. """
                
    im = toim.copy()
    
    # check if image is grayscale or color
    is_color = len(fromim.shape) == 3
    
    # create image to warp into (needed when iterating over colors)
    im_t = zeros(im.shape, 'uint8') 
    
    for t in tri:
        # compute affine transformation
        H = homography.Haffine_from_points(tp[:,t],fp[:,t])
        
        if is_color:
            for col in range(fromim.shape[2]):
                im_t[:,:,col] = ndimage.affine_transform(
                    fromim[:,:,col],H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
        else:
            im_t = ndimage.affine_transform(
                    fromim,H[:2,:2],(H[0,2],H[1,2]),im.shape[:2])
        
        # alpha for triangle
        alpha = alpha_for_triangle(tp[:,t],im.shape[0],im.shape[1])
        
        # add triangle to image
        im[alpha>0] = im_t[alpha>0]
        
    return im
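
Both variants above come from Jan Erik Solem's PCV project; homography.Haffine_from_points and alpha_for_triangle are PCV helpers. The core call is worth a standalone, hedged sketch: ndimage.affine_transform pulls pixels from the source, so the matrix and offset describe the destination-to-source mapping, which is why pw_affine computes Haffine_from_points(tp, fp) rather than (fp, tp). All values below are illustrative.

import numpy as np
from scipy import ndimage

src = np.zeros((100, 100), dtype='uint8')
src[30:70, 30:70] = 255                        # a white square

# H maps output (destination) coordinates back to input (source) coordinates
H = np.array([[1.0, 0.2, -5.0],
              [0.0, 1.0, 10.0],
              [0.0, 0.0, 1.0]])

warped = ndimage.affine_transform(src, H[:2, :2], (H[0, 2], H[1, 2]),
                                  output_shape=src.shape)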
Example #3
def _resample_one_img(data, A, b, target_shape,
                      interpolation_order, out, copy=True):
    "Internal function for resample_img, do not use"
    if data.dtype.kind in ('i', 'u'):
        # Integers are always finite
        has_not_finite = False
    else:
        not_finite = np.logical_not(np.isfinite(data))
        has_not_finite = np.any(not_finite)
    if has_not_finite:
        warnings.warn("NaNs or infinite values are present in the data "
                        "passed to resample. This is a bad thing as they "
                        "make resampling ill-defined and much slower.",
                        RuntimeWarning, stacklevel=2)
        if copy:
            # We need to do a copy to avoid modifying the input
            # array
            data = data.copy()
        #data[not_finite] = 0
        from ..masking import _extrapolate_out_mask
        data = _extrapolate_out_mask(data, np.logical_not(not_finite),
                                     iterations=2)[0]

    # See https://github.com/nilearn/nilearn/issues/346 Copying the
    # array makes it C continuous and as such the int32 index in the C
    # code is a lot less likely to overflow
    if (LooseVersion(scipy.__version__) < LooseVersion('0.14.1')):
        data = data.copy()

    # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363
    with warnings.catch_warnings():
        if LooseVersion(scipy.__version__) >= LooseVersion('0.18'):
            warnings.simplefilter("ignore", UserWarning)
        # The resampling itself
        ndimage.affine_transform(data, A,
                                 offset=b,
                                 output_shape=target_shape,
                                 output=out,
                                 order=interpolation_order)

    # Bug in ndimage.affine_transform when out does not have native endianness
    # see https://github.com/nilearn/nilearn/issues/275
    # Bug was fixed in scipy 0.15
    if (LooseVersion(scipy.__version__) < LooseVersion('0.15') and
        not out.dtype.isnative):
        out.byteswap(True)

    if has_not_finite:
        # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363
        with warnings.catch_warnings():
            if LooseVersion(scipy.__version__) >= LooseVersion('0.18'):
                warnings.simplefilter("ignore", UserWarning)
            # We need to resample the mask of not_finite values
            not_finite = ndimage.affine_transform(not_finite, A,
                                                offset=b,
                                                output_shape=target_shape,
                                                order=0)
        out[not_finite] = np.nan
    return out
Example #4
def testRigidTransformEstimation(inImg, level, dTheta, displacement, thr):
    left = ndimage.rotate(inImg, dTheta)
    right = ndimage.rotate(inImg, -dTheta)
    left = ndimage.affine_transform(left, np.eye(2), offset=-1*displacement)
    right = ndimage.affine_transform(right, np.eye(2), offset=displacement)

    rightPyramid = [i for i in transform.pyramid_gaussian(right, level)]
    leftPyramid = [i for i in transform.pyramid_gaussian(left, level)]
    sel = level
    beta = estimateRigidTransformation(leftPyramid[sel], rightPyramid[sel], 2.0*dTheta, thr)
    return beta
Example #5
    def transform(self, transform, grid_coords=False, reference=None, 
                  dtype=None, interp_order=_INTERP_ORDER):
        """
        Apply a transformation to the image considered as 'floating'
        to bring it into the same grid as a given 'reference'
        image. The transformation is assumed to go from the
        'reference' to the 'floating'.
        
        transform: nd array
    
        either a 4x4 matrix describing an affine transformation
        
        or a 3xN array describing voxelwise displacements of the
        reference grid points
        
        precomputed : boolean
        True for a precomputed transformation, False for affine

        grid_coords : boolean

        True if the transform maps to grid coordinates, False if it maps
        to world coordinates
    
        reference: reference image, defaults to input. 
        """
        if reference is None:
            reference = self

        if dtype is None:
            dtype = self._get_dtype()

        # Prepare data arrays
        data = self._get_data()
        output = np.zeros(reference._shape, dtype=dtype)
        t = np.asarray(transform)

        # Case: affine transform
        if t.shape[-1] == 4: 
            if not grid_coords:
                t = np.dot(self._inv_affine, np.dot(t, reference._affine))
            ndimage.affine_transform(data, t[0:3,0:3], offset=t[0:3,3],
                                     order=interp_order, cval=self._background, 
                                     output_shape=output.shape, output=output)
    
        # Case: precomputed displacements
        else:
            if not grid_coords:
                t = apply_affine(self._inv_affine, t)
            output = ndimage.map_coordinates(data, np.rollaxis(t, 3, 0), 
                                             order=interp_order, 
                                             cval=self._background,
                                             output=dtype)
    
        return Image(output, affine=reference._affine, world=self._world)
Example #6
def affine_transform2(im, rot, shift):
    '''
        Perform affine transform for 2/3D images.
    '''
    if ndim(im) == 2:
        return ndimage.affine_transform(im, rot, shift)
    else:
        imr = ndimage.affine_transform(im[:, :, 0], rot, shift)
        img = ndimage.affine_transform(im[:, :, 1], rot, shift)
        imb = ndimage.affine_transform(im[:, :, 2], rot, shift)

        return dstack((imr, img, imb))
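
A hedged usage sketch for affine_transform2 above, assuming ndim and dstack were pulled in from NumPy (for example via a star import); the rotation angle and shift are illustrative.

import numpy as np

theta = np.deg2rad(15)
rot = np.array([[np.cos(theta), -np.sin(theta)],
                [np.sin(theta),  np.cos(theta)]])

rgb = np.random.rand(64, 64, 3)
out = affine_transform2(rgb, rot, shift=(5.0, -3.0))   # same warp per channel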
Example #7
def fast_warp_forward(img, tf, output_shape=(50, 50), mode='constant', order=1):
    """
    This wrapper function is faster than skimage.transform.warp
    """
    m = tf._inv_matrix
    res = np.zeros(shape=(output_shape[0], output_shape[1], 3), dtype=floatX)
    from scipy.ndimage import affine_transform
    trans, offset = m[:2, :2], (m[0, 2], m[1, 2])
    res[:, :, 0] = affine_transform(img[:, :, 0].T, trans, offset=offset, output_shape=output_shape, mode=mode, order=order)
    res[:, :, 1] = affine_transform(img[:, :, 1].T, trans, offset=offset, output_shape=output_shape, mode=mode, order=order)
    res[:, :, 2] = affine_transform(img[:, :, 2].T, trans, offset=offset, output_shape=output_shape, mode=mode, order=order)
    return res
Example #8
    def apply_resize(self, workspace, input_image_name, output_image_name):
        image = workspace.image_set.get_image(input_image_name)
        image_pixels = image.pixel_data
        if self.size_method == R_BY_FACTOR:
            factor = self.resizing_factor.value
            shape = (np.array(image_pixels.shape[:2])*factor+.5).astype(int)
        elif self.size_method == R_TO_SIZE:
            if self.use_manual_or_image == C_MANUAL:
                shape = np.array([self.specific_height.value,
                                  self.specific_width.value])
            elif self.use_manual_or_image == C_IMAGE:
                shape = np.array(workspace.image_set.get_image(self.specific_image.value).pixel_data.shape).astype(int)
        #
        # A little bit of weirdness here. The input pixels are numbered 0 to
        # shape-1 and so are the output pixels. Therefore the affine transform
        # is the ratio of the two shapes-1.
        #
        ratio = ((np.array(image_pixels.shape[:2]).astype(float)-1) /
                 (shape.astype(float)-1))
        transform = np.array([[ratio[0], 0],[0,ratio[1]]])
        if self.interpolation not in I_ALL:
            raise NotImplementedError("Unsupported interpolation method: %s" %
                                      self.interpolation.value)
        order = (0 if self.interpolation == I_NEAREST_NEIGHBOR
                 else 1 if self.interpolation == I_BILINEAR
                 else 2)
        if image_pixels.ndim == 3:
            output_pixels = np.zeros((shape[0],shape[1],image_pixels.shape[2]), 
                                     image_pixels.dtype)
            for i in range(image_pixels.shape[2]):
                affine_transform(image_pixels[:,:,i], transform,
                                 output_shape = tuple(shape),
                                 output = output_pixels[:,:,i],
                                 order = order)
        else:
            output_pixels = affine_transform(image_pixels, transform,
                                             output_shape = shape,
                                             order = order)
        output_image = cpi.Image(output_pixels, parent_image=image)
        workspace.image_set.add(output_image_name, output_image)

        if self.show_window:
            if not hasattr(workspace.display_data, 'input_images'):
                workspace.display_data.input_images = [image.pixel_data]
                workspace.display_data.output_images = [output_image.pixel_data]
                workspace.display_data.input_image_names = [input_image_name]
                workspace.display_data.output_image_names = [output_image_name]
            else:
                workspace.display_data.input_images += [image.pixel_data]
                workspace.display_data.output_images += [output_image.pixel_data]
                workspace.display_data.input_image_names += [input_image_name]
                workspace.display_data.output_image_names += [output_image_name]
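
The comment about the ratio of the two shapes-1 can be checked directly: with matrix = diag((n_in - 1) / (n_out - 1)), output index 0 maps to input index 0 and output index n_out - 1 maps to n_in - 1, so the corners of the two grids line up exactly. A standalone sketch, values illustrative:

import numpy as np
from scipy.ndimage import affine_transform

img = np.arange(25, dtype=float).reshape(5, 5)     # input shape (5, 5)
out_shape = (9, 9)                                 # roughly 2x upsampling

ratio = (np.array(img.shape, dtype=float) - 1) / (np.array(out_shape) - 1)
out = affine_transform(img, np.diag(ratio), output_shape=out_shape, order=1)

# corners of the output coincide with corners of the input
assert out[0, 0] == img[0, 0] and out[-1, -1] == img[-1, -1]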
Example #9
def scale_images(pixel_array, mask):

    param = random.uniform(0.95,1.05)
    S = trans.zooms.zfdir2mat(param)
    pixel_array = affine_transform(pixel_array,S)
    mask = affine_transform(mask,S)
    
    scaled_data = {
        "pixel_array" : pixel_array,
        "mask" : mask, 
        "params" : param
    }
    
    return scaled_data
Example #10
def shear_images(pixel_array, mask):
    # only transform x axis?
    param = random.uniform(0, 0.05)
    S = [param, 0, 0]

    pixel_array = affine_transform(pixel_array,trans.shears.striu2mat(S))
    mask = affine_transform(mask,trans.shears.striu2mat(S))

    sheared_data = {
        "pixel_array" : pixel_array,
        "mask" : mask, 
        "params" : param
    }
    
    return sheared_data
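
Here trans is presumably the transforms3d package, whose shears.striu2mat builds a shear matrix from the strict upper triangle. A minimal sketch under that assumption:

# assuming `trans` above is the transforms3d package
import numpy as np
from transforms3d import shears

S = [0.03, 0.0, 0.0]             # strict upper triangle, row by row
M = shears.striu2mat(S)          # -> [[1, 0.03, 0], [0, 1, 0], [0, 0, 1]]
assert np.allclose(M, [[1, 0.03, 0], [0, 1, 0], [0, 0, 1]])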
Example #11
def _resample_one_img(data, A, b, target_shape,
                      interpolation_order, out, copy=True,
                      fill_value=0):
    "Internal function for resample_img, do not use"
    if data.dtype.kind in ('i', 'u'):
        # Integers are always finite
        has_not_finite = False
    else:
        not_finite = np.logical_not(np.isfinite(data))
        has_not_finite = np.any(not_finite)
    if has_not_finite:
        warnings.warn("NaNs or infinite values are present in the data "
                        "passed to resample. This is a bad thing as they "
                        "make resampling ill-defined and much slower.",
                        RuntimeWarning, stacklevel=2)
        if copy:
            # We need to do a copy to avoid modifying the input
            # array
            data = data.copy()
        #data[not_finite] = 0
        from ..masking import _extrapolate_out_mask
        data = _extrapolate_out_mask(data, np.logical_not(not_finite),
                                     iterations=2)[0]

    # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363
    with warnings.catch_warnings():
        if LooseVersion(scipy.__version__) >= LooseVersion('0.18'):
            warnings.simplefilter("ignore", UserWarning)
        # The resampling itself
        ndimage.affine_transform(data, A,
                                 offset=b,
                                 output_shape=target_shape,
                                 output=out,
                                 cval=fill_value,
                                 order=interpolation_order)

    if has_not_finite:
        # Suppresses warnings in https://github.com/nilearn/nilearn/issues/1363
        with warnings.catch_warnings():
            if LooseVersion(scipy.__version__) >= LooseVersion('0.18'):
                warnings.simplefilter("ignore", UserWarning)
            # We need to resample the mask of not_finite values
            not_finite = ndimage.affine_transform(not_finite, A,
                                                offset=b,
                                                output_shape=target_shape,
                                                order=0)
        out[not_finite] = np.nan
    return out
Example #12
    def transform(self,x,label):

        rx = gen_mm_random(np.random.__dict__[self.random_fct],self.fct_settings,self.min,self.max)
        ry = gen_mm_random(np.random.__dict__[self.random_fct],self.fct_settings,self.min,self.max)
        
        # Translate the coordinates of the keypoints
        def translateValue(index, value, rx, ry, imgShape):
            if value == -1:
                # Value is not available
                return value
            elif index % 2 == 0:
                # Value is an x coordinate
                newValue = value + rx
                if newValue < 0 or newValue >= imgShape[0]:
                    return -1
                else:
                    return newValue
            else:
                # Value is a y coordinate
                newValue = value + ry
                if newValue < 0 or newValue >= imgShape[1]:
                    return -1
                else:
                    return newValue
                
        newLabel = [translateValue(i, label[i], -ry, -rx, x.shape) for i in range(len(label))]

        return ndimage.affine_transform(x,self.I,offset=(rx,ry,0)), newLabel
Example #13
def transform_img(x, affine):
    matrix   = affine[:2,:2]
    offset   = affine[:2,2]
    x        = np.moveaxis(x, -1, 0)
    channels = [affine_transform(channel, matrix, offset, output_shape=img_shape[:-1], order=1,
                                 mode='constant', cval=np.average(channel)) for channel in x]
    return np.moveaxis(np.stack(channels, axis=0), 0, -1)
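
img_shape is a module-level global in this snippet. A hedged usage sketch, with the shape and affine chosen purely for illustration:

import numpy as np

img_shape = (128, 128, 3)                  # the global assumed above
x = np.random.rand(*img_shape)
affine = np.array([[1.0, 0.0,  4.0],       # identity linear part plus a
                   [0.0, 1.0, -2.0],       # (4, -2) pixel translation
                   [0.0, 0.0,  1.0]])
y = transform_img(x, affine)
assert y.shape == img_shape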
Example #14
def test_flirt2aff():
    from os.path import join as pjoin
    from nose.tools import assert_true
    import scipy.ndimage as ndi
    import nibabel as nib
    
    '''
    matfile = pjoin('fa_data',
                    '1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_affine_transf.mat')
    in_fname = pjoin('fa_data',
                     '1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_bet_FA.nii.gz')
    '''
    
    matfile=flirtaff
    in_fname = ffa
    
    ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
    res = flirt2aff_files(matfile, in_fname, ref_fname)
    mat = np.loadtxt(matfile)
    in_img = nib.load(in_fname)
    ref_img = nib.load(ref_fname)
    assert_true(np.all(res == flirt2aff(mat, in_img, ref_img)))
    # mm to mm transform
    mm_in2mm_ref =  np.dot(ref_img.get_affine(),
                           np.dot(res, npl.inv(in_img.get_affine())))
    # make new in image thus transformed
    in_data = in_img.get_data()
    ires = npl.inv(res)
    in_data[np.isnan(in_data)] = 0
    resliced_data = ndi.affine_transform(in_data,
                                         ires[:3,:3],
                                         ires[:3,3],
                                         ref_img.shape)
    resliced_img = nib.Nifti1Image(resliced_data, ref_img.get_affine())
    nib.save(resliced_img, 'test.nii')
Example #15
def rigid_alignment(faces,path,plotflag=False):
    """    Align images rigidly and save as new images.
        path determines where the aligned images are saved
        set plotflag=True to plot the images. """
    
    # take the points in the first image as reference points
    refpoints = list(faces.values())[0]
    
    # warp each image using affine transform
    for face in faces:
        points = faces[face]
        
        R,tx,ty = compute_rigid_transform(refpoints, points)
        T = array([[R[1][1], R[1][0]], [R[0][1], R[0][0]]])    
        
        im = array(Image.open(os.path.join(path,face)))
        im2 = zeros(im.shape, 'uint8')
        
        # warp each color channel
        for i in range(len(im.shape)):
            im2[:,:,i] = ndimage.affine_transform(im[:,:,i],linalg.inv(T),offset=[-ty,-tx])
            
        if plotflag:
            imshow(im2)
            show()
            
        # crop away border and save aligned images
        h,w = im2.shape[:2]
        border = (w+h)//20
        
        # crop away border
        imsave(os.path.join(path, 'aligned/'+face),im2[border:h-border,border:w-border,:])
Example #16
def test_map_coordinates_dts():
    # check that ndimage accepts different data types for interpolation
    data = np.array([[4, 1, 3, 2],
                     [7, 6, 8, 5],
                     [3, 5, 3, 6]])
    shifted_data = np.array([[0, 0, 0, 0],
                             [0, 4, 1, 3],
                             [0, 7, 6, 8]])
    idx = np.indices(data.shape)
    dts = (np.uint8, np.uint16, np.uint32, np.uint64,
           np.int8, np.int16, np.int32, np.int64,
           np.intp, np.uintp, np.float32, np.float64)
    for order in range(0, 6):
        for data_dt in dts:
            these_data = data.astype(data_dt)
            for coord_dt in dts:
                # affine mapping
                mat = np.eye(2, dtype=coord_dt)
                off = np.zeros((2,), dtype=coord_dt)
                out = ndimage.affine_transform(these_data, mat, off)
                assert_array_almost_equal(these_data, out)
                # map coordinates
                coords_m1 = idx.astype(coord_dt) - 1
                coords_p10 = idx.astype(coord_dt) + 10
                out = ndimage.map_coordinates(these_data, coords_m1, order=order)
                assert_array_almost_equal(out, shifted_data)
                # check constant fill works
                out = ndimage.map_coordinates(these_data, coords_p10, order=order)
                assert_array_almost_equal(out, np.zeros((3,4)))
            # check shift and zoom
            out = ndimage.shift(these_data, 1)
            assert_array_almost_equal(out, shifted_data)
            out = ndimage.zoom(these_data, 1)
            assert_array_almost_equal(these_data, out)
Example #17
	def evolve_until(self, t):
		if t is None:
			self.reset()
			return
		
		old_center = np.round(self.center / self.input_grid.delta).astype('int')

		self.center = self.velocity * t
		new_center = np.round(self.center / self.input_grid.delta).astype('int')

		delta = new_center - old_center

		for i in range(abs(delta[0])):
			if delta[0] < 0:
				self._extrude('left')
			else:
				self._extrude('right')

		for i in range(abs(delta[1])):
			if delta[1] < 0:
				self._extrude('bottom')
			else:
				self._extrude('top')
		
		if self.use_interpolation:
			# Use bilinear interpolation to interpolate the achromatic phase screen to the correct position.
			# This is to avoid sudden shifts by discrete pixels.
			ps = self._achromatic_screen.shaped
			sub_delta = self.center - new_center * self.input_grid.delta
			with warnings.catch_warnings():
				warnings.filterwarnings('ignore', message='The behaviour of affine_transform with a one-dimensional array supplied for the matrix parameter has changed in scipy 0.18.0.')
				self._shifted_achromatic_screen = affine_transform(ps, np.array([1,1]), (sub_delta / self.input_grid.delta)[::-1], mode='nearest', order=5).ravel()
		else:
			self._shifted_achromatic_screen = self._achromatic_screen
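
The np.array([1, 1]) passed as matrix above relies on an ndimage detail: a one-dimensional matrix argument is treated as the diagonal of the transform, so the call is a pure sub-pixel translation (the filtered warning is precisely about this behaviour changing in scipy 0.18). A standalone sketch:

import numpy as np
from scipy.ndimage import affine_transform

img = np.random.rand(32, 32)
shift = (0.25, -0.4)                       # sub-pixel offsets, illustrative
# a 1-D matrix is the per-axis diagonal, so this only translates the image
shifted = affine_transform(img, np.array([1.0, 1.0]), offset=shift,
                           mode='nearest', order=5)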
Example #18
def check_basis_equivariance(basis, order_in, order_out, alpha, beta, gamma):
    from se3cnn import SO3
    from scipy.ndimage import affine_transform
    import numpy as np

    n = basis.size(0)
    dim_in = 2 * order_in + 1
    dim_out = 2 * order_out + 1
    size = basis.size(-1)
    assert basis.size() == (n, dim_out, dim_in, size, size, size), basis.size()

    basis = basis / basis.view(n, -1).norm(dim=1).view(-1, 1, 1, 1, 1, 1)

    x = basis.view(-1, size, size, size)
    y = torch.empty_like(x)

    invrot = SO3.rot(-gamma, -beta, -alpha).numpy()
    center = (np.array(x.size()[1:]) - 1) / 2

    for k in range(y.size(0)):
        y[k] = torch.tensor(affine_transform(x[k].numpy(), matrix=invrot, offset=center - np.dot(invrot, center)))

    y = y.view(*basis.size())

    y = torch.einsum(
        "ij,bjkxyz,kl->bilxyz",
        (
            irr_repr(order_out, alpha, beta, gamma, dtype=y.dtype),
            y,
            irr_repr(order_in, -gamma, -beta, -alpha, dtype=y.dtype)
        )
    )

    return torch.tensor([(basis[i] * y[i]).sum() for i in range(n)])
Example #19
def test_map_coordinates_dts():
    # check that ndimage accepts different data types for interpolation
    data = np.array([[4, 1, 3, 2], [7, 6, 8, 5], [3, 5, 3, 6]])
    shifted_data = np.array([[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]])
    idx = np.indices(data.shape)
    dts = (np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16,
           np.int32, np.int64, np.intp, np.uintp, np.float32, np.float64)
    for order in range(0, 6):
        for data_dt in dts:
            these_data = data.astype(data_dt)
            for coord_dt in dts:
                # affine mapping
                mat = np.eye(2, dtype=coord_dt)
                off = np.zeros((2, ), dtype=coord_dt)
                out = ndimage.affine_transform(these_data, mat, off)
                assert_array_almost_equal(these_data, out)
                # map coordinates
                coords_m1 = idx.astype(coord_dt) - 1
                coords_p10 = idx.astype(coord_dt) + 10
                out = ndimage.map_coordinates(these_data,
                                              coords_m1,
                                              order=order)
                assert_array_almost_equal(out, shifted_data)
                # check constant fill works
                out = ndimage.map_coordinates(these_data,
                                              coords_p10,
                                              order=order)
                assert_array_almost_equal(out, np.zeros((3, 4)))
            # check shift and zoom
            out = ndimage.shift(these_data, 1)
            assert_array_almost_equal(out, shifted_data)
            out = ndimage.zoom(these_data, 1)
            assert_array_almost_equal(these_data, out)
Example #20
def resample_np_volume(np_volume,
                       origin,
                       current_pixel_spacing,
                       resampling_px_spacing,
                       bounding_box,
                       order=3):

    zooming_matrix = np.identity(3)
    zooming_matrix[0, 0] = resampling_px_spacing[0] / current_pixel_spacing[0]
    zooming_matrix[1, 1] = resampling_px_spacing[1] / current_pixel_spacing[1]
    zooming_matrix[2, 2] = resampling_px_spacing[2] / current_pixel_spacing[2]

    offset = ((bounding_box[0] - origin[0]) / current_pixel_spacing[0],
              (bounding_box[1] - origin[1]) / current_pixel_spacing[1],
              (bounding_box[2] - origin[2]) / current_pixel_spacing[2])

    output_shape = np.ceil([
        bounding_box[3] - bounding_box[0],
        bounding_box[4] - bounding_box[1],
        bounding_box[5] - bounding_box[2],
    ]) / resampling_px_spacing

    np_volume = affine_transform(np_volume,
                                 zooming_matrix,
                                 offset=offset,
                                 mode='mirror',
                                 order=order,
                                 output_shape=output_shape.astype(int))

    return np_volume
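
A hedged usage sketch for resample_np_volume; here bounding_box is read as (min_z, min_y, min_x, max_z, max_y, max_x) in millimetres, and all values are illustrative.

import numpy as np

vol = np.random.rand(40, 50, 60)                 # (z, y, x) voxel grid
out = resample_np_volume(vol,
                         origin=(0.0, 0.0, 0.0),
                         current_pixel_spacing=(2.0, 1.0, 1.0),
                         resampling_px_spacing=(1.0, 1.0, 1.0),
                         bounding_box=(0.0, 0.0, 0.0, 40.0, 40.0, 40.0))
# -> roughly a (40, 40, 40) volume at 1 mm isotropic spacing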
Example #21
    def process(data, process_kwargs=None):
        if data is None or process_kwargs is None:
            return data
        smooth_mode = process_kwargs["smooth_mode"]
        size_px = process_kwargs["size"]
        fill = process_kwargs["fill"]

        # fill in nodata values
        values = data["values"].copy()
        no_data_value = data["no_data_value"]
        values[values == no_data_value] = fill

        # compute the sigma
        sigma = 0, size_px[0] / 3, size_px[1] / 3
        ndimage.gaussian_filter(
            values, sigma, output=values, mode="constant", cval=fill
        )

        # remove the margins
        if smooth_mode == "exact":
            my, mx = [int(round(s)) for s in size_px]
            values = values[:, my : values.shape[1] - my, mx : values.shape[2] - mx]
        else:
            _, ny, nx = values.shape
            zy, zx = [1 - 2 * size_px[0] / ny, 1 - 2 * size_px[1] / nx]

            values = ndimage.affine_transform(
                values,
                order=0,
                matrix=np.diag([1, zy, zx]),
                offset=[0, size_px[0], size_px[1]],
            )

        return {"values": values, "no_data_value": no_data_value}
def test_optimize_rot_vol():
    # Test optimization of rotations between two volumes
    print(MY_DIR)
    example_path = pjoin(MY_DIR, EXAMPLE_FILENAME)
    #expected_values = np.loadtxt(pjoin(MY_DIR, 'global_signals.txt'))

    import nibabel as nib
    #img = nib.load('ds114_sub009_t2r1.nii')
    img = nib.load(example_path)
    data = img.get_data()
    vol0 = data[..., 4]
    vol1 = data[..., 5]

    # add an intentionally rotated volume: rotate by X, Y, Z
    X = x_rotmat(0.003)     # radians around the x axis, then
    Y = y_rotmat(0.005)     # radians around the y axis, then
    Z = z_rotmat(-0.009)    # radians around the z axis
    # multiply the matrices together to get a matrix describing all 3 rotations
    M = Z.dot(Y.dot(X))
    rotated_vol1 = snd.affine_transform(vol1, M)

    rotations = np.array([-0.003, -0.005, 0.009])

    # test whether the rotation optimization indeed recovers a volume
    # very similar to the original from the rotated one

    derotated_vol1, best_params = optimize_rot_vol(vol1, rotated_vol1)
    assert_almost_equal(best_params, rotations, decimal=2)

    return
Example #23
def rigid_alignment(faces,path,plotflag=False):
  """ 画像を位置合わせし、新たな画像として保存する。
      pathは、位置合わせした画像の保存先
      plotflag=Trueなら、画像を表示する """

  # 最初の画像の点を参照点とする
  refpoints = faces.values()[0]

  # 各画像を相似変換で変形する
  for face in faces:
    points = faces[face]

    R,tx,ty = compute_rigid_transform(refpoints, points)
    T = array([[R[1][1], R[1][0]], [R[0][1], R[0][0]]])

    im = array(Image.open(os.path.join(path,face)))
    im2 = zeros(im.shape, 'uint8')

    # warp each color channel
    for i in range(len(im.shape)):
      im2[:,:,i] = ndimage.affine_transform(im[:,:,i],linalg.inv(T),
                                            offset=[-ty,-tx])
    if plotflag:
      imshow(im2)
      show()

    # crop away the border and save the aligned image
    h,w = im2.shape[:2]
    border = (w+h)//20
    imsave(os.path.join(path, 'aligned/'+face),
          im2[border:h-border,border:w-border,:])
Example #24
    def warp_triangles_image(self, preprocessed_image, target_landmarks):
        src_landmarks = self.append_box_landmarks(preprocessed_image.landmarks)
        triangulation = spatial.Delaunay(src_landmarks).simplices
        result = np.zeros((HEIGHT, WIDTH, 3), dtype=float)
        n = len(triangulation)
        mask_sum = np.zeros((HEIGHT, WIDTH, 3), np.int32)
        for (i, tri) in zip(range(0, n), triangulation):
            img = np.zeros((HEIGHT, WIDTH), dtype=np.uint8)
            src_r, src_c = self.marks_to_coords(tri, src_landmarks)
            mask = self.get_mask(src_r, src_c)
            src_coords = np.transpose(np.array([src_r, src_c]))
            target_r, target_c = self.marks_to_coords(tri, target_landmarks)
            target_coords = np.transpose(np.array([target_r, target_c]))
            similarity_transformation = transform.estimate_transform(
                'similarity', target_coords, src_coords
            )
            params = similarity_transformation.params
            m = params.copy()
            m[0][2] = 0
            m[1][2] = 0
            offset = [params[0][2], params[1][2], 0]
            img = mask * preprocessed_image.image
            transformed = ndimage.affine_transform(img, m, offset)
            result += transformed
            transformed_mask = np.where(transformed > 0, 1, 0).astype(np.uint8)
            mask_sum += transformed_mask

        mask_sum = mask_sum + np.where(mask_sum == 0, 1, 0)
        result = result / mask_sum

        # out = Image.fromarray(np.floor(result).astype('uint8'))
        # out.save('tmp/triangles/result.jpg')
        return result
Example #25
    def transform(self,x):

#        import matplotlib.pyplot as plt
#
#        print x[:,:,0].min()
#        print x[:,:,1].min()
#        print x[:,:,2].min()
#        print x[:,:,0].max()
#        print x[:,:,1].max()
#        print x[:,:,2].max()
#
##        plt.imshow(x.astype('uint8'))
#        plt.imshow(x)
#        plt.show()

        rx = gen_mm_random(np.random.__dict__[self.random_fct],self.fct_settings,self.min,self.max)
        ry = gen_mm_random(np.random.__dict__[self.random_fct],self.fct_settings,self.min,self.max)

#        x = ndimage.affine_transform(x.astype('uint8'),self.I,offset=(rx,ry,0))
        x = ndimage.affine_transform(x,self.I,offset=(rx,ry,0))

#        plt.imshow(x.astype('uint8'))
#        plt.imshow(x)
#        plt.show()
        return x
Example #26
def make_truth(img_file='./truth.png'):
    """
    Load in the truth image data, embed it into a 3d array, then rotate it in a
    weird way
    """
    pixels = 1.0 - mplimg.imread(img_file)[:, :, 0]  # b&w image, just grab red

    # Now embed in 3d array and rotate in some interesting way
    voxels = np.zeros(pixels.shape + (pixels.shape[0], ))
    voxels[:, :, voxels.shape[2] // 2] = pixels
    rot_ang_axis = np.array((2, 1, 0.5))  # Something "interesting"
    aff_matrix = angle_axis_to_matrix(rot_ang_axis)
    # Rotate about center, but affine_transform offset parameter is dumb
    center = np.array(voxels.shape) / 2  # whatever close enough
    offset = -(center - center.dot(aff_matrix)).dot(np.linalg.inv(aff_matrix))
    voxels = ndimage.affine_transform(voxels, aff_matrix, offset=offset)

    # Remake the truth figure in 3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    x, y, z = np.meshgrid(np.arange(voxels.shape[0]),
                          np.arange(voxels.shape[1]),
                          np.arange(voxels.shape[2]),
                          indexing='ij')
    disp_vox = voxels > 0.3
    ax.scatter(x[disp_vox], y[disp_vox], z[disp_vox])
    plt.savefig(img_file.replace('.png', '_3d.png'))
    # plt.show()

    print("truth:", voxels.shape)
    return voxels
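
The offset gymnastics above stem from affine_transform's pull-back convention: it evaluates input = matrix @ output + offset. With column vectors, one standard way to rotate by R about the volume center c is to pass matrix = inv(R) and offset = c - inv(R) @ c, so the center maps to itself. A minimal sketch:

import numpy as np
from scipy import ndimage

def rotate_about_center(vol, R):
    # pull-back: input = Rinv @ output + offset, with the center held fixed
    c = (np.array(vol.shape) - 1) / 2.0
    Rinv = np.linalg.inv(R)
    return ndimage.affine_transform(vol, Rinv, offset=c - Rinv @ c)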
Example #27
def makeDeepDreamStepped(img, classifier, end, name, iterations, octaves, octave_scale, start_sigma, end_sigma, start_jitter, end_jitter, start_step_size, end_step_size, scaleZoom):
	newImagePath = pathToOutput+'/'+name+"_i"+str(iterations)+"_o"+str(octaves)+"_os"+str(octave_scale)+"_j"+str(start_jitter)+'_'+str(end_jitter)+'.png'
	frame = deepdream_stepped(classifier, img, iterations, octaves, octave_scale, end, start_sigma, end_sigma, start_jitter, end_jitter, start_step_size, end_step_size)
	PIL.Image.fromarray(np.uint8(np.clip(frame, 0, 255))).save(newImagePath, 'png')
	h, w = frame.shape[:2]
	frame = nd.affine_transform(frame, [1-scaleZoom,1-scaleZoom,1], [h*scaleZoom/2,w*scaleZoom/2,0], order=1)
	return frame
Example #28
def rigid_align(data, tspath, plotflag=False):
	"""
	Adapted from Programming Computer Vision with Python by
	Jan Erik Solem (2012)
	"""
	refpoints = list(data.values())[0]
	
	for face in data:
		points = data[face]
		
		R, tx, ty = find_rigid_transform(refpoints, points)
		T = np.array([
				[R[1][1], R[1][0]],
				[R[0][1], R[0][0]]
			])
			
		img = np.array(Image.open(os.path.join(tspath, face)))
		img2 = np.zeros(img.shape, 'uint8')
		
		for i in range(len(img.shape)):
			img2[:,:,i] = ndimage.affine_transform(img[:,:,i], linalg.inv(T), offset=[-ty, -tx])
			
		if plotflag:
			imshow(img2)
			show()
			
		h, w = img2.shape[:2]
		border = (w + h) // 20
		
		imsave(os.path.join(tspath, 'aligned/'+face), img2[border:h-border, border:w-border,:])
Example #29
def rigid_alignment(faces, path, plotflag=False):
    """ 严格对齐图像,并将其保存为新的图像
	path 决定对齐后图像保存的位置 
	设置 plotflag=True,以绘制图像 """

    # 将第一幅图像中的点作为参考点
    # refpoints = faces.values()[0]  #TypeError: 'dict_values' object is not subscriptable
    refpoints = list(faces.values())[0]
    # 使用仿射变换扭曲每幅图像
    for face in faces:
        points = faces[face]
    R, tx, ty = compute_rigid_transform(refpoints, points)
    T = array([[R[1][1], R[1][0]], [R[0][1], R[0][0]]])

    im = array(Image.open(os.path.join(path, face)))
    im2 = zeros(im.shape, 'uint8')

    # 对每个颜色通道进行扭曲
    for i in range(len(im.shape)):
        im2[:, :, i] = ndimage.affine_transform(im[:, :, i],
                                                linalg.inv(T),
                                                offset=[-ty, -tx])

    if plotflag:
        imshow(im2)
        show()

    # 裁剪边界,并保存对齐后的图像
    h, w = im2.shape[:2]
    border = (w + h) // 20

    # 裁剪边界
    # imsave(os.path.join(path, 'aligned/'+face),im2[border:h-border,border:w-border,:])
    imsave(os.path.join(path, 'aligned/' + face), im2[border:h - border,
                                                      border:w - border, :])
Example #30
def SubtractDominantMotion(image1, image2, threshold, num_iters, tolerance):
    """
    :param image1: Images at time t
    :param image2: Images at time t+1
    :param threshold: used for LucasKanadeAffine
    :param num_iters: used for LucasKanadeAffine
    :param tolerance: binary threshold of intensity difference when computing the mask
    :return: mask: [nxm]
    """
    # put your implementation here
    mask = np.zeros(image1.shape, dtype=bool)

    # M_mat = LucasKanadeAffine(image1, image2, threshold, num_iters)
    M_mat = InverseCompositionAffine(image1, image2, threshold, num_iters)

    warped = nd.affine_transform(image1, -M_mat, output_shape=None, offset=0.0)
    diff = abs(warped - image2)

    mask[diff < tolerance] = 0
    mask[diff > tolerance] = 1

    mask = nd.morphology.binary_erosion(mask)
    mask = nd.morphology.binary_dilation(mask, iterations=1)

    return mask
Example #31
    def __call__(self, x):
        if self.deterministic:
            transf_matrix = np.eye(2)
            margin = 0
        else:
            a = random.uniform(-self.rot_max, self.rot_max) * np.pi
            rc, rs = np.cos(a), np.sin(a)
            sx = random.choice([1, -1]) * random.uniform(
                self.scale_min, self.scale_max)
            sy = random.choice([1, -1]) * random.uniform(
                self.scale_min, self.scale_max)
            hx = random.uniform(-self.shear, self.shear)
            hy = random.uniform(-self.shear, self.shear)

            # geometry <3
            margin = 2. - 1. / (abs(rc) + abs(rs)) - min(abs(sx), abs(sy))
            margin = int(self.w * 1.5 * margin)
            margin = min(self.w // 2, margin)

            rot_matrix = np.array([[rc, rs], [-rs, rc]])

            scale_shear = np.array([[1. / sx, hx / sx], [hy / sy, 1. / sy]])

            transf_matrix = scale_shear @ rot_matrix

        x = self._random_valid_crop(x, int(2.3 * self.w), 0)

        for i in range(x.shape[0]):
            x[i] = affine_transform(x[i], transf_matrix, mode='mirror')

        x = self._random_valid_crop(x, self.w, margin)

        return x
Example #32
def reslice_data(space_define_file, resample_file):
    """ reslices data in space_define_file to matrix of
    resample_file
    Parameters
    ----------
    space_define_file :  filename of space defining image
    resample_file : filename of image be resampled

    Returns
    -------
    img : space_define_file as nibabel image
    data : ndarray of data in resample_file sliced to
           shape of space_define_file
    """
    space_define_file = str(space_define_file)
    resample_file = str(resample_file)
    img = nibabel.load(space_define_file)
    change_img = nibabel.load(resample_file)
    T = eye(4)
    
    Tv = dot(np.linalg.inv(change_img.get_affine()), 
             dot(T, img.get_affine()))
    data = affine_transform(change_img.get_data().squeeze(), 
                            Tv[0:3,0:3], 
                            offset=Tv[0:3,3], 
                            output_shape = img.get_shape()[:3],
                            order=0,mode = 'nearest')

    return img, data
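
The Tv composition above chains two voxel-to-world maps: from world = change_aff @ v_change and world = img_aff @ v_img it follows that v_change = inv(change_aff) @ img_aff @ v_img, which is exactly the matrix and offset handed to affine_transform. A standalone sketch of the same composition, with illustrative affines:

import numpy as np
from scipy.ndimage import affine_transform

img_aff = np.diag([2.0, 2.0, 2.0, 1.0])      # 2 mm space-defining grid
change_aff = np.eye(4)                       # 1 mm grid to be resampled

Tv = np.linalg.inv(change_aff) @ img_aff     # target voxel -> source voxel
src = np.random.rand(64, 64, 64)
dst = affine_transform(src, Tv[:3, :3], offset=Tv[:3, 3],
                       output_shape=(32, 32, 32), order=0, mode='nearest')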
Example #33
def normalize_rotation(img,shape=(30,30),normalize=1):
    if isinstance(img, list):
        return [normalize_rotation(image,shape=shape) for image in img]
    # rescale into 0-1 range
    image = normalize_range(img)
    # compute image statistics
    cx,cy,cxx,cxy,cyy = compute_stats(image)
    # compute eigenvectors
    mat = array([[cxx,cxy],[cxy,cyy]])
    v,d = linalg.eig(mat)
    alpha0 = atan2upper(d[1,0],d[0,0]) - pi/2
    alpha1 = atan2upper(d[1,1],d[1,0]) - pi/2
    if abs(alpha0)<abs(alpha1):
        alpha = -alpha0
    else:
        alpha = -alpha1
    # perform the actual transformation
    affine = array([[cos(alpha),-sin(alpha)],[sin(alpha),cos(alpha)]])
    ccenter = array((cx,cy))
    ocenter = array((shape[0]/2,shape[1]/2))
    offset = ccenter - dot(affine,ocenter)
    if normalize: img = image
    return ndimage.affine_transform(img,affine,
                                    offset=offset,
                                    output_shape=shape)
Example #34
def reslice_data(img, change_dat, change_aff):
    """ reslices data in space_define_file to matrix of
    resample_file
    Parameters
    ----------
    img  :  nibabel image of space defining image
    change_dat : array of data to resample
    change_aff : 4X4 array defining mapping of change_dat to world space

    Returns
    -------
    data : ndarray of data in change_dat (with corresponding affine
    change_aff)  sliced to shape defined by img (shape and affine)
    """

    T = np.eye(4)
    
    Tv = np.dot(np.linalg.inv(change_aff), 
                np.dot(T, img.get_affine()))
    data = affine_transform(change_dat.squeeze(), 
                            Tv[0:3,0:3], 
                            offset=Tv[0:3,3], 
                            output_shape = img.get_shape()[:3],
                            order=0,mode = 'nearest')

    return  data
Example #35
def transformImage(img, xfactor, angle=0, msg=False):
        """
        rotates then stretches or compresses an image only along the x-axis
        """
        if xfactor > 1.0:
                mystr = "_S"
        else:
                mystr = "_C"

        if msg is True:
                if xfactor > 1:
                        apDisplay.printMsg("stretching image by "+str(round(xfactor,3)))
                else:
                        apDisplay.printMsg("compressing image by "+str(round(xfactor,3)))
        ### image has swapped coordinates (y,x) from particles
        transMat = numpy.array([[ 1.0, 0.0 ], [ 0.0, 1.0/xfactor ]])
        #print "transMat\n",transMat
        #apImage.arrayToJpeg(img, "img"+mystr+".jpg")

        stepimg  = ndimage.rotate(img, -1.0*angle, mode='reflect')
        stepimg = apImage.frame_cut(stepimg, img.shape)
        #apImage.arrayToJpeg(stepimg, "rotate"+mystr+".jpg")

        newimg  = ndimage.affine_transform(stepimg, transMat, mode='reflect')
        #apImage.arrayToJpeg(newimg, "last_transform"+mystr+".jpg")

        return newimg
Example #36
def test_flirt2aff():
    from os.path import join as pjoin
    from nose.tools import assert_true
    import scipy.ndimage as ndi
    import nibabel as nib
    
    '''
    matfile = pjoin('fa_data',
                    '1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_affine_transf.mat')
    in_fname = pjoin('fa_data',
                     '1312211075232351192010092912092080924175865ep2dadvdiffDSI10125x25x25STs005a001_bet_FA.nii.gz')
    '''
    
    matfile=flirtaff
    in_fname = ffa
    
    ref_fname = '/usr/share/fsl/data/standard/FMRIB58_FA_1mm.nii.gz'
    res = flirt2aff_files(matfile, in_fname, ref_fname)
    mat = np.loadtxt(matfile)
    in_img = nib.load(in_fname)
    ref_img = nib.load(ref_fname)
    assert_true(np.all(res == flirt2aff(mat, in_img, ref_img)))
    # mm to mm transform
    # mm_in2mm_ref =  np.dot(ref_img.affine,
    #                        np.dot(res, npl.inv(in_img.affine)))
    # make new in image thus transformed
    in_data = in_img.get_data()
    ires = npl.inv(res)
    in_data[np.isnan(in_data)] = 0
    resliced_data = ndi.affine_transform(in_data,
                                         ires[:3,:3],
                                         ires[:3,3],
                                         ref_img.shape)
    resliced_img = nib.Nifti1Image(resliced_data, ref_img.affine)
    nib.save(resliced_img, 'test.nii')
Example #37
    def make_th_in_th_out_map(self):
        if (not self.th2th_data):
            print('make_2th_th_map() must be performed first')
            return
        th2th_params = self.th2th_data.params
        th_steps = th2th_params['y_steps']
        th_max = th2th_params['y_max']
        th_min = th2th_params['y_min']
        th_stepsize = float(th_max - th_min)/th_steps
        th_in = arange(th_steps, dtype = 'float') * th_stepsize + th_min
        twoth_steps = th2th_params['x_steps']
        twoth_max = th2th_params['x_max']
        twoth_min = th2th_params['x_min']
        twoth_stepsize = float(twoth_max - twoth_min)/twoth_steps
        twoth = arange(twoth_steps, dtype = 'float') * twoth_stepsize + twoth_min
        #twoth = arange(self.twoth_steps, dtype = 'float') * self.twoth_stepsize + self.twoth_min_min
        th_out_max = twoth_max - th_min
        th_out_min = twoth_min - th_max
        from scipy import ndimage as nd
        tthdata = self.th2th_data.bin_data
        affine_transform = array([[1.0, -th_stepsize / twoth_stepsize],[0.,1.]])
        th_out_steps = int((th_max - th_min) / twoth_stepsize + twoth_steps)
        th_in_th_out = zeros((th_out_steps, th_steps,4))

        th_in_th_out[:,:,0] = nd.affine_transform(tthdata[:,:,0], affine_transform, offset = 0.0, output_shape=[th_out_steps,th_steps] )
        th_in_th_out[:,:,1] = nd.affine_transform(tthdata[:,:,1], affine_transform, offset = 0.0, output_shape=[th_out_steps,th_steps] )
        th_in_th_out[:,:,2] = nd.affine_transform(tthdata[:,:,2], affine_transform, offset = 0.0, output_shape=[th_out_steps,th_steps] )
        th_in_th_out[:,:,3] = nd.affine_transform(tthdata[:,:,3], affine_transform, offset = 0.0, output_shape=[th_out_steps,th_steps] )
        print(th_in_th_out.shape)
        print(th_out_max, th_out_min)
        th_in_th_out_params = {
          'description': self.description,
          'x_max': th_out_max,
          'x_min': th_out_min,
          'x_steps': th_out_steps,
          'y_max': th_max,
          'y_min': th_min,
          'y_steps': th_steps,
          'x_units': '$\\theta_{\\rm{out}} ({}^{\circ})$',
          'y_units': '$\\theta_{\\rm{in}} ({}^{\circ})$'
          }

        th_in_th_out = flipud(th_in_th_out)
        # cut off stuff that should really be zero - bad fp stuff
        th_in_th_out[:,:,3][th_in_th_out[:,:,3] < 1e-16] = 0.
        self.th_in_th_out_data = plottable_2d_data(th_in_th_out, th_in_th_out_params, self)
        return
Example #38
def process_brainweb_subject(brainweb_raw_dir = os.path.join('..','data','training_data','brainweb','raw'),
                             subject          = 'subject54',
                             gm_contrast      = 4,
                             mlem_fwhm_mm     = 4.5):

  dmodel_path = os.path.join(brainweb_raw_dir, subject + '_crisp_v.mnc.gz')
  gm_path     = os.path.join(brainweb_raw_dir, subject + '_gm_v.mnc.gz')
  wm_path     = os.path.join(brainweb_raw_dir, subject + '_wm_v.mnc.gz')
  t1_path     = os.path.join(brainweb_raw_dir, subject + '_t1w_p4.mnc.gz')
  
  # the simulated t1 has a different voxel size and FOV
  dmodel_affine = nib.load(dmodel_path).affine.copy()
  t1_affine     = nib.load(t1_path).affine.copy()
  
  dmodel_voxsize = np.sqrt((dmodel_affine**2).sum(0))[:-1]
  t1_voxsize     = np.sqrt((t1_affine**2).sum(0))[:-1]
  
  dmodel = nib.load(dmodel_path).get_data()
  gm     = nib.load(gm_path).get_data()
  wm     = nib.load(wm_path).get_data()
  
  t1     = nib.load(t1_path).get_data()
  
  pet_gt = gm_contrast*gm + wm + 0.5*(dmodel == 5) + 0.5*(dmodel == 6) + 0.25*(dmodel == 4) + 0.1*(dmodel == 7) + 0.2*(dmodel == 11)
  
  mlem = gaussian_filter(pet_gt, mlem_fwhm_mm / (2.35*dmodel_voxsize))
  
  mlem_regrid = affine_transform(mlem, np.linalg.inv(dmodel_affine) @ t1_affine, 
                                 order = 1, prefilter = False, output_shape = t1.shape)
  
  pet_gt_regrid = affine_transform(pet_gt, np.linalg.inv(dmodel_affine) @ t1_affine, 
                                    order = 1, prefilter = False, output_shape = t1.shape)

  # return flipped data and a new affine

  swap02_aff = np.array([[0., 0., 1., 0.],
                         [0., 1., 0., 0.],
                         [1., 0., 0., 0.],
                         [0., 0., 0., 1.]])
  
  flip1_aff      = np.eye(4, dtype=int)
  flip1_aff[1,1] = -1
  
  new_t1_aff = flip1_aff @ swap02_aff @ t1_affine
  
  return (np.flip(np.swapaxes(t1,0,2),1), np.flip(np.swapaxes(mlem_regrid,0,2),1), 
          np.flip(np.swapaxes(pet_gt_regrid,0,2),1), new_t1_aff)
Example #39
def affine_resample(source, 
                    target, 
                    T, 
                    toresample='source', 
                    dtype=None, 
                    order=3, 
                    use_scipy=False): 
    """
    Image resampling using spline interpolation. 

    Parameters
    ----------
    source : image
    
    target : image

    T : source-to-target affine transform
    """
    Tv = np.dot(np.linalg.inv(target.get_affine()), np.dot(T, source.get_affine()))
    if use_scipy or order != 3:
        use_scipy = True
        from scipy.ndimage import affine_transform 
    if toresample == 'target': 
        if not use_scipy:
            data = cspline_resample(target.get_data(), 
                                    source.get_shape(), 
                                    Tv, 
                                    dtype=dtype)
        else: 
            data = affine_transform(target.get_data(), 
                                    Tv[0:3,0:3], offset=Tv[0:3,3], 
                                    output_shape=source.get_shape(), 
                                    order=order)
        return Image(data, source.get_affine())
    else:
        if not use_scipy:
            data = cspline_resample(source.get_data(), 
                                    target.get_shape(), 
                                    np.linalg.inv(Tv), 
                                    dtype=dtype)
        else: 
            Tv = np.linalg.inv(Tv)
            data = affine_transform(source.get_data(), 
                                    Tv[0:3,0:3], offset=Tv[0:3,3], 
                                    output_shape=target.get_shape(), 
                                    order=order)
        return Image(data, target.get_affine())
Example #40
def resample_np_volume_3d(np_volume: NumpyTensorX,
                          np_volume_spacing: Length,
                          np_volume_origin: Length,
                          min_bb_mm: Length,
                          max_bb_mm: Length,
                          resampled_spacing: Length,
                          mode: Literal['reflect', 'constant', 'nearest',
                                        'mirror', 'wrap'] = 'constant',
                          constant_value: Numeric = 0.0,
                          order=1) -> NumpyTensorX:
    """
    Resample a portion of a 3D volume (z, y, x) to a specified spacing/bounding box.

    Args:
        np_volume: a 3D volume
        np_volume_spacing: the spacing [sz, sy, sx] of the input volume
        np_volume_origin: the origin [z, y, x] of the input volume
        min_bb_mm: the min position [z, y, x] of the input volume to be resampled
        max_bb_mm: the max position [z, y, x] of the input volume to be resampled
        resampled_spacing: the spacing of the resampled volume
        mode: specifies how to handle the boundary. See :func:`scipy.ndimage.affine_transform`
        constant_value: if mode == `constant`, use `constant_value` as background value
        order: interpolation order [0..5]

    Returns:
        resampled volume
    """
    zooming_matrix = np.identity(3)
    zooming_matrix[0, 0] = resampled_spacing[0] / np_volume_spacing[0]
    zooming_matrix[1, 1] = resampled_spacing[1] / np_volume_spacing[1]
    zooming_matrix[2, 2] = resampled_spacing[2] / np_volume_spacing[2]

    offset = ((min_bb_mm[0] - np_volume_origin[0]) / np_volume_spacing[0],
              (min_bb_mm[1] - np_volume_origin[1]) / np_volume_spacing[1],
              (min_bb_mm[2] - np_volume_origin[2]) / np_volume_spacing[2])

    output_shape = np.ceil([
        max_bb_mm[0] - min_bb_mm[0],
        max_bb_mm[1] - min_bb_mm[1],
        max_bb_mm[2] - min_bb_mm[2],
    ]) / resampled_spacing

    if order >= 2:
        prefilter = True
    else:
        # pre-filtering is VERY slow and unnecessary for order < 2,
        # so disable it
        prefilter = False

    np_volume_r = affine_transform(np_volume,
                                   zooming_matrix,
                                   offset=offset,
                                   mode=mode,
                                   order=order,
                                   prefilter=prefilter,
                                   cval=constant_value,
                                   output_shape=output_shape.astype(int))

    return np_volume_r
Example #41
def read_cropped_image(p, augment):
    """
    @param p : the name of the picture to read
    @param augment: True/False if data augmentation should be performed
    @return a numpy array with the transformed image
    """
    # If an image id was given, convert to filename
    if p in h2p:
        p = h2p[p]  #'xxxx.jpg'
    size_x, size_y = p2size[p]

    # Determine the region of the original image we want to capture based on the bounding box.
    row = p2bb.loc[p]
    x0, y0, x1, y1 = row['x0'], row['y0'], row['x1'], row['y1']
    dx = x1 - x0
    dy = y1 - y0
    x0 = max(0, x0 - dx * crop_margin)
    x1 = min(size_x, x1 + dx * crop_margin + 1)
    y0 = max(0, y0 - dy * crop_margin)
    y1 = min(size_y, y1 + dy * crop_margin + 1)

    # Generate the transformation matrix
    trans = np.array([[1, 0, -0.5 * img_shape[0]], [0, 1, -0.5 * img_shape[1]],
                      [0, 0, 1]])
    trans = np.dot(
        np.array([[(y1 - y0) / img_shape[0], 0, 0],
                  [0, (x1 - x0) / img_shape[1], 0], [0, 0, 1]]), trans)
    if augment:
        trans = np.dot(
            build_transform(
                random.uniform(-5, 5), random.uniform(-5, 5),
                random.uniform(0.8, 1.0), random.uniform(0.8, 1.0),
                random.uniform(-0.05 * (y1 - y0), 0.05 * (y1 - y0)),
                random.uniform(-0.05 * (x1 - x0), 0.05 * (x1 - x0))), trans)
    trans = np.dot(
        np.array([[1, 0, 0.5 * (y1 + y0)], [0, 1, 0.5 * (x1 + x0)],
                  [0, 0, 1]]), trans)

    # Read the image, convert to black and white, and convert to a numpy array
    img = read_raw_image(p).convert('L')
    img = img_to_array(img)

    # Apply affine transformation
    matrix = trans[:2, :2]
    offset = trans[:2, 2]
    img = img.reshape(img.shape[:-1])
    img = affine_transform(img,
                           matrix,
                           offset,
                           output_shape=img_shape[:-1],
                           order=1,
                           mode='constant',
                           cval=np.average(img))
    img = img.reshape(img_shape)

    # Normalize to zero mean and unit variance
    img -= np.mean(img, keepdims=True)
    img /= np.std(img, keepdims=True) + K.epsilon()
    return img
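
The three np.dot calls above compose, right to left, "move the crop center to the origin, scale, move back" in homogeneous coordinates. A tiny numeric check of that pattern, with illustrative numbers:

import numpy as np

to_origin = np.array([[1.0, 0, -16], [0, 1, -16], [0, 0, 1]])   # center -> origin
scale     = np.diag([2.0, 2.0, 1.0])                            # 2x zoom out
to_center = np.array([[1.0, 0, 48], [0, 1, 48], [0, 0, 1]])     # origin -> crop center
trans = to_center @ scale @ to_origin

# the output center (16, 16) maps to the crop center (48, 48) in the input
assert np.allclose(trans @ np.array([16.0, 16.0, 1.0]), [48.0, 48.0, 1.0])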
Example #42
def fit_into(image,shape=(15,15),eps=0.2):
    x0,y0,x1,y1 = bbox(image,eps=eps)
    scale = 1.0/min(shape[0]*1.0/(x1-x0),shape[1]*1.0/(y1-y0))
    offset = array([(x1+x0)/2-scale*shape[0]/2,(y1+y0)/2-scale*shape[1]/2])
    affine = scale*array([[1,0],[0,1]]) 
    return ndimage.affine_transform(image,affine,
                                    offset=offset,
                                    output_shape=shape)
Example #43
def imresize(a, scalingFactor, **kw):
  if abs(scalingFactor-1.0) < 1e-10:
    return a
  return spimage.affine_transform(a,
                                  [1./scalingFactor, 1./scalingFactor],
                                  output_shape=[round(a.shape[0] * scalingFactor),
                                                round(a.shape[1] * scalingFactor)],
                                  **kw)
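
A quick usage check for imresize, assuming spimage is scipy.ndimage as the call suggests; the 1-D matrix holds the inverse scale because affine_transform maps output coordinates back into the input.

import numpy as np

a = np.random.rand(100, 80)
b = imresize(a, 0.5, order=1)      # half size
assert b.shape == (50, 40)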
Example #44
def test_affine_reshape(x):
    M = np.eye(4)
    np.random.seed(42)
    M[:3] += .1 * np.random.uniform(-1, 1, (3, 4))
    output_shape = (33,45,97)
    out1 = affine(x, M, interpolation = "linear", output_shape = output_shape)
    out2 = ndimage.affine_transform(x, M, order=1, prefilter=False, output_shape = output_shape)
    return out1,out2
Example #45
def flip_images(pixel_array, mask):
    # performs a vertical flip, though it has not been fully verified yet

    x,y,z = pixel_array.shape
    
    p = [int(x/2),int(y/2),int(z/2)]
    n = [0,0,-1]
    A = trans.reflections.rfnorm2aff(n,p)
    pixel_array = affine_transform(pixel_array,A)
    mask = affine_transform(mask,A)
    
    flipped_data = {
        "pixel_array" : pixel_array,
        "mask" : mask
    }

    return flipped_data
Example #46
 def apply_resize(self, workspace, input_image_name, output_image_name):
     image = workspace.image_set.get_image(input_image_name)
     image_pixels = image.pixel_data
     if self.size_method == R_BY_FACTOR:
         factor = self.resizing_factor.value
         shape = (np.array(image_pixels.shape[:2]) * factor +
                  .5).astype(int)
     elif self.size_method == R_TO_SIZE:
         if self.use_manual_or_image == C_MANUAL:
             shape = np.array(
                 [self.specific_height.value, self.specific_width.value])
         elif self.use_manual_or_image == C_IMAGE:
             shape = np.array(
                 workspace.image_set.get_image(
                     self.specific_image.value).pixel_data.shape).astype(
                         int)
     #
     # A little bit of weirdness here. The input pixels are numbered 0 to
     # shape-1, and so are the output pixels. Therefore the affine transform
     # is the ratio of the two (shape - 1) values.
     #
     ratio = ((np.array(image_pixels.shape[:2]).astype(float) - 1) /
              (shape.astype(float) - 1))
     transform = np.array([[ratio[0], 0], [0, ratio[1]]])
     if self.interpolation not in I_ALL:
         raise NotImplementedError("Unsupported interpolation method: %s" %
                                   self.interpolation.value)
     order = (0 if self.interpolation == I_NEAREST_NEIGHBOR else
              1 if self.interpolation == I_BILINEAR else 2)
     if image_pixels.ndim == 3:
         output_pixels = np.zeros(
             (shape[0], shape[1], image_pixels.shape[2]),
             image_pixels.dtype)
         for i in range(image_pixels.shape[2]):
             affine_transform(image_pixels[:, :, i],
                              transform,
                              output_shape=tuple(shape),
                              output=output_pixels[:, :, i],
                              order=order)
     else:
         output_pixels = affine_transform(image_pixels,
                                          transform,
                                          output_shape=shape,
                                          order=order)
     output_image = cpi.Image(output_pixels)
     workspace.image_set.add(output_image_name, output_image)
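The (shape - 1)/(shape - 1) ratio above aligns the first and last pixel centers of input and output exactly; a standalone sketch of that corner-aligned scaling (arrays illustrative):

import numpy as np
from scipy.ndimage import affine_transform

src = np.random.rand(5, 7)
out_shape = (9, 13)
# Corner-aligned scaling: pixels 0 and shape-1 map exactly onto each other
ratio = (np.array(src.shape, dtype=float) - 1) / (np.array(out_shape, dtype=float) - 1)
resized = affine_transform(src, np.diag(ratio), output_shape=out_shape, order=1)
assert np.isclose(resized[0, 0], src[0, 0]) and np.isclose(resized[-1, -1], src[-1, -1])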
Example #47
0
def get_smoothed_fieldmap(fm, fm_mag, epi_qform, epi_shape):
    import pytave
    # FIXME: how do I automatically get the correct directory here?
    pytave.addpath('/home/bobd/git/nims/nimsutil')

    xform = np.dot(np.linalg.inv(fm.get_qform()), epi_qform)
    fm_pixdim = np.array(fm.get_header().get_zooms()[0:3])
    fm_mag_data = fm_mag.get_data().mean(3).squeeze()
    # clean up with a little median filter and some greyscale open/close.
    # We'll use a structure element that is about 5mm (rounded up to the nearest pixdim)
    filter_size = 5.0/fm_pixdim
    fm_mag_data = ndimage.median_filter(fm_mag_data, filter_size.round().astype(int))
    fm_mag_data = ndimage.morphology.grey_opening(fm_mag_data, filter_size.round().astype(int))
    fm_mag_data = ndimage.morphology.grey_closing(fm_mag_data, filter_size.round().astype(int))

    # Total image volume, in cc:
    fm_volume = np.prod(fm_pixdim) * np.prod(fm.get_shape()) / 1000
    # typical human cranial volume is up to 1800cc. There's also some scalp and maybe neck,
    # so we'll say 2500cc of expected tissue volume.
    mag_thresh = np.percentile(fm_mag_data, max(0.0,100.0*(1.0-2500.0/fm_volume)))
    mask = ndimage.binary_opening(fm_mag_data>mag_thresh, iterations=2)

    # Now delete all the small objects, just keeping the largest (which should be the brain!)
    label_im,num_objects = ndimage.measurements.label(mask)
    sizes = ndimage.measurements.sum(mask, label_im, range(1, num_objects + 1))
    mask = label_im == (np.argmax(sizes) + 1)
    mask_volume = np.prod(fm_pixdim) * sizes.max() / 1000.0
    mask_sm = ndimage.gaussian_filter(mask.astype(float), filter_size)

    fm_Hz = fm.get_data().astype(float).squeeze()

    fm_Hz_sm = ndimage.gaussian_filter(fm_Hz * mask_sm, filter_size/2)

    fm_final = np.empty(epi_shape[0:3])
    ndimage.affine_transform(fm_Hz_sm, xform[0:3,0:3], offset=xform[0:3,3], output_shape=epi_shape[0:3], output=fm_final)

    [xxBc,yyBc,zzBc] = np.mgrid[0:epi_shape[0],0:epi_shape[1],0:epi_shape[2]] # grid the epi, get location of each sampled voxel
    # Now apply the transform. The following is equivalent to dot(xform,coords), where "coords" is the
    # list of all the (homogeneous) coords. Writing out the dot product like this is just faster and easier.
    xxB = xxBc*xform[0,0] + yyBc*xform[0,1] + zzBc*xform[0,2] + xform[0,3]
    yyB = xxBc*xform[1,0] + yyBc*xform[1,1] + zzBc*xform[1,2] + xform[1,3]
    zzB = xxBc*xform[2,0] + yyBc*xform[2,1] + zzBc*xform[2,2] + xform[2,3]
    # use local linear regression to smooth and interpolate the fieldmaps.
    fm_smooth_param = 7.5/np.array(fm.get_header().get_zooms()[0:3])  # Want 7.5mm in voxels, so =7.5/mm_per_vox
    fm_smooth = pytave.feval(1,'localregression3d',fm.get_data(),xxB+1,yyB+1,zzB+1,np.array([]),np.array([]),fm_smooth_param,fm_mag.get_data())[0]
    return fm_smooth
Example #48
0
 def imshow():
     angle_text.Label = "Angle: %d" % int(angle[0])
     angle_text.Refresh()
     my_angle = -angle[0] * np.pi / 180.0
     transform = np.array([[np.cos(my_angle), -np.sin(my_angle)], [np.sin(my_angle), np.cos(my_angle)]])
     # Make it rotate about the center
     offset = affine_offset(pixel_data.shape, transform)
     x = np.dstack(
         (
             scind.affine_transform(pixel_data[:, :, 0], transform, offset, order=0),
             scind.affine_transform(pixel_data[:, :, 1], transform, offset, order=0),
             scind.affine_transform(pixel_data[:, :, 2], transform, offset, order=0),
         )
     )
     buff = x.astype(np.uint8).tostring()
     bitmap = wx.BitmapFromBuffer(x.shape[1], x.shape[0], buff)
     canvas.SetBitmap(bitmap)
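affine_offset is a project helper; the standard choice it presumably makes is offset = center - transform @ center, so that the image center is a fixed point of the rotation. A hypothetical stand-in:

import numpy as np

def affine_offset_sketch(shape, transform):
    # Hypothetical stand-in for the project's affine_offset: pick the offset so
    # that the image center maps to itself, i.e. center == transform @ center + offset
    center = (np.array(shape[:2], dtype=float) - 1) / 2.0
    return center - transform.dot(center)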
Example #49
0
    def apply_merge_2d(self, fm_data, fm_points, channel, show_region, num_channels, idx):
        if channel == 0:
            src = np.array(sorted(fm_points, key=lambda k: [np.cos(30 * np.pi / 180) * k[0] + k[1]]))
            dst = np.array(sorted(self.points, key=lambda k: [np.cos(30 * np.pi / 180) * k[0] + k[1]]))
            self.merge_matrix = tf.estimate_transform('affine', src, dst).params
            if show_region:
                em_data = self.orig_region
            else:
                em_data = self.orig_data
            self.merged[idx] = np.zeros(em_data.shape + (num_channels + 1,))
            self.merged[idx][:, :, -1] = em_data / em_data.max() * 100

        tf_data = ndi.affine_transform(fm_data, np.linalg.inv(self.merge_matrix), order=1, output_shape=self.tf_shape)
        orig_orientation = ndi.affine_transform(tf_data, self.tf_matrix, order=1, output_shape=self.merged[idx].shape[:2])

        self.merged[idx][:, :, channel] = orig_orientation
        self.print('Merged.shape: ', self.merged[idx].shape)
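The merge matrix above comes from skimage's estimate_transform; a minimal round-trip sketch of that call (point sets synthetic):

import numpy as np
from skimage import transform as tf

src = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
dst = src * 2.0 + np.array([5.0, -3.0])  # known scale + translation
params = tf.estimate_transform('affine', src, dst).params
# params maps homogeneous src points onto dst points
mapped = (params @ np.column_stack([src, np.ones(len(src))]).T).T[:, :2]
assert np.allclose(mapped, dst)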
Example #50
0
def window2(image, center, dims, mode='wrap'):
    matrix = np.array([[1, 0], [0, 1]])
    shift = np.array(center) - (np.array(dims) / 2.0)
    return nd.affine_transform(image,
                               matrix,
                               offset=shift,
                               output_shape=dims,
                               mode=mode)
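A usage sketch for window2, pulling a patch centered near the image border so the 'wrap' behavior matters (the image is illustrative):

import numpy as np

image = np.random.rand(100, 100)
patch = window2(image, center=(5, 5), dims=(32, 32))
print(patch.shape)  # (32, 32); samples outside the image wrap around to the far side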
Example #51
0
def transfo(angle_IS, angle_PA, angle_LR, shift_LR, shift_PA, shift_IS, data,
            interpolation, rescale):
    """apply rotation and translation on image
     :param angle_IS: angle of rotation around Inferior/Superior axis
     :param angle_PA: angle of rotation around Posterior/Anterior axis
     :param angle_LR: angle of rotation around Left/Right axis
     :param shift_LR: value of shift along Left/Right axis
     :param shift_PA: value of shift along Posterior/Anterior axis
     :param shift_IS: value of shift along Inferior/Superior axis
     :param data: padded image data
     :param rescale: image rescaling factor
     :return data: image data with a padding
     :return data_rot: return image data after random transformation
     """
    # print angles and shifts
    print('angles of rotation IS:', angle_IS, ' PA:', angle_PA, ' LR:',
          angle_LR)
    print('number of pixel shift LR:', shift_LR, ' PA:', shift_PA, ' IS:',
          shift_IS)

    # find center of data
    c_in = 0.5 * np.array(data.shape)

    # rotation matrix around IS
    cos_theta = np.cos(np.deg2rad(-angle_IS))
    sin_theta = np.sin(np.deg2rad(-angle_IS))
    rotation_affine_IS = np.array([[cos_theta, -sin_theta, 0],
                                   [sin_theta, cos_theta, 0], [0, 0, 1]])
    affine_arr_rotIS = rotation_affine_IS.dot(np.eye(3) * (1 / float(rescale)))

    # rotation matrix around PA
    cos_fi = np.cos(np.deg2rad(-angle_PA))
    sin_fi = np.sin(np.deg2rad(-angle_PA))
    rotation_affine_PA = np.array([[cos_fi, 0, sin_fi], [0, 1, 0],
                                   [-sin_fi, 0, cos_fi]])
    affine_arr_rotIS_rotPA = rotation_affine_PA.dot(affine_arr_rotIS)

    # rotation matrix around LR
    cos_gamma = np.cos(np.deg2rad(-angle_LR))
    sin_gamma = np.sin(np.deg2rad(-angle_LR))
    rotation_affine_LR = np.array([[1, 0, 0], [0, cos_gamma, -sin_gamma],
                                   [0, sin_gamma, cos_gamma]])
    # affine array for rotation around IS, AP and RL
    affine_arr_rotIS_rotPA_rotLR = rotation_affine_LR.dot(
        affine_arr_rotIS_rotPA)

    print('rotation matrix: \n', affine_arr_rotIS_rotPA_rotLR)

    # offset to shift the center of the old grid to the center of the new grid + random shift
    shift = c_in.dot(affine_arr_rotIS_rotPA_rotLR) - c_in - np.array(
        [shift_LR, shift_PA, shift_IS])
    # resampling data
    # TODO: consider a lower interpolation order (order 3 is about twice as fast as order 5)
    data_shift_rot = affine_transform(data,
                                      affine_arr_rotIS_rotPA_rotLR,
                                      offset=shift,
                                      order=interpolation)
    return data_shift_rot
Example #52
0
def read_cropped_image(p, augment=False):
    if p in df_data_train["p_name"].values:
        size_x = df_data_train.loc[df_data_train["p_name"] == p, "size_x"].values[0]
        size_y = df_data_train.loc[df_data_train["p_name"] == p, "size_y"].values[0]
        _,(x0,y0,x1,y1) = p2bb_train[p]
    else:
        size_x = df_data_test.loc[df_data_test["p_name"] == p, "size_x"].values[0]
        size_y = df_data_test.loc[df_data_test["p_name"] == p, "size_y"].values[0]
        x0,y0,x1,y1 = p2bb_test[p]

    dx = x1-x0
    dy = y1-y0
    x0 = x0-dx*crop_margin
    x1 = x1+dx*crop_margin+1
    y0 = y0-dy*crop_margin
    y1 = y1+dy*crop_margin+1
    if (x0<0): x0=0
    if (x1>size_x): x1=size_x
    if (y0<0): y0=0
    if (y1>size_y): y1=size_y
    dx = x1 - x0
    dy = y1 - y0
    if dx > dy*anisotropy:
        dy = 0.5*(dx/anisotropy-dy)
        y0 = y0-dy
        y1 = y1+dy
    else:
        dx = 0.5*(dy*anisotropy-dx)
        x0 = x0-dx
        x1 = x1+dx

    # generate the transformation
    trans = np.array([[1,0,-0.5*img_shape[0]], [0,1,-0.5*img_shape[1]],[0,0,1]])
    trans = np.dot(np.array([[(y1-y0)/img_shape[0],0,0],[0,(x1-x0)/img_shape[1],0],[0,0,1]]), trans)
    if augment:
        trans = np.dot(build_transform(
            random.uniform(-5,5),
            random.uniform(-5,5),
            random.uniform(0.8,1.0),
            random.uniform(0.8,1.0),
            random.uniform(-0.05*(y1-y0),0.05*(y1-y0)),
            random.uniform(-0.05*(x1-x0),0.05*(x1-x0))),
            trans
        )
    trans = np.dot(np.array([[1,0,0.5*(y1+y0)],[0,1,0.5*(x1+x0)],[0,0,1]]),trans)
    img = read_raw_image(p).convert("L")
    img = img_to_array(img)

    matrix = trans[:2,:2]
    offset = trans[:2,2]
    img = img.reshape(img.shape[:-1])
    img = affine_transform(img,matrix,offset,output_shape=img_shape[:-1],order=1,mode="constant",cval=np.average(img))
    img = img.reshape(img_shape)
    # Normalize to zero mean and unit variance
    img = img - np.mean(img, keepdims=True)
    if np.std(img, keepdims=True) == 0:
        print("hey, something is wrong with {}".format(p))
    img = img / (np.std(img, keepdims=True) + K.epsilon())
    return img
Example #53
0
def test_transform_rigid():
    #check rigid transform works, using fake data
    FAKE = np.zeros((30,30,30))
    FAKE[10:20,10:20,10:20] = np.random.rand(10,10,10)
    FAKE_affine = np.eye(4)

    #check translation only
    original_translation = [2,2,1]
    original_shift = nib.affines.from_matvec(np.diagflat([1,1,1]), original_translation)

    mat, vec = nib.affines.to_matvec(original_shift)
    FAKE_moved = affine_transform(FAKE, mat, vec, order=1)

    new_affine = transform_rigid(FAKE, FAKE_moved, np.eye(4), np.eye(4), np.eye(4), 10, "translations")
    new_translation = new_affine[:3,3]
    assert(np.allclose(new_translation, original_translation, atol=0.1))  # within 0.1 voxel

    # check rotation only
    original_rotation = [0.5, 0.2, -0.2]
    r_x, r_y, r_z = original_rotation
    rot_mat = z_rotmat(r_z).dot(y_rotmat(r_y)).dot(x_rotmat(r_x))
    original_shift = nib.affines.from_matvec(rot_mat, [0,0,0])

    mat, vec = nib.affines.to_matvec(original_shift)
    FAKE_moved = affine_transform(FAKE, mat, vec, order=1)

    new_affine = transform_rigid(FAKE, FAKE_moved, np.eye(4), np.eye(4), np.eye(4), 10, "rotations")
    new_rotation = decompose_rot_mat(new_affine[:3,:3])

    assert(np.allclose(new_rotation, original_rotation, atol=0.2))  # within 0.2 rad


    # check translation & rotations
    original_translation = [2,2,1]
    original_rotation = [0.5, -0.2, 0.2]
    r_x, r_y, r_z = original_rotation
    rot_mat = z_rotmat(r_z).dot(y_rotmat(r_y)).dot(x_rotmat(r_x))
    original_shift = nib.affines.from_matvec(rot_mat, original_translation)

    mat, vec = nib.affines.to_matvec(original_shift)
    FAKE_moved = affine_transform(FAKE, mat, vec, order=1)

    new_affine = transform_rigid(FAKE, FAKE_moved, np.eye(4), np.eye(4), np.eye(4), 10)
    new_translation = new_affine[:3,3]
    new_rotation = decompose_rot_mat(new_affine[:3,:3])
    assert(np.allclose(new_translation, original_translation, atol=0.1))  # within 0.1 voxel
    assert(np.allclose(new_rotation, original_rotation, atol=0.2))  # within 0.2 rad
Example #54
0
def rigid_alignment(coordsOriginal, coordsSkewed):
    R, tx, ty = compute_rigid_transform(coordsOriginal, coordsSkewed)
    # Reorder R from (x, y) to (row, col) coordinates
    T = np.array([[R[1][1], R[1][0]], [R[0][1], R[0][0]]])
    im = np.array(faceSkewed)
    im2 = np.zeros(im.shape, 'uint8')

    # Warp each color channel; the translation enters as the (negated) offset
    for i in range(im.shape[2]):
        im2[:, :, i] = ndimage.affine_transform(im[:, :, i], la.inv(T), offset=[-ty, -tx])
    plt.imshow(im2)
Example #55
0
def least_squared(x, i1, i2, path):
    x = np.array(x)
    T = np.identity(3)
    T[0, 2] = x[1]
    T[1, 2] = x[0]
    images = [i1, affine_transform(i2, T)]
    delta = np.sum((images[0] - images[1])**2)
    path.append((x[0], x[1], delta))
    return delta
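least_squared is shaped as an objective for a derivative-free search over a 2-D translation; a hedged sketch driving it with scipy.optimize.fmin on a synthetically shifted image (sign conventions follow the matrix built above, and boundary padding makes the optimum only approximate):

import numpy as np
from scipy.optimize import fmin
from scipy.ndimage import shift as ndshift

i1 = np.random.rand(32, 32)
i2 = ndshift(i1, (2.0, -1.0), order=1)  # i2 is i1 moved by +2 rows, -1 column
path = []
best = fmin(least_squared, [0.0, 0.0], args=(i1, i2, path), disp=False)
print(best)  # should land near [-1, 2]: x[0] is the column shift, x[1] the row shift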
Example #56
0
 def f(image, order=1):
     w, h = image.shape
     c = np.array([w, h]) / 2.0
     d = c - np.dot(m, c) + np.array([dx * w, dy * h])
     return ndi.affine_transform(image,
                                 m,
                                 offset=d,
                                 order=order,
                                 mode="nearest")
Example #57
0
def RotateImg(img, ang):
    theta = np.pi / 180 * ang
    c = np.cos(theta)
    s = np.sin(theta)
    x = img.shape[1] / 2 + 0.5
    y = img.shape[0] / 2 + 0.5
    R = img[:, :, 0]
    G = img[:, :, 1]
    B = img[:, :, 2]
    rotation_matrix = np.array([[+c, -s, +0], [+s, +c, +0], [+0, +0, +1]])
    offset_matrix = np.array([[+1, +0, +y], [+0, +1, +x], [+0, +0, +1]])
    reset_matrix = np.array([[+1, +0, -y], [+0, +1, -x], [+0, +0, +1]])
    transform_matrix = np.dot(np.dot(offset_matrix, rotation_matrix),
                              reset_matrix)
    RR = nd.affine_transform(R, transform_matrix, mode='nearest')
    RG = nd.affine_transform(G, transform_matrix, mode='nearest')
    RB = nd.affine_transform(B, transform_matrix, mode='nearest')
    return np.stack([RR, RG, RB], axis=-1)
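A usage sketch for RotateImg with a synthetic RGB array; each channel is resampled separately about the image center:

import numpy as np

img = np.random.rand(48, 64, 3)
rotated = RotateImg(img, 30.0)  # rotate 30 degrees about the center, edge-padded
print(rotated.shape)            # (48, 64, 3)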
Example #58
0
def random_transform(image):
    M = get_affine_transformation_matrix()
    tr_image = affine_transform(image, M, mode="constant")
    affine_displacement = get_affine_displacement(image.shape, M)
    alpha = np.random.uniform(low=0, high=1000)
    sigma = np.random.uniform(low=11, high=13)
    tr_image, elastic_displacement = elastic_transform(tr_image, alpha, sigma)
    displacement = affine_displacement + elastic_displacement
    return tr_image, displacement
Example #59
0
 def test_affine_transform08(self, order):
     data = numpy.array([[4, 1, 3, 2],
                         [7, 6, 8, 5],
                         [3, 5, 3, 6]])
     out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                    [-1, -1], order=order)
     assert_array_almost_equal(out, [[0, 0, 0, 0],
                                     [0, 4, 1, 3],
                                     [0, 7, 6, 8]])
Example #60
0
 def test_affine_transform05(self, order):
     data = numpy.array([[1, 1, 1, 1],
                         [1, 1, 1, 1],
                         [1, 1, 1, 1]])
     out = ndimage.affine_transform(data, [[1, 0], [0, 1]],
                                    [0, -1], order=order)
     assert_array_almost_equal(out, [[0, 1, 1, 1],
                                     [0, 1, 1, 1],
                                     [0, 1, 1, 1]])