Code example #1
def register_image(inp):

    # Configuration is passed implicitly through module-level globals:
    # inRAM_flag, refIm and crop may or may not be defined by the caller.
    if 'inRAM_flag' in globals():
        inRAM = inRAM_flag
    else:
        inRAM = True

    if 'refIm' in globals():
        refIm_ = refIm
    if 'crop' not in globals() and 'crop' not in locals():
        if inp[1].shape[0]>256:
            crop_ = True
            refIm_ = inp[0][128:-128,128:-128]
            upsample_factor = 10

        else:
            crop_ = False
            refIm_ = inp[0]
            upsample_factor = 1
        image = inp[1]

    else: 
        crop_ = crop
        upsample_factor = 10
        if not inRAM:
            image_idx = inp[0]
            image = inp[1]
            regFile = inp[2]
        else:
            image = inp

    if crop_:
        # High-pass filter the cropped frame to suppress slow intensity
        # gradients before registration.
        tmpII = image[128:-128,128:-128] - ndimage.gaussian_filter(image[128:-128,128:-128],5)
        shift, _, _ = register_translation(refIm_,tmpII,upsample_factor=upsample_factor)
    else:
        shift, _, _ = register_translation(refIm_, image, upsample_factor=upsample_factor)

    # Guard against spurious registrations: very large shifts are treated
    # as no movement rather than applied.
    if np.any(np.abs(shift)>=20):
        shift = np.array([0,0])


    
    if np.sum(np.abs(shift))!=0:
        regIm =  np.fft.ifftn(fourier_shift(np.fft.fftn(image), shift)).real
    else:
        regIm = image

   
    # Reduced-memory path: write the registered frame into the shared
    # output array instead of returning it.
    if not inRAM:
        regFile[image_idx] = regIm
        return shift
    else:
        return [shift,regIm]
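A minimal usage sketch for the in-RAM path above (hypothetical data; assumes numpy, scipy.ndimage and skimage's register_translation are already imported in the module, and that the module-level globals inRAM_flag/refIm/crop are left undefined so the defaults apply):

import numpy as np

reference = np.random.rand(512, 512)
frame = np.roll(reference, (3, -2), axis=(0, 1))

# inp[0] is the reference, inp[1] the frame to register; returns [shift, registered]
shift, registered = register_image([reference, frame])
print(shift)  # roughly (-3., 2.), the shift needed to undo the roll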
Code example #2
def _determine_registration_offset(base_image, uncorrected_image):
    """
    
    Finds the translational offset required to align this image with all others in the stack.
    Returns dx, dy adjustments in pixels *but does not change the image!*
    
    :param base_image:   a 2D numpy array that the other image should be aligned to
    :param uncorrected_image:   a 2D numpy array
    :returns:   float, float
    
    """

    # Get the dimensions of the images that we're aligning
    base_height, base_width = base_image.shape
    uncorrected_height, uncorrected_width = uncorrected_image.shape

    # We take the area that roughly corresponds to the catch channels. This has two benefits: one, it
    # speeds up the registration significantly (as it scales linearly with image size), and two, if
    # a large amount of debris/yeast/bacteria/whatever shows up in the central trench, the registration
    # algorithm goes bonkers if it's considering that portion of the image.
    # Thus we separately find the registration for the left side and right side, and average them.
    left_base_section = base_image[:, int(base_width * 0.1): int(base_width * 0.3)]
    left_uncorrected = uncorrected_image[:, int(uncorrected_width * 0.1): int(uncorrected_width * 0.3)]
    right_base_section = base_image[:, int(base_width * 0.7): int(base_width * 0.9)]
    right_uncorrected = uncorrected_image[:, int(uncorrected_width * 0.7): int(uncorrected_width * 0.9)]

    # Register the two sides independently; the final offset is their average.
    left_dy, left_dx = register_translation(left_base_section, left_uncorrected, upsample_factor=20)[0]
    right_dy, right_dx = register_translation(right_base_section, right_uncorrected, upsample_factor=20)[0]

    return (left_dy + right_dy) / 2.0, (left_dx + right_dx) / 2.0
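The returned (dy, dx) pair is then applied by the caller; a sketch (assuming scipy is available and we want to move the uncorrected image onto the base image):

import numpy as np
from scipy import ndimage

base_image = np.random.rand(256, 256)
uncorrected_image = np.roll(base_image, 2, axis=0)

dy, dx = _determine_registration_offset(base_image, uncorrected_image)
corrected = ndimage.shift(uncorrected_image, (dy, dx), mode='nearest')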
Code example #3
def calc_shift(image1, image2):
    shift, error, diffphase = register_translation(image1, image2)
    print(shift)

    # np.roll applies the integer part of the shift with periodic boundaries
    unrolled = np.roll(np.roll(image2, int(shift[1]), axis=1), int(shift[0]), axis=0)

    shift2, error2, diffphase2 = register_translation(image1, unrolled)
    print(shift2)
    return unrolled
Code example #4
def Register_Image(image,refIm,crop=False):

    if crop:
        shift, _, _ = register_translation(refIm,image[128:-128,128:-128],upsample_factor=10)
    else:
        shift, _, _ = register_translation(refIm, image, upsample_factor=10)

    if np.sum(np.abs(shift))!=0:
        regIm =  np.fft.ifftn(fourier_shift(np.fft.fftn(image), shift)).real  
    else:
        regIm = image
    return [shift,regIm]
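A quick round-trip check of Register_Image (a sketch with synthetic data; assumes numpy, scipy.ndimage.fourier_shift and register_translation are imported as in the snippets above):

import numpy as np
from scipy.ndimage import fourier_shift

ref = np.random.rand(512, 512)
moved = np.fft.ifftn(fourier_shift(np.fft.fftn(ref), (5.0, -3.0))).real

shift, registered = Register_Image(moved, ref[128:-128, 128:-128], crop=True)
print(shift)  # approximately (-5., 3.), and `registered` closely matches `ref`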
Code example #5
    def updatefig(self, *args):
        f0 = fits.open(self.files[self.loop])
        f1 = fits.open(self.files[self.loop+1])

        with torch.no_grad():

            ims = np.zeros((1,2,self.n_pixel,self.n_pixel))
            ims[0,0,:,:] = f0[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]
            ims[0,1,:,:] = f1[0].data[self.origin:self.origin+self.n_pixel,self.origin:self.origin+self.n_pixel]

            minim = np.min(ims, axis=(2,3))
            maxim = np.max(ims, axis=(2,3))

            ims = (ims - minim[:,:,None,None]) / (maxim[:,:,None,None] - minim[:,:,None,None])

            shift, error, diffphase = register_translation(self.reference, ims[0,1,:,:])
            shift = [int(f) for f in shift]                            
            ims[0,1,:,:] = np.roll(ims[0,1,:,:], shift, axis=(0,1))

            shift, error, diffphase = register_translation(self.reference, ims[0,0,:,:])
            shift = [int(f) for f in shift]                            
            ims[0,0,:,:] = np.roll(ims[0,0,:,:], shift, axis=(0,1))

            ims = torch.from_numpy(ims.astype('float32'))
            ims = ims.to(self.device)     
            
            out_forward, flow_forward = self.model(ims, backward=False)

            output = out_forward.cpu().data.numpy()
            flow = flow_forward.cpu().data.numpy()  

            ims = ims.cpu().data.numpy()

            flowx = flow[0,0,:,:]
            flowy = flow[0,1,:,:]

        f0.close()
        f1.close()

        flowx *= self.scale
        flowy *= self.scale
        
        self.im1.set_array(np.flip(ims[0,0,:,:], axis=0))
        self.im2.set_array(np.flip(ims[0,1,:,:], axis=0))
        self.flowx.set_array(np.flip(flow[0,0,:,:], axis=0))
        self.flowy.set_array(np.flip(flow[0,1,:,:], axis=0))
        self.Q.set_UVC(self.n_pixel*flowx[self.steps], self.n_pixel*flowy[self.steps])

        self.loop += 1
        self.pbar.update(1)

        return self.im1, self.im2, self.flowx, self.flowy
Code example #6
File: main.py  Project: ShakedDovrat/defects
 def _register(self):
     shift, _, _ = register_translation(self.inspection_image,
                                        self.reference_image, 10)
     tt = TranslationTransform(shift[1], shift[0])
     self.reference_image_registered = tt.transform(self.reference_image)
     self.valid_registration_mask = tt.get_valid_mask(
         self.reference_image.shape)
Code example #7
File: subpixel.py  Project: Kelvinrr/autocnet
def subpixel_phase(template, search, **kwargs):
    """
    Apply the spectral domain matcher to a search and template image. To
    shift the images, x_shift and y_shift need to be subtracted from the
    center of the search image. It may also be necessary to apply a
    fractional-pixel adjustment (if, for example, the center of the search
    is not an integer); this function does not manage shifting.

    Parameters
    ----------
    template : ndarray
               The template used to search

    search : ndarray
             The search image

    Returns
    -------
    x_offset : float
               Shift in the x-dimension

    y_offset : float
               Shift in the y-dimension

    strength : tuple
               With the RMSE error and absolute difference in phase
    """
    if not template.shape == search.shape:
        raise ValueError('Both the template and search images must be the same shape.')
    (y_shift, x_shift), error, diffphase = register_translation(search, template, **kwargs)
    return x_shift, y_shift, (error, diffphase)
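As the docstring says, shifting is left to the caller; a hedged sketch of the described adjustment (the center/match names are hypothetical):

import numpy as np

template = np.random.rand(64, 64)
search = np.roll(template, (2, -3), axis=(0, 1))

x_shift, y_shift, (error, diffphase) = subpixel_phase(template, search)
# Subtract the offsets from the center of the search image to get the match point
center_y, center_x = (search.shape[0] - 1) / 2.0, (search.shape[1] - 1) / 2.0
match_x, match_y = center_x - x_shift, center_y - y_shift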
Code example #8
def lucky_imaging_single_block(images, globalRefImage, blk_size, nbest, binning, x, y):
    # Size of the binned block (blk_size is assumed to be a multiple of binning)
    binnedBlkSize = blk_size // binning
    # Step 1 of block processing: Get the binned matrix of the quality metrics
    # qBinnedArrays is identical as its alternate in Lightdrops / C++
    qBinnedArrays = block_processing_setup(images, binning)

    stackedBlks, shifts, best_indices = make_aligned_stack(images, qBinnedArrays, nbest, blk_size, binnedBlkSize, binning, x, y)
    blkSlice = stackedBlks[:, :, 0]

    ## Find the best alignment of the stacked Block onto the global reference image
    # Reference block is the one at expected position in the global reference image
    refBlk = globalRefImage[y: y + blk_size, x: x + blk_size]
    # refBlk = globalRefImage[y-offset: y + blk_size + offset, x - offset: x + blk_size + offset]

    # Again, use phase correlation in Fourier space.
    shift, error, diffPhase = register_translation(refBlk, blkSlice)
    # The shifted positions here are in the reference frame of the stacked block,
    # so we need to add the shift instead of subtracting it. We are indeed truly shifting
    # the position of the block

    stacked_blk = np.median(stackedBlks, 2)
    xs = int(x + round(shift[1]))
    ys = int(y + round(shift[0]))

    print('done')

    return stacked_blk, shift, best_indices, xs, ys
Code example #9
def align_images(data):

    numslices=len(data)
    imageshifts = np.zeros((numslices,2))

    # calculate image shifts
    for idx in range(numslices):
        if idx == 0:
            pass
        else:
            image = np.mean(data[idx-1]['data'],0)
            offset_image = np.mean(data[idx]['data'],0)

            ## shifts in pixel precision for speed
            shift, error, diffphase = register_translation(image, offset_image)
            imageshifts[idx,:] = imageshifts[idx-1,:] + shift

    # apply image shifts
    for idx in range(numslices):
        non = lambda s: s if s<0 else None
        mom = lambda s: max(0,s)
        padded = np.zeros_like(data[idx]['data'])
        oy, ox = imageshifts[idx,:].astype(int)
        padded[:,mom(oy):non(oy), mom(ox):non(ox)] = data[idx]['data'][:,mom(-oy):non(-oy), mom(-ox):non(-ox)]
        data[idx]['data']=padded.copy()
        #tform=SimilarityTransform(translation = imageshifts[idx,:])
        #for idx2 in range(data[idx]['data'].shape[0]):
        #    tformed = warp(data[idx]['data'][idx2,:,:], inverse_map = tform)
        #    data[idx]['data'][idx2,:,:]= tformed

    return data
Code example #10
File: process.py  Project: feldman4/lasagna
def register_images(images, index=None, window=(500, 500), upsample=1.):
    """Register a series of image stacks to pixel accuracy.
    :param images: list of N-dim image arrays, height and width may differ
    :param index: image[index] should yield 2D array with which to perform alignment
    :param window: centered window in which to perform registration, smaller is faster
    :param upsample: align to sub-pixels of width 1/upsample
    :return list[(int)]: list of offsets
    """
    if index is None:
        index = ((0,) * (images[0].ndim - 2) + (slice(None),) * 2)

    sz = [image[index].shape for image in images]
    sz = np.array([max(x) for x in zip(*sz)])

    origin = np.array(images[0].shape) * 0.

    center = tuple([slice(s // 2 - min(s // 2, rw), s // 2 + min(s // 2, rw))
                    for s, rw in zip(sz, window)])

    def pad(img):
        pad_width = [(s // 2, s - s // 2) for s in (sz - img.shape)]
        img = np.pad(img, pad_width, 'constant')
        return img[center], np.array([x[0] for x in pad_width]).astype(float)

    image0, pad_width = pad(images[0][index])
    offsets = [origin.copy()]
    offsets[0][-2:] += pad_width
    for image in [x[index] for x in images[1:]]:
        padded, pad_width = pad(image)
        shift, error, _ = register_translation(image0, padded, upsample_factor=upsample)

        offsets += [origin.copy()]
        offsets[-1][-2:] = shift + pad_width  # automatically cast to uint64

    return offsets
Code example #11
def get_pin_vertical_offset(
    img_0: np.ndarray, 
    img_180: np.ndarray,
    ) -> float:
    """
    Description
    -----------
    Calculate the vertical offset (related to the wedge angle) between a 180
    degree pair of pin images during alignment, which can be used to calculate
    the amount of additional tilt adjustment needed to level the SMS.

    Parameters
    ----------
    img_0: np.ndarray
        img taken at omega=0
    img_180: np.ndarray
        img taken at omega=180
    
    Return
    ------
    Offset from img_180 to img_0.  For example               
        if offset > 0:  img_180 is higher than img_0
                        img_180
                img_0
        if offset < 0:  img_180 is lower than img_0
                img_0
                        img_180
    """
    img_0 = _safe_read_img(img_0)
    img_180 = _safe_read_img(img_180)
    shift, _, _ = register_translation(img_0, img_180, upsample_factor=100)
    return shift[0]
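A sign-convention check (sketch; assumes _safe_read_img passes ndarrays through unchanged):

import numpy as np

img_0 = np.zeros((128, 128))
img_0[60:70, 60:70] = 1.0
img_180 = np.roll(img_0, -4, axis=0)  # pin appears 4 rows higher

offset = get_pin_vertical_offset(img_0, img_180)
print(offset)  # ~ +4.0: positive, so img_180 is higher than img_0, as documented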
Code example #12
File: kermitv1.py  Project: gb119/kermit
 def correct_drift(self, ref, threshold=0.005):
     """Align images to correct for image drift.
     Detects common features on the images and tracks them moving.
     
     Parameters
     ----------
     ref: KerrArray or ndarray
         reference image with zero drift
     threshold: float
         threshold for detecting imperfections in images 
         (see skimage.feature.corner_fast for details)
     
     Returns
     -------
     shift: array
         shift vector relative to ref (x drift, y drift)
     transim: KerrArray
         copy of self translated to account for drift"""
     refed=filters.gaussian(ref,sigma=1)
     refed=feature.corner_fast(refed,threshold=threshold)
     imed=self.clone
     imed=filters.gaussian(imed,sigma=1)
     imco=feature.corner_fast(imed,threshold=threshold)
     shift,err,phase=feature.register_translation(refed,imco,upsample_factor=50)
     #tform = SimilarityTransform(translation=(-shift[1],-shift[0]))
     #imed = transform.warp(im, tform) #back to original image
     self=self.translate(translation=(-shift[1],-shift[0]))
     return [shift,self]   
Code example #13
File: deblur.py  Project: smorad/spin_odom
    def estimate_rate(self):
        window_frames = 40
        base_frames = 5
        fps = 90
        results = []
        for i in range(base_frames):
            base_img = cv2.imread(self.img_paths[i],0)
            cmp_frames = range(i+1, i+window_frames)
            for j in cmp_frames:
                cmp_img = cv2.imread(self.img_paths[j],0)
                shift, error, diffphase = feature.register_translation(base_img, cmp_img)
                if diffphase < 0:
                    # not interested in inverse matches
                    error = 10000
                results += [(error, shift, i, j)]

        rates = []
        for candidate in results:
            error, shift, base_idx, cmp_idx = candidate
            rate = (cmp_idx - base_idx) * (1/fps) * 2 * math.pi
            rates += [rate]

        print(rates, np.mean(rates))
        plt.figure()
        plt.imshow(cv2.imread(self.img_paths[base_idx],0))
        plt.figure()
        plt.imshow(cv2.imread(self.img_paths[cmp_idx],0))
        plt.show()
Code example #14
def registerStack(refImg,testImg,myROI,upsamplingFactor):
    refImgROI = setROIregistration(myROI,refImg)
    #refImgROI = setDefaultROIregistration(refImg)
    testImgROI = setROIregistration(myROI,testImg)
    #testImgROI = setDefaultROIregistration(testImg)
    resReg = register_translation(refImgROI,testImgROI,upsample_factor=upsamplingFactor,space='real')
    theShifts = resReg[0]
    regImage = ndimage.shift(testImg,(theShifts[0],theShifts[1]))
    return theShifts,regImage
Code example #15
File: image.py  Project: histed/PyToolsMH
def align_stack(im, alignNs=r_[0:100], print_status=True, do_plot=False):
    """Realign a stack to an image -- default to mean image from near the start

    Args:
        im
        alignNs: frameNs to average to give the alignment reference image
        print_status: give updates for long calcs to terminal
        do_plot: show a plot with alignment calculations

    Returns:
        aligned stack, same size as input stack, padded with zeros where shifted

    """
    
    # run alignment calculations, saving result in a dataframe
    aligntarg = im[alignNs,:,:].mean(axis=0)
    tL = []
    nfrdo = im.shape[0]
    if print_status: print('Computing offsets ({} frames)... '.format(nfrdo), end='')
    for iF in range(nfrdo):  
        tL.append(feature.register_translation(aligntarg, im[iF,:,:]))

    regDf = pd.DataFrame(tL, columns=('coords','err','phasediff'))
    regDf['row'] = [x[0][0] for x in tL]
    regDf['col'] = [x[0][1] for x in tL]

    if do_plot:
        gs = mpl.gridspec.GridSpec(2,2)
        fig = plt.figure()
        plt.subplot(gs[0,0])
        plt.plot(regDf.err)
        plt.title('translation-independent error')
        plt.ylabel('RMS error')
        plt.subplot(gs[0,1])
        plt.plot(regDf.col)
        plt.plot(regDf.row)
        plt.title('row and col pixel offsets')
        plt.legend(['col','row'])

    # do the shifts
    regim = np.zeros_like(im)
    maxv = im.max()
    if print_status: print('Aligning frames... ', end='')
    for iF in range(nfrdo):
        regim[iF,:,:] = transform.warp(im[iF,:,:]*1.0/maxv, \
                    transform.SimilarityTransform(translation=(-1*regDf.col[iF],-regDf.row[iF]))) * maxv
        if print_status and iF % 500 == 0:
            print('%d (%d,%d)'%(iF,-regDf.col[iF],-regDf.row[iF]), end=' ')
    if print_status: print('Done.')

    return regim
Code example #16
File: deshake.py  Project: pmoret/deshake
def find_shift(ref, img):
    """
    Find a translation between two images
    :param ref: The reference image
    :param img: The image
    :return:    The shift
    """
    im0 = prepare(ref)
    im1 = prepare(img)
    shift, error, diffphase = register_translation(im0, im1, 100)

    return shift
Code example #17
File: alignment.py  Project: jimrybarski/fylm_critic
def make_registered_image(image: Image,
                          device: Device,
                          rotation: float,
                          source_image: np.ndarray) -> AdjustedImage:
    normalized_image = _normalize_image(image, device)
    rotated_image = transform.rotate(normalized_image, rotation)
    (y, x), error, phase = feature.register_translation(source_image,
                                                        rotated_image,
                                                        upsample_factor=20)
    registered_image = transform.warp(rotated_image, transform.AffineTransform(translation=(-x, -y)))
    return AdjustedImage(Image(registered_image, image.frame, image.timestamp,
                               image.field_of_view, image.channel, image.z_offset),
                         rotation,
                         (x, y))
Code example #18
File: speckletracking.py  Project: decarlof/wavepy
def _speckleDisplacementSingleCore_method1(image, image_ref, halfsubwidth,
                                           subpixelResolution, stride, verbose):
    '''
    see http://scikit-image.org/docs/dev/auto_examples/transform/plot_register_translation.html
    '''

    irange = np.arange(halfsubwidth,
                       image.shape[0] - halfsubwidth + 1,
                       stride)
    jrange = np.arange(halfsubwidth,
                       image.shape[1] - halfsubwidth + 1,
                       stride)

    pbar = tqdm(total=np.size(irange))  # progress bar

    sx = np.ones(image.shape) * NAN
    sy = np.ones(image.shape) * NAN
    error = np.ones(image.shape) * NAN

    for (i, j) in itertools.product(irange, jrange):

        interrogation_window = image_ref[i - halfsubwidth:i + halfsubwidth + 1,
                               j - halfsubwidth:j + halfsubwidth + 1]

        sub_image = image[i - halfsubwidth:i + halfsubwidth + 1,
                    j - halfsubwidth:j + halfsubwidth + 1]

        shift, error_ij, _ = register_translation(sub_image,
                                                  interrogation_window,
                                                  subpixelResolution)

        sx[i, j] = shift[1]
        sy[i, j] = shift[0]
        error[i, j] = error_ij

        if j == jrange[-1]: pbar.update()  # update progress bar

    print(" ")

    return (sx[halfsubwidth:-halfsubwidth:stride,
            halfsubwidth:-halfsubwidth:stride],
            sy[halfsubwidth:-halfsubwidth:stride,
            halfsubwidth:-halfsubwidth:stride],
            error[halfsubwidth:-halfsubwidth:stride,
            halfsubwidth:-halfsubwidth:stride],
            stride)
Code example #19
File: rotation.py  Project: carterbox/tomopy
def find_center_pc(proj1, proj2, tol=0.5, rotc_guess=None):
    """
    Find rotation axis location by finding the offset between the first
    projection and a mirrored projection 180 degrees apart using
    phase correlation in Fourier space.
    The ``register_translation`` function uses cross-correlation in Fourier
    space, optionally employing an upsampled matrix-multiplication DFT to
    achieve arbitrary subpixel precision. :cite:`Guizar:08`.

    Parameters
    ----------
    proj1 : ndarray
        2D projection data.

    proj2 : ndarray
        2D projection data.

    tol : scalar, optional
        Subpixel accuracy

    rotc_guess : float, optional
        Initial guess value for the rotation center

    Returns
    -------
    float
        Rotation axis location.
    """
    imgshift = 0.0 if rotc_guess is None else rotc_guess - (proj1.shape[1]-1.0)/2.0

    proj1 = ndimage.shift(proj1, [0,-imgshift], mode='constant', cval=0)
    proj2 = ndimage.shift(proj2, [0,-imgshift], mode='constant', cval=0)


    # create reflection of second projection
    proj2 = np.fliplr(proj2)

    # Determine shift between images using scikit-image pcm
    shift = register_translation(proj1, proj2, upsample_factor=1.0/tol)

    # Compute center of rotation as the center of first image and the
    # registered translation with the second image
    center = (proj1.shape[1] + shift[0][1] - 1.0)/2.0

    return center + imgshift
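A minimal sanity check (sketch): for an ideal mirrored pair, the recovered center is the middle column.

import numpy as np

proj1 = np.random.rand(64, 128)
proj2 = np.fliplr(proj1)  # perfect 180-degree pair, axis at the detector center

center = find_center_pc(proj1, proj2)
print(center)  # ~ 63.5, i.e. (ncols - 1) / 2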
Code example #20
File: imagefuncs.py  Project: gb119/Stoner-PythonCode
def correct_drift(im, ref, threshold=0.005, upsample_factor=50, box=None, do_shift=True):
    """Align images to correct for image drift.

    Args:
        ref (ImageArray): Reference image with assumed zero drift

    Keyword Arguments:
        threshold (float): threshold for detecting imperfections in images
            (see skimage.feature.corner_fast for details)
        upsample_factor (float): the resolution for the shift 1/upsample_factor pixels registered.
            see skimage.feature.register_translation for more details
        box (sequence of 4 ints): defines a region of the image to use for identifying the drift
            defaults to the whole image. Use this to avoid drift calculations being confused by
            the scale bar/annotation region.
        do_shift (bool): Shift the image, or just calculate the drift and store in metadata (default True, shift)

    Returns:
        A shifted image with the image shift added to the metadata as 'correct_drift'.

    Detects common features on the images and tracks them moving.
    Adds 'drift_shift' to the metadata as the (x,y) vector that translates the
    image back to its origin.
    """
    if box is None:
        box = im.max_box
    cim = im.crop_image(box=box)

    refed = ImageArray(ref, get_metadata=False)
    refed = refed.crop_image(box=box)
    refed = refed.filter_image(sigma=1)
    refed = refed > refed.threshold_otsu()
    refed = refed.corner_fast(threshold=threshold)

    imed = cim.clone
    imed = imed.filter_image(sigma=1)
    imed = imed > imed.threshold_otsu()
    imed = imed.corner_fast(threshold=threshold)

    shift, _, phase = feature.register_translation(refed, imed, upsample_factor=upsample_factor)
    if do_shift:
        im = im.translate(translation=(-shift[1], -shift[0]))  # x,y
    im.metadata["correct_drift"] = (-shift[1], -shift[0])
    return im
Code example #21
 def add_target(self, *targets):
     """
     add another target (or more targets) to the list of targets
     """
     def check_shape(target):
         """
         check the shape of the array against our source
         """
         if not np.array_equal(self.source.shape, target.shape):
             raise ValueError("target shape doesn't match source")
     for this_target in targets:
         check_shape(this_target)
         # add our target
         self.images.append(this_target)
         # compute its offset to the source
         translation, _, _ = feature.register_translation(
             self.gray_source,
             self.grayscale_function(this_target))
         self.translations.append(translation)
Code example #22
File: opflowreg.py  Project: chrinide/image-funcut
    def greenberg_kerr(image, template, nparam=11, transpose=True, **fnargs):
        if transpose:
            template = template.T
            image = image.T
        aligner = GK_image_aligner()
        shift = skfeature.register_translation(template, image, upsample_factor=4.)[0]
        p0x,p0y = np.ones(nparam)*shift[1], np.ones(nparam)*shift[0]

        if 'maxiter' not in fnargs:
            fnargs['maxiter'] = 25
            
        res, p = aligner(image, template, p0x,p0y, **fnargs)
        def _regfn(coordinates):
            sh = coordinates[0].shape
            dx = aligner.wcoords_from_params1d(p[0], sh)
            dy = aligner.wcoords_from_params1d(p[1], sh)
            if transpose:
                dx,dy = dy,dx
            return [coordinates[0]-dy, coordinates[1]-dx]
        return _regfn
Code example #23
def register_translation(device=0):
    """
    capture 2 frames and use feature.register_translation to estimate a translation between them.

    TARGET moves to SOURCE.

    """

    # capture two frames
    frames = capture(2, device=device)

    # estimate parameters
    shifts, error, phasediff = feature.register_translation(
        visual_fields.grayscale(frames[0]),
        visual_fields.grayscale(frames[1]))

    # we'll just print them for now
    print("translation vector: {}".format(shifts))
    print("error: {}".format(error))
    print("phase difference (should be zero): {}".format(phasediff))
Code example #24
File: find_center.py  Project: lbluque/random
def find_center_pc(proj1, proj2, tol=0.5):
    """
    Find rotation axis location by finding the offset between the first
    projection and a mirrored projection 180 degrees apart using
    phase correlation in Fourier space.
    The ``register_translation`` function uses cross-correlation in Fourier
    space, optionally employing an upsampled matrix-multiplication DFT to
    achieve arbitrary subpixel precision. [1]_

    [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
       "Efficient subpixel image registration algorithms," Optics Letters 33,
       156-158 (2008).

    Parameters
    ----------
    proj1 : ndarray
        2D projection data.

    proj2 : ndarray
        2D projection data.

    tol : scalar, optional
        Subpixel accuracy

    Returns
    -------
    float
        Rotation axis location.
    """

    # create reflection of second projection
    proj2 = np.fliplr(proj2)

    # Determine shift between images using scikit-image pcm
    shift = register_translation(proj1, proj2, upsample_factor=1.0/tol)

    # Compute center of rotation as the center of first image and the
    # registered translation with the second image
    center = (proj1.shape[1] + shift[0][1])/2.0

    return center
Code example #25
File: tools_3d.py  Project: TomSlater/My-Repository
def ss_align(series,short_series,ss_shifts=[]):

    # Spacing of the short-series frames within the full series
    step = (len(series)-1)//(len(short_series)-1)

    alignments_init = []
    for i in range(len(short_series)):
        alignments_init.append(register_translation(short_series[i],series[i*step],100)[0])

    if ss_shifts != []:
        alignments_init = ss_shifts

    alignments = []
    for i in range(len(series)):
        if i==0 or i%step==0:
            alignments.append(alignments_init[i//step])
        else:
            # Linearly interpolate between the two bracketing short-series shifts
            diff = alignments_init[i//step+1]-alignments_init[i//step]
            alignments.append(alignments_init[i//step]+(i%step)*(diff/float(step)))

    return alignments
Code example #26
File: speckletracking.py  Project: decarlof/wavepy
def _func_4_starmap_async_method1(args, parList):
    '''
    see http://scikit-image.org/docs/dev/auto_examples/transform/plot_register_translation.html
    '''
    i = args[0]
    j = args[1]
    image = parList[0]
    image_ref = parList[1]
    halfsubwidth = parList[2]
    subpixelResolution = parList[3]


    interrogation_window = image_ref[i - halfsubwidth:i + halfsubwidth + 1,
                           j - halfsubwidth:j + halfsubwidth + 1]

    sub_image = image[i - halfsubwidth:i + halfsubwidth + 1,
                j - halfsubwidth:j + halfsubwidth + 1]

    shift, error_ij, _ = register_translation(sub_image,
                                              interrogation_window,
                                              subpixelResolution)

    return shift[1], shift[0], error_ij
Code example #27
File: kfuncs.py  Project: gb119/kermit
def correct_drift(im, ref, threshold=0.005, upsample_factor=50):
    """Align images to correct for image drift.
    Detects common features on the images and tracks them moving.
    Adds 'drift_shift' to the metadata as the (x,y) vector that translates the
    image back to its origin.
    
    Parameters
    ----------
    ref: KerrArray or ndarray
        reference image with zero drift
    threshold: float
        threshold for detecting imperfections in images 
        (see skimage.feature.corner_fast for details)
    upsample_factor:
        the resolution for the shift 1/upsample_factor pixels registered.
        see skimage.feature.register_translation for more details
    
    Returns
    -------
    shift: array
        shift vector relative to ref (x drift, y drift)
    transim: KerrArray
        copy of im translated to account for drift"""
    
    refed=KerrArray(ref,get_metadata=False)
    refed=refed.filter_image(sigma=1)
    refed=refed.corner_fast(threshold=threshold)
    
    imed=im.clone
    imed=imed.filter_image(sigma=1)
    imed=imed.corner_fast(threshold=threshold)
    
    shift,err,phase=feature.register_translation(refed,imed,upsample_factor=upsample_factor)
    im=im.translate(translation=(-shift[1],-shift[0])) #x,y
    im.metadata['correct_drift']=(-shift[1],-shift[0])
    return im  
Code example #28
File: alignment.py  Project: tekinbicer/tomopy
def align_seq(
        prj, ang, fdir='.', iters=10, pad=(0, 0),
        blur=True, center=None, algorithm='sirt',
        upsample_factor=10, rin=0.5, rout=0.8,
        save=False, debug=True):
    """
    Aligns the projection image stack using the sequential
    re-projection algorithm :cite:`Gursoy:17`.

    Parameters
    ----------
    prj : ndarray
        3D stack of projection images. The first dimension
        is projection axis, second and third dimensions are
        the x- and y-axes of the projection image, respectively.
    ang : ndarray
        Projection angles in radians as an array.
    iters : scalar, optional
        Number of iterations of the algorithm.
    pad : list-like, optional
        Padding for projection images in x and y-axes.
    blur : bool, optional
        Blurs the edge of the image before registration.
    center: array, optional
        Location of rotation axis.
    algorithm : {str, function}
        One of the following string values.

        'art'
            Algebraic reconstruction technique :cite:`Kak:98`.
        'gridrec'
            Fourier grid reconstruction algorithm :cite:`Dowd:99`,
            :cite:`Rivers:06`.
        'mlem'
            Maximum-likelihood expectation maximization algorithm
            :cite:`Dempster:77`.
        'sirt'
            Simultaneous algebraic reconstruction technique.
        'tv'
            Total Variation reconstruction technique
            :cite:`Chambolle:11`.
        'grad'
            Gradient descent method with a constant step size

    upsample_factor : integer, optional
        The upsampling factor. Registration accuracy is
        inversely proportional to upsample_factor.
    rin : scalar, optional
        The inner radius of the blur function. Pixels inside
        rin are set to one.
    rout : scalar, optional
        The outer radius of the blur function. Pixels outside
        rout are set to zero.
    save : bool, optional
        Saves projections and corresponding reconstruction
        for each algorithm iteration.
    debug : bool, optional
        Provides debugging info such as iterations and error.

    Returns
    -------
    ndarray
        3D stack of projection images with jitter.
    ndarray
        Error array for each iteration.
    """

    # Needs scaling for skimage float operations.
    prj, scl = scale(prj)

    # Shift arrays
    sx = np.zeros((prj.shape[0]))
    sy = np.zeros((prj.shape[0]))

    conv = np.zeros((iters))

    # Pad images.
    npad = ((0, 0), (pad[1], pad[1]), (pad[0], pad[0]))
    prj = np.pad(prj, npad, mode='constant', constant_values=0)

    # Register each image frame-by-frame.
    for n in range(iters):
        # Reconstruct image.
        rec = recon(prj, ang, center=center, algorithm=algorithm)

        # Re-project data and obtain simulated data.
        sim = project(rec, ang, center=center, pad=False)

        # Blur edges.
        if blur:
            _prj = blur_edges(prj, rin, rout)
            _sim = blur_edges(sim, rin, rout)
        else:
            _prj = prj
            _sim = sim

        # Initialize error matrix per iteration.
        err = np.zeros((prj.shape[0]))

        # For each projection
        for m in range(prj.shape[0]):

            # Register current projection in sub-pixel precision
            shift, error, diffphase = register_translation(
                _prj[m], _sim[m], upsample_factor)
            err[m] = np.sqrt(shift[0]*shift[0] + shift[1]*shift[1])
            sx[m] += shift[0]
            sy[m] += shift[1]

            # Register current image with the simulated one
            tform = tf.SimilarityTransform(translation=(shift[1], shift[0]))
            prj[m] = tf.warp(prj[m], tform, order=5)

        if debug:
            print('iter=' + str(n) + ', err=' + str(np.linalg.norm(err)))
            conv[n] = np.linalg.norm(err)

        if save:
            dxchange.write_tiff(prj, fdir + '/tmp/iters/prj/prj')
            dxchange.write_tiff(sim, fdir + '/tmp/iters/sim/sim')
            dxchange.write_tiff(rec, fdir + '/tmp/iters/rec/rec')

    # Re-normalize data
    prj *= scl
    return prj, sx, sy, conv
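A hedged invocation sketch (assuming the tomopy package provides the simulation helpers used below; the names follow the docstring, the numbers are arbitrary):

import tomopy

obj = tomopy.shepp3d()        # synthetic 3D phantom
ang = tomopy.angles(180)      # 180 projection angles
prj = tomopy.project(obj, ang)

prj_aligned, sx, sy, conv = align_seq(prj, ang, iters=5, pad=(10, 10))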
Code example #29
File: calculations.py  Project: bsipocz/POCS
def measure_offset(d0, d1, info={}, crop=True, pixel_factor=100, rate=None, verbose=False):
    """ Measures the offset of two images.

    This is a small wrapper around `skimage.feature.register_translation`. For now it just
    crops the data to the center of the image.

    Note
    ----
        This method will automatically crop data sets that are large. To prevent
        this, set crop=False.

    Parameters
    ----------
    d0 : {np.array}
        Array representing PGM data for first file (i.e. the first image)
    d1 : {np.array}
        Array representing PGM data for second file (i.e. the second image)
    info : {dict}, optional
        Optional information about the image, such as pixel scale, rotation, etc. (the default is {})
    crop : {bool}, optional
        Crop the image before offsetting (the default is True, which crops the data to 500x500)
    pixel_factor : {number}, optional
        Subpixel factor (the default is 100, which will give precision to 1/100th of a pixel)
    rate : {number}, optional
        The rate at which the mount is moving (the default is sidereal rate)
    verbose : {bool}, optional
        Print messages (the default is False)

    Returns
    -------
    dict
        A dictionary of information related to the offset
    """

    assert d0.shape == d1.shape, 'Data sets must be same size to measure offset'

    if crop and d0.shape[0] > 500:
        d0 = crop_data(d0)
        d1 = crop_data(d1)

    offset_info = {}

    # Default for transform matrix
    unit_pixel = 1 * (u.degree / u.pixel)

    # Get the WCS transformation matrix
    transform = np.array([
        [info.get('cd11', unit_pixel).value, info.get('cd12', unit_pixel).value],
        [info.get('cd21', unit_pixel).value, info.get('cd22', unit_pixel).value]
    ])

    # We want the negative of the applied orientation
    # theta = info.get('orientation', 0 * u.deg) * -1

    # Rotate the images so N is up (+y) and E is to the right (+x)
    # rd0 = rotate(d0, theta.value)
    # rd1 = rotate(d1, theta.value)

    shift, error, diffphase = register_translation(d0, d1, pixel_factor)

    offset_info['shift'] = (shift[0], shift[1])
    # offset_info['error'] = error
    # offset_info['diffphase'] = diffphase

    if transform is not None:

        coords_delta = np.array(shift).dot(transform)
        if verbose:
            print("Δ coords: {}".format(coords_delta))

        # pixel_scale = float(info.get('pixscale', 10.2859)) * (u.arcsec / u.pixel)

        sidereal = (15 * (u.arcsec / u.second))

        # Default to guide rate (0.9 * sidereal)
        if rate is None:
            rate = 0.9 * sidereal

        # # Number of arcseconds we moved
        ra_delta_as = (coords_delta[0] * u.deg).to(u.arcsec)
        dec_delta_as = (coords_delta[1] * u.deg).to(u.arcsec)
        offset_info['ra_delta_as'] = ra_delta_as
        offset_info['dec_delta_as'] = dec_delta_as

        # # How many milliseconds at current rate we are off
        ra_ms_offset = (ra_delta_as / rate).to(u.ms)
        dec_ms_offset = (dec_delta_as / rate).to(u.ms)
        offset_info['ra_ms_offset'] = ra_ms_offset.round()
        offset_info['dec_ms_offset'] = dec_ms_offset.round()

        delta_time = info.get('delta_time', 125 * u.second)

        ra_rate_rate = ra_delta_as / delta_time
        dec_rate_rate = dec_delta_as / delta_time

        ra_delta_rate = 1.0 - ((sidereal + ra_rate_rate) / sidereal)  # percentage of sidereal
        dec_delta_rate = 1.0 - ((sidereal + dec_rate_rate) / sidereal)  # percentage of sidereal
        offset_info['ra_delta_rate'] = round(ra_delta_rate.value, 4)
        offset_info['dec_delta_rate'] = round(dec_delta_rate.value, 4)

    return offset_info
Code example #30
File: tools_3d.py  Project: TomSlater/edx_abs
def series_align(im_series,align_output=[],start='Mid',smooth=True,smooth_window='3',sobel=True):
    '''Function to align a series of images.'''
    if align_output == []:
        series_dim = len(im_series)
        
        filtered_series = []
        
        for i in range(series_dim):
            filtered_series.append(im_series[i].copy())
        
        align_output = []
        
        if smooth == True:
            for i in range(series_dim):
                filtered_series[i] = filters.gaussian_filter(filtered_series[i],3)
        
        if sobel == True:
            for i in range(series_dim):
                filtered_series[i] = filters.sobel(filtered_series[i])
        
        #Align from first image
        if start == 'First':
            align_output.append(register_translation(filtered_series[0], filtered_series[0],100))
            for i in range(series_dim-1):
                align_output.append(register_translation(filtered_series[i], filtered_series[i+1],100))
                align_output[i+1][0][0] = align_output[i+1][0][0] + align_output[i][0][0]
                align_output[i+1][0][1] = align_output[i+1][0][1] + align_output[i][0][1]
        
        #Align from mid-image
        if start == 'Mid':
            
            #Find the index of the middle image; integer division works on
            #both Python 2 and 3
            mid_point = series_dim // 2
            
            align_output.append(register_translation(filtered_series[mid_point], filtered_series[mid_point], 100))
            
            for i in range(mid_point,0,-1):
                align_output.append(register_translation(filtered_series[i], filtered_series[i-1], 100))
                align_output[mid_point-i+1][0][0] = align_output[mid_point-i+1][0][0] + align_output[mid_point-i][0][0]
                align_output[mid_point-i+1][0][1] = align_output[mid_point-i+1][0][1] + align_output[mid_point-i][0][1]
                
            align_output = list(reversed(align_output))
            
            for i in range(mid_point,series_dim-1):
                align_output.append(register_translation(filtered_series[i], filtered_series[i+1], 100))
                align_output[i+1][0][0] = align_output[i+1][0][0] + align_output[i][0][0]
                align_output[i+1][0][1] = align_output[i+1][0][1] + align_output[i][0][1]
        
    #Apply calculated shifts to the image series
    shifted_im_series = []
    im_count = 0
    for im in im_series:
        shifted_im_series.append(interpolation.shift(im,align_output[im_count][0]))
        im_count = im_count + 1
        
    shifted_im_series = np.asarray(shifted_im_series)
        
    return(shifted_im_series, align_output)
Code example #31
def patch_register_roi_locs(meanIm1,meanIm2,roiinfo1,im_path,l=32):
    """ Function that takes as input two mean images and 
        roi coordinates on one of the images and returns 
        the ROI coordinates on a second image
        
        Arguments:
        ============================
        
        meanIm1:    np.array
                    mean image with known roi coordinates
        
        meanIm2:    np.array
                    mean image to map rois onto
                 
        roiinfo1:   dict
                    contains field idxs which is a list of arrays of 
                    roi coordinates on meanIm1
                
        l:          int
                    size of grid on which to motion register patches
        """
    
    xsz,ysz = meanIm1.shape

    mask_num = np.zeros([512,512])

    for n_,idxp in enumerate(roiinfo1['idxs']):
        #if n_ in table[table.columns[1]].tolist():
        mask_num[idxp[1],idxp[0]] = n_
        nPatch = xsz/float(l)

    #First register the whole images in rigid fashion to one another
    out = MP.image_registration.Register_Image(meanIm1,meanIm2[128:-128,128:-128],crop=1)
    shift_all = out[0]
    regIm2 =  np.fft.ifftn(fourier_shift(np.fft.fftn(meanIm1), shift_all)).real

    #plt.figure(figsize=(12,12))
    #plt.imshow(regIm2,cmap='binary_r',interpolation='None')

    #pth = os.path.join(im_path,'reg_img'+str(len(os.listdir(im_path)))+'.jpg')
    #plt.savefig(pth,dpi=100)
    #plt.clf()

    rois1 = []
    im_pairs = []
    roi_pairs = []
    roi_locs = cp.deepcopy(roiinfo1['idxs'])
    moved_rois = []
    shifts = []
    for i in range(int(nPatch)):


        sty = i*l
        for j in range(int(nPatch)):

            stx = j*l
            patch = meanIm2[stx:stx+l,sty:sty+l]
            bigpatch = patch  # np.pad(patch,maxShift,mode='median')
            shift,_,_ = register_translation(bigpatch,
                                             regIm2[stx:stx+l,sty:sty+l],
                                             upsample_factor=5.)
            # Guard against spurious large patch shifts
            if np.any(np.abs(shift)>30):
                shift = np.array([0,0])

            rois_patch = np.unique(mask_num[stx:stx+l,sty:sty+l]).tolist()

            for roi in rois_patch:
                roi = int(roi)
                if roi not in moved_rois:
                    roi_locs[roi][0] = roi_locs[roi][0] + 1*int(shift[1]) + 1*int(shift_all[1])
                    roi_locs[roi][1] = roi_locs[roi][1] + 1*int(shift[0]) + 1*int(shift_all[0])

            moved_rois = moved_rois + rois_patch



    return roi_locs,shift_all
Code example #32
 def func(dframe):
     frame1,frame2 = dframe[0], dframe[1]
     shift,error,diffphase = register_translation(frame1, frame2, 10)
     tframe = fourier_shift(np.fft.fftn(frame2), shift)
     tframe = np.fft.ifftn(tframe)
     return tframe.real
Code example #33
 def register(images):
     imp1, imp2 = images[0], images[1]
     shifts, _, _ = register_translation(imp1, imp2)
     return shifts
Code example #34
import matplotlib.pyplot as plt
import numpy as np

from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift

image = data.camera()
shift = (-22.4, 13.32)
# The shift corresponds to the pixel offset relative to the reference image
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x): {}".format(shift))

# pixel precision first
shift, error, diffphase = register_translation(image, offset_image)

fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1)
ax3 = plt.subplot(1, 3, 3)

ax1.imshow(image, cmap='gray')
ax1.set_axis_off()
ax1.set_title('Reference image')

ax2.imshow(offset_image.real, cmap='gray')
ax2.set_axis_off()
ax2.set_title('Offset image')

# Show the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")

plt.show()

print("Detected pixel offset (y, x): {}".format(shift))
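The same gallery example continues with subpixel registration by passing an upsample factor; a short continuation sketch using the variables above:

# subpixel precision: upsample the cross-correlation peak by a factor of 100
shift, error, diffphase = register_translation(image, offset_image, 100)
print("Detected subpixel offset (y, x): {}".format(shift))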
Code example #35
File: DriftCorrection.py  Project: asawaric/Newbie
# Assumed imports for this snippet; make_img is a helper defined elsewhere in DriftCorrection.py.
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import fourier_shift
from skimage.feature import register_translation

image = make_img(num_obj=8, obj_rad=8, nr=512, nc=512)
plt.imshow(image)

## The shift corresponds to the pixel offset relative to the reference image.
shift = (-22.4, 13.32)

## fourier_shift(input, shift) = Multi-dimensional fourier shift filter.
#The array is multiplied with the fourier transform of a shift operation. 
#offset_image is a frequency domain shift of reference image.
offset_image = fourier_shift(np.fft.fftn(image), shift)  # Compute the N-dimensional DFT of an 'image'.
offset_image = np.fft.ifftn(offset_image) #Computes the N-dimensional inverse discrete Fourier Transform.
print("Known offset (y, x): {}".format(shift))

## pixel precision first
shift, error, diffphase = register_translation(image, offset_image)  #Efficient subpixel image 
                                                                     #translation registration by cross-correlation.

fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)

ax1.imshow(image, cmap='gray')
ax1.set_axis_off()
ax1.set_title('Reference image')

ax2.imshow(offset_image.real, cmap='gray')
ax2.set_axis_off()
ax2.set_title('Offset image')
Code example #36
File: mtools.py  Project: ERCpy/ercpy
def align_img(img_one, img_two, method = 'imgreg', show=False, roi=True, sb_filtering=False, filt_size= 200,
              binning=2, feducial=1, manualxy = None, **kwargs):
    '''
    Function to align images or holograms using X-correlation
    Parameters
    ----------
    img_one : ndarray
        The reference image
    img_two : ndarray
        An image to align
    method : string
        Either 'imgreg' to use image registration from skimage,
        or 'xcorr' to use cross-correlation in real space from scipy,
        or 'feducial' to use fiducial markers,
        or 'manual' to displace the images manually
#        or 'gui_shift' to shift image interactively
    show : boolean or string
        Set True to plot the results, set 'diff' to show the difference of the images
    roi : boolean
        Set True to do alignment on a ROI instead of the whole image
    sb_filtering : boolean
        Set True for holograms (applies main-band filtering)
    filt_size : int
        Size of the filter for main-band filtering;
        used only for alignment of holograms
    binning : int
        Binning for the images during alignment, used in the 'xcorr' method
    feducial : int
        Number of fiducial markers for the 'feducial' method
    manualxy : tuple of 2 int
        Coordinates x,y for manual alignment

    Returns
    -------
    img_algn : ndarray
        Aligned image img_two
    (xdrift, ydrift): tuple
        drift correction coordinates

    Notes
    -----
    * The manual alignment method requires coordinates provided via the 'manualxy' parameter
    * The X-correlation method is slow! Use only small images and a small ROI (binning is recommended)
    * Binning is implemented for the 'xcorr' method only

    See Also
    --------

    '''

    (ry,cx) = img_one.shape
#    img_one = img_one.astype(float)
#    img_two = img_two.astype(float)

    # --- IFFT main band:
    if sb_filtering:
        fft_img_one = fftshift(fft2(img_one))
        (xx,yy) = np.meshgrid(np.linspace(-ry/2, ry/2-1, ry), np.linspace(-cx/2, cx/2-1, cx))
        rr = np.sqrt(xx**2+yy**2)
        mask = np.zeros((ry,cx))
        mask[rr<filt_size] = 1
        img_one_m = np.absolute(ifft2(ifftshift(fft_img_one*mask)))
        # --- Processing second image
        fft_img_two = fftshift(fft2(img_two))
        img_two_m = np.absolute(ifft2(ifftshift(fft_img_two*mask)))
    else:
        img_one_m = img_one
        img_two_m = img_two

    # --- Use ROI if True
    if roi:
        # --- GUI based assignment of ROI
        f, ax = plt.subplots(1, 1)
        ax.imshow(img_one_m, cmap=cm.binary_r)
        rect = utils.RoiRect()
        if hasattr(f.canvas.manager, 'window'): f.canvas.manager.window.raise_()
        plt.waitforbuttonpress(100)
        plt.waitforbuttonpress(5)
        plt.close(f)
    else:
        rect = Rectangle((0,0), 1, 1,fc='none', ec='r')
        rect.x0 = 0
        rect.y0 = 0
        rect.y1 = ry-1
        rect.x1 = cx-1

    # --- Select alignment method
    if method == 'imgreg':

        img_one_roi = img_one_m[rect.y0:rect.y1, rect.x0:rect.x1]
        img_two_roi = img_two_m[rect.y0:rect.y1, rect.x0:rect.x1]

#        px_rescale_y = np.float(img_one_m.shape[0])/np.float(img_one_roi.shape[0])
#        px_rescale_x = np.float(img_one_m.shape[1])/np.float(img_one_roi.shape[1])
        upsample = 4
        # --- Upsampled image registration for ROI
        shift, error, diffphase = register_translation(img_one_roi, img_two_roi, upsample)

        ydrift = shift[0]
        xdrift = shift[1]
        print(shift)

#        # --- Accounting for change in pixel size   <- is not needed since ROI doesn't change px-size!!
#        ydrift = ydrift*px_rescale_y
#        xdrift = xdrift*px_rescale_x

    elif method == 'xcorr': # slow X-corr for small images only!
        # TODO: Check if the method is working properly
        # --- Selecting ROI and X-correlating
        img_one_m = imresize(img_one_m, 1.0/binning)
        img_two_m = imresize(img_two_m, 1.0/binning)
        template = img_two_m[rect.y0/binning:rect.y1/binning, rect.x0/binning:rect.x1/binning]
        cc = correlate2d(img_one_m, template, boundary='symm', mode='same')
        imax = np.argmax(np.absolute(cc))
        ypeak, xpeak = np.unravel_index(imax, cc.shape) # coordinates of X-corr max
#        ydrift = (rect.y0/binning-ypeak-template.shape[0]-1)*binning
#        xdrift = (rect.x0/binning-xpeak-template.shape[1]-1)*binning
        ydrift = (ypeak-(rect.y1-rect.y0)/2)*binning
        xdrift = (xpeak-(rect.x1-rect.x0)/2)*binning

    elif method == 'feducial': # alignment using fiducial markers
        # TODO: add multiple marker alignments
        img_one_roi = img_one_m[rect.y0:rect.y1, rect.x0:rect.x1]
        img_two_roi = img_two_m[rect.y0:rect.y1, rect.x0:rect.x1]

        f, ax = plt.subplots(1, 1)
        ax.imshow(img_one_roi, cmap=cm.binary_r)
        ax.set_title('Please set feducial marker position for 1st image')
        marker_one = utils.RoiPoint()
        if hasattr(f.canvas.manager, 'window'): f.canvas.manager.window.raise_()
        plt.waitforbuttonpress(100)
        plt.waitforbuttonpress(1)
        plt.close(f)

        f, ax = plt.subplots(1, 1)
        ax.imshow(img_two_roi, cmap=cm.binary_r)
        ax.set_title('Please set feducial marker position for 2nd image')
        marker_two = utils.RoiPoint()
        if hasattr(f.canvas.manager, 'window'): f.canvas.manager.window.raise_()
        plt.waitforbuttonpress(100)
        plt.waitforbuttonpress(1)
        plt.close(f)

        xdrift = marker_one.x0 - marker_two.x0
        ydrift = marker_one.y0 - marker_two.y0

    elif method == 'manual':
        if manualxy:
            xdrift = manualxy[0]
            ydrift = manualxy[1]
        else:
            raise ValueError('Method manual requires shifts provided in manualxy argument.')

#    elif method is 'gui_shift': #TODO: create 'gui_shift' method

    else:
        raise ValueError('Wrong method argument! Check doc.')

    print("xydrift = %d, %d" % (xdrift, ydrift) )

    img_algn = np.roll(img_two, int(ydrift), axis=0)
    img_algn = np.roll(img_algn, int(xdrift), axis=1)

    if isinstance(show, str):
        if show == 'diff':
            f, ax = plt.subplots(1,1)
            ax.imshow(img_algn - img_one, cmap=cm.binary_r)
            ax.set_title(('xydrift = ', str(xdrift), str(ydrift)))
    elif show:
        f, ax = plt.subplots(1,1)
        ax.imshow(img_algn, cmap=cm.binary_r)
    return (img_algn, (xdrift, ydrift))