Code example #1
from pystackreg import StackReg

def align_img_stackreg(img_ref, img, align_flag=1, method='translation'):
    '''
    :param img_ref: reference image
    :param img: image to be aligned
    :param align_flag: 1: perform the alignment and return the aligned image; 0: return the shifts only
    :param method:
        'translation': x, y shift
        'rigid': translation + rotation
        'scaled rotation': translation + rotation + scaling
        'affine': translation + rotation + scaling + shearing
    :return:
        align_flag == 1: img_ali, row_shift, col_shift, sr (row_shift and col_shift only valid for translation)
        align_flag == 0: row_shift, col_shift, sr (row_shift and col_shift only valid for translation)
    '''
    if method == 'translation':
        sr = StackReg(StackReg.TRANSLATION)
    elif method == 'rigid':
        sr = StackReg(StackReg.RIGID_BODY)
    elif method == 'scaled rotation':
        sr = StackReg(StackReg.SCALED_ROTATION)
    elif method == 'affine':
        sr = StackReg(StackReg.AFFINE)
    else:
        raise ValueError(f"unrecognized align method '{method}': choose from "
                         "'translation', 'rigid', 'scaled rotation', 'affine'")
    tmat = sr.register(img_ref, img)
    row_shift = -tmat[1, 2]
    col_shift = -tmat[0, 2]
    if align_flag:
        img_ali = sr.transform(img)
        return img_ali, row_shift, col_shift, sr
    else:
        return row_shift, col_shift, sr
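A minimal usage sketch for the function above, assuming 2-D numpy arrays; the np.roll shift is synthetic and only for illustration (the recovered values match the applied shift up to pystackreg's sign convention):

import numpy as np

ref = np.random.rand(256, 256)
mov = np.roll(ref, shift=(5, -3), axis=(0, 1))  # synthetic 5-row, -3-column shift

img_ali, row_shift, col_shift, sr = align_img_stackreg(ref, mov, align_flag=1, method='translation')
print(row_shift, col_shift)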
Code example #2
def correct_drift(reference, move):
    # rigid-body registration: x/y translation plus rotation
    sr = StackReg(StackReg.RIGID_BODY)

    transformation_matrix = sr.register(reference, move)

    out_rot = sr.transform(move, transformation_matrix)
    return out_rot, transformation_matrix
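For one-off use, pystackreg also provides register_transform, which performs both steps in a single call; an equivalent sketch:

sr = StackReg(StackReg.RIGID_BODY)
out_rot = sr.register_transform(reference, move)  # register and transform in one call
transformation_matrix = sr.get_matrix()           # the fitted matrix is stored on the object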
Code example #3
def align_3D_coarse_axes(img_ref, img1, circle_mask_ratio=0.6, axes=0, shift_flag=1):
    '''
    Align a reconstructed tomogram to a reference 3D reconstruction along a given axis.
    Both volumes are projected (summed) along that axis, and the shift between the two
    projections is found by translation-based registration.

    Inputs:
    -----------
    img_ref: 3D array
        reference volume

    img1: 3D array
        volume to be aligned

    axes: int
        along which axis to project the 3D reconstructions to find the image shifts
        0, or 1, or 2

    Output:
    ----------------
    aligned tomogram, shift_matrix

    '''

    img_tmp = img_ref.copy()
    if circle_mask_ratio < 1:
        img_ref_crop = pyxas.circ_mask(img_tmp, axis=0, ratio=circle_mask_ratio, val=0)   
    else:
        img_ref_crop = img_tmp.copy() 
    s = img_ref_crop.shape
    stack_range = [int(s[0]*(0.5-circle_mask_ratio/2)), int(s[0]*(0.5+circle_mask_ratio/2))]
    prj0 = np.sum(img_ref_crop[stack_range[0]:stack_range[1]], axis=axes)

    img_tmp = img1.copy()    
    if circle_mask_ratio < 1:
        img_raw_crop = pyxas.circ_mask(img_tmp, axis=0, ratio=circle_mask_ratio, val=0)
    else:
        img_raw_crop = img_tmp.copy()
    prj1 = np.sum(img_raw_crop[stack_range[0]:stack_range[1]], axis=axes)
    
    sr = StackReg(StackReg.TRANSLATION)
    tmat = sr.register(prj0, prj1)
    r = -tmat[1, 2]
    c = -tmat[0, 2]  

    if axes == 0:
        shift_matrix = np.array([0, r, c])
    elif axes == 1:
        shift_matrix = np.array([r, 0, c])
    elif axes == 2:
        shift_matrix = np.array([r, c, 0])
    else:
        raise ValueError(f'axes must be 0, 1, or 2, got {axes}')
    if shift_flag:
        img_ali = pyxas.shift(img1, shift_matrix, order=0)
        return img_ali, shift_matrix    
    else:
        return shift_matrix
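A minimal usage sketch, assuming pyxas is importable and both volumes are numpy arrays of the same shape:

import numpy as np

ref_vol = np.random.rand(64, 64, 64)
mov_vol = np.roll(ref_vol, shift=3, axis=1)  # synthetic in-plane shift

img_ali, shift_matrix = align_3D_coarse_axes(ref_vol, mov_vol, circle_mask_ratio=0.6, axes=0)
print(shift_matrix)  # [0, row_shift, col_shift] for axes=0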
Code example #4
import numpy as np
import torch
from skimage import transform
from pystackreg import StackReg

class ImageTransformOpticalFlow():
    """
    Class written to register a stack of images for AET.
    Uses pystackreg's rigid-body registration to determine the subpixel
    shift and rotation between predicted and measured images.
    Input parameters:
        - shape: shape of a single image (rows, columns)
    """
    def __init__(self, shape, method="turboreg"):
        # 'method' is currently unused; registration always uses pystackreg (turboreg-style)
        self.shape = shape
        self.x_lin, self.y_lin = np.meshgrid(np.arange(self.shape[1]), np.arange(self.shape[0]))
        self.xy_lin = np.concatenate((self.x_lin[np.newaxis,], self.y_lin[np.newaxis,])).astype('float32')
        self.sr = StackReg(StackReg.RIGID_BODY)

    def _estimate_single(self, predicted, measured):
        assert predicted.shape == self.shape
        assert measured.shape == self.shape
        aff_mat = self.sr.register(measured, predicted)
        tform = transform.AffineTransform(matrix = aff_mat)
        measured_warp = transform.warp(measured, tform.inverse, cval = 1.0, order = 5)
        transform_final = aff_mat.flatten()[0:6]
        return measured_warp, transform_final

    def estimate(self, predicted_stack, measured_stack):
        assert predicted_stack.shape == measured_stack.shape
        transform_vec_list = np.zeros((6,measured_stack.shape[2]), dtype="float32")

        #Change from torch array to numpy array
        flag_predicted_gpu = predicted_stack.is_cuda
        if flag_predicted_gpu:
            predicted_stack = predicted_stack.cpu()

        flag_measured_gpu = measured_stack.is_cuda
        if flag_measured_gpu:
            measured_stack = measured_stack.cpu()        
        
        predicted_np = np.array(predicted_stack.detach())
        measured_np  = np.array(measured_stack.detach())
        
        #For each image, estimate the affine transform error
        for img_idx in range(measured_np.shape[2]):
            measured_np[...,img_idx], transform_vec = self._estimate_single(predicted_np[...,img_idx], \
                                                                      measured_np[...,img_idx])
            transform_vec_list[...,img_idx] = transform_vec
        
        #Change data back to torch tensor format
        if flag_predicted_gpu:
            predicted_stack = predicted_stack.cuda()

        measured_np = torch.tensor(measured_np)
        if flag_measured_gpu:
            measured_stack  = measured_stack.cuda()        
            measured_np     = measured_np.cuda()

        return measured_np, torch.tensor(transform_vec_list)
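A usage sketch under assumed conventions: the stacks are torch tensors of shape (rows, cols, n_images), matching the [..., img_idx] indexing above.

shape = (64, 64)
registrator = ImageTransformOpticalFlow(shape)

predicted = torch.rand(64, 64, 5)
measured = torch.rand(64, 64, 5)

measured_warped, tvecs = registrator.estimate(predicted, measured)
print(measured_warped.shape, tvecs.shape)  # torch.Size([64, 64, 5]) torch.Size([6, 5])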
Code example #5
    def alignChannels(self):
        h, w = self.stackedImage.shape[:2]
        gray = cv2.cvtColor(self.stackedImage, cv2.COLOR_BGR2GRAY)
        sr = StackReg(StackReg.TRANSLATION)

        # register each colour channel to the grayscale image, then warp the
        # channel with the resulting 3x3 matrix
        for i, C in enumerate(cv2.split(self.stackedImage)):
            M = sr.register(C, gray)
            self.stackedImage[:, :, i] = cv2.warpPerspective(
                self.stackedImage[:, :, i],
                M, (w, h),
                borderMode=cv2.BORDER_REPLICATE)
            g.ui.childConn.send("Aligning RGB")
Code example #6
def reg(fix, fix_enface, mov):

    #assumes (x, z, y) shape; x and y have the same size
    sr = StackReg(StackReg.TRANSLATION)  #may need to update to rigid body

    #first do enface by rotating and averaging down stack
    #this will find x-y shift and just the y-shift will be taken
    #rot_fix_mean = np.mean(np.rot90(fix, axes=(0,1)), axis=0)
    mov_enface = segment_nfl(mov)
    rot_mov = np.rot90(mov, axes=(0, 1))
    #rot_mov_mean = np.mean(rot_mov, axis=0)

    #do the registration
    y_mat = sr.register(fix_enface, mov_enface)
    #copy matrix for all frames in moving stack
    #(could be done more elegantly with np.repeat or np.tile; see the sketch after this function)
    enface_mat = np.zeros((mov.shape[1], 3, 3))
    enface_mat[:, 0, 0] = 1
    enface_mat[:, 1, 1] = 1
    enface_mat[:, 2, 2] = 1
    enface_mat[:, 1, -1] = y_mat[1, -1]

    y_mov = sr.transform_stack(rot_mov, tmats=enface_mat)

    #now do x-z registration
    #rotate back to normal orientation
    temp_mov = np.rot90(y_mov, axes=(1, 0))

    #do the registration
    xz_mat = np.zeros((mov.shape[0], 3, 3))
    #sr = StackReg(StackReg.AFFINE)
    for i, fix_frame, mov_frame in zip(range(mov.shape[0]), fix, temp_mov):
        #skip frame if empty with np.all(mov_frame==0)?
        tmat = sr.register(fix_frame, mov_frame)
        xz_mat[i] = tmat

    #do the transform
    reg_mov = sr.transform_stack(temp_mov, tmats=xz_mat)

    return reg_mov
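As the comment above suggests, the identity matrices can be built more compactly with np.tile; a sketch:

enface_mat = np.tile(np.eye(3), (mov.shape[1], 1, 1))  # one 3x3 identity per frame
enface_mat[:, 1, -1] = y_mat[1, -1]                    # same y-shift for every frame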
Code example #7
import os

import numpy as np
from pystackreg import StackReg
from skimage.metrics import mean_squared_error
from tifffile import imread  # assumed source of imread; it accepts the key= argument used below

def camera_registration_transform(path, ref_name, mov_name, method):
    print("Registering " + ref_name + " and " + mov_name)

    # Generate the Path
    ref_path = os.path.join(path, ref_name)
    mov_path = os.path.join(path, mov_name)

    # load reference and "moved" image
    ref_image = imread(ref_path, key=0)
    mov_image = imread(mov_path, key=0)

    # Specify the type of registration
    if method == 'TRANSLATION':
        transform_object = StackReg(StackReg.TRANSLATION)
    elif method == 'AFFINE':
        transform_object = StackReg(StackReg.AFFINE)
    elif method == 'RIGID_BODY':
        transform_object = StackReg(StackReg.RIGID_BODY)
    elif method == 'SCALED_ROTATION':
        transform_object = StackReg(StackReg.SCALED_ROTATION)
    elif method == 'BILINEAR':
        transform_object = StackReg(StackReg.BILINEAR)
    else:
        # fail fast rather than falling through to an undefined transform_object
        raise ValueError('The registration method you selected is unsupported - options include '
                         'TRANSLATION, AFFINE, RIGID_BODY, SCALED_ROTATION, and BILINEAR')

    transform_object.register(ref_image, mov_image)
    print(transform_object.get_matrix())

    transformed_image = np.uint16(transform_object.transform(mov_image))

    # Mean squared error before and after registration
    score1 = mean_squared_error(ref_image, mov_image)
    score2 = mean_squared_error(ref_image, transformed_image)
    print("MSE Before Registration = " + str(score1))
    print("MSE After Registration = " + str(score2))

    return transform_object
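A usage sketch with a hypothetical path and file names (placeholders, not files from the source project):

sr = camera_registration_transform('/data/run01', 'camera0.tif', 'camera1.tif', 'RIGID_BODY')
tmat = sr.get_matrix()  # reuse the fitted matrix on further frames via sr.transform(frame)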
Code example #8
def find_rot(fn, thresh=0.05, method=1):
    import h5py
    import numpy as np
    from pystackreg import StackReg
    from scipy.signal import medfilt2d
    # dftregistration is assumed to be an FFT-based subpixel registration helper
    # from the surrounding project; it is not part of pystackreg

    sr = StackReg(StackReg.TRANSLATION)
    f = h5py.File(fn, "r")
    ang = np.array(list(f["angle"]))
    img_bkg = np.squeeze(np.array(f["img_bkg_avg"]))
    if np.abs(ang[0]) < np.abs(ang[0] - 90):  # e.g, rotate from 0 - 180 deg
        tmp = np.abs(ang - ang[0] - 180).argmin()
    else:  # e.g.,rotate from -90 - 90 deg
        tmp = np.abs(ang - np.abs(ang[0])).argmin()
    img0 = np.array(list(f["img_tomo"][0]))
    img180_raw = np.array(list(f["img_tomo"][tmp]))
    f.close()
    img0 = img0 / img_bkg
    img180_raw = img180_raw / img_bkg
    img180 = img180_raw[:, ::-1]
    s = img0.shape  # (rows, cols)
    im1 = -np.log(img0)
    im2 = -np.log(img180)
    im1[np.isnan(im1)] = 0
    im2[np.isnan(im2)] = 0
    im1[im1 < thresh] = 0
    im2[im2 < thresh] = 0
    im1 = medfilt2d(im1, 5)
    im2 = medfilt2d(im2, 5)
    im1_fft = np.fft.fft2(im1)
    im2_fft = np.fft.fft2(im2)
    results = dftregistration(im1_fft, im2_fft)
    row_shift = results[2]
    col_shift = results[3]
    rot_cen = s[1] / 2 + col_shift / 2 - 1

    tmat = sr.register(im1, im2)
    rshft = -tmat[1, 2]
    cshft = -tmat[0, 2]
    rot_cen0 = s[1] / 2 + cshft / 2 - 1

    print(f"rot_cen = {rot_cen} or {rot_cen0}")
    if method:
        return rot_cen
    else:
        return rot_cen0
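Why the half-shift works: if the rotation axis is offset from the detector-column centre by d pixels, the horizontally flipped 180-degree projection is displaced from the 0-degree projection by 2d columns, so d = col_shift / 2 and the rotation centre sits at s[1]/2 + col_shift/2 - 1 (the -1 accounting for zero-based indexing). For example, with 2560-pixel-wide projections and a measured column shift of 24.0, rot_cen = 1280 + 12.0 - 1 = 1291.0.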
Code example #9
def stack_reg_consecutive_frames(z_reg, top_Z):
    T = len(z_reg)
    Z, _, _, C = z_reg[0].shape

    for t in range(T - 1):
        print(f'XY registering t = {t}')

        ref = z_reg[0][top_Z, :, :, 1]
        target_img = z_reg[t + 1][top_Z, :, :, 1]

        sr = StackReg(StackReg.RIGID_BODY)
        reg_matrix = sr.register(ref, target_img)

        # # Use registration matrix on whole stack

        for z in range(Z):
            for c in range(C):  # all channels, not a hardcoded 3
                z_reg[t + 1][z, :, :, c] = sr.transform(z_reg[t + 1][z, :, :, c])

    return z_reg
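Note that sr.transform reuses the matrix from the most recent register call; passing it explicitly makes the dependency clear and is safe if the StackReg object is reused elsewhere:

z_reg[t + 1][z, :, :, c] = sr.transform(z_reg[t + 1][z, :, :, c], tmat=reg_matrix)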
Code example #10
def align(frames, reference, transformation, normalize, pa1, pa2, conn):
    ref = cv2.imread(g.tmp + "cache/" + reference + ".png",
                     cv2.IMREAD_GRAYSCALE)
    if (pa1 != (0, 0) and pa2 != (0, 0)):
        # Processing Area
        ref = ref[pa1[1]:pa2[1], pa1[0]:pa2[0]]
    sr = StackReg(transformation)
    tmats = []
    h, w = ref.shape[:2]
    scaleFactor = min(1.0, (100 / h))
    ref = cv2.resize(ref, (int(w * scaleFactor), int(h * scaleFactor)))
    if (normalize):
        ref = cv2.normalize(ref,
                            ref,
                            alpha=0,
                            beta=255,
                            norm_type=cv2.NORM_MINMAX)
    i = 0
    for frame in frames:
        mov = cv2.imread(frame.replace("frames", "cache"),
                         cv2.IMREAD_GRAYSCALE)
        if (pa1 != (0, 0) and pa2 != (0, 0)):
            # Processing Area
            mov = mov[pa1[1]:pa2[1], pa1[0]:pa2[0]]
        mov = cv2.resize(mov, (int(w * scaleFactor), int(h * scaleFactor)))
        if (normalize):
            mov = cv2.normalize(mov,
                                mov,
                                alpha=0,
                                beta=255,
                                norm_type=cv2.NORM_MINMAX)
        M = sr.register(mov, ref)
        M[0][2] /= scaleFactor  # X
        M[1][2] /= scaleFactor  # Y
        tmats.append(M)
        conn.send("Aligning Frames")
        i += 1
    return tmats
Code example #11
def process(pth):

    print(f'\nProcessing {pth}')

    save_pth = pth / 'avg_bscans'
    #tmat_pth = pth / 'transformation_matrices'
    try:
        save_pth.mkdir()
        #tmat_pth.mkdir()
    except FileExistsError:
        ex('Save folder for registered stacks or tmats already exists. Delete and re-run.')

    #tell pystackreg that we will use a translational transformation;
    #there shouldn't be intra-volume rotation or shear (there might be for rod blink)
    sr = StackReg(StackReg.TRANSLATION)
    #register to the first slice and output the transformation matrix without transforming
    #iterate through the files in the stacks folder
    files = pth.glob('*.tif')
    loop_starts = time.time()

    for i, file in enumerate(files):
        if i == 0:
            fixed = io.imread(str(file))
            avg_fixed = np.average(fixed, axis=0)
            save_name = save_pth / f'avg_{file.name}'
            io.imsave(arr=img_as_float32(avg_fixed), fname=str(save_name))
        #start_time = time.time()
        print(f'Processing: {file}')
        #Intravolume registration of the fixed volume

        #load the stack; str() is needed for imread, otherwise only one frame is loaded
        #(the gaussian blur is applied inside the register call below)
        moving = io.imread(str(file))  #had gauss(), testing
        avg_moving = np.average(moving, axis=0)

        #gauss is assumed to be a gaussian-blur helper (e.g. skimage.filters.gaussian)
        t_mats = sr.register(gauss(avg_fixed), gauss(avg_moving))
        #remove the x shift from the matrix - horizontal movement isn't relevant here;
        #the volume should be acquired quickly enough that this isn't a problem
        t_mats[0, -1] = 0
        avg_moving = sr.transform(avg_moving, tmat=t_mats)

        #Used 'previous' reference to see if I could get rid of wiggle. Didn't seem to work.
        #t_mats = sr.register_stack(gauss(fixed), reference='previous')
        #t_mats[:,1,-1] = 0
        #fixed = sr.transform_stack(fixed, tmats=t_mats)
        #save the register fixed volume in the parent directory for reference
        save_name = save_pth / f'avg_{file.name}'
        #io.imsave(arr=img_as_uint(fixed), fname=str(save_name))
        io.imsave(arr=img_as_float32(avg_moving), fname=str(save_name))
        #get fixed out of memory - may not be worth the time?
        #print(f'Intravolume registration complete. File saved at {save_name}')

        #end_time = time.time()
        #print(f'{file} was processed in {(end_time - start_time):.2f}s. \
        #\n{((end_time - loop_starts)/60):.2f} minutes have elapsed.')

        #delete enumerate
        #if i==4:
        #ex('auto break')
    end_time = time.time()
    print(f'Run took {(end_time-loop_starts)/60:.2f} minutes')
Code example #12
    if use_average:
        mov_img = mov_his.read_frame_average(frames_for_average)
    else:
        mov_img = mov_his.read_frame(0)
    del mov_his

    print(f"Aligning {mov_img_path}")

    # increase the image contrast. this improved the results from stackreg drastically
    p2, p98 = np.percentile(mov_img, (2, 98))
    mov_img = exposure.rescale_intensity(mov_img, in_range=(p2, p98))

    # find the transformation matrix
    # we only use rigid body, gives: x shift, y shift, rotation
    sreg = StackReg(StackReg.RIGID_BODY)
    tmat = sreg.register(ref=ref_img, mov=mov_img)

    # if average_over_experiments:
    #     out_img = sreg.transform(mov=mov_img, tmat=tmat)
    #     ref_img = np.mean([ref_img, out_img], axis=0)

    # apply the matrix to the old ROIs
    # this is essentially just:
    # src = np.vstack((x, y, np.ones_like(x)))
    # dst = src.T @ matrix.T
    mov_points = np.copy(ref_points)
    mov_points[:, 0:2] = tf.matrix_transform(coords=ref_points[:, 0:2],
                                             matrix=tmat)

    # save in netcals image format. import via "load roi (legacy)"
    x, y, i = mov_points[:].T
Code example #13
z_stack

#%% Save stack

for idx, img in enumerate(z_stack):
    io.imsave(f'/Users/xies/Desktop/z_reg_t{idx}.tif', img.astype(np.int16))

#%% Manual adjustment

t = 8

ref = z_reg[t - 1][ref_z[t - 1], ...]
target_img = z_reg[t][ref_z[t], ...]

sr = StackReg(StackReg.RIGID_BODY)
sr.register(ref, target_img)
target_reg = sr.transform(z_reg[t][ref_z[t], ...])

plt.figure()
io.imshow(ref)
plt.figure()
io.imshow(target_reg - target_reg.min())
# for z in range(z_reg[t].shape[0]):
#     z_reg[t][z,...] = sr.transform( z_reg[t][z,...])

# io.imsave(f'/Users/xies/Desktop/z_reg_t{t}.tif',z_reg[t].astype(np.int16))

#%% Try Optic flow

from skimage import registration
from skimage import transform
Code example #14
File: misc.py  Project: gmysage/pyxas
def align_proj_sub(fn_ref, angle_ref, fn, angle1, binning=2, ratio=0.6, block_list=[], sli=[], ali_method='stackreg'):
    # ali_method can be 'stackreg' or 'cross_corr'
    num_angle = len(angle1)
    theta1 = angle1/180.*np.pi
    img1_ali = []
    rshft = []
    cshft = []
    img1_all, eng1, _ = pyxas.retrieve_norm_tomo_image(fn, index=-1, binning=binning, sli=sli)
    # img1_all = tomopy.prep.stripe.remove_stripe_fw(img1_all, level=5, wname='db5', sigma=1, pad=True)
    for j in range(num_angle):
        #sr = StackReg(StackReg.RIGID_BODY)
        sr = StackReg(StackReg.TRANSLATION)
        if not j%20:
            print(f'process file {fn}, angle #{j}/{num_angle}')
        ang_index = int(pyxas.find_nearest(angle_ref, angle1[j]))
        img0, _, _ = pyxas.retrieve_norm_tomo_image(fn_ref, index=ang_index, binning=binning, sli=sli)
        img0 = np.squeeze(img0)
        s = img0.shape
        ratio_s = 0.5 - ratio / 2
        ratio_e = 0.5 + ratio / 2
        img0 = img0[int(s[0]*ratio_s) : int(s[0]*ratio_e), int(s[1]*ratio_s) : int(s[1]*ratio_e)]
        #img1, eng1, _ = pyxas.retrieve_norm_tomo_image(fn, index=j, binning=binning, sli=sli)
        img1 = np.squeeze(img1_all[j])
        tmp = img1[int(s[0]*ratio_s) : int(s[0]*ratio_e), int(s[1]*ratio_s) : int(s[1]*ratio_e)]
        
        if ali_method == 'stackreg':
            tmat = sr.register(img0, tmp)
            tmp = sr.transform(img1)
            rs, cs = -tmat[1, 2], -tmat[0, 2]
        else:
            _,rs,cs = pyxas.align_img(img0, tmp)
            tmp = pyxas.shift(img1, [rs, cs], order=0)

        img1_ali.append(tmp)
        rshft.append(rs)
        cshft.append(cs)
    '''
    allow_list = list(set(np.arange(len(angle1))) - set(block_list))
    x = angle1/180.0*np.pi
    x = x[allow_list]
    y = np.array(cshft)[allow_list]
    zero_angle_pos = pyxas.find_nearest(theta1, 0)
    y0 = y[zero_angle_pos]
    y_scale = np.max(np.abs(y-y0))
    y = (y - y0)/y_scale
    r_fit, phi_fit= fit_cos(x, y, num_iter=6000, learning_rate=0.005)
    r_fit *= y_scale
    Y_fit = eval_fit_cos(r_fit, phi_fit, theta1)+y0

    r_shift_mean = np.mean(np.array(rshft)[allow_list])
    for j in range(num_angle):
        if not j%20:
            print(f'process file {fn}, angle #{j}/{num_angle}')
        tmp_mat=np.array([[1,0,-Y_fit[j]],[0,1,-r_shift_mean], [0,0,1]])
        img1, eng1, _ = pyxas.retrieve_norm_tomo_image(fn, index=j, binning=binning)
        img1 = np.squeeze(img1)
        tmp = sr.transform(img1, tmp_mat)
        img1_ali.append(tmp)
    '''

    img1_ali = np.array(img1_ali)
    return img1_ali, eng1, rshft, cshft
Code example #15
prev = (img).astype('uint8')  # img and im (a multi-frame PIL image) are defined earlier in the script

sr = StackReg(StackReg.RIGID_BODY)

for i in range(n_frames - 1):
    # Register the current frame against the previously aligned frame

    im.seek(i)
    img = np.array(im)
    #img = im#/np.amax(im)*255
    curr = (img).astype('uint8')

    t0 = time.time()
    #Rigid-body transformation (translation + rotation)

    sr.register(prev, curr)

    out = sr.transform(curr)
    out = out.astype('uint8')
    print(np.amax(curr))
    print(np.amax(out))

    #frame_out = cv2.hconcat([curr, out])
    #out = out/np.amax(out)*255
    #print(np.amax(out))
    #cv2.imshow("Before", curr)
    cv2.imshow("After", out)

    cv2.waitKey(200)

    prev = out
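One caveat with the cast above: sr.transform returns float values that can fall outside 0-255, and a bare astype('uint8') wraps around instead of saturating. Clipping first is safer:

out = np.clip(sr.transform(curr), 0, 255).astype('uint8')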
Code example #16
def image_reg(im0, im1, im2reg):
    '''Register im1 to the reference im0 by translation, then apply the
    resulting transform to im2reg.'''
    sr = StackReg(StackReg.TRANSLATION)
    sr.register(im0, im1)
    im_reg = sr.transform(im2reg)
    return im_reg
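A minimal usage sketch, assuming 2-D numpy arrays of equal shape (e.g. estimating the shift on one channel and applying it to another):

import numpy as np
from pystackreg import StackReg

ch0_t0 = np.random.rand(128, 128)    # reference channel, time 0
ch0_t1 = np.roll(ch0_t0, 4, axis=0)  # same channel, time 1 (synthetically shifted)
ch1_t1 = np.roll(ch0_t0, 4, axis=0)  # second channel to bring into register

ch1_reg = image_reg(ch0_t0, ch0_t1, ch1_t1)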
Code example #17
def align(frames, file, reference, referenceIndex, transformation, normalize,
          totalFrames, startFrame, dx, dy, aoi1, aoi2, conn):
    i = startFrame
    tmats = []
    minX = minY = maxX = maxY = 0
    video = Video()

    # Load Reference
    refOrig = ref = cv2.cvtColor(video.getFrame(file, reference),
                                 cv2.COLOR_BGR2GRAY)
    h1, w1 = ref.shape[:2]

    # Drift
    rfdx, rfdy, rfdx1, rfdy1 = Align.calcDriftDeltas(dx, dy, referenceIndex,
                                                     totalFrames)
    ref = ref[int(rfdy1):int(h1 - rfdy), int(rfdx1):int(w1 - rfdx)]

    # Area of Interest
    ref = cropAreaOfInterest(ref, aoi1, aoi2)
    refOrig = cropAreaOfInterest(refOrig, aoi1, aoi2, rfdx1, rfdy1)

    if (transformation != -1):
        sr = StackReg(transformation)
    else:
        sr = None

    h, w = ref.shape[:2]
    scaleFactor = min(1.0, (100 / h))
    ref = cv2.resize(ref, (int(w * scaleFactor), int(h * scaleFactor)))
    refOrig = cv2.resize(refOrig, (64, 64))

    if (normalize):
        ref = normalizeImg(ref)
        refOrig = normalizeImg(refOrig)

    for frame in frames:
        try:
            # Load Frame
            movOrig = mov = cv2.cvtColor(video.getFrame(file, frame),
                                         cv2.COLOR_BGR2GRAY)

            # Drift
            fdx, fdy, fdx1, fdy1 = Align.calcDriftDeltas(
                dx, dy, i, totalFrames)
            mov = mov[int(fdy1):int(h1 - fdy), int(fdx1):int(w1 - fdx)]

            # Area of Interest
            mov = cropAreaOfInterest(mov, aoi1, aoi2)

            # Resize
            mov = cv2.resize(mov, (int(w * scaleFactor), int(h * scaleFactor)))

            if (normalize):
                mov = normalizeImg(mov)

            # Stack Registration
            if (sr is not None):
                M = sr.register(mov, ref)
            else:
                M = np.identity(3)

            # Scale back up
            M[0][2] /= scaleFactor  # X
            M[1][2] /= scaleFactor  # Y

            # Shift the matrix origin to the Area of Interest, and then shift back
            M = shiftOrigin(M, aoi1[0], aoi1[1])

            # Add drift transform
            M[0][2] -= int(fdx1) - int(rfdx1)
            M[1][2] -= int(fdy1) - int(rfdy1)
            M = shiftOrigin(M, int(fdx1), int(fdy1))

            # Apply transformation to small version to check similarity to reference
            movOrig = cv2.warpPerspective(movOrig,
                                          M, (w1, h1),
                                          borderMode=cv2.BORDER_REPLICATE)

            if (aoi1 != (0, 0) and aoi2 != (0, 0)):
                # Area of Interest
                movOrig = cropAreaOfInterest(movOrig, aoi1, aoi2, rfdx1, rfdy1)
                xFactor = None
                yFactor = None
            else:
                xFactor = 64 / movOrig.shape[1]
                yFactor = 64 / movOrig.shape[0]
            movOrig = cv2.resize(movOrig, (64, 64))

            if (normalize):
                movOrig = normalizeImg(movOrig)

            # Similarity
            diff = calculateDiff(refOrig, movOrig, xFactor, yFactor, M, i)

            # Used for auto-crop
            minX, maxX, minY, maxY = Align.calcMinMax(M, minX, maxX, minY,
                                                      maxY)

            tmats.append((frame, M, diff))
        except Exception as e:
            print(e)
        conn.send("Aligning Frames")
        i += 1
    return (tmats, minX, minY, maxX, maxY)