def test_fit(eng):
    reference = arange(25).reshape(5, 5)
    algorithm = CrossCorr()
    deltas = [[1, 2], [-2, 1]]
    shifted = [shift(reference, delta, mode='wrap', order=0) for delta in deltas]
    model = algorithm.fit(shifted, reference=reference)
    assert allclose(model.toarray(), deltas)
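The fitted model can then be applied back to the images it was fit on. A minimal sketch, assuming the same shifted, reference, and model as in the test above (and assuming transform accepts the same list-of-arrays input that fit does):

# Apply the estimated displacements to undo the shifts;
# transform() is the same call the registration pipelines below rely on.
registered = model.transform(shifted)
print(registered.toarray().shape)  # (2, 5, 5): two registered 5x5 images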
Example #3
    def register(self, src, trg, trg_mask=None, src_mask=None):
        """ Implementation of pair-wise registration using thunder-registration

        For more information on the model estimation, refer to https://github.com/thunder-project/thunder-registration
        This function takes two 2D single channel images and estimates a 2D translation that best aligns the pair. The
        estimation is done by maximising the correlation of the Fourier transforms of the images. Once, the translation
        is estimated, it is applied to the (multi-channel) image to warp and, possibly, ot hte ground-truth. Different
        interpolations schemes could be more suitable for images and ground-truth values (or masks).

        :param src: 2D single channel source moving image
        :param trg: 2D single channel target reference image
        :param src_mask: Mask of source image. Not used in this method.
        :param trg_mask: Mask of target image. Not used in this method.
        :return: Estimated 2D transformation matrix of shape 2x3
        """
        # Initialise instance of CrossCorr object
        ccreg = CrossCorr()
        # Compute translation between pair of images
        model = ccreg.fit(src, reference=trg)
        # Get translation as an array
        translation = [-x for x in model.toarray().tolist()[0]]
        # Fill in transformation matrix
        warp_matrix = np.eye(2, 3)
        warp_matrix[0, 2] = translation[1]
        warp_matrix[1, 2] = translation[0]
        # Return transformation matrix
        return warp_matrix
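A hypothetical usage sketch for the method above (the file names and the registration instance are illustrative assumptions; warpAffine is standard OpenCV):

import cv2
import numpy as np

# Hypothetical single-channel images of the same shape
src = cv2.imread('moving.tif', cv2.IMREAD_GRAYSCALE)
trg = cv2.imread('reference.tif', cv2.IMREAD_GRAYSCALE)

# 'registration' stands in for an instance of the class defining register()
warp_matrix = registration.register(src, trg)

# Apply the estimated translation; warpAffine expects a 2x3 float matrix
aligned = cv2.warpAffine(src, warp_matrix, (trg.shape[1], trg.shape[0]))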
Example #4
def main(config_data):

    data = td.images.fromtif(path=config_data["input_path"],
                             engine=sc,  # 'sc' is a SparkContext defined outside this excerpt
                             npartitions=int(config_data["npartitions"]))
    ############################################################
    # Reduce noise in the images with a Gaussian filter
    ############################################################
    data = data.map(lambda x: gaussian_filter(
        x, sigma=float(config_data["sigma"]), order=0))

    ####################################################################################################
    # Motion correction via image registration; this step aligns the images to a mean reference
    ####################################################################################################
    reference = data.mean().toarray()
    algorithmMC = CrossCorr()
    model = algorithmMC.fit(data, reference)
    shifts = model.transformations
    registered = model.transform(data)

    ####################################################################################
    # Local non-negative matrix factorization (NMF) for source extraction
    ####################################################################################

    algorithm = NMF(k=int(config_data["k"]),
                    percentile=int(config_data["percentile"]),
                    min_size=int(config_data["min_size"]),
                    max_iter=int(config_data["max_iter_nmf"]),
                    overlap=float(config_data["overlap_nmf"]))

    model = algorithm.fit(registered,
                          chunk_size=(int(config_data["chunk_size_1"]),
                                      int(config_data["chunk_size_2"])),
                          padding=(int(config_data["padding_1"]),
                                   int(config_data["padding_2"])))

    ####################################################################################
    # Find ROIs by merging the spatial regions extracted in the NMF step
    ####################################################################################
    merged = model.merge(overlap=float(config_data["overlap_merge"]),
                         max_iter=int(config_data["max_iter_merge"]),
                         k_nearest=int(config_data["k_nearest"]))

    print('Total number of regions found: %d' % merged.regions.count)

    #####################################################################
    # Dump the identified ROI coordinates to a JSON file
    #####################################################################
    regions = [{
        'coordinates': region.coordinates.tolist()
    } for region in merged.regions]

    result = {'dataset': config_data["dataset"], 'regions': regions}
    submission.append(result)  # 'submission' is a module-level list defined outside this excerpt
    with open(config_data["output"] + '.json', 'w') as f:
        f.write(json.dumps(submission))
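For reference, a hypothetical config_data dictionary covering every key the pipeline above reads (all values are illustrative assumptions, not tuned recommendations):

config_data = {
    "input_path": "images/*.tif",  # hypothetical path
    "npartitions": 8,
    "sigma": 1.0,                  # Gaussian smoothing width
    "k": 10,                       # NMF components per block
    "percentile": 99,
    "min_size": 20,
    "max_iter_nmf": 30,
    "overlap_nmf": 0.1,
    "chunk_size_1": 32,
    "chunk_size_2": 32,
    "padding_1": 8,
    "padding_2": 8,
    "overlap_merge": 0.1,
    "max_iter_merge": 2,
    "k_nearest": 10,
    "dataset": "example-dataset",  # hypothetical name
    "output": "results"            # written to results.json
}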
def test_fit_axis(eng):
    reference = arange(60).reshape(2, 5, 6)
    algorithm = CrossCorr(axis=0)
    a = shift(reference[0], [1, 2], mode='wrap', order=0)
    b = shift(reference[1], [-2, 1], mode='wrap', order=0)
    c = shift(reference[0], [2, 1], mode='wrap', order=0)
    d = shift(reference[1], [1, -2], mode='wrap', order=0)
    shifted = [asarray([a, b]), asarray([c, d]),]
    model = algorithm.fit(shifted, reference=reference)
    assert allclose(model.toarray(), [[[1, 2], [-2, 1]], [[2, 1], [1, -2]]])
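With axis=0 each plane along that axis is registered independently, so the fitted model stores one (row, column) displacement per plane and per volume, which is exactly what the assertion above checks:

print(model.toarray().shape)  # (2, 2, 2): (n_volumes, n_planes, 2)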
def main():

    data = td.images.fromtif(
        path='/home/yash/PycharmProjects/Thunder/neurofinder.00.00/images/*',
        npartitions=4)

    reference = data.mean().toarray()
    algorithm = CrossCorr()
    model = algorithm.fit(data, reference=reference)
    shifts = model.transformations  # one estimated displacement per image
    registered = model.transform(data)
    print(registered)
Example #8
def register(data):
    """
    Function for registering data. The reference image is computed as the mean of
    the provided data.

    Parameters
    ----------
    data : numpy array or images
        Raw image data as a numpy array or thunder images object.

    Returns
    -------
    registered : images
        The registered data.
    shifts : numpy array
        The estimated displacement for each image (as returned by model.toarray()).
    """

    if len(data.shape) == 4:
        algorithm = CrossCorr(axis=0)
    else:
        algorithm = CrossCorr()

    ref = data.mean().toarray()

    model = algorithm.fit(data, ref)
    registered = model.transform(data)
    shifts = model.toarray()

    return registered, shifts
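A short usage sketch for register (the TIFF path is a hypothetical placeholder; td is thunder, as in the pipelines above):

import thunder as td

data = td.images.fromtif('images/*.tif')  # hypothetical input path
registered, shifts = register(data)
print(shifts.shape)  # (n_images, 2) for 2D frames: one (row, col) shift each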
Example #9
def register_blocks_piecewise(data, size=(64, 64)):
    """
    Function for registering data. The reference image is computed as the mean of
    the provided data. The registration is done independently on blocks of the specified
    size. The resulting shifts are used to compute a piecewise affine transform and
    shift the images.

    Parameters
    ----------
    data : numpy array or images
        Raw image data as a numpy array or thunder images object.

    size : tuple
        Tuple specifying the block sizes to use for chunking.
    """

    blocks = data.toblocks(chunk_size=size, padding=(int(size[0] * 0.1), int(size[1] * 0.1)))
    algorithm = CrossCorr()

    def reg_shifts(data):
        ref = data.mean(axis=0)
        model = algorithm.fit(data, ref)
        return model.toarray()

    shifts = blocks.map_generic(reg_shifts)

    src_cols = linspace(0, data.shape[1], shifts.shape[0])
    src_rows = linspace(0, data.shape[2], shifts.shape[1])
    src_rows, src_cols = meshgrid(src_rows, src_cols)
    src = dstack([src_cols.flat, src_rows.flat])[0]

    shifts = shifts.toarray().flatten()

    def piecewise_shift(item):
        (k, v) = item

        frame_shift = asarray([x[k] for x in shifts])
        dst = asarray([s + x for s, x in zip(src, frame_shift)])

        tform = PiecewiseAffineTransform()
        tform.estimate(src, dst)
        return warp(v, tform)

    return data.map(piecewise_shift, value_shape=data.value_shape, dtype=data.dtype, with_keys=True)
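A usage sketch under the same assumptions as the other examples (data is a thunder images object; the block size is illustrative):

# One piecewise-affine warp per frame, estimated from per-block shifts
# on 64x64 chunks with 10% padding.
registered = register_blocks_piecewise(data, size=(64, 64))

The padding around each block (10% of the block size) gives the per-block cross-correlation some context beyond the block boundary.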
Example #10
def register_blocks(data, size=(64, 64)):
    """
    Function for registering data. The reference image is computed as the mean of
    the provided data. The registration is done independently on blocks of the specified
    size, and the shifts are also applied independently per block.

    Parameters
    ----------
    data : numpy array or images
        Raw image data as a numpy array or thunder images object.

    size : tuple
        Tuple specifying the block sizes to use for chunking.
    """

    blocks = data.toblocks(chunk_size=size, padding=(int(size[0] * 0.1), int(size[1] * 0.1)))
    algorithm = CrossCorr()

    def reg(data):
        ref = data.mean(axis=0)
        model = algorithm.fit(data, ref)
        return model.transform(data).toarray()

    return blocks.map(reg).toimages()
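Compared to the piecewise variant above, each block here is warped on its own, which is cheaper but can leave discontinuities at block boundaries. The same illustrative call:

registered = register_blocks(data, size=(64, 64))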
Example #11
def Image_Alignment(reference, skew_image, method="AKAZE", save_match=False, save=False):
    if method == "AKAZE":
        import cv2

        print("Alignement is in process using '{}' method".format(method))        
        img1 = cv.imread(reference, cv.IMREAD_GRAYSCALE)  # referenceImage
        img2 = cv.imread(skew_image, cv.IMREAD_GRAYSCALE)  # sensedImage
    
        # Initiate AKAZE detector
        akaze = cv.AKAZE_create()
        # Find the keypoints and descriptors with AKAZE
        kp1, des1 = akaze.detectAndCompute(img1, None)
        kp2, des2 = akaze.detectAndCompute(img2, None)
        # BFMatcher with default params
        bf = cv.BFMatcher()
        matches = bf.knnMatch(des1, des2, k=2)
        # Apply ratio test
        good_matches = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good_matches.append([m])
        if save_match:
            # Draw matches
            img3 = cv.drawMatchesKnn(img1,kp1,img2,kp2,good_matches,None,flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
            cv.imwrite('matches_Akaze.jpg', img3)
        # =============================================================================
        # Image warping
        # =============================================================================
        # Select good matched keypoints
        ref_matched_kpts = np.float32([kp1[m[0].queryIdx].pt for m in good_matches])
        sensed_matched_kpts = np.float32([kp2[m[0].trainIdx].pt for m in good_matches])
        
        # Compute homography
        H, status = cv.findHomography(sensed_matched_kpts, ref_matched_kpts, cv.RANSAC,5.0)
        # Warp image
        warped_image = cv.warpPerspective(img2, H, (img1.shape[1], img1.shape[0]))
        if save:
            cv.imwrite(os.path.join("Aligned_Images", skew_image.split("\\")[-1]), warped_image)
        return warped_image

    if method == "SURF":
        import cv2
#        reference =  "0_Sample_Form.jpg"
        #Skewed = "0_Guard_ CMS1500 5.jpg"
        #Skewed = "E:\\Tushar\\Projects\\Image_Analytics\\OCR\\Template_Based_OCR\\DATA\\Sample\\Guard_ CMS1500 12\\page_0.jpg"
#        skew_image = "E:\\Tushar\\Projects\\Image_Analytics\\OCR\\Template_Based_OCR\\DATA\\Sample\\Guard_ CMS1500 2\\page_0.jpg"
   
        print("Alignement is in process using '{}' method".format(method))        
        img1 = cv.imread(reference, cv.IMREAD_GRAYSCALE)  # referenceImage
        img2 = cv.imread(skew_image, cv.IMREAD_GRAYSCALE)  # sensedImage
        
        surf = cv2.xfeatures2d.SURF_create(400)
        kp1, des1 = surf.detectAndCompute(img1, None)
        kp2, des2 = surf.detectAndCompute(img2, None)
        
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        
        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                good.append(m)
        
        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            # See https://ch.mathworks.com/help/images/examples/find-image-rotation-and-scale-using-automated-feature-matching.html
            # for recovering the scale and rotation encoded in the homography.
            im_out = cv2.warpPerspective(img2, np.linalg.inv(M), (img1.shape[1], img1.shape[0]))
            if save:
                cv2.imwrite(os.path.join("Aligned_Images", skew_image.split("\\")[-1]), im_out)
            return im_out
        else:
            print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
            return img2


    if method == "SIFT":
        import cv2

##        print("Alignement is in process using '{}' method".format(method))        
#        reference =  "TestTemplate.jpg"
#        skew_image = "Test_Images\Guard_ CMS1500 1_page_0.jpg"
        print("Alignement is in process using '{}' method".format(method))        
        img1 = cv2.imread(reference, cv2.IMREAD_GRAYSCALE)  # referenceImage
        img2 = cv2.imread(skew_image, cv2.IMREAD_GRAYSCALE)  # sensedImage
    
        # Initiate SIFT detector
        sift = cv2.xfeatures2d.SIFT_create()
        
        # find the keypoints and descriptors with SIFT
        kp1, des1 = sift.detectAndCompute(img1,None)
        kp2, des2 = sift.detectAndCompute(img2,None)
        
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=100)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        
        # store all the good matches as per Lowe's ratio test.
        good = []
        for m, n in matches:
            if m.distance < 0.5 * n.distance:
                good.append(m)
                
        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            im_out = cv2.warpPerspective(img2, np.linalg.inv(M), (img1.shape[1], img1.shape[0]))
            if save:
                cv2.imwrite(os.path.join("Aligned_Images", skew_image.split("\\")[-1]), im_out)
            return im_out
        else:
            print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
            return img2

    if method == "CrossCorr":
        from registration import CrossCorr
        register = CrossCorr()
        import cv2
        
        print("Alignement is in process using '{}' method".format(method))        
#        img1 = cv2.imread(reference, cv2.IMREAD_GRAYSCALE)  # referenceImage
#        img2 = cv2.imread(skew_image, cv2.IMREAD_GRAYSCALE)  # sensedImage
#        model = register.fit(shifted, reference=reference)
#        
#        # the estimated transformations should match the deltas we used
#        reference =  "TestTemplate.jpg"
#        skew_image = "Test_Images\Guard_ CMS1500 1_page_0.jpg"
        img1 = cv2.imread(reference, cv2.IMREAD_GRAYSCALE)  # referenceImage
        img2 = cv2.imread(skew_image, cv2.IMREAD_GRAYSCALE)  # sensedImage        
        (w,h) = img1.shape
        img2_resized = cv2.resize(img2, (h,w))        
        model = register.fit(img2_resized, reference=img1)
        registered = model.transform(img2_resized)
        return np.array(registered)

    if method == "ORB":
        import cv2
        MIN_MATCHES = 24
#        reference = "TestTemplate.jpg"
#        skew_image = 'Test_Images\\Guard_ CMS1500 12_page_0.jpg'
        print("Alignement is in process using '{}' method".format(method))        
        img2 = cv2.imread(reference, cv2.IMREAD_GRAYSCALE)  # referenceImage
        img1 = cv2.imread(skew_image, cv2.IMREAD_GRAYSCALE)  # sensedImage
        orb = cv2.ORB_create(nfeatures=15000)
        kp1, des1 = orb.detectAndCompute(img1, None)
        kp2, des2 = orb.detectAndCompute(img2, None)
        
        # FLANN parameters for binary descriptors (algorithm=6 is FLANN_INDEX_LSH)
        index_params = dict(algorithm=6,
                            table_number=6,
                            key_size=12,
                            multi_probe_level=2)
        search_params = {}
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        
        # As per Lowe's ratio test to filter good matches
        good_matches = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good_matches.append(m)
        
        if len(good_matches) > MIN_MATCHES:
            src_points = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            dst_points = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            M, mask = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
            corrected_img = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
            return corrected_img
        return img2
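A usage sketch for the function above (file names are hypothetical placeholders):

# Align a scanned form against a reference template via feature matching
aligned = Image_Alignment("reference_form.jpg", "scanned_form.jpg",
                          method="AKAZE", save_match=False, save=True)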
Example #12
def estimate(img):
    # Estimate the horizontal offset between the even and odd rows of an
    # image (e.g. the line offset introduced by bidirectional scanning)
    # by cross-correlating the even rows against the odd rows.
    even = img[0:-2:2, :]
    odd = img[1:-1:2, :]
    algorithm = CrossCorr()
    model = algorithm.fit(even, odd)
    return model.toarray()[0][1]
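A quick self-check sketch on synthetic data (assumes the same numpy/scipy imports as the snippets above; the sign of the result follows the library's shift convention):

from numpy import arange, tile
from scipy.ndimage import shift

row = arange(10, dtype=float)
img = tile(row, (10, 1))                                    # every row identical
img[1::2] = shift(img[1::2], [0, 2], mode='wrap', order=0)  # displace odd rows by 2 columns
print(estimate(img))  # magnitude should be 2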
Example #13
from numpy import arange
from scipy.ndimage import shift

from registration import CrossCorr

reference = arange(9).reshape(3, 3)
deltas = [[1, 0], [0, 1]]
shifted = [shift(reference, delta, mode='wrap', order=0) for delta in deltas]

register = CrossCorr()
model = register.fit(shifted, reference=reference)

print('real deltas:')
print(deltas)

print('')

print('estimated deltas:')
print(model.toarray().tolist())