Example #1
def mapping_automatic(
        file_tetra,
        tf1_matrix,
        show=0,
        f=None,
        bg=None,
        tol=1):  #'E:\CMJ trace analysis\\autopick\\tetraspeck.tif'
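    """Map the right (acceptor) channel onto the left (donor) channel.

    Argument roles, as inferred from their use below:
    file_tetra -- path to a TetraSpeck .tif file, or an already-loaded image
    tf1_matrix -- 3x3 matrix from the manual mapping, used to pre-match spots
    show       -- if nonzero, plot the channel overlay
    f          -- spot-enhancement threshold for enhance_blobies (auto if None)
    bg         -- background level(s); estimated per channel if None
    tol        -- tolerance passed to enhance_blobies
    Returns the 3x3 right -> left transformation matrix; the result is cached
    in a '-P.map' file next to the input.
    """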
    #print(tf1_matrix)
    root, name = os.path.split(file_tetra)
    save_fn = os.path.join(root, name[:-4] + '-P.map')

    if os.path.isfile(save_fn):
        #print('loading automatic mapping')
        P = np.zeros((4, 4))
        Q = np.zeros((4, 4))

        tm = np.zeros((3, 3))

        with open(save_fn, 'r') as infile:
            for ii in range(0, 16):
                P[ii // 4, ii % 4] = float(infile.readline())
            for ii in range(0, 16):
                Q[ii // 4, ii % 4] = float(infile.readline())

        # the .map file stores two 4x4 polynomial coefficient blocks (P for x,
        # Q for y), as written by the save code at the end of this function;
        # only the constant and linear terms are used: they are packed into a
        # 3x3 matrix (left -> right) and inverted to get the right -> left map
        tm[0, 2] = P[0, 0]
        tm[0, 1] = P[0, 1]
        tm[0, 0] = P[1, 0]
        tm[1, 2] = Q[0, 0]
        tm[1, 1] = Q[0, 1]
        tm[1, 0] = Q[1, 0]
        tm[2, 2] = 1
        transformation_matrixC = np.linalg.inv(tm)

    else:
        #print('calculating automatic map')
        # Open image
        if isinstance(file_tetra, str):
            image_tetra = tiff.imread(file_tetra)
        else:  #assume you are passing an image
            image_tetra = file_tetra

        # default for 16 bits 50000, for 8 bits 200 (=256*50000/64000)
        if f is None:
            if np.max(image_tetra) > 256:
                f = 50000
            else:
                f = 200

        if bg is None:  # 'is None' avoids an elementwise comparison when bg is an array
            # take two different backgrounds, one for donor, one for acceptor channel
            # (the slicing assumes a square image, split into two halves along x)
            sh = np.shape(image_tetra)
            thr_donor = get_threshold(image_tetra[:, 1:sh[0] // 2])
            thr_acceptor = get_threshold(image_tetra[:, sh[0] // 2:])
            bg = np.zeros(sh)
            bg[:, 1:sh[0] // 2] = thr_donor
            bg[:, sh[0] // 2:] = thr_acceptor
        image_tetra = remove_background(image_tetra.astype(float), bg)
        #    image_tetra=image_tetra.astype(float)-bg
        #    image_tetra[image_tetra<0]=0
        image_tetra = image_tetra.astype(np.uint16)

        position1 = []
        position2 = []
        # left, right, enhanced left and enhanced right image for keypoint detection, adapt f
        while np.shape(position1)[0] < 50 or np.shape(position2)[0] < 50:
            # while loop to lower f and increase the number of spots found
            l, r, l_enh, r_enh = enhance_blobies(image_tetra, f, tol)

            gray1 = l_enh
            gray2 = r_enh

            # initialize the AKAZE descriptor, then detect keypoints and extract
            # local invariant descriptors from the image
            detector = cv2.AKAZE_create()
            (kps1, descs1) = detector.detectAndCompute(gray1, None)
            (kps2, descs2) = detector.detectAndCompute(gray2, None)
            position1 = cv2.KeyPoint_convert(kps1)
            position2 = cv2.KeyPoint_convert(kps2)
            f = f * 0.9

        # gray1/gray2, the keypoints and their positions from the last loop
        # iteration are reused below (recomputing them here would be redundant)

        if 0:  # disabled branch: automatic mapping based on matching features
            print("keypoints: {}, descriptors: {}".format(
                len(kps1), descs1.shape))
            print("keypoints: {}, descriptors: {}".format(
                len(kps2), descs2.shape))

            # Match the features
            # this part works properly, judging by the overlaid images
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.knnMatch(descs1, descs2, k=2)
            # Apply ratio test
            pts1, pts2 = [], []
            size_im = np.shape(gray1)
            height, width = np.shape(gray1)

            for m in matches:  # BF Matcher already found the matched pairs, no need to double check them
                pts1.append(kps1[m[0].queryIdx].pt)
                pts2.append(kps2[m[0].trainIdx].pt)

            pts1 = np.array(pts1).astype(np.float32)
            pts2 = np.array(pts2).astype(np.float32)

            transformation_matrixC, mask = cv2.findHomography(
                pts2, pts1, cv2.RANSAC, 20)

            A = pts1[0:len(matches):int(len(matches) / 15)]
            im3 = cv2.drawMatchesKnn(gray1,
                                     kps1,
                                     gray2,
                                     kps2,
                                     matches[1:100],
                                     None,
                                     flags=2)
            if show:
                plt.figure(1)
                plt.imshow(im3)
                plt.title('mapping matching keypoints')
                plt.show()

        else:
            # find matching points (no other features) based on the manual mapping
            dst = cv2.perspectiveTransform(
                position1.reshape(-1, 1, 2),
                np.linalg.inv(tf1_matrix))  #reshape needed for persp.transform
            dst = dst.reshape(-1, 2)

            # pair each right-channel spot with the nearest transformed left-channel spot
            dist = np.zeros((len(position2), len(dst)))
            for ii in range(0, len(position2)):
                for jj in range(0, len(dst)):
                    dist[ii, jj] = np.sqrt((position2[ii, 0] - dst[jj, 0])**2 +
                                           (position2[ii, 1] - dst[jj, 1])**2)
            pts1, pts2 = [], []
            for ii in range(0, len(position2)):
                jj = np.argmin(dist[ii, :])  # argmin avoids ambiguity when the minimum is tied
                if dist[ii, jj] < 4:
                    pts1.append(position1[jj])
                    pts2.append(position2[ii])
            pts1 = np.array(pts1).astype(np.float32)
            pts2 = np.array(pts2).astype(np.float32)

            transformation_matrixC, mask = cv2.findHomography(
                pts2, pts1, cv2.RANSAC, 20)

        # produce an image in which the overlay between two channels is shown
        array_size = np.shape(gray2)
        imC = cv2.warpPerspective(gray2, transformation_matrixC,
                                  array_size[::-1])

        #cv2.imshow("transformed ", im4)
        if show:
            plt.figure(11, figsize=(18, 9))
            plt.subplot(1, 6, 6)
            AA = (gray1 > 0) + 2 * (imC > 0)
            plt.imshow((gray1 > 0) + 2 * (imC > 0),
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            #plt.colorbar()
            plt.title('automatic align \n#spots overlap {:d}'.format(
                np.sum(AA == 3)))
            plt.show()
            plt.pause(0.05)

        #saving to .map file
        P = np.zeros((4, 4))
        tm = np.linalg.inv(transformation_matrixC)
        P[0, 0] = tm[0, 2]
        P[0, 1] = tm[0, 1]
        P[1, 0] = tm[0, 0]
        Q = np.zeros((4, 4))
        Q[0, 0] = tm[1, 2]
        Q[0, 1] = tm[1, 1]
        Q[1, 0] = tm[1, 0]

        with open(save_fn, 'w') as outfile:
            for value in P.flatten():
                outfile.write('{0:4.10e}\n'.format(value))
            for value in Q.flatten():
                outfile.write('{0:4.10e}\n'.format(value))
    #print('done with automatic align')

    return transformation_matrixC
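
A minimal usage sketch (the file name is hypothetical; mapping_manual from Example #3 below is assumed to provide the manual pre-alignment matrix that seeds the automatic map):

    tf1 = mapping_manual('tetraspeck.tif')
    tf2 = mapping_automatic('tetraspeck.tif', tf1, show=1)
    # a second call skips the detection and loads 'tetraspeck-P.map' directly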
Example #2
    def set_background_and_transformation(self):
        """
        sum 20 image, find spots& background. Then loop over all image, do background subtract+ extract traces
        :return:
        """
        _, hdim, vdim, n_images = read_one_page(self.image_fn, pageNb=0)
        im_array = np.dstack([(read_one_page(self.image_fn,
                                             pageNb=ii)[0]).astype(float)
                              for ii in range(20)])
        im_mean20 = np.mean(im_array, axis=2).astype(int)
        bg = rollingball(im_mean20)[1]
        im_mean20_correct = im_mean20 - bg
        im_mean20_correct[im_mean20_correct < 0] = 0
        threshold = get_threshold(im_mean20_correct)
        im_mean20_correct = remove_background(im_mean20_correct, threshold)

        # note: optionally a fixed threshold can be set, as in the IDL code
        # note 2: do we need a different threshold for donor and acceptor?

        root, name = os.path.split(self.image_fn)
        pks_fn = os.path.join(root, name[:-4] + '-P.pks')
        if os.path.isfile(pks_fn):  # load previously picked peaks
            ptsG = []
            dstG = []
            with open(pks_fn, 'r') as infile:
                for jj in range(0, 10000):
                    A = infile.readline()
                    if A == '':
                        break
                    ptsG.append([float(A.split()[1]), float(A.split()[2])])
                    A = infile.readline()
                    dstG.append([float(A.split()[1]), float(A.split()[2])])
            ptsG = np.array(ptsG)
            dstG = np.array(dstG)
            pts_number = len(ptsG)
        else:
            pts_number, label_size, ptsG = analyze_label.analyze(
                im_mean20_correct[:, 0:vdim // 2])
            # there should be different options:
            #        donor: im_mean20_correct[:,0:vdim//2]
            #        acceptor: im_mean20_correct[:,vdim//2:]
            #        donor+acceptor
            dstG = cv2.perspectiveTransform(
                ptsG.reshape(-1, 1, 2).astype(np.float32),
                np.linalg.inv(self.mapping._tf2_matrix))  # float input required
            dstG = dstG.reshape(-1, 2)
            # shift into the acceptor half; 256 is half the (assumed 512 px) width
            dstG = np.array([[ii[0] + 256, ii[1]] for ii in dstG])

            #saving to pks file
            with open(pks_fn, 'w') as outfile:
                for jj in range(0, pts_number):
                    # odd rows: donor (left) spot; even rows: acceptor (right) spot
                    outfile.write(
                        ' {0:4.0f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} \n'.format(
                            (jj * 2) + 1, ptsG[jj][0], ptsG[jj][1], 0, 0))
                    outfile.write(
                        ' {0:4.0f} {1:4.4f} {2:4.4f} {3:4.4f} {4:4.4f} \n'.format(
                            (jj * 2) + 2, dstG[jj][0], dstG[jj][1], 0, 0))

        # (an earlier, disabled version computed a sub-pixel-centered Gaussian
        # for every spot in ptsG and dstG; a single centered 11x11 Gaussian
        # is used instead)
        ALL_GAUSS = makeGaussian(11, fwhm=3, center=(5, 5))
        return bg, threshold, pts_number, dstG, ptsG, im_mean20_correct, n_images, hdim, ALL_GAUSS
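
A sketch of how the return values might be consumed (the surrounding class is not shown; 'movie' is a hypothetical instance with its image_fn and mapping attributes already set):

    (bg, threshold, pts_number, dstG, ptsG, im_mean20_correct,
     n_images, hdim, ALL_GAUSS) = movie.set_background_and_transformation()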
Example #3
def mapping_manual(file_tetra,
                   show=0,
                   f=None,
                   bg=None,
                   tol=1):  #'E:\CMJ trace analysis\\autopick\\tetraspeck.tif'
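    """Determine the right -> left channel mapping interactively.

    Sketch of the flow below: the user clicks four bright spots in the left
    channel, then nudges the matching markers in the right channel with the
    arrow keys (Esc confirms each point); cv2.findHomography fits the
    transformation, which is cached in a '-P.coeff' file for later runs.
    """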

    root, name = os.path.split(file_tetra)
    save_fn = os.path.join(root, name[:-4] + '-P.coeff')
    transformation_matrixC = np.zeros((3, 3))
    transformation_matrixC[2, 2] = 1
    if os.path.isfile(save_fn):
        #print('loading manual transformation coeff')
        #import from .coeff file:
        with open(save_fn, 'r') as infile:
            transformation_matrixC[0, 2] = float(infile.readline()) - 256
            transformation_matrixC[0, 0] = float(infile.readline())
            transformation_matrixC[0, 1] = float(infile.readline())
            transformation_matrixC[1, 2] = float(infile.readline())
            transformation_matrixC[1, 0] = float(infile.readline())
            transformation_matrixC[1, 1] = float(infile.readline())
        return transformation_matrixC
    else:
        global points_right  # globals in nested function, need to be defined here as well
        global ii
        # Open image
        if isinstance(file_tetra, str):
            image_tetra = tiff.imread(file_tetra)
        else:  #assume you are passing an image
            image_tetra = file_tetra

        # default for 16 bits 50000, for 8 bits 200 (=256*50000/64000)
        if f is None:
            f = 50000
        #print(f)

        if bg is None:
            # take two different backgrounds, one for donor, one for acceptor channel
            # (the slicing assumes a square image, split into two halves along x)
            sh = np.shape(image_tetra)
            thr_donor = get_threshold(image_tetra[:, 1:sh[0] // 2])
            thr_acceptor = get_threshold(image_tetra[:, sh[0] // 2:])
            bg = np.zeros(sh)
            bg[:, 1:sh[0] // 2] = thr_donor
            bg[:, sh[0] // 2:] = thr_acceptor
        image_tetra = remove_background(image_tetra.astype(float), bg)
        #    image_tetra=image_tetra.astype(float)-bg
        #    image_tetra[image_tetra<0]=0
        image_tetra = image_tetra.astype(np.uint16)
        position1 = []
        position2 = []
        # left, right, enhanced left and enhanced right image for keypoint detection, adapt f
        while np.shape(position1)[0] < 50 or np.shape(position2)[0] < 50:
            # while loop to lower f and increase the number of spots found
            l, r, l_enh, r_enh = enhance_blobies(image_tetra, f, tol)

            gray1 = l_enh
            gray2 = r_enh

            # initialize the AKAZE descriptor, then detect keypoints and extract
            # local invariant descriptors from the image
            detector = cv2.AKAZE_create()
            (kps1, descs1) = detector.detectAndCompute(gray1, None)
            (kps2, descs2) = detector.detectAndCompute(gray2, None)
            position1 = cv2.KeyPoint_convert(kps1)
            position2 = cv2.KeyPoint_convert(kps2)
            f = f * 0.9

        # now we start to move the markers in the right part, first an overall move
        from pynput.keyboard import Key, Listener
        import matplotlib.pyplot as plt
        import time

        #    points_left=[[ii, ii+10] for ii in range(11)]
        #    points_right=points_left.copy()

        fig = plt.figure(10, figsize=(18, 9))
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.imshow(gray1)
        ax1.set_title('click on bright spots in the left image')
        ax2 = fig.add_subplot(1, 2, 2, sharex=ax1, sharey=ax1)
        ax2.imshow(gray2)

        points_left = plt.ginput(4)  # adapted to 4 points; 3 were expected to work, to match the IDL code
        points_right = points_left.copy()
        for xx, yy in points_left:
            ax1.plot(xx, yy, markersize=10, c='w', marker='o', fillstyle='none')

        xp = [xx for xx, yy in points_right]
        yp = [yy for xx, yy in points_right]
        line2, = ax2.plot(xp,
                          yp,
                          markersize=10,
                          c='w',
                          marker='o',
                          fillstyle='none',
                          linestyle='none')
        fig.canvas.draw()
        fig.canvas.flush_events()
        plt.pause(1)

        ax1.set_title('')
        ax2.set_title(
            'move the point with the arrow keys; press Esc when it matches the location in the other channel'
        )

        def on_release(key):
            global points_right
            global ii
            # nudge the current marker by one pixel per arrow key
            if key == Key.up:
                points_right[ii] = [points_right[ii][0], points_right[ii][1] + 1]
            elif key == Key.down:
                points_right[ii] = [points_right[ii][0], points_right[ii][1] - 1]
            elif key == Key.right:
                points_right[ii] = [points_right[ii][0] + 1, points_right[ii][1]]
            elif key == Key.left:
                points_right[ii] = [points_right[ii][0] - 1, points_right[ii][1]]

            if key in (Key.esc, Key.up, Key.down, Key.right, Key.left):
                # stop the listener after every relevant key press so the
                # marker can be redrawn by the loop below
                return False

        matches = len(points_left)
        for ii in range(matches):
            xp_new = []  # sentinels differ from xp/yp so the loop below runs at least once
            yp_new = []
            ax1.set_xlim(points_left[ii][0] - 50, points_left[ii][0] + 50)
            ax1.set_ylim(points_left[ii][1] - 50, points_left[ii][1] + 50)
            line1 = ax1.plot(points_left[ii][0],
                             points_left[ii][1],
                             markersize=10,
                             c='y',
                             marker='+',
                             fillstyle='none')
            line2b = ax2.plot(points_right[ii][0],
                              points_right[ii][1],
                              markersize=10,
                              c='y',
                              marker='+',
                              fillstyle='none')
            fig.canvas.draw()
            fig.canvas.flush_events()
            plt.pause(0.1)
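            # collect single key presses until one leaves the point unchanged
            # (e.g. Esc): a fresh Listener is created for each press, with a
            # marker redraw in between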
            while xp != xp_new or yp != yp_new:
                xp = [xx for xx, yy in points_right]
                yp = [yy for xx, yy in points_right]
                line2.set_xdata(xp)
                line2.set_ydata(yp)
                fig.canvas.draw()
                fig.canvas.flush_events()
                plt.pause(0.1)
                # Collect events until released
                with Listener(on_release=on_release) as listener:
                    listener.join()

                xp_new = [xx for xx, yy in points_right]
                yp_new = [yy for xx, yy in points_right]

            line1[-1].remove()
            line2b[-1].remove()

            fig.canvas.draw()
            fig.canvas.flush_events()
            plt.pause(0.1)
        # (end of the per-spot loop: each position was refined in a zoomed-in window)

        # convert the point lists to arrays, matching the pts1/pts2 convention
        points_right = np.array(points_right)
        points_left = np.array(points_left)
        transformation_matrixC, mask = cv2.findHomography(
            points_right, points_left, cv2.RANSAC, 20)

        # produce an image in which the overlay between two channels is shown
        array_size = np.shape(gray2)
        imC = cv2.warpPerspective(gray2, transformation_matrixC,
                                  array_size[::-1])

        #cv2.imshow("transformed ", im4)
        if show:
            plt.figure(11, figsize=(18, 9))
            plt.subplot(1, 6, 1)
            plt.imshow(gray1,
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            plt.title('green channel')

            plt.subplot(1, 6, 2)
            plt.imshow(gray2,
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            plt.title('red channel')

            plt.subplot(1, 6, 3)
            plt.imshow(imC,
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            plt.title('red transformed')

            plt.subplot(1, 6, 4)
            A = (gray1 > 0) + 2 * (gray2 > 0)
            plt.imshow(A,
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            # plt.colorbar()
            plt.title('unaligned \n#spots overlap (yellow) {:d}'.format(
                np.sum(A == 3)))

            plt.subplot(1, 6, 5)
            AA = (gray1 > 0) + 2 * (imC > 0)
            plt.imshow((gray1 > 0) + 2 * (imC > 0),
                       extent=[0, array_size[1], 0, array_size[0]],
                       aspect=1)
            #plt.colorbar()
            plt.title('manual align \n#spots overlap {:d}'.format(
                np.sum(AA == 3)))
            plt.show()
            plt.pause(0.05)

        plt.close(10)  # close the interactive alignment figure

        #saving to .coeff file:
        with open(save_fn, 'w') as outfile:
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[0, 2] +
                                               256))
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[0, 0]))
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[0, 1]))
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[1, 2]))
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[1, 0]))
            outfile.write('{0:4.10e}\n'.format(transformation_matrixC[1, 1]))

    # print(transformation_matrixC)
    # print('done with manual align')
    return transformation_matrixC
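
The arrow-key interaction above relies on a pynput pattern that is easy to miss: the Listener stops itself after each relevant key release so the caller can redraw and then listen again. A minimal self-contained sketch of just that pattern (independent of the mapping code):

    from pynput.keyboard import Key, Listener

    def wait_for_arrow_or_esc():
        # Block until an arrow key or Esc is released, then return it.
        # Returning False from the callback stops the Listener, which
        # unblocks listener.join() -- the same trick on_release uses above.
        pressed = []

        def on_release(key):
            if key in (Key.up, Key.down, Key.left, Key.right, Key.esc):
                pressed.append(key)
                return False  # stop the listener

        with Listener(on_release=on_release) as listener:
            listener.join()
        return pressed[0]

    # e.g. loop until Esc:
    # while wait_for_arrow_or_esc() != Key.esc:
    #     pass  # redraw the markers here, as mapping_manual does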