Example #1
def find_inliers(src, dst):
    from skimage.transform import AffineTransform
    from skimage.measure import ransac

    # estimate affine transform model using all coordinates
    model = AffineTransform()
    model.estimate(src, dst)
    # robustly estimate affine transform model with RANSAC
    model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                                   residual_threshold=2, max_trials=100)
    return inliers
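A minimal usage sketch for find_inliers with synthetic correspondences (the points, noise model and corruption below are illustrative assumptions, not part of the original snippet):

import numpy as np
from skimage.transform import AffineTransform

# build synthetic correspondences: a known affine transform plus a few bad matches
rng = np.random.default_rng(0)
src_pts = rng.uniform(0, 100, (30, 2))
true_tform = AffineTransform(scale=(1.1, 0.9), rotation=0.05, translation=(3, -2))
dst_pts = true_tform(src_pts)
dst_pts[:5] += rng.uniform(20, 40, (5, 2))   # corrupt five matches

mask = find_inliers(src_pts, dst_pts)
print(mask.sum(), "of", len(src_pts), "correspondences kept as inliers")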
Example #2
def Testing(GCPs, img, outpImg):
    from skimage.transform import AffineTransform, warp
    from skimage.measure import ransac
    from affine import Affine

    img_info = rt.GetRasterInfo(inputRaster=img)
    img_array = np.array(img_info["raster"].GetRasterBand(1).ReadAsArray())

    map = GCPs[:, 0:2]
    pixel = GCPs[:, 2:4]

    model = AffineTransform()
    model.estimate(src=pixel, dst=map)
    print(model.params)

    model_robust, inliers = ransac((pixel, map),
                                   AffineTransform,
                                   min_samples=3,
                                   residual_threshold=1,
                                   max_trials=1000)
    print(model_robust.params)

    geoTransform = Affine.from_gdal(*img_info["raster"].GetGeoTransform())
    # print(geoTransform)
    geoTransform = Affine.from_gdal(model_robust.params[0, 2],
                                    model_robust.params[0, 0],
                                    model_robust.params[0, 1],
                                    model_robust.params[1, 2],
                                    model_robust.params[1, 0],
                                    model_robust.params[1, 1])
    print(geoTransform)

    point_A = []
    point_B = []
    for i, val in enumerate(GCPs):
        point_A.append(tuple([val[0], val[1]]))
        point_B.append(tuple([val[2], val[3]]))
    point_A = tuple(point_A)
    point_B = tuple(point_B)
    trn = Affine_Fit(from_pts=point_B, to_pts=point_A)
    res, tr = trn.To_Str()

    geoTransform_ = Affine.from_gdal(tr[2][3], tr[0][3], tr[1][3], tr[2][4],
                                     tr[0][4], tr[1][4])
    print(geoTransform_)

    # geoTransform = Affine.from_gdal((1,2,3,5))

    nrows, ncols = np.shape(img_array)
    ras_name = gdal.GetDriverByName("GTiff").Create(outpImg, ncols, nrows, 1,
                                                    gdal.GDT_Float64)
    ras_name.SetGeoTransform(geoTransform_.to_gdal())
    # important part ends
    wkt = img_info["raster"].GetProjection()
    ras_name.SetProjection(wkt)
    ras_name.GetRasterBand(1).WriteArray(img_array,
                                         resample_alg=gdal.GRA_Lanczos)

    resampling_method = gdal.GRA_Lanczos
    ras_name = None
Example #3
def test_affine_estimation():
    # exact solution
    tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
    assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])

    # over-determined
    tform2 = estimate_transform('affine', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)

    # via estimate method
    tform3 = AffineTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3.params, tform2.params)  # older scikit-image exposed this as `_matrix`
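The test above relies on module-level SRC and DST fixtures (and assertion helpers) from scikit-image's own test suite; a minimal sketch of compatible fixtures is shown below (values are illustrative):

import numpy as np
from numpy.testing import assert_array_almost_equal
from skimage.transform import AffineTransform, estimate_transform

# illustrative control points: any non-degenerate configuration works
SRC = np.array([[0, 0], [0, 50], [50, 50], [50, 0], [25, 10], [10, 40]], dtype=float)
DST = AffineTransform(scale=(1.2, 1.1), rotation=0.1, translation=(5, -3))(SRC)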
Example #4
def Ransac(observation):
    import numpy as np
    from skimage import transform
    from skimage.transform import AffineTransform
    from skimage.measure import ransac

    # Estimate affine transformation without RANSAC
    model = AffineTransform()
    model.estimate(src=observation[:, 2:4], dst=observation[:, 0:2])
    # print("model.params=\n",model.params)
    nbPts = np.shape(observation)[0]
    B = []
    for i in range(nbPts):
        B.append(observation[i, 0])
        B.append(observation[i, 1])
    B = np.asarray(B)
    A = np.zeros((2 * nbPts, 6))
    A[::2, 0] = observation[:, 2]
    A[::2, 1] = observation[:, 3]
    A[::2, 2] = np.ones(nbPts)
    A[1::2, 3] = observation[:, 2]
    A[1::2, 4] = observation[:, 3]
    A[1::2, 5] = np.ones(nbPts)
    P_est_ = model.params.flatten()[:-3]
    absRsid_ = np.abs(np.dot(A, P_est_) - B)
    print("Using skimage:")
    print("Estimated param =", P_est_)
    print(("max=%.4f, mean=%.4f, std=%.4f, rmse=%.4f") %
          (np.max(absRsid_), np.mean(absRsid_), np.std(absRsid_),
           np.sqrt(np.mean(absRsid_**2))))

    print("Using Ransac:")
    model_robust, inliers = ransac((observation[:, 2:4], observation[:, 0:2]),
                                   AffineTransform,
                                   min_samples=3,
                                   residual_threshold=10,
                                   max_trials=1000)
    P_est_R = model_robust.params.flatten()[:-3]
    absRsid_R = np.abs(np.dot(A, P_est_R) - B)
    print("Estimated param:", P_est_R)
    inliers = np.asarray(inliers * 1)
    inliers_ = inliers[inliers.nonzero()]
    print("Number of outliers =", len(inliers) - len(inliers_))
    print(("max=%.4f, mean=%.4f, std=%.4f, rmse=%.4f") %
          (np.max(absRsid_R), np.mean(absRsid_R), np.std(absRsid_R),
           np.sqrt(np.mean(absRsid_R**2))))

    return P_est_R
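A hedged usage sketch for Ransac() with a synthetic observation array laid out the way the function expects (columns 0-1 map coordinates, columns 2-3 pixel coordinates); the values, noise and outliers are illustrative:

import numpy as np
from skimage.transform import AffineTransform

rng = np.random.default_rng(1)
pixel = rng.uniform(0, 500, (40, 2))
true_tf = AffineTransform(scale=(0.5, 0.5), rotation=0.02, translation=(1000, 2000))
mapped = true_tf(pixel) + rng.normal(0, 0.5, (40, 2))   # small measurement noise
mapped[:3] += 200                                       # three gross outliers
observation = np.hstack([mapped, pixel])                # [map_x, map_y, pix_x, pix_y]

params = Ransac(observation)                            # prints residual statistics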
Example #5
def test_estimate_affine_3d():
    ndim = 3
    src = np.random.random((25, ndim)) * 2**np.arange(7, 7 + ndim)
    matrix = np.array([[4.8, 0.1, 0.2, 25], [0.0, 1.0, 0.1, 30],
                       [0.0, 0.0, 1.0, -2], [0.0, 0.0, 0.0, 1.]])
    tf = AffineTransform(matrix=matrix)
    dst = tf(src)
    dst_noisy = dst + np.random.random((25, ndim))
    tf2 = AffineTransform(dimensionality=ndim)
    tf2.estimate(src, dst_noisy)
    # we check rot/scale/etc more tightly than translation because translation
    # estimation is on the 1 pixel scale
    assert_almost_equal(tf2.params[:, :-1], matrix[:, :-1], decimal=2)
    assert_almost_equal(tf2.params[:, -1], matrix[:, -1], decimal=0)
    _assert_least_squares(tf2, src, dst_noisy)
Example #6
def face_warp_coord(src_face,
                    src_face_lm,
                    dst_face_lm,
                    tri,
                    bg,
                    warp_only=False,
                    use_bg=True):
    """
    Function takes two faces and landmarks and warp one face around another
    according to the face landmarks.

    script modified from
    https://github.com/marsbroshok/face-replace/blob/master/faceWarp.py


    :param src_face: grayscale (?) image (np.array of int) of face
        which will warped around second face
    :param src_face_lm: landmarks for the src_face
    :param dst_face: predicted image landmarks (np.array of int) which will
        be replaced by src_face.
    :param bg: image background
    :return: image with warped face
    """
    src_face_coord = src_face_lm
    dst_face_coord = dst_face_lm

    affines = []
    # find affine mapping from source positions to destination
    for k in tri:
        affine = AffineTransform()
        affine.estimate(src_face_coord[k, :], dst_face_coord[k, :])
        affines.append(affine)

    inverse_affines = []
    # find the inverse affine mapping
    for k in tri:
        affine = AffineTransform()
        affine.estimate(dst_face_coord[k, :], src_face_coord[k, :])
        inverse_affines.append(affine)

    # NOTE: coord_map is not defined in this excerpt; it must map output image
    # coordinates back to source coordinates (see the linked faceWarp.py)
    coords = warp_coords(coord_map, src_face.shape)
    warped_face = map_coordinates(src_face, coords)
    if not warp_only:
        if use_bg:
            warped_face = _merge_images(warped_face, bg)
        else:
            warped_face = _merge_images(warped_face, src_face)
    return warped_face
Example #7
def AffineTransform_based(im1,
                          im2,
                          detector='SIFT',
                          max_features=5000,
                          feature_retention=0.15,
                          MIN_MATCH_COUNT=10):

    points1, points2 = featureAlign(im1, im2, detector, max_features,
                                    feature_retention, MIN_MATCH_COUNT)

    # estimate affine transform model using all coordinates
    model = AffineTransform()
    model.estimate(points1, points2)

    return model
    """ Third approach """
Example #8
def test_estimate_affine_3d(array_like_input):
    ndim = 3
    src = np.random.random((25, ndim)) * 2 ** np.arange(7, 7 + ndim)
    matrix = np.array([
        [4.8, 0.1, 0.2, 25],
        [0.0, 1.0, 0.1, 30],
        [0.0, 0.0, 1.0, -2],
        [0.0, 0.0, 0.0, 1.]
    ])

    if array_like_input:
        # list of lists for matrix and src coords
        src = [list(c) for c in src]
        matrix = [list(c) for c in matrix]

    tf = AffineTransform(matrix=matrix)
    dst = tf(src)
    dst_noisy = dst + np.random.random((25, ndim))
    if array_like_input:
        # list of lists for destination coords
        dst = [list(c) for c in dst]
    tf2 = AffineTransform(dimensionality=ndim)
    assert tf2.estimate(src, dst_noisy)
    # we check rot/scale/etc more tightly than translation because translation
    # estimation is on the 1 pixel scale
    matrix = np.asarray(matrix)
    assert_almost_equal(tf2.params[:, :-1], matrix[:, :-1], decimal=2)
    assert_almost_equal(tf2.params[:, -1], matrix[:, -1], decimal=0)
    _assert_least_squares(tf2, src, dst_noisy)
Example #9
def estimateStabTform(imgA, imgB):
    """
    @param arg1: input numpy image array at t frame in video sequence. 
    @param arg2: input numpy image array at t+1 frame in video sequence.
    
    @return: 
        Tranform: stabilized genrelized affine transform of form
          |a_0  a_2  t_x|
          |a_1  a_3  t_y|
          | 0    0    1 |
    parameter a_* define scale, rotation and shear and t_* represents translations.
        PointsLeft: Corner points in arg1. For visualization.
        PointsRight: Corner points in arg2. For visualization.
        inliers: clean correspondence from PointsLeft to PointsRight
        outliers: not clean correspondence from PointsLeft and PointsRight
    """

    PointsA, image_corner_ptsA = detectFASTFeatures(imgA, threshold=20)
    PointsB, image_corner_ptsB = detectFASTFeatures(imgB, threshold=20)
    freakExtractor = cv2.xfeatures2d.FREAK_create()
    pointsA, descriptorsA = freakExtractor.compute(imgA, PointsA)
    pointsB, descriptorsB = freakExtractor.compute(imgB, PointsB)

    # create BFMatcher object
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    # Match descriptors.
    matches = bf.match(descriptorsA,descriptorsB)

    A, B = [], []
    matches = sorted(matches, key=lambda x: x.distance)
    sel_matches = matches
    for m in sel_matches:
        A.append(list(pointsA[m.queryIdx].pt))
        B.append(list(pointsB[m.trainIdx].pt))
    Points_Left = np.array(A)
    Points_Right = np.array(B)

    model = AffineTransform()
    model.estimate(Points_Left, Points_Right)

    model_robust, inliers = ransac((Points_Right, Points_Left), AffineTransform, min_samples=3,
                                residual_threshold=2, max_trials=100)

    outliers = inliers == False

    return model_robust, Points_Left, Points_Right, inliers, outliers
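The returned model_robust is a plain skimage AffineTransform, so the stabilization parameters documented in the docstring can be read straight off it; a short hedged sketch (imgA and imgB are assumed to be two consecutive grayscale frames):

# sketch: reading stabilization parameters from the robust model
model_robust, pts_left, pts_right, inliers, outliers = estimateStabTform(imgA, imgB)
dx, dy = model_robust.translation   # t_x, t_y of the 3x3 matrix shown above
theta = model_robust.rotation       # in-plane rotation in radians
print(model_robust.params)          # full homogeneous 3x3 matrix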
Example #10
def estimate(self, src, dst):
    """Extended original function from: https://github.com/scikit-image/scikit-image/blob/master/skimage/transform/_geometric.py#L439
	Set the control points with which to perform the piecewise mapping.
	Return triangles generated from source and destination coordinates.
	Number of source and destination coordinates must match.
	Parameters
	----------
	src : (N, 2) array
		Source coordinates.
	dst : (N, 2) array
		Destination coordinates.
	Returns
	-------
	src_triangles : list
		List of triangles produced by Delaunay Triangulation on the source coordinates
	dst_triangles : list
		List of triangles produced by Delaunay Triangulation on the destination coordinates
	"""

    # forward piecewise affine
    # triangulate input positions into mesh
    self._tesselation = spatial.Delaunay(src)
    # find affine mapping from source positions to destination
    self.affines = []
    src_triangles = []
    dst_triangles = []

    for tri in self._tesselation.vertices:
        affine = AffineTransform()
        affine.estimate(src[tri, :], dst[tri, :])
        self.affines.append(affine)
        src_triangles.append(src[tri, :])
        dst_triangles.append(dst[tri, :])
    # inverse piecewise affine
    # triangulate input positions into mesh
    self._inverse_tesselation = spatial.Delaunay(dst)
    # find affine mapping from destination positions back to source
    self.inverse_affines = []
    for tri in self._inverse_tesselation.vertices:
        affine = AffineTransform()
        affine.estimate(dst[tri, :], src[tri, :])
        self.inverse_affines.append(affine)

    return src_triangles, dst_triangles
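The piecewise mapping rests on a Delaunay triangulation of the control points; a small standalone sketch of just that step (illustrative points; recent SciPy exposes the triangles as `simplices`, of which the `vertices` attribute used above is an older alias):

import numpy as np
from scipy import spatial

pts = np.array([[0, 0], [0, 10], [10, 0], [10, 10], [5, 5]], dtype=float)
tess = spatial.Delaunay(pts)
print(tess.simplices)   # (n_triangles, 3) index rows into pts, one triangle each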
Example #11
    def estimate(self, src, dst, delaunay):
        """Estimate the transformation from a set of corresponding points.
        Number of source and destination coordinates must match.
        Parameters
        ----------
        src : (N, 2) array
            Source coordinates.
        dst : (N, 2) array
            Destination coordinates.
        delaunay : (*, 3) array.
            The given triangulation
        Returns
        -------
        success : bool
            True, if model estimation succeeds.
        """

        # forward piecewise affine for a given triangulation
        self._tesselation = delaunay

        # find affine mapping from source positions to destination
        self.affines = []
        for tri in self._tesselation:
            affine = AffineTransform()
            affine.estimate(src[tri], dst[tri])
            self.affines.append(affine)
        # inverse piecewise affine: keep the same triangulation and find the
        # affine mapping from destination positions back to source
        self._inverse_tesselation = delaunay
        self.inverse_affines = []
        for tri in self._inverse_tesselation:
            affine = AffineTransform()
            affine.estimate(dst[tri], src[tri])
            self.inverse_affines.append(affine)

        return True
Example #12
def transformation_from_points(points1, points2):
    """
    https://scikit-image.org/docs/0.14.x/auto_examples/transform/plot_matching.html
    """

    points1, points2 = whiten(points1, points2)

    model = AffineTransform()
    model.estimate(points1, points2)
    model_robust, inliers = ransac((points1, points2),
                                   AffineTransform,
                                   min_samples=3,  # an affine model needs >= 3 point pairs
                                   residual_threshold=20,
                                   max_trials=100)

    scale = np.round(model_robust.scale, 2)
    translation = np.round(model_robust.translation, 2)
    rotation = np.round(model_robust.rotation, 2)
    outliers = inliers == False

    return scale, translation, rotation, inliers, outliers
Example #13
    def estimate(self, src, dst):
        """Estimate the transformation from a set of corresponding points.

        Number of source and destination coordinates must match.

        Parameters
        ----------
        src : (N, 2) array
            Source coordinates.
        dst : (N, 2) array
            Destination coordinates.

        Returns
        -------
        success : bool
            True, if model estimation succeeds.

        """

        # forward piecewise affine
        # triangulate input positions into mesh
        self._tesselation = Delaunay(src)
        # find affine mapping from source positions to destination
        self.affines = []
        for tri in self._tesselation.vertices:
            affine = AffineTransform()
            affine.estimate(src[tri, :], dst[tri, :])
            self.affines.append(affine)

        # inverse piecewise affine
        # triangulate input positions into mesh
        self._inverse_tesselation = Delaunay(dst)
        # find affine mapping from destination positions back to source
        self.inverse_affines = []
        for tri in self._inverse_tesselation.vertices:
            affine = AffineTransform()
            affine.estimate(dst[tri, :], src[tri, :])
            self.inverse_affines.append(affine)

        return True
Example #14
def affine_transform(image, output_shape):
    import numpy as np
    from skimage.transform import AffineTransform

    rows, cols = output_shape[0], output_shape[1]
    orig_rows, orig_cols = image.shape[0], image.shape[1]

    row_scale = float(orig_rows) / rows
    col_scale = float(orig_cols) / cols
    if rows == 1 and cols == 1:
        tform = AffineTransform(translation=(orig_cols / 2.0 - 0.5,
                                             orig_rows / 2.0 - 0.5))
    else:
        # 3 control points necessary to estimate exact AffineTransform
        src_corners = np.array([[1, 1], [1, rows], [cols, rows]]) - 1
        dst_corners = np.zeros(src_corners.shape, dtype=np.double)
        # take into account that 0th pixel is at position (0.5, 0.5)
        dst_corners[:, 0] = col_scale * (src_corners[:, 0] + 0.5) - 0.5
        dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5

        tform = AffineTransform()
        tform.estimate(src_corners, dst_corners)

    return tform
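A hedged sketch of how the returned transform would typically be applied with skimage.transform.warp to resize an image (the test image and output shape are illustrative):

from skimage import data
from skimage.transform import warp

image = data.camera()                    # any 2-D image
out_shape = (200, 300)
tform = affine_transform(image, out_shape)
resized = warp(image, tform, output_shape=out_shape, order=1,
               mode='constant', cval=0)
print(resized.shape)                     # (200, 300)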
Example #15
def frametracker_keypoints(
    img1,
    img2,
    nk=50,
    fn=9,
    ft=0.001,
    hk=0.1,
    min_samples=10,
    xchange=300,
    ychange=30,
    debug=False,
):
    """
    Determine overall frame shift using ORB detection
    and keypoint matching

    Parameters
    ----------
    img1 : ndarray (2D)
        The original image to analyse
    img2 : ndarray (2D)
        The new image to analyse
    nk : int, float
        The number of keypoints to use (default : 50)
    fn : int, optional
         fast_n from skimage.feature.ORB
         Minimum number of consecutive pixels out of 16 pixels on the circle
         that should all be either brighter or darker (default : 9)
    ft : float, optional
        fast_threshold from skimage.feature.ORB
        Threshold used to decide whether the pixels on the circle are
        brighter, darker or similar (default : 0.001)
    hk : float, optional
        harris_k from skimage.feature.ORB
        Sensitivity factor to separate corners from edges (default : 0.1)
    min_samples : int, optional
        min_samples from skimage.measure.ransac
        The minimum number of data points to fit a model to (default : 10)
    xchange : int, optional
        Maximum change along x-axis (default : 300)
    ychange : int, optional
        Maximum change along y-axis (default : 30)
    debug : Boolean
        Whether to add debugging outputs (default : False)

    Returns
    -------
    trans : array [-dy, -dx]
        The frame shift between img1 and img2
    """
    img1 = skutil.img_as_float(img1)
    img2 = skutil.img_as_float(img2)

    descriptor_extractor = ORB(
        n_keypoints=nk,
        fast_n=fn,
        fast_threshold=ft,
        harris_k=hk,
    )

    # determine keypoints and extract coordinates for first image
    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # determine keypoints and extract coordinates for second image
    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    # determine matching coordinates
    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # create empty lists
    src = []
    dst = []

    for matches in matches12:
        # find index of the match from original image and image being compared
        a_index = matches[0]
        b_index = matches[1]
        # use the index from above to find the original coordinates from the
        # images
        a1orig = keypoints1[a_index]
        b1orig = keypoints2[b_index]
        # Create a list of the matched coordinates
        a1x = a1orig[1]
        a1y = a1orig[0]
        b1x = b1orig[1]
        b1y = b1orig[0]
        xch = abs(a1x - b1x)
        ych = abs(a1y - b1y)
        # Create a list of the matched coordinates
        if (xch < xchange) & (ych < ychange):
            src.append(a1orig)
            dst.append(b1orig)

    src = np.array(src)
    dst = np.array(dst)

    if debug:
        plt.figure()
        plt.imshow(img1, cmap='gray')
        plt.plot(keypoints1[:, 1], keypoints1[:, 0], '.r')
        plt.savefig("DEBUG_WELLTRACKING_frame_a_plus_keypoints.jpg")
        plt.close()
        plt.figure()
        plt.imshow(img2, cmap='gray')
        plt.plot(keypoints2[:, 1], keypoints2[:, 0], '.r')
        plt.savefig("DEBUG_WELLTRACKING_frame_b_plus_keypoints.jpg")
        plt.close()
        plt.figure()
        ax0 = plt.gca()
        plot_matches(ax0, img1, img2, keypoints1, keypoints2, matches12)
        plt.savefig("DEBUG_WELLTRACKING_matches.jpg")
        plt.close()

    try:
        # estimate affine transform model using all coordinates
        model = AffineTransform()
        model.estimate(src, dst)

        # robustly estimate affine transform model with RANSAC
        model_robust = ransac((dst, src),
                              AffineTransform,
                              min_samples=min_samples,
                              residual_threshold=2,
                              max_trials=100)[0]
        trans = model_robust.translation
        return trans
    except BaseException:
        debugfolder = tempfile.mkdtemp()
        plt.figure()
        plt.imshow(img1, cmap='gray')
        plt.plot(keypoints1[:, 1], keypoints1[:, 0], '.r')
        plt.savefig(os.path.join(debugfolder, "img1_plus_keypoints.jpg"))
        plt.close()
        plt.figure()
        plt.imshow(img2, cmap='gray')
        plt.plot(keypoints2[:, 1], keypoints2[:, 0], '.r')
        plt.savefig(os.path.join(debugfolder, "img2_plus_keypoints.jpg"))
        plt.close()
        plt.figure()
        ax1 = plt.gca()
        plot_matches(ax1, img1, img2, keypoints1, keypoints2, matches12)
        plt.savefig(os.path.join(debugfolder, "matches.jpg"))
        plt.close()
        logger.critical("Failed to estimate affine transform!")
        logger.critical("Debugging images saved to ", debugfolder)
Example #16
def plot_annotations(imgid, clustered_annotations):
    try:
        img = mpimg.imread(IMAGE_DIR + imgid + ".JPG")
    except IOError:
        print("WARNING: couldn't find image '%s'" % imgid)
        return False

    # Plot all annotations
    fig = plt.figure()
    ax = plt.gca()
    plt.imshow(img)
    for annotation, cluster_id in clustered_annotations:
        color = COLOR_MAP[cluster_id]
        rect = Rectangle((annotation.left, annotation.top),
                         annotation.width, annotation.height,
                         fill=False, color=color)
        ax.add_patch(rect)
    plt.show()

    # Plot median human and computer annotations
    by_cluster = annotations_by_cluster(clustered_annotations)
    all_medians = {clusterid:
                   (median_annotation(annotations),
                    median_annotation([annotation for annotation in annotations
                                       if annotation[0].is_human]),
                    median_annotation([annotation for annotation in annotations
                                       if not annotation[0].is_human]))
                   for clusterid, annotations in by_cluster.items()}
    plt.figure()
    ax = plt.gca()
    plt.imshow(img)
    for clusterid, medians in all_medians.items():
        color = COLOR_MAP[clusterid]
        for median in medians:
            if median is None: continue
            rect = Rectangle((median.left, median.top),
                             median.width, median.height, fill=False, color=color)
            ax.add_patch(rect)
    plt.show()

    # Affine transform image to consistent shape and plot again.
    from skimage.transform import AffineTransform, warp

    # calculate scale and corners
    row_scale = float(img.shape[0]) / 400.0
    col_scale = float(img.shape[1]) / 400.0
    src_corners = np.array([[1, 1], [1, 400.0], [400.0, 400.0]]) - 1
    dst_corners = np.zeros(src_corners.shape, dtype=np.double)
    # take into account that 0th pixel is at position (0.5, 0.5)
    dst_corners[:, 0] = col_scale * (src_corners[:, 0] + 0.5) - 0.5
    dst_corners[:, 1] = row_scale * (src_corners[:, 1] + 0.5) - 0.5

    # do the transformation
    tform = AffineTransform()
    tform.estimate(src_corners, dst_corners)
    resized = warp(img, tform, output_shape=(400, 400), order=1,
                   mode='constant', cval=0)

    # plot the transformed image
    plt.figure()
    ax = plt.gca()
    plt.imshow(resized)
    for clusterid, medians in all_medians.items():
        color = COLOR_MAP[clusterid]

        for median in medians:
            if median is None: continue

            # apply the transformation to each rectangle
            corners = np.array([[median.left, median.top],
                                [median.left + median.width,
                                 median.top + median.height]])
            new_corners = tform.inverse(corners)
            rect = Rectangle(new_corners[0, :],
                             new_corners[1,0] - new_corners[0,0],
                             new_corners[1,1] - new_corners[0,1],
                             fill=False, color=color)
            ax.add_patch(rect)
    plt.show()
    return True
Example #17
def _do_detection(config: CommonParameters, ov_parameters: OverviewParameters, det_params: DetectionParameters, img=None):
    logger = logging.getLogger(__name__)
    logger.info('finished overview, detecting wings...')

    with open(ov_parameters.field_def_file, 'r') as fd:
        field_calib = json.load(fd)

    # pixel to world coordinates transformation from 3-point calibration stored in field_calib file
    coords_px = np.array(field_calib['coords_px'], dtype=float)
    coords_st = np.array(field_calib['coords_st'], dtype=float)
    at = AffineTransform()
    at.estimate(coords_px, coords_st)

    _suffix = TIFF_SUFFIX if ov_parameters.export_as_tiff else ND2_SUFFIX
    remote_path = '/'.join([config.server_path_remote, config.prefix, 'overviews', config.prefix + '_overview' + _suffix])

    progress_indicator = config.progress_indicator
    if progress_indicator is not None:
        progress_indicator.set_status('detecting wings')

    # where to save the segmentation to
    label_export_path = '/'.join([config.server_path_remote, config.prefix, 'overviews', config.prefix + '_segmentation' + TIFF_SUFFIX])

    # do the detection
    with ServerProxy(det_params.detector_adress) as proxy:
        try:
            bboxes = proxy.detect_bbox(remote_path, det_params.object_filter, label_export_path)
        except Exception as e:
            bboxes = None
            traceback.print_exc()

        bboxes = [] if bboxes is None else bboxes
        print(bboxes)

    if det_params.do_manual_annotation:
        if len(bboxes)==0:
            annots = manually_correct_rois(img, [], [1])
        else:
            annots = manually_correct_rois(img, [[x0, y0, x1 - x0, y1 - y0] for (y0, x0, y1, x1) in bboxes],
                                           [1] * len(bboxes))
        annotation_out = os.path.join(config.server_path_local, config.prefix, 'overviews', config.prefix + '_manualrois.json')
        with open(annotation_out, 'w') as fd:
            json.dump([a.to_json() for a in annots], fd)
        bboxes = [a.roi for a in annots]
        logger.debug(bboxes)
        # change to other format
        bboxes = [[y0, x0, y0 + h, x0 + w] for (x0, y0, w, h) in bboxes]
        logger.debug(bboxes)

    # save rois, regardless of whether we did manual annotation or not
    annotation_out = os.path.join(config.server_path_local, config.prefix, 'overviews', config.prefix + '_autorois.json')
    bboxes_json = [{'y0': int(y0), 'y1': int(y1), 'x0': int(x0), 'x1': int(x1)} for (y0, x0, y1, x1) in bboxes]
    with open(annotation_out, 'w') as fd:
        json.dump(bboxes_json, fd)

    if det_params.plot_detection and img is not None:
        plt.figure()
        plt.imshow(img)

    # extract binning factor again
    # set overview optical configuration
    nis_util.set_optical_configuration(config.path_to_nis, ov_parameters.oc_overview)
    # get resolution, binning and fov
    (xres, yres, siz, mag) = nis_util.get_resolution(config.path_to_nis)
    live_fmt, capture_fmt = nis_util.get_camera_format(config.path_to_nis)
    color = nis_util.is_color_camera(config.path_to_nis)
    # we have to parse capture_fmt differently for color and gray camera
    # TODO: extract to separate function
    if color:
        binning_factor = 1.0 if '1/3' not in capture_fmt else 3.0
    else:
        binning_factor = float(capture_fmt.split()[1].split('x')[0])

    bboxes_scaled = []
    for bbox in bboxes:
        # upsample bounding boxes if necessary
        bbox_scaled = np.array(tuple(bbox)) * binning_factor
        logger.debug('bbox: {}, upsampled: {}'.format(bbox, bbox_scaled))
        bboxes_scaled.append(bbox_scaled)

        # plot bbox
        if det_params.plot_detection and img is not None:
            minr, minc, maxr, maxc = tuple(list(bbox))
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            plt.gca().add_patch(rect)
    if det_params.plot_detection and img is not None:
        plt.show()

    # use scaled bboxes from here on
    bboxes = bboxes_scaled

    # pixels to units
    bboxes = [bbox_pix2unit(b, at) for b in bboxes]

    # expand bounding boxes
    bboxes = [scale_bbox(bbox, expand_factor=.2) for bbox in bboxes]

    logger.info('detected {} wings:'.format(len(bboxes)))

    return bboxes
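The three-point field calibration produces a skimage AffineTransform that maps pixel coordinates to stage coordinates; an isolated hedged sketch of that mapping (the coordinates are illustrative, and bbox_pix2unit above is assumed to wrap the same transform):

import numpy as np
from skimage.transform import AffineTransform

coords_px = np.array([[0, 0], [1000, 0], [0, 1000]], dtype=float)   # pixel positions
coords_st = np.array([[5.0, 5.0], [6.0, 5.0], [5.0, 6.0]])          # stage positions (mm)
at = AffineTransform()
at.estimate(coords_px, coords_st)
print(at(np.array([[500.0, 500.0]])))    # centre pixel mapped into stage coordinates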
Example #18
    # (tail of the match_corner helper used below; its definition was cut off
    #  in this excerpt)
    min_idx = np.argmin(SSDs)
    return coords_warped_subpix[min_idx]


# find correspondences using simple weighted sum of squared differences
src = []
dst = []
for coord in coords_orig_subpix:
    src.append(coord)
    dst.append(match_corner(coord))
src = np.array(src)
dst = np.array(dst)

# estimate affine transform model using all coordinates
model = AffineTransform()
model.estimate(src, dst)

# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst),
                               AffineTransform,
                               min_samples=3,
                               residual_threshold=2,
                               max_trials=100)
outliers = inliers == False

# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)

# visualize correspondence
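This excerpt follows the scikit-image "robust matching using RANSAC" gallery example and assumes a ground-truth transform `tform`, an original/warped image pair and the corner lists were created earlier; a hedged sketch of that setup (approximate, not the verbatim gallery code):

import numpy as np
from skimage import data
from skimage.transform import AffineTransform, warp
from skimage.feature import corner_harris, corner_peaks, corner_subpix

img_orig = data.checkerboard()
tform = AffineTransform(scale=(0.9, 0.9), rotation=0.2, translation=(20, -10))
img_warped = warp(img_orig, tform.inverse, output_shape=img_orig.shape)

coords_orig = corner_peaks(corner_harris(img_orig), threshold_rel=0.001, min_distance=5)
coords_orig_subpix = corner_subpix(img_orig, coords_orig, window_size=9)
# match_corner (its tail is shown at the top of this example) compares local patches
# between img_orig and img_warped and returns the best-matching warped corner.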
Example #19
def stabilize(video_name_input='INPUT.avi',
              video_name_output='stabilized.avi'):
    vid = video_name_input
    outvid = video_name_output
    print('vid: {}'.format(vid))
    print('outvid: {}'.format(outvid))
    # Read input video
    cap = cv2.VideoCapture(vid)

    # Get frame count
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    # Get width and height of video stream
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fps = cap.get(cv2.CAP_PROP_FPS)

    # Define the codec for output video
    fourcc = cv2.VideoWriter_fourcc(*'DIVX')
    # Set up output video
    out = cv2.VideoWriter(outvid, fourcc, fps, (w, h))
    _, prev = cap.read()
    out.write(prev)
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)

    # Pre-define transformation-store array
    transforms = np.zeros((n_frames - 1, 3), np.float32)

    for i in range(n_frames - 1):
        # Detect feature points in previous frame
        prev_pts = cv2.goodFeaturesToTrack(prev_gray,
                                           maxCorners=200,
                                           qualityLevel=0.01,
                                           minDistance=20,
                                           blockSize=3)

        # Read next frame
        success, curr = cap.read()

        if not success:
            break
        # Convert to grayscale
        curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)

        # Calculate optical flow (i.e. track feature points)
        curr_pts, status, err = cv2.calcOpticalFlowPyrLK(
            prev_gray, curr_gray, prev_pts, None)

        # Sanity check
        assert prev_pts.shape == curr_pts.shape

        # Filter only valid points
        idx = np.where(status == 1)[0]
        prev_pts = np.squeeze(prev_pts[idx])
        curr_pts = np.squeeze(curr_pts[idx])

        # Find transformation matrix using ransac with 99% success
        p = 0.99
        n = 3
        inliers_percent = 0.7

        model = AffineTransform()
        model.estimate(prev_pts, curr_pts)

        k = np.ceil(np.log(1 - p) / np.log(1 - inliers_percent**n)).astype(int)
        A, inliers = ransac((prev_pts, curr_pts),
                            AffineTransform,
                            min_samples=5,
                            residual_threshold=2,
                            max_trials=k)
        A = A.params[:2, :]
        # Extract translation
        dx = A[0, 2]
        dy = A[1, 2]

        # Extract rotation angle
        da = np.arctan2(A[1, 0], A[0, 0])

        # Store transformation
        transforms[i] = [dx, dy, da]

        # Move to next frame
        prev_gray = curr_gray

        print("Frame: " + str(i + 1) + "/" + str(n_frames - 1) +
              " -  Tracked points : " + str(len(prev_pts)))
    # Compute trajectory using cumulative sum of transformations
    trajectory = np.cumsum(transforms, axis=0)
    smoothed_trajectory = smooth(trajectory)
    # Calculate difference in smoothed_trajectory and trajectory
    difference = smoothed_trajectory - trajectory

    # Calculate newer transformation array
    transforms_smooth = transforms + difference

    # Reset stream to first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

    # Write n_frames-1 transformed frames
    pbar = tqdm(total=n_frames - 1, desc='Stabilizing Video')

    for i in range(n_frames - 1):
        # Read next frame
        success, frame = cap.read()
        if not success:
            break
        pbar.update(1)

        # Extract transformations from the new transformation array
        dx = transforms_smooth[i, 0]
        dy = transforms_smooth[i, 1]
        da = transforms_smooth[i, 2]

        # Reconstruct transformation matrix accordingly to new values
        m = np.zeros((2, 3), np.float32)
        m[0, 0] = np.cos(da)
        m[0, 1] = -np.sin(da)
        m[1, 0] = np.sin(da)
        m[1, 1] = np.cos(da)
        m[0, 2] = dx
        m[1, 2] = dy

        # Apply affine warping to the given frame
        frame_stabilized = cv2.warpAffine(frame, m, (w, h))

        # Fix border artifacts
        frame_stabilized = fixBorder(frame_stabilized)

        # Write the frame to the file
        out.write(frame_stabilized)
    pbar.update(1)
    pbar.close()
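The trial budget k used above follows the standard RANSAC bound k = log(1 - p) / log(1 - w^n) for success probability p, assumed inlier ratio w and minimal sample size n; a quick worked check with the constants from the function (note that the function then samples min_samples=5 per trial even though n=3 is used in the bound):

import numpy as np

p, w, n = 0.99, 0.7, 3                         # confidence, inlier ratio, sample size
k = int(np.ceil(np.log(1 - p) / np.log(1 - w**n)))
print(k)                                       # 11 trials suffice at these settings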
Example #20
    return coords_warped_subpix[min_idx]


# find correspondences using simple weighted sum of squared differences
src = []
dst = []
for coord in coords_orig_subpix:
    src.append(coord)
    dst.append(match_corner(coord))
src = np.array(src)
dst = np.array(dst)


# estimate affine transform model using all coordinates
model = AffineTransform()
model.estimate(src, dst)

# robustly estimate affine transform model with RANSAC
model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                               residual_threshold=2, max_trials=100)
outliers = inliers == False


# compare "true" and estimated transform parameters
print(tform.scale, tform.translation, tform.rotation)
print(model.scale, model.translation, model.rotation)
print(model_robust.scale, model_robust.translation, model_robust.rotation)


# visualize correspondences
img_combined = np.concatenate((img_orig_gray, img_warped_gray), axis=1)
Example #21
class DefaultRS(ReferenceSpace):
    """Default reference space.

    Attributes
    ----------
    tform : skimage.transform.GeometricTransform
        Affine transformation.

    keypoints : dict
        Defining landmarks used for estimating the parameters of the model.

    """
    def __init__(self):
        """Construct."""
        self.tform = AffineTransform()
        self.keypoints = {
            'CHIN': (0, 1),
            'UPPER_TEMPLE_L': (-1, -1),
            'UPPER_TEMPLE_R': (1, -1),
            'UPPERMOST_NOSE': (0, -1),
            'MIDDLE_NOSTRIL': (0, 0)
        }

    def estimate(self, lf):
        """Estimate parameters of the affine transformation.

        Parameters
        ----------
        lf : pychubby.detect.LandmarkFace
            Instance of the ``LandmarkFace``.

        """
        src = []
        dst = []
        for name, ref_coordinate in self.keypoints.items():
            dst.append(ref_coordinate)
            src.append(lf[name])

        src = np.array(src)
        dst = np.array(dst)

        self.tform.estimate(src, dst)

    def ref2inp(self, coords):
        """Transform from reference to input space.

        Parameters
        ----------
        coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y reference coordinates.

        Returns
        -------
        tformed_coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y coordinates in the input image
            corresponding row-wise to `coords`.

        """
        return self.tform.inverse(coords)

    def inp2ref(self, coords):
        """Transform from input to reference space.

        Parameters
        ----------
        coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y coordinates in the input space.

        Returns
        -------
        tformed_coords : np.ndarray
            Array of shape `(N, 2)` where the columns represent x and y coordinates in the reference space
            corresponding row-wise to `coords`.

        """
        return self.tform(coords)
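A hedged usage sketch: estimate() only needs `lf[name]` lookups for the five keypoints, so a small stand-in object is enough to exercise the round trip here (pychubby's LandmarkFace is the intended input; the coordinates below are illustrative):

import numpy as np

class FakeLandmarks:
    """Minimal stand-in exposing lf[name] like pychubby's LandmarkFace."""
    def __init__(self, points):
        self._points = points

    def __getitem__(self, name):
        return self._points[name]

lf = FakeLandmarks({
    'CHIN': (100, 180),
    'UPPER_TEMPLE_L': (60, 60),
    'UPPER_TEMPLE_R': (140, 60),
    'UPPERMOST_NOSE': (100, 70),
    'MIDDLE_NOSTRIL': (100, 120),
})

rs = DefaultRS()
rs.estimate(lf)
ref = rs.inp2ref(np.array([[100.0, 120.0]]))   # input pixels -> reference space
print(rs.ref2inp(ref))                         # round trip back to ~[[100, 120]]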