Example #1
import math
import numpy as np
import cv2
# homography_matrix() and add_ones() are helpers from the surrounding codebase

def transform_img(img,
                  rotx,
                  roty,
                  rotz,
                  tx=0,
                  ty=0,
                  scale=1,
                  adjust_frame=True):
    roll = rotx * math.pi / 180.0
    pitch = roty * math.pi / 180.0
    yaw = rotz * math.pi / 180.0
    # N.B.: homography_matrix() assumes the reference plane Z = d = 1 (see homography_matrix())
    # pinhole model: u = fx*X/Z, so on the plane Z = d = 1 one has u = fx*X
    # if we shift the camera by tz along Z, then u' = fx*X/(1 - tz)
    # hence zoom_factor = 1/(1 - tz)  =>  tz = (zoom_factor - 1)/zoom_factor
    tz = (scale - 1) / scale
    (h, w) = img.shape[:2]
    center = np.float32([w / 2, h / 2, 1])
    img_box = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])
    #print('img_box:',img_box)
    H = homography_matrix(img, roll, pitch, yaw, tx, ty, tz)
    #print('H:',H)
    transformed_img_box = (H @ add_ones(img_box).T)
    transformed_img_box = (transformed_img_box[:2] / transformed_img_box[2]).T
    transformed_center = (H @ center.T).T
    #print('transformed_img_box:',transformed_img_box)
    if adjust_frame:
        # adjust the frame in order to contain the transformed image
        min_u = math.floor(transformed_img_box[:, 0].min())
        max_u = math.ceil(transformed_img_box[:, 0].max())
        min_v = math.floor(transformed_img_box[:, 1].min())
        max_v = math.ceil(transformed_img_box[:, 1].max())
        new_w = max_u - min_u
        new_h = max_v - min_v
        if H[2, 2] != 0:
            H = H / H[2, 2]  # normalize the homography
        T = np.array([[1, 0, -min_u], [0, 1, -min_v],
                      [0, 0, 1]])  # shift the warped image into the new frame
        H = T @ H
        transformed_img_box = (H @ add_ones(img_box).T)
        transformed_img_box = (transformed_img_box[:2] /
                               transformed_img_box[2]).T
        transformed_center = (H @ center.T).T
    else:
        # simulate the camera pose change
        new_w = w
        new_h = h
    img_out = cv2.warpPerspective(img, H, (new_w, new_h))
    return img_out, transformed_img_box, H
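
A minimal usage sketch for transform_img (the input file name is hypothetical, and transform_img itself relies on the homography_matrix() and add_ones() helpers from the surrounding codebase):

import cv2

img = cv2.imread('image.png')  # hypothetical input image (BGR)
# rotate 10 deg about x and 30 deg about z, then zoom by 1.2x
warped, warped_box, H = transform_img(img, rotx=10, roty=0, rotz=30, scale=1.2)
# with adjust_frame=True (the default) the output canvas is enlarged so that
# the warped image is fully contained in it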
Example #2
import numpy as np
# add_ones() is a helper from the surrounding codebase

def compute_hom_reprojection_error(H, kps1, kps2, mask=None):
    # mean squared reprojection error of the homography H: map kps1 through H and compare with kps2
    if mask is not None:
        mask_idxs = (mask.ravel() == 1)  # keep only the inlier correspondences
        kps1 = kps1[mask_idxs]
        kps2 = kps2[mask_idxs]
    kps1_reproj = H @ add_ones(kps1).T
    kps1_reproj = kps1_reproj[:2] / kps1_reproj[2]  # perspective division
    error_vecs = kps1_reproj.T - kps2
    return np.mean(np.sum(error_vecs * error_vecs, axis=1))
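
As a quick sanity check, the error should be numerically zero when kps2 is exactly kps1 mapped through H; the sketch below uses a minimal stand-in for the codebase's add_ones() helper:

import numpy as np

def add_ones(pts):  # minimal stand-in: append a homogeneous coordinate of 1
    return np.hstack([pts, np.ones((pts.shape[0], 1))])

H = np.array([[1.0, 0.0, 5.0],
              [0.0, 1.0, -3.0],
              [0.0, 0.0, 1.0]])  # a pure 2D translation
kps1 = np.random.rand(10, 2) * 100
proj = H @ add_ones(kps1).T
kps2 = (proj[:2] / proj[2]).T  # map kps1 through H with perspective division
print(compute_hom_reprojection_error(H, kps1, kps2))  # ~0.0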
Example #3
    def unproject(self, uvs, depth):
        # back-project pixels to 3D camera points: X = depth * Kinv * [u, v, 1]^T
        # (gutils comes from the surrounding codebase)
        uvs = gutils.add_ones(uvs)  # to homogeneous coordinates, shape (N, 3)
        # if the image is distorted, use the inverse of the new camera matrix
        if self.distorted:
            unprojs = self.newKinv @ uvs.T
        else:
            unprojs = self.Kinv @ uvs.T
        unprojs = unprojs.T * depth  # scale each unit-depth ray by its depth
        return unprojs
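
The method implements the standard pinhole back-projection X = depth * Kinv * [u, v, 1]^T; a self-contained numeric sketch (the intrinsics and depths below are made up):

import numpy as np

K = np.array([[525.0, 0.0, 320.0],
              [0.0, 525.0, 240.0],
              [0.0, 0.0, 1.0]])  # hypothetical pinhole intrinsics
Kinv = np.linalg.inv(K)
uvs = np.array([[320.0, 240.0], [100.0, 50.0]])
depth = np.array([[2.0], [4.0]])  # one depth per pixel, as a column for broadcasting
pts3d = (Kinv @ np.hstack([uvs, np.ones((2, 1))]).T).T * depth
print(pts3d)  # the principal point unprojects onto the optical axis: [0, 0, 2]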
Example #4
import numpy as np
import cv2
# add_ones() is a helper from the surrounding codebase

def rotate_img(img, center=None, angle=0, scale=1):
    (h, w) = img.shape[:2]
    if center is None:
        center = (w / 2, h / 2)
    img_box = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]])
    #print('img_box:',img_box)
    M = cv2.getRotationMatrix2D(center, angle, scale)
    # grab the absolute cosine and sine from the rotation matrix
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    new_w = int((w * cos) + (h * sin))
    new_h = int((w * sin) + (h * cos))
    # adjust the rotation matrix to take into account translation (in the new image)
    M[0, 2] += (new_w / 2) - center[0]
    M[1, 2] += (new_h / 2) - center[1]
    rotated_img_box = (M @ add_ones(img_box).T).T
    #print('rotated_img_box:',rotated_img_box)
    img_out = cv2.warpAffine(img, M, (new_w, new_h))
    return img_out, rotated_img_box, M
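
A minimal usage sketch (hypothetical file name); note that the new canvas is w*|cos| + h*|sin| wide and w*|sin| + h*|cos| high, so the rotated image is never cropped:

import cv2

img = cv2.imread('image.png')  # hypothetical input image
rotated, rotated_box, M = rotate_img(img, angle=45, scale=1.0)
print(rotated.shape)  # larger than img.shape: the canvas contains the whole rotated image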
Example #5
    def unproject_points(self, uvs):
        # map pixels to normalized image coordinates: the (x, y) of Kinv * [u, v, 1]^T on the plane z = 1
        return np.dot(self.Kinv, add_ones(uvs).T).T[:, 0:2]
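
This returns normalized (calibrated) image coordinates, i.e. the (x, y) part of Kinv * [u, v, 1]^T on the plane z = 1; a standalone equivalent with made-up intrinsics:

import numpy as np

K = np.array([[525.0, 0.0, 320.0],
              [0.0, 525.0, 240.0],
              [0.0, 0.0, 1.0]])  # hypothetical intrinsics
Kinv = np.linalg.inv(K)
uvs = np.array([[320.0, 240.0], [845.0, 240.0]])
xn = (Kinv @ np.hstack([uvs, np.ones((2, 1))]).T).T[:, 0:2]
print(xn)  # [[0, 0], [1, 0]]: the principal point maps to the origin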
Example #6
    # method of the map class; np, cv2, Parameters, Frame, MapPoint, Printer
    # and add_ones come from the surrounding codebase
    def add_points(self,
                   points3d,
                   mask_pts3d,
                   kf1,
                   kf2,
                   idxs1,
                   idxs2,
                   img1,
                   do_check=True,
                   cos_max_parallax=Parameters.kCosMaxParallax):
        with self._lock:
            assert (kf1.is_keyframe
                    and kf2.is_keyframe)  # kf1 and kf2 must be keyframes
            assert (points3d.shape[0] == len(idxs1))
            assert (len(idxs2) == len(idxs1))

            added_points = []
            out_mask_pts3d = np.full(points3d.shape[0], False, dtype=bool)
            if mask_pts3d is None:
                mask_pts3d = np.full(points3d.shape[0], True, dtype=bool)

            ratio_scale_consistency = Parameters.kScaleConsistencyFactor * Frame.feature_manager.scale_factor

            if do_check:
                # project points
                uvs1, depths1 = kf1.project_points(points3d)
                bad_depths1 = depths1 <= 0
                uvs2, depths2 = kf2.project_points(points3d)
                bad_depths2 = depths2 <= 0

                # compute back-projected rays (unit vectors)
                rays1 = np.dot(kf1.Rwc, add_ones(kf1.kpsn[idxs1]).T).T
                norm_rays1 = np.linalg.norm(rays1, axis=-1, keepdims=True)
                rays1 /= norm_rays1
                rays2 = np.dot(kf2.Rwc, add_ones(kf2.kpsn[idxs2]).T).T
                norm_rays2 = np.linalg.norm(rays2, axis=-1, keepdims=True)
                rays2 /= norm_rays2

                # compute dot products of rays
                cos_parallaxs = np.sum(rays1 * rays2, axis=1)
                bad_cos_parallaxs = np.logical_or(
                    cos_parallaxs < 0, cos_parallaxs > cos_max_parallax)

                # compute reprojection errors
                errs1 = uvs1 - kf1.kpsu[idxs1]
                errs1_sqr = np.sum(errs1 * errs1,
                                   axis=1)  # squared reprojection errors
                kps1_levels = kf1.octaves[idxs1]
                invSigmas2_1 = Frame.feature_manager.inv_level_sigmas2[
                    kps1_levels]
                chis2_1 = errs1_sqr * invSigmas2_1  # chi square
                bad_chis2_1 = chis2_1 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                errs2 = uvs2 - kf2.kpsu[idxs2]
                errs2_sqr = np.sum(errs2 * errs2,
                                   axis=1)  # squared reprojection errors
                kps2_levels = kf2.octaves[idxs2]
                invSigmas2_2 = Frame.feature_manager.inv_level_sigmas2[
                    kps2_levels]
                chis2_2 = errs2_sqr * invSigmas2_2  # chi square
                bad_chis2_2 = chis2_2 > Parameters.kChi2Mono  # chi-square 2 DOFs  (Hartley Zisserman pg 119)

                # scale consistency
                scale_factors_x_depths1 = Frame.feature_manager.scale_factors[
                    kps1_levels] * depths1
                scale_factors_x_depths1_x_ratio_scale_consistency = scale_factors_x_depths1 * ratio_scale_consistency
                scale_factors_x_depths2 = Frame.feature_manager.scale_factors[
                    kps2_levels] * depths2
                scale_factors_x_depths2_x_ratio_scale_consistency = scale_factors_x_depths2 * ratio_scale_consistency
                bad_scale_consistency = np.logical_or(
                    (scale_factors_x_depths1 >
                     scale_factors_x_depths2_x_ratio_scale_consistency),
                    (scale_factors_x_depths2 >
                     scale_factors_x_depths1_x_ratio_scale_consistency))

                # combine all checks
                bad_points = bad_cos_parallaxs | bad_depths1 | bad_depths2 | bad_chis2_1 | bad_chis2_2 | bad_scale_consistency

            img_coords = np.rint(kf1.kps[idxs1]).astype(
                np.intp)  # rounded integer pixel coordinates of the keypoints
            # build img patches coordinates
            delta = Parameters.kColorPatchDelta
            patch_extension = 1 + 2 * delta  # patch_extension x patch_extension
            img_pts_start = img_coords - delta
            img_pts_end = img_coords + delta
            img_ranges = np.linspace(img_pts_start,
                                     img_pts_end,
                                     patch_extension,
                                     dtype=np.intp).T  # shape: (2, num_points, patch_extension)

            def img_range_elem(ranges, i):
                # patch coordinate ranges (u range, v range) of the i-th point
                return ranges[:, i]

            for i, p in enumerate(points3d):
                if not mask_pts3d[i]:
                    #print('p[%d] not good' % i)
                    continue

                idx1_i = idxs1[i]
                idx2_i = idxs2[i]

                # perform the required checks before adding the point
                if do_check:
                    if bad_points[i]:
                        continue

                    # N.B.: the commented-out blocks below are the per-point versions
                    # of the vectorized checks above, kept for reference
                    # check parallax is large enough (this is going to filter out all points when the inter-frame motion is almost zero)
                    # ray1 = np.dot(kf1.Rwc, add_ones_1D(kf1.kpsn[idx1_i]))
                    # ray2 = np.dot(kf2.Rwc, add_ones_1D(kf2.kpsn[idx2_i]))
                    # cos_parallax = ray1.dot(ray2) / (np.linalg.norm(ray1) * np.linalg.norm(ray2))
                    # if cos_parallax < 0 or cos_parallax > cos_max_parallax:
                    #     #print('p[',i,']: ',p,' not enough parallax: ', cos_parallaxs[i])
                    #     continue

                    # check points are visible on f1
                    # uv1, depth1 = kf1.project_point(p)
                    # is_visible1  = kf1.is_in_image(uv1, depth1) # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible1:
                    #     continue

                    # check points are visible on f2
                    # uv2, depth2 = kf2.project_point(p)
                    # is_visible2  = kf2.is_in_image(uv2, depth2)  # N.B.: is_in_image() check is redundant since we check the reproj error
                    # if not is_visible2:
                    #     continue

                    # check reprojection error on f1
                    #kp1_level = kf1.octaves[idx1_i]
                    #invSigma2_1 = Frame.feature_manager.inv_level_sigmas2[kp1_level]
                    # err1 = uvs1[i] - kf1.kpsu[idx1_i]
                    # chi2_1 = np.inner(err1,err1)*invSigma2_1
                    # if chi2_1 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    # check reprojection error on f2
                    # kp2_level = kf2.octaves[idx2_i]
                    # invSigma2_2 = Frame.feature_manager.inv_level_sigmas2[kp2_level]
                    # err2 = uvs2[i] - kf2.kpsu[idx2_i]
                    # chi2_2 = np.inner(err2,err2)*invSigma2_2
                    # if chi2_2 > Parameters.kChi2Mono: # chi-square 2 DOFs  (Hartley Zisserman pg 119)
                    #     continue

                    #check scale consistency
                    # scale_factor_x_depth1 =  Frame.feature_manager.scale_factors[kps1_levels[i]] * depths1[i]
                    # scale_factor_x_depth2 =  Frame.feature_manager.scale_factors[kps2_levels[i]] * depths2[i]
                    # if (scale_factor_x_depth1 > scale_factor_x_depth2*ratio_scale_consistency) or \
                    #    (scale_factor_x_depth2 > scale_factor_x_depth1*ratio_scale_consistency):
                    #     continue

                # get the color of the point
                try:
                    #color = img1[int(round(kf1.kps[idx1_i, 1])), int(round(kf1.kps[idx1_i, 0]))]
                    #img_pt = np.rint(kf1.kps[idx1_i]).astype(np.intp)
                    # color at the point
                    #color = img1[img_pt[1],img_pt[0]]
                    # build the color patch
                    #color_patch = img1[img_pt[1]-delta:img_pt[1]+delta,img_pt[0]-delta:img_pt[0]+delta]
                    #color = color_patch.mean(axis=0).mean(axis=0)  # compute the mean color in the patch

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    #pt_start = img_pts_start[i]
                    #pt_end   = img_pts_end[i]
                    #color_patch = img1[pt_start[1]:pt_end[1],pt_start[0]:pt_end[0]]

                    # average color in a (1+2*delta) x (1+2*delta) patch
                    img_range = img_range_elem(img_ranges, i)
                    color_patch = img1[img_range[1][:, np.newaxis],
                                       img_range[0]]
                    #print('color_patch.shape:',color_patch.shape)

                    color = cv2.mean(
                        color_patch)[:3]  # compute the mean color in the patch
                except IndexError:
                    Printer.orange('color out of range')
                    color = (255, 0, 0)

                # add the point to this map
                mp = MapPoint(p[0:3], color, kf2, idx2_i)
                self.add_point(mp)  # add point to this map
                mp.add_observation(kf1, idx1_i)
                mp.add_observation(kf2, idx2_i)
                mp.update_info()
                out_mask_pts3d[i] = True
                added_points.append(mp)
            return len(added_points), out_mask_pts3d, added_points
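
A note on the chi-square gating used above: the squared pixel error of each observation is scaled by the inverse variance of its pyramid level and compared against a 2-DOF chi-square quantile (Parameters.kChi2Mono). The 5.991 threshold (the 95% 2-DOF quantile) and the 1.2 scale factor below are illustrative assumptions:

import numpy as np

err = np.array([2.0, 1.5])     # reprojection error (pixels) of one observation
level = 2                      # pyramid level of the keypoint
sigma2 = (1.2 ** level) ** 2   # hypothetical per-level variance (scale factor 1.2)
chi2 = (err @ err) / sigma2    # squared error weighted by the level's uncertainty
print(chi2, chi2 > 5.991)      # drop the point if it exceeds the 95% 2-DOF quantile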
Example #7
import numpy as np
import cv2
# Timer, Printer, Parameters, Frame, add_ones, computeF12, epiline_to_end_points,
# find_matches_along_line and draw_lines come from the surrounding codebase

def search_frame_for_triangulation_test(f1, f2, img2, img1 = None):   

    idxs2_out = []
    idxs1_out = []
    lines_out = [] 
    num_found_matches = 0
    img2_epi = None     

    if __debug__:
        timer = Timer()
        timer.start()

    O1w = f1.Ow
    O2w = f2.Ow 
    # compute epipoles
    e1,_ = f1.project_point(O2w)  # in first frame  
    e2,_ = f2.project_point(O1w)  # in second frame  
    #print('e1: ', e1)
    #print('e2: ', e2)    
    
    baseline = np.linalg.norm(O1w-O2w) 
        
    # if the translation is too small we cannot triangulate
    # if False:
    #     if baseline < Parameters.kMinTraslation:  # we assume the Initializer has been used for building the first map
    #         Printer.red("search for triangulation: impossible with almost zero translation!")
    #         return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT
    # else:
    medianDepthF2 = f2.compute_points_median_depth()
    ratioBaselineDepth = baseline/medianDepthF2
    if ratioBaselineDepth < Parameters.kMinRatioBaselineDepth:  
        Printer.red("search for triangulation: impossible with too low ratioBaselineDepth!")
        return idxs1_out, idxs2_out, num_found_matches, img2_epi # EXIT           

    # compute the fundamental matrix between the two frames by using their estimated poses 
    F12, H21 = computeF12(f1, f2)

    idxs1 = []
    for i, p in enumerate(f1.get_points()): 
        if p is None:  # we consider just unmatched keypoints 
            kp = f1.kpsu[i]
            scale_factor = Frame.feature_manager.scale_factors[f1.octaves[i]]
            # discard points which are too close to the epipole 
            if np.linalg.norm(kp-e1) < Parameters.kMinDistanceFromEpipole * scale_factor:             
                continue    
            idxs1.append(i)      
    kpsu1 = f1.kpsu[idxs1]
    
    if __debug__:
        print('search_frame_for_triangulation - timer1: ', timer.elapsed())  
        timer.start()        

    # compute epipolar lines in second image 
    lines2 = cv2.computeCorrespondEpilines(kpsu1.reshape(-1,1,2), 2, F12)  
    lines2 = lines2.reshape(-1,3)
    
    xs2_inf = np.dot(H21, add_ones(kpsu1).T).T # x2_inf = H21 * x1, where x2_inf is the corresponding point under the infinite homography [Hartley Zisserman pg 339]
    xs2_inf = xs2_inf[:, 0:2] / xs2_inf[:, 2:]  
    line_edges = [ epiline_to_end_points(line, e2, x2_inf, f2.width) for line, x2_inf in zip(lines2, xs2_inf) ] 
    #print("line_edges: ", line_edges)

    if __debug__:
        print('search_frame_for_triangulation - timer3: ', timer.elapsed())
        assert(len(line_edges) == len(idxs1))    
        timer.start()

    len_des2 = len(f2.des)
    flag_match = np.full(len_des2, False, dtype=bool)
    dist_match = np.zeros(len_des2)
    index_match = np.full(len_des2, 0, dtype=int)
    for i, idx in enumerate(idxs1):   # N.B.: a point in f1 could be matched to more than one point in f2; we avoid this by caching the matches with f2 points
        f2_idx, dist = find_matches_along_line(f2, e2, line_edges[i], f1.des[idx])
        if f2_idx > -1:
            if not flag_match[f2_idx]: # new match
                flag_match[f2_idx] = True 
                dist_match[f2_idx] = dist 
                idxs2_out.append(f2_idx)
                idxs1_out.append(idx)
                index_match[f2_idx] = len(idxs2_out)-1                
                assert(f2.get_point_match(f2_idx) is None)            
                assert(f1.get_point_match(idx) is None)
                if __debug__:
                    lines_out.append(line_edges[i])
            else: # already matched
                if dist < dist_match[f2_idx]:  # update in case of a smaller distance 
                    dist_match[f2_idx] = dist 
                    index = index_match[f2_idx]  
                    idxs1_out[index] = idx    
                    if __debug__:
                        lines_out[index] = line_edges[i]                              
 
    num_found_matches = len(idxs1_out)
    assert(len(idxs1_out) == len(idxs2_out)) 

    if __debug__:
        #print("num found matches: ", num_found_matches)
        kpsu2 = f2.kpsu[idxs2_out]
        img2_epi = draw_lines(img2.copy(), lines_out, kpsu2)
        #cv2.imshow("epipolar lines",img2_epi)
        #cv2.waitKey(0)

    if __debug__:
        print('search_frame_for_triangulation - timer4: ', timer.elapsed())

    return idxs1_out, idxs2_out, num_found_matches, img2_epi
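
A small sketch of the epipolar constraint the search exploits: with the common convention x2^T F x1 = 0, the epipolar line of x1 in the second image is l2 = F x1. Here F is the fundamental matrix of a pure x-translation between identical calibrated cameras, so the epipolar lines are horizontal:

import numpy as np

F = np.array([[0.0, 0.0, 0.0],
              [0.0, 0.0, -1.0],
              [0.0, 1.0, 0.0]])    # F = [t]_x for t = (1, 0, 0)
x1 = np.array([100.0, 50.0, 1.0])  # homogeneous point in image 1
l2 = F @ x1                        # [0, -1, 50]: the line v = 50 in image 2
print(l2)                          # any correct match x2 satisfies l2 @ x2 = 0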