Example #1
def main():
    filenum = 1
    lidar_pitch = 9.5/180*np.pi
    
    filename = "/home/henry/Documents/projects/avl/Detection/ground/data/points_data_"+repr(filenum)+".txt"
    planes, pcds = read_points_data(filename)
    
    tf = Rotation(roll=0.0, pitch=lidar_pitch, yaw=0.0)
    for plane in planes:
        plane.rotate_around_axis(axis="y", angle=-lidar_pitch)
    for pcd in pcds:
        pcd.rotate(tf)
    
    
    pcd = pcds[0]
    plane = planes[1]
    # print(np.min(pcd.data[0,:]), np.max(pcd.data[0,:]))
    vis(plane, pcd, dim_2d=True)

    # the floor-only points
    # pcd = pcds[2]
    # plane = planes[2]
    # re-estimate the plane
    sac = RANSAC(Plane3D, min_sample_num=3, threshold=0.1, iteration=50, method="MSAC")
    plane2, _, _, _ = sac.ransac(pcd.data.T)
    vis(plane2, pcd, dim_2d=True)
    plt.show()
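Note: RANSAC, Plane3D, read_points_data, and vis are project-specific helpers whose implementations are not shown. As a rough illustration of the technique being invoked (a hypothetical sketch, not this project's code), an MSAC-style plane fit over an (N, 3) point array could look like this:

import numpy as np

def msac_plane(points, threshold=0.1, iterations=50, rng=None):
    # Illustrative helper: returns (a, b, c, d) with ax + by + cz + d = 0.
    rng = rng or np.random.default_rng()
    best_cost, best_plane = np.inf, None
    for _ in range(iterations):
        sample = points[rng.choice(len(points), 3, replace=False)]
        normal = np.cross(sample[1] - sample[0], sample[2] - sample[0])
        norm = np.linalg.norm(normal)
        if norm < 1e-12:
            continue  # degenerate (collinear) sample
        normal /= norm
        d = -normal @ sample[0]
        dist = np.abs(points @ normal + d)
        # MSAC cost: inliers contribute their squared distance,
        # outliers a constant penalty of threshold**2.
        cost = np.sum(np.minimum(dist, threshold) ** 2)
        if cost < best_cost:
            best_cost, best_plane = cost, (*normal, d)
    return best_plane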
Example #2
    def __init__(self):
        #Subscribers
        self.image_sub = rospy.Subscriber('/ardrone/bottom/image_raw', Image,
                                          self.image_callback)
        self.sub_vicon = rospy.Subscriber('/vicon/ARDroneCarre/ARDroneCarre',
                                          TransformStamped,
                                          self.update_quadrotor_state)

        #Global variables
        self.image = Image
        self.bridge = CvBridge()
        self.quad_state = TransformStamped()
        self.T_body2vicon = np.identity(4)

        #wait for a message before starting processing
        rospy.wait_for_message('/ardrone/bottom/image_raw', Image)

        #time flags for detecting when bag file is done publishing
        self.current_time = rospy.Time.now().to_sec()
        self.last_msg_time = rospy.Time.now().to_sec()

        #Define landmark locations
        self.landmarks = np.array([[4.32, 8.05], [0.874, 5.49], [7.68, 4.24],
                                   [4.27, 1.23]])
        #list of the closest save radius recorded for each landmark
        self.landmark_prev_save_rad = [
            float('inf'),
            float('inf'),
            float('inf'),
            float('inf')
        ]
        self.save_radius = 0.5

        #pose graph
        self.G = {}

        #load images for classification
        img1 = cv2.imread('cn_tower.png')
        img2 = cv2.imread('casa_loma.png')
        img3 = cv2.imread('nathan_philips_square.png')
        img4 = cv2.imread('princes_gates.png')
        self.landmark_ref_images = [img1, img2, img3, img4]
        self.landmark_names = [
            'cn_tower', 'casa_loma', 'nathan_philips_square', 'princes_gates'
        ]

        #Initialize ransac
        self.ransac = RANSAC()
Example #3
def main(im1loc, im2loc, output):
    correspondences = flipxy(get_matches(im1loc, im2loc))
    model = RANSAC(correspondences)

    im1, im2 = Image.open(im1loc).convert('L'), Image.open(im2loc).convert('L')
    w1, h1 = im1.size
    w2, h2 = im2.size
    panorama = im2.transform((w1 + w2, h1), Image.PERSPECTIVE, model,
                             Image.BILINEAR)
    panorama.save("transformed.jpg")

    im1_arr, panorama_arr = np.asarray(im1), np.asarray(panorama)
    final = np.copy(panorama_arr)
    for i in range(len(panorama_arr)):
        for j in range(len(panorama_arr[i])):
            if j < len(im1_arr[i]):
                pixel1 = im1_arr[i, j]
                pixel2 = panorama_arr[i, j]
                if pixel2 == 0 and pixel1 != 0:
                    final[i, j] = pixel1
                elif pixel2 == 0 and pixel1 == 0:
                    final[i, j] = 0
                elif pixel2 != 0 and pixel1 != 0:
                    final[i, j] = pixel2  #(int(pixel1) + int(pixel2))/2
                elif pixel2 != 0 and pixel1 == 0:
                    final[i, j] = pixel2
    test = Image.fromarray(np.uint8(final)).convert('L')
    test.save(output)
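As an aside, the pixel-by-pixel blend above can be vectorized with NumPy. A minimal sketch, assuming grayscale uint8 arrays as in the code above:

def blend(im1_arr, panorama_arr):
    final = panorama_arr.copy()
    h = min(im1_arr.shape[0], final.shape[0])
    w = min(im1_arr.shape[1], final.shape[1])
    left, region = im1_arr[:h, :w], final[:h, :w]
    mask = (region == 0) & (left != 0)  # panorama empty, im1 has data
    region[mask] = left[mask]           # region is a view into final
    return final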
Example #4
    def __init__(self):
        # specify topic and data type
        self.sub_bbox_1 = rospy.Subscriber("camera1/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_bbox_6 = rospy.Subscriber("camera6/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_pcd = rospy.Subscriber("/points_raw", PointCloud2,
                                        self.pcd_callback)
        # self.sub_pose = rospy.Subscriber("/current_pose", PoseStamped, self.pose_callback)

        # Publisher
        self.pub_intersect_markers = rospy.Publisher(
            "/vision_objects_position_rviz", MarkerArray, queue_size=10)
        self.pub_plane_markers = rospy.Publisher("/estimated_plane_rviz",
                                                 MarkerArray,
                                                 queue_size=10)
        self.pub_plane = rospy.Publisher("/estimated_plane",
                                         Plane,
                                         queue_size=10)
        self.pub_pcd_inlier = rospy.Publisher("/points_inlier",
                                              PointCloud2,
                                              queue_size=10)
        self.pub_pcd_outlier = rospy.Publisher("/points_outlier",
                                               PointCloud2,
                                               queue_size=10)

        self.cam1 = camera_setup_1()
        self.cam6 = camera_setup_6()
        self.plane = Plane3D(-0.157, 0, 0.988, 1.9)
        self.plane_world = None
        self.plane_tracker = None
        self.sac = RANSAC(Plane3D,
                          min_sample_num=3,
                          threshold=0.22,
                          iteration=200,
                          method="MSAC")

        self.tf_listener = TransformListener()
        self.tfmr = Transformer()
        self.tf_ros = TransformerROS()
Example #5
    def test_ransac(self):
        """
        Test ransac by following steps:
        1. make random homography
        2. make inliers with noise
        3. make outliers
        4. RUN
        """
        N_TOTAL = 1000
        N_INLIER = 500
        N_ITER = trial_count(N_INLIER / N_TOTAL, 4)
        N_OUTLIER = N_TOTAL - N_INLIER
        NOISE_SIZE = 0

        # step 1
        gt = np.random.random((3, 3))
        gt[2, :] *= 100
        gt[2, 2] = 1

        # step 2
        hom = Homography(gt)
        x = np.random.random((N_INLIER, 2))
        y = hom.transform(x)

        x += np.random.random((N_INLIER, 2)) * NOISE_SIZE
        y += np.random.random((N_INLIER, 2)) * NOISE_SIZE
        inliers = np.concatenate((x, y), axis=1)

        # step 3
        outliers = np.random.random((N_OUTLIER, 4))
        data = np.concatenate((inliers, outliers), axis=0)

        # step 4
        from ransac import RANSAC
        ransac_inst = RANSAC(
            n_sample=4,
            n_iter=N_ITER,
            err_thres=0.1,
        )
        model = ProjectionModel()
        best_func, best_inliers = ransac_inst(data, model)
        np.testing.assert_almost_equal(best_func.M, gt)
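trial_count is imported from the project's utilities and is not shown here. Judging by the call trial_count(N_INLIER / N_TOTAL, 4), it presumably implements the standard bound on RANSAC iterations, N = log(1 - p) / log(1 - w^s), where w is the inlier ratio, s the sample size, and p the desired success probability. A minimal sketch under that assumption:

import math

def trial_count(inlier_ratio, sample_size, confidence=0.99):
    # Iterations needed so that, with probability `confidence`, at least
    # one drawn sample of `sample_size` points is all inliers.
    return math.ceil(math.log(1 - confidence) /
                     math.log(1 - inlier_ratio ** sample_size))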
Example #6
File: GAtest.py  Project: Suke-H/CSG
def PlaneDetect(points, normals, epsilon, alpha):

    # Plane detection
    # index: indices of the fitted points within points
    plane, index, num = RANSAC(points, normals, epsilon=epsilon, alpha=alpha)

    selectIndex = np.array(
        [True if i in index else False for i in range(points.shape[0])])

    # Project the fitted points onto the plane
    # plane_points: 3D points after projection
    # UVvector: 2D points after projection
    plane_points, UVvector, u, v, O = Plane2DProjection(points[index], plane)

    # # Prepare the plot
    # fig = plt.figure()
    # ax = Axes3D(fig)
    # ax.set_xlabel("X")
    # ax.set_ylabel("Y")
    # ax.set_zlabel("Z")
    # # Draw the point cloud
    # X, Y, Z = Disassemble(points)
    # MX, MY, MZ = X[index], Y[index], Z[index]
    # PX, PY, PZ = Disassemble(plane_points)
    # ax.plot(X, Y, Z, marker="o", linestyle='None', color="white")
    # ax.plot(MX, MY, MZ, marker=".", linestyle='None', color="red")
    # ax.plot(PX, PY, PZ, marker=".", linestyle='None', color="blue")
    # # Draw the plane
    # plot_implicit(ax, plane.f_rep, points, AABB_size=1, contourNum=15)

    # plt.show()
    # plt.close()

    # # Draw the projected 2D points
    # UX, UY = Disassemble2d(UVvector)
    # plt.plot(UX, UY, marker="o",linestyle="None",color="red")

    # plt.show()
    # plt.close()

    return UVvector, plane, u, v, O, selectIndex
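Plane2DProjection is defined elsewhere in this project. Conceptually, projecting points onto a plane with origin O and an orthonormal in-plane basis (u, v) reduces to two dot products per point; a hypothetical sketch of that idea (not the project's code):

import numpy as np

def plane_2d_projection(points, O, u, v):
    d = points - O
    uv = np.stack((d @ u, d @ v), axis=1)     # 2D (u, v) coordinates
    proj = O + uv[:, :1] * u + uv[:, 1:] * v  # projected 3D points
    return proj, uv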
Example #7
            # Compute CCL image
            ccl = computeCCL(filteredLabelNum, label)
            ################## RANSAC PROGRAM ##################
            i = 1  # Counter
            # Run RANSAC on each connected component
            for labelNum in filteredLabelNum:
                logging.debug(f'COMPONENT : {i}')
                # Compile the coordinates of the contour pixels into data points
                dataPoints = compileDataPoints(label, labelNum)
                # Initialize an ellipse instance
                ellipse = Ellipse()
                # Run RANSAC on the data points
                model = RANSAC(
                    ellipse, 
                    dataPoints, 
                    args.sample_size, 
                    args.iteration, 
                    args.threshold, 
                    args.tolerance
                )
                # Draw the ellipse
                if model != []:
                    drawEllipse(out, model, args.crop)

                logging.info(f'Component {i} process complete.')
                i = i + 1
            ####################################################
            if args.crop:
                out = cv.bitwise_and(out, img)
                out = makeTransparent(out)
            out = cv.resize(out, (w, h))
            # Save images 
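The Ellipse model and the RANSAC routine here are defined elsewhere. For ellipse fitting, five points form the minimal RANSAC sample, since five points determine a conic; one common way to hypothesize a model from such a sample is a null-space conic fit, sketched here for illustration (not this project's code):

import numpy as np

def conic_from_points(pts):
    # Conic a*x^2 + b*x*y + c*y^2 + d*x + e*y + f = 0 through >= 5 points,
    # recovered up to scale as the null vector of the design matrix.
    x, y = pts[:, 0], pts[:, 1]
    A = np.column_stack([x * x, x * y, y * y, x, y, np.ones_like(x)])
    _, _, Vt = np.linalg.svd(A)
    return Vt[-1]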
Example #8
kp2, ds2 = matcher_obj.getFeatures(img2)
matches = matcher_obj.match(ds1, ds2)
print("Number of matches: ", len(matches))
print('Keypoint detection and matching done in {:.6f}s'.format(time.time() -
                                                               t0))

src_orig = np.float32([kp1[m.queryIdx].pt for m in matches])
dst_orig = np.float32([kp2[m.trainIdx].pt for m in matches])
src_orig = np.vstack((src_orig.T, np.ones((1, len(matches)))))
dst_orig = np.vstack((dst_orig.T, np.ones((1, len(matches)))))

##################
# Outlier removal.
##################
t1 = time.time()
ransac = RANSAC(config.M, config.thr)
src_fine, dst_fine = ransac(img1, img2, src_orig, dst_orig)
print("Number of inliers: ", src_fine.shape[1])
print('RANSAC sampling done in {:.6f}s'.format(time.time() - t1))

########################
# Global homography (H).
########################
print("\nDLT (projective transform) on inliers")
# Refine homography using DLT on inliers.
Hg = homography_fit(src_fine, dst_fine)

#############################################
# Image stitching with global homography (H).
#############################################
# Obtaining size of canvas (using global Homography)
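homography_fit is defined elsewhere; given the 3xN homogeneous inlier arrays built above, a direct linear transform (DLT) estimate would follow the pattern below (a sketch under that assumption, omitting Hartley normalization):

import numpy as np

def dlt_homography(src, dst):
    src, dst = src / src[2], dst / dst[2]  # ensure w == 1
    A = np.zeros((2 * src.shape[1], 9))
    for i in range(src.shape[1]):
        x, y = src[0, i], src[1, i]
        u, v = dst[0, i], dst[1, i]
        A[2 * i] = [-x, -y, -1, 0, 0, 0, u * x, u * y, u]
        A[2 * i + 1] = [0, 0, 0, -x, -y, -1, v * x, v * y, v]
    _, _, Vt = np.linalg.svd(A)  # solution: right singular vector with
    H = Vt[-1].reshape(3, 3)     # the smallest singular value
    return H / H[2, 2]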
Example #9
def DetectViewer2(path):
    # Get the point cloud, normals, and OBB diagonal length
    #points, X, Y, Z, normals, length = PreProcess(path)

    # Use this to work with a hand-made point cloud
    #points, X, Y, Z, normals, length = PreProcess2()

    # Use this to work with PLY data
    points, X, Y, Z, normals, length = ViewPLY(path)

    # Keep the original point cloud data
    ori_points = points[:, :]
    #ori_normals = normals[:, :]

    # List of detected shapes
    fitting_figures = []
    
    print("points:{}".format(points.shape[0]))

    ### Initialize the plot ###
    ax = ViewerInit(points, X, Y, Z, normals)

    while points.shape[0] >= ori_points.shape[0] * 0.05:
        print("points:{}".format(points.shape[0]))

        scores = []
        paras = []
        indices = []

        ### Optimization ###
        for fig_type in [0,1,2,3]:

            ### Initialize the plot ###
            #ax = ViewerInit(points, X, Y, Z, normals)

            # Shape fitting
            #result = figOptimize(points, normals, length, fig_type)
            #result = figOptimize2(X, Y, Z, normals, length, fig_type)
            result, MX, MY, MZ, num, index = RANSAC(fig_type, points, normals, X, Y, Z, length)
            print(result)

            # Select the shape according to fig_type
            if fig_type==0:
                figure = F.sphere(result)
            elif fig_type==1:
                figure = F.plane(result)
            elif fig_type==2:
                figure = F.cylinder(result)
            elif fig_type==3:
                figure = F.cone(result)

            # Draw the shape
            #plot_implicit(ax, figure.f_rep, points, AABB_size=1, contourNum=50)

            # Count the points that satisfy the "condition" for the shape; this count is the score
            #MX, MY, MZ, num, index = CountPoints(figure, points, X, Y, Z, normals, epsilon=0.08*length, alpha=np.pi/9)
            #print("AFTER_num:{}".format(num))

            # Draw the points satisfying the condition and the optimized shape
            #ax.plot(MX,MY,MZ,marker=".",linestyle='None',color="orange")

            # Finally call .show() to display the plot
            #plt.show()

            # Save the score, parameters, and indices
            scores.append(num)
            paras.append(result)
            indices.append(index)

            print("="*100)

        if max(scores) <= ori_points.shape[0] * 0.05:
            print("おわり!")
            break

        ### Initialize the plot ###
        ax = ViewerInit(points, X, Y, Z, normals)

        # Index of the shape with the highest score
        best_fig = scores.index(max(scores))

        # Save the winning shape
        fitting_figures.append([best_fig, paras[best_fig]])

        if best_fig==0:
            figure = F.sphere(paras[best_fig])
            print("Sphere wins")

        elif best_fig==1:
            figure = F.plane(paras[best_fig])
            print("Plane wins")

        elif best_fig==2:
            figure = F.cylinder(paras[best_fig])
            print("Cylinder wins")

        elif best_fig==3:
            figure = F.cone(paras[best_fig])
            print("Cone wins")


        # Draw the fitted points
        ax.plot(X[indices[best_fig]],Y[indices[best_fig]],Z[indices[best_fig]],\
                marker=".",linestyle='None',color="orange")

        # Draw the shape
        plot_implicit(ax, figure.f_rep, points, AABB_size=1, contourNum=15)

        plt.show()

        # Remove the fitted points
        points = np.delete(points, indices[best_fig], axis=0)
        normals = np.delete(normals, indices[best_fig], axis=0)
        X, Y, Z = Disassemble(points)

        ### Initialize the plot ###
        #ax = ViewerInit(points, X, Y, Z, normals)

        #plt.show()
        ##################

        print("="*100)
        

    print(len(fitting_figures), fitting_figures)
    plt.show()
Example #10
def OptiViewer(path, fig_type):

    # Get the point cloud, normals, and OBB diagonal length
    #points, X, Y, Z, normals, length = PreProcess(path)

    # Use this to work with a hand-made point cloud
    #points, X, Y, Z, normals, length = PreProcess2()

    # Use this to work with PLY data
    points, X, Y, Z, normals, length = ViewPLY(path)

    #U, V, W = Disassemble(normals)

    # Draw the normals
    #ax.quiver(X, Y, Z, U, V, W,  length=0.1, normalize=True)

    while True:
        print("points:{}".format(points.shape[0]))

        # Build the plot frame
        fig = plt.figure()
        ax = Axes3D(fig)

        # Label the axes if desired
        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Z")

        # Draw the point cloud
        ax.plot(X,Y,Z,marker="o",linestyle='None',color="white")

        # Draw the OBB
        OBBViewer(ax, points)

        ### Optimization ###
        #result = figOptimize(points, normals, length, fig_type)
        #result = figOptimize2(X, Y, Z, normals, length, fig_type)
        result, MX, MY, MZ, num, index = RANSAC(fig_type, points, normals, X, Y, Z, length)
        print(result)

        # Select the shape according to fig_type
        if fig_type==0:
            figure = F.sphere(result)
        elif fig_type==1:
            figure = F.plane(result)
        elif fig_type==2:
            figure = F.cylinder(result)
        else:
            figure = F.cone(result)

        # Draw the optimized shape
        plot_implicit(ax, figure.f_rep, points, AABB_size=1, contourNum=15)

        # Detect S_opt
        #MX, MY, MZ, num, index = CountPoints(figure, points, X, Y, Z, normals, epsilon=0.08*length, alpha=np.pi/9)

        print("num:{}".format(num))
        ax.plot(MX,MY,MZ,marker=".",linestyle='None',color="red")

        # Display the plot
        plt.show()

        # Remove the fitted points
        points = np.delete(points, index, axis=0)
        normals = np.delete(normals, index, axis=0)
        X, Y, Z = Disassemble(points)
Example #11
class RoadEstimation:
    def __init__(self):
        # specify topic and data type
        self.sub_bbox_1 = rospy.Subscriber("camera1/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_bbox_6 = rospy.Subscriber("camera6/detection/vision_objects",
                                           DetectedObjectArray,
                                           self.bbox_array_callback)
        self.sub_pcd = rospy.Subscriber("/points_raw", PointCloud2,
                                        self.pcd_callback)
        # self.sub_pose = rospy.Subscriber("/current_pose", PoseStamped, self.pose_callback)

        # Publisher
        self.pub_intersect_markers = rospy.Publisher(
            "/vision_objects_position_rviz", MarkerArray, queue_size=10)
        self.pub_plane_markers = rospy.Publisher("/estimated_plane_rviz",
                                                 MarkerArray,
                                                 queue_size=10)
        self.pub_plane = rospy.Publisher("/estimated_plane",
                                         Plane,
                                         queue_size=10)
        self.pub_pcd_inlier = rospy.Publisher("/points_inlier",
                                              PointCloud2,
                                              queue_size=10)
        self.pub_pcd_outlier = rospy.Publisher("/points_outlier",
                                               PointCloud2,
                                               queue_size=10)

        self.cam1 = camera_setup_1()
        self.cam6 = camera_setup_6()
        self.plane = Plane3D(-0.157, 0, 0.988, 1.9)
        self.plane_world = None
        self.plane_tracker = None
        self.sac = RANSAC(Plane3D,
                          min_sample_num=3,
                          threshold=0.22,
                          iteration=200,
                          method="MSAC")

        self.tf_listener = TransformListener()
        self.tfmr = Transformer()
        self.tf_ros = TransformerROS()

    def display_bboxes_in_world(self, cam, bboxes):
        vis_array = MarkerArray()
        xlist, ylist = [], []
        for box_id, bbox in enumerate(bboxes):
            d, C = cam.bounding_box_to_ray(bbox)
            intersection = self.plane.plane_ray_intersection(d, C)
            # print(intersection[0,0],'\t', intersection[1,0],'\t', intersection[2,0])
            marker = visualize_marker(intersection,
                                      mkr_id=box_id,
                                      scale=0.5,
                                      frame_id="velodyne",
                                      mkr_color=[0.0, 0.8, 0.0, 1.0])
            vis_array.markers.append(marker)
            xlist.append(intersection[0, 0])
            ylist.append(intersection[1, 0])

        self.pub_intersect_markers.publish(vis_array)

        # plt.pause(0.001)

    def bbox_array_callback(self, msg):
        if msg.header.frame_id == "camera1":
            cam = self.cam1
        elif msg.header.frame_id == "camera6":
            cam = self.cam6
        else:
            rospy.logwarn(
                "unrecognized frame id %s, bounding box callback in road estimation failed",
                msg.header.frame_id)
            return

        # rospy.loginfo("camera {:d} message received!!".format(camera.id))
        bboxes = []
        for obj in msg.objects:
            # rospy.loginfo("{}".format(obj.label))
            # rospy.loginfo("x:{} y:{} width:{} height:{} angle:{}".format(obj.x, obj.y, obj.width, obj.height, obj.angle))
            bbox = BoundingBox(obj.x,
                               obj.y,
                               obj.width,
                               obj.height,
                               obj.angle,
                               label=obj.label)
            bboxes.append(bbox)
        self.display_bboxes_in_world(cam, bboxes)

    def estimate_plane(self, pcd):
        # threshold_z = [2.0, -0.5]
        # pcd_z = clip_pcd_by_distance_plane(pcd, self.plane, threshold_z, in_only=True)
        vec1 = np.array([1, 0, 0])
        vec2 = np.array([0, 0, 1])
        pt1 = np.array([0, 0, 0])
        threshold = [-3.0, 6.0]
        plane_from_vec = Plane3D.create_plane_from_vectors_and_point(
            vec1, vec2, pt1)
        pcd_close = clip_pcd_by_distance_plane(pcd,
                                               plane_from_vec,
                                               threshold,
                                               in_only=True)

        pcd_close = pcd_close.extract_low()

        seed = 0
        np.random.seed(seed)
        plane1, _, _, _ = self.sac.ransac(pcd_close.data.T,
                                          constraint=self.plane.param,
                                          constraint_threshold=0.5)
        # vis(plane1, pcd, dim_2d=True)

        # normal = vec1.reshape([-1,1]) / np.linalg.norm(vec1)
        # depth = np.matmul(pcd_close.data.T , normal).reshape([-1])
        distance = plane1.distance_to_plane(pcd_close.data.T)
        # threshold_outer = 0.3
        # threshold_inner = 0.1
        # mask_outer = distance < threshold_outer
        # mask_inner = distance < threshold_inner
        # bin_dist = 5.0
        # depth_min = np.min(depth)
        # bin_num = int((np.max(depth) -  depth_min)/ bin_dist) + 1
        # for i in range(bin_num):
        #     depth_thred_min, depth_thred_max = i*bin_dist+depth_min, (i+1)*bin_dist+depth_min
        #     mask_depth = np.logical_and(depth > depth_thred_min, depth < depth_thred_max)
        #     part_inner = np.logical_and(mask_depth, mask_inner)
        #     part_outer = np.logical_and(mask_depth, mask_outer)
        #     sum_outer = np.sum(part_outer)
        #     sum_inner = np.sum(part_inner)
        #     if sum_outer == 0:
        #         ratio = 1
        #     else:
        #         ratio = float(sum_inner) / sum_outer
        #     if not ratio == 1:
        #         print(i, "{:.1f}".format(depth_thred_min), "{:.1f}".format(depth_thred_max), sum_inner, sum_outer, "{:.4f}".format(ratio))

        print('Plane params:', plane1.param.T)
        threshold_inlier = 0.15
        pcd_inlier = pcd_close.data[:, distance <= threshold_inlier]
        pcd_outlier = pcd_close.data[:, distance > threshold_inlier]
        return plane1, pcd_inlier, pcd_outlier

    def create_and_publish_plane_markers(self,
                                         plane,
                                         frame_id='velodyne',
                                         center=None,
                                         normal=None):
        if normal is None:
            v1 = np.array([[0, 0, 1.0]]).T
        else:
            v1 = normal
        v2 = np.array([[plane.a, plane.b, plane.c]]).T
        q_self = Quaternion_self.create_quaternion_from_vector_to_vector(
            v1, v2)
        q = Quaternion(q_self.x, q_self.y, q_self.z, q_self.w)
        euler = np.array(euler_from_quaternion(
            (q.x, q.y, q.z, q.w))) * 180 / np.pi
        print("Euler: ", euler)
        marker_array = MarkerArray()
        if center is None:
            center = [10, 0, (-plane.a * 10 - plane.d) / plane.c]
        marker = visualize_marker(center,
                                  frame_id=frame_id,
                                  mkr_type='cube',
                                  orientation=q,
                                  scale=[20, 10, 0.05],
                                  lifetime=30,
                                  mkr_color=[0.0, 0.8, 0.0, 0.8])
        marker_array.markers.append(marker)
        return marker_array

    # @profile # profiling for analysis
    def pcd_callback(self, msg):
        rospy.logwarn("Getting pcd at: %d.%09ds, (%d,%d)",
                      msg.header.stamp.secs, msg.header.stamp.nsecs,
                      msg.height, msg.width)

        pcd_original = pointcloud2_to_xyz_array(msg)

        pcd = PointCloud(pcd_original.T)

        self.plane, pcd_inlier, pcd_outlier = self.estimate_plane(pcd)
        transform_matrix, trans, rot, euler = get_transformation(
            frame_from='/velodyne',
            frame_to='/world',
            time_from=msg.header.stamp,
            time_to=msg.header.stamp,
            static_frame='/world',
            tf_listener=self.tf_listener,
            tf_ros=self.tf_ros)
        if transform_matrix is not None:
            plane_world_param = np.matmul(
                np.linalg.inv(transform_matrix).T,
                np.array(
                    [[self.plane.a, self.plane.b, self.plane.c,
                      self.plane.d]]).T)
            plane_world_param = plane_world_param / np.linalg.norm(
                plane_world_param[0:3])

            if self.plane_tracker is None:
                self.plane_tracker = Tracker(msg.header.stamp,
                                             plane_world_param)
            else:
                self.plane_tracker.predict(msg.header.stamp)
                self.plane_tracker.update(plane_world_param)
            print("plane_world:", plane_world_param.T)
            print("plane_traker:", self.plane_tracker.filter.x_post.T)

            # self.plane_world = Plane3D(plane_world_param[0,0], plane_world_param[1,0], plane_world_param[2,0], plane_world_param[3,0])
            self.plane_world = Plane3D(self.plane_tracker.filter.x_post[0, 0],
                                       self.plane_tracker.filter.x_post[1, 0],
                                       self.plane_tracker.filter.x_post[2, 0],
                                       self.plane_tracker.filter.x_post[3, 0])
            center_pos = np.matmul(
                transform_matrix,
                np.array([[
                    10, 0, (-self.plane.a * 10 - self.plane.d) / self.plane.c,
                    1
                ]]).T)
            center_pos = center_pos[0:3].flatten()
            # normal = np.matmul( transform_matrix, np.array([[0., 0., 1., 0.]]).T)
            # normal = normal[0:3]
            normal = None
            marker_array = self.create_and_publish_plane_markers(
                self.plane_world,
                frame_id='world',
                center=center_pos,
                normal=normal)
            self.pub_plane_markers.publish(marker_array)

        plane_msg = Plane()
        plane_msg.coef[0], plane_msg.coef[1], plane_msg.coef[
            2], plane_msg.coef[
                3] = self.plane.a, self.plane.b, self.plane.c, self.plane.d

        self.pub_plane.publish(plane_msg)

        # pcd_msg_inlier = create_point_cloud(pcd_inlier.T, frame_id='velodyne')
        # pcd_msg_outlier = create_point_cloud(pcd_outlier.T, frame_id='velodyne')
        pcd_msg_inlier = xyz_array_to_pointcloud2(pcd_inlier.T,
                                                  stamp=msg.header.stamp,
                                                  frame_id='velodyne')
        pcd_msg_outlier = xyz_array_to_pointcloud2(pcd_outlier.T,
                                                   stamp=msg.header.stamp,
                                                   frame_id='velodyne')
        self.pub_pcd_inlier.publish(pcd_msg_inlier)
        self.pub_pcd_outlier.publish(pcd_msg_outlier)

        rospy.logwarn("Finished plane estimation")

    def pose_callback(self, msg):
        rospy.logdebug("Getting pose at: %d.%09ds", msg.header.stamp.secs,
                       msg.header.stamp.nsecs)
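The snippet does not include the script's entry point; a typical one for a node like this (assuming the usual rospy pattern and a hypothetical node name) would be:

if __name__ == '__main__':
    rospy.init_node('road_estimation')
    node = RoadEstimation()
    rospy.spin()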
Example #12
def main():
    # Parse input arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument(
        '--Path',
        default="../Oxford_dataset/stereo/centre",
        help='Path to dataset, Default:../Oxford_dataset/stereo/centre')
    Parser.add_argument(
        '--ransacEpsilonThreshold',
        type=float,
        default=0.01,
        help='Threshold used for deciding inlier during RANSAC, Default:0.01')
    Parser.add_argument(
        '--inlierRatioThreshold',
        type=float,
        default=0.8,
        help='Threshold to consider a fundamental matrix as valid, Default:0.8'
    )

    Args = Parser.parse_args()
    path = Args.Path
    epsilonThresh = Args.ransacEpsilonThreshold
    inlierRatioThresh = Args.inlierRatioThreshold

    # pre-process data to get undistorted images
    dataPrep.undistortImage(path_to_model='./model', path_to_images=path)

    # extract calibration matrix from the camera model
    K = dataPrep.extractCalibrationMatrix(path_to_model='./model')

    # collect the undistorted image files
    new_path = './undistort'
    filesnumber = sorted(glob.glob(new_path + "/frame*.png"))

    # extract calibration matrix
    K = dataPrep.extractCalibrationMatrix(path_to_model='./model')

    T = []
    T_own = []
    R = []
    H = np.identity(4)
    H_own = np.identity(4)

    for imageIndex in range(50, len(filesnumber) - 60):
        print('Image number:', imageIndex)
        # bgrImages, vizImages = extractImages(new_path, 20)
        # ------------Process pair of images -------------------------------------
        img1 = cv2.imread(new_path + "/frame" + str(imageIndex) + ".png", -1)

        # histogram equalization of the image
        equ1 = cv2.equalizeHist(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
        # blur the image
        img1_gray = cv2.GaussianBlur(equ1, (3, 3), 0)
        # second image
        img2 = cv2.imread(new_path + "/frame" + str(imageIndex + 1) + ".png",
                          -1)
        # histogram equalization of the image
        equ2 = cv2.equalizeHist(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
        # blur the image
        img2_gray = cv2.GaussianBlur(equ2, (3, 3), 0)

        # extract images from the input array
        pixelsImg1, pixelsImg2 = features.extractSIFTFeatures(
            img1_gray, img2_gray)
        # vizMatches(img1, img2, pixelsImg1, pixelsImg2) # visualize the feature matches before RANSAC

        F, inlierImg1Pixels, inlierImg2Pixels, _, _ = RANSAC(
            pixelsImg1, pixelsImg2, epsilonThresh, inlierRatioThresh)
        # vizMatches(img1, img2, inlierImg1Pixels, inlierImg2Pixels) # visualize after RANSAC

        E, Cset, Rset = extractPose.extractPose(F, K)
        # take points in image frame before checking chirality condition
        points1new = np.hstack(
            (np.array(inlierImg1Pixels), np.ones(
                (len(inlierImg1Pixels), 1)))).T
        points2new = np.hstack(
            (np.array(inlierImg2Pixels), np.ones(
                (len(inlierImg2Pixels), 1)))).T
        points1k = np.linalg.inv(K).dot(points1new)
        points1 = points1k.T
        points2k = np.linalg.inv(K).dot(points2new)
        points2 = points2k.T

        # check chirality and obtain the true pose
        newR, newT, X = checkChirality(Rset, Cset, points1, points2)

        # perform non-liner triangulation
        # X = triangulation.nonLinearTriangulation(inlierImg1Pixels, inlierImg2Pixels, H_own, X)

        temp_H = np.hstack((newR, newT.reshape(3, 1)))
        temp_H = np.vstack((temp_H, [0, 0, 0, 1]))
        H_own = np.matmul(H_own, temp_H)

        print('-----------------------own', H_own[0:3, 3])
        T_own.append(H_own[0:3, 3])

        #---------Inbuilt------------------
        # E_cv2, mask1 = cv2.findFundamentalMat(np.array(pixelsImg1), np.array(pixelsImg2), method=cv2.RANSAC,
        # 									  focal=964.828979, pp=(643.788025, 484.40799), prob=0.85, threshold=0.5)
        E_cv2, mask1 = cv2.findEssentialMat(np.array(pixelsImg1),
                                            np.array(pixelsImg2),
                                            method=cv2.RANSAC,
                                            focal=964.828979,
                                            pp=(643.788025, 484.40799),
                                            prob=0.85,
                                            threshold=3.0)
        points, r, t, mask = cv2.recoverPose(E_cv2,
                                             np.array(pixelsImg1),
                                             np.array(pixelsImg2),
                                             focal=964.828979,
                                             pp=(643.788025, 484.40799),
                                             mask=mask1)

        newH = np.hstack((r.T, -r.T.dot(t.reshape(3, 1))))
        newH = np.vstack((newH, [0, 0, 0, 1]))
        H = np.matmul(H, newH)
        T.append(H[0:3, 3])
        print(H[0:3, 3])
        print('--------------------------')
        #----------------------------------

        # visualize
        vizCameraPose(T_own, T)
    plt.legend()
    plt.show()
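The custom RANSAC call above estimates a fundamental matrix from pixel correspondences. Its inner model fit is not shown; the classic step it presumably builds on is the eight-point algorithm, sketched here for illustration (without Hartley normalization):

import numpy as np

def eight_point_F(pts1, pts2):
    # pts1, pts2: (N, 2) pixel coordinates, N >= 8. Returns a rank-2 F
    # satisfying x2^T F x1 = 0 in the least-squares sense.
    x1, y1 = pts1[:, 0], pts1[:, 1]
    x2, y2 = pts2[:, 0], pts2[:, 1]
    A = np.column_stack([x2 * x1, x2 * y1, x2,
                         y2 * x1, y2 * y1, y2,
                         x1, y1, np.ones(len(pts1))])
    _, _, Vt = np.linalg.svd(A)
    F = Vt[-1].reshape(3, 3)
    U, S, Vt = np.linalg.svd(F)
    S[2] = 0  # enforce the rank-2 constraint
    return U @ np.diag(S) @ Vt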
Example #13
class LandmarkClassifier(object):
    """Class for classifying landmarks
    """
    def __init__(self):
        #Subscribers
        self.image_sub = rospy.Subscriber('/ardrone/bottom/image_raw', Image,
                                          self.image_callback)
        self.sub_vicon = rospy.Subscriber('/vicon/ARDroneCarre/ARDroneCarre',
                                          TransformStamped,
                                          self.update_quadrotor_state)

        #Global variables
        self.image = Image
        self.bridge = CvBridge()
        self.quad_state = TransformStamped()
        self.T_body2vicon = np.identity(4)

        #wait for a message before starting processing
        rospy.wait_for_message('/ardrone/bottom/image_raw', Image)

        #time flags for detecting when bag file is done publishing
        self.current_time = rospy.Time.now().to_sec()
        self.last_msg_time = rospy.Time.now().to_sec()

        #Define landmark locations
        self.landmarks = np.array([[4.32, 8.05], [0.874, 5.49], [7.68, 4.24],
                                   [4.27, 1.23]])
        #list of the closest save radius recorded for each landmark
        self.landmark_prev_save_rad = [
            float('inf'),
            float('inf'),
            float('inf'),
            float('inf')
        ]
        self.save_radius = 0.5

        #pose graph
        self.G = {}

        #load images for classification
        img1 = cv2.imread('cn_tower.png')
        img2 = cv2.imread('casa_loma.png')
        img3 = cv2.imread('nathan_philips_square.png')
        img4 = cv2.imread('princes_gates.png')
        self.landmark_ref_images = [img1, img2, img3, img4]
        self.landmark_names = [
            'cn_tower', 'casa_loma', 'nathan_philips_square', 'princes_gates'
        ]

        #Initialize ransac
        self.ransac = RANSAC()

    def image_callback(self, msg):
        """Callback for image topic
        """

        #store the current pose which will be attached to the current
        #camera frame
        current_quad_state = copy.deepcopy(self.quad_state)
        self.image = msg
        self.T_body2vicon[0, 3] = current_quad_state.transform.translation.x
        self.T_body2vicon[1, 3] = current_quad_state.transform.translation.y
        self.T_body2vicon[2, 3] = current_quad_state.transform.translation.z

        q = [
            current_quad_state.transform.rotation.x,
            current_quad_state.transform.rotation.y,
            current_quad_state.transform.rotation.z,
            current_quad_state.transform.rotation.w
        ]

        R = quaternion_matrix(q)

        self.T_body2vicon[0:3, 0:3] = R[0:3, 0:3]

        self.last_msg_time = rospy.Time.now().to_sec()

    def update_quadrotor_state(self, msg):
        """Callback for vicon topic
        """
        self.quad_state = msg

    def save_image(self, image, image_name):
        """Function for saving images to file
        """
        im_pil = IM.fromarray(image)
        b, g, r = im_pil.split()
        im_pil = IM.merge("RGB", (r, g, b))
        im_pil.save(image_name + '.png')

    def save_landmark_images(self):
        """Function that saves landmark images from project.bag, images
        that minimizes the distance between the drone and landmarks are
        saved
        """
        #run ros loop to save images
        while not rospy.is_shutdown():

            #Exit rosloop if project.bag is done publishing
            self.current_time = rospy.Time.now().to_sec()
            if (self.current_time - self.last_msg_time) > 2:
                break

            #Store the current pose and image
            T_b2vic = copy.deepcopy(self.T_body2vicon)
            img = copy.deepcopy(self.image)

            #convert image to opencv format
            cv2_img = self.bridge.imgmsg_to_cv2(img, "bgr8")

            #detect obstacle pixels
            pos = T_b2vic[0:2, 3]

            for i in range(self.landmarks.shape[0]):
                #landmark position
                l_pos = self.landmarks[i, :]

                rad = np.linalg.norm(pos - l_pos)
                #save the landmark if it will reduce the distance between drone and landmark
                if rad < self.save_radius and rad < self.landmark_prev_save_rad[
                        i]:
                    self.save_image(cv2_img, str(i))
                    self.G[i] = T_b2vic  #save pose for that landmark
                    self.landmark_prev_save_rad[i] = rad

    def classify_landmarks(self):
        """Function that classifies the landmarks based on the largest
        set of inliers from ransac
        """

        #Classify Landmarks
        for i in range(self.landmarks.shape[0]):
            #load saved images
            imgq = cv2.imread(str(i) + '.png')

            max_inliers = 0
            for j in range(len(self.landmark_ref_images)):
                #load reference image
                imgt = self.landmark_ref_images[j]

                #perform ransac outlier rejection
                inlier_count, avg_angle = self.ransac.ransac(imgq, imgt)

                #largest set of inliers record
                if inlier_count > max_inliers:
                    best_count = inlier_count
                    best_angle = avg_angle
                    max_inliers = inlier_count
                    best_idx = j

            #calculate landmark orientation
            R = self.G[i][0:3, 0:3]
            (phi, theta, psi) = euler_from_matrix(R, 'rxyz')
            landmark_orientation = psi + best_angle

            #print results
            print('Image :' + str(i) + '.png')
            print('location : ', self.landmarks[i])
            print('Landmark :' + self.landmark_names[best_idx])
            print('Landmark Orientation :' + str(landmark_orientation))
            print('inlier_count :' + str(best_count))
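The RANSAC class used by classify_landmarks returns an inlier count and an average rotation angle; its implementation is not shown. A rough analogue built on standard OpenCV calls (an illustrative sketch, not this project's class) could count homography inliers between two images like this:

import cv2
import numpy as np

def count_inliers(img_query, img_train):
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(img_query, None)
    kp2, des2 = orb.detectAndCompute(img_train, None)
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
    if len(matches) < 4:
        return 0  # findHomography needs at least four correspondences
    src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, mask = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    return int(mask.sum()) if mask is not None else 0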