Beispiel #1
0
def test_SLAM_init():
    """Run stereo ORB-SLAM2 over the EuRoC MAV dataset and pickle the map.

    Side effects: initializes the SLAM system from the EuRoC stereo config,
    reads paired cam0/cam1 images from /home/slam_data/mav0, tracks each
    pair, then dumps the final map points to out.pickle.
    """
    print("Initializing SLAM...")
    slam_obj = os2.SLAM()
    slam_obj.init("/slamdoom/libs/orbslam2/Vocabulary/ORBvoc.txt",
                  "/slamdoom/libs/orbslam2/Examples/Stereo/EuRoC.yaml",
                  "stereo", True)
    print("SLAM was successfully initialized!")
    input("Press key to continue...")
    fs0 = sorted(glob("/home/slam_data/mav0/cam0/data/*"))
    fs1 = sorted(glob("/home/slam_data/mav0/cam1/data/*"))
    # zip() stops at the shorter list, so report progress against that length;
    # the original printed len(fs1), which is misleading when the two camera
    # directories differ in size.
    n_frames = min(len(fs0), len(fs1))
    for i_frame, (f0, f1) in enumerate(zip(fs0, fs1)):
        print("frame {} from {}".format(i_frame, n_frames))
        img0 = cv2.imread(f0)
        img1 = cv2.imread(f1)
        # NOTE(review): cam1 is passed first and cam0 second — confirm this
        # matches the left/right ordering expected by the EuRoC.yaml calibration.
        slam_obj.track_stereo(img1, img0, time.time())
    out = slam_obj.getmap()
    with open("/home/slam_data/data_sets/out.pickle", "wb") as conn:
        pickle.dump(out, conn)
    print("SLAM was successfully continued!")
    input("Press key to finish...")
Beispiel #2
0
def test_SLAM_init():
    """Run monocular ORB-SLAM2 live on webcam frames (capture device 0).

    When flag_visualize_gridmap is enabled, also displays tracked keypoints
    per frame and refreshes the accumulated map every 100 frames.
    Runs until interrupted.
    """
    flag_visualize_gridmap = False
    cap = cv2.VideoCapture(0)
    print("Initializing SLAM...")
    slam_obj = os2.SLAM()
    slam_obj.init("/slamdoom/tmp/orbslam2/Vocabulary/ORBvoc.txt",
                  "../logitec.yaml", "mono", True)
    print("SLAM was successfully initialized!")
    if flag_visualize_gridmap:
        displayer = DisplayMap()
    i_frame = 0
    while True:
        i_frame += 1
        ret, frame = cap.read()
        # Skip failed grabs: the original ignored `ret` and would feed a None
        # frame into the tracker when the capture read failed.
        if not ret:
            continue
        slam_obj.track_mono(frame, time())

        if flag_visualize_gridmap:
            kps = slam_obj.get_feature_kps()
            # tracking_state() == 2 is treated as "tracker has a pose lock"
            # (presumably ORB-SLAM2's OK state) — TODO confirm against binding.
            displayer.new_frame(frame, kps, slam_obj.tracking_state() == 2)

            if i_frame % 100 == 0:
                pts = slam_obj.getmap()
                if pts is not None:
                    displayer.new_map(pts)
Beispiel #3
0
def test_SLAM_init():
    """Smoke-test RGB-D tracking by feeding 1000 synthetic random frames.

    Initializes the SLAM system from the TUM1 RGB-D config, then pushes one
    random 320x240 color frame and matching random depth map per second.
    """
    print("Initializing SLAM...")
    slam_obj = os2.SLAM()
    slam_obj.init("/slamdoom/libs/orbslam2/Vocabulary/ORBvoc.txt", "/slamdoom/libs/orbslam2/Examples/RGB-D/TUM1.yaml")
    print("SLAM was successfully initialized!")
    input("Press key to continue...")
    for _ in range(1000):
        # Uniform-random uint8 RGB image, 320x240x3.
        rgb = np.random.randint(256, size=320 * 240 * 3, dtype=np.uint8).reshape((320, 240, 3))
        # Random float32 depth map with values in [0, 50).
        depth = np.random.rand(320, 240, 1).astype("float32") * 50
        slam_obj.track(rgb, depth, time.time())
        sleep(1)
    print("SLAM was successfully continued!")
    input("Press key to finish...")
Beispiel #4
0
# Example script: RGB-D SP-SLAM on a live Intel RealSense camera with map display.
import pyrealsense2 as rs

# Configure depth and color streams: 640x480 @ 30 fps, z16 depth, bgr8 color.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

# NOTE(review): this flag is assigned but never read in the visible portion of
# the script — presumably used further down; verify.
flag_visualize_gridmap = False
# ret, frame = cap.read()
# print(frame.shape)
# exit()
print("Initializing SLAM...")
# NOTE(review): `os2` (the SLAM binding) and `DisplayMap` are not imported in
# this fragment — confirm they are brought in elsewhere in the file.
slam = os2.SLAM()
# slam_obj.init("/slamdoom/libs/orbslam2/Vocabulary/ORBvoc.txt", "../logitec.yaml", "mono", not flag_visualize_gridmap)
slam.init("/slamdoom/tmp/orbslam2/Vocabulary/ORBvoc.txt",
          "/SP-SLAM/Examples/RGB-D/realsense.yaml", "rgbd", False)
print("SLAM was successfully initialized!")

displayer = DisplayMap()

i_frame = 0
while True:
    i_frame += 1

    # Wait for a coherent pair of frames: depth and color
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    # (loop body continues beyond this excerpt)
Beispiel #5
0
# Example script: RGB-D ORB-SLAM2 on a live Intel RealSense camera.
import ORBSLAM2 as os2
from time import time, sleep
# import pickle
# from gridmap import to_gridmap, DisplayMap

import pyrealsense2 as rs

# Configure depth and color streams: 640x480 @ 30 fps, z16 depth, bgr8 color.
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
pipeline.start(config)

print("Initializing SLAM...")
slam_obj = os2.SLAM()
slam_obj.init("/slamdoom/tmp/orbslam2/Vocabulary/ORBvoc.txt", "../realsense.yaml", "rgbd", True)
print("SLAM was successfully initialized!")

i_frame = 0
while True:
    i_frame += 1

    # Wait for a coherent pair of frames: depth and color
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    # Drop iterations where either stream failed to deliver a frame.
    if not depth_frame or not color_frame:
        continue

    # NOTE(review): `np` (numpy) is not imported in this fragment — confirm it
    # is imported elsewhere in the file, otherwise this line raises NameError.
    depth_image = np.asanyarray(depth_frame.get_data())