Example No. 1
    # Assumes `cv2` is imported and that FrameGenerator, Map and Display3D
    # are provided by the surrounding project.
    def __init__(self, inputdirectory, feature="ORB"):
        self.input_directory = inputdirectory

        # Use ORB features with a brute-force Hamming matcher
        self.detector = cv2.ORB_create()
        self.bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        # Frame extraction, the global map and the 3D viewer
        self.frame_generator = FrameGenerator(self.detector)
        self.list_of_frames = []
        self.map = Map()
        self.three_dim_viewport = Display3D()

        # Feature bookkeeping dictionaries
        self.feature_mapper = {}
        self.feature_history = {}
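
For context, a minimal self-contained sketch of how the ORB detector and brute-force Hamming matcher configured above are typically used to match features between two frames. The image paths and variable names are illustrative assumptions, not part of the original example.

import cv2

# Illustrative only: load two grayscale frames (paths are placeholders)
img1 = cv2.imread("frame1.png", cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread("frame2.png", cv2.IMREAD_GRAYSCALE)

detector = cv2.ORB_create()
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

# Detect keypoints and compute binary ORB descriptors for each frame
kp1, des1 = detector.detectAndCompute(img1, None)
kp2, des2 = detector.detectAndCompute(img2, None)

# Brute-force matching with cross-check, sorted by descriptor distance
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
print("matched %d keypoints" % len(matches))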
Example No. 2
        print("Map:      %d points, %d frames" %
              (len(self.mapp.points), len(self.mapp.frames)))
        print("Time:     %.2f ms" % ((time.time() - start_time) * 1000.0))
        print(np.linalg.inv(f1.pose))


if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("%s <video.mp4>" % sys.argv[0])
        exit(-1)

    disp2d, disp3d = None, None

    # Create the 3D display unless running headless (HEADLESS env var set)
    if os.getenv("HEADLESS") is None:
        disp3d = Display3D()

    cap = cv2.VideoCapture(sys.argv[1])

    # Video frame size, frame count, and focal length (override via F env var)
    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    CNT = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    F = float(os.getenv("F", "525"))

    # Optionally seek to a starting frame given by the SEEK env var
    if os.getenv("SEEK") is not None:
        cap.set(cv2.CAP_PROP_POS_FRAMES, int(os.getenv("SEEK")))

    # Downscale wide videos to 1024 px and scale the focal length to match
    if W > 1024:
        downscale = 1024.0 / W
        F *= downscale
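
The example ends right after the downscale factor is computed. For reference, here is a minimal self-contained sketch of the kind of read-and-resize loop that typically follows; the resize target and the `process_frame` hook are assumptions for illustration, not taken from the original.

import sys
import cv2

cap = cv2.VideoCapture(sys.argv[1])
W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Downscale wide frames to 1024 px, matching the focal-length adjustment above
    if W > 1024:
        downscale = 1024.0 / W
        frame = cv2.resize(frame, (1024, int(H * downscale)))

    # hypothetical hook: hand the frame to the SLAM pipeline
    # process_frame(frame)

cap.release()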