def main(args):
    """Play a video frame-by-frame, matching features between consecutive frames.

    Expects ``args.video`` (input path), ``args.save`` (optional output path),
    and ``args.headless`` (skip the on-screen display).

    Fixes vs. original: the capture and writer handles were never released
    (resource leak); cleanup now happens in a ``finally`` block.
    """
    capture = cv2.VideoCapture(args.video)
    W = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    H = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    FPS = int(capture.get(cv2.CAP_PROP_FPS))

    writer = None
    if args.save:
        # NOTE(review): the writer keeps the pre-downscale (W, H); the frames
        # written below come from Frame.getAnnotated(), presumably at the
        # original capture size — confirm they match, or VideoWriter drops them.
        writer = cv2.VideoWriter(args.save,
                                 cv2.VideoWriter_fourcc(*"mp4v"),
                                 FPS, (W, H))

    # Cap the display width at 1024 px, preserving aspect ratio.
    if W > 1024:
        downscale = 1024.0 / W
        H = int(H * downscale)
        W = 1024

    if not args.headless:
        display2d = Display2D(W, H)

    ret, frame = capture.read()
    if not ret:
        print("Fatal - failed to load video")
        capture.release()  # fix: release before bailing out
        raise SystemExit(-1)  # was exit(-1); SystemExit needs no site module

    matcher = FrameMatcher()
    prevframe = Frame(frame)
    try:
        while capture.isOpened():
            ret, frame = capture.read()
            if not ret:
                print("No more frames to read")
                break
            frame = Frame(frame)
            # Only match when both frames found a region of interest.
            if frame.roi is not None and prevframe.roi is not None:
                matches = matcher(frame, prevframe)
                print('matches:', len(matches))
            else:
                print('no ROI')
            if not args.headless:
                display2d.paint(cv2.resize(frame.getAnnotated(), (W, H)))
            if writer is not None:
                writer.write(frame.getAnnotated())
            prevframe = frame
    finally:
        # fix: the original leaked both handles on every exit path
        capture.release()
        if writer is not None:
            writer.release()
# Optional seek: jump to the frame index given in $SEEK before processing.
# NOTE(review): an unguarded int(os.getenv("SEEK")) raises TypeError when SEEK
# is unset — presumably a `if os.getenv("SEEK") is not None:` guard precedes
# this chunk (the sibling script guards it); confirm against the full file.
cap.set(cv2.CAP_PROP_POS_FRAMES, int(os.getenv("SEEK")))

# Cap the working width at 1024 px; scale focal length and height to match so
# the intrinsics built below stay consistent with the downscaled frames.
if W > 1024:
    downscale = 1024.0 / W
    F *= downscale
    H = int(H * downscale)
    W = 1024
print("using camera %dx%d with F %f" % (W, H, F))

# camera intrinsics
# K maps camera coordinates to pixel coordinates: focal length F on the
# diagonal, principal point assumed at the image centre (W//2, H//2).
K = np.array([[F, 0, W // 2], [0, F, H // 2], [0, 0, 1]])
Kinv = np.linalg.inv(K)

# create 2-D display
# $HEADLESS suppresses the on-screen window (e.g. for batch runs).
if os.getenv("HEADLESS") is None:
    disp2d = Display2D(W, H)

slam = SLAM(W, H, K)

# Disabled debugging path: replay a previously serialized map in the 3-D viewer.
"""
mapp.deserialize(open('map.json').read())
while 1:
    disp3d.paint(mapp)
    time.sleep(1)
"""

# Optional ground-truth trajectory (argv[2] = .npz with a 'pose' array),
# used for evaluation against the estimated poses.
gt_pose = None
if len(sys.argv) >= 3:
    gt_pose = np.load(sys.argv[2])['pose']
    # add scale param?
    # NOTE(review): the hard-coded *50 rescales the translation column of every
    # ground-truth pose — presumably a dataset-specific scale; confirm.
    gt_pose[:, :3, 3] *= 50
# Read camera properties from the opened `video` capture (defined above this
# chunk) and drive the per-frame processing loop.
W = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
H = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Focal length in pixels, overridable via $F (default 525).
F = float(os.getenv("F", "525"))
CNT = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

# camera intrinsics: focal length on the diagonal, principal point at centre.
# NOTE(review): K/Kinv are built from the PRE-downscale F/W/H, yet the branch
# below rescales F/W/H without rebuilding K, while the frames handed to
# process_frame are never resized. One of the two is stale — confirm which
# resolution process_frame expects before changing the order.
K = np.array([[F, 0, W // 2], [0, F, H // 2], [0, 0, 1]])
Kinv = np.linalg.inv(K)

# Optional seek: start at the frame index given in $SEEK.
if os.getenv("SEEK") is not None:
    video.set(cv2.CAP_PROP_POS_FRAMES, int(os.getenv("SEEK")))

# Cap the display size at 1024 px wide, preserving aspect ratio.
if W > 1024:
    downscale = 1024.0 / W
    F *= downscale
    H = int(H * downscale)
    W = 1024
print("using camera %dx%d with F %f" % (W, H, F))

disp = Display2D("Display Window", W, H)  # 2d display window

i = 0
while video.isOpened():
    ret, frame = video.read()
    if not ret:  # fix: idiomatic truthiness instead of `ret == True`
        break
    process_frame(frame)
    i += 1

video.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
    # Usage: script.py <video.mp4>
    if len(sys.argv) < 2:
        print('%s <video.mp4>' % sys.argv[0])
        exit(-1)

    cap = cv2.VideoCapture(sys.argv[1])

    # frame props — process at half the native resolution
    W = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) // 2
    H = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) // 2

    # focal length
    # NOTE(review): F = 1 looks like a placeholder focal length — confirm the
    # intended calibration before relying on metric output.
    F = 1

    # camera matrix
    # adjust mapping: img coords have origin at center, digital have origin bottom-left
    K = np.array([[F, 0, W//2], [0, F, H//2], [0, 0, 1]])

    slam = SLAM(W, H, K)
    disp = Display2D(W, H)

    while cap.isOpened():
        ret, frame = cap.read()
        # fix: check `ret` BEFORE touching the frame — the original resized
        # first, which passes None to cv2.resize and crashes at end of stream.
        if not ret:
            break
        frame = cv2.resize(frame, (W, H))
        img = slam.process_frame(frame)
        disp.paint(img)

    cap.release()  # fix: the original never released the capture handle
        # Continuation of the reader's __init__ body — the `def` header and the
        # class declaration are above this chunk.
        path = os.path.expanduser(path)
        # KITTI layout: image_0 holds the left camera frames, image_1 the right.
        self.left = ImageReader(self.listdir(os.path.join(path, 'image_0')))
        self.right = ImageReader(self.listdir(os.path.join(path, 'image_1')))
        # Stereo sequences must be aligned frame-for-frame.
        assert len(self.left) == len(self.right)

    def listdir(self, dir):
        """Return full paths of every .png in *dir*, sorted by numeric stem."""
        files = [_ for _ in os.listdir(dir) if _.endswith('.png')]
        return [os.path.join(dir, _) for _ in self.sort(files)]

    def sort(self, xs):
        # Order by the numeric value of the stem, e.g. '000010.png' -> 10.0,
        # so '2.png' sorts before '10.png' (plain string sort would not).
        # NOTE(review): assumes every name is '<number>.png' — confirm dataset.
        return sorted(xs, key=lambda x: float(x[:-4]))

    def __getitem__(self, idx):
        # Indexing the reader yields only the LEFT image of the stereo pair.
        return self.left[idx]

    def __len__(self):
        return len(self.left)


if __name__ == '__main__':
    # Smoke test: page through KITTI sequence 00 and display each left frame.
    # 1241x376 is the native KITTI image size.
    display = Display2D(1241, 376)
    t = KittiDataReader('dataset/sequences/00')
    for i in range(len(t.left)):
        path = t.left.ids[i]
        img = np.array(cv2.imread(path))
        display.paint(img)
# Run the SLAM pipeline over a KITTI sequence selected via --type.
if args.type == 'KITTI':
    # Kitti configuration
    config = KittiConfig()
    # Kitti dataset loader
    dataset = KittiLoader(args.path)
    # camera object — intrinsics and image size come from the dataset's
    # calibration (dataset.cam), not from hard-coded constants.
    camera = Camera(dataset.cam.fx, dataset.cam.fy,
                    dataset.cam.cx, dataset.cam.cy,
                    dataset.cam.width, dataset.cam.height)
    # global SLAM object
    slam = PYSLAM(config)
    # display
    viewer = Display2D(slam, config)

    frames = []
    for i in range(len(dataset)):
        img = dataset[i]
        # extract features
        feature = Feature(img, config, camera)
        feature.extract()
        # init frame — identity pose (np.eye(4)) as the initial guess
        frame = Frame(i, np.eye(4), feature, camera)
        # Once initialized, feed frames to the tracker; otherwise fall through
        # to the initialization branch.
        if slam.is_initialized():
            img = slam.observe(frame)
        else:
            # NOTE: chunk is truncated here — the else branch continues beyond
            # this view; do not assume its contents.