if __name__ == '__main__':
    # @MentalMap
    # Calibration is needed to undistort
    # Perspective reverses the image
    parser = ArgumentParser()
    parser.add_argument('calibration', help="path to json calibration file")
    parser.add_argument('map_path', help="path to maps yaml file")
    parser.add_argument('images_path', help="path to folder with images")  # e.g. '/home/mwm/Desktop/datadumps/01-07-19-drive-v1_22'
    parser.add_argument('assoc_file', help="path to assoc.json file")  # e.g. './assets/maps/testmap1/assoc.json'
    args = parser.parse_args()

    img_shape = 320, 240
    calibration = VideoCalibration(args.calibration, img_shape, new_shape=img_shape)
    mapper = Mapper2(map_shape=(800, 800, 3), type='additive')
    # NOTE: 'marigin' (sic) is the keyword InversePerspective actually declares.
    perspective = InversePerspective(img_shape=(250, 250), desired_shape=(100, 130), marigin=0.25)

    id_and_pose = get_poses(args.map_path)
    # Fit the 3D->2D ground-plane transform from all keyframe poses.
    xy_transformation = Pose3Dto2D([idp[1] for idp in id_and_pose])

    for kf_id, pose, ts, filename, img in data_iterator(id_and_pose, args.images_path, args.assoc_file):
        # (removed a redundant `if True:` wrapper that added nothing but indentation)
        cv2.imshow('img', img)
        undi = calibration.undistort(img)
        undi[:100, :, :] = 0  # zero out the top rows before the inverse-perspective warp
        img = perspective.inverse(undi)
        cv2.imshow('undi', img)
        cv2.waitKey(0)

        new_pose = xy_transformation.transform.dot(pose)
        # NOTE(review): Rotation.from_dcm was deprecated in SciPy 1.4 and removed
        # in 1.6 (renamed to from_matrix) — confirm the pinned SciPy version.
        new_angles = transform.Rotation.from_dcm(new_pose[:3, :3]).as_euler('xyz')
        position = new_pose[:2, 3]
        # Flip x: image/map coordinate handedness differs from the SLAM frame.
        offset_position = np.array((-position[0], position[1]))
        mapper.update(img, angle=(90 - np.rad2deg(new_angles[2])), position=offset_position)
import os

import cv2
import numpy as np
import pandas as pd
import quaternion

from logic.mapper import Mapper, calculate_position


def row_to_quaternion(row):
    """Build a quaternion from a trajectory row.

    Column order follows the TUM trajectory convention (q1..q3 = x, y, z,
    q4 = w), so the scalar part q4 is passed first to np.quaternion.
    """
    return np.quaternion(row.q4, row.q1, row.q2, row.q3)


if __name__ == '__main__':
    # TODO(review): VideoCalibration and InversePerspective are used below but
    # never imported in this file — add the proper project imports.
    cal_data = r"C:\repositories\autonomic_car\selfie_car\src\pc\settings\camera_calibration\calib.json"
    cal = VideoCalibration(cal_data, (320, 240))
    dirpath = r"C:\repositories\autonomic_car\selfie_car\data_intake4\v1.011"
    # NOTE: 'marigin' (sic) is the keyword InversePerspective actually declares.
    perspective = InversePerspective(img_shape=(250, 250), desired_shape=(80, 160), marigin=0.25)
    mapper = Mapper(map_size=(1200, 1200), scale=300.0, type='additive')

    # Visual-log rows without an image filename carry no usable frame — drop them.
    df = pd.read_csv(os.path.join(dirpath, 'vis_v1.csv'))
    df = df[~df['filenames'].isnull()]
    columns = ['time', 'x', 'y', 'z', 'q1', 'q2', 'q3', 'q4']
    slam_df = pd.read_csv(os.path.join(dirpath, 'slam_trajectory.csv'), sep=' ', names=columns)
    # imu_df = calculate_position(df, dirpath)

    for i, row in slam_df.iterrows():
        # if pd.isnull(row['filenames']):
        #     continue
        # Match each SLAM pose to the nearest-in-time visual-log entry.
        slamstamp = float(row['time'])
        diff = df['time'] - slamstamp
        closest_args = diff.abs().argsort()
        # NOTE(review): loop body appears truncated here — closest_args is
        # computed but never consumed in this chunk; confirm against the
        # original file.
# Fragment of a larger __main__ script: parser construction begins (and the
# processing loop continues) outside this chunk.
parser.add_argument("images_path", help="path to folder with images")  # e.g. '/home/mwm/Desktop/datadumps/01-07-19-drive-v1_22'
parser.add_argument("assoc_file", help="path to assoc.json file")  # e.g. './assets/maps/testmap1/assoc.json'
args = parser.parse_args()

img_shape = 320, 240
calibration = VideoCalibration(args.calibration, img_shape, new_shape=img_shape)

# Mapper2 accumulates inverse-perspective patches onto an 800x800 BGR canvas.
mapper = Mapper2(
    map_shape=(800, 800, 3),
    coord_coef=((50, 400), (150, 400)),
    rot_cent_coef=((0.5, 0), (1.1, 0)),
    type="additive",
)
perspective = InversePerspective(
    perspective_area=HAXOR_SRC3,
    img_shape=(250, 250),
    desired_shape=(100, 130),
    marigin=0.25,  # NOTE: 'marigin' (sic) is the keyword InversePerspective declares
)
perspective.calculate_roi_mask()

my_osmap = OsmapData.from_map_path(args.map_path)
id_and_pose = my_osmap.id_and_pose()
transformator = Transform3Dto2D(my_osmap)
to_world = Pose3DtoWorld()

# @TODO fix this, this is an issue of calibration not being accurate enough
calibration.dist[0, 1] = 0.12
calibration.calculate_calibration()
print("new dst", calibration.dist)
print("roi", calibration.roi)

mask = np.zeros(perspective.canvas_shape, np.uint8)
a = 20
b = 165