def transform_folder_to_world(sub_dir):
    """Export every frame of *sub_dir* into world coordinates as CSV files.

    For each frame under ``base_dir/sub_dir`` the point cloud is rigidly
    transformed by its egomotion via ``transform_frame_to_world``, scaled to
    integer centimeters, and written to ``base_res_dir/sub_dir`` alongside
    placeholder (all-zero) label and egomotion files.  Frames whose output
    already exists are skipped, so an interrupted export can be resumed.

    Args:
        sub_dir: Name of the video folder (relative to the dataset roots).
    """
    # Raw strings: in a normal literal "\D" and "\T" are invalid escape
    # sequences (DeprecationWarning today, an error in future Pythons).
    base_dir = r"E:\Datasets\DataHack\Train"
    base_res_dir = r"E:\Datasets\DataHack\World\Train"
    print('Working on sub_dir: {}'.format(sub_dir))
    for idx in data_utils.enumerate_frames(os.path.join(base_dir, sub_dir)):
        pc_file = os.path.join(
            base_res_dir, data_utils.frame_to_filename(sub_dir, idx, 'pointcloud'))
        if os.path.exists(pc_file):
            continue  # already exported — resume support
        pc, ego, label = data_utils.read_all_data(
            os.path.join(base_dir, sub_dir), idx)
        # Transform xyz into the world frame, then re-attach the 4th
        # (intensity) column untouched.
        ego_pc = transform_frame_to_world(pc, ego)
        ego_pc = np.concatenate((ego_pc, pc[:, 3:4]), -1)
        # Store coordinates as integer centimeters (x100) to shrink the CSVs.
        df = (pd.DataFrame(ego_pc) * 100).astype(int)
        res_dir = os.path.join(base_res_dir, sub_dir)
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(res_dir, exist_ok=True)
        file_name = os.path.join(res_dir, str(idx).zfill(7))
        df.to_csv(file_name + '_pointcloud.csv', header=None, index=False)
        # Placeholder outputs: zero labels per point, identity (zero) egomotion.
        pd.DataFrame([0] * df.shape[0]).to_csv(
            file_name + '_labels.csv', header=None, index=False)
        pd.DataFrame([0.] * 6).T.to_csv(
            file_name + '_egomotion.csv', header=None, index=False)
#elMin = 9999 #azMin = 9999 #elMax = -9999 #azMax = -9999 # agg_point_cloud_list=[] prev_img=np.zeros((EL_NUM,AZ_NUM)) # init imgSum = np.zeros((EL_NUM,AZ_NUM)) prev_img_list=[] idxXY_list = [] label_list = [] fn_list = [] cloud_len_list=[] for idx, frame in enumerate(data_utils.enumerate_frames(video_dir)): if idx < min_idx or idx % decimate != 0: continue pc, ego, label,label_pred_new,fn = data_utils.read_all_data(video_dir, frame) # label=label_pred_new ego_rt = RotationTranslationData(vecs=(ego[:3], ego[3:])) ego_pc = ego_rt.apply_transform(pc[:, :3]) ego_pc = np.concatenate((ego_pc, pc[:, 3:4]), -1) # labeled_pc = np.concatenate((ego_pc, label), -1) # agg_point_cloud_list.append(labeled_pc) # img = np.zeros((EL_NUM,AZ_NUM)) # # img1 = np.zeros((EL_NUM,AZ_NUM)) # el = np.rad2deg(np.arctan2(pc[:,2],pc[:,0])) # az = np.rad2deg(np.arctan2(pc[:,1],pc[:,0])) # rng = np.sqrt(pc[:,0]**2 + pc[:,1]**2 + pc[:,2]**2) # idxEl = np.round((elMax - el)/elRes).astype('uint32')
from visualizations.vis import pcshow
from utilities import data_utils
if __name__ == '__main__':
    # Visualize a rolling window of world-aligned frames, re-projected into
    # the current sensor frame, trimmed to a maximum viewing distance.
    base_dir = os.path.dirname(os.getcwd())
    video_dir = os.path.join(base_dir, 'data_examples', 'test_video')
    window = []          # last `window_size` labeled world-frame clouds
    window_size = 10
    start_idx = 0        # first frame to display
    step = 1             # keep every `step`-th frame
    dist_limit = 100     # drop points farther than this from the sensor
    for idx in data_utils.enumerate_frames(video_dir):
        if idx < start_idx or idx % step != 0:
            continue
        pc_file = data_utils.frame_to_filename(video_dir, idx, 'pointcloud')
        pc, ego, label = data_utils.read_all_data(video_dir, idx)
        # World alignment: rotate/translate xyz by the frame's egomotion,
        # keep the intensity column, then append the per-point labels.
        ego_rt = RotationTranslationData(vecs=(ego[:3], ego[3:]))
        world_xyz = ego_rt.apply_transform(pc[:, :3])
        world_cloud = np.concatenate((world_xyz, pc[:, 3:4]), -1)
        window.append(np.concatenate((world_cloud, label), -1))
        # Keep only the most recent `window_size` frames.
        if len(window) > window_size:
            window = window[1:]
        merged = np.concatenate(window, 0)
        # Bring the aggregate back into the current frame for display.
        view = ego_rt.inverse().apply_transform(merged[:, :3])
        view = np.concatenate((view, merged[:, 3:]), -1)
        view = view[np.linalg.norm(view[:, :3], axis=1) < dist_limit]
        pcshow(view, on_screen_text=pc_file, max_points=32000 * window_size)
# of the BSD 3-Clause license. See the LICENSE file for details. import os os.chdir("D:\\DataHack2018\\from_shlomi") import os.path as osp from visualizations.vis import pcshow import numpy as np from utilities import data_utils if __name__ == '__main__': # base_dir = os.path.dirname(os.getcwd()) base_dir = os.getcwd() #video_dir = os.path.join(base_dir, 'data_examples', 'test_video') # video_dir='D:\\DataHack18\\Dataset\\Train\\vid_2' #video_dir='D:\\DataHack18\\Dataset\\DataHack2018-master\\data_examples\\test_video' video_dir = 'D:\\DataHack2018\\from_shlomi\\data\\Test\\vid_21' frame_num = data_utils.count_frames(video_dir) min_idx = 900 decimate = 1 for idx, frame in enumerate(data_utils.enumerate_frames(video_dir)): if idx < min_idx or idx % decimate != 0: continue pc, ego, label, label_tmp, fn = data_utils.read_all_data( video_dir, frame) labeled_pc = np.concatenate((pc, label), -1) pcshow(labeled_pc, on_screen_text=osp.join(video_dir, str(frame)), max_points=80000) if idx == min_idx + 100: break
#import pdb;pdb.set_trace() frame_num = data_utils.count_frames(video_dir) min_idx = 31 decimate = 1 #elMin = 9999 #azMin = 9999 #elMax = -9999 #azMax = -9999 ego_last = 0 img_last = np.zeros((EL_NUM, AZ_NUM, 3)).astype('uint8') #fgbg = cv2.createBackgroundSubtractorMOG2() for idx, frame in enumerate(data_utils.enumerate_frames(video_dir)): if idx < min_idx or idx % decimate != 0: continue pc, ego, label, label_pred, fn = data_utils.read_all_data( video_dir, frame, (0.0, 0.0, 0.0)) if idx == min_idx: pc_last = pc.copy() img_dil_new = pc2img(pc) img_dil_old = pc2img(pc_last) pc_last = pc.copy() if idx == min_idx: keyFrame = img_dil_old.copy() if idx > 0: imReg, h = alignImages(img_dil_new, keyFrame) rows, cols, ch = img_dil_new.shape imReg = cv2.warpPerspective(img_dil_new, h, (cols, rows)) print(h, ego) imgDiff = (