def load_dataset(self):
    """Load the STi train/valid/test splits, using pkl caches when present.

    On a cache hit all four pkl files are read back; on a miss the raw
    annotations are parsed, filtered, split, and the caches are written.

    Returns:
        (train_set, valid_set, test_set) lists of roidb dicts.
    Side effects:
        Sets self.total_roidb / self.filter_roidb and writes the four
        cache pkl files on a cache miss.
    """
    Instruction_cache_file = path_add(self.data_path, 'Instruction_cache_data.pkl')
    train_cache_file = path_add(self.data_path, 'train_cache_data.pkl')
    valid_cache_file = path_add(self.data_path, 'valid_cache_data.pkl')
    test_cache_file = path_add(self.data_path, 'test_cache_data.pkl')
    # FIX: use short-circuit `and` instead of bitwise `&` between booleans.
    if os.path.exists(train_cache_file) and os.path.exists(valid_cache_file) \
            and os.path.exists(test_cache_file) and os.path.exists(Instruction_cache_file):
        print(blue('Loaded the STi dataset from pkl cache files ...'))
        with open(Instruction_cache_file, 'rb') as fid:
            key_points = cPickle.load(fid)
        # key_points = [detection range, labels kept, folder list], as dumped below.
        print(yellow(' NOTICE: the groundtruth range is [{}] meters, the label to keep is {},\n including folders:{},\n Please verify that meets requirement !'
                     .format(key_points[0], key_points[1], key_points[2])))
        with open(train_cache_file, 'rb') as fid:
            train_set = cPickle.load(fid)
        print(' train gt set(cnt:{}) loaded from {}'.format(len(train_set), train_cache_file))
        with open(valid_cache_file, 'rb') as fid:
            valid_set = cPickle.load(fid)
        print(' valid gt set(cnt:{}) loaded from {}'.format(len(valid_set), valid_cache_file))
        with open(test_cache_file, 'rb') as fid:
            test_set = cPickle.load(fid)
        print(' test gt set(cnt:{}) loaded from {}'.format(len(test_set), test_cache_file))
        return train_set, valid_set, test_set

    # Cache miss: build the dataset from raw annotations and persist it.
    print(blue('Prepare the STi dataset for training, please wait ...'))
    self.total_roidb = self.load_sti_annotation()
    self.filter_roidb = self.filter(self.total_roidb, self.type_to_keep)
    train_set, valid_set, test_set = self.assign_dataset(self.filter_roidb)  # train,valid percent
    with open(Instruction_cache_file, 'wb') as fid:
        cPickle.dump([cfg.DETECTION_RANGE, self.type_to_keep, self.folder_list],
                     fid, cPickle.HIGHEST_PROTOCOL)
    print(yellow(' NOTICE: the groundtruth range is [{}] meters, the label to keep is {},\n use the dataset:{},\n Please verify that meets requirement !'
                 .format(cfg.DETECTION_RANGE, self.type_to_keep, self.folder_list)))
    with open(train_cache_file, 'wb') as fid:
        cPickle.dump(train_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded train gt roidb(cnt:{}) to {}'.format(len(train_set), train_cache_file))
    with open(valid_cache_file, 'wb') as fid:
        cPickle.dump(valid_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded valid gt roidb(cnt:{}) to {}'.format(len(valid_set), valid_cache_file))
    with open(test_cache_file, 'wb') as fid:
        cPickle.dump(test_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded test gt roidb(cnt:{}) to {}'.format(len(test_set), test_cache_file))
    return train_set, valid_set, test_set
def get_minibatch(self, idx_array, data_type='train', classify='positive'):
    """Fetch a batch of voxel cubes for the requested split/polarity.

    idx_array: list of cube indices to load; NOTE it is mutated in place
    (extended) when sparse positive cubes are skipped and replaced.
    data_type: 'train' or 'valid'; classify: 'positive' or 'negative'.
    Returns a uint8 array shaped (-1, CUBIC_SIZE[0], CUBIC_SIZE[1], CUBIC_SIZE[2], 1).
    """
    # When all data is preloaded in memory, just fancy-index the big array.
    one_piece = self.load_all_data
    need_check = False
    zeros_start = 0  # wrap-around cursor used once the file index range is exhausted
    if one_piece:
        if data_type == 'train' and classify == 'positive':
            extractor = self.TrainSet_POS
        elif data_type == 'train' and classify == 'negative':
            extractor = self.TrainSet_NEG
        elif data_type == 'valid' and classify == 'positive':
            extractor = self.ValidSet_POS
        else:
            extractor = self.ValidSet_NEG
        ret = extractor[idx_array].reshape(-1, cfg.CUBIC_SIZE[0],
                                           cfg.CUBIC_SIZE[1],
                                           cfg.CUBIC_SIZE[2], 1)
    else:
        # TODO:hxd: add filter when using mode: Not one piece
        if data_type == 'train':
            file_prefix = path_add(self.path, 'KITTI_TRAIN_BOX')
            up_limit = self.train_positive_cube_cnt
        else:
            file_prefix = path_add(self.path, 'KITTI_VALID_BOX')
            up_limit = self.valid_positive_cube_cnt
        if classify == 'positive':
            file_prefix = path_add(file_prefix, 'POSITIVE')
            need_check = True  # only positive cubes are checked for point density
        else:
            file_prefix = path_add(file_prefix, 'NEGATIVE')
        res = []
        # NOTE: appending to idx_array while iterating it is deliberate —
        # Python's list iterator sees the appended replacement indices, so a
        # rejected sparse cube is substituted by the next unread one.
        for idx in idx_array:
            data = np.load(
                path_add(file_prefix, str(idx).zfill(6) + '.npy'))
            if need_check:
                if data.sum() < self.arg.positive_points_needed:
                    # Too few occupied voxels: skip this cube and queue a
                    # replacement, advancing the persistent read cursor.
                    if (idx_array[-1] + 1) < up_limit:
                        idx_array.append(idx_array[-1] + 1)
                        if data_type == 'train':
                            self.dataset_TrainP_record = idx_array[-1] + 1
                        else:
                            self.dataset_ValidP_record = idx_array[-1] + 1
                    else:
                        # Ran past the last cube: wrap to the beginning.
                        idx_array.append(zeros_start)
                        zeros_start += 1
                        if data_type == 'train':
                            self.dataset_TrainP_record = zeros_start
                        else:
                            self.dataset_ValidP_record = zeros_start
                    continue
            res.append(data)
        ret = np.array(res, dtype=np.uint8).reshape(-1, cfg.CUBIC_SIZE[0],
                                                    cfg.CUBIC_SIZE[1],
                                                    cfg.CUBIC_SIZE[2], 1)
    return ret
def get_minibatch(self,idx=0,name='test'):
    """Assemble the test-time input blob for the sample at ``idx``.

    Reads the point cloud referenced by the test-set entry and returns a
    dict with its raw point data under 'lidar3d_data'.
    """
    entry = self.test_set[idx]['files_list']
    parts = entry.split('/')
    cloud = pcd2npScan.from_path(
        path_add(self.data_path, parts[0], 'pcd', parts[1]))
    return {'lidar3d_data': cloud.pc_data}
def load_dataset(self):
    """Load the STi test split from cache, building all splits on a miss.

    Returns:
        test_set list of roidb dicts. Train/valid splits are only written
        to their pkl caches, not returned (this variant serves testing).
    """
    train_cache_file = path_add(self.data_path, 'train_cache_data.pkl')
    valid_cache_file = path_add(self.data_path, 'valid_cache_data.pkl')
    test_cache_file = path_add(self.data_path, 'test_cache_data.pkl')
    # FIX: use short-circuit `and` instead of bitwise `&` between booleans.
    if os.path.exists(train_cache_file) and os.path.exists(valid_cache_file) and os.path.exists(test_cache_file):
        print('Loaded the STi dataset from pkl cache files ...')
        with open(test_cache_file, 'rb') as fid:
            test_set = cPickle.load(fid)
        print(' test gt set loaded from {}'.format(test_cache_file))
        return test_set

    # Cache miss: parse annotations, filter, split, and persist all three.
    print('Prepare the STi dataset for training, please wait ...')
    self.total_roidb = self.load_sti_annotation()
    self.filter_roidb = self.filter(self.total_roidb, self.type_to_keep)
    train_set, valid_set, test_set = self.assign_dataset(self.filter_roidb)  # train,valid percent
    with open(train_cache_file, 'wb') as fid:
        cPickle.dump(train_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded train gt roidb to {}'.format(train_cache_file))
    with open(valid_cache_file, 'wb') as fid:
        cPickle.dump(valid_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded valid gt roidb to {}'.format(valid_cache_file))
    with open(test_cache_file, 'wb') as fid:
        cPickle.dump(test_set, fid, cPickle.HIGHEST_PROTOCOL)
    print(' Wrote and loaded test gt roidb to {}'.format(test_cache_file))
    return test_set
def get_minibatch(self, _idx=0):
    """Given a roidb, construct a minibatch sampled from it.

    Test-time variant: loads the pcd at self.test_set[_idx], voxelizes it,
    and attaches the precomputed apollo 8-channel feature map. Rotation is
    disabled (angle fixed to 0). Returns a blob dict; 'voxel_gen_time'
    carries the three stage timings (load/rotate, voxelize, feature load).
    """
    index_dataset = self.test_set
    fname = index_dataset[_idx]  # full path string to a .pcd file
    timer = Timer()
    timer.tic()
    lidar_data = pcd2np.from_path(fname)
    angel = 0  # (np_random.rand() - 0.500) * np.pi * 0.9  -- augmentation off at test time
    points_rot = self.rotation(lidar_data.pc_data, angel)
    timer.toc()
    time1 = timer.average_time
    timer.tic()
    grid_voxel = voxel_grid(points_rot, cfg, thread_sum=cfg.CPU_CNT)
    timer.toc()
    time2 = timer.average_time
    timer.tic()
    # Precomputed apollo feature .npy lives in a sibling 'feature_pcd_name'
    # folder, keyed by the pcd file's basename (extension swapped).
    apollo_8feature = np.load(
        path_add(self.data_path,
                 fname.split('/')[-3], 'feature_pcd_name',
                 fname.split('/')[-1][0:-4] + '.npy')).reshape(
                     -1, cfg.CUBIC_SIZE[0], cfg.CUBIC_SIZE[1], 8)
    # 57.29578 = 180/pi, converting radians to degrees (no-op here, angel==0).
    apollo_8feature_rot = self.apollo_feature_rotation(apollo_8feature,
                                                       degree=angel * 57.29578)
    timer.toc()
    time3 = timer.average_time
    blob = dict({
        'serial_num': fname.split('/')[-1],
        'lidar3d_data': lidar_data.pc_data,
        'grid_stack': grid_voxel['feature_buffer'],
        'coord_stack': grid_voxel['coordinate_buffer'],
        'ptsnum_stack': grid_voxel['number_buffer'],
        'apollo_8feature': apollo_8feature_rot,
        'voxel_gen_time': (time1, time2, time3)
    })
    return blob
def load_sti_annotation(self):
    """
    Load points and bounding boxes info from txt file in the KITTI format.

    Parses each folder's label/result.txt: class names are mapped to float
    codes, each 'position:{...}' object is reduced to its 11 floats, and
    objects that do not have exactly 11 numbers (wrong label type) are
    dropped. Returns a list of dicts {'files_list': fname, 'labels': array}
    where labels columns are x,y,z,l,w,h,theta,type.
    """
    for index, folder in enumerate(self.folder_list):
        libel_fname = path_add(self.data_path, folder, 'label', 'result.txt')
        label = []
        files_names = []
        with open(libel_fname, 'r') as f:
            lines = f.readlines()
        for line in lines:
            files_names.append(self.get_fname_from_label(line))
            # Map class names to numeric codes so the regex below picks them up.
            line = line.replace('unknown', '0.0').replace('smallMot', '1.0').replace('bigMot', '2.0').replace(
                'nonMot', '3.0').replace('pedestrian', '4.0')
            object_str = line.translate(None, '\"').split('position:{')[1:]
            label_in_frame = []
            for obj in object_str:
                f_str_num = re.findall('[-+]?\d+\.\d+', obj)
                # FIX: removed a stray `pass` statement that sat in this loop.
                for j, num in enumerate(f_str_num):
                    f_str_num[j] = float(num)
                # NOTE(review): keep only well-formed objects with exactly 11
                # numbers (j == 10 after the loop), matching the sibling
                # loader's `len(f_num) == 11` filter — confirm against data.
                if j == 10:  # filter the wrong type label like type: position
                    label_in_frame.append(f_str_num)
            selected_label = np.array(label_in_frame, dtype=np.float32)
            label.append(
                selected_label[:, (0, 1, 2, 6, 7, 8, 3, 9)])  # extract the valuable data:x,y,z,l,w,h,theta,type
        if index == 0:
            total_labels = label
            total_fnames = files_names
        else:
            total_labels.extend(label)
            total_fnames.extend(files_names)
    dataset = [dict({'files_list': total_fnames[i], 'labels': total_labels[i]})
               for i in range(len(total_fnames))]
    return dataset
def pcd_vispy(scans=None, img=None, boxes=None, name=None, index=0,
              vis_size=(800, 600), save_img=False, visible=True, no_gt=False,
              multi_vis=False, point_size=0.02):
    """Render a point cloud, optional camera image, and detection boxes with vispy.

    scans: an (N,>=3) array, or a list of two such arrays (second drawn cyan).
    boxes: dict with keys "center", "size", "yaw", "score", "cls_rpn",
           "cls_cube" (per-box arrays) — assumed schema, confirm with caller.
    Returns the SceneCanvas; optionally saves a rendered png when save_img.
    """
    if multi_vis:
        canvas = vispy.scene.SceneCanvas(title=name, keys='interactive',
                                         size=vis_size, show=True)
    else:
        canvas = vispy.scene.SceneCanvas(title=name, keys='interactive',
                                         size=vis_size, show=visible)
    grid = canvas.central_widget.add_grid()
    vb = grid.add_view(row=0, col=0, row_span=2)       # 3D lidar view
    vb_img = grid.add_view(row=1, col=0)               # camera image view
    vb.camera = 'turntable'
    vb.camera.elevation = 90  # 21.0
    vb.camera.center = (6.5, -0.5, 9.0)
    vb.camera.azimuth = -90  # -75.5
    vb.camera.scale_factor = 63  # 32.7

    if scans is not None:
        if not isinstance(scans, list):
            # Single cloud: white markers.
            pos = scans[:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(1, 1, 1, 1),
                             size=point_size, scaling=True)
            vb.add(scatter)
        else:
            # Pair of clouds: first white, second cyan (e.g. raw vs filtered).
            pos = scans[0][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(1, 1, 1, 1),
                             size=point_size, scaling=True)
            vb.add(scatter)
            pos = scans[1][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(0, 1, 1, 1),
                             size=point_size, scaling=True)
            vb.add(scatter)
    axis = visuals.XYZAxis()
    vb.add(axis)

    if img is None:
        img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
    image = visuals.Image(data=img, method='auto')
    vb_img.camera = 'turntable'
    vb_img.camera.elevation = -90.0
    vb_img.camera.center = (2100, -380, -500)
    vb_img.camera.azimuth = 0.0
    vb_img.camera.scale_factor = 1500
    vb_img.add(image)

    if boxes is not None:
        gt_indice = np.where(boxes["cls_rpn"] == 4)[0]
        gt_cnt = len(gt_indice)
        boxes_cnt = boxes["center"].shape[0]
        i = 0  # running count of ground-truth boxes drawn
        for k in range(boxes_cnt):
            # Green intensity scales with score above 0.5 (clamped >= 0.005).
            radio = max(boxes["score"][k] - 0.5, 0.005) * 2.0
            color = (0, radio, 0, 1)  # Green
            if boxes["cls_rpn"][k] == 4:  # gt boxes
                i = i + 1
                vsp_box = visuals.Box(depth=boxes["size"][k][0],
                                      width=boxes["size"][k][1],
                                      height=boxes["size"][k][2],
                                      color=(0.3, 0.4, 0.0, 0.06),
                                      edge_color='pink')
                mesh_box = vsp_box.mesh.mesh_data
                mesh_border_box = vsp_box.border.mesh_data
                vertices = mesh_box.get_vertices()
                center = np.array([
                    boxes["center"][k][0], boxes["center"][k][1],
                    boxes["center"][k][2]
                ], dtype=np.float32)
                # Rotate box vertices by -yaw and translate to the box center.
                vertices_roa_trans = box_rot_trans(vertices,
                                                   -boxes["yaw"][k][0], center)
                # mesh_border_box.set_vertices(vertices_roa_trans)
                mesh_box.set_vertices(vertices_roa_trans)
                vb.add(vsp_box)
                if True:  # always label gt boxes with running det counter
                    text = visuals.Text(text='det: ({}/{})'.format(i, gt_cnt),
                                        color='white', face='OpenSans',
                                        font_size=12,
                                        pos=[
                                            boxes["center"][k][0],
                                            boxes["center"][k][1],
                                            boxes["center"][k][2]
                                        ],
                                        anchor_x='left', anchor_y='top',
                                        font_manager=None)
                    vb.add(text)
            # cls_rpn + cls_cube encodes the 2-stage classification outcome.
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 0:  # True negative cls rpn divided by cube
                vb.add(line_box(boxes["center"][k], boxes["size"][k],
                                -boxes["yaw"][k], color=color))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 1:  # False negative cls rpn divided by cube
                vb.add(line_box(boxes["center"][k], boxes["size"][k],
                                -boxes["yaw"][k], color="red"))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 2:  # False positive cls rpn divided by cube
                vb.add(line_box(boxes["center"][k], boxes["size"][k],
                                -boxes["yaw"][k], color="blue"))
            elif (boxes["cls_rpn"][k] + boxes["cls_cube"][k]
                  ) == 3:  # True positive cls rpn divided by cube
                vb.add(line_box(boxes["center"][k], boxes["size"][k],
                                -boxes["yaw"][k], color="yellow"))
            # Index label at the box's min corner, colored by score.
            text = visuals.Text(
                text=str(k), color=color, face='OpenSans', font_size=12,
                pos=[
                    boxes["center"][k][0] - boxes["size"][k][0] / 2,
                    boxes["center"][k][1] - boxes["size"][k][1] / 2,
                    boxes["center"][k][2] - boxes["size"][k][2] / 2
                ],
                anchor_x='left', anchor_y='top', font_manager=None)
            vb.add(text)

    if save_img:
        folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR)
        if not os.path.exists(folder):
            os.makedirs(folder)
        fileName = path_add(folder, str(index).zfill(6) + '.png')
        res = canvas.render(bgcolor='black')[:, :, 0:3]
        vispy_file.write_png(fileName, res)

    @canvas.connect
    def on_key_press(ev):
        # Press '+'/'=' to dump the current camera state for tuning the view.
        if ev.key.name in '+=':
            a = vb.camera.get_state()
            print(a)

    if visible:
        pass  # NOTE(review): stray no-op; app loop assumed to run only when visible
        vispy.app.run()
    return canvas
def input_data(self, scans=None, img=None, boxes=None, index=0,
               save_img=False, no_gt=False):
    """Populate this viewer instance's vispy canvas with a cloud, image, and boxes.

    scans: (N,>=3) point array (required — indexed unconditionally).
    boxes: 2D array, one row per box; last two columns are the rpn/cube
    classification codes and columns 1..6 are x,y,z and w,d,h — assumed
    layout, confirm with caller.
    """
    self.canvas = vispy.scene.SceneCanvas(show=True)
    self.grid = self.canvas.central_widget.add_grid()
    self.vb = self.grid.add_view(row=0, col=0, row_span=2)   # lidar view
    self.vb_img = self.grid.add_view(row=1, col=0)           # image view
    self.vb.camera = 'turntable'
    self.vb.camera.elevation = 90  # 21.0
    self.vb.camera.center = (6.5, -0.5, 9.0)
    self.vb.camera.azimuth = -90  # -75.5
    self.vb.camera.scale_factor = 63  # 32.7
    self.vb_img.camera = 'turntable'
    self.vb_img.camera.elevation = -90.0
    self.vb_img.camera.center = (2100, -380, -500)
    self.vb_img.camera.azimuth = 0.0
    self.vb_img.camera.scale_factor = 1500

    pos = scans[:, 0:3]
    scatter = visuals.Markers()
    scatter.set_gl_state('translucent', depth_test=False)
    scatter.set_data(pos, edge_width=0, face_color=(1, 1, 1, 1),
                     size=0.01, scaling=True)
    self.vb.add(scatter)

    if img is None:
        img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
    image = visuals.Image(data=img, method='auto')
    self.vb_img.add(image)

    if boxes is not None:
        if len(boxes.shape) == 1:
            boxes = boxes.reshape(1, -1)  # promote a single box to 2D
        gt_indice = np.where(boxes[:, -1] == 2)[0]
        gt_cnt = len(gt_indice)
        i = 0  # count of gt boxes drawn so far
        for box in boxes:
            # Green intensity from score (col 0), clamped above 0.005.
            radio = max(box[0] - 0.5, 0.005) * 2.0
            color = (0, radio, 0, 1)  # Green
            if box[-1] == 4:  # gt boxes
                i = i + 1
                vsp_box = visuals.Box(width=box[4], depth=box[5],
                                      height=box[6],
                                      color=(0.6, 0.8, 0.0, 0.3))  # edge_color='yellow')
                mesh_box = vsp_box.mesh.mesh_data
                mesh_border_box = vsp_box.border.mesh_data
                vertices = mesh_box.get_vertices()
                center = np.array([box[1], box[2], box[3]], dtype=np.float32)
                # Translate the unit box to its center (no yaw applied here).
                vtcs = np.add(vertices, center)
                mesh_border_box.set_vertices(vtcs)
                mesh_box.set_vertices(vtcs)
                self.vb.add(vsp_box)
                if False:  # disabled gt counter label
                    text = visuals.Text(text='gt: ({}/{})'.format(i, gt_cnt),
                                        color='white', face='OpenSans',
                                        font_size=12,
                                        pos=[box[1], box[2], box[3]],
                                        anchor_x='left', anchor_y='top',
                                        font_manager=None)
                    self.vb.add(text)
            # Sum of the last two columns encodes the 2-stage outcome.
            if (box[-1] + box[-2]) == 0:  # True negative cls rpn divided by cube
                self.vb.add(line_box(box, color=color))
            if (box[-1] + box[-2]) == 1:  # False negative cls rpn divided by cube
                self.vb.add(line_box(box, color='red'))
            if (box[-1] + box[-2]) == 2:  # False positive cls rpn divided by cube
                if no_gt:
                    self.vb.add(line_box(box, color='yellow'))
                else:
                    self.vb.add(line_box(box, color='blue'))
            if (box[-1] + box[-2]) == 3:  # True positive cls rpn divided by cube
                self.vb.add(line_box(box, color='yellow'))

    if save_img:
        # NOTE(review): `folder` is the module-level global defined at import
        # time (cfg.TEST_RESULT/cfg.RANDOM_STR) — not a local; confirm intent.
        if not os.path.exists(folder):
            os.makedirs(folder)
        fileName = path_add(folder, str(index).zfill(6) + '.png')
        res = self.canvas.render(bgcolor='black')[:, :, 0:3]
        vispy_file.write_png(fileName, res)

    @self.canvas.connect
    def on_key_press(ev):
        # Press '+'/'=' to print the camera state for tuning the view.
        if ev.key.name in '+=':
            a = self.vb.camera.get_state()
            print(a)
def get_minibatch(self, _idx=0, name='train'):
    """Given a roidb, construct a minibatch sampled from it.

    Training variant: applies a random yaw rotation (up to ~±85.5 deg) to the
    point cloud, box labels, and the precomputed per-pixel labels/features,
    then voxelizes. Returns a blob dict; 'voxel_gen_time' holds the three
    stage timings.
    """
    if name == 'train':
        index_dataset = self.train_set
    elif name == 'valid':
        index_dataset = self.valid_set
    else:
        index_dataset = self.test_set
    fname = index_dataset[_idx]['files_name']  # '<folder>/<pcd file>'
    timer = Timer()
    timer.tic()
    lidar_data = pcd2np.from_path(
        path_add(self.data_path, fname.split('/')[0], 'pcd',
                 fname.split('/')[1]))
    # Random rotation augmentation in (-0.475*pi, 0.475*pi).
    angel = (np_random.rand() - 0.500) * np.pi * 0.95
    points_rot = self.rotation(lidar_data.pc_data, angel)
    # Rotate the gt boxes by shifting the yaw column (index 6) only.
    boxes_rot = np.add(index_dataset[_idx]['boxes_labels'],
                       [0., 0., 0., 0., 0., 0., angel, 0.])  # yaw
    # 57.29578 = 180/pi: helpers below take degrees.
    category_rot = self.label_rotation(
        index_dataset[_idx]['object_labels'], degree=angel * 57.29578)
    timer.toc()
    time1 = timer.average_time
    timer.tic()
    grid_voxel = voxel_grid(points_rot, cfg, thread_sum=cfg.CPU_CNT)
    timer.toc()
    time2 = timer.average_time
    timer.tic()
    # Precomputed apollo 8-channel feature map keyed by the pcd basename.
    apollo_8feature = np.load(
        path_add(self.data_path, fname.split('/')[0], 'feature_pcd_name',
                 fname.split('/')[1][0:-4] + '.npy')).reshape(
                     -1, cfg.CUBIC_SIZE[0], cfg.CUBIC_SIZE[1], 8)
    apollo_8feature_rot = self.apollo_feature_rotation(apollo_8feature,
                                                       degree=angel * 57.29578)
    timer.toc()
    time3 = timer.average_time
    blob = dict({
        'serial_num': fname,
        'voxel_gen_time': (time1, time2, time3),
        # Rotated xyz re-joined with the original intensity column.
        'lidar3d_data': np.hstack((points_rot, lidar_data.pc_data[:, 3:4])),
        'boxes_labels': boxes_rot,
        'object_labels': category_rot,
        'grid_stack': grid_voxel['feature_buffer'],
        'coord_stack': grid_voxel['coordinate_buffer'],
        'ptsnum_stack': grid_voxel['number_buffer'],
        'apollo_8feature': apollo_8feature_rot,
    })
    return blob
def load_sti_annotation(self):
    """Parse result.txt box labels plus per-pixel .npy labels for every folder.

    For each folder: reads label/result.txt (class names mapped to float
    codes), keeps only objects with exactly 11 parsed floats, drops frames
    that are empty or contain no typed object, and pairs each kept frame
    with its label_rect/*.npy matrix (channel 0 = object label, channel 1 =
    height label). Returns a list of per-frame dicts.
    """
    total_box_labels, total_fnames, total_object_labels, total_height_labels = [], [], [], []
    for index, folder in enumerate(self.folder_list):
        print(green(' Process the folder {}'.format(folder)))
        # TODO:declaration: the result.txt file in shrink_box_label_bk contains illegal number like: "x":"-1.#IND00","y":"-1.#IND00","z":"-1.#IND00"
        libel_fname = path_add(self.data_path, folder, 'label', 'result.txt')
        pixel_libel_folder = path_add(self.data_path, folder, 'label_rect')
        box_label, files_names, one_object_label, one_height_label = [], [], [], []
        with open(libel_fname, 'r') as f:
            frames = f.readlines()
        for idx__, one_frame in enumerate(
                frames):  # one frame in a series data
            # Map class names to numeric codes so the float regex catches them.
            one_frame = one_frame.replace('unknown', '0.0').replace('smallMot', '1.0').replace('bigMot', '2.0') \
                .replace('nonMot', '3.0').replace('pedestrian', '4.0').replace('dontcare', '0.0')
            object_str = one_frame.translate(None, '\"').split('position:{')[1:]
            label_in_frame = []
            if idx__ % 150 == 0:  # progress heartbeat every 150 frames
                print(" Process is going on {}/{} ".format(
                    idx__, len(frames)))
            for obj in object_str:
                f_str_num = re.findall('[-+]?\d+\.\d+', obj)
                f_num = map(float, f_str_num)  # Py2: map returns a list
                if len(
                        f_num
                ) == 11:  # filter the wrong type label like type: position
                    label_in_frame.append(f_num)
                else:  # toxic label ! shit!
                    print(
                        red(' There is a illegal lbael(length:{}) in result.txt in frame-{} without anything in folder {} and it has been dropped'
                            .format(len(f_num), idx__, folder)))
                    print f_num
                    # print one_frame
            label_in_frame_np = np.array(label_in_frame,
                                         dtype=np.float32).reshape(-1, 11)
            if label_in_frame_np.shape[0] == 0:
                # Frame parsed to nothing at all: skip it.
                print(
                    red(' There is a empty frame-{} without anything in folder {} and it has been dropped'
                        .format(idx__, folder)))
                continue
            if len(np.where(label_in_frame_np[:, 9] != 0)[0]) == 0:
                # Column 9 is the type code; all-zero means no real object.
                print(
                    red(' There is a frame-{} without any object in folder {} and it has been dropped'
                        .format(idx__, folder)))
                continue
            box_label.append(label_in_frame_np[:, (
                0, 1, 2, 6, 7, 8, 3, 9)])  # extract the valuable data:x,y,z,l,w,h,theta,type
            files_names.append(self.get_fname_from_label(one_frame))
        print(" Loading .npy labels ... ")
        # Numeric sort on '<number>.npy' filenames keeps frame order.
        for file_ in sorted(os.listdir(pixel_libel_folder),
                            key=lambda name: int(name[0:-4])):
            data_matrix = np.load(path_add(pixel_libel_folder, file_))
            one_object_label.append(data_matrix[:, :, 0:1])  # TODO:check
            one_height_label.append(data_matrix[:, :, 1:2])
        assert len(one_object_label) == len(
            files_names
        ), "There happens a ERROR when generating dataset in dataset.py"
        total_box_labels.extend(box_label)
        total_fnames.extend(files_names)
        total_object_labels.extend(one_object_label)
        total_height_labels.extend(one_height_label)
        print(" Completing loading {} is done! ".format(folder))
    print(" Zip data in one dict ... ")
    return_dataset = [
        dict({
            'files_name': total_fnames[i],
            'boxes_labels': total_box_labels[i],
            'object_labels': total_object_labels[i],
            'height_labels': total_height_labels[i]
        }) for i in range(len(total_fnames))
    ]
    print(" Total number of frames is {}".format(len(total_fnames)))
    return return_dataset
def pcd_vispy_standard(scans=None, img=None, boxes=None, name=None, index=0,
                       vis_size=(800, 600), save_img=False, visible=True,
                       multi_vis=False, point_size=0.02, lidar_view_set=None):
    """Render a point cloud and class-colored boxes in the 'standard' layout.

    boxes rows are laid out as type,xyz,lwh,yaw,[score,reserve1,reserve2]
    (per the inline comment); type selects the line color. lidar_view_set
    optionally overrides the default camera (keys: elevation, center,
    azimuth, scale_factor). Returns the SceneCanvas.
    """
    if multi_vis:
        canvas = vispy.scene.SceneCanvas(title=name, keys='interactive',
                                         size=vis_size, show=True)
    else:
        canvas = vispy.scene.SceneCanvas(title=name, keys='interactive',
                                         size=vis_size, show=visible)
    grid = canvas.central_widget.add_grid()
    vb = grid.add_view(row=0, col=0, row_span=2)   # lidar view
    vb_img = grid.add_view(row=1, col=0)           # image view
    if lidar_view_set is None:
        vb.camera = 'turntable'
        vb.camera.elevation = 90  # 21.0
        vb.camera.center = (6.5, -0.5, 9.0)
        vb.camera.azimuth = -90  # -75.5
        vb.camera.scale_factor = 63  # 32.7
    else:
        vb.camera = 'turntable'
        vb.camera.elevation = lidar_view_set['elevation']  # 21.0
        vb.camera.center = lidar_view_set['center']
        vb.camera.azimuth = lidar_view_set['azimuth']
        vb.camera.scale_factor = lidar_view_set['scale_factor']
    if scans is not None:
        if not isinstance(scans, list):
            # Single cloud: white markers.
            pos = scans[:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(1, 1, 1, 1),
                             size=point_size, scaling=True)
            vb.add(scatter)
        else:
            # Pair of clouds: first white, second cyan with a larger size.
            pos = scans[0][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(1, 1, 1, 1),
                             size=point_size, scaling=True)
            vb.add(scatter)
            pos = scans[1][:, :3]
            scatter = visuals.Markers()
            scatter.set_gl_state('translucent', depth_test=False)
            scatter.set_data(pos, edge_width=0, face_color=(0, 1, 1, 1),
                             size=0.1, scaling=True)
            vb.add(scatter)
    axis = visuals.XYZAxis()
    vb.add(axis)
    if img is None:
        img = np.zeros(shape=[1, 1, 3], dtype=np.float32)
    image = visuals.Image(data=img, method='auto')
    vb_img.camera = 'turntable'
    vb_img.camera.elevation = -90.0
    vb_img.camera.center = (1900, 160, -1300)
    vb_img.camera.azimuth = 0.0
    vb_img.camera.scale_factor = 1500
    vb_img.add(image)
    if boxes is not None:
        if len(boxes.shape) == 1:
            boxes = boxes.reshape(-1, boxes.shape[0])  # promote a single box
        # one box: type,xyz,lwh,yaw,[score,reserve1,reserve2]
        for box in boxes:
            if box[0] == 1:  # type:car
                vb.add(line_box_stand(box, color="yellow"))
            elif box[0] == 2:  # type:Perdestrain
                vb.add(line_box_stand(box, color="red"))
            elif box[0] == 3:  # type:Cyclist
                vb.add(line_box_stand(box, color="blue"))
            elif box[0] == 4:  # type:Van
                vb.add(line_box_stand(box, color="pink"))
            else:
                vb.add(line_box_stand(box, color="green"))
    if save_img:
        folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR)
        if not os.path.exists(folder):
            os.makedirs(folder)
        fileName = path_add(folder, str(index).zfill(6) + '.png')
        res = canvas.render(bgcolor='black')[:, :, 0:3]
        vispy_file.write_png(fileName, res)

    @canvas.connect
    def on_key_press(ev):
        # Press '+'/'=' to print the camera state for tuning the view.
        if ev.key.name in '+=':
            a = vb.camera.get_state()
            print(a)

    if visible:
        pass  # NOTE(review): stray no-op; app loop assumed to run only when visible
        vispy.app.run()
    return canvas
def eat_data_in_one_piece(self):
    """Load all train/valid pos/neg cubes into memory, caching and filtering.

    Three-tier cache:
      1. filter_data_in_one_piece/*.npy — filtered arrays; load and return.
      2. data_in_one_piece/*.npy — unfiltered arrays; load, then filter.
      3. raw per-cube .npy files — concatenate, save tier 2, then filter.
    Filtering keeps positive cubes whose voxel sum exceeds
    self.arg.positive_points_needed. Populates self.TrainSet_*/ValidSet_*
    and the four *_cube_cnt counters.
    """
    if not os.path.exists(path_add(self.path, 'data_in_one_piece')):
        os.mkdir(path_add(self.path, 'data_in_one_piece'))
    TrainSet_POS_file_name = path_add(self.path, 'data_in_one_piece', 'TrainSet_POS.npy')
    TrainSet_NEG_file_name = path_add(self.path, 'data_in_one_piece', 'TrainSet_NEG.npy')
    ValidSet_POS_file_name = path_add(self.path, 'data_in_one_piece', 'ValidSet_POS.npy')
    ValidSet_NEG_file_name = path_add(self.path, 'data_in_one_piece', 'ValidSet_NEG.npy')
    if not os.path.exists(path_add(self.path, 'filter_data_in_one_piece')):
        os.mkdir(path_add(self.path, 'filter_data_in_one_piece'))
    # info file records the point threshold the filtered caches were built with.
    info_file_name = path_add(self.path, 'filter_data_in_one_piece', 'information_about_files.npy')
    TrainSet_POS_filter_file_name = path_add(self.path, 'filter_data_in_one_piece', 'Filter_TrainSet_POS.npy')
    ValidSet_POS_filter_file_name = path_add(self.path, 'filter_data_in_one_piece', 'Filter_ValidSet_POS.npy')
    TrainSet_NEG_filter_file_name = path_add(self.path, 'filter_data_in_one_piece', 'Filter_TrainSet_NEG.npy')
    ValidSet_NEG_filter_file_name = path_add(self.path, 'filter_data_in_one_piece', 'Filter_ValidSet_NEG.npy')

    # Tier 1: filtered caches exist — load them and return immediately.
    if os.path.exists(TrainSet_POS_filter_file_name) and os.path.exists(ValidSet_POS_filter_file_name) \
            and os.path.exists(TrainSet_NEG_filter_file_name) and os.path.exists(ValidSet_NEG_filter_file_name) \
            and os.path.exists(info_file_name):
        print(
            'Eating filtered data(Points more than {}) from npy zip file in folder:filter_data_in_one_piece ...'
            .format(darkyellow('[' + str(np.load(info_file_name)) + ']')))
        self.TrainSet_POS = np.load(TrainSet_POS_filter_file_name)
        self.TrainSet_NEG = np.load(TrainSet_NEG_filter_file_name)
        self.ValidSet_POS = np.load(ValidSet_POS_filter_file_name)
        self.ValidSet_NEG = np.load(ValidSet_NEG_filter_file_name)
        self.train_positive_cube_cnt = self.TrainSet_POS.shape[0]
        self.train_negative_cube_cnt = self.TrainSet_NEG.shape[0]
        self.valid_positive_cube_cnt = self.ValidSet_POS.shape[0]
        self.valid_negative_cube_cnt = self.ValidSet_NEG.shape[0]
        print(
            ' emmm,there are TP:{} TN:{} VP:{} VN:{} in my belly.'.format(
                purple(str(self.TrainSet_POS.shape[0])),
                purple(str(self.TrainSet_NEG.shape[0])),
                purple(str(self.ValidSet_POS.shape[0])),
                purple(str(self.ValidSet_NEG.shape[0])),
            ))
        return None

    # Tier 2: unfiltered one-piece caches exist — load them.
    if os.path.exists(TrainSet_POS_file_name) and os.path.exists(TrainSet_NEG_file_name) \
            and os.path.exists(ValidSet_POS_file_name) and os.path.exists(ValidSet_NEG_file_name):
        print(blue('Let`s eating exiting data(without filter) !'))
        self.TrainSet_POS = np.load(TrainSet_POS_file_name)
        self.TrainSet_NEG = np.load(TrainSet_NEG_file_name)
        self.ValidSet_POS = np.load(ValidSet_POS_file_name)
        self.ValidSet_NEG = np.load(ValidSet_NEG_file_name)
    else:
        # Tier 3: concatenate every raw per-cube .npy file, then cache tier 2.
        print(darkyellow('Let`s eating raw data onr by one !'))
        train_pos_name_list = sorted(
            os.listdir(path_add(self.path, 'KITTI_TRAIN_BOX', 'POSITIVE')))
        train_neg_name_list = sorted(
            os.listdir(path_add(self.path, 'KITTI_TRAIN_BOX', 'NEGATIVE')))
        valid_pos_name_list = sorted(
            os.listdir(path_add(self.path, 'KITTI_VALID_BOX', 'POSITIVE')))
        valid_neg_name_list = sorted(
            os.listdir(path_add(self.path, 'KITTI_VALID_BOX', 'NEGATIVE')))
        for name in train_pos_name_list:
            data = np.load(
                path_add(self.path, 'KITTI_TRAIN_BOX', 'POSITIVE') + '/' + name)
            self.TrainSet_POS.append(data)
        self.TrainSet_POS = np.array(self.TrainSet_POS, dtype=np.uint8)
        np.save(TrainSet_POS_file_name, self.TrainSet_POS)
        print(' Yummy!')
        for name in train_neg_name_list:
            data = np.load(
                path_add(self.path, 'KITTI_TRAIN_BOX', 'NEGATIVE') + '/' + name)
            self.TrainSet_NEG.append(data)
        self.TrainSet_NEG = np.array(self.TrainSet_NEG, dtype=np.uint8)
        np.save(TrainSet_NEG_file_name, self.TrainSet_NEG)
        print(' Take another piece!')
        for name in valid_pos_name_list:
            data = np.load(
                path_add(self.path, 'KITTI_VALID_BOX', 'POSITIVE') + '/' + name)
            self.ValidSet_POS.append(data)
        self.ValidSet_POS = np.array(self.ValidSet_POS, dtype=np.uint8)
        np.save(ValidSet_POS_file_name, self.ValidSet_POS)
        print(' One more!')
        for name in valid_neg_name_list:
            data = np.load(
                path_add(self.path, 'KITTI_VALID_BOX', 'NEGATIVE') + '/' + name)
            self.ValidSet_NEG.append(data)
        self.ValidSet_NEG = np.array(self.ValidSet_NEG, dtype=np.uint8)
        np.save(ValidSet_NEG_file_name, self.ValidSet_NEG)
        print(' I`m full ...')
        print('All data has been saved in zip npy file!')
        print(
            'There are TP:{} TN:{} VP:{} VN:{} and has been successfully eaten!'
            .format(self.TrainSet_POS.shape[0], self.TrainSet_NEG.shape[0],
                    self.ValidSet_POS.shape[0], self.ValidSet_NEG.shape[0]))

    # Filter positives by occupied-voxel count; negatives pass through.
    print(
        darkyellow(
            'Filter the positive data which has less points({}) inside ... '
            .format(self.arg.positive_points_needed)))
    train_sum = np.array([
        self.TrainSet_POS[i].sum()
        for i in range(self.TrainSet_POS.shape[0])
    ])
    keep_mask1 = np.where(train_sum > self.arg.positive_points_needed)
    self.TrainSet_POS = self.TrainSet_POS[keep_mask1]
    np.save(TrainSet_POS_filter_file_name, self.TrainSet_POS)
    valid_sum = np.array([
        self.ValidSet_POS[i].sum()
        for i in range(self.ValidSet_POS.shape[0])
    ])
    keep_mask2 = np.where(valid_sum > self.arg.positive_points_needed)
    self.ValidSet_POS = self.ValidSet_POS[keep_mask2]
    np.save(ValidSet_POS_filter_file_name, self.ValidSet_POS)
    np.save(ValidSet_NEG_filter_file_name, self.ValidSet_NEG)
    np.save(TrainSet_NEG_filter_file_name, self.TrainSet_NEG)
    np.save(info_file_name, self.arg.positive_points_needed)
    self.train_positive_cube_cnt = self.TrainSet_POS.shape[0]
    self.train_negative_cube_cnt = self.TrainSet_NEG.shape[0]
    self.valid_positive_cube_cnt = self.ValidSet_POS.shape[0]
    self.valid_negative_cube_cnt = self.ValidSet_NEG.shape[0]
    print(
        green(
            'Done! TrainPositive remain: {},ValidPositive remain: {} and has been saved'
        ).format(
            self.TrainSet_POS.shape[0],
            self.ValidSet_POS.shape[0],
        ))
import vispy.io as vispy_file import cv2 from tools.utils import scales_to_255 import tensorflow as tf import numpy as np from network.config import cfg from os.path import join as path_add if socket.gethostname() == "hexindong": import vispy.app v = vispy.app.Canvas() vispy.set_log_level('CRITICAL', match='-.-') folder = path_add(cfg.TEST_RESULT, cfg.RANDOM_STR) os.makedirs(folder) # common functions =========================== def BoxAry_Theta(gt_box3d=None, pre_box3d=None, pre_theta_value=None, pre_cube_cls=None): # gt_box3d: (x1,y1,z1),(x2,y2,z2),dt_cls,yaw # pre_box3d: (x1,y1,z1),(x2,y2,z2),score,rpn_cls_label # cubic_theta_value:pre_box3d's yaw value boxes = dict({}) if gt_box3d is None: gt_box3d = np.zeros([1, 8], dtype=np.float32) if pre_box3d is None: