def proprecess_rgb(save_preprocess_dir, dataset, date, drive, frames_index, overwrite=False):
    """Save RGB frames with their ground-truth 3D boxes drawn on them.

    For each frame index ``n`` the previously dumped boxes are loaded from
    '<save_preprocess_dir>/gt_boxes3d/<date>/<drive>/<n:05d>.npy' and the boxed
    image is written to '<save_preprocess_dir>/rgb/<date>/<drive>/<n:05d>.png'.
    Existing files are skipped unless ``overwrite`` is true.

    NOTE: the (misspelled) function name is kept unchanged so existing callers
    are unaffected.
    """
    dataset_dir = os.path.join(save_preprocess_dir, 'rgb', date, drive)
    os.makedirs(dataset_dir, exist_ok=True)
    count = 0  # index into dataset.rgb; advanced even for skipped frames
    for n in frames_index:
        path = os.path.join(dataset_dir, '%05d.png' % n)
        if not overwrite and os.path.isfile(path):  # was 'overwrite == False'
            count += 1
            continue
        print('rgb images={}'.format(n))
        rgb = dataset.rgb[count][0]
        rgb = (rgb * 255).astype(np.uint8)
        rgb = preprocess.rgb(rgb)
        # todo fit it to didi dataset later.
        gt_boxes3d = np.load(
            os.path.join(save_preprocess_dir, 'gt_boxes3d', date, drive, '%05d.npy' % n))
        rgb_image = draw_box3d_on_camera(rgb, gt_boxes3d, color=(0, 0, 80))
        # 'path' is already a complete path; the former os.path.join(path) was a no-op
        cv2.imwrite(path, rgb_image)
        count += 1
    print('rgb image save done\n')
def dump_bbox_on_camera_image(save_preprocess_dir, dataset, objects, date, drive, frames_index, overwrite=False):
    """Render ground-truth 3D boxes onto each camera frame and save the result.

    Images go to '<save_preprocess_dir>/gt_box_plot/<date>/<drive>/<n:05d>.png',
    downscaled to one third of their size. ``overwrite`` is accepted for
    interface compatibility but is not consulted.
    """
    out_dir = os.path.join(save_preprocess_dir, 'gt_box_plot', date, drive)
    os.makedirs(out_dir, exist_ok=True)
    for count, n in enumerate(frames_index):
        print('rgb images={}'.format(n))
        frame = (dataset.rgb[count][0] * 255).astype(np.uint8)
        frame = crop_image(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
        gt_boxes3d, gt_labels = obj_to_gt_boxes3d(objects[count])
        boxed = draw.draw_box3d_on_camera(frame, gt_boxes3d)
        third = (boxed.shape[1] // 3, boxed.shape[0] // 3)
        cv2.imwrite(os.path.join(out_dir, '%05d.png' % n), cv2.resize(boxed, third))
    print('gt box image save done\n')
def draw_bbox_on_rgb(rgb, boxes3d, one_frame_tag):
    """Draw 3D boxes on an RGB frame, shrink it to a third of its size and
    write it under LOG_DIR/test/rgb using the tag (with '/' -> '_') as name."""
    boxed = draw.draw_box3d_on_camera(rgb, boxes3d)
    scaled = cv2.resize(boxed, (boxed.shape[1] // 3, boxed.shape[0] // 3))
    fname = '%s.png' % one_frame_tag.replace('/', '_')
    path = os.path.join(config.cfg.LOG_DIR, 'test', 'rgb', fname)
    cv2.imwrite(path, scaled)
    print('write %s finished' % path)
def draw_bbox_on_rgb(rgb, boxes3d, one_frame_tag, calib_velo_to_rgb):
    """Project 3D boxes through the velo->rgb calibration, draw them on the
    frame and save the full-resolution image under LOG_DIR/test/rgb."""
    image = draw.draw_box3d_on_camera(rgb, boxes3d, calib_velo_to_rgb)
    name = '%s.png' % one_frame_tag.replace('/', '_')
    path = os.path.join(config.cfg.LOG_DIR, 'test', 'rgb', name)
    cv2.imwrite(path, image)
    print('write %s finished' % path)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name=''):
    """Log predictions: boxes drawn on the camera image (halved in size) and on
    the padded top view, each pushed as an image summary."""
    self.top_image = data.draw_top_image(self.top_view[0])
    self.top_image = self.top_image_padding(self.top_image)
    if log_rpn:
        self.log_rpn(step=step, scope_name=scope_name)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # one label per predicted box: index and class-1 probability
    text_lables = [
        'No.%d class:1 prob: %.4f' % (i, prob)
        for i, prob in enumerate(self.probs)
    ]
    predict_camera_view = nud.draw_box3d_on_camera(self.rgb_image[0],
                                                   self.boxes3d,
                                                   text_lables=text_lables)
    # halve the camera view before summarizing
    new_size = (predict_camera_view.shape[1] // 2, predict_camera_view.shape[0] // 2)
    predict_camera_view = cv2.resize(predict_camera_view, new_size)
    # nud.imsave('predict_camera_view' , predict_camera_view, log_subdir)
    self.summary_image(predict_camera_view, scope_name + '/predict_camera_view', step=step)
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # nud.imsave('predict_top_view' , predict_top_view, log_subdir)
    self.summary_image(predict_top_view, scope_name + '/predict_top_view', step=step)
def dump_bbox_on_camera_image(save_preprocess_dir, dataset, date, drive, frames_index, overwrite=False,
                              label_dir='/home/mohsen/Desktop/MV3D/data/raw/kitti/object3d_all/object3d_all_drive_all_sync/label_2',
                              calib_dir='/home/mohsen/Desktop/MV3D/data/raw/kitti/object3d_all/object3d_all_drive_all_sync/calib'):
    """Draw KITTI ground-truth 3D boxes on each camera frame and save them.

    For every frame index ``n`` the KITTI labels and camera-2 calibration are
    read, each labelled object is converted to its 3D corner box via
    ``computeBox3D``, and the boxed image is written (scaled to 1/3) to
    '<save_preprocess_dir>/gt_box_plot/<date>/<drive>/<n:06d>.png'.

    ``label_dir`` / ``calib_dir`` default to the previously hard-coded,
    machine-specific locations so existing callers keep working, while other
    setups can now pass their own paths. ``overwrite`` is accepted but unused.
    """
    dataset_dir = os.path.join(save_preprocess_dir, 'gt_box_plot', date, drive)
    os.makedirs(dataset_dir, exist_ok=True)
    count = 0
    for n in frames_index:
        print('rgb images={}'.format(n))
        rgb = dataset.rgb[count][0]
        rgb = (rgb * 255).astype(np.uint8)
        rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)
        rgb = crop_image(rgb)
        objects = readLabels(label_dir, n)
        P, R0_rect, Tr_velo_to_cam = readCalibration(calib_dir, n, 2)
        gt_boxes3d = []
        for obj in objects:  # iterate objects directly instead of range(len(...))
            # NOTE: computeBox3D returns an (updated) P that is fed to the next
            # object, preserving the original call sequence exactly.
            gt_box3d, face_idx, P, R = computeBox3D(obj, P)
            gt_boxes3d.append(gt_box3d)
        img = draw.draw_box3d_on_camera(rgb, gt_boxes3d)
        new_size = (img.shape[1] // 3, img.shape[0] // 3)
        img = cv2.resize(img, new_size)
        cv2.imwrite(os.path.join(dataset_dir, '%06d.png' % n), img)
        count += 1
    print('gt box image save done\n')
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag', is_train_mode=False):
    """Build a composite top-view log image (frame tag, optional RPN view,
    fusion proposals, final predictions) and an RGB prediction image, and push
    both as image summaries.

    loss: optional (cls_loss, reg_loss) tuple printed onto the proposal view.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.batch_top_view[0])
    top_view_log = self.top_image.copy()
    # add text on origin
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.5, (0, 255, 100), 0, cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               tensor_board=False, draw_rpn_target=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # all prediction on top
    probs, boxes3d = rcnn_nms(self.fuse_probs, self.fuse_deltas, self.rois3d, score_threshold=0.)
    fusion_proposal_top = data.draw_box3d_on_top(self.top_image, boxes3d)
    prediction_top = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text
    text = ''
    if loss is not None:  # was 'loss != None': identity check is the Python idiom
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(fusion_proposal_top, text, text_pos, font, 0.5, (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, fusion_proposal_top), 1)
    top_view_log = np.concatenate((top_view_log, prediction_top), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.batch_rgb_images[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb', step=step)
def visualize_result(tag, top_view, rgb, boxes3d, probs, gt_boxes3d=None):
    """Draw predicted (and optional ground-truth) 3D boxes on both the camera
    image and the top view, then write the two images under args.target_dir.

    gt_boxes3d: optional GT boxes drawn in red; the former mutable default
    ``[]`` was replaced by the None sentinel (behavior unchanged — the list was
    never mutated, but mutable defaults are a known pitfall).
    """
    if gt_boxes3d is None:
        gt_boxes3d = []
    top_image = data.draw_top_image(top_view)
    top_image = top_image_padding(top_image)
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(probs)]
    predict_camera_view = nud.draw_box3d_on_camera(rgb, boxes3d, text_lables=text_lables)
    predict_top_view = data.draw_box3d_on_top(top_image, boxes3d)
    # draw gt on camera and top view:
    if len(gt_boxes3d) > 0:  # array size > 1 cannot directly used in if
        predict_top_view = data.draw_box3d_on_top(predict_top_view, gt_boxes3d, color=(0, 0, 255))
        predict_camera_view = draw_box3d_on_camera(predict_camera_view, gt_boxes3d, color=(0, 0, 255))
    new_size = (predict_camera_view.shape[1] // 2, predict_camera_view.shape[0] // 2)
    predict_camera_view = cv2.resize(predict_camera_view, new_size)
    cv2.imwrite(os.path.join(args.target_dir, 'image', tag + 'rgb_.png'), predict_camera_view)
    cv2.imwrite(os.path.join(args.target_dir, 'image', tag + 'top_.png'), predict_top_view)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag', is_train_mode=False):
    """Log the top view (frame tag, optional RPN image, final predictions with
    the fusion loss printed on them) and the RGB prediction image.

    loss: optional (cls_loss, reg_loss) tuple rendered on the prediction view.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.top_view[0])
    top_view_log = self.top_image.copy()
    # add text on origin
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.3, (0, 255, 100), 0, cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               is_train_mode=is_train_mode, tensor_board=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # prediction on top
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text
    text = ''
    if loss is not None:  # was 'loss != None': use identity comparison with None
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(predict_top_view, text, text_pos, font, 0.4, (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, predict_top_view), 1)
    # new_size = (top_view_log.shape[1] // 2, top_view_log.shape[0] // 2)
    # top_view_log = cv2.resize(top_view_log, new_size)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.rgb_image[0], self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb', step=step)
def save_projection(self, nms_proposals, scores, dir=None):
    """Draw the NMS proposal boxes (labelled with their scores) on the current
    RGB frame and write the image to '<dir>/<frame_id with / -> _>.png'."""
    labels = ['Prob: %.4f' % score for score in scores]
    image = nud.draw_box3d_on_camera(self.batch_rgb_images[0], nms_proposals,
                                     text_lables=labels)
    out_path = dir + '/' + self.frame_id.replace('/', '_') + '.png'
    cv2.imwrite(out_path, image)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag', is_train_mode=False):
    """Log the top view (frame tag, optional RPN image, final predictions with
    the fusion loss printed on them) and the RGB prediction image.

    loss: optional (cls_loss, reg_loss) tuple rendered on the prediction view.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.top_view[0])
    top_view_log = self.top_image.copy()
    # add text on origin
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.3, (0, 255, 100), 0, cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               is_train_mode=is_train_mode, tensor_board=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # prediction on top
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text
    text = ''
    if loss is not None:  # was 'loss != None': use identity comparison with None
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(predict_top_view, text, text_pos, font, 0.4, (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, predict_top_view), 1)
    # new_size = (top_view_log.shape[1] // 2, top_view_log.shape[0] // 2)
    # top_view_log = cv2.resize(top_view_log, new_size)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.rgb_image[0], self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb', step=step)
def train_data_render(gt_boxes3d_dir, gt_labels_dir, rgb_dir, top_dir, lidar_dir, save_video_name):
    """Render a training-data video: front view stacked over (raw top view |
    GT-boxed top view), with the GT-boxed RGB image appended on the right;
    one video frame per dumped .npy box file, written to
    cfg.LOG_DIR/save_video_name.

    ``gt_labels_dir`` is part of the signature for compatibility but is not
    read by this function.
    """
    files = glob.glob(os.path.join(gt_boxes3d_dir, "*.npy"))
    files_count = len(files)
    vid_in = skvideo.io.FFmpegWriter(os.path.join(cfg.LOG_DIR, save_video_name))
    for i in range(files_count):
        name = "{:05}".format(i)
        gt_boxes3d_file = os.path.join(gt_boxes3d_dir, name + '.npy')
        rgb_file = os.path.join(rgb_dir, name + '.png')
        top_file = os.path.join(top_dir, name + '.npy')  # kept for the cached-top alternative below
        lidar_file = os.path.join(lidar_dir, name + ".npy")
        boxes3d = np.load(gt_boxes3d_file)
        rgb_image = cv2.imread(rgb_file)
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        rgb_image_undistort = cv2.undistort(rgb_image, cameraMatrix, cameraDist, None, cameraMatrix)
        # alternative: load the precomputed top view instead of rebuilding it
        #top = np.load(top_file)
        #top_image = data.draw_top_image(top)
        lidar = np.load(lidar_file)
        top = lidar_to_top(lidar)
        top_image = draw_top_image(top)
        front = lidar_to_front(lidar)
        front_image = draw_front_image(front)
        # NOTE: a second, result-discarding lidar_to_top(lidar) call was removed here
        if len(boxes3d) > 0:
            rgb_image = draw.draw_box3d_on_camera(rgb_image, boxes3d, color=(0, 0, 255), thickness=1)
            #rgb_image_undistort = draw.draw_boxed3d_to_rgb(rgb_image_undistort, boxes3d, color=(0, 0, 80), thickness=3)
            top_image_boxed = data.draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(255, 255, 0), thickness=1)
        else:
            # avoid an unbound name (or stale previous-frame image) on frames with no boxes
            top_image_boxed = top_image
        #rgb_image_undistort = cv2.resize(rgb_image_undistort, (500, 400))
        new_image = np.concatenate((top_image, top_image_boxed), axis=1)
        new_image = np.concatenate((front_image, new_image), axis=0)
        rgb_image = cv2.resize(rgb_image,
                               (int(new_image.shape[0] * rgb_image.shape[1] / rgb_image.shape[0]),
                                new_image.shape[0]))
        new_image = np.concatenate((new_image, rgb_image), axis=1)
        vid_in.writeFrame(new_image)
    vid_in.close()
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name=''):
    """Log predictions: boxes drawn on the camera image (halved in size) and on
    the padded top view, each pushed as an image summary."""
    self.top_image = data.draw_top_image(self.top_view[0])
    self.top_image = self.top_image_padding(self.top_image)
    if log_rpn:
        self.log_rpn(step=step ,scope_name=scope_name)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # one label per predicted box: index and class-1 probability
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob) for i, prob in enumerate(self.probs)]
    predict_camera_view = nud.draw_box3d_on_camera(self.rgb_image[0], self.boxes3d,
                                                   text_lables=text_lables)
    # halve the camera view before summarizing
    new_size = (predict_camera_view.shape[1] // 2, predict_camera_view.shape[0] // 2)
    predict_camera_view = cv2.resize(predict_camera_view, new_size)
    # nud.imsave('predict_camera_view' , predict_camera_view, log_subdir)
    self.summary_image(predict_camera_view, scope_name + '/predict_camera_view', step=step)
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # nud.imsave('predict_top_view' , predict_top_view, log_subdir)
    self.summary_image(predict_top_view, scope_name + '/predict_top_view', step=step)
def dump_bbox_on_camera_image(save_preprocess_dir, dataset, objects, date, drive, frames_index, overwrite=False):
    """Save one image per frame with the ground-truth 3D boxes drawn on the
    cropped camera view, scaled to a third of its size, under
    '<save_preprocess_dir>/gt_box_plot/<date>/<drive>'.

    ``overwrite`` is part of the signature but never checked.
    """
    target_dir = os.path.join(save_preprocess_dir, 'gt_box_plot', date, drive)
    os.makedirs(target_dir, exist_ok=True)
    idx = 0
    for n in frames_index:
        print('rgb images={}'.format(n))
        image = dataset.rgb[idx][0]
        image = (image * 255).astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        image = crop_image(image)
        boxes, labels = obj_to_gt_boxes3d(objects[idx])
        rendered = draw.draw_box3d_on_camera(image, boxes)
        rendered = cv2.resize(rendered, (rendered.shape[1] // 3, rendered.shape[0] // 3))
        cv2.imwrite(os.path.join(target_dir, '%05d.png' % n), rendered)
        idx += 1
    print('gt box image save done\n')
def train_data_render(gt_boxes3d_dir, gt_labels_dir, rgb_dir, top_dir, lidar_dir, save_video_name):
    """Render a training-data video: front view stacked over (raw top view |
    GT-boxed top view), with the GT-boxed RGB image appended on the right;
    one video frame per dumped .npy box file, written to
    cfg.LOG_DIR/save_video_name.

    ``gt_labels_dir`` is part of the signature for compatibility but is not
    read by this function.
    """
    files = glob.glob(os.path.join(gt_boxes3d_dir, "*.npy"))
    files_count = len(files)
    vid_in = skvideo.io.FFmpegWriter(os.path.join(cfg.LOG_DIR, save_video_name))
    for i in range(files_count):
        name = "{:05}".format(i)
        gt_boxes3d_file = os.path.join(gt_boxes3d_dir, name + '.npy')
        rgb_file = os.path.join(rgb_dir, name + '.png')
        top_file = os.path.join(top_dir, name + '.npy')  # kept for the cached-top alternative below
        lidar_file = os.path.join(lidar_dir, name + ".npy")
        boxes3d = np.load(gt_boxes3d_file)
        rgb_image = cv2.imread(rgb_file)
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        rgb_image_undistort = cv2.undistort(rgb_image, cameraMatrix, cameraDist, None, cameraMatrix)
        # alternative: load the precomputed top view instead of rebuilding it
        #top = np.load(top_file)
        #top_image = data.draw_top_image(top)
        lidar = np.load(lidar_file)
        top = lidar_to_top(lidar)
        top_image = draw_top_image(top)
        front = lidar_to_front(lidar)
        front_image = draw_front_image(front)
        # NOTE: a second, result-discarding lidar_to_top(lidar) call was removed here
        if len(boxes3d) > 0:
            rgb_image = draw.draw_box3d_on_camera(rgb_image, boxes3d, color=(0, 0, 255), thickness=1)
            #rgb_image_undistort = draw.draw_boxed3d_to_rgb(rgb_image_undistort, boxes3d, color=(0, 0, 80), thickness=3)
            top_image_boxed = data.draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(255, 255, 0), thickness=1)
        else:
            # avoid an unbound name (or stale previous-frame image) on frames with no boxes
            top_image_boxed = top_image
        #rgb_image_undistort = cv2.resize(rgb_image_undistort, (500, 400))
        new_image = np.concatenate((top_image, top_image_boxed), axis=1)
        new_image = np.concatenate((front_image, new_image), axis=0)
        rgb_image = cv2.resize(rgb_image,
                               (int(new_image.shape[0] * rgb_image.shape[1] / rgb_image.shape[0]),
                                new_image.shape[0]))
        new_image = np.concatenate((new_image, rgb_image), axis=1)
        vid_in.writeFrame(new_image)
    vid_in.close()
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag', is_train_mode=False):
    """Build a composite top-view log image (frame tag, optional RPN view,
    score-annotated fusion proposals, final predictions) and an RGB prediction
    image, and push both as image summaries.

    loss: optional (cls_loss, reg_loss) tuple printed onto the proposal view.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.batch_top_view[0])
    top_view_log = self.top_image.copy()
    # add text on origin
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.5, (0, 255, 100), 0, cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               tensor_board=False, draw_rpn_target=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # all prediction on top
    probs, boxes3d = rcnn_nms(self.fuse_probs, self.fuse_deltas, self.rois3d, score_threshold=0.)
    fusion_proposal_top = data.draw_box3d_on_top(self.top_image, boxes3d, scores=probs, thickness=0)
    prediction_top = data.draw_box3d_on_top(self.top_image, self.boxes3d, scores=self.probs, thickness=0)
    # add fusion loss text
    text = ''
    if loss is not None:  # was 'loss != None': identity check is the Python idiom
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(fusion_proposal_top, text, text_pos, font, 0.5, (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, fusion_proposal_top), 1)
    top_view_log = np.concatenate((top_view_log, prediction_top), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.batch_rgb_images[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb', step=step)
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False, frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor over the dataset, filter GT and predicted 3D
    boxes by projected image height (>= 40 px), write both into tracklet XML
    files, save per-frame debug images (top view + RGB, side by side) and
    optionally a video. Returns (predicted tracklet path, GT tracklet path).

    NOTE(review): the GT .npy path below is hard-coded and machine specific —
    TODO parameterize. Relies on module-level log_dir / log_subdir / fast_test.
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir, 'pred')
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    gt_tracklet = Tracklet_saver(tracklet_pred_dir, 'gt')
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag, weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))
    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()
    print('dataset.size')
    print(dataset.size)
    lenght = []     # per-frame counts of kept predicted boxes
    gt_lenght = []  # per-frame counts of kept GT boxes
    frame_num = 0
    for i in range(dataset.size if fast_test == False else frame_offset + 1):
        rgb, top, front, _, _, _ = dataset.load(size=1)
        # frames before frame_offset are skipped (warm-up offset)
        frame_num = i - frame_offset
        print('frame_num')
        print(frame_num)
        if frame_num < 0:
            continue
        gt_boxes3d_tmp = np.load(
            '/home/mohsen/Desktop/MV3D/data/preprocessed/kitti/gt_boxes3d/object3d/test/%05d.npy' % i)
        #remove gt boxes with hiegh less than 40
        gt_boxes3d_list = []
        for gt_box3d_tmp in gt_boxes3d_tmp:
            # if gt_box3d_tmp[0,0]>0:
            gt_box3d_tmp_list = []
            gt_box3d_tmp_list.append(gt_box3d_tmp)
            gt_project = box3d.box3d_to_rgb_box(gt_box3d_tmp_list)
            # keep the box only if its projected image height is at least 40 px
            if abs(gt_project[0][0, 1] - gt_project[0][4, 1]) >= 40:
                gt_box3d = gt_box3d_tmp
                gt_boxes3d_list.append(gt_box3d)
        gt_boxes3d = np.array(gt_boxes3d_list)
        # gt_boxes3d = gt_boxes3d_tmp
        #####################################
        boxes3d_tmp, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)
        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))
        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]
        if len(gt_boxes3d) != 0:
            gt_lenght.append(len(gt_boxes3d))
            gt_translation, gt_size, gt_rotation = boxes3d_decompose(
                gt_boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            gt_size[:, 1:3] = gt_size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(gt_translation)):
                gt_tracklet.add_tracklet(frame_num, gt_size[j], gt_translation[j], gt_rotation[j])
        #remove predicted boxes with hiegh less than 40
        boxes3d_list = []
        for box3d_tmp in boxes3d_tmp:
            # if box3d_tmp[0, 0] > 0:
            box3d_tmp_list = []
            box3d_tmp_list.append(box3d_tmp)
            project = box3d.box3d_to_rgb_box(box3d_tmp_list)
            # same projected-height filter as for the GT boxes above
            if abs(project[0][0, 1] - project[0][4, 1]) >= 40:
                print(project[0][0, 1] - project[0][4, 1])
                pred_box3d = box3d_tmp
                boxes3d_list.append(pred_box3d)
        boxes3d = np.array(boxes3d_list)
        # boxes3d = boxes3d_tmp
        #####################################
        print('sizes')
        print(np.size(boxes3d))
        print(gt_boxes3d)
        print(np.size(gt_boxes3d))
        if len(boxes3d) != 0:
            lenght.append(len(boxes3d))
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :], color=(0, 0, 80), thickness=3)
            if len(gt_boxes3d) != 0:
                rgb_image = draw_box3d_on_camera(rgb_image, gt_boxes3d[:, :, :], color=(0, 80, 0), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                tracklet.add_tracklet(frame_num, size[j], translation[j], rotation[j])
        # scale the RGB image to the top image's height, then place side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image, (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i), new_image)
        if generate_video:
            vid_in.writeFrame(new_image)
    # NOTE(review): vid_in only exists when generate_video is true — this close
    # looks like it would raise NameError otherwise; confirm the intended guard.
    vid_in.close()
    print(lenght)
    print(sum(lenght))
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    print(gt_lenght)
    print(sum(gt_lenght))
    gt_tracklet.write_tracklet()
    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))
    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path, gt_tracklet.path
# For every frame tag: load tracklet/image/lidar, preprocess them, then dump
# the RGB view (with 3D boxes, scaled to 1/3) and the top view (with boxes)
# under LOG_DIR/test/{rgb,top}.
os.makedirs(os.path.join(config.cfg.LOG_DIR, 'test', 'top'), exist_ok=True)
for one_frame_tag in tags:
    # load
    objs = raw_tracklet.load(one_frame_tag)
    rgb = raw_img.load(one_frame_tag)
    lidar = raw_lidar.load(one_frame_tag)
    # preprocess
    rgb = preprocess.rgb(rgb)
    top = preprocess.lidar_to_top(lidar)
    boxes3d = [preprocess.bbox3d(obj) for obj in objs]
    labels = [preprocess.label(obj) for obj in objs]
    # dump rgb
    img = draw.draw_box3d_on_camera(rgb, boxes3d)
    new_size = (img.shape[1] // 3, img.shape[0] // 3)
    img = cv2.resize(img, new_size)
    path = os.path.join(config.cfg.LOG_DIR, 'test', 'rgb', '%s.png' % one_frame_tag.replace('/', '_'))
    cv2.imwrite(path, img)
    print('write %s finished' % path)
    # draw bbox on top image
    path = os.path.join(config.cfg.LOG_DIR, 'test', 'top', '%s.png' % one_frame_tag.replace('/', '_'))
    top_image = data.draw_top_image(top)
    top_image = data.draw_box3d_on_top(top_image, boxes3d, color=(0, 0, 80))
    cv2.imwrite(path, top_image)
def iteration_summary(self, prefix, summaryWriter):
    """
    Method for creating numerous image summaries.
    :param prefix: Can be 'training' or 'validation'
    :param summaryWriter: Writer instance either for training or validation
    """
    # get the indices of pos+neg, positive anchors as well as labels and regression targets
    top_inds, pos_inds, labels, targets = self.network.get_targets(
        self.batch_gt_labels, self.batch_gt_boxes3d)
    top = data.draw_top_image(self.batch_top_view[0]).copy()
    self.log_image(step=self.n_global_step, prefix=prefix, frame_tag=self.frame_id,
                   summary_writer=summaryWriter)
    proposals, scores, probs = self.predict(self.batch_top_view, self.batch_rgb_images)
    nms_proposals, nms_scores = self.post_process(proposals, probs, minProb=0.1)
    ######################
    ##  probabilities   ##
    ######################
    for i in range(1, cfg.NUM_CLASSES):
        # reshape from [-1, NUM_CLASSES]
        # @TODO don't make the reshape fixed
        # NOTE(review): even/odd rows appear to hold two orientations per cell
        # ('Up'/'Right'); the (176*2, 200*2) grid size is hard-coded — confirm.
        self.summary_image(np.reshape(probs[::2, i], (176 * 2, 200 * 2)),
                           prefix + '/probabilities' + str(i) + '_Up',
                           summaryWriter, step=self.n_global_step)
        self.summary_image(np.reshape(probs[1::2, i], (176 * 2, 200 * 2)),
                           prefix + '/probabilities' + str(i) + '_Right',
                           summaryWriter, step=self.n_global_step)
    ######################
    ##    proposals     ##
    ######################
    # anchors whose argmax class is not background
    proposalIdx = np.where(np.argmax(probs, axis=1) != 0)
    numProposals = np.sum(proposalIdx)
    Boxes3d = []
    if (numProposals > 0):
        Boxes3d = net.processing.boxes.box_transform_voxelnet_inv(
            proposals[proposalIdx], self.network.anchors[proposalIdx])
        topbox = net.processing.boxes3d.draw_box3d_on_top(top, Boxes3d)
        self.summary_image(topbox, prefix + '/proposalBoxes',
                           summaryWriter, step=self.n_global_step)
    ######################
    ## top 10 proposals ##
    ######################
    # ten highest class-1 scores
    sortedIdx = np.argsort(-probs[:, 1].flatten())
    top10Proposals = proposals[sortedIdx[:10]]
    ### top visualization ###
    Boxes3d = net.processing.boxes.box_transform_voxelnet_inv(
        top10Proposals, self.network.anchors[sortedIdx[:10]])
    topbox = net.processing.boxes3d.draw_box3d_on_top(top, Boxes3d)
    self.summary_image(topbox, prefix + '/top10proposalBoxes',
                       summaryWriter, step=self.n_global_step)
    #######################
    ## nms visualization ##
    #######################
    topbox = top
    for i in range(0, cfg.NUM_CLASSES - 1):
        topbox = net.processing.boxes3d.draw_box3d_on_top(
            topbox, nms_proposals[i][:10, :, :], scores=nms_scores[i])
    self.summary_image(topbox, prefix + '/top10NMSBoxes',
                       summaryWriter, step=self.n_global_step)
    ### to rgb image ###
    # one color per (non-background) class
    colors = [(255, 0, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255)]
    rgb_image = self.batch_rgb_images[0]
    for i in range(0, cfg.NUM_CLASSES - 1):
        text_labels = ['Prob: %.4f' % (prob) for prob in nms_scores[i]]
        rgb_image = nud.draw_box3d_on_camera(rgb_image, nms_proposals[i][:, :, :],
                                             text_lables=text_labels, color=colors[i])
    # add the groundtruth
    # Boxes3d = net.processing.boxes.box_transform_voxelnet_inv(targets, self.voxelnet.anchors[pos_inds])
    # rgb_image = nud.draw_box3d_on_camera(rgb_image, Boxes3d,
    # text_lables=text_labels, color=(0, 255, 255))
    self.summary_image(rgb_image, prefix + '/prediction_on_rgb',
                       summaryWriter, step=self.n_global_step)
    ######################
    ##   groundtruth    ##
    ######################
    Boxes3d = net.processing.boxes.box_transform_voxelnet_inv(
        targets, self.network.anchors[pos_inds])
    topbox = net.processing.boxes3d.draw_box3d_on_top(top, Boxes3d)
    self.summary_image(topbox, prefix + '/Targets',
                       summaryWriter, step=self.n_global_step)
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False, frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor over the dataset, write predicted boxes into a
    tracklet XML file, save per-frame debug images (top view + RGB, side by
    side) and optionally a video. Returns the tracklet file path.

    NOTE(review): relies on module-level log_dir / log_subdir / fast_test.
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag,
                             weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))
    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()
    frame_num = 0
    for i in range(dataset.size if fast_test == False else frame_offset + 1):
        rgb, top, front, _, _, _ = dataset.load()
        # frames before frame_offset are skipped (warm-up offset)
        frame_num = i - frame_offset
        if frame_num < 0:
            continue
        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)
        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))
        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]
        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :], color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :], color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                # only keep boxes in a lateral band — presumably the ego lane;
                # NOTE(review): confirm the 0 < y < 8 filter's intent.
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j], rotation[j])
        # scale the RGB image to the top image's height, then place side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image, (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i), new_image)
        if generate_video:
            vid_in.writeFrame(new_image)
    # NOTE(review): vid_in only exists when generate_video is true — this close
    # looks like it would raise NameError otherwise; confirm the intended guard.
    vid_in.close()
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))
    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
# For every frame tag: load tracklet/image/lidar, preprocess them, then dump
# the RGB view (with 3D boxes, scaled to 1/3) and the top view (with boxes)
# under LOG_DIR/test/{rgb,top}.
for one_frame_tag in tags:
    # load
    objs = raw_tracklet.load(one_frame_tag)
    rgb = raw_img.load(one_frame_tag)
    lidar = raw_lidar.load(one_frame_tag)
    # preprocess
    rgb = preprocess.rgb(rgb)
    top = preprocess.lidar_to_top(lidar)
    boxes3d = [preprocess.bbox3d(obj) for obj in objs]
    labels = [preprocess.label(obj) for obj in objs]
    # dump rgb
    img = draw.draw_box3d_on_camera(rgb, boxes3d)
    new_size = (img.shape[1] // 3, img.shape[0] // 3)
    img = cv2.resize(img, new_size)
    path = os.path.join(config.cfg.LOG_DIR,'test','rgb', '%s.png' % one_frame_tag.replace('/','_'))
    cv2.imwrite(path, img)
    print('write %s finished' % path)
    # draw bbox on top image
    path = os.path.join(config.cfg.LOG_DIR, 'test', 'top', '%s.png' % one_frame_tag.replace('/', '_'))
    top_image = data.draw_top_image(top)
    top_image = data.draw_box3d_on_top(top_image, boxes3d, color=(0, 0, 80))
    cv2.imwrite(path, top_image)
    print('write %s finished' % path)
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False, frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor over the whole dataset, write predicted boxes
    into a tracklet XML file, save per-frame debug images (top view + RGB,
    side by side) and optionally a video. Returns the tracklet file path.

    NOTE(review): relies on module-level log_dir / log_subdir.
    """
    # Tracklet_saver will check whether the file already exists.
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs (os.path.join(log_dir,'image'),exist_ok=True)
    top_shape, front_shape, rgb_shape=dataset.get_shape()
    predict=mv3d.Predictor(top_shape, front_shape, rgb_shape, log_tag=log_tag,
                           weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir,'output.mp4'))
    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()
    frame_num = 0
    for i in range(dataset.size):
        rgb, top, front, _, _,_= dataset.load(1)
        # frames before frame_offset are skipped (warm-up offset)
        frame_num = i - frame_offset
        if frame_num < 0:
            continue
        boxes3d,probs=predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir,n_frame=i)
        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i%timer_step ==0 and i!=0:
            predict.track_log.write('It takes %0.2f secs for inferring %d frames. \n' % \
                                    (time_it.time_diff_per_n_loops(), timer_step))
        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]
        if len(boxes3d)!=0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:,:,:], color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :], color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            #todo: remove it after gtbox is ok
            size[:,1:3] = size[:,1:3]/cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                # only keep boxes in a lateral band — presumably the ego lane;
                # NOTE(review): confirm the 0 < y < 8 filter's intent.
                if 0<translation[j,1]<8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j], rotation[j])
        # scale the RGB image to the top image's height, then place side by side
        resize_scale=top_image.shape[0]/rgb_image.shape[0]
        rgb_image = cv2.resize(rgb_image,(int(rgb_image.shape[1]*resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis = 1)
        cv2.imwrite(os.path.join(log_dir,'image','%5d_image.jpg'%i), new_image)
        if generate_video:
            vid_in.writeFrame(new_image)
    # NOTE(review): vid_in only exists when generate_video is true — this close
    # looks like it would raise NameError otherwise; confirm the intended guard.
    vid_in.close()
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    if cfg.TRACKING_TIMER:
        predict.log_msg.write('It takes %0.2f secs for inferring the whole test dataset. \n' % \
                              (time_it.total_time()))
    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
def save_rgb_batch(batch_rgb_images, batch_gt_boxes3d, name):
    """Draw the first sample's ground-truth 3D boxes on its RGB image and save
    it via nud.imsave as 'RGB_<last path component of name>'."""
    boxed = nud.draw_box3d_on_camera(batch_rgb_images[0], batch_gt_boxes3d[0])
    basename = name.split('/')[-1]
    nud.imsave('RGB_' + basename, boxed)