def visualize_result(tag, top_view, rgb, boxes3d, probs, gt_boxes3d=()):
    """Save prediction visualizations for one frame to ``args.target_dir/image``.

    Draws predicted 3D boxes (with per-box probability labels) on both the
    camera image and the lidar top view, optionally overlays ground-truth
    boxes in red, then writes ``<tag>rgb_.png`` and ``<tag>top_.png``.

    Args:
        tag: frame tag used as the output-file prefix.
        top_view: raw lidar top-view tensor (rendered via data.draw_top_image).
        rgb: camera image to draw on.
        boxes3d: predicted 3D boxes.
        probs: per-box scores, same length as boxes3d.
        gt_boxes3d: optional ground-truth boxes; empty means "no gt overlay".
            (fix: default was a mutable list ``[]``; an immutable tuple avoids
            the shared-mutable-default pitfall and is backward compatible.)
    """
    top_image = data.draw_top_image(top_view)
    top_image = top_image_padding(top_image)
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(probs)]
    predict_camera_view = nud.draw_box3d_on_camera(rgb, boxes3d,
                                                   text_lables=text_lables)
    predict_top_view = data.draw_box3d_on_top(top_image, boxes3d)
    # Draw ground truth (red) on camera and top view.
    # len() is used because a numpy array of size > 1 cannot be truth-tested.
    if len(gt_boxes3d) > 0:
        predict_top_view = data.draw_box3d_on_top(predict_top_view, gt_boxes3d,
                                                  color=(0, 0, 255))
        # fix: was an unqualified draw_box3d_on_camera (inconsistent with the
        # nud.-qualified call above and a likely NameError).
        predict_camera_view = nud.draw_box3d_on_camera(predict_camera_view,
                                                       gt_boxes3d,
                                                       color=(0, 0, 255))
    # Halve the camera view so the saved image is a manageable size.
    new_size = (predict_camera_view.shape[1] // 2,
                predict_camera_view.shape[0] // 2)
    predict_camera_view = cv2.resize(predict_camera_view, new_size)
    cv2.imwrite(os.path.join(args.target_dir, 'image', tag + 'rgb_.png'),
                predict_camera_view)
    cv2.imwrite(os.path.join(args.target_dir, 'image', tag + 'top_.png'),
                predict_top_view)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag',
                is_train_mode=False):
    """Log fusion-net predictions for one frame to the image summary.

    Builds a strip of top-view images (origin + optional RPN + fused
    proposals + final predictions) and logs it, then logs the predictions
    drawn on the RGB image.

    Args:
        log_subdir: subdirectory for the fusion-net detail dump.
        log_rpn: when True, also render the RPN image and append it.
        step: summary step.
        scope_name: prefix for the summary tags.
        loss: optional (cls_loss, reg_loss) tuple rendered as text.
        frame_tag: tag text drawn on the origin top view.
        is_train_mode: unused in this variant; kept for interface
            compatibility with sibling implementations.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.batch_top_view[0])
    top_view_log = self.top_image.copy()
    # add frame tag text on the origin image
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.5, (0, 255, 100), 0,
                cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               tensor_board=False, draw_rpn_target=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # all proposals on top view (score_threshold=0. keeps everything)
    probs, boxes3d = rcnn_nms(self.fuse_probs, self.fuse_deltas, self.rois3d,
                              score_threshold=0.)
    fusion_proposal_top = data.draw_box3d_on_top(self.top_image, boxes3d)
    prediction_top = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text (fix: identity comparison instead of `!= None`)
    text = ''
    if loss is not None:
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(fusion_proposal_top, text, text_pos, font, 0.5,
                (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction into one wide strip
    top_view_log = np.concatenate((top_view_log, fusion_proposal_top), 1)
    top_view_log = np.concatenate((top_view_log, prediction_top), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.batch_rgb_images[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb',
                       step=step)
def draw_bbox_on_lidar_top(top, boxes3d, one_frame_tag):
    """Render the lidar top view with 3D boxes and save it as a PNG.

    The output file is ``<LOG_DIR>/test/top/<tag>.png`` where '/' in the
    frame tag is replaced by '_' to yield a flat file name.
    """
    safe_tag = one_frame_tag.replace('/', '_')
    out_path = os.path.join(config.cfg.LOG_DIR, 'test', 'top',
                            '%s.png' % safe_tag)
    boxed_top = data.draw_box3d_on_top(data.draw_top_image(top), boxes3d,
                                       color=(0, 0, 80))
    cv2.imwrite(out_path, boxed_top)
    print('write %s finished' % out_path)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag',
                is_train_mode=False):
    """Log top-view and RGB prediction visualizations for one frame.

    Args:
        log_subdir: subdirectory for the fusion-net detail dump.
        log_rpn: when True, also render the RPN image and append it.
        step: summary step.
        scope_name: prefix for the summary tags.
        loss: optional (cls_loss, reg_loss) tuple rendered as text.
        frame_tag: tag text drawn on the origin top view.
        is_train_mode: forwarded to log_rpn.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.top_view[0])
    top_view_log = self.top_image.copy()
    # add frame tag text on the origin image
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.3, (0, 255, 100), 0,
                cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               is_train_mode=is_train_mode,
                               tensor_board=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # prediction on top
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text (fix: identity comparison instead of `!= None`)
    text = ''
    if loss is not None:
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(predict_top_view, text, text_pos, font, 0.4, (0, 255, 100),
                0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, predict_top_view), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.rgb_image[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb',
                       step=step)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name=''):
    """Write prediction visualizations (camera + top view) to the summary.

    Renders the padded lidar top view, optionally logs the RPN output,
    dumps the fusion-net detail, then logs the predicted boxes drawn on the
    (half-size) camera image and on the top view.
    """
    self.top_image = data.draw_top_image(self.top_view[0])
    self.top_image = self.top_image_padding(self.top_image)
    if log_rpn:
        self.log_rpn(step=step, scope_name=scope_name)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)

    box_labels = ['No.%d class:1 prob: %.4f' % (idx, score)
                  for idx, score in enumerate(self.probs)]
    camera_view = nud.draw_box3d_on_camera(self.rgb_image[0], self.boxes3d,
                                           text_lables=box_labels)
    half_size = (camera_view.shape[1] // 2, camera_view.shape[0] // 2)
    camera_view = cv2.resize(camera_view, half_size)
    self.summary_image(camera_view, scope_name + '/predict_camera_view',
                       step=step)

    top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    self.summary_image(top_view, scope_name + '/predict_top_view', step=step)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag',
                is_train_mode=False):
    """Log top-view and RGB prediction visualizations for one frame.

    Args:
        log_subdir: subdirectory for the fusion-net detail dump.
        log_rpn: when True, also render the RPN image and append it.
        step: summary step.
        scope_name: prefix for the summary tags.
        loss: optional (cls_loss, reg_loss) tuple rendered as text.
        frame_tag: tag text drawn on the origin top view.
        is_train_mode: forwarded to log_rpn.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.top_view[0])
    top_view_log = self.top_image.copy()
    # add frame tag text on the origin image
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.3, (0, 255, 100), 0,
                cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               is_train_mode=is_train_mode,
                               tensor_board=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # prediction on top
    predict_top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    # add fusion loss text (fix: identity comparison instead of `!= None`)
    text = ''
    if loss is not None:
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(predict_top_view, text, text_pos, font, 0.4, (0, 255, 100),
                0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction
    top_view_log = np.concatenate((top_view_log, predict_top_view), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.rgb_image[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb',
                       step=step)
def train_data_render(gt_boxes3d_dir, gt_labels_dir, rgb_dir, top_dir,
                      lidar_dir, save_video_name):
    """Render training frames (front + top + boxed top + RGB) into a video.

    For each frame ``NNNNN`` found in gt_boxes3d_dir, loads the gt boxes,
    RGB image and lidar, composes a single image (front view over the
    plain/boxed top views, RGB on the right) and appends it to the video
    written at ``<LOG_DIR>/<save_video_name>``.

    Args:
        gt_boxes3d_dir: directory of per-frame gt boxes (*.npy); its file
            count drives the frame loop.
        gt_labels_dir, top_dir: accepted for interface compatibility but
            unused (labels / precomputed top are not rendered here).
        rgb_dir: directory of per-frame camera images (*.png).
        lidar_dir: directory of per-frame lidar point clouds (*.npy).
        save_video_name: output file name under cfg.LOG_DIR.
    """
    files = glob.glob(os.path.join(gt_boxes3d_dir, "*.npy"))
    files_count = len(files)
    vid_in = skvideo.io.FFmpegWriter(os.path.join(cfg.LOG_DIR,
                                                  save_video_name))
    for i in range(files_count):
        name = "{:05}".format(i)
        gt_boxes3d_file = os.path.join(gt_boxes3d_dir, name + '.npy')
        rgb_file = os.path.join(rgb_dir, name + '.png')
        lidar_file = os.path.join(lidar_dir, name + ".npy")
        boxes3d = np.load(gt_boxes3d_file)
        rgb_image = cv2.imread(rgb_file)
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        lidar = np.load(lidar_file)
        top = lidar_to_top(lidar)
        top_image = draw_top_image(top)
        front = lidar_to_front(lidar)
        front_image = draw_front_image(front)
        # fix: removed a second lidar_to_top(lidar) call whose result was
        # discarded, and the unused undistort / path computations.
        # fix: top_image_boxed was only bound inside the `if` below but used
        # unconditionally afterwards -> NameError on frames without boxes.
        top_image_boxed = top_image
        if len(boxes3d) > 0:
            rgb_image = draw.draw_box3d_on_camera(rgb_image, boxes3d,
                                                  color=(0, 0, 255),
                                                  thickness=1)
            top_image_boxed = data.draw_box3d_on_top(top_image,
                                                     boxes3d[:, :, :],
                                                     color=(255, 255, 0),
                                                     thickness=1)
        # layout: [front over (top | boxed top)] | resized rgb
        new_image = np.concatenate((top_image, top_image_boxed), axis=1)
        new_image = np.concatenate((front_image, new_image), axis=0)
        rgb_image = cv2.resize(rgb_image,
                               (int(new_image.shape[0] * rgb_image.shape[1]
                                    / rgb_image.shape[0]),
                                new_image.shape[0]))
        new_image = np.concatenate((new_image, rgb_image), axis=1)
        vid_in.writeFrame(new_image)
    vid_in.close()
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name='',
                loss: tuple = None, frame_tag='unknown_tag',
                is_train_mode=False):
    """Log fusion-net predictions (scored boxes) for one frame.

    Builds a strip of top-view images (origin + optional RPN + fused
    proposals + final predictions, both drawn with per-box scores) and logs
    it, then logs the predictions drawn on the RGB image.

    Args:
        log_subdir: subdirectory for the fusion-net detail dump.
        log_rpn: when True, also render the RPN image and append it.
        step: summary step.
        scope_name: prefix for the summary tags.
        loss: optional (cls_loss, reg_loss) tuple rendered as text.
        frame_tag: tag text drawn on the origin top view.
        is_train_mode: unused in this variant; kept for interface
            compatibility with sibling implementations.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    text_pos = (5, 25)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)
    # origin top view
    self.top_image = data.draw_top_image(self.batch_top_view[0])
    top_view_log = self.top_image.copy()
    # add frame tag text on the origin image
    text = frame_tag
    cv2.putText(top_view_log, text, text_pos, font, 0.5, (0, 255, 100), 0,
                cv2.LINE_AA)
    if log_rpn:
        rpn_img = self.log_rpn(step=step, scope_name=scope_name,
                               tensor_board=False, draw_rpn_target=False)
        top_view_log = np.concatenate((top_view_log, rpn_img), 1)
    # all proposals on top view (score_threshold=0. keeps everything)
    probs, boxes3d = rcnn_nms(self.fuse_probs, self.fuse_deltas, self.rois3d,
                              score_threshold=0.)
    fusion_proposal_top = data.draw_box3d_on_top(self.top_image, boxes3d,
                                                 scores=probs, thickness=0)
    prediction_top = data.draw_box3d_on_top(self.top_image, self.boxes3d,
                                            scores=self.probs, thickness=0)
    # add fusion loss text (fix: identity comparison instead of `!= None`)
    text = ''
    if loss is not None:
        text += 'loss c: %6f r: %6f' % loss
    cv2.putText(fusion_proposal_top, text, text_pos, font, 0.5,
                (0, 255, 100), 0, cv2.LINE_AA)
    # concatenate top_view_log and final prediction into one wide strip
    top_view_log = np.concatenate((top_view_log, fusion_proposal_top), 1)
    top_view_log = np.concatenate((top_view_log, prediction_top), 1)
    self.summary_image(top_view_log, scope_name + '/top_view', step=step)
    # prediction on rgb
    text_lables = ['No.%d class:1 prob: %.4f' % (i, prob)
                   for i, prob in enumerate(self.probs)]
    prediction_on_rgb = nud.draw_box3d_on_camera(self.batch_rgb_images[0],
                                                 self.boxes3d,
                                                 text_lables=text_lables)
    self.summary_image(prediction_on_rgb, scope_name + '/prediction_on_rgb',
                       step=step)
def predict_log(self, log_subdir, log_rpn=False, step=None, scope_name=''):
    """Write prediction visualizations (camera + top view) to the summary.

    Renders the padded lidar top view, optionally logs the RPN output,
    dumps the fusion-net detail, then logs the predicted boxes drawn on the
    (half-size) camera image and on the top view.
    """
    self.top_image = data.draw_top_image(self.top_view[0])
    self.top_image = self.top_image_padding(self.top_image)
    if log_rpn:
        self.log_rpn(step=step, scope_name=scope_name)
    self.log_fusion_net_detail(log_subdir, self.fuse_probs, self.fuse_deltas)

    box_labels = ['No.%d class:1 prob: %.4f' % (idx, score)
                  for idx, score in enumerate(self.probs)]
    camera_view = nud.draw_box3d_on_camera(self.rgb_image[0], self.boxes3d,
                                           text_lables=box_labels)
    half_size = (camera_view.shape[1] // 2, camera_view.shape[0] // 2)
    camera_view = cv2.resize(camera_view, half_size)
    self.summary_image(camera_view, scope_name + '/predict_camera_view',
                       step=step)

    top_view = data.draw_box3d_on_top(self.top_image, self.boxes3d)
    self.summary_image(top_view, scope_name + '/predict_top_view', step=step)
def train_data_render(gt_boxes3d_dir, gt_labels_dir, rgb_dir, top_dir,
                      lidar_dir, save_video_name):
    """Render training frames (front + top + boxed top + RGB) into a video.

    For each frame ``NNNNN`` found in gt_boxes3d_dir, loads the gt boxes,
    RGB image and lidar, composes a single image (front view over the
    plain/boxed top views, RGB on the right) and appends it to the video
    written at ``<LOG_DIR>/<save_video_name>``.

    Args:
        gt_boxes3d_dir: directory of per-frame gt boxes (*.npy); its file
            count drives the frame loop.
        gt_labels_dir, top_dir: accepted for interface compatibility but
            unused (labels / precomputed top are not rendered here).
        rgb_dir: directory of per-frame camera images (*.png).
        lidar_dir: directory of per-frame lidar point clouds (*.npy).
        save_video_name: output file name under cfg.LOG_DIR.
    """
    files = glob.glob(os.path.join(gt_boxes3d_dir, "*.npy"))
    files_count = len(files)
    vid_in = skvideo.io.FFmpegWriter(os.path.join(cfg.LOG_DIR,
                                                  save_video_name))
    for i in range(files_count):
        name = "{:05}".format(i)
        gt_boxes3d_file = os.path.join(gt_boxes3d_dir, name + '.npy')
        rgb_file = os.path.join(rgb_dir, name + '.png')
        lidar_file = os.path.join(lidar_dir, name + ".npy")
        boxes3d = np.load(gt_boxes3d_file)
        rgb_image = cv2.imread(rgb_file)
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        lidar = np.load(lidar_file)
        top = lidar_to_top(lidar)
        top_image = draw_top_image(top)
        front = lidar_to_front(lidar)
        front_image = draw_front_image(front)
        # fix: removed a second lidar_to_top(lidar) call whose result was
        # discarded, and the unused undistort / path computations.
        # fix: top_image_boxed was only bound inside the `if` below but used
        # unconditionally afterwards -> NameError on frames without boxes.
        top_image_boxed = top_image
        if len(boxes3d) > 0:
            rgb_image = draw.draw_box3d_on_camera(rgb_image, boxes3d,
                                                  color=(0, 0, 255),
                                                  thickness=1)
            top_image_boxed = data.draw_box3d_on_top(top_image,
                                                     boxes3d[:, :, :],
                                                     color=(255, 255, 0),
                                                     thickness=1)
        # layout: [front over (top | boxed top)] | resized rgb
        new_image = np.concatenate((top_image, top_image_boxed), axis=1)
        new_image = np.concatenate((front_image, new_image), axis=0)
        rgb_image = cv2.resize(rgb_image,
                               (int(new_image.shape[0] * rgb_image.shape[1]
                                    / rgb_image.shape[0]),
                                new_image.shape[0]))
        new_image = np.concatenate((new_image, rgb_image), axis=1)
        vid_in.writeFrame(new_image)
    vid_in.close()
# load objs = raw_tracklet.load(one_frame_tag) rgb = raw_img.load(one_frame_tag) lidar = raw_lidar.load(one_frame_tag) # preprocess rgb = preprocess.rgb(rgb) top = preprocess.lidar_to_top(lidar) boxes3d = [preprocess.bbox3d(obj) for obj in objs] labels = [preprocess.label(obj) for obj in objs] # dump rgb img = draw.draw_box3d_on_camera(rgb, boxes3d) new_size = (img.shape[1] // 3, img.shape[0] // 3) img = cv2.resize(img, new_size) path = os.path.join(config.cfg.LOG_DIR,'test','rgb', '%s.png' % one_frame_tag.replace('/','_')) cv2.imwrite(path, img) print('write %s finished' % path) # draw bbox on top image path = os.path.join(config.cfg.LOG_DIR, 'test', 'top', '%s.png' % one_frame_tag.replace('/', '_')) top_image = data.draw_top_image(top) top_image = data.draw_box3d_on_top(top_image, boxes3d, color=(0, 0, 80)) cv2.imwrite(path, top_image) print('write %s finished' % path)
# num libs
import math
import random
import numpy as np
import cv2

import config
import data
import net.utility.draw as draw

if __name__ == '__main__':
    # Debug script: mark one preprocessed frame's gt boxes on its top view
    # and dump the result via draw.imsave for visual inspection.
    # (PREPROCESSING_DATA_SETS_DIR is used; PREPROCESSED_DATA_SETS_DIR is the
    # older name.)
    preprocessed_dir = config.cfg.PREPROCESSING_DATA_SETS_DIR
    sample = '/1/15/00070.npy'
    top_view_path = preprocessed_dir + '/top' + sample
    gt_boxes3d_path = preprocessed_dir + '/gt_boxes3d' + sample

    top_view = np.load(top_view_path)
    gt_boxes = np.load(gt_boxes3d_path)

    marked = data.draw_box3d_on_top(data.draw_top_image(top_view), gt_boxes)
    draw.imsave('top_img_marked', marked, 'debug')
    print('top_img_marked dump finished!!')
# load objs = raw_tracklet.load(one_frame_tag) rgb = raw_img.load(one_frame_tag) lidar = raw_lidar.load(one_frame_tag) # preprocess rgb = preprocess.rgb(rgb) top = preprocess.lidar_to_top(lidar) boxes3d = [preprocess.bbox3d(obj) for obj in objs] labels = [preprocess.label(obj) for obj in objs] # dump rgb img = draw.draw_box3d_on_camera(rgb, boxes3d) new_size = (img.shape[1] // 3, img.shape[0] // 3) img = cv2.resize(img, new_size) path = os.path.join(config.cfg.LOG_DIR, 'test', 'rgb', '%s.png' % one_frame_tag.replace('/', '_')) cv2.imwrite(path, img) print('write %s finished' % path) # draw bbox on top image path = os.path.join(config.cfg.LOG_DIR, 'test', 'top', '%s.png' % one_frame_tag.replace('/', '_')) top_image = data.draw_top_image(top) top_image = data.draw_box3d_on_top(top_image, boxes3d, color=(0, 0, 80)) cv2.imwrite(path, top_image) print('write %s finished' % path)
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False,
                  frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor over a dataset and save a tracklet XML.

    For each frame after ``frame_offset``, predicts 3D boxes, records them
    as tracklets (only boxes with 0 < y-translation < 8), and writes a
    debug image (top view | rgb) per frame; optionally also a video.

    Args:
        tracklet_pred_dir: output directory; Tracklet_saver will check
            whether the file already exists.
        dataset: frame source exposing get_shape()/load()/size.
        generate_video: when True, also write log_dir/output.mp4.
        frame_offset: number of leading frames skipped in the tracklet
            numbering (frame_num = i - frame_offset).
        log_tag, weights_tag: forwarded to mv3d.Predictor.
    Returns:
        Path of the written tracklet file.
    NOTE(review): uses module-level globals log_dir, log_subdir and
    fast_test — confirm they are defined before calling.
    """
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape,
                             log_tag=log_tag, weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size if fast_test == False else frame_offset + 1):
        rgb, top, front, _, _, _ = dataset.load()
        frame_num = i - frame_offset
        if frame_num < 0:
            continue
        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write(
                'It takes %0.2f secs for inferring %d frames. \n' %
                (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]
        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :],
                                          color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :],
                                             color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j],
                                          rotation[j])
        # resize rgb to the top view's height and put them side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)
        if generate_video:
            vid_in.writeFrame(new_image)

    # fix: vid_in only exists when generate_video is True; the unguarded
    # close() raised NameError for the default generate_video=False path.
    if generate_video:
        vid_in.close()
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write(
            'It takes %0.2f secs for inferring the whole test dataset. \n' %
            (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False,
                  frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor and save both predicted and gt tracklet XMLs.

    Per frame: loads gt boxes from disk, filters gt and predicted boxes to
    those whose projected image height is >= 40 px, records both sets as
    tracklets, and writes a debug image (top view | rgb); optionally also a
    video.

    Args:
        tracklet_pred_dir: output directory; Tracklet_saver will check
            whether the file already exists.
        dataset: frame source exposing get_shape()/load()/size.
        generate_video: when True, also write log_dir/output.mp4.
        frame_offset: number of leading frames skipped in the tracklet
            numbering (frame_num = i - frame_offset).
        log_tag, weights_tag: forwarded to mv3d.Predictor.
    Returns:
        (predicted tracklet path, gt tracklet path).
    NOTE(review): uses module-level globals log_dir, log_subdir and
    fast_test, and a hardcoded absolute gt path (see below) — this only
    works on the original author's machine; parameterize before reuse.
    """
    tracklet = Tracklet_saver(tracklet_pred_dir, 'pred')
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    gt_tracklet = Tracklet_saver(tracklet_pred_dir, 'gt')
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape,
                             log_tag=log_tag, weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    print('dataset.size')
    print(dataset.size)
    length = []      # per-frame count of kept predicted boxes
    gt_length = []   # per-frame count of kept gt boxes
    frame_num = 0
    for i in range(dataset.size if fast_test == False else frame_offset + 1):
        rgb, top, front, _, _, _ = dataset.load(size=1)
        frame_num = i - frame_offset
        print('frame_num')
        print(frame_num)
        if frame_num < 0:
            continue

        # HACK: hardcoded absolute path to this machine's gt boxes.
        gt_boxes3d_tmp = np.load(
            '/home/mohsen/Desktop/MV3D/data/preprocessed/kitti/gt_boxes3d/object3d/test/%05d.npy'
            % i)
        # keep only gt boxes whose projected image height is >= 40 px
        gt_boxes3d_list = []
        for gt_box3d_tmp in gt_boxes3d_tmp:
            gt_project = box3d.box3d_to_rgb_box([gt_box3d_tmp])
            if abs(gt_project[0][0, 1] - gt_project[0][4, 1]) >= 40:
                gt_boxes3d_list.append(gt_box3d_tmp)
        gt_boxes3d = np.array(gt_boxes3d_list)

        boxes3d_tmp, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write(
                'It takes %0.2f secs for inferring %d frames. \n' %
                (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]

        # record gt tracklets
        if len(gt_boxes3d) != 0:
            gt_length.append(len(gt_boxes3d))
            gt_translation, gt_size, gt_rotation = boxes3d_decompose(
                gt_boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            gt_size[:, 1:3] = gt_size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(gt_translation)):
                gt_tracklet.add_tracklet(frame_num, gt_size[j],
                                         gt_translation[j], gt_rotation[j])

        # keep only predicted boxes whose projected image height is >= 40 px
        boxes3d_list = []
        for box3d_tmp in boxes3d_tmp:
            project = box3d.box3d_to_rgb_box([box3d_tmp])
            if abs(project[0][0, 1] - project[0][4, 1]) >= 40:
                print(project[0][0, 1] - project[0][4, 1])
                boxes3d_list.append(box3d_tmp)
        boxes3d = np.array(boxes3d_list)

        print('sizes')
        print(np.size(boxes3d))
        print(gt_boxes3d)
        print(np.size(gt_boxes3d))

        if len(boxes3d) != 0:
            length.append(len(boxes3d))
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :],
                                          color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :],
                                             color=(0, 0, 80), thickness=3)
            # NOTE(review): gt boxes are only drawn on frames that also have
            # predictions — preserved from the original; confirm intent.
            if len(gt_boxes3d) != 0:
                rgb_image = draw_box3d_on_camera(rgb_image,
                                                 gt_boxes3d[:, :, :],
                                                 color=(0, 80, 0),
                                                 thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                tracklet.add_tracklet(frame_num, size[j], translation[j],
                                      rotation[j])

        # resize rgb to the top view's height and put them side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)
        if generate_video:
            vid_in.writeFrame(new_image)

    # fix: vid_in only exists when generate_video is True; the unguarded
    # close() raised NameError for the default generate_video=False path.
    if generate_video:
        vid_in.close()

    print(length)
    print(sum(length))
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))
    print(gt_length)
    print(sum(gt_length))
    gt_tracklet.write_tracklet()

    if cfg.TRACKING_TIMER:
        predict.log_msg.write(
            'It takes %0.2f secs for inferring the whole test dataset. \n' %
            (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path, gt_tracklet.path
def pred_and_save(tracklet_pred_dir, dataset, generate_video=False,
                  frame_offset=16, log_tag=None, weights_tag=None):
    """Run the MV3D predictor over a dataset and save a tracklet XML.

    For each frame after ``frame_offset``, predicts 3D boxes, records them
    as tracklets (only boxes with 0 < y-translation < 8), and writes a
    debug image (top view | rgb) per frame; optionally also a video.

    Args:
        tracklet_pred_dir: output directory; Tracklet_saver will check
            whether the file already exists.
        dataset: frame source exposing get_shape()/load()/size.
        generate_video: when True, also write log_dir/output.mp4.
        frame_offset: number of leading frames skipped in the tracklet
            numbering (frame_num = i - frame_offset).
        log_tag, weights_tag: forwarded to mv3d.Predictor.
    Returns:
        Path of the written tracklet file.
    NOTE(review): uses module-level globals log_dir and log_subdir —
    confirm they are defined before calling.
    """
    tracklet = Tracklet_saver(tracklet_pred_dir)
    os.makedirs(os.path.join(log_dir, 'image'), exist_ok=True)
    top_shape, front_shape, rgb_shape = dataset.get_shape()
    predict = mv3d.Predictor(top_shape, front_shape, rgb_shape,
                             log_tag=log_tag, weights_tag=weights_tag)
    if generate_video:
        vid_in = skvideo.io.FFmpegWriter(os.path.join(log_dir, 'output.mp4'))

    # timer
    timer_step = 100
    if cfg.TRACKING_TIMER:
        time_it = timer()

    frame_num = 0
    for i in range(dataset.size):
        rgb, top, front, _, _, _ = dataset.load(1)
        frame_num = i - frame_offset
        if frame_num < 0:
            continue
        boxes3d, probs = predict(top, front, rgb)
        predict.dump_log(log_subdir=log_subdir, n_frame=i)

        # time timer_step iterations. Turn it on/off in config.py
        if cfg.TRACKING_TIMER and i % timer_step == 0 and i != 0:
            predict.track_log.write(
                'It takes %0.2f secs for inferring %d frames. \n' %
                (time_it.time_diff_per_n_loops(), timer_step))

        # for debugging: save image and show image.
        top_image = draw_top_image(top[0])
        rgb_image = rgb[0]
        if len(boxes3d) != 0:
            top_image = draw_box3d_on_top(top_image, boxes3d[:, :, :],
                                          color=(80, 80, 0), thickness=3)
            rgb_image = draw_box3d_on_camera(rgb_image, boxes3d[:, :, :],
                                             color=(0, 0, 80), thickness=3)
            translation, size, rotation = boxes3d_decompose(boxes3d[:, :, :])
            # todo: remove it after gtbox is ok
            size[:, 1:3] = size[:, 1:3] / cfg.TRACKLET_GTBOX_LENGTH_SCALE
            for j in range(len(translation)):
                if 0 < translation[j, 1] < 8:
                    tracklet.add_tracklet(frame_num, size[j], translation[j],
                                          rotation[j])
        # resize rgb to the top view's height and put them side by side
        resize_scale = top_image.shape[0] / rgb_image.shape[0]
        rgb_image = cv2.resize(
            rgb_image,
            (int(rgb_image.shape[1] * resize_scale), top_image.shape[0]))
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        new_image = np.concatenate((top_image, rgb_image), axis=1)
        cv2.imwrite(os.path.join(log_dir, 'image', '%5d_image.jpg' % i),
                    new_image)
        if generate_video:
            vid_in.writeFrame(new_image)

    # fix: vid_in only exists when generate_video is True; the unguarded
    # close() raised NameError for the default generate_video=False path.
    if generate_video:
        vid_in.close()
    tracklet.write_tracklet()
    predict.dump_weigths(os.path.join(log_dir, 'pretrained_model'))

    if cfg.TRACKING_TIMER:
        predict.log_msg.write(
            'It takes %0.2f secs for inferring the whole test dataset. \n' %
            (time_it.total_time()))

    print("tracklet file named tracklet_labels.xml is written successfully.")
    return tracklet.path
import os

os.environ["DISPLAY"] = ":0"

# std libs
import glob

# num libs
import math
import random
import numpy as np
import cv2

import config
import data
import net.utility.draw as draw

if __name__ == '__main__':
    # Debug script: mark one preprocessed frame's gt boxes on its top view
    # and dump the result via draw.imsave for visual inspection.
    # (PREPROCESSING_DATA_SETS_DIR is used; PREPROCESSED_DATA_SETS_DIR is the
    # older name.)
    preprocessed_dir = config.cfg.PREPROCESSING_DATA_SETS_DIR
    sample = '/1/15/00070.npy'
    top_view_path = preprocessed_dir + '/top' + sample
    gt_boxes3d_path = preprocessed_dir + '/gt_boxes3d' + sample

    top_view = np.load(top_view_path)
    gt_boxes = np.load(gt_boxes3d_path)

    marked = data.draw_box3d_on_top(data.draw_top_image(top_view), gt_boxes)
    draw.imsave('top_img_marked', marked, 'debug')
    print('top_img_marked dump finished!!')