def main():
    # load config
    dataset_root = '/media/zxy/Samsung_T5/Data/DataSets/LaSOT/LaSOT_test'

    # create tracker (PyTracking-style tracker, refined by AlphaRefine below)
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)

    # refinement module
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34_15sr_fcn/SEcmnet_ep0040-a.pth.tar"  # RF_CrsM_R34SR15FCN_a
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34/SEcmnet_ep0040-a.pth.tar"  # dimp_dimp50RF_CrsM_R34SR20FCN_a-0_1
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34/SEcmnet_ep0040-b.pth.tar"
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34/SEcmnet_ep0040-c.pth.tar"
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34/SEcmnet_ep0040-d.pth.tar"
    # refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEx_beta/SEcm_r34/SEcmnet_ep0040-e.pth.tar"
    refine_path = "/home/zxy/Desktop/AlphaRefine/experiments/SEbcm/SEbcm-8gpu/SEbcmnet_ep0040.pth.tar"  # RF_CrsM_ARv1_d
    selector_path = 1
    branches = ['corner', 'mask'][0:1]
    sr = 2.0  # search-region factor, 2.0 by default
    input_sz = int(128 * sr)
    RF_module = RefineModule(refine_path, selector_path, branches=branches,
                             search_factor=sr, input_sz=input_sz)
    RF_type = 'RF_CrsM_R34SR20_e'
    # RF_type = 'RF_CrsM_ARv1'
    model_name = args.tracker_name + '_' + args.tracker_param \
        + '{}-{}'.format(RF_type, selector_path) + '_%d' % args.run_id

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root,
                                            load_img=False)

    if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:
        # restart (reset-based) tracking
        total_lost = 0
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                # test one specific video
                if video.name != args.video:
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            toc_refine = 0  # timer for the refinement module (currently unused)
            pred_bboxes = []
            for idx, (img, gt_bbox) in enumerate(video):
                if len(gt_bbox) == 4:
                    # convert (x, y, w, h) to the 8-point polygon format used by VOT
                    gt_bbox = [gt_bbox[0], gt_bbox[1],
                               gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]
                tic = cv2.getTickCount()
                img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
                if idx == frame_counter:
                    H, W, _ = img.shape
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    # initialize the base tracker
                    gt_bbox_np = np.array(gt_bbox_)
                    gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                    init_info = {'init_bbox': gt_bbox_torch}
                    _ = tracker.initialize(img_RGB, init_info)
                    # initialize the refinement module for this video
                    RF_module.initialize(img_RGB, np.array(gt_bbox_))
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif idx > frame_counter:
                    # track
                    outputs = tracker.track(img_RGB)
                    pred_bbox = outputs['target_bbox']
                    # refine the tracking result
                    result_dict = RF_module.refine(img_RGB, np.array(pred_bbox))
                    bbox_report = result_dict['bbox_report']
                    bbox_state = result_dict['bbox_state']
                    # report the refined box and update the tracker state
                    pred_bbox = bbox_report
                    x1, y1, w, h = bbox_state.tolist()
                    # apply boundary and minimum-size limits
                    x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                    w = x2 - x1
                    h = y2 - y1
                    new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                    new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                    new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                    tracker.pos = new_pos.clone()
                    tracker.target_sz = new_target_sz
                    tracker.target_scale = new_scale
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if overlap > 0:  # not lost
                        pred_bboxes.append(pred_bbox)
                    else:  # lost object
                        pred_bboxes.append(2)
                        frame_counter = idx + 5  # skip 5 frames
                        lost_number += 1
                else:
                    pred_bboxes.append(0)
                toc += cv2.getTickCount() - tic
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > frame_counter:
                    cv2.polylines(img, [np.array(gt_bbox, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 0), 3)
                    if len(pred_bbox) == 8:
                        cv2.polylines(img, [np.array(pred_bbox, int).reshape((-1, 1, 2))],
                                      True, (0, 255, 255), 3)
                    else:
                        bbox = list(map(int, pred_bbox))
                        cv2.rectangle(img, (bbox[0], bbox[1]),
                                      (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # save results
            video_path = os.path.join(save_dir, args.dataset, model_name, 'baseline', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write("{:d}\n".format(x))
                    else:
                        f.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
                v_idx + 1, video.name, toc, idx / toc, lost_number))
            total_lost += lost_number
        print("{:s} total lost: {:d}".format(model_name, total_lost))
    else:
        # OPE (one-pass evaluation) tracking
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                # test one specific video
                if video.name != args.video:
                    continue
            toc = 0
            pred_bboxes = []
            scores = []
            track_times = []
            for idx, (img, gt_bbox) in enumerate(video):
                img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
                tic = cv2.getTickCount()
                if idx == 0:
                    H, W, _ = img.shape
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    # initialize the base tracker
                    gt_bbox_np = np.array(gt_bbox_)
                    gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                    init_info = {'init_bbox': gt_bbox_torch}
                    _ = tracker.initialize(img_RGB, init_info)
                    # initialize the refinement module for this video
                    RF_module.initialize(img_RGB, np.array(gt_bbox_))
                    pred_bbox = gt_bbox_
                    scores.append(None)
                    if 'VOT2018-LT' == args.dataset:
                        pred_bboxes.append([1])
                    else:
                        pred_bboxes.append(pred_bbox)
                else:
                    # track
                    outputs = tracker.track(img_RGB)
                    pred_bbox = outputs['target_bbox']
                    # refine the tracking result
                    pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                                 np.array(pred_bbox))
                    x1, y1, w, h = pred_bbox.tolist()
                    # apply boundary and minimum-size limits
                    x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                    w = x2 - x1
                    h = y2 - y1
                    new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                    new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                    new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                    # update the tracker state with the refined box
                    tracker.pos = new_pos.clone()
                    tracker.target_sz = new_target_sz
                    tracker.target_scale = new_scale
                    pred_bboxes.append(pred_bbox)
                    # scores.append(outputs['best_score'])
                toc += cv2.getTickCount() - tic
                track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > 0:
                    gt_bbox = list(map(int, gt_bbox))
                    pred_bbox = list(map(int, pred_bbox))
                    cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                                  (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                    cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                                  (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                                  (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # save results
            if 'VOT2018-LT' == args.dataset:
                video_path = os.path.join(save_dir, args.dataset, model_name, 'longterm', video.name)
                if not os.path.isdir(video_path):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
                result_path = os.path.join(video_path, '{}_001_confidence.value'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in scores:
                        f.write('\n') if x is None else f.write("{:.6f}\n".format(x))
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write("{:.6f}\n".format(x))
            elif 'GOT-10k' == args.dataset:
                video_path = os.path.join(save_dir, args.dataset, model_name, video.name)
                if not os.path.isdir(video_path):
                    os.makedirs(video_path)
                result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
                result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in track_times:
                        f.write("{:.6f}\n".format(x))
            else:
                model_path = os.path.join(save_dir, args.dataset, model_name)
                if not os.path.isdir(model_path):
                    os.makedirs(model_path)
                result_path = os.path.join(model_path, '{}.txt'.format(video.name))
                with open(result_path, 'w') as f:
                    for x in pred_bboxes:
                        f.write(','.join([str(i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
                v_idx + 1, video.name, toc, idx / toc))
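
# The scripts in this file call a helper bbox_clip(x1, y1, x2, y2, (H, W)) to keep
# refined boxes inside the frame with a minimum size, but the helper itself is not
# shown here. Below is a minimal sketch of what it plausibly looks like; the
# 10-pixel minimum size is an assumption, not confirmed by this file:
def bbox_clip(x1, y1, x2, y2, boundary, min_sz=10):
    """Clip corner coordinates to the image and enforce a minimum box size."""
    H, W = boundary
    x1_new = max(0, min(x1, W - min_sz))  # keep at least min_sz pixels of width
    y1_new = max(0, min(y1, H - min_sz))  # keep at least min_sz pixels of height
    x2_new = max(min_sz, min(x2, W))
    y2_new = max(min_sz, min(y2, H))
    return x1_new, y1_new, x2_new, y2_new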
def main():
    # create tracker (baseline PyTracking tracker, no refinement)
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)
    model_name = args.tracker_name + '_' + args.tracker_param

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:
        # restart (reset-based) tracking
        total_lost = 0
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                # test one specific video
                if video.name != args.video:
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            toc_refine = 0  # timer for the refinement module (unused in this baseline)
            pred_bboxes = []
            for idx, (img, gt_bbox) in enumerate(video):
                if len(gt_bbox) == 4:
                    # convert (x, y, w, h) to the 8-point polygon format used by VOT
                    gt_bbox = [gt_bbox[0], gt_bbox[1],
                               gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]
                tic = cv2.getTickCount()
                img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
                if idx == frame_counter:
                    H, W, _ = img.shape
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    # initialize the tracker
                    gt_bbox_np = np.array(gt_bbox_)
                    gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                    init_info = {'init_bbox': gt_bbox_torch}
                    _ = tracker.initialize(img_RGB, init_info)
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif idx > frame_counter:
                    # track
                    outputs = tracker.track(img_RGB)
                    pred_bbox = outputs['target_bbox']
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if overlap > 0:  # not lost
                        pred_bboxes.append(pred_bbox)
                    else:  # lost object
                        pred_bboxes.append(2)
                        frame_counter = idx + 5  # skip 5 frames
                        lost_number += 1
                else:
                    pred_bboxes.append(0)
                toc += cv2.getTickCount() - tic
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > frame_counter:
                    cv2.polylines(img, [np.array(gt_bbox, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 0), 3)
                    if len(pred_bbox) == 8:
                        cv2.polylines(img, [np.array(pred_bbox, int).reshape((-1, 1, 2))],
                                      True, (0, 255, 255), 3)
                    else:
                        bbox = list(map(int, pred_bbox))
                        cv2.rectangle(img, (bbox[0], bbox[1]),
                                      (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # save results
            video_path = os.path.join(save_dir, args.dataset, model_name, 'baseline', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write("{:d}\n".format(x))
                    else:
                        f.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
                v_idx + 1, video.name, toc, idx / toc, lost_number))
            total_lost += lost_number
        print("{:s} total lost: {:d}".format(model_name, total_lost))
def main():
    # create tracker (baseline PyTracking tracker, no refinement)
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)
    model_name = args.tracker_name + '_' + args.tracker_param + '_%d' % args.run_id

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the tracker
                gt_bbox_np = np.array(gt_bbox_)
                gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                init_info = {'init_bbox': gt_bbox_torch}
                _ = tracker.initialize(img_RGB, init_info)
                pred_bbox = gt_bbox_
                scores.append(None)
                if 'VOT2018-LT' == args.dataset:
                    pred_bboxes.append([1])
                else:
                    pred_bboxes.append(pred_bbox)
            else:
                # track (no refinement in this baseline)
                outputs = tracker.track(img_RGB)
                pred_bbox = outputs['target_bbox']
                pred_bboxes.append(pred_bbox)
                # scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
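
# Every main() in this file converts the ground truth into an axis-aligned
# (cx, cy, w, h) tuple via get_axis_aligned_bbox before initializing the tracker.
# The helper is not defined here; the sketch below follows the pysot toolkit
# convention of rescaling w/h by the rotated-box/aligned-box area ratio, but
# treat it as an approximation rather than the project's exact code:
import numpy as np

def get_axis_aligned_bbox(region):
    """Convert an 8-point polygon or an (x, y, w, h) rect to (cx, cy, w, h)."""
    region = np.asarray(region, dtype=np.float64)
    if region.size == 8:
        xs, ys = region[0::2], region[1::2]
        cx, cy = xs.mean(), ys.mean()
        # area of the rotated box vs. its axis-aligned hull
        A1 = (np.linalg.norm(region[0:2] - region[2:4]) *
              np.linalg.norm(region[2:4] - region[4:6]))
        A2 = (xs.max() - xs.min()) * (ys.max() - ys.min())
        s = np.sqrt(A1 / A2)  # shrink factor so the aligned box keeps the same area
        w = s * (xs.max() - xs.min()) + 1
        h = s * (ys.max() - ys.min()) + 1
    else:
        x, y, w, h = region
        cx, cy = x + w / 2, y + h / 2
    return cx, cy, w, h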
def main(model_code):
    # build the refinement module for the requested checkpoint
    RF_module = RefineModule(refine_path.format(model_code), selector_path,
                             search_factor=sr, input_sz=input_sz)
    model_name = 'AR_' + RF_type.format(model_code) + '_iter_%d' % n_iter

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:
        # restart (reset-based) tracking
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                # test one specific video
                if video.name != args.video:
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            pred_bboxes = []
            for idx, (img, gt_bbox) in enumerate(video):
                if len(gt_bbox) == 4:
                    # convert (x, y, w, h) to the 8-point polygon format used by VOT
                    gt_bbox = [gt_bbox[0], gt_bbox[1],
                               gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]
                tic = cv2.getTickCount()
                if idx == frame_counter:
                    H, W, _ = img.shape
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    # initialize the refinement module for this video
                    RF_module.initialize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.array(gt_bbox_))
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif idx > frame_counter:
                    # iteratively refine the previous box (no base tracker here)
                    for _ in range(n_iter):
                        pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                                     np.array(pred_bbox))
                        x1, y1, w, h = pred_bbox.tolist()
                        # apply boundary and minimum-size limits
                        x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                        w = x2 - x1
                        h = y2 - y1
                        pred_bbox = np.array([x1, y1, w, h])
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if overlap > 0:  # not lost
                        pred_bboxes.append(pred_bbox)
                    else:  # lost object
                        pred_bboxes.append(2)
                        frame_counter = idx + 5  # skip 5 frames
                        lost_number += 1
                else:
                    pred_bboxes.append(0)
                toc += cv2.getTickCount() - tic
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > frame_counter:
                    cv2.polylines(img, [np.array(gt_bbox, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 0), 3)
                    bbox = list(map(int, pred_bbox))
                    cv2.rectangle(img, (bbox[0], bbox[1]),
                                  (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # save results
            video_path = os.path.join(save_dir, args.dataset, model_name, 'baseline', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write("{:d}\n".format(x))
                    else:
                        f.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
                v_idx + 1, video.name, toc, idx / toc, lost_number))
def main():
    # build the refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)
    # refine_method = args.refine_method
    model_name = 'siamrpn_' + RF_type + '_m2b_{}'.format(args.thres)
    snapshot_path = os.path.join(project_path_, 'experiments/%s/model.pth' % args.tracker_name)
    config_path = os.path.join(project_path_, 'experiments/%s/config.yaml' % args.tracker_name)
    cfg.merge_from_file(config_path)

    # create model (a subclass of torch.nn.Module) and load the snapshot
    model = ModelBuilder()
    model = load_pretrain(model, snapshot_path).cuda().eval()

    # build tracker: the network plus its post-processing
    tracker = build_tracker(model)

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                tracker.init(img, gt_bbox_)
                # initialize the refinement module for this video
                RF_module.initialize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                outputs = tracker.track(img)
                pred_bbox = outputs['bbox']
                # refine the tracking result: predict a mask, then convert it to a box
                mask_pred = RF_module.get_mask(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                               np.array(pred_bbox))
                pred_bbox = mask2bbox(mask_pred, pred_bbox, MASK_THRESHOLD=args.thres)
                x1, y1, w, h = pred_bbox.tolist()
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                pred_bbox = np.array([x1, y1, w, h])
                # write the refined box back into the tracker state
                tracker.center_pos = np.array([x1 + w / 2, y1 + h / 2])
                tracker.size = np.array([w, h])
                pred_bboxes.append(pred_bbox)
                scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name + '_' + str(selector_path))
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
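
# mask2bbox above turns the refinement module's segmentation mask into an
# axis-aligned box, falling back to the tracker's box when the mask is nearly
# empty. The helper is not defined in this file; this is a minimal sketch, and
# the fallback rule and the 10-pixel area cutoff are assumptions:
import numpy as np

def mask2bbox(mask, fallback_bbox, MASK_THRESHOLD=0.5):
    """Binarize `mask` and return its bounding box, or `fallback_bbox` if empty."""
    binary = mask > MASK_THRESHOLD
    if binary.sum() < 10:  # too few foreground pixels: keep the tracker's box
        return np.array(fallback_bbox, dtype=np.float32)
    ys, xs = np.nonzero(binary)  # coordinates of all foreground pixels
    x1, y1, x2, y2 = xs.min(), ys.min(), xs.max(), ys.max()
    return np.array([x1, y1, x2 - x1 + 1, y2 - y1 + 1], dtype=np.float32)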
def main():
    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # build the refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)
    model_name = 'RTMDNet' + '{}-{}'.format(RF_type, selector_path)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        # build a fresh tracker for each video
        tracker = RT_MDNet()
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB format
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the tracker
                tracker.initialize_seq(img_RGB, np.array(gt_bbox_))
                # initialize the refinement module for this video
                RF_module.initialize(img_RGB, np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                if 'VOT2018-LT' == args.dataset:
                    pred_bboxes.append([1])
                else:
                    pred_bboxes.append(pred_bbox)
            else:
                ori_bbox = tracker.track(img_RGB)
                # refine the tracking result
                pred_bbox = RF_module.refine(img_RGB, np.array(ori_bbox))
                # boundary and size limits
                pred_bbox = bbox_clip(pred_bbox, (H, W))
                # update the tracker state with the refined box
                tracker.target_bbox = pred_bbox.copy()
                pred_bboxes.append(pred_bbox)
                # scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                ori_bbox = list(map(int, ori_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 0, 255), 3)
                cv2.rectangle(img, (ori_bbox[0], ori_bbox[1]),
                              (ori_bbox[0] + ori_bbox[2], ori_bbox[1] + ori_bbox[3]), (255, 0, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 0), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
def main():
    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)
    model_name = 'RTMDNet-oracle'

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        # build a fresh tracker for each video
        tracker = RT_MDNet()
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB format
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the tracker
                tracker.initialize_seq(img_RGB, np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                ori_bbox = tracker.track(img_RGB)
                pred_bbox = bbox_clip(ori_bbox, (H, W))
                # oracle update: re-center the predicted box on the ground truth
                oracle_box = pred_bbox.copy()
                cx, cy, _, _ = get_axis_aligned_bbox(np.array(gt_bbox))
                oracle_box[:2] = np.array([cx, cy]) - oracle_box[2:] / 2
                tracker.target_bbox = oracle_box
                pred_bboxes.append(pred_bbox)
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                ori_bbox = list(map(int, ori_bbox))
                oracle_box = list(map(int, oracle_box))  # cast for OpenCV drawing
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 0, 255), 3)
                cv2.rectangle(img, (oracle_box[0], oracle_box[1]),
                              (oracle_box[0] + oracle_box[2], oracle_box[1] + oracle_box[3]),
                              (255, 0, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 0), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
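
# Note that the two RT-MDNet scripts above call bbox_clip with a different
# signature, bbox_clip(bbox, (H, W)), operating on an (x, y, w, h) array rather
# than on four corner coordinates. A plausible sketch of that variant; the
# 10-pixel minimum size is again an assumption:
import numpy as np

def bbox_clip(bbox, boundary, min_sz=10):
    """Clip an (x, y, w, h) box to the image bounds and a minimum size."""
    H, W = boundary
    x1, y1, w, h = bbox
    x1_new = max(0, min(x1, W - min_sz))
    y1_new = max(0, min(y1, H - min_sz))
    x2_new = max(min_sz, min(x1 + w, W))
    y2_new = max(min_sz, min(y1 + h, H))
    return np.array([x1_new, y1_new, x2_new - x1_new, y2_new - y1_new])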
def main():
    # create tracker
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)

    # refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)
    model_name = args.tracker_name + '_' + args.tracker_param \
        + '{}-{}'.format(RF_type, selector_path) + '_%d' % args.run_id

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking with mask visualization
    for v_idx, video in enumerate(dataset):
        # pick a random overlay color per video
        color = np.array(COLORS[random.randint(0, len(COLORS) - 1)])[None, None, ::-1]
        vis_result = os.path.join(
            '/home/zxy/Desktop/AlphaRefine/CVPR21/material/quality_analysis/mask_vis',
            '{}'.format(video.name))
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
            else:
                print()
        if not os.path.exists(vis_result):
            os.makedirs(vis_result)
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the base tracker
                gt_bbox_np = np.array(gt_bbox_)
                gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                init_info = {'init_bbox': gt_bbox_torch}
                _ = tracker.initialize(img_RGB, init_info)
                # initialize the refinement module for this video
                RF_module.initialize(img_RGB, np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                # track
                outputs = tracker.track(img_RGB)
                pred_bbox = outputs['target_bbox']
                # refine the tracking result
                pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                             np.array(pred_bbox))
                x1, y1, w, h = pred_bbox.tolist()
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                # update the tracker state with the refined box
                tracker.pos = new_pos.clone()
                tracker.target_sz = new_target_sz
                tracker.target_scale = new_scale
                # also predict a mask for visualization
                mask_pred = RF_module.get_mask(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                               np.array(pred_bbox))
                from external.pysot.toolkit.visualization import draw_mask
                draw_mask(img, mask_pred, idx=idx, show=True,
                          save_dir='dimpsuper_armask_crocodile-3')
                pred_bboxes.append(pred_bbox)
                # scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                # blend the mask into the frame and draw its contour
                im4show = img
                mask_pred = np.uint8(mask_pred > 0.5)[:, :, None]
                contours, _ = cv2.findContours(mask_pred.squeeze(), cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_SIMPLE)
                im4show = im4show * (1 - mask_pred) + np.uint8(im4show * mask_pred / 2) \
                    + mask_pred * np.uint8(color) * 128
                pred_bbox = list(map(int, pred_bbox))
                # gt_bbox = list(map(int, gt_bbox))
                # cv2.rectangle(im4show, (gt_bbox[0], gt_bbox[1]),
                #               (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                # cv2.rectangle(im4show, (pred_bbox[0], pred_bbox[1]),
                #               (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                #               color[::-1].squeeze().tolist(), 3)
                cv2.drawContours(im4show, contours, -1, color[::-1].squeeze(), 2)
                cv2.putText(im4show, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                # cv2.imshow(video.name, im4show)
                cv2.imwrite(os.path.join(vis_result, '{:06}.jpg'.format(idx)), im4show)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
def main():
    # create tracker
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)
    model_name = 'atom_oracle'

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the tracker
                gt_bbox_np = np.array(gt_bbox_)
                gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                init_info = {'init_bbox': gt_bbox_torch}
                _ = tracker.initialize(img_RGB, init_info)
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                # track
                outputs = tracker.track(img_RGB)
                pred_bbox = outputs['target_bbox']
                x1, y1, w, h = pred_bbox
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                pred_bbox = np.array([x1, y1, w, h])
                # oracle reset: re-center the tracker on the ground truth when available
                cx, cy, _, _ = get_axis_aligned_bbox(np.array(gt_bbox))
                if gt_bbox != [0, 0, 0, 0]:
                    new_pos = torch.from_numpy(np.array([cy, cx]).astype(np.float32))
                else:
                    new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                tracker.pos = new_pos.clone()
                tracker.target_sz = new_target_sz
                tracker.target_scale = new_scale
                pred_bboxes.append(pred_bbox)
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
def main():
    # create tracker
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)

    # refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)
    model_name = args.tracker_name + '_' + args.tracker_param \
        + '{}-{}'.format(RF_type, selector_path) + '_%d' % args.run_id
    model_name = 'LaSOT_gt'  # overridden: this script only dumps ground-truth boxes

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE "tracking": write the axis-aligned ground truth as the result
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
            gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
            pred_bboxes.append(gt_bbox_)
            continue
            # NOTE: everything below is unreachable leftover tracking code,
            # kept for reference.
            img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the base tracker
                gt_bbox_np = np.array(gt_bbox_)
                gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                init_info = {'init_bbox': gt_bbox_torch}
                _ = tracker.initialize(img_RGB, init_info)
                # initialize the refinement module for this video
                RF_module.initialize(img_RGB, np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                # track
                outputs = tracker.track(img_RGB)
                pred_bbox = outputs['target_bbox']
                # refine the tracking result
                pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                             np.array(pred_bbox))
                x1, y1, w, h = pred_bbox.tolist()
                w, h = get_mean_wh(pred_bboxes, w, h)
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                # update the tracker state with the refined box
                tracker.pos = new_pos.clone()
                tracker.target_sz = new_target_sz
                tracker.target_scale = new_scale
                pred_bboxes.append(pred_bbox)
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                k = cv2.waitKey(0)
                if k == ord('q'):
                    exit()
                elif k == ord('s'):
                    cv2.imwrite(os.path.join(os.environ['HOME'], 'Desktop/demo',
                                             video.name + '_{}.jpg'.format(idx)), img)
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print(video.name)
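
# The (unreachable) branch above smooths the refined width/height via get_mean_wh,
# which is not defined in this file. A hedged sketch, assuming a simple running
# average over the last k stored predictions; the real smoothing rule may differ:
def get_mean_wh(pred_bboxes, w, h, k=5):
    """Average the current (w, h) with the last k predicted box sizes."""
    recent = [b for b in pred_bboxes[-k:] if not isinstance(b, int)]
    if not recent:
        return w, h
    ws = [b[2] for b in recent] + [w]
    hs = [b[3] for b in recent] + [h]
    return sum(ws) / len(ws), sum(hs) / len(hs)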
def main():
    model_name = 'siamRPN'
    snapshot_path = os.path.join(project_path_, 'experiments/%s/model.pth' % args.tracker_name)
    config_path = os.path.join(project_path_, 'experiments/%s/config.yaml' % args.tracker_name)
    cfg.merge_from_file(config_path)

    # create model (a torch.nn.Module) and load the snapshot
    model = ModelBuilder()
    model = load_pretrain(model, snapshot_path).cuda().eval()

    # build tracker: the network plus its post-processing
    tracker = build_tracker(model)

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                tracker.init(img, gt_bbox_)
                pred_bboxes.append(gt_bbox_)
            else:
                outputs = tracker.track(img)
                pred_bbox = outputs['bbox']
                x1, y1, w, h = pred_bbox
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                pred_bbox = np.array([x1, y1, w, h])
                # write the clipped box back into the tracker state
                tracker.center_pos = np.array([x1 + w / 2, y1 + h / 2])
                tracker.size = np.array([w, h])
                pred_bboxes.append(pred_bbox)
                scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name)
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
def main():
    # create tracker
    tracker_info = Tracker(args.tracker_name, args.tracker_param, None)
    params = tracker_info.get_parameters()
    params.visualization = args.vis
    params.debug = args.debug
    params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}
    tracker = tracker_info.tracker_class(params)

    # set up the refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)
    model_name = args.tracker_name + '_' + args.tracker_param \
        + '{}-{}'.format(RF_type, selector_path) + '_%d' % args.run_id

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if os.path.exists(os.path.join(save_dir, args.dataset, model_name,
                                       '{}.txt'.format(video.name))):
            continue  # skip videos that already have results
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            img_RGB = img[:, :, ::-1].copy()  # BGR --> RGB
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the base tracker
                gt_bbox_np = np.array(gt_bbox_)
                gt_bbox_torch = torch.from_numpy(gt_bbox_np.astype(np.float32))
                init_info = {'init_bbox': gt_bbox_torch}
                _ = tracker.initialize(img_RGB, init_info)
                # initialize the refinement module for this video
                RF_module.initialize(img_RGB, np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                if 'VOT2018-LT' == args.dataset:
                    pred_bboxes.append([1])
                else:
                    pred_bboxes.append(pred_bbox)
            else:
                # track
                outputs = tracker.track(img_RGB)
                pred_bbox = outputs['target_bbox']
                # refine the tracking result
                pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                             np.array(pred_bbox))
                x1, y1, w, h = pred_bbox.tolist()
                # apply boundary and minimum-size limits
                x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                w = x2 - x1
                h = y2 - y1
                new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))
                new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))
                new_scale = torch.sqrt(new_target_sz.prod() / tracker.base_target_sz.prod())
                # update the tracker state with the refined box
                tracker.pos = new_pos.clone()
                tracker.target_sz = new_target_sz
                tracker.target_scale = new_scale
                pred_bboxes.append(pred_bbox)
                # scores.append(outputs['best_score'])
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        if 'VOT2018-LT' == args.dataset:
            video_path = os.path.join(save_dir, args.dataset, model_name, 'longterm', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    f.write(','.join([str(i) for i in x]) + '\n')
            result_path = os.path.join(video_path, '{}_001_confidence.value'.format(video.name))
            with open(result_path, 'w') as f:
                for x in scores:
                    f.write('\n') if x is None else f.write("{:.6f}\n".format(x))
            result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in track_times:
                    f.write("{:.6f}\n".format(x))
        elif 'GOT-10k' == args.dataset:
            video_path = os.path.join(save_dir, args.dataset, model_name, video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    f.write(','.join([str(i) for i in x]) + '\n')
            result_path = os.path.join(video_path, '{}_time.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in track_times:
                    f.write("{:.6f}\n".format(x))
        else:
            model_path = os.path.join(save_dir, args.dataset, model_name)
            if not os.path.isdir(model_path):
                os.makedirs(model_path)
            result_path = os.path.join(model_path, '{}.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
def main():
    # refine_method = args.refine_method
    n_iter = 2  # number of refinement iterations
    model_name = 'RF_' + RF_type + 'iter-{}'.format(n_iter)
    dataset_root = dataset_root_

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root,
                                            load_img=False)

    # create the refinement module
    RF_module = RefineModule(refine_path, selector_path, search_factor=sr, input_sz=input_sz)

    # OPE tracking
    for v_idx, video in enumerate(dataset):
        if args.video != '':
            # test one specific video
            if video.name != args.video:
                continue
        toc = 0
        pred_bboxes = []
        scores = []
        track_times = []
        for idx, (img, gt_bbox) in enumerate(video):
            tic = cv2.getTickCount()
            if idx == 0:
                H, W, _ = img.shape
                cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                # initialize the refinement module for this video
                RF_module.initialize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), np.array(gt_bbox_))
                pred_bbox = gt_bbox_
                scores.append(None)
                pred_bboxes.append(pred_bbox)
            else:
                # iteratively refine the previous box (no base tracker here)
                for i in range(n_iter):
                    pred_bbox = RF_module.refine(cv2.cvtColor(img, cv2.COLOR_BGR2RGB),
                                                 np.array(pred_bbox))
                    x1, y1, w, h = pred_bbox.tolist()
                    # apply boundary and minimum-size limits
                    x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (H, W))
                    w = x2 - x1
                    h = y2 - y1
                    pred_bbox = np.array([x1, y1, w, h])
                pred_bboxes.append(pred_bbox)
                scores.append(None)
            toc += cv2.getTickCount() - tic
            track_times.append((cv2.getTickCount() - tic) / cv2.getTickFrequency())
            if idx == 0:
                cv2.destroyAllWindows()
            if args.vis and idx > 0:
                gt_bbox = list(map(int, gt_bbox))
                pred_bbox = list(map(int, pred_bbox))
                cv2.rectangle(img, (gt_bbox[0], gt_bbox[1]),
                              (gt_bbox[0] + gt_bbox[2], gt_bbox[1] + gt_bbox[3]), (0, 255, 0), 3)
                cv2.rectangle(img, (pred_bbox[0], pred_bbox[1]),
                              (pred_bbox[0] + pred_bbox[2], pred_bbox[1] + pred_bbox[3]),
                              (0, 255, 255), 3)
                cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                cv2.imshow(video.name, img)
                cv2.waitKey(1)
        toc /= cv2.getTickFrequency()
        # save results
        model_path = os.path.join(save_dir, args.dataset, model_name + '_' + str(selector_path))
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        result_path = os.path.join(model_path, '{}.txt'.format(video.name))
        with open(result_path, 'w') as f:
            for x in pred_bboxes:
                f.write(','.join([str(i) for i in x]) + '\n')
        print('({:3d}) Video: {:12s} Time: {:5.1f}s Speed: {:3.1f}fps'.format(
            v_idx + 1, video.name, toc, idx / toc))
def main():
    model_name = 'siamRPN'
    snapshot_path = os.path.join(project_path_, 'experiments/%s/model.pth' % args.tracker_name)
    config_path = os.path.join(project_path_, 'experiments/%s/config.yaml' % args.tracker_name)
    cfg.merge_from_file(config_path)

    # create model (a torch.nn.Module) and load the snapshot
    model = ModelBuilder()
    model = load_pretrain(model, snapshot_path).cuda().eval()

    # build tracker: the network plus its post-processing
    tracker = build_tracker(model)

    # create dataset
    dataset = DatasetFactory.create_dataset(name=args.dataset, dataset_root=dataset_root_,
                                            load_img=False)

    total_lost = 0
    if args.dataset in ['VOT2016', 'VOT2018', 'VOT2019']:
        # restart (reset-based) tracking
        for v_idx, video in enumerate(dataset):
            if args.video != '':
                # test one specific video
                if video.name != args.video:
                    continue
            frame_counter = 0
            lost_number = 0
            toc = 0
            pred_bboxes = []
            for idx, (img, gt_bbox) in enumerate(video):
                if len(gt_bbox) == 4:
                    # convert (x, y, w, h) to the 8-point polygon format used by VOT
                    gt_bbox = [gt_bbox[0], gt_bbox[1],
                               gt_bbox[0], gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1] + gt_bbox[3] - 1,
                               gt_bbox[0] + gt_bbox[2] - 1, gt_bbox[1]]
                tic = cv2.getTickCount()
                if idx == frame_counter:
                    H, W, _ = img.shape
                    cx, cy, w, h = get_axis_aligned_bbox(np.array(gt_bbox))
                    gt_bbox_ = [cx - (w - 1) / 2, cy - (h - 1) / 2, w, h]
                    tracker.init(img, gt_bbox_)
                    pred_bbox = gt_bbox_
                    pred_bboxes.append(1)
                elif idx > frame_counter:
                    outputs = tracker.track(img)
                    pred_bbox = outputs['bbox']
                    overlap = vot_overlap(pred_bbox, gt_bbox, (img.shape[1], img.shape[0]))
                    if overlap > 0:  # not lost
                        pred_bboxes.append(pred_bbox)
                    else:  # lost object
                        pred_bboxes.append(2)
                        frame_counter = idx + 5  # skip 5 frames
                        lost_number += 1
                else:
                    pred_bboxes.append(0)
                toc += cv2.getTickCount() - tic
                if idx == 0:
                    cv2.destroyAllWindows()
                if args.vis and idx > frame_counter:
                    cv2.polylines(img, [np.array(gt_bbox, int).reshape((-1, 1, 2))],
                                  True, (0, 255, 0), 3)
                    bbox = list(map(int, pred_bbox))
                    cv2.rectangle(img, (bbox[0], bbox[1]),
                                  (bbox[0] + bbox[2], bbox[1] + bbox[3]), (0, 255, 255), 3)
                    cv2.putText(img, str(idx), (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
                    cv2.putText(img, str(lost_number), (40, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    cv2.imshow(video.name, img)
                    cv2.waitKey(1)
            toc /= cv2.getTickFrequency()
            # save results
            video_path = os.path.join(save_dir, args.dataset, model_name, 'baseline', video.name)
            if not os.path.isdir(video_path):
                os.makedirs(video_path)
            result_path = os.path.join(video_path, '{}_001.txt'.format(video.name))
            with open(result_path, 'w') as f:
                for x in pred_bboxes:
                    if isinstance(x, int):
                        f.write("{:d}\n".format(x))
                    else:
                        f.write(','.join([vot_float2str("%.4f", i) for i in x]) + '\n')
            print('({:3d}) Video: {:12s} Time: {:4.1f}s Speed: {:3.1f}fps Lost: {:d}'.format(
                v_idx + 1, video.name, toc, idx / toc, lost_number))
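
# Each of these test scripts is driven by a module-level argparse block that is
# not shown in this file. The sketch below reconstructs the flags the main()
# functions read; the flag names are taken from usage above, but the defaults
# and help strings are placeholders, not the project's actual values:
import argparse

parser = argparse.ArgumentParser(description='AlphaRefine tracking evaluation')
parser.add_argument('--tracker_name', default='dimp', type=str)
parser.add_argument('--tracker_param', default='dimp50', type=str)
parser.add_argument('--dataset', default='LaSOT', type=str)
parser.add_argument('--video', default='', type=str, help='test one specific video')
parser.add_argument('--vis', action='store_true', help='visualize tracking results')
parser.add_argument('--debug', default=0, type=int)
parser.add_argument('--run_id', default=1, type=int)
parser.add_argument('--thres', default=0.65, type=float, help='mask-to-box threshold')
args = parser.parse_args()

if __name__ == '__main__':
    main()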