def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30, use_cuda=True):
    """Run the JDE tracker over a sequence and write MOT-format results.

    Args:
        opt: options namespace; must provide ``min_box_area``.
        dataloader: yields ``(path, img, img0)`` where ``img`` is the
            preprocessed network input and ``img0`` the original BGR frame.
        data_type: result-format tag passed through to ``write_results``.
        result_filename: path of the output tracking-result file.
        save_dir: if given, annotated frames are written here as JPEGs.
        show_image: show each annotated frame in an OpenCV window.
        frame_rate: sequence frame rate, forwarded to the tracker.
        use_cuda: move the input blob to the GPU when True.

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # drop boxes that are too small or implausibly wide (w/h > 1.6)
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results (MOT frame numbering is 1-based)
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # same zero-division guard as the logger line above
            online_im = vis.plot_tracking(
                img0, online_tlwhs, online_ids, frame_id=frame_id,
                fps=1. / max(1e-5, timer.average_time))
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def send_image(self, img0, online_tlwhs, online_ids, frame_id, fps):
    """Render tracking overlays onto a frame and optionally persist it.

    Draws the given boxes and ids on ``img0``, writes the annotated frame
    to ``self.video_saver`` when one is attached, and returns the frame.
    """
    annotated = vis.plot_tracking(
        img0, online_tlwhs, online_ids, frame_id=frame_id, fps=fps)
    saver = self.video_saver
    if saver is not None:
        saver.write(annotated)
    return annotated
def eval_seq(opt, data_path, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30, frame_nums=60):
    """Track a sequence from per-frame detection files on disk.

    Detections for frame ``k`` are read from ``<data_path>/<k>.txt``
    (comma-separated floats) and fed straight to the tracker; the matching
    image ``<data_path>/<k>.jpg`` is only loaded for visualization.

    Args:
        opt: options namespace; must provide ``min_box_area``.
        data_path: directory with ``<k>.txt`` / ``<k>.jpg`` pairs.
        data_type: result-format tag passed through to ``write_results``.
        result_filename: path of the output tracking-result file.
        save_dir: if given, annotated frames are written here as JPEGs.
        show_image: kept for interface compatibility (display is disabled).
        frame_rate: unused here; kept for interface compatibility.
        frame_nums: number of frames to process (was a hard-coded 60;
            pass e.g. ``len(os.listdir(data_path)) // 2`` for a full run).
    """
    tracker = JDETracker(opt)
    results = []
    # hoisted out of the per-frame loop: creating the directory once suffices
    if save_dir:
        mkdir_if_missing(save_dir)
    for frame_id in range(1, frame_nums + 1):
        dets = np.loadtxt(os.path.join(data_path, str(frame_id) + '.txt'),
                          dtype=np.float32, delimiter=',')
        online_targets = tracker.update(dets)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # keep boxes that are large enough and not implausibly wide
            if tlwh[2] * tlwh[3] > opt.min_box_area and tlwh[2] / tlwh[3] < 1.6:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        # save results (frame ids are 1-based)
        results.append((frame_id, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            img = cv2.imread(os.path.join(data_path, str(frame_id) + '.jpg'))
            online_im = vis.plot_tracking(img, online_tlwhs, online_ids,
                                          frame_id=frame_id)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
    # save results
    write_results(result_filename, results, data_type)
def eval_seq(opt, dataloader, data_type, save_dir=None, show_image=True,
             frame_rate=30):
    """Track every frame of ``dataloader`` and emit annotated output video.

    Filtered boxes and ids are accumulated in ``results``; annotated frames
    are pushed to a ``VideoWriter`` instead of individual image files.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    writer = VideoWriter(save_dir, dataloader)
    for path, img, img0 in dataloader:
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        tracks = tracker.update(blob, img0)
        kept_boxes = []
        kept_ids = []
        for trk in tracks:
            box = trk.tlwh
            too_wide = box[2] / box[3] > 1.6
            big_enough = box[2] * box[3] > opt.min_box_area
            if big_enough and not too_wide:
                kept_boxes.append(box)
                kept_ids.append(trk.track_id)
        timer.toc()
        # save results (frame ids are 1-based)
        results.append((frame_id + 1, kept_boxes, kept_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, kept_boxes, kept_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                writer.write(online_im)
        frame_id += 1
    print("***************************** DONE *****************************")
def eval_seq(opt, save_dir='', frame_rate=30):
    """Track ``opt.multi_test_images_dataset`` and dump centre trajectories.

    For every tracked target a line ``frame_id tid centre_y centre_x`` is
    written to ``<save_dir>/args.txt``, and each annotated frame is saved to
    ``save_dir`` as ``{frame:05d}.jpg``. No aspect-ratio/area filtering is
    applied: every target returned by the tracker is kept.

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    dataloader = LoadImages(opt.multi_test_images_dataset)
    opt = opt.update_dataset_info_and_set_heads(opt, dataloader)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0
    # open() creates the file if missing — the previous
    # os.open(..., os.O_RDWR) raised FileNotFoundError on a fresh run —
    # and the context manager guarantees the handle is closed on error.
    with open(os.path.join(save_dir, 'args.txt'), 'w') as traj_file:
        for i, (path, img, img0) in enumerate(dataloader):
            # run tracking
            timer.tic()
            blob = torch.from_numpy(img).unsqueeze(0)
            online_targets = tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # box centre; written as "frame id y x"
                centre = tlwh[:2] + tlwh[2:] / 2
                traj_file.write('{} {} {} {}\n'.format(
                    frame_id, tid, centre[1], centre[0]))
            timer.toc()
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            cv2.imwrite(
                os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                online_im)
            frame_id += 1
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30):
    """Run JDE tracking over a sequence and write MOT-format results.

    ``dataloader`` yields ``(path, img, img0)`` where ``img`` is the
    normalized RGB network input and ``img0`` the original BGR frame.
    (A large in-body debug dump — a no-op string literal evaluated on
    every call — has been removed.)

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        # blob is 1 x 3 x H x W (e.g. 1, 3, 608, 1088)
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # drop boxes that are implausibly wide for a pedestrian
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results (MOT frame numbering is 1-based)
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30):
    # Multi-class variant: tracks with per-class confidence thresholds via
    # ``update_sep3`` and offsets track ids by ``tid_max`` so ids stay
    # unique across concatenated sub-sequences (a new sub-sequence is
    # detected by '0000001' in the frame path).
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        # NOTE(review): this window processes only frames 300..302 — looks
        # like leftover debugging code; confirm before production use.
        if frame_id < 300:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        filename = path.split('/')[-1]
        if '0000001' in path:
            # first frame of a new sub-sequence: restart the tracker and
            # bump the id offset past every id handed out so far
            tid_max = tid_temp + 1
            print(path, tid_max)
            tracker = JDETracker(opt, frame_rate=frame_rate)
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # one confidence threshold per class
        online_targets = tracker.update_sep3(
            blob, img0,
            conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])
        online_tlwhs = []
        online_ids = []
        online_cids = []  # class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            # computed but unused: the aspect-ratio filter is disabled below
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:  # and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            # remember the highest id seen, for the next sub-sequence offset
            tid_temp = max(tid, tid_temp)
        timer.toc()
        # save results, keyed by the source image filename
        results.append((filename, online_tlwhs, online_ids, online_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, polygon, paths, data_type, result_filename,
             frame_dir=None, save_dir=None, bbox_dir=None, show_image=True,
             frame_rate=30):
    """Polygon-zone tracking/counting variant.

    Tracks objects, counts tracklets of selected classes that leave the
    polygon, and appends their movements to ``<video-name>.txt``. Annotated
    tracking frames, raw-detection frames and clean frames can be written
    to ``save_dir``, ``bbox_dir`` and ``frame_dir`` respectively.

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    count = 0
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    if frame_dir:
        mkdir_if_missing(frame_dir)
    tracker = JDETracker(opt, polygon, paths, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 1
    # hoisted: the video name was recomputed at several places
    video_name = opt.input_video.split('/')[-1][:-4]
    # context manager guarantees the log is flushed and closed — the
    # original handle was opened with open() and never closed
    with open(video_name + '.txt', 'w') as f:
        for path, img, img0 in dataloader:
            img0_clone = copy.copy(img0)
            if frame_id % 1 == 0:  # logs every frame; raise modulus to thin logs
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))
            # run tracking
            timer.tic()
            blob = (torch.from_numpy(img).cuda().unsqueeze(0)
                    if opt.gpus[0] >= 0
                    else torch.from_numpy(img).cpu().unsqueeze(0))
            online_targets, detection_boxes, out_of_polygon_tracklet = \
                tracker.update(blob, img0)
            # count selected classes whose tracklets left the polygon
            if len(out_of_polygon_tracklet) > 0:
                for track in np.asarray(out_of_polygon_tracklet)[:, 2]:
                    if track in ['person', 'bicycle', 'motorcycle']:
                        count += 1
                        print('count : ' + str(count))
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                if tlwh[2] * tlwh[3] > opt.min_box_area:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            # bbox detection plot
            box_tlbrs = []
            box_scores = []
            box_classes = []
            box_occlusions = []
            img_bbox = img0.copy()
            for box in detection_boxes:
                tlbr = box.tlbr
                tlwh = box.tlwh
                if tlwh[2] * tlwh[3] > opt.min_box_area:
                    box_tlbrs.append(tlbr)
                    box_scores.append(box.score)
                    box_classes.append(box.infer_type())
                    box_occlusions.append(
                        'occ' if box.occlusion_status == True else 'non_occ')
            timer.toc()
            # save results: one log line per tracklet that left the polygon
            for track in out_of_polygon_tracklet:
                frame_idx, id, classes, movement = track
                results.append((video_name, frame_idx, classes, movement))
                f.write(','.join([video_name, str(frame_idx), str(classes),
                                  str(movement)]) + '\n')
            if show_image or save_dir is not None:
                online_im = vis.plot_tracking(
                    img0, online_tlwhs, online_ids, frame_id=frame_id,
                    fps=1. / timer.average_time,
                    out_track=out_of_polygon_tracklet)
                bbox_im = vis.plot_detections(img_bbox, box_tlbrs,
                                              scores=box_scores,
                                              box_occlusion=None,
                                              btypes=box_classes)
                if show_image:
                    cv2.polylines(online_im, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.polylines(bbox_im, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.polylines(img0_clone, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.imshow('online_im', online_im)
                    cv2.imshow('bbox_im', bbox_im)
                if save_dir is not None:
                    cv2.polylines(online_im, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.polylines(bbox_im, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.polylines(img0_clone, [np.asarray(polygon)], True,
                                  (0, 255, 255))
                    cv2.imwrite(
                        os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
                    cv2.imwrite(
                        os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                        bbox_im)
                    cv2.imwrite(
                        os.path.join(frame_dir, '{:05d}.jpg'.format(frame_id)),
                        img0_clone)
            frame_id += 1
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30):
    # Check that the save directory exists and create it if it does not.
    if save_dir:
        mkdir_if_missing(save_dir)
    # Create the JDETracker, which (see __init__ in
    # src/lib/tracker/multitracker.py): 1) decides whether to run on CPU or
    # GPU, 2) builds the model and puts it in eval mode, 3) sets up a Kalman
    # filter — an estimator for the state of a noisy linear dynamical system
    # based on linearly dependent observations.
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # Timer used to monitor processing time.
    timer = Timer()
    # Accumulator for the per-frame results.
    results = []
    # Frame counter.
    frame_id = 0
    # Iterate over the images of the dataset loaded in main().
    for path, img, img0 in dataloader:
        # Report the frame rate every 20 processed frames.
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        # NVIDIA/CUDA specific: replace .cuda() with CPU tensors to run on
        # CPU. Builds a 4-D batch tensor from image `img`.
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # JDETracker.update (src/lib/tracker/multitracker.py):
        # 1) takes blob and img0; 2) extracts their heights and widths;
        # 3) builds c (centre: half of img0's height/width as floats);
        # 4) builds s = max(inp_width / inp_height * height, width) * 1.0;
        # 5) network forward to get detections & embeddings, pushed onto a
        #    tracklet pool; 6) first association by embedding (embeddings
        #    distinguish different objects, per the paper); 7) second
        #    association by IoU; 8) initializes new STracks; 9) updates
        #    their state; 10) returns the active stracks.
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # Iterate over the returned stracks.
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename=None, save_dir=None,
             show_image=False, frame_rate=10, use_cuda=True):
    # Surveillance variant: tracks people, pushes per-frame info to an ATC
    # service and triggers "crime" handling on flagged frames.
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    person_mot = PersonMOT()
    results = []
    frame_id = 0
    rflag = False  # set by atcs.send_info; gates handle_crime below
    for i, (path, img, img0) in enumerate(dataloader):
        # only every 12th frame is processed
        if i % 12 != 0:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # send the ATC frame info
        rflag = atcs.send_info(opt.device_id, opt.url, online_ids,
                               online_tlwhs, opt.input_stream)
        if rflag and i % opt.check_setp == 0:
            person_mot.handle_crime(opt.device_id, opt.url, opt.trigger,
                                    online_ids, online_tlwhs, img0)
        # whether to render the image
        if show_image or save_dir:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            # NOTE(review): results are only collected when rendering is
            # enabled — confirm this is intended.
            results.append((frame_id + 1, online_tlwhs, online_ids))
            # display the image
            if show_image:
                cv2.imshow('online_im', online_im)
                cv2.waitKey(1)
            # store the image
            if save_dir:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    if opt.save:
        write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30, use_cuda=True, gt_path=None):
    """Run JDE tracking, optionally overlaying ground-truth boxes.

    When ``gt_path`` points to a MOT-format ground-truth file, its boxes
    are parsed once up front and drawn next to the tracker output.

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    # fid -> list of (tlwh, target_id, score) ground-truth entries
    gt_results_dict = dict()
    if gt_path and os.path.isfile(gt_path):
        with open(gt_path, 'r') as f:
            for line in f.readlines():
                linelist = line.split(',')
                if len(linelist) < 7:
                    continue
                fid = int(linelist[0])
                if fid < 1:
                    continue
                gt_results_dict.setdefault(fid, list())
                score = 1
                tlwh = tuple(map(float, linelist[2:6]))
                target_id = (linelist[1])
                gt_results_dict[fid].append((tlwh, target_id, score))
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results (MOT frame numbering is 1-based)
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # BUGFIX: .get avoids the KeyError the direct indexing raised
            # whenever no ground truth exists for this frame (e.g. when
            # gt_path is None); assumes plot_tracking treats gt_box=None
            # as "no ground truth" — confirm.
            online_im = vis.plot_tracking(
                img0, online_tlwhs, online_ids, frame_id=frame_id,
                fps=1. / timer.average_time,
                gt_box=gt_results_dict.get(frame_id + 1))
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt):
    """Run offline tracking on ``opt.input_video`` with an Ascend OM model.

    Annotated frames are written to ``<output_root>/<video_name>/{id:05d}.jpg``.
    """
    acl_resource = AclResource()
    acl_resource.init()
    mot_model = Model('../model/dlav0.om')

    # Create output dir if not exist; default outputs
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    video_name = os.path.basename(opt.input_video).replace(' ', '_').split('.')[0]

    # setup dataloader, use LoadVideo or LoadImages
    dataloader = LoadVideo(opt.input_video, (1088, 608))
    frame_rate = dataloader.frame_rate

    # dir for output images; default: outputs/'VideoFileName'
    save_dir = os.path.join(result_root, video_name)
    if save_dir and os.path.exists(save_dir) and opt.rm_prev:
        shutil.rmtree(save_dir)
    mkdir_if_missing(save_dir)

    # initialize tracker
    tracker = JDETracker(opt, mot_model, frame_rate=frame_rate)
    timer = Timer()
    results = []

    # img: h w c; 608 1088 3 | img0: c h w; 3 608 1088
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            print('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking, start tracking timer
        timer.tic()
        # list of Tracklet; see multitracker.STrack
        tracked = tracker.update(np.array([img]), img0)

        # prepare for drawing, get all bbox and id
        kept_boxes = []
        kept_ids = []
        for trk in tracked:
            box = trk.tlwh
            plausible_shape = box[2] / box[3] <= 1.6  # i.e. not "vertical"
            if box[2] * box[3] > opt.min_box_area and plausible_shape:
                kept_boxes.append(box)
                kept_ids.append(trk.track_id)
        timer.toc()

        # draw bbox and id
        annotated = vis.plot_tracking(img0, kept_boxes, kept_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    annotated)
def visualize_comparison(dataroot, seq, save_dir, result_dir_1, result_dir_2,
                         compile_images_only=False):
    """Render side-by-side visualizations of two MOT result files for ``seq``.

    Boxes present in both results (greedily IoU-matched per frame) are drawn
    with normal thickness using the ids from ``result_dir_1`` on both images
    so colors correspond; unmatched boxes are drawn thicker. Finally each
    image folder is compiled into an .avi with ffmpeg.
    """
    import pandas as pd
    from cython_bbox import bbox_overlaps as bbox_ious
    from tracker import matching
    from tracking_utils import visualization as vis

    names = ['frame', 'id', 'left', 'top', 'width', 'height', 'conf',
             'misc1', 'misc2', 'misc3']
    res_1 = pd.read_csv(osp.join(result_dir_1, f"{seq}.txt"), sep=',',
                        names=names)
    res_2 = pd.read_csv(osp.join(result_dir_2, f"{seq}.txt"), sep=',',
                        names=names)
    img_root = osp.join(dataroot, seq, 'img1')
    save_root_1 = osp.join(osp.abspath(save_dir), osp.basename(result_dir_1),
                           seq)
    os.makedirs(save_root_1, exist_ok=True)
    save_root_2 = osp.join(osp.abspath(save_dir), osp.basename(result_dir_2),
                           seq)
    os.makedirs(save_root_2, exist_ok=True)

    if not compile_images_only:
        # BUGFIX: +1 so the last frame is included — the original exclusive
        # range silently dropped it (intent shown by the earlier np.unique
        # iteration).
        for frame in range(np.min(res_1['frame']), np.max(res_1['frame']) + 1):
            res_1_frame = res_1[res_1['frame'] == frame]
            res_2_frame = res_2[res_2['frame'] == frame]
            res_1_boxes_np = np.array(
                res_1_frame[['left', 'top', 'width', 'height']])
            res_1_boxes_np[:, 2:] += res_1_boxes_np[:, :2]  # tlwh -> tlbr
            res_2_boxes_np = np.array(
                res_2_frame[['left', 'top', 'width', 'height']])
            res_2_boxes_np[:, 2:] += res_2_boxes_np[:, :2]
            img1 = cv2.imread(osp.join(img_root, f"{frame:06d}.jpg"))
            img2 = cv2.imread(osp.join(img_root, f"{frame:06d}.jpg"))
            if res_1_boxes_np.shape[0] > 0 and res_2_boxes_np.shape[0] > 0:
                iou = bbox_ious(res_1_boxes_np, res_2_boxes_np)
                matches, u_res_1, u_res_2 = matching.linear_assignment(
                    -iou, thresh=-0.7)
                if len(matches) > 0:
                    # Plot the matchings in normal colors
                    matches_res_1 = res_1_frame.iloc[matches[:, 0]]
                    matches_res_2 = res_2_frame.iloc[matches[:, 1]]
                    # use result-1 ids for both images so colors correspond
                    online_ids = matches_res_1['id'].to_list()
                    matches_res_1_np = np.array(
                        matches_res_1[['left', 'top', 'width', 'height']])
                    matches_res_2_np = np.array(
                        matches_res_2[['left', 'top', 'width', 'height']])
                    online_im_1 = vis.plot_tracking(
                        img1, matches_res_1_np, online_ids, frame_id=frame,
                        fps=-1, line_thickness=1)
                    online_im_2 = vis.plot_tracking(
                        img2, matches_res_2_np, online_ids, frame_id=frame,
                        fps=-1, line_thickness=1)
                else:
                    online_im_1 = img1
                    online_im_2 = img2

                # Plot the unmatched result-1 boxes in thicker lines
                unmatches_res_1 = res_1_frame.iloc[u_res_1]
                unmatches_res_1_np = np.array(
                    unmatches_res_1[['left', 'top', 'width', 'height']])
                if len(unmatches_res_1) > 0:
                    if len(unmatches_res_1.shape) == 1:
                        unmatched_res_1_ids = [unmatches_res_1['id']]
                        scores = [unmatches_res_1['conf']]
                        unmatches_res_1_np = unmatches_res_1_np.reshape(1, -1)
                    else:
                        unmatched_res_1_ids = unmatches_res_1['id'].to_list()
                        scores = unmatches_res_1['conf'].to_list()
                    online_im_1 = vis.plot_tracking(
                        online_im_1, unmatches_res_1_np, unmatched_res_1_ids,
                        frame_id=frame, fps=-1, line_thickness_unmatched=4)
                    print(
                        f"{len(unmatches_res_1)} unmatches found in frame {frame}"
                    )

                # Plot the unmatched result-2 boxes in thicker lines
                unmatches_res_2 = res_2_frame.iloc[u_res_2]
                unmatches_res_2_np = np.array(
                    unmatches_res_2[['left', 'top', 'width', 'height']])
                if len(unmatches_res_2) > 0:
                    if len(unmatches_res_2.shape) == 1:
                        unmatched_res_2_ids = [unmatches_res_2['id']]
                        scores = [unmatches_res_2['conf']]
                        unmatches_res_2_np = unmatches_res_2_np.reshape(1, -1)
                    else:
                        unmatched_res_2_ids = unmatches_res_2['id'].to_list()
                        scores = unmatches_res_2['conf'].to_list()
                    online_im_2 = vis.plot_tracking(
                        online_im_2, unmatches_res_2_np, unmatched_res_2_ids,
                        frame_id=frame, fps=-1, line_thickness_unmatched=4)
                cv2.imwrite(osp.join(save_root_1, f"{frame:06d}.jpg"),
                            online_im_1)
                cv2.imwrite(osp.join(save_root_2, f"{frame:06d}.jpg"),
                            online_im_2)
            else:
                # one (or both) of the result sets is empty for this frame
                if len(res_1_boxes_np) > 0 and len(res_2_boxes_np) == 0:
                    matches_res_1 = res_1_frame
                    matches_res_1_np = np.array(
                        matches_res_1[['left', 'top', 'width', 'height']])
                    if len(matches_res_1.shape) == 1:
                        online_ids = [matches_res_1['id']]
                        scores = [matches_res_1['conf']]
                    else:
                        online_ids = matches_res_1['id'].to_list()
                        scores = matches_res_1['conf'].to_list()
                    online_im_1 = vis.plot_tracking(
                        img1, matches_res_1_np, online_ids, frame_id=frame,
                        fps=-1, line_thickness_unmatched=4)
                    cv2.imwrite(osp.join(save_root_1, f"{frame:06d}.jpg"),
                                online_im_1)
                    cv2.imwrite(osp.join(save_root_2, f"{frame:06d}.jpg"),
                                img2)
                if len(res_1_boxes_np) == 0 and len(res_2_boxes_np) > 0:
                    matches_res_2 = res_2_frame
                    matches_res_2_np = np.array(
                        matches_res_2[['left', 'top', 'width', 'height']])
                    if len(matches_res_2.shape) == 1:
                        online_ids = [matches_res_2['id']]
                        scores = [matches_res_2['conf']]
                    else:
                        online_ids = matches_res_2['id'].to_list()
                        scores = matches_res_2['conf'].to_list()
                    online_im_2 = vis.plot_tracking(
                        img2, matches_res_2_np, online_ids, frame_id=frame,
                        fps=-1, line_thickness_unmatched=4)
                    cv2.imwrite(osp.join(save_root_1, f"{frame:06d}.jpg"),
                                img1)
                    cv2.imwrite(osp.join(save_root_2, f"{frame:06d}.jpg"),
                                online_im_2)
                # BUGFIX: the second operand used to re-test res_1_boxes_np;
                # "both empty" now writes the raw frames as intended.
                if len(res_1_boxes_np) == 0 and len(res_2_boxes_np) == 0:
                    cv2.imwrite(osp.join(save_root_1, f"{frame:06d}.jpg"),
                                img1)
                    cv2.imwrite(osp.join(save_root_2, f"{frame:06d}.jpg"),
                                img2)

    # NOTE(review): '-vcodec libx264' and '-c:v copy' are contradictory
    # ffmpeg options (the later one wins) — confirm the intended codec.
    cmd_str = 'ffmpeg -framerate 8 -y -f image2 -i {}/%06d.jpg -vcodec libx264 -c:v copy {}'.format(
        save_root_1, osp.join(save_root_1, f'GNN_{seq}.avi'))
    os.system(cmd_str)
    cmd_str = 'ffmpeg -framerate 8 -y -f image2 -i {}/%06d.jpg -vcodec libx264 -c:v copy {}'.format(
        save_root_2, osp.join(save_root_2, f'noGNN_{seq}.avi'))
    os.system(cmd_str)
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             bbox_dir=None, show_image=True, frame_rate=30):
    """Track from precomputed detections and also render the raw detections.

    ``dataloader`` yields ``(path, img0, detection)``; the tracker is fed
    the detections directly instead of a network blob. Annotated tracking
    frames go to ``save_dir``, raw-detection frames to ``bbox_dir``.
    (Dead locals ``track_pools``/``id_pools`` and an unused aspect-ratio
    computation in the detection loop have been removed.)

    Returns:
        Tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img0, detection in dataloader:
        if frame_id % 1 == 0:  # logs every frame; raise modulus to thin logs
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        online_targets, detection_boxes = tracker.update(detection, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        # bbox detection plot
        box_tlbrs = []
        box_scores = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                box_tlbrs.append(tlbr)
                box_scores.append(box.score)
        timer.toc()
        # NOTE(review): each entry repeats the frame id once per box —
        # confirm write_results expects this shape for this data_type.
        results.append(
            ([frame_id + 1] * len(online_tlwhs), online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            bbox_im = vis.plot_detections(img_bbox, box_tlbrs,
                                          scores=box_scores)
            if show_image:
                cv2.imshow('online_im', online_im)
                cv2.imshow('bbox_im', bbox_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
                cv2.imwrite(
                    os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                    bbox_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None,
             show_image=True, frame_rate=30):
    # Variant that starts tracking from the midpoint of the sequence and
    # feeds the model a rolling queue of recent feature maps as reference.
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    len_all = len(dataloader)
    # skip the first half of the sequence
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    # rolling buffer of the most recent model outputs used as reference
    out_queue = Deque(maxlen=5)
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # warm up the reference queue (only loops on the first processed
        # frame; afterwards the queue stays near capacity)
        while len(out_queue) < out_queue.maxlen - 1:
            output = tracker.model.features(blob)[-1]
            out_queue.append(output)
        # reference tensor: concatenation of the buffered outputs
        ref = torch.cat(list(out_queue), dim=0)
        output = tracker.model((blob, ref))[0]
        # keep the raw features for future reference frames
        out_queue.append(output.pop('raw'))
        online_targets = tracker.update(blob, img0, output)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq_realtime(opt, save_dir=None, show_image=True, frame_rate=30):
    """Run JDE tracking live on webcam 0 until ESC is pressed.

    Args:
        opt: options namespace; must provide ``min_box_area``.
        save_dir: unused; kept for interface compatibility.
        show_image: unused; the window is always shown.
        frame_rate: forwarded to the tracker.
    """
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0
    cv2.namedWindow('online_im', cv2.WINDOW_FREERATIO)
    cv2.setWindowProperty('online_im', cv2.WND_PROP_AUTOSIZE,
                          cv2.WND_PROP_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    ret, im = cap.read()
    # NOTE(review): the letterbox target equals the camera resolution here,
    # not the network input size — confirm this is intended.
    height = im.shape[0]
    width = im.shape[1]
    while True:
        ret, img0 = cap.read()
        if not ret:
            # camera unplugged / stream ended: stop cleanly instead of
            # crashing on a None frame
            break
        # Padded resize
        img, _, _, _ = letterbox(img0, height=height, width=width)
        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        text_scale = max(1, online_im.shape[1] / 1600.)
        cv2.putText(online_im, 'Press ESC to STOP',
                    (300, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 0, 255), thickness=2)
        cv2.imshow('online_im', online_im)
        key = cv2.waitKey(1)
        if key == 27:  # ESC
            break
        frame_id += 1
    # release the camera (it was previously leaked)
    cap.release()
    cv2.destroyAllWindows()
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30, conf_thres=0.3):
    """Run the GNN tracker over a sequence, conditioning each frame on crops
    from the previous frame (either via ROI-Align or precomputed crops).

    Args:
        opt: options namespace; must provide ``use_letter_box``,
            ``use_roi_align``, ``crop_size``, ``inference_gnn_output_layer``
            and ``min_box_area``.
        dataloader: yields ``(path, img, img0, p_img_path, p_img)`` — current
            preprocessed/original frames plus previous-frame image data.
        data_type: result format tag passed through to ``write_results``.
        result_filename: output path for the MOT-format results file.
        save_dir: if set, annotated frames are written here as JPGs.
        show_image: if True, display each annotated frame with OpenCV.
        frame_rate: sequence frame rate, forwarded to the tracker.
        conf_thres: detection confidence threshold forwarded to the tracker.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = GNNTracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0, p_img_path, p_img) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        if i == 0:
            # First frame: previous boxes come from the dataloader's initializer.
            p_boxes, init_img_path, p_img = dataloader.initialize(
                use_letter_box=opt.use_letter_box)
        else:
            # Later frames: derive previous boxes from last frame's tracks.
            p_boxes, p_img = prepare_prev_img(dataloader, online_targets, opt, p_img)
        if opt.use_roi_align:
            # Hand the raw boxes to the tracker (normalized), letting the model
            # do its own ROI pooling over the full previous image.
            p_crops = p_boxes.clone()
            _, h, w = p_img.shape
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]
            edge_index = create_inference_time_graph(opt, p_boxes, p_crops, p_img)
            # convert boxes from xyxy to normalized according to p_img dimensions
            p_crops[:, 0] = p_crops[:, 0] / w
            p_crops[:, 1] = p_crops[:, 1] / h
            p_crops[:, 2] = p_crops[:, 2] / w
            p_crops[:, 3] = p_crops[:, 3] / h
            online_targets = tracker.update(
                blob, img0, p_crops, p_crops_lengths, edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=p_img.unsqueeze(0).cuda(), conf_thres=conf_thres)
        else:
            # Pre-extract fixed-size crops here; the tracker gets no p_imgs.
            p_crops = torchvision.ops.roi_align(input=p_img.unsqueeze(0),
                                                boxes=[p_boxes],
                                                output_size=opt.crop_size)
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]
            edge_index = create_inference_time_graph(opt, p_boxes, p_crops, p_img)
            online_targets = tracker.update(
                blob, img0, p_crops, p_crops_lengths, edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=None, conf_thres=conf_thres)
        online_tlwhs = []
        online_ids = []
        online_confs = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            t_conf = t.score
            # Discard implausibly tall (vertical) boxes and tiny boxes.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_confs.append(t_conf)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_confs))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          scores=online_confs, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, polygon, paths, data_type, result_filename,
             frame_dir=None, save_dir=None, bbox_dir=None, show_image=True,
             frame_rate=30, polygon2=None, line1=None, line2=None, cam_id=None):
    """Run the polygon/movement-counting JDE tracker over a sequence.

    Tracks objects, records tracklets that leave the counting polygon, and
    (when ``cam_id`` is given) streams per-movement counts to a submission
    file in ``/data/submission_output/``.

    Args:
        opt: options namespace; must provide ``gpus`` and ``input_video``.
        dataloader: yields ``(path, img, img0)`` tuples.
        polygon: counting-region polygon (list of points).
        paths: movement path definitions forwarded to the tracker.
        data_type: result format tag (results are returned, not written here).
        result_filename: unused in this variant (kept for signature parity).
        frame_dir / save_dir / bbox_dir: optional output dirs (created if set).
        show_image: if True, display tracking and detection overlays.
        frame_rate: sequence frame rate, forwarded to the tracker.
        polygon2 / line1 / line2: optional extra regions (only used by the
            commented-out drawing code).
        cam_id: camera id; when not None, submission lines are written.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    if frame_dir:
        mkdir_if_missing(frame_dir)
    # BUG FIX: the original assigned `f` only inside `if cam_id is not None:`
    # (its inner `else: f = None` was unreachable because the condition was
    # duplicated), so `cam_id=None` raised NameError at `if f is not None`.
    if cam_id is not None:
        f = open('/data/submission_output/cam_' + str(cam_id) + ".txt", "w")
    else:
        f = None
    # Category-name groups mapped to submission class ids (replaces four
    # near-identical if/elif branches that each wrote the same line format).
    type_codes = (
        (('person', 'motor', 'motorcycle', 'bicycle', 'tricycle'), 1),
        (('car', 'van'), 2),
        (('bus',), 3),
        (('truck',), 4),
    )
    tracker = JDETracker(opt, polygon, paths, frame_rate=frame_rate, polygon2=polygon2)
    timer = Timer()
    results = []
    frame_id = 1
    try:
        for path, img, img0 in dataloader:
            img0_clone = copy.copy(img0)
            # if frame_id % 1 == 0:
            #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
            # run tracking
            timer.tic()
            blob = torch.from_numpy(img).cuda().unsqueeze(
                0) if opt.gpus[0] >= 0 else torch.from_numpy(img).cpu().unsqueeze(0)
            online_targets, detection_boxes, out_of_polygon_tracklet = tracker.update(
                blob, img0)
            if f is not None:
                # Write one submission line per counted tracklet with a known
                # movement and category.
                for frame_ind, _, track_type, mov_id in out_of_polygon_tracklet:
                    if mov_id != 'undetermine' and track_type != 'undetermine':
                        for names, code in type_codes:
                            if track_type in names:
                                f.write('cam_' + str(cam_id) + ',' + str(frame_ind) +
                                        ',' + str(mov_id) + ',' + str(code) + '\n')
                                break
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                online_tlwhs.append(t.tlwh)
                online_ids.append(t.track_id)
            # bbox detection plot
            box_tlbrs = []
            box_scores = []
            box_occlusions = []
            types = []
            img_bbox = img0.copy()
            for box in detection_boxes:
                box_tlbrs.append(box.tlbr)
                box_scores.append(box.score)
                box_occlusions.append('occ' if box.occlusion_status == True else 'non_occ')
                types.append(box.infer_type())
            timer.toc()
            # save results (keyed by the input video's base name)
            for track in out_of_polygon_tracklet:
                frame_idx, id, classes, movement = track
                results.append((opt.input_video.split('/')[-1][:-4], frame_idx,
                                classes, movement))
            if show_image or save_dir is not None:
                online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                              frame_id=frame_id,
                                              fps=1. / timer.average_time,
                                              out_track=out_of_polygon_tracklet)
                bbox_im = vis.plot_detections(img_bbox, box_tlbrs, scores=box_scores,
                                              box_occlusion=None, types=types)
                if show_image:
                    cv2.polylines(online_im, [np.asarray(polygon)], True, (0, 255, 255))
                    cv2.polylines(bbox_im, [np.asarray(polygon)], True, (0, 255, 255))
                    cv2.polylines(img0_clone, [np.asarray(polygon)], True, (0, 255, 255))
                    cv2.imshow('online_im', online_im)
                    cv2.imshow('bbox_im', bbox_im)
                # if save_dir is not None:
                #     cv2.polylines(online_im,[np.asarray(polygon)],True,(0,255,255))
                #     cv2.polylines(bbox_im,[np.asarray(polygon)],True,(0,255,255))
                #     if polygon2 is not None:
                #         cv2.polylines(online_im,[np.asarray(polygon2)],True,(0,0,255))
                #         cv2.polylines(bbox_im,[np.asarray(polygon2)],True,(0,0,255))
                #     if line1 is not None and line2 is not None:
                #         cv2.polylines(online_im,[np.asarray(line1)],True,(134,128,255))
                #         cv2.polylines(online_im,[np.asarray(line2)],True,(134,128,255))
                #     cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
                #     cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)), bbox_im)
                #     cv2.imwrite(os.path.join(frame_dir, '{:05d}.jpg'.format(frame_id)), img0_clone)
            frame_id += 1
    finally:
        # BUG FIX: the original never closed the submission file handle.
        if f is not None:
            f.close()
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30, use_cuda=True):
    """Run the two-stage (forward_half) JDE tracker over a sequence, keeping a
    queue of previous-frame ROI features as temporal reference, with
    ``time_sync`` instrumentation around each stage.

    Args:
        opt: options namespace; must provide ``min_box_area``.
        dataloader: yields ``(path, img, img0)`` tuples.
        data_type: result format tag passed through to ``write_results``.
        result_filename: output path; raw detections are also pickled to
            ``result_filename + '.dets.pkl'``.
        save_dir: if set, annotated frames are written here as JPGs.
        show_image: if True, display each annotated frame with OpenCV.
        frame_rate: sequence frame rate, forwarded to the tracker.
        use_cuda: move the input blob to GPU when True.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    # Rolling buffer of per-frame ROI reference features.
    out_queue = Deque(maxlen=6)
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        time_clean()
        time_sync('tracker all')
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        with torch.no_grad():
            time_sync('forward_half and queue')
            # Stage 1: backbone half-forward, keeping only the top-k ROIs.
            out = tracker.model.backend.forward_half(
                blob, roi_num=tracker.model.backend.roi_top_k)
            # Re-pool with a wider ROI budget (300) to build reference features.
            out_ref = {}
            out_ref.update(out)
            out_ref['rois'], _ = tracker.model.backend.get_rois(
                out['rpn_map'], 300)
            refs = tracker.model.backend.forward_rois(out_ref)
            out_queue.append(refs)
            # Warm-up: pad the queue with the current refs until it is full,
            # so the first frames still see a full-length reference window.
            while len(out_queue) < out_queue.maxlen:
                out_queue.append(refs)
            time_sync('forward_half and queue')
            time_sync('forward_all')
            # Stage 2: finish the forward pass using the queued references.
            output, stuffs, _ = tracker.model.forward_half(out, out_queue)
            time_sync('forward_all')
        # Debug helpers kept from development (heatmap/ROI consistency checks):
        # info_debug(output)
        # jj = output['rois'][0] % out['rpn_map'].shape[-1]
        # ii = output['rois'][0] // out['rpn_map'].shape[-1]
        # u = output['rpn_map'][:, :, ii, jj]
        # print(output['hm'][:20]); input()
        # print((u.flatten().sigmoid() - output['hm'].flatten().sigmoid()).abs().max())
        # import pickle
        # with open('two_none.pkll', 'wb') as fd:
        #     pickle.dump(output, fd)
        # input()
        time_sync('tracker update')
        online_targets = tracker.update(blob, img0, output)
        time_sync('tracker update')
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Discard implausibly tall (vertical) boxes and tiny boxes.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        time_sync('tracker all')
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results (plus a pickle of the raw detections for offline analysis)
    import pickle
    with open(result_filename + '.dets.pkl', 'wb') as fd:
        pickle.dump(tracker.raw_dets, fd)
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    """Run the tracker over the second half of a sequence, feeding it
    DETR-style normalized PIL images loaded from each frame's path.

    Args:
        opt: options namespace; must provide ``min_box_area``.
        dataloader: yields ``(path, img, img0)``; only ``path`` (reloaded as a
            PIL image) and ``img0`` (original BGR frame) are used for tracking.
        data_type: result format tag passed through to ``write_results``.
        result_filename: output path for the MOT-format results file.
        save_dir: if set, annotated frames are written here as JPGs.
        show_image: if True, display each annotated frame with OpenCV.
        frame_rate: sequence frame rate, forwarded to the tracker.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    # Evaluation deliberately starts at the midpoint of the sequence.
    len_all = len(dataloader)
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    # PERF: the transform pipeline is loop-invariant; the original rebuilt
    # both Compose objects on every frame.
    normalize = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    preprocess = transforms.Compose(
        [T.RandomResize([800], max_size=1333), normalize])
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        # BUG FIX: the original contained this logging block twice, emitting
        # duplicate log lines every 20th frame.
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        img_pil = Image.open(path).convert('RGB')
        img_norm = preprocess(img_pil)
        # run tracking
        # NOTE: the original also built `blob = torch.from_numpy(img).cuda()...`
        # here but never used it — a wasted host-to-GPU copy, removed.
        timer.tic()
        online_targets = tracker.update(img_norm.cuda().unsqueeze(0), img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Discard implausibly tall (vertical) boxes and tiny boxes.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, save_dir='.', show_image=True, frame_rate=25):
    """Run the JDE tracker over a hard-coded image folder and dump the kept
    boxes as an integer CSV (``<sequence>.txt``) in MOT-like format.

    Note: this variant ignores ``dataloader``/``data_type``/``result_filename``
    and reads frames from ``path_root + path_root_index[5]`` directly.

    Args:
        opt: options namespace (tracker configuration).
        dataloader: unused (kept for signature parity with other variants).
        data_type: unused.
        result_filename: unused (output goes to ``'{p}.txt'``).
        save_dir: root for annotated-frame JPGs; a per-sequence subdir is made.
        show_image: if True, display each annotated frame with OpenCV.
        frame_rate: sequence frame rate, forwarded to the tracker.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    tracker = JDETracker(opt, frame_rate=frame_rate)
    p = path_root_index[5]  # hard-coded sequence selection
    if save_dir:
        save_dir = osp.join(save_dir, p)
        mkdir_if_missing(save_dir)
    image_path = getimage_path(path_root + p)
    timer = Timer()
    frame_id = -1
    result_array_list = []
    for path in image_path:
        img0 = cv2.imread(path)  # BGR
        # Padded resize to the network input size.
        img, _, _, _ = letterbox(img0, height=608, width=1088)
        # Normalize RGB: BGR->RGB, HWC->CHW, [0,255]->[0,1].
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0
        frame_id += 1
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_cref = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            confidence = t.score
            vertical = tlwh[2] / tlwh[3] > 1.6
            # Low-confidence boxes must be larger to be kept; the original
            # duplicated the whole branch for conf < 0.3 vs >= 0.3 with only
            # this minimum-area constant differing.
            min_area = 2700 if confidence < 0.3 else 1000
            area = tlwh[2] * tlwh[3]
            if min_area < area < 100000 and not vertical:
                res = [frame_id, tid]
                res += list(tlwh)
                res += [1, 0]
                online_tlwhs.append(tlwh)
                result_array_list.append(res)
                online_cref.append(confidence)
                online_ids.append(tid)
        timer.toc()
        print(frame_id)
        online_tlwhs = np.array(online_tlwhs)
        online_ids = np.array(online_ids)
        online_cref = np.array(online_cref)
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                          scores=online_cref, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
                cv2.waitKey(1)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
    res_array = np.array(result_array_list)
    # BUG FIX: the original indexed columns unconditionally and crashed with
    # IndexError when no detections survived filtering (empty 1-D array).
    if res_array.size:
        # For boxes whose top-left coordinate is negative, shrink the
        # width/height by the overhang, then clamp x/y to >= 0.
        tmp_data1 = np.where(res_array[:, [2]] < 0,
                             res_array[:, [2]] + res_array[:, [4]],
                             res_array[:, [4]])
        res_array[:, [4]] = tmp_data1
        tmp_data = np.where(res_array[:, [3]] < 0,
                            res_array[:, [3]] + res_array[:, [5]],
                            res_array[:, [5]])
        res_array[:, [5]] = tmp_data
        res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
        res_array = np.round(res_array, 0)
    np.savetxt("{}.txt".format(p), res_array,
               fmt='%d,%d,%d,%d,%d,%d,%d,%d', delimiter=',')
    return frame_id, timer.average_time, timer.calls
def eval_seq(opt, dataloader, data_type, result_filename, gt_filename, save_dir=None, show_image=True, frame_rate=30, conf_thres=None):
    """Run the class-separated JDE tracker over (possibly concatenated)
    sequences, writing both tracker results and ground truth in the same
    format so they can be scored directly against each other.

    Track ids are offset by ``tid_max`` so that ids stay unique across
    sequence boundaries (the tracker is re-created whenever a path containing
    '0000001' — i.e. a new sequence's first frame — is seen).

    Args:
        opt: options namespace; must provide ``min_box_area``.
        dataloader: yields ``(path, img, img0)`` tuples.
        data_type: result format tag passed through to ``write_results``.
        result_filename: output path for the tracker results file.
        gt_filename: output path for the ground-truth file.
        save_dir: if set, annotated frames are written here as JPGs.
        show_image: if True, display each annotated frame with OpenCV.
        frame_rate: sequence frame rate, forwarded to the tracker.

    Returns:
        tuple ``(frame_id, average_time, calls)`` timing summary.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    gts = []
    frame_id = 0
    # tid_max: id offset applied to the current sequence's track ids;
    # tid_temp: running maximum of offset ids seen so far (becomes the next
    # sequence's offset).
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id>400:
        #     break
        # A path containing '0000001' marks the first frame of a new sequence:
        # bump the id offset and restart the tracker with fresh state.
        if '0000001' in path:
            tid_max = tid_temp
            tracker = JDETracker(opt, frame_rate=frame_rate)
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # online_targets = tracker.update(blob, img0)
        # Per-class confidence thresholds for the class-separated tracker.
        online_targets = tracker.update_sep3(blob, img0,
                                             conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])  ## use class-separated tracker
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = []  #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max  # offset keeps ids unique across sequences
            tcid = t.class_id
            # vertical = tlwh[2] / tlwh[3] > 1.6
            # if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
                tid_temp = max(tid, tid_temp)
        timer.toc()
        gt_tlwhs = []
        gt_ids = []
        gt_cids = []
        # Derive the label file path from the image path
        # (images/.../xxx.jpg -> labels_with_ids/.../xxx.txt).
        gt_path = path.replace('images', 'labels_with_ids').replace('jpg', 'txt')
        gt_targets = read_gt_txts(gt_path)
        for (tlwh, tid, tcid) in gt_targets:
            gt_tlwhs.append(tlwh)
            gt_ids.append(tid)
            gt_cids.append(tcid)
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_cids))
        # save gts
        gts.append((frame_id + 1, gt_tlwhs, gt_ids, gt_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # save gts
    write_results(gt_filename, gts, data_type)
    return frame_id, timer.average_time, timer.calls