def frame_callback(vis, frame_idx):
    print("Frame idx", frame_idx)
    image = cv2.imread(
        seq_info["image_filenames"][frame_idx], cv2.IMREAD_COLOR)
    vis.set_image(image.copy())

    if seq_info["detections"] is not None:
        detections = deep_sort_app.create_detections(
            seq_info["detections"], frame_idx)
        vis.draw_detections(detections)

    # Draw the tracking results stored for this frame.
    mask = results[:, 0].astype(int) == frame_idx
    track_ids = results[mask, 1].astype(int)
    boxes = results[mask, 2:6]
    vis.draw_groundtruth(track_ids, boxes)

    if show_false_alarms:
        groundtruth = seq_info["groundtruth"]
        mask = groundtruth[:, 0].astype(int) == frame_idx
        gt_boxes = groundtruth[mask, 2:6]
        for box in boxes:
            # NOTE(nwojke): This is not strictly correct, because we don't
            # solve the assignment problem here.
            min_iou_overlap = 0.5
            if iou(box, gt_boxes).max() < min_iou_overlap:
                # No sufficiently overlapping groundtruth box: mark the
                # result as a false alarm (red, thick rectangle).
                vis.viewer.color = 0, 0, 255
                vis.viewer.thickness = 4
                vis.viewer.rectangle(*box.astype(int))
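# These snippets all call an `iou(bbox, candidates)` helper in the style of
# deep_sort's iou_matching module: one box in (top-left x, top-left y, width,
# height) format is compared against an (N, 4) array of candidate boxes and a
# 1-D array of IoU scores comes back. A minimal sketch under that assumption
# (not necessarily the exact implementation used by these projects):
import numpy as np


def iou(bbox, candidates):
    """Intersection over union of `bbox` against each row of `candidates`.

    Both arguments use (top-left x, top-left y, width, height) format.
    """
    bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
    candidates_tl = candidates[:, :2]
    candidates_br = candidates[:, :2] + candidates[:, 2:]

    # Intersection rectangle, clipped at zero when the boxes do not overlap.
    tl = np.maximum(bbox_tl, candidates_tl)
    br = np.minimum(bbox_br, candidates_br)
    wh = np.maximum(0., br - tl)

    area_intersection = wh.prod(axis=1)
    area_bbox = bbox[2:].prod()
    area_candidates = candidates[:, 2:].prod(axis=1)
    return area_intersection / (area_bbox + area_candidates - area_intersection)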
def frame_callback(vis, frame_idx):  # process one frame at a time
    print("Frame idx", frame_idx)
    image = cv2.imread(
        seq_info["image_filenames"][frame_idx], cv2.IMREAD_COLOR)
    vis.set_image(image.copy())

    if seq_info["detections"] is not None:
        detections = deep_sort_app.create_detections(
            seq_info["detections"], frame_idx)
        vis.draw_detections(detections)

    mask = results[:, 0].astype(int) == frame_idx
    # Extract the track ids (column 1) of the rows belonging to this frame.
    track_ids = results[mask, 1].astype(int)
    boxes = results[mask, 2:6]
    vis.draw_groundtruth(track_ids, boxes)

    # Take only the y values from the 10 stored foot positions.
    h_file = os.path.dirname(result_file)  # result/text/
    with open(h_file + '/ID_h.txt', 'r') as f_hi:
        line_splits = [int(l.split(',')[1])
                       for l in f_hi.read().splitlines()[1:]]
    i = 0
    # print(line_splits)

    if show_false_alarms:
        groundtruth = seq_info["groundtruth"]
        mask = groundtruth[:, 0].astype(int) == frame_idx
        gt_boxes = groundtruth[mask, 2:6]
        for box in boxes:
            # NOTE(nwojke): This is not strictly correct, because we don't
            # solve the assignment problem here.
            min_iou_overlap = 0.5
            if iou(box, gt_boxes).max() < min_iou_overlap:
                vis.viewer.color = 0, 0, 255
                vis.viewer.thickness = 4
                vis.viewer.rectangle(*box.astype(int))
                if IDnum != 0:
                    # Show only the tracked ID and mark its foot position.
                    vis.viewer.circle(
                        box[0] + box[2] / 2, box[1] + box[3], 3)
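# The ID_h.txt file read above is not part of the original deep_sort code; from
# the way it is parsed (skip the header line, take the second comma-separated
# field as an int) it is presumably a small CSV with one integer value
# (apparently a height or foot-y offset) per track id, e.g. hypothetically:
#
#   id,h
#   1,532
#   2,518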
def process_next_frame(self, frame):
    """
    Track objects from specified detections

    :param frame: frame data + list of detections, map-like object with
        mandatory keys: image, detections
    :return: detections populated with object ids
    """
    if len(frame['detections']['rois']) == 0:
        return frame

    # Convert boxes from (x1, y1, x2, y2) to (x, y, width, height) in place.
    frame['detections']['rois'][:, 2] -= frame['detections']['rois'][:, 0]
    frame['detections']['rois'][:, 3] -= frame['detections']['rois'][:, 1]

    frame['detections']['features'] = self.__feature_extractor(
        frame['image'], frame['detections']['rois'])

    self.__tracker.predict()
    self.__tracker.update([
        Detection(frame['detections']['rois'][idx],
                  frame['detections']['scores'][idx],
                  frame['detections']['features'][idx])
        for idx in range(len(frame['detections']['rois']))
    ])

    # Keep confirmed tracks and their boxes together so the indices stay
    # aligned when looking up track ids below.
    confirmed_tracks = [
        track for track in self.__tracker.tracks
        if track.state == TrackState.Confirmed
    ]
    tracked_bbox = [track.to_tlwh() for track in confirmed_tracks]

    # Assign a track id to every detection that overlaps a confirmed track.
    for idx_detection, roi in enumerate(frame['detections']['rois']):
        candidates = np.array(roi, dtype=float).reshape(1, 4)
        for idx_track, bbox in enumerate(tracked_bbox):
            if iou(bbox, candidates)[0] >= 0.7:
                frame['detections']['ids'][idx_detection] = \
                    confirmed_tracks[idx_track].track_id
    return frame
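# Sketch of the frame contract assumed by process_next_frame above (the field
# names come from the code; the literal layout is illustrative only):
#
#   frame = {
#       'image': <H x W x 3 image array>,
#       'detections': {
#           'rois':   np.array([[x1, y1, x2, y2], ...]),  # converted to tlwh in place
#           'scores': np.array([0.9, ...]),
#           'ids':    one slot per roi, filled with a track id whenever the
#                     detection overlaps a confirmed track with IoU >= 0.7
#       },
#   }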
def draw_trackers(self, tracks, gts):
    # TODO make another option which shows any track that overlaps with a
    # given object; this will require the groundtruth.
    SHOW_OVERLAPPED = True  # not referenced below yet
    if self.vis_method == "one-gt":
        if not tracks:  # empty track lists cause issues below
            return
        # For convenience we use self.gt_to_vis and self.tracks_to_vis:
        # a single groundtruth id and the set of track ids that overlap it.
        gt_ids = gts[0]
        gt_boxs = gts[1]
        assert len(gt_ids) == len(gt_boxs)
        if (len(gt_ids) > 0 and self.gt_to_vis is None) or \
                (self.gt_to_vis is not None and
                 self.gt_to_vis not in gt_ids and len(gt_ids) > 0):
            # The visualized groundtruth id changes in two cases: it is unset,
            # or the one we were following is no longer present. The
            # groundtruth is in the form (List(ids), List(boxes)).
            self.gt_to_vis = int(random.choice(gt_ids))
            self.tracks_to_vis = []
        if self.gt_to_vis is not None and len(gt_ids) > 0:
            # Add any tracks that overlap the selected groundtruth box.
            # TODO determine which of the bboxes corresponds to the index
            # that is being visualized.
            gt_box = gt_boxs[gt_ids.index(self.gt_to_vis)]
            track_boxes = np.asarray([t.to_tlwh() for t in tracks])
            track_indices = np.asarray([t.track_id for t in tracks])
            overlaps = iou(gt_box, track_boxes)
            # Tracks which overlap with the groundtruth box we picked.
            # TODO determine why additional tracks are being added.
            # np.nonzero returns a tuple of arrays, one per dimension.
            new_tracks_to_vis = track_indices[np.nonzero(overlaps)[0]]

            def union(a, b):
                """Return the union of two lists."""
                return list(set(a) | set(b))

            self.tracks_to_vis = union(self.tracks_to_vis, new_tracks_to_vis)

        # Don't modify `tracks` itself, since it was passed by reference.
        tracks_ = [t for t in tracks if t.track_id in self.tracks_to_vis]
        for track in tracks_:
            if not track.is_confirmed():  # or track.time_since_update > 0:
                continue
            self.viewer.color = create_unique_color_uchar(track.track_id)
            if track.time_since_update > 0:
                self.viewer.thickness = 2
            else:
                self.viewer.thickness = 5
            self.viewer.rectangle(*track.to_tlwh().astype(int),
                                  label=str(track.track_id))
    elif self.vis_method == "one-track":
        # These tracks appear to be sorted by seniority, so this naive
        # handling should work. Check whether the track we want to
        # visualize is still confirmed.
        confirmed_ids = [
            track.track_id for track in tracks if track.is_confirmed()
        ]
        if self.index_to_vis not in confirmed_ids and len(confirmed_ids) > 0:
            # The track must have died; pick a new one.
            self.index_to_vis = random.choice(confirmed_ids)
        # The cleanest way to get the selected track.
        tracks = [t for t in tracks if t.track_id == self.index_to_vis]
        if len(tracks) == 0:
            return
        track = tracks[0]
        self.viewer.color = create_unique_color_uchar(track.track_id)
        if track.time_since_update > 0:
            self.viewer.thickness = 2
        else:
            self.viewer.thickness = 5
        self.viewer.rectangle(*track.to_tlwh().astype(int),
                              label=str(track.track_id))
    elif self.vis_method == "show-all":
        for track in tracks:
            # HACK
            # if not track.is_confirmed():  # or track.time_since_update > 0:
            #     continue
            self.viewer.color = create_unique_color_uchar(track.track_id)
            if track.time_since_update > 0:
                self.viewer.thickness = 2
            else:
                self.viewer.thickness = 5
            self.viewer.rectangle(*track.to_tlwh().astype(int),
                                  label=str(track.track_id))
            # if track.time_since_update > 0:
            #     self.viewer.color = (255, 255, 255)
            #     self.viewer.thickness = 2
            #     self.viewer.rectangle(
            #         *track.to_tlwh().astype(int), label=str(track.track_id))
            # self.viewer.gaussian(track.mean[:2], track.covariance[:2, :2],
            #                      label="%d" % track.track_id)
    else:
        raise ValueError(
            "self.vis_method should be `show-all`, `one-track`, or `one-gt` "
            "but instead was {}".format(self.vis_method))
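# draw_trackers relies on create_unique_color_uchar to give each track id a
# stable, visually distinct color. A minimal sketch in the spirit of
# deep_sort's visualization helpers (the hue_step value and the brightness
# cycling are assumptions, not necessarily the implementation used here):
import colorsys


def create_unique_color_uchar(tag, hue_step=0.41):
    """Map an integer track id to a deterministic (r, g, b) triple in 0..255."""
    h = (tag * hue_step) % 1.0                 # spread hues around the color wheel
    v = 1.0 - (int(tag * hue_step) % 4) / 5.0  # cycle brightness to separate nearby ids
    r, g, b = colorsys.hsv_to_rgb(h, 1.0, v)
    return int(255 * r), int(255 * g), int(255 * b)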
gt_ids, gt_boxes = zip(
    *[(g['local_id'], _tlbr_to_tlwh(g['box']))
      for g in gt[frame_idx]]) if len(gt[frame_idx]) > 0 else ([], [])
det_ids, det_boxes = zip(
    *[(d['local_id'], _tlbr_to_tlwh(d['location']))
      for d in context[frame_idx]['context']]) if len(
          context[frame_idx]['context']) > 0 else ([], [])

candidates = np.asarray(det_boxes)
gt_boxes = np.asarray(gt_boxes)
costs = []
for gt_box in gt_boxes:
    if len(candidates) == 0:
        costs.append([])
    else:
        cost = 1. - iou(gt_box, candidates)
        # Pairs whose cost exceeds the threshold are treated as impossible
        # matches (NaN).
        cost = list(
            map(lambda x: np.nan if x >= args.iou_cost_th else x, cost))
        costs.append(cost)
acc.update(gt_ids, det_ids, costs)

# Aggregate the per-video metrics.
df_sub = mh.compute(acc, metrics=metrics, name=video_name)
if df is None:
    df = df_sub
else:
    df = pd.concat([df, df_sub])

# Write the motmetrics accumulator to disk.
pkl_filepath = os.path.join(args.save_root, video_name, 'motacc.pkl')
with open(pkl_filepath, 'wb') as f:
    pickle.dump(acc, f)
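# The evaluation fragment above assumes the motmetrics package: `acc` is an
# accumulator updated once per frame, `mh` is the metrics host whose compute()
# produces a per-video DataFrame, and `df` collects the results across videos.
# A minimal setup sketch (the chosen metric names are an assumption):
import motmetrics as mm

acc = mm.MOTAccumulator(auto_id=True)  # frame ids are assigned automatically on update()
mh = mm.metrics.create()               # provides mh.compute(acc, metrics=..., name=...)
metrics = ['num_frames', 'mota', 'motp', 'idf1']
df = None                              # per-video DataFrames are concatenated into this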