Example #1
def __init__(self, id, cam_id, box, time, feature=None, num_clusters=4, crops=None, orientation=None):
    self.id = id
    self.cam_id = cam_id
    self.f_avg = AverageEstimator()
    self.f_clust = ClusterFeature(num_clusters)
    self.f_orient = OrientationFeature(4, (feature, orientation))
    self.features = [feature]
    self.boxes = [box]
    self.timestamps = [time]
    self.crops = [crops]
    if feature is not None:
        self.f_avg.update(feature)
        self.f_clust.update(feature)
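
Both examples feed features into AverageEstimator. For context, a minimal sketch of an estimator compatible with the calls used in these snippets (update, get, merge) might look like the following; this is an illustrative reconstruction, not the module's actual implementation.

import numpy as np

class AverageEstimator:
    # Running mean of feature vectors (illustrative sketch).
    def __init__(self):
        self.sum = 0.
        self.count = 0

    def update(self, feature):
        # Accumulate the feature; works for scalars and numpy arrays alike.
        self.sum = self.sum + np.asarray(feature, dtype=np.float64)
        self.count += 1

    def get(self):
        # Mean of everything seen so far (0 if nothing was added).
        return self.sum / max(self.count, 1)

    def merge(self, other):
        # Combine two running means by pooling their sums and counts.
        self.sum = self.sum + other.sum
        self.count += other.count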
Example #2
def __init__(self, feature_len, initial_feature=(None, None)):
    assert feature_len > 0
    self.orientation_features = [AverageEstimator() for _ in range(feature_len)]
    self.is_initialized = False
    if initial_feature[0] is not None and initial_feature[1] is not None and initial_feature[1] >= 0:
        self.is_initialized = True
        self.orientation_features[initial_feature[1]].update(initial_feature[0])
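
Usage sketch for the constructor above: the second element of initial_feature selects one of feature_len orientation bins, and the first element seeds that bin's estimator. The 256-D feature size below is an arbitrary assumption for illustration.

import numpy as np

# A 4-bin orientation feature seeded with a feature vector in bin 1.
feat = np.random.rand(256).astype(np.float32)
orient = OrientationFeature(4, initial_feature=(feat, 1))
assert orient.is_initialized

# Without a valid (feature, orientation) pair it stays uninitialized.
empty = OrientationFeature(4)
assert not empty.is_initialized
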
class Track:
    def __init__(self,
                 id,
                 cam_id,
                 box,
                 time,
                 feature=None,
                 num_clusters=4,
                 crops=None,
                 orientation=None):
        self.id = id
        self.cam_id = cam_id
        self.f_avg = AverageEstimator()
        self.f_clust = ClusterFeature(num_clusters)
        self.f_orient = OrientationFeature(4, (feature, orientation))
        self.features = [feature]
        self.boxes = [box]
        self.timestamps = [time]
        self.crops = [crops]
        if feature is not None:
            self.f_avg.update(feature)
            self.f_clust.update(feature)

    def get_last_feature(self):
        return self.features[-1]

    def get_end_time(self):
        return self.timestamps[-1]

    def get_start_time(self):
        return self.timestamps[0]

    def get_last_box(self):
        return self.boxes[-1]

    def __len__(self):
        return len(self.timestamps)

    def _interpolate(self, target_box, timestamp, skip_size):
        # Linearly interpolate boxes for the skipped frames between the last
        # stored box and target_box; interpolated entries carry no feature.
        last_box = self.get_last_box()
        for t in range(1, skip_size):
            interp_box = [
                int(b1 + (b2 - b1) / skip_size * t)
                for b1, b2 in zip(last_box, target_box)
            ]
            self.boxes.append(interp_box)
            self.timestamps.append(self.get_end_time() + 1)
            self.features.append(None)

    def _filter_last_box(self, filter_speed):
        # Exponentially smooth the newest box toward the previous one when the
        # two detections come from consecutive frames.
        if self.timestamps[-1] - self.timestamps[-2] == 1:
            filtered_box = list(self.boxes[-2])
            for j in range(len(self.boxes[-1])):
                filtered_box[j] = int((1 - filter_speed) * filtered_box[j] +
                                      filter_speed * self.boxes[-1][j])
            self.boxes[-1] = tuple(filtered_box)

    def add_detection(self,
                      box,
                      feature,
                      timestamp,
                      max_skip_size=1,
                      filter_speed=0.7,
                      crop=None):
        skip_size = timestamp - self.get_end_time()
        if 1 < skip_size <= max_skip_size:
            self._interpolate(box, timestamp, skip_size)
            assert self.get_end_time() == timestamp - 1

        self.boxes.append(box)
        self.timestamps.append(timestamp)
        self.features.append(feature)
        self._filter_last_box(filter_speed)
        if feature is not None:
            self.f_clust.update(feature)
            self.f_avg.update(feature)
        if crop is not None:
            self.crops.append(crop)

    def merge_continuation(self, other, interpolate_time_thresh=0):
        assert self.get_end_time() < other.get_start_time()
        skip_size = other.get_start_time() - self.get_end_time()
        if 1 < skip_size <= interpolate_time_thresh:
            self._interpolate(other.boxes[0], other.get_start_time(),
                              skip_size)
            assert self.get_end_time() == other.get_start_time() - 1

        self.f_avg.merge(other.f_avg)
        self.f_clust.merge(self.features, other.f_clust, other.features)
        self.f_orient.merge(other.f_orient)
        self.timestamps += other.timestamps
        self.boxes += other.boxes
        self.features += other.features
        self.crops += other.crops
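
A short usage sketch for Track, assuming AverageEstimator, ClusterFeature and OrientationFeature come from the same module. A detection arriving after a small gap triggers _interpolate, which back-fills the missing frames.

# Start a track at t=0, then add detections at t=1 and t=3.
track = Track(id=0, cam_id=0, box=(10, 10, 50, 50), time=0)
track.add_detection(box=(14, 10, 54, 50), feature=None, timestamp=1)
track.add_detection(box=(22, 10, 62, 50), feature=None, timestamp=3,
                    max_skip_size=2)  # gap of 2 frames -> one interpolated box

print(len(track))            # 4 entries, one per t = 0, 1, 2, 3
print(track.get_last_box())  # newest box, smoothed by _filter_last_box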
def run(params, config, capture, detector, reid):
    win_name = 'Multi camera tracking'
    frame_number = 0
    avg_latency = AverageEstimator()
    output_detections = [[] for _ in range(capture.get_num_sources())]
    key = -1

    if config['normalizer_config']['enabled']:
        capture.add_transform(
            NormalizerCLAHE(
                config['normalizer_config']['clip_limit'],
                config['normalizer_config']['tile_size'],
            ))

    tracker = MultiCameraTracker(capture.get_num_sources(),
                                 reid,
                                 config['sct_config'],
                                 **config['mct_config'],
                                 visual_analyze=config['analyzer'])

    thread_body = FramesThreadBody(capture,
                                   max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

    if len(params.output_video):
        frame_size, fps = capture.get_source_parameters()
        target_width, target_height = get_target_size(
            frame_size, None, **config['visualization_config'])
        video_output_size = (target_width, target_height)
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        output_video = cv.VideoWriter(params.output_video, fourcc, min(fps),
                                      video_output_size)
    else:
        output_video = None

    prev_frames = thread_body.frames_queue.get()
    detector.run_async(prev_frames, frame_number)
    presenter = monitors.Presenter(params.utilization_monitors, 0)

    while thread_body.process:
        if not params.no_show:
            key = check_pressed_keys(key)
            if key == 27:
                break
            presenter.handleKey(key)
        start = time.perf_counter()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.wait_and_grab()
        if params.save_detections:
            update_detections(output_detections, all_detections, frame_number)
        frame_number += 1
        detector.run_async(frames, frame_number)

        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(prev_frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        latency = max(time.perf_counter() - start, sys.float_info.epsilon)
        avg_latency.update(latency)
        fps = round(1. / latency, 1)

        vis = visualize_multicam_detections(prev_frames, tracked_objects, fps,
                                            **config['visualization_config'])
        presenter.drawGraphs(vis)
        if not params.no_show:
            cv.imshow(win_name, vis)

        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

        print('\rProcessing frame: {}, fps = {} (avg_fps = {:.3})'.format(
            frame_number, fps, 1. / avg_latency.get()),
              end="")
        prev_frames, frames = frames, prev_frames
    print(presenter.reportMeans())
    print('')

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        save_json_file(params.history_file,
                       tracker.get_all_tracks_history(),
                       description='History file')
    if len(params.save_detections):
        save_json_file(params.save_detections,
                       output_detections,
                       description='Detections')

    if len(config['embeddings']['save_path']):
        save_embeddings(tracker.scts, **config['embeddings'])
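
The run loop above relies on FramesThreadBody for asynchronous frame grabbing. Below is a minimal sketch consistent with how it is used (a callable thread target exposing a process flag and a frames_queue), assuming capture.get_frames() returns a (has_frames, frames) pair.

import time
import queue

class FramesThreadBody:
    def __init__(self, capture, max_queue_length=2):
        self.process = True
        self.frames_queue = queue.Queue()
        self.capture = capture
        self.max_queue_length = max_queue_length

    def __call__(self):
        while self.process:
            # Throttle the producer when the consumer falls behind.
            if self.frames_queue.qsize() > self.max_queue_length:
                time.sleep(0.1)
                continue
            has_frames, frames = self.capture.get_frames()
            if not has_frames and self.frames_queue.empty():
                # Source exhausted and nothing left to consume: stop.
                self.process = False
                break
            if has_frames:
                self.frames_queue.put(frames)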
def run(params, config, capture, detector, reid):
    global ix, iy  # click coordinates updated by the getMousePointer callback
    ix, iy = -1, -1
    
    # Four image-plane points and their ground-plane targets; the homography H
    # maps the camera view onto a 400x700 top-down plan.
    pts_src = np.array([[561, 1022], [990, 698], [486, 273], [95, 504]], dtype='float32')
    pts_dest = np.array([[0, 0], [0, 400], [400, 700], [0, 700]], dtype='float32')
    h, status = cv.findHomography(pts_src, pts_dest)
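    # Illustrative sanity check (an addition, not part of the original flow):
    # the first source point should project close to its target corner (0, 0).
    check = cv.perspectiveTransform(np.array([[[561, 1022]]], dtype='float32'), h)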

    
    win_name = 'Multi camera tracking'
    frame_number = 0
    avg_latency = AverageEstimator()
    output_detections = [[] for _ in range(capture.get_num_sources())]
    key = -1
    refObj = []
    
    if config['normalizer_config']['enabled']:
        capture.add_transform(
            NormalizerCLAHE(
                config['normalizer_config']['clip_limit'],
                config['normalizer_config']['tile_size'],
            )
        )

    tracker = MultiCameraTracker(capture.get_num_sources(), reid, config['sct_config'], **config['mct_config'],
                                 visual_analyze=config['analyzer'])

    thread_body = FramesThreadBody(capture, max_queue_length=len(capture.captures) * 2)
    frames_thread = Thread(target=thread_body)
    frames_thread.start()

  
    if len(params.output_video):
        frame_size, fps = capture.get_source_parameters()
        target_width, target_height = get_target_size(frame_size, None, **config['visualization_config'])
        video_output_size = (target_width, target_height)
        fourcc = cv.VideoWriter_fourcc(*'MJPG')
        output_video = cv.VideoWriter(params.output_video, fourcc, min(fps), video_output_size)
    else:
        output_video = None

    prev_frames = thread_body.frames_queue.get()
    detector.run_async(prev_frames, frame_number)

    if not params.no_show:
        # Register the mouse callback once, up front; the window must exist
        # before cv.setMouseCallback can attach to it.
        cv.namedWindow(win_name)
        cv.setMouseCallback(win_name, getMousePointer)

    while thread_body.process:
        if not params.no_show:
            key = check_pressed_keys(key)
            if key == 27:
                break
        start = time.time()
        try:
            frames = thread_body.frames_queue.get_nowait()
        except queue.Empty:
            frames = None

        if frames is None:
            continue

        all_detections = detector.wait_and_grab()
        
        # Debug dump of the raw detector output: (box, confidence[, mask]).
        for det in all_detections:
            for obj in det:
                print("Boxes:", obj[0])
                print("Confidence:", obj[1])
                
        if params.save_detections:
            update_detections(output_detections, all_detections, frame_number)
        frame_number += 1
        detector.run_async(frames, frame_number)

        all_masks = [[] for _ in range(len(all_detections))]
        for i, detections in enumerate(all_detections):
            all_detections[i] = [det[0] for det in detections]
            all_masks[i] = [det[2] for det in detections if len(det) == 3]

        tracker.process(prev_frames, all_detections, all_masks)
        tracked_objects = tracker.get_tracked_objects()

        latency = max(time.time() - start, sys.float_info.epsilon)
        avg_latency.update(latency)
        fps = round(1. / latency, 1)

        vis = visualize_multicam_detections(prev_frames, tracked_objects, fps,
                                            **config['visualization_config'], h=h)
                
        if not params.no_show:
            # ix, iy hold the last click position recorded by getMousePointer;
            # two clicks define a reference rectangle that is drawn once.
            if ix != -1 and iy != -1:
                refObj.append((ix, iy))
                ix = -1
                iy = -1
                print(len(refObj))

            if len(refObj) == 2:
                print("Len 2 Rectangle Drawn.")
                vis = cv.rectangle(vis, refObj[0], refObj[1], (255, 0, 0), 2)
                refObj.clear()

            cv.imshow(win_name, vis)
            
            
        if output_video:
            output_video.write(cv.resize(vis, video_output_size))

        prev_frames, frames = frames, prev_frames

    thread_body.process = False
    frames_thread.join()

    if len(params.history_file):
        save_json_file(params.history_file, tracker.get_all_tracks_history(), description='History file')
    if len(params.save_detections):
        save_json_file(params.save_detections, output_detections, description='Detections')

    if len(config['embeddings']['save_path']):
        save_embeddings(tracker.scts, **config['embeddings'])
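
The second run variant references a getMousePointer callback that is not shown. A hypothetical sketch matching the way ix and iy are consumed above (module-level globals holding the last left click):

ix, iy = -1, -1

def getMousePointer(event, x, y, flags, param):
    # Record the last left-button click position in module-level ix, iy.
    global ix, iy
    if event == cv.EVENT_LBUTTONDOWN:
        ix, iy = x, y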