Example #1
# Tracker, get_postprocess_trans and post_process are helpers from the
# surrounding project.
class TrackerManager(object):
    """Wraps a Tracker for a single video and feeds it post-processed detections."""

    def __init__(self, vid_list, init_time):
        self.init_time = init_time

        # vid_list layout: [filename, video_id, camera_id, max_frames, width, height]
        video_id, camera_id, max_frames, width, height = vid_list[1:]
        self.max_frames = max_frames

        self.tracker = Tracker(init_time, video_id, max_frames, camera_id,
                               width, height)

        self.postprocess_trans = get_postprocess_trans(height, width)

        self.prev_img = None
        self.n = 0

    def process_output(self, dets):
        # Map raw network outputs back to image coordinates, then advance the tracker.
        dets = post_process(dets, self.postprocess_trans)[0]
        self.tracker.step(dets)
        self.n += 1

    def is_done(self):
        return self.n >= self.max_frames

    def finalize(self):
        self.tracker.finalize()
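
A minimal usage sketch for TrackerManager; the run_detector call and the vid_list values below are placeholders for illustration, not part of the original code.

import time

vid_list = ["cam_1.mp4", 1, 1, 1800, 1280, 960]  # filename, video_id, camera_id, max_frames, width, height (example values)
manager = TrackerManager(vid_list, init_time=time.time())

while not manager.is_done():
    dets = run_detector()  # hypothetical: returns one dict of raw detections per frame
    manager.process_output(dets)

manager.finalize()
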
Example #2
import sys
import time

# Tracker, get_video_params, get_postprocess_trans and post_process are helpers
# from the surrounding project.

def tracker_thread_fn(q_in,
                      init_time,
                      path,
                      debug=0,
                      new_thresh=0.4,
                      track_thresh=0.2):
    """Consumes raw detections from a queue and runs tracking for one video."""
    video_id, camera_id, max_frames, width, height = get_video_params(path)

    postprocess_trans = get_postprocess_trans(height, width)
    tracker = Tracker(init_time,
                      video_id,
                      max_frames,
                      camera_id,
                      width,
                      height,
                      new_thresh=new_thresh,
                      track_thresh=track_thresh)

    for i in range(max_frames):
        # Block until the detector thread pushes the next frame's detections.
        dets = q_in.get()

        # Move detection tensors to the CPU before post-processing.
        for k in dets:
            dets[k] = dets[k].detach().cpu().numpy()

        dets = post_process(dets, postprocess_trans,
                            track_thresh=track_thresh)[0]
        tracker.step(dets)

        if debug > 0 and i % 100 == 99:
            frame_time = time.time() - init_time
            FPS = (i + 1) / frame_time
            print("At frame {} FPS {}".format(i + 1, FPS), file=sys.stderr)

    tracker.finalize()
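
One plausible way to launch tracker_thread_fn on its own thread; the queue size, video path and the producer loop mentioned in the comment are illustrative assumptions.

import queue
import threading
import time

q_in = queue.Queue(maxsize=8)
worker = threading.Thread(target=tracker_thread_fn,
                          args=(q_in, time.time(), "videos/cam_1.mp4"),
                          kwargs={"debug": 1})
worker.start()

# A detector loop elsewhere would push one dict of detection tensors per frame:
#     q_in.put(dets)

worker.join()
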
Example #3
import os
import time

import cv2
import numpy as np
import torch

# Tracker, get_img_transform, get_postprocess_trans, get_region_mask and
# post_process are helpers from the surrounding project.

class VideoManager(object):
    """Reads video frames, preprocesses them for the model, and tracks the outputs."""

    def __init__(self, path, vid_list, model_loading_time):
        # Shift the reference time so FPS accounting excludes model loading.
        init_time = time.time() - model_loading_time
        self.init_time = init_time

        vid_filename = vid_list[0]
        video_path = os.path.join(path, vid_filename)
        self.cap = cv2.VideoCapture(video_path)

        video_id, camera_id, max_frames, width, height = vid_list[1:]
        self.max_frames = max_frames

        self.tracker = Tracker(init_time, video_id, max_frames, camera_id, width, height)

        self.preprocess_function = get_img_transform(height, width, new_size=512)
        self.postprocess_trans = get_postprocess_trans(height, width)

        # Binary mask that keeps only this camera's region of interest.
        region_mask = get_region_mask(camera_id, height, width)
        self.region_mask = np.where(region_mask, 255, 0).astype(np.uint8)

        self.prev_img = None
        self.n = 0

    def get_img(self):
        # Reads are bounded by max_frames, so the return flag is not checked here.
        ret, frame = self.cap.read()
        frame = cv2.bitwise_and(frame, frame, mask=self.region_mask)
        img = self.preprocess_function(frame)
        img = torch.from_numpy(img).to(torch.device('cuda'))
        self.n += 1
        # The first frame is paired with itself; afterwards the previous frame is returned.
        prev_img = self.prev_img if self.prev_img is not None else img
        self.prev_img = img

        return img, prev_img

    def process_output(self, dets):
        dets = post_process(dets, self.postprocess_trans)[0]
        self.tracker.step(dets)

    def is_done(self):
        return self.n >= self.max_frames

    def finalize(self):
        self.tracker.finalize()
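
A hypothetical driver loop for VideoManager; run_model, the directory, the vid_list values and model_loading_time are assumptions standing in for the real detector call and configuration.

vid_list = ["cam_1.mp4", 1, 1, 1800, 1280, 960]  # example values
manager = VideoManager("videos", vid_list, model_loading_time=3.5)

while not manager.is_done():
    img, prev_img = manager.get_img()
    dets = run_model(img, prev_img)  # hypothetical forward pass returning a dict of detection tensors
    manager.process_output(dets)

manager.finalize()
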
Example #4
import sys
import time

import cv2
import numpy as np
import torch

# create_model, load_model, Tracker, get_video_params, get_img_transform,
# get_postprocess_trans, get_region_mask, sigmoid_output, generic_decode and
# post_process are helpers from the surrounding project.

def run_single_video_serial(path, debug=0, full_precision=False):
    """Runs detection and tracking for one video in a single process, frame by frame."""
    init_time = time.time()
    if debug >= 1:
        print("Starting for video: {}".format(path), file=sys.stderr)

    video_id, camera_id, max_frames, width, height = get_video_params(path)

    cap = cv2.VideoCapture(path)

    # Load the detection model onto the GPU.
    model = create_model()
    model = load_model(model, 'checkpoints/coco_tracking.pth')
    model.to(torch.device('cuda'))
    model.eval()

    tracker = Tracker(init_time,
                      video_id,
                      max_frames,
                      camera_id,
                      width,
                      height,
                      debug=debug)

    preprocess_function = get_img_transform(height, width, new_size=512)
    postprocess_trans = get_postprocess_trans(height, width)

    # Keep only this camera's region of interest; everything else is blacked out.
    region_mask = get_region_mask(camera_id, height, width)
    region_mask = np.where(region_mask, 255, 0).astype(np.uint8)

    if debug > 2:
        cv2.imwrite("mask.png", region_mask)

    pre_img = None

    for i in range(max_frames):
        ret, frame = cap.read()
        if debug >= 2:
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)
            tracker.frame = np.copy(frame)

        frame = cv2.bitwise_and(frame, frame, mask=region_mask)

        img = preprocess_function(frame)
        img = torch.from_numpy(img).to(torch.device('cuda'))

        # The first frame is paired with itself as the "previous" frame.
        if pre_img is None:
            pre_img = img

        # Mixed-precision inference unless full precision is requested.
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=not full_precision):
                out = model(img, pre_img, None)[-1]
                out = sigmoid_output(out)
                dets = generic_decode(out)

        pre_img = img

        # Move detection tensors to the CPU before post-processing.
        for k in dets:
            dets[k] = dets[k].detach().cpu().numpy()

        dets = post_process(dets, postprocess_trans)[0]
        tracker.step(dets)

        if debug >= 1 and i % 100 == 99:
            frame_time = time.time() - init_time
            FPS = (i + 1) / frame_time
            print("At frame {} FPS {}".format(i + 1, FPS), file=sys.stderr)

    tracker.finalize()

    if debug >= 1:
        print("Finished video: {}".format(path), file=sys.stderr)