Example #1
    def update(self, img0, im_blob):
        detections = mot_detector(img0, im_blob, self.model,
                                  self.opt.conf_thres, self.opt.nms_thres,
                                  self.opt.img_size)
        self.frame_id += 1
        # keep only detections whose box area (width * height) exceeds 250 px
        detections = [d for d in detections if numpy.prod(d.tlwh[-2:]) > 250]
        frame = vis.plot_tracking(img0, [act.tlwh for act in detections],
                                  [act.track_id for act in detections],
                                  frame_id=self.frame_id,
                                  fps=1.)
        # debug view of the raw detections
        import cv2  # local import; normally this belongs at module level
        cv2.imshow('detections', frame)
        cv2.waitKey(1)
        output_stracks, activated, lost, tracked = self._update(detections)
        lost_frame = vis.plot_tracking(img0, [act.tlwh for act in lost],
                                       [act.track_id for act in lost],
                                       frame_id=self.frame_id,
                                       fps=1.)
        # separate name so the `tracked` list from _update is not shadowed
        tracked_frame = vis.plot_tracking(img0,
                                          [act.tlwh for act in output_stracks],
                                          [act.track_id for act in output_stracks],
                                          frame_id=self.frame_id,
                                          fps=1.)
        cv2.imshow('output_stracks', tracked_frame)
        cv2.imshow('lost', lost_frame)
        cv2.waitKey(1)
        # detections1 = self.substractor.detect(img0)
        # output_stracks, activated, lost = self._update(detections1)

        # keep tracks lost for fewer than 10 frames in the output
        return output_stracks + [t for t in lost if self.time_lost(t) < 10]
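The list comprehension above uses numpy.prod over the last two tlwh components (width and height) as a quick area filter. A minimal standalone illustration of the same check, with hypothetical values:

import numpy as np

tlwh = np.array([10.0, 20.0, 30.0, 40.0])  # top-left x, top-left y, width, height
area = np.prod(tlwh[-2:])                  # width * height = 1200.0
keep = area > 250                          # same threshold as in the example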
Example #2
def test(args):
    # Step 1: initialize ACL and ACL runtime
    acl_resource = AclLiteResource()

    # 1.2: one line of code, call the 'init' method of the AclLiteResource object to initialize ACL and the ACL runtime
    acl_resource.init()

    # Step 2: Load models
    mot_model = AclLiteModel('../model/mot_v2.om')

    dataloader = loader.LoadImages(args.test_img)

    # initialize tracker
    tracker = JDETracker(args, mot_model, frame_rate=30)
    timer = Timer()
    results = []

    # img:  c h w; 3 608 1088 (preprocessed network input)
    # img0: h w c; 608 1088 3 (original BGR frame)
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0 and frame_id != 0:
            print('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking, start tracking timer
        timer.tic()

        # list of Tracklet; see multitracker.STrack
        online_targets = tracker.update(np.array([img]), img0)

        # prepare for drawing, get all bbox and id
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > args.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # draw bbox and id
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join('../data', 'test_output.jpg'), online_im)

    # verify if result is expected
    result = image_contrast('../data/test_output.jpg', args.verify_img)
    print(result)
    if result > 420 or result < 0:
        print("Similarity Test Fail!")
        sys.exit(1)
    else:
        print("Similarity Test Pass!")
        sys.exit(0)
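The per-target filter above (a minimum box area plus an aspect-ratio test discarding boxes wider than 1.6 times their height) recurs in most of the examples below. Factored into a helper, under the same tlwh layout assumption:

def keep_box(tlwh, min_box_area):
    """Drop tiny boxes and boxes wider than 1.6x their height
    (unlikely to be upright pedestrians)."""
    w, h = tlwh[2], tlwh[3]
    return w * h > min_box_area and w / h <= 1.6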
Example #3
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets, scores = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        frame_time = []
        timestamp = time.time()
        localTime = time.localtime(timestamp)
        strTime = time.strftime("%Y-%m-%d %H:%M:%S", localTime)

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                frame_time.append(strTime)
        timer.toc()
        # save results
        results.append(
            (frame_id + 1, online_tlwhs, online_ids, frame_time, scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=scores,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #4
def run(dataloader, result_filename, save_dir=None, show_image=True):
    if save_dir is not None:
        mkdirs(save_dir)

    tracker = OnlineTracker()
    timer = Timer()
    results = []
    wait_time = 1
    #images =[]
    for frame_id, batch in enumerate(dataloader):
        if frame_id % 50 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        frame, det_tlwhs, det_scores, _, _ = batch

        # run tracking
        timer.tic()
        online_targets = tracker.update(frame, det_tlwhs, None)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            online_tlwhs.append(t.tlwh)
            online_ids.append(t.track_id)
        timer.toc()

        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))

        online_im = vis.plot_tracking(frame,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        #online_im = vis.plot_trajectory(frame, online_tlwhs, online_ids)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        # online_im = online_im[:, :, ::-1]
        # images.append(online_im)
        key = cv2.waitKey(wait_time)
        key = chr(key % 128).lower()
        if key == 'q':
            exit(0)
        elif key == 'p':
            cv2.waitKey(0)
        elif key == 'a':
            wait_time = int(not wait_time)

    # save results
    #imageio.mimsave('MOT16-04.gif', images, fps=10)
    write_results(result_filename, results)
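The waitKey block above implements a small keyboard interface: 'q' quits, 'p' pauses until the next keypress, and 'a' toggles wait_time between 1 (free-running) and 0 (single-step). A self-contained sketch of the same pattern, assuming some frame is rendered inside the loop:

import cv2

wait_time = 1  # 1 ms poll -> free-running; 0 -> block until a key is pressed
while True:
    # ... render and cv2.imshow() a frame here ...
    key = cv2.waitKey(wait_time)
    key = chr(key % 128).lower()  # -1 (no key) maps to chr(127), matched by nothing below
    if key == 'q':    # quit
        break
    elif key == 'p':  # pause until any key
        cv2.waitKey(0)
    elif key == 'a':  # toggle free-running / single-step
        wait_time = int(not wait_time)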
Example #5
def eval_seq(dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             args=None):
    if save_dir is not None:
        mkdirs(save_dir)

    tracker = OnlineTracker(metric_net=args.metric, ide=args.ide)
    timer = Timer()
    results = []
    wait_time = 1
    for frame_id, batch in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        frame, det_tlwhs, det_scores, _, _ = batch

        # run tracking
        timer.tic()
        online_targets = tracker.update(frame, det_tlwhs, None)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            online_tlwhs.append(t.tlwh)
            online_ids.append(t.track_id)
        timer.toc()

        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))

        online_im = vis.plot_tracking(frame,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)

        key = cv2.waitKey(wait_time)
        key = chr(key % 128).lower()
        if key == 'q':
            exit(0)
        elif key == 'p':
            cv2.waitKey(0)
        elif key == 'a':
            wait_time = int(not wait_time)

    # save results
    write_results(result_filename, results, data_type)
Example #6
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    if opt.save_var:
        import pickle
        # no leading slash here: os.path.join drops everything before an
        # absolute component, which would discard opt.save_var
        filename = os.path.join(opt.save_var, 'MOT_result.pickle')
        with open(filename, 'wb') as f:
            pickle.dump(results, f)
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
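A note on the pickle path above: os.path.join discards every component that precedes an absolute argument, so a leading slash on 'MOT_result.pickle' would silently drop opt.save_var. On POSIX paths:

import os

os.path.join('/out', '/MOT_result.pickle')  # -> '/MOT_result.pickle' (prefix lost)
os.path.join('/out', 'MOT_result.pickle')   # -> '/out/MOT_result.pickle'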
Example #7
def tracking(data_root, seq, result_root, save_dir=None, show_image=True,
             from_video=False, video_path=None, write_video=False,
             live_demo=False):
    if save_dir is not None:
        mkdirs(save_dir)
    yolo3 = YOLO3("detector/cfg/yolo_v3.cfg", "detector/yolov3.weights",
                  "detector/cfg/coco.names", is_xywh=True)
    tracker = OnlineTracker()
    timer = Timer()
    wait_time = 1
    frame_no = 1

    if from_video:
        assert os.path.isfile(video_path), "Error: invalid video path"
        if live_demo:
            vdo = cv2.VideoCapture("http://10.196.30.16:8081") ##http://10.3.0.24:8081
        else:
            vdo = cv2.VideoCapture()
            vdo.open(video_path)
        im_width = int(vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
        im_height = int(vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
        if write_video:
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            output = cv2.VideoWriter(os.path.join(result_root, '{}.avi'.format(seq)),
                                     fourcc, 10, (im_width, im_height))
        while vdo.grab():
            if frame_no % 10 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_no, 1. / max(1e-5, timer.average_time)))
            timer.tic()
            _, image = vdo.retrieve()
            bbox_xywh, det_scores, cls_ids = yolo3(image)
            if bbox_xywh is not None:
                # keep only the person class (id 0) and widen boxes slightly
                mask = cls_ids == 0
                bbox_xywh = bbox_xywh[mask]
                bbox_xywh[:, 3] *= 1.15
                # convert (center x, center y, w, h) to top-left based tlwh
                tlwhs = np.empty_like(bbox_xywh[:, :4])
                tlwhs[:, 2] = bbox_xywh[:, 2]  # w
                tlwhs[:, 3] = bbox_xywh[:, 3]  # h
                tlwhs[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2  # x1
                tlwhs[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2  # y1

                # run tracking
                online_targets = tracker.update(image, tlwhs, det_scores)
                online_tlwhs = []
                online_ids = []
                for t in online_targets:
                    online_tlwhs.append(t.tlwh)
                    online_ids.append(t.track_id)
                timer.toc()
                frame_no += 1
                online_im = vis.plot_tracking(image, online_tlwhs, online_ids,
                                              frame_id=frame_no,
                                              fps=1. / timer.average_time)
                # online_im = vis.plot_trajectory(frame, online_tlwhs, online_ids)
                if show_image:
                    cv2.imshow('online_im', online_im)
                if save_dir is not None:
                    cv2.imwrite(os.path.join(save_dir,
                                             '{:05d}.jpg'.format(frame_no)),
                                online_im)
                if write_video:
                    output.write(online_im)
                key = cv2.waitKey(wait_time)
                key = chr(key % 128).lower()
                if key == 'q':
                    exit(0)
                elif key == 'p':
                    cv2.waitKey(0)
                elif key == 'a':
                    wait_time = int(not wait_time)
    else:
        directory = os.path.join(data_root, seq, 'img1')
        TEST_IMAGE_PATHS = os.listdir(directory)
        TEST_IMAGE_PATHS.sort(key=str.lower)
        results = []
        if write_video:
            image_path = os.path.join(directory, TEST_IMAGE_PATHS[0])
            image = cv2.imread(image_path)
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            output = cv2.VideoWriter(os.path.join(result_root, '{}.avi'.format(seq)),
                                     fourcc, 10, (image.shape[1], image.shape[0]))
        for image_name in TEST_IMAGE_PATHS:
            if frame_no % 50 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_no, 1. / max(1e-5, timer.average_time)))
            image_path = os.path.join(directory, image_name)
            timer.tic()
            image = cv2.imread(image_path)
            bbox_xywh, det_scores, cls_ids = yolo3(image)
            # keep only the person class (id 0) and widen boxes slightly
            mask = cls_ids == 0
            bbox_xywh = bbox_xywh[mask]
            bbox_xywh[:, 3] *= 1.15
            # convert (center x, center y, w, h) to top-left based tlwh
            tlwhs = np.empty_like(bbox_xywh[:, :4])
            tlwhs[:, 2] = bbox_xywh[:, 2]  # w
            tlwhs[:, 3] = bbox_xywh[:, 3]  # h
            tlwhs[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2  # x1
            tlwhs[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2  # y1

            # run tracking
            online_targets = tracker.update(image, tlwhs, det_scores)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                online_tlwhs.append(t.tlwh)
                online_ids.append(t.track_id)
            timer.toc()

            # save results
            results.append((frame_no, online_tlwhs, online_ids))
            frame_no += 1
            online_im = vis.plot_tracking(image, online_tlwhs, online_ids,
                                          frame_id=frame_no,
                                          fps=1. / timer.average_time)

            # online_im = vis.plot_trajectory(frame, online_tlwhs, online_ids)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir,
                                         '{:05d}.jpg'.format(frame_no)),
                            online_im)
            if write_video:
                # the VideoWriter was already created before the loop; just append
                output.write(online_im)
            key = cv2.waitKey(wait_time)
            key = chr(key % 128).lower()
            if key == 'q':
                exit(0)
            elif key == 'p':
                cv2.waitKey(0)
            elif key == 'a':
                wait_time = int(not wait_time)

        # save results
        #imageio.mimsave('MOT16-04.gif', images, fps=10)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        write_results(result_filename, results)
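The four tlwhs assignments that appear twice above convert detector output from center-based (cx, cy, w, h) boxes to top-left-based (x1, y1, w, h). The same conversion factored into a helper, assuming a NumPy array of boxes:

import numpy as np

def xywh_center_to_tlwh(bbox_xywh):
    """Convert (center x, center y, w, h) rows to (top-left x, top-left y, w, h)."""
    tlwhs = np.asarray(bbox_xywh[:, :4], dtype=float).copy()
    tlwhs[:, 0] -= tlwhs[:, 2] / 2
    tlwhs[:, 1] -= tlwhs[:, 3] / 2
    return tlwhs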
Example #8
    def run(self):
        results = []
        idx_frame = 0
        idx_frame1 = 0
        face_names = {}

        self.start_thread()

        timer = Timer()

        while True:
            # _, ori_im = self.vdo.retrieve()
            if self.args.rtsp != "" or self.args.cam != -1:
                if len(self.tmp_imgs) == 0:
                    continue
                ori_im = copy.deepcopy(self.tmp_imgs[-1])
                self.tmp_imgs = []
            else:
                ret, ori_im = self.vdo.read()

            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue
            idx_frame1 += 1

            start = time.time()

            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            center = (int(ori_im.shape[1] / 2), int(ori_im.shape[0] / 2))
            # cv2.circle(ori_im, center, 2, (255,0,0), 0)

            img, _, _, _ = letterbox(ori_im,
                                     height=self.height,
                                     width=self.width)
            # Normalize RGB
            img = img[:, :, ::-1].transpose(2, 0, 1)
            img = np.ascontiguousarray(img, dtype=np.float32)
            img /= 255.0

            # run tracking
            timer.tic()

            time_start = time.time()
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
            print("input time: {}".format(time.time() - time_start))

            time_start = time.time()
            online_targets = self.tracker.update(blob, ori_im)
            print("tracker time: {}".format(time.time() - time_start))

            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            timer.toc()

            for tlwh, tid in zip(online_tlwhs, online_ids):
                face_name = face_names.get(tid)
                if face_name is None:
                    bb_xyxy = (tlwh[0], tlwh[1],
                               tlwh[0] + tlwh[2], tlwh[1] + tlwh[3])
                    face_name = self.faceUtils.detect_face(im, bb_xyxy)
                    face_names[tid] = face_name
                print("{} {}".format(tid, face_names.get(tid)))

                if (idx_frame1 % self.args.move_skip == 0
                        and face_names.get(tid) == self.args.obj):
                    print("move")
                    # self.tmp_moves.append((center, xywh))
                    self.camUtils.move_cam(center, tlwh)

            # save results
            results.append((idx_frame - 1, online_tlwhs, online_ids))

            ori_im = vis.plot_tracking(ori_im,
                                       online_tlwhs,
                                       online_ids,
                                       frame_id=idx_frame,
                                       fps=1. / timer.average_time)
            end = time.time()

            self.result_imgs.put_nowait(ori_im)

            # logging
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / (end - start), len(online_tlwhs), len(online_ids)))
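The input preparation above (BGR to RGB, HWC to CHW, scaling to [0, 1]) is the usual JDE preprocessing; resizing to the network shape is handled separately by letterbox(). The transform in isolation, for a BGR uint8 frame:

import numpy as np

def preprocess(bgr_frame):
    """BGR uint8 HxWxC frame -> contiguous float32 CxHxW array in [0, 1]."""
    img = bgr_frame[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
    img = np.ascontiguousarray(img, dtype=np.float32)
    img /= 255.0
    return img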
Example #9
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    # tracker = JDETracker(opt, frame_rate=frame_rate)
    tracker = SubstractorTracker(opt, frame_rate=frame_rate, device='cuda')
    timer = Timer()
    results = []
    frame_id = 0
    manualMode = True
    # history maps track id -> list of (frame_id, tlwh); rows are written out
    # below in MOT format: ['FrameId', 'Id', 'X', 'Y', 'Width', 'Height',
    #                       'Confidence', 'ClassId', 'Visibility', 'unused']
    history = defaultdict(list)
    for path, img, img0 in dataloader:
        frame_id += 1
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id < 700:
        #     continue
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        ''' Step 1: Network forward, get detections & embeddings'''
        online_targets = tracker.update(img0, blob)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if t.tracklet_age > 3:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                history[tid].append((frame_id, tlwh))
        timer.toc()
        # save results (frame_id is already 1-based: it is incremented at the
        # top of the loop, so no +1 here)
        results.append((frame_id, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
            wait_time = 0 if manualMode else 1
            k = cv2.waitKey(wait_time)
            if k == ord('q'):
                break
            elif k in (ord('m'), ord('M')):
                manualMode = not manualMode
                if manualMode:
                    cv2.waitKey(0)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    with open('result.csv', 'w') as f:
        for tid in history:
            for frame, tlwh in history[tid]:
                tlwh = [round(x, 2) for x in tlwh]
                f.write('{frame},{id},{x},{y},{w},{h},1,-1,-1\n'.format(
                    frame=frame, id=tid, x=tlwh[0], y=tlwh[1],
                    w=tlwh[2], h=tlwh[3]))
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #10
            poseKeypoints = datum.poseKeypoints

            if poseKeypoints is None:
                print("No detected poses")
                continue
            else:
                frame_info[str(tid)] = poseKeypoints[0]
                tlwh = (x, y, w, h)
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > command_args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)

            detected_cropped_image = datum.cvOutputData.copy()
            orig_img[max(0, int(y - 0.2 * h)): min(int(y + h + 0.2 * h), orig_height - 1),
                     max(0, int(x - 0.2 * w)): min(int(x + w + 0.2 * w), orig_width - 1)] = detected_cropped_image.copy()

        timer.toc()
        online_im = vis.plot_tracking(image=orig_img, tlwhs=online_tlwhs, obj_ids=online_ids, frame_id=frame_id,
                                      fps=1. / timer.average_time)

        video_info[str(frame_id)] = frame_info
        video_writer.write(online_im)
        frame_id += 1

    with open(out_json_filename, "w+") as f:
        json.dump(video_info, f, indent=3, cls=NumpyEncoder)
    video_writer.release()
    print("Saved video")
    print("Finished")
Example #11
def eval_video(**kwargs):

    logger.setLevel(logging.INFO)

    cap = cv2.VideoCapture(kwargs['video_source'])
    fps = cap.get(cv2.CAP_PROP_FPS)
    # alternative: int(cap.get(cv2.CAP_PROP_FOURCC))
    fourcc = cv2.VideoWriter_fourcc(*'MP4V')
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    frame_count = -1
    iter_count = 0
    each_frame = kwargs['each_frame']
    save_dir = kwargs['save_dir']
    frames_limit = kwargs['frames_limit']

    video_writer = None
    video_output = kwargs['video_output']
    if video_output is not None:
        logger.info(
            f'Write video to {video_output} ({width}x{height}, {fps/each_frame} fps) ...'
        )
        video_writer = cv2.VideoWriter(video_output,
                                       fourcc,
                                       fps / each_frame,
                                       frameSize=(width, height))

    write_report_to = None
    data = {}
    if kwargs['report_output']:
        write_report_to = kwargs['report_output']

    tracker = OnlineTracker(**kwargs)
    timer = Timer()
    results = []
    wait_time = 1

    drv = driver.load_driver('tensorflow')

    logger.info(f'init person detection driver...')
    person_detect_driver = drv()
    person_detect_model = kwargs['person_detect_model']
    logger.info(f'loading person detection model {person_detect_model}...')
    person_detect_driver.load_model(person_detect_model)
    logger.info(f'person detection model {person_detect_model} loaded')

    try:
        while True:

            frame_count += 1
            if frames_limit is not None and frame_count > frames_limit:
                logger.warning('frames limit {} reached'.format(frames_limit))
                break

            # read every `each_frame`-th bgr frame
            ret, frame = cap.read()  # bgr
            if frame_count % each_frame > 0:
                continue

            if not ret or frame is None:
                logger.warning('video capturing finished')
                break

            if iter_count % 20 == 0:
                logger.info(
                    'Processing frame {} (iteration {}) ({:.2f} fps)'.format(
                        frame_count, iter_count,
                        1. / max(1e-5, timer.average_time)))

            det_tlwhs, det_scores = detect_persons_tf(person_detect_driver,
                                                      frame,
                                                      threshold=.5)

            # run tracking
            timer.tic()
            online_targets = tracker.update(frame, det_tlwhs, None)
            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                online_tlwhs.append(t.tlwh)
                online_ids.append(t.track_id)
            timer.toc()

            if write_report_to:

                for i, id in enumerate(online_ids):
                    if id not in data:
                        data[id] = {
                            'intervals': [],
                            'images': [],
                            'last_image': None,
                        }
                    di = data[id]['intervals']
                    if len(di) == 0 or di[-1][1] < frame_count - each_frame:
                        if len(di) > 0 and di[-1][0] == di[-1][1]:
                            # delete in place: `di = di[:-1]` would rebind the
                            # local name and leave data[id]['intervals'] unchanged
                            del di[-1]
                        di.append([frame_count, frame_count])
                    else:
                        di[-1][1] = frame_count
                    if (data[id]['last_image'] is None or
                            data[id]['last_image'] < frame_count - fps * 10):
                        data[id]['last_image'] = frame_count
                        tlwh = [max(0, int(o)) for o in online_tlwhs[i]]
                        pers_img = frame[tlwh[1]:tlwh[1] + tlwh[3],
                                         tlwh[0]:tlwh[0] + tlwh[2]].copy()
                        if max(pers_img.shape[0], pers_img.shape[1]) > 100:
                            coef = max(pers_img.shape[0],
                                       pers_img.shape[1]) / 100
                            pers_img = cv2.resize(
                                pers_img, (int(pers_img.shape[1] / coef),
                                           int(pers_img.shape[0] / coef)))
                        _, pers_img = cv2.imencode('.jpeg', pers_img)
                        data[id]['images'].append(
                            base64.b64encode(pers_img).decode())

            # save results
            frame_id = frame_count  # or make it incremental?
            results.append((frame_id + 1, online_tlwhs, online_ids))

            online_im = vis.plot_tracking(frame,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)

            for tlwh in det_tlwhs:
                x, y, w, h = [int(v) for v in tlwh[:4]]
                cv2.rectangle(
                    online_im,
                    (x, y),          # (left, top)
                    (x + w, y + h),  # (right, bottom)
                    (0, 255, 0),
                    1,
                )

            if kwargs['show_image']:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                save_to = os.path.join(save_dir, '{:05d}.jpg'.format(frame_id))
                cv2.imwrite(save_to, online_im)

            if video_writer is not None:
                video_writer.write(cv2.resize(online_im, (width, height)))

            key = cv2.waitKey(wait_time)
            key = chr(key % 128).lower()
            # 'q', Esc, or the key mapped to 'q' in the Russian layout
            if key in ('q', chr(27), chr(202 % 128).lower()):
                exit(0)
            elif key == 'p':
                cv2.waitKey(0)
            elif key == 'a':
                wait_time = int(not wait_time)

            iter_count += 1

    except (KeyboardInterrupt, SystemExit) as e:
        logger.info('Caught %s: %s' % (e.__class__.__name__, e))
    finally:
        cv2.destroyAllWindows()
        if video_writer is not None:
            logger.info('Written video to %s.' % video_output)
            video_writer.release()

        if write_report_to:

            for i in data:
                di = data[i]
                di['index'] = i
                di['duration'] = sum([i[1] - i[0] for i in di['intervals']])
                di['duration_sec'] = '{:.2f}'.format(di['duration'] / fps)
                di['intervals_str'] = ', '.join([
                    '{:.2f}-{:.2f}'.format(i[0] / fps, i[1] / fps)
                    for i in di['intervals']
                ])

            data = data.values()
            data = sorted(data, key=lambda x: x['duration'], reverse=True)

            # prepare html
            tpl = jinja2.Template(template)

            html = tpl.render(data=data)
            with open(write_report_to, 'w') as f:
                f.write(html)

            update_data({'#documents.persons.html': html}, use_mlboard,
                        mlboard)
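The interval bookkeeping above depends on mutating the list stored in the report dict, which is why the degenerate-interval case uses del rather than slicing: rebinding with di = di[:-1] creates a new list and leaves data[id]['intervals'] untouched. A minimal demonstration:

entry = {'intervals': [[0, 0], [5, 9]]}

alias = entry['intervals']
alias = alias[:-1]          # rebinds `alias` only; entry['intervals'] is unchanged
del entry['intervals'][-1]  # mutates in place; entry['intervals'] == [[0, 0]]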
Example #12
    def workOnDetections(opt, pred, results, img0, frame_id, save_dir,
                         show_image, self_dict):
        output_tracks = []
        self_dict['frame_id'] += 1

        activated_stracks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []

        if len(pred) > 0:
            dets = non_max_suppression(pred.unsqueeze(0), opt.conf_thres,
                                       opt.nms_thres)[0]
            scale_coords(opt.img_size, dets[:, :4], img0.shape).round()
            dets, embs = dets[:, :5].cpu().numpy(), dets[:, 6:].cpu().numpy()
            '''Detections'''
            detections = [
                STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30)
                for (tlbrs, f) in zip(dets, embs)
            ]
        else:
            detections = []
        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self_dict['tracked_stracks']:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)
        ''' Step 2: First association, with embedding'''
        strack_pool = joint_stracks(tracked_stracks, self_dict['lost_stracks'])
        # Predict the current location with KF
        STrack.multi_predict(strack_pool)
        dists = matching.embedding_distance(strack_pool, detections)
        dists = matching.fuse_motion(self_dict['kalman_filter'], dists,
                                     strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.7)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self_dict['frame_id'])
                activated_stracks.append(track)
            else:
                track.re_activate(det, self_dict['frame_id'], new_id=False)
                refind_stracks.append(track)
        ''' Step 3: Second association, with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [
            strack_pool[i] for i in u_track
            if strack_pool[i].state == TrackState.Tracked
        ]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists,
                                                                   thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self_dict['frame_id'])
                activated_stracks.append(track)
            else:
                track.re_activate(det, self_dict['frame_id'], new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = r_tracked_stracks[it]
            if not track.state == TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)
        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(
            dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet],
                                         self_dict['frame_id'])
            activated_stracks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)
        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self_dict['det_thresh']:
                continue
            track.activate(self_dict['kalman_filter'], self_dict['frame_id'])
            activated_stracks.append(track)
        """ Step 5: Update state"""
        for track in self_dict['lost_stracks']:
            if self_dict['frame_id'] - track.end_frame > self_dict[
                    'max_time_lost']:
                track.mark_removed()
                removed_stracks.append(track)

        self_dict['tracked_stracks'] = [
            t for t in self_dict['tracked_stracks']
            if t.state == TrackState.Tracked
        ]
        self_dict['tracked_stracks'] = joint_stracks(
            self_dict['tracked_stracks'], activated_stracks)
        self_dict['tracked_stracks'] = joint_stracks(
            self_dict['tracked_stracks'], refind_stracks)
        self_dict['lost_stracks'] = sub_stracks(self_dict['lost_stracks'],
                                                self_dict['tracked_stracks'])
        self_dict['lost_stracks'].extend(lost_stracks)
        self_dict['lost_stracks'] = sub_stracks(self_dict['lost_stracks'],
                                                self_dict['removed_stracks'])
        self_dict['removed_stracks'].extend(removed_stracks)
        (self_dict['tracked_stracks'],
         self_dict['lost_stracks']) = remove_duplicate_stracks(
             self_dict['tracked_stracks'], self_dict['lost_stracks'])

        # collect the activated tracks for output
        output_stracks = [
            track for track in self_dict['tracked_stracks']
            if track.is_activated
        ]

        logger.debug('===========Frame {}=========='.format(
            self_dict['frame_id']))
        logger.debug('Activated: {}'.format(
            [track.track_id for track in activated_stracks]))
        logger.debug('Refind: {}'.format(
            [track.track_id for track in refind_stracks]))
        logger.debug('Lost: {}'.format(
            [track.track_id for track in lost_stracks]))
        logger.debug('Removed: {}'.format(
            [track.track_id for track in removed_stracks]))

        online_targets = output_stracks
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)

        results.append((frame_id['0'] + 1, online_tlwhs, online_ids))

        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id['0'])  ##
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, '{:05d}.jpg'.format(frame_id['0'])),
                online_im)
        frame_id['0'] += 1
        # print("Processed frame: ", str(frame_id['0']))

        # print("End of post-processing")
        return self_dict, frame_id
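joint_stracks and sub_stracks are used above as union and difference over track lists keyed by track_id. Their implementation is not shown here; a sketch consistent with that usage (an assumption, not the project's actual code):

def joint_stracks(a, b):
    """Union of two track lists, keeping the first occurrence of each track_id."""
    seen = {t.track_id for t in a}
    return a + [t for t in b if t.track_id not in seen]

def sub_stracks(a, b):
    """Tracks of `a` whose track_id does not appear in `b`."""
    remove = {t.track_id for t in b}
    return [t for t in a if t.track_id not in remove]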
Example #13
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and provides the tracking output
       (writes the results to a video file).

       It uses the JDE model to get information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding (similar) to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the result frames containing bounding box information.

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last frame processed.
       '''
    width, height = dataloader.w, dataloader.h
    if save_dir:
        mkdir_if_missing(save_dir)
        # frames are also written into a 'frame' subfolder below
        mkdir_if_missing(os.path.join(save_dir, 'frame'))
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    # for selected object tracking
    global click_pos
    global is_selected
    selected_id = None

    # set video output writer
    counter = 0
    encode = 0x00000021  # fourcc given as a raw integer (commonly used for H.264 in .mp4)
    output_video = cv2.VideoWriter(
        os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
        (width, height), True)

    # start tracking
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            # start a new output segment (MP4Box can split these further)
            output_video.release()
            output_video = cv2.VideoWriter(
                os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
                (width, height), True)
            counter += 1

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # get visualization result and some control flags for selected object tracking
            online_im, click_pos, selected_id, is_selected = vis.plot_tracking(
                img0,
                online_tlwhs,
                online_ids,
                frame_id=frame_id,
                fps=1. / timer.average_time,
                selected_id=selected_id,
                click_pos=click_pos,
                is_selected=is_selected)
        if show_image:
            cv2.imshow('online_im', online_im)
            # bind mouse event listener (the window must exist before binding)
            cv2.setMouseCallback('online_im', on_click)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, 'frame', '{:05d}.jpg'.format(frame_id)),
                online_im)
            output_video.write(online_im)
        frame_id += 1
    output_video.release()
    # save results
    write_results(result_filename, results, data_type)

    return frame_id, timer.average_time, timer.calls
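The on_click callback bound via cv2.setMouseCallback is not included in this example. A hypothetical handler consistent with the click_pos / is_selected globals used above might look like this:

import cv2

def on_click(event, x, y, flags, param):
    """Record the click position so plot_tracking can resolve the selected track."""
    global click_pos, is_selected
    if event == cv2.EVENT_LBUTTONDOWN:
        click_pos = (x, y)
        is_selected = True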
Example #14
def main(args):
    """main"""
    # Step 1: initialize ACL and the ACL runtime
    acl_resource = AclLiteResource()

    # 1.2: one line of code, call the 'init' method of the AclLiteResource object to initialize ACL and the ACL runtime
    acl_resource.init()

    # Step 2: Load models 
    mot_model = AclLiteModel('../model/mot_v2.om')

    # Create the output dir if it does not exist; defaults to the current directory
    result_root = args.output_root if args.output_root != '' else '.'
    mkdir_if_missing(result_root)

    video_name = os.path.basename(args.input_video).replace(' ', '_').split('.')[0]

    # setup dataloader, use LoadVideo or LoadImages
    dataloader = LoadVideo(args.input_video, (1088, 608))
    # result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    # dir for output images; default: outputs/'VideoFileName'
    save_dir = os.path.join(result_root, video_name)    

    mkdir_if_missing(save_dir)

    # initialize tracker
    tracker = JDETracker(args, mot_model, frame_rate=frame_rate)
    timer = Timer()
    results = []
    
    print("Results will be saved at {}".format(save_dir))
    # img:  c h w; 3 608 1088 (preprocessed network input)
    # img0: h w c; 608 1088 3 (original BGR frame)
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0 and frame_id != 0:
            print('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking, start tracking timer 
        timer.tic()

        # list of Tracklet; see multitracker.STrack
        online_targets = tracker.update(np.array([img]), img0)

        # prepare for drawing, get all bbox and id
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > args.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # draw bbox and id
        online_im = vis.plot_tracking(img0, online_tlwhs, online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)

    if args.output_type == 'video':
        output_video_path = os.path.join(
            result_root, os.path.basename(args.input_video).replace(' ', '_'))
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            save_dir, output_video_path)
        os.system(cmd_str)
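os.system with an interpolated command string is fragile when paths contain spaces or shell metacharacters (the .replace(' ', '_') above works around part of that). A safer sketch of the same ffmpeg invocation using subprocess and the example's save_dir / output_video_path variables (-b:v is the current spelling of the video-bitrate flag):

import subprocess

subprocess.run([
    'ffmpeg', '-f', 'image2', '-i', '{}/%05d.jpg'.format(save_dir),
    '-b:v', '5000k', '-c:v', 'mpeg4', output_video_path,
], check=True)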
Example #15
File: track.py  Project: QMUL/AVA
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             countFileName,
             save_dir=None,
             show_image=True,
             frame_rate=30):

    device = torch.device("cpu" if opt.cpu else "cuda")

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    ids = set()
    if opt.le:
        skip_frames = 30

    for path, img, img0 in tqdm(dataloader):

        if opt.le and frame_id % skip_frames != 0:
            frame_id += 1
            continue

        if frame_id % opt.skip != 0:
            frame_id += 1
            continue

        #logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        start = time.time()
        blob = torch.from_numpy(img).to(device).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                ids.add(tid)

        end = time.time()
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)

        ratio_x = float(dataloader.width) / img0.shape[1]
        ratio_y = float(dataloader.height) / img0.shape[0]
        if opt.AVA:
            ratio_y = 1080. / img0.shape[0]
            txt = '{:.6f}'.format(end - start)
            for id, box in zip(online_ids, online_tlwhs):
                txt += ',{:.1f},{:.1f},{:.1f},{:.1f},-2,-2,-2,-2,{:d},-2,-2'.format(
                    box[0] * ratio_x, box[1] * ratio_y,
                    (box[0] + box[2]) * ratio_x, (box[1] + box[3]) * ratio_y,
                    id)
            txt += '\n'
            with open(countFileName, 'a') as countFile:
                countFile.write(txt)

        if opt.video_path == 0:
            for id, box in zip(online_ids, online_tlwhs):
                cv2.rectangle(img0,
                              (int(box[0] * ratio_x), int(box[1] * ratio_y)),
                              (int((box[0] + box[2]) * ratio_x),
                               int((box[1] + box[3]) * ratio_y)), (0, 255, 0),
                              2)
            cv2.imshow('Live', img0)
            cv2.waitKey(1)

        frame_id += 1

    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #16
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and provides the tracking output
       (writes the results to a video file).

       It uses the JDE model to get information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding (similar) to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the result frames containing bounding box information.

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last frame processed.
       '''

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #17
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and provides the tracking output
       (writes the results to a video file).

       It uses the JDE model to get information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding (similar) to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the result frames containing bounding box information.

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last frame processed.
       '''
    '''
    width = dataloader.vw
    height = dataloader.vh
    '''
    width = 640
    height = 480
    '''
    process = (
        ffmpeg
        #new added re
        #new added preset ultrafast (try different mode if not ok)
        .input('pipe:', format = 'rawvideo', pix_fmt = 'rgb24', s = '{}x{}'.format(width, height), re = None)
        #new added
        #.setpts('1.7*PTS')
        .output('../try.m3u8', format = 'hls', pix_fmt = 'yuv420p', vcodec = 'libx264', preset = "ultrafast", hls_time = 10, hls_list_size = 2, start_number = 0, hls_flags = 'delete_segments+append_list', hls_segment_filename = '../try_%05d.ts')
        .overwrite_output()
        .run_async(pipe_stdin = True)
    )
    '''
    track_id = 0
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    cv2.namedWindow('online_im')
    cv2.setMouseCallback('online_im', mouse_click)

    #ffmpeg process
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im, track_id = vis.plot_tracking(img0,
                                                    online_tlwhs,
                                                    online_ids,
                                                    frame_id=frame_id,
                                                    fps=1. / timer.average_time,
                                                    single=single,
                                                    mouse_x=mouse_x,
                                                    mouse_y=mouse_y,
                                                    track_id=track_id)
        if show_image:
            # display is disabled; frames are streamed out instead
            pass
            # cv2.imshow('online_im', online_im)
            # cv2.waitKey(1)
        # online_im_rgb = cv2.cvtColor(online_im, cv2.COLOR_BGR2RGB)
        # write_frame(process, online_im_rgb)
        stream(online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # close process
    #close_process(process)
    terminate_stream()
    return frame_id, timer.average_time, timer.calls
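stream() and terminate_stream() are external helpers that are not shown. The commented-out ffmpeg-python block near the top suggests they wrap a raw-video pipe into ffmpeg's stdin; a minimal sketch of that pattern, using the hypothetical write_frame / close_process names from the commented code:

import numpy as np

def write_frame(process, frame_rgb):
    """Feed one RGB frame to an ffmpeg process started with run_async(pipe_stdin=True)."""
    process.stdin.write(frame_rgb.astype(np.uint8).tobytes())

def close_process(process):
    process.stdin.close()
    process.wait()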