Example #1
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0) in enumerate(dataloader):
        # optional frame skipping, e.g.: if i % 8 != 0: continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
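Note: every variant on this page assumes a handful of project helpers (mkdir_if_missing, Timer, write_results, vis.plot_tracking, JDETracker) imported from the surrounding repo rather than defined in the snippets. Below is a minimal sketch of what mkdir_if_missing and Timer are expected to do, matching how they are called here; it is illustrative, not the project's actual code.

import os
import time


def mkdir_if_missing(path):
    # create the directory (and any parents) if it does not already exist
    os.makedirs(path, exist_ok=True)


class Timer:
    # accumulates wall-clock time over tic()/toc() pairs; average_time feeds
    # the fps log, and (frame_id, average_time, calls) is what eval_seq returns
    def __init__(self):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.average_time = 0.

    def tic(self):
        self.start_time = time.time()

    def toc(self):
        self.total_time += time.time() - self.start_time
        self.calls += 1
        self.average_time = self.total_time / self.calls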
Example #2
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets, scores = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        frame_time = []
        timestamp = time.time()
        localTime = time.localtime(timestamp)
        strTime = time.strftime("%Y-%m-%d %H:%M:%S", localTime)

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                frame_time.append(strTime)
        timer.toc()
        # save results
        results.append(
            (frame_id + 1, online_tlwhs, online_ids, frame_time, scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=scores,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
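Most variants end with write_results(result_filename, results, data_type). A hedged sketch of that helper for the 'mot' data type, writing one MOTChallenge-format line per box; variants that also append timestamps or scores (like the one above) would need a matching writer such as the commented-out write_results_score:

def write_results(filename, results, data_type):
    # sketch for data_type == 'mot': one MOTChallenge-format line per box,
    # i.e. frame,id,x,y,w,h,conf,-1,-1,-1
    if data_type != 'mot':
        raise ValueError('data_type not handled in this sketch: ' + data_type)
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x, y, w, h = tlwh
                f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},1,-1,-1,-1\n'.format(
                    frame_id, track_id, x, y, w, h))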
Example #3
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    if opt.save_var:
        import pickle
        # note: a leading '/' in the second argument would make os.path.join discard opt.save_var
        filename = os.path.join(opt.save_var, 'MOT_result.pickle')
        with open(filename, 'wb') as f:
            pickle.dump(results, f)
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
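When opt.save_var is set, the pickled results can be loaded back later, e.g.:

import os
import pickle

save_var = 'outputs'  # placeholder for opt.save_var
with open(os.path.join(save_var, 'MOT_result.pickle'), 'rb') as f:
    results = pickle.load(f)  # list of (frame_id, online_tlwhs, online_ids) tuples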
Example #4
def eval_seq(opt,
             data_path,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    tracker = JDETracker(opt)
    # timer = Timer()
    results = []
    frame_id = 0
    frame_nums = 60  #len(os.listdir(data_path))//2
    #np_res = []
    for _ in range(frame_nums):
        frame_id += 1
        dets = np.loadtxt(os.path.join(data_path,
                                       str(frame_id) + '.txt'),
                          dtype=np.float32,
                          delimiter=',')

        online_targets = tracker.update(dets)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if tlwh[2] * tlwh[3] > opt.min_box_area and tlwh[2] / tlwh[3] < 1.6:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # np_res.append([frame_id,tid,tlwh[0],tlwh[1],tlwh[2],tlwh[3],1,0])

        ## save results
        results.append((frame_id, online_tlwhs, online_ids))

        if show_image or save_dir is not None:
            if save_dir:
                mkdir_if_missing(save_dir)
            img = cv2.imread(os.path.join(data_path, str(frame_id) + '.jpg'))
            online_im = vis.plot_tracking(img,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id)
        # if show_image:
        #     cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    # save results
    write_results(result_filename, results, data_type)
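This variant tracks from pre-computed detections: it expects one comma-separated text file per frame under data_path (1.txt, 2.txt, ...) next to the matching 1.jpg, 2.jpg, ... frames. A hedged example of producing such a file; the (x, y, w, h, score) column layout is an assumption and only needs to match what this tracker.update(dets) variant consumes:

import numpy as np

# one row per detection; the column layout is an assumption, see above
dets = np.array([[120.0, 80.0, 45.0, 110.0, 0.92],
                 [300.5, 95.0, 50.0, 120.0, 0.87]], dtype=np.float32)
np.savetxt('1.txt', dets, fmt='%.2f', delimiter=',')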
Example #5
    def eval(self, skip_frame=1, show_image=False):
        tracker = JDETracker(self.opt, frame_rate=self.frame_rate)
        timer = Timer()
        frame_id = 0

        for i, (path, img, img0) in enumerate(self.dataloader):
            if i % skip_frame != 0:
                continue
            if frame_id % 20 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # run tracking
            timer.tic()
            if self.use_cuda:
                blob = torch.from_numpy(img).cuda().unsqueeze(0)
            else:
                blob = torch.from_numpy(img).unsqueeze(0)
            online_targets = tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.opt.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            timer.toc()
            tmp_result = {
                "frame_id": frame_id + 1,
                "bounding_box": online_tlwhs,
                "ids": online_ids,
                "scores": online_scores
            }
            self.send_result(tmp_result, raw_img=img0)

            frame_id += 1
            if show_image:
                online_im = self.send_image(img0, online_tlwhs, online_ids,
                                            frame_id,
                                            1. / max(1e-5, timer.average_time))
                cv2.imshow('Result', online_im)
        if self.video_saver is not None:
            self.video_saver.release()
        return frame_id, timer.average_time, timer.calls
Example #6
def eval_seq(opt,
             dataloader,
             data_type,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    writer = VideoWriter(save_dir, dataloader)
    for path, img, img0 in dataloader:

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            # cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
            writer.write(online_im)
        frame_id += 1
    print("***************************** DONE *****************************")
def eval_seq(opt, save_dir='', frame_rate=30):

    dataloader = LoadImages(opt.multi_test_images_dataset)
    opt = opt.update_dataset_info_and_set_heads(opt, dataloader)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    # O_CREAT ensures the file is created if it does not exist yet
    fd = os.open(save_dir + '/args.txt', os.O_RDWR | os.O_CREAT)
    for i, (path, img, img0) in enumerate(dataloader):

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6  # computed but unused: this variant keeps every box
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
            centre = tlwh[:2] + tlwh[2:] / 2
            text = str(frame_id) + ' ' + str(tid) + ' ' + str(
                centre[1]) + ' ' + str(centre[0]) + '\n'
            os.write(fd, str.encode(text))
        timer.toc()
        # save results

        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    os.close(fd)

    return frame_id, timer.average_time, timer.calls
Example #8
class Tracking:
    def __init__(self, args):
        self.args = args
        self.tracker = JDETracker(max_age=args.max_age, buffer_size=args.buffer_size, det_thresh=args.conf_thres,
                                  thresh1=args.tracker_thresh1, thresh2=args.tracker_thresh2, thresh3=args.tracker_thresh3)
        
        self.track_outputs = args.track_outputs
        self.track_results = []
        
        
    def run(self):
        df = pd.read_csv(self.args.detect_outputs, sep=' ')
        frames = sorted(df['frame_id'].unique())
        for frame_id in frames:
            frame_id = int(frame_id)
            records = df[df['frame_id']==frame_id]
            dets = records.values
            dets = dets[:, 1:]
            # det: columns = ['x1', 'y1', 'x2', 'y2', 'score', 'label', features]
            # features = dets[:, 6:]
            online_targets = self.tracker.update(dets[:, :6], dets[:, 6:])
            if len(online_targets) == 0:
                continue 
            tracks = [np.r_[frame_id, t.track_id, t.tlwh, t.score, t.label] for t in online_targets]
            tracks = np.array(tracks)
            self.track_results.append(tracks)
    
    def save(self):
        # must save
        self.track_results = np.concatenate(self.track_results, axis=0)
        columns = ['frame_id', 'track_id', 'x', 'y', 'w', 'h', 'score', 'label']
        df = pd.DataFrame(self.track_results, columns=columns)
        df.to_csv(self.track_outputs, index=False, sep=' ')
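A hedged usage example for this class; the attribute names mirror what __init__, run() and save() read, and the file paths are placeholders:

from argparse import Namespace

# placeholder values; only the attribute names are taken from the class above
args = Namespace(max_age=30, buffer_size=30, conf_thres=0.4,
                 tracker_thresh1=0.7, tracker_thresh2=0.7, tracker_thresh3=0.7,
                 detect_outputs='detections.txt',  # space-separated, frame_id column first
                 track_outputs='tracks.txt')

tracking = Tracking(args)
tracking.run()
tracking.save()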
Example #9
def eval_seq_realtime(opt, save_dir=None, show_image=True, frame_rate=30):
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0

    cv2.namedWindow('online_im', cv2.WINDOW_FREERATIO)
    cv2.setWindowProperty('online_im', cv2.WND_PROP_AUTOSIZE,
                          cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    ret, im = cap.read()

    # the first captured frame's size is reused as the letterbox target size
    height = im.shape[0]
    width = im.shape[1]

    while True:
        ret, img0 = cap.read()
        if not ret:
            break  # camera read failed; stop instead of crashing in letterbox

        # Padded resize
        img, _, _, _ = letterbox(img0, height=height, width=width)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        text_scale = max(1, online_im.shape[1] / 1600.)
        text_thickness = 1  # the original conditional evaluated to 1 on both branches
        line_thickness = max(1, int(online_im.shape[1] / 500.))
        cv2.putText(online_im,
                    'Press ESC to STOP', (300, int(15 * text_scale)),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 0, 255),
                    thickness=2)
        cv2.imshow('online_im', online_im)
        key = cv2.waitKey(1)
        if key == 27:
            break
        frame_id += 1
    cv2.destroyAllWindows()
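This variant (and #12 and #16 below) calls letterbox to resize a frame while preserving its aspect ratio. A minimal sketch consistent with how it is called here, returning the padded image plus the resize ratio and the two paddings; the project's own implementation may differ in detail:

import cv2


def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)):
    # resize so the image fits inside (height, width) without distortion,
    # then pad the borders with neutral gray; returns img, ratio, dw, dh
    shape = img.shape[:2]  # (h, w)
    ratio = min(float(height) / shape[0], float(width) / shape[1])
    new_shape = (int(round(shape[1] * ratio)), int(round(shape[0] * ratio)))
    dw = (width - new_shape[0]) / 2  # width padding, split between both sides
    dh = (height - new_shape[1]) / 2  # height padding
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, ratio, dw, dh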
Example #10
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and writes the tracking results to the result file.

       It uses the JDE model to obtain information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the result frames (frames with bounding boxes drawn on them).

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame rate of the given video.

       Returns
       -------
       frame_id : int
                  Sequence number of the last processed frame. (The return values are not significant here.)
       '''

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
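A hedged call site for this function, assuming the project's LoadVideo dataloader (with a frame_rate attribute) and MOT-format output; the paths are placeholders:

dataloader = LoadVideo('input.mp4', opt.img_size)  # assumed project loader
nf, avg_time, calls = eval_seq(opt, dataloader, 'mot', 'results.txt',
                               save_dir='outputs/frames', show_image=False,
                               frame_rate=dataloader.frame_rate)
logger.info('Done: {} frames, avg {:.2f} fps'.format(nf, 1. / max(1e-5, avg_time)))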
Example #11
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    '''
    img:  Normalized RGB image
    img0: BGR image
    '''
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # print("\n==> blob.size", blob.size()) 1, 3, 608, 1088
        '''
        tracker update
        '''
        online_targets = tracker.update(blob, img0)

        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        ''' 
        print("==> [track.eval_seq] tracker's output-> online_targets:", online_targets)
        try:
            print("==> [track.eval_seq] len(online_tlwhs):", len(online_tlwhs))
            print("==> [track.eval_seq] online_tlwhs[0]:", online_tlwhs[0])
            print("==> [track.eval_seq] online_ids[0]:", online_ids[0])
        except:
            pass
        
        partial output:
        ==> [multi-tracker.update] len(output_stracks): 5
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-13), OT_2_(1-13), OT_3_(1-13), OT_20_(10-13), OT_7_(2-13)]
        ==> [track.eval_seq] len(online_tlwhs): 5
        ==> [track.eval_seq] online_tlwhs[0]: [     802.38      163.64      24.074      57.376]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-14), OT_2_(1-14), OT_3_(1-14), OT_20_(10-14), OT_7_(2-14), OT_23_(13-14), OT_13_(4-14)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     809.96      163.69      25.305      60.319]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-15), OT_2_(1-15), OT_3_(1-15), OT_20_(10-15), OT_7_(2-15), OT_23_(13-15), OT_19_(10-15)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     818.46       164.4      26.832      63.971]
        ==> [track.eval_seq] online_ids[0]: 1
        '''

        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #12
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir='.',
             show_image=True,
             frame_rate=25):

    tracker = JDETracker(opt, frame_rate=frame_rate)
    p = path_root_index[5]
    if save_dir:
        save_dir = osp.join(save_dir, p)
        mkdir_if_missing(save_dir)
    image_path = getimage_path(path_root + p)
    timer = Timer()
    results = []
    frame_id = -1
    result_array_list = []
    result = []
    for path in image_path:

        # img=cv2.imread(path)

        img0 = cv2.imread(path)  # BGR
        # assert img0 is not None, 'Failed to load ' + img_path
        img_height = img0.shape[0]
        img_width = img0.shape[1]
        # print(img_height,img_width)
        # print(img0.shape)
        # Padded resize
        img, _, _, _ = letterbox(img0, height=608, width=1088)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        frame_id += 1

        # if frame_id % 20 == 0:
        #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id==2:
        #   break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_cref = []
        # result_array_list=[]
        for t in online_targets:

            tlwh = t.tlwh

            tid = t.track_id
            confidence = t.score

            vertical = tlwh[2] / tlwh[3] > 1.6
            if confidence < 0.3:
                if tlwh[2] * tlwh[3] > 2700 and not vertical \
                        and tlwh[2] * tlwh[3] < 100000:
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)

            elif confidence >= 0.3:
                if tlwh[2] * tlwh[3] > 1000 and not vertical \
                        and tlwh[2] * tlwh[3] < 100000:
                    # print(tlwh[2] * tlwh[3])
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)
        # if frame_id==2:
        #   break
        timer.toc()
        # save results
        print(frame_id)
        # if result_array_list:

        online_tlwhs = np.array(online_tlwhs)
        online_ids = np.array(online_ids)
        online_cref = np.array(online_cref)
        # print(online_tlwhs)
        # print(online_tlwhs.shape)
        # print(online_ids.shape)
        # pick=non_max_suppression(online_tlwhs,0.7,online_cref)

        # online_tlwhsnms=online_tlwhs[pick]
        # online_idsnms=online_ids[pick]
        # online_crefnms=online_cref[pick]
        # result_array_list2=np.array(result_array_list).copy()[pick]
        # result+=list(result_array_list2)
        # print(result)

        # print(frame_id,online_idsnms)
        # result.append(online_tlwhsnms)
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_cref,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    res_array = np.array(result_array_list)
    # print(res_array)
    # print(res_array.shape)
    # if x (or y) is negative, shrink w (or h) by the overhang before clamping
    tmp_data1 = np.where(res_array[:, [2]] < 0,
                         res_array[:, [2]] + res_array[:, [4]],
                         res_array[:, [4]])
    res_array[:, [4]] = tmp_data1
    tmp_data = np.where(res_array[:, [3]] < 0,
                        res_array[:, [3]] + res_array[:, [5]],
                        res_array[:, [5]])
    res_array[:, [5]] = tmp_data
    res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
    # print(res_array)
    res_array = np.round(res_array, 0)
    # res_array=cutmorecord(res_array,img_width,img_height)
    # print(res_array)
    np.savetxt("{}.txt".format(p),
               res_array,
               fmt='%d,%d,%d,%d,%d,%d,%d,%d',
               delimiter=',')

    # save results
    # write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #13
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             bbox_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    for path, img0, detection in dataloader:
        if frame_id % 1 == 0:  # always true, so every frame is logged
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        online_targets, detection_boxes = tracker.update(detection, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        #bbox detection plot
        box_tlbrs = []
        box_scores = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                box_tlbrs.append(tlbr)
                box_scores.append(box.score)

        timer.toc()
        # save results
        results.append(
            ([frame_id + 1] * len(online_tlwhs), online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            bbox_im = vis.plot_detections(img_bbox,
                                          box_tlbrs,
                                          scores=box_scores)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.imshow('bbox_im', bbox_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
            cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                        bbox_im)

        frame_id += 1
    # save results
    track_pools = []
    id_pools = []
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #14
    video_writer = cv2.VideoWriter(os.path.join(command_args.output_dir, os.path.splitext(os.path.basename(command_args.input_video))[0] + ".mp4"),
                                   cv2.VideoWriter_fourcc(*"mp4v"), float(frame_rate), (orig_width, orig_height))

    tracker = JDETracker(opt=command_args, frame_rate=frame_rate)

    video_info = {}
    frame_id = 0
    for path, img, img0, orig_img in dataloader:
        frame_info = {}
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            datum = op.Datum()
            tlwh = t.tlwh
            tid = t.track_id
            x, y, w, h = tlwh
            x = (orig_width / command_args.img_size[0]) * x
            y = (orig_height / command_args.img_size[1]) * y
            w = (orig_width / command_args.img_size[0]) * w
            h = (orig_height / command_args.img_size[1]) * h
            image_to_detected = orig_img[max(0, int(y - 0.2 * h)): min(int(y + h + 0.2 * h), orig_height - 1),
                                         max(0, int(x - 0.2 * w)): min(int(x + w + 0.2 * w), orig_width - 1)].copy()
            datum.cvInputData = image_to_detected
            opWrapper.emplaceAndPop(op.VectorDatum([datum]))
Example #15
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and writes the tracking results to the result file.

       It uses the JDE model to obtain information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                   Type of dataset corresponding to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the result frames (frames with bounding boxes drawn on them).

       show_image : bool
                    Option for showing individual frames during run-time.

       frame_rate : int
                    Frame rate of the given video.

       Returns
       -------
       frame_id : int
                  Sequence number of the last processed frame. (The return values are not significant here.)
       '''
    width, height = dataloader.w, dataloader.h
    if save_dir:
        mkdir_if_missing(save_dir)
        mkdir_if_missing(os.path.join(save_dir, 'frame'))  # frames are written to this subfolder below
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    # for selected object tracking
    global click_pos
    global is_selected
    selected_id = None

    # set video output writer
    counter = 0
    encode = 0x00000021  # numeric fourcc value, commonly used to request H.264 in .mp4 output
    output_video = cv2.VideoWriter(
        os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
        (width, height), True)

    # start tracking
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            output_video.release()
            # Call MP4Box to divide new mp4 file
            output_video = cv2.VideoWriter(
                os.path.join(save_dir, f'result_{counter}.mp4'), encode, 5,
                (width, height), True)
            counter += 1

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # get visualization result and some control flags for selected object tracking
            online_im, click_pos, selected_id, is_selected = vis.plot_tracking(
                img0,
                online_tlwhs,
                online_ids,
                frame_id=frame_id,
                fps=1. / timer.average_time,
                selected_id=selected_id,
                click_pos=click_pos,
                is_selected=is_selected)
        if show_image:
            # bind mouse event listener
            cv2.setMouseCallback("online_im", on_click)
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(
                os.path.join(save_dir, 'frame', '{:05d}.jpg'.format(frame_id)),
                online_im)
            output_video.write(online_im)
        frame_id += 1
    output_video.release()
    # save results
    write_results(result_filename, results, data_type)

    return frame_id, timer.average_time, timer.calls
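The snippet binds an on_click callback and shares the click_pos/is_selected globals with vis.plot_tracking, but neither is shown. A plausible sketch, assuming the callback only records the most recent left click:

import cv2

click_pos = None
is_selected = False


def on_click(event, x, y, flags, param):
    # record the most recent left click; plot_tracking is assumed to match it
    # against the drawn boxes and set/clear the selected track id accordingly
    global click_pos, is_selected
    if event == cv2.EVENT_LBUTTONDOWN:
        click_pos = (x, y)
        is_selected = False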
Example #16
class VideoTracker(object):
    def __init__(self, args, video_path):
        self.args = args
        self.video_path = video_path
        self.logger = get_logger("root")

        use_cuda = args.use_cuda and torch.cuda.is_available()
        if not use_cuda:
            warnings.warn("Running in cpu mode which maybe very slow!",
                          UserWarning)

        if args.display:
            cv2.namedWindow("test", cv2.WINDOW_NORMAL)
            cv2.resizeWindow("test", args.display_width, args.display_height)

        if args.rtsp != "":
            print("Using rtsp " + str(args.rtsp))
            self.vdo = cv2.VideoCapture(args.rtsp)
        elif args.cam != -1:
            print("Using webcam " + str(args.cam))
            self.vdo = cv2.VideoCapture(args.cam)
        else:
            self.vdo = cv2.VideoCapture()

        frame_rate = 30
        cfg_dict = parse_model_cfg(args.cfg)
        self.width, self.height = int(cfg_dict[0]['width']), int(
            cfg_dict[0]['height'])
        args.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
        self.tracker = JDETracker(args, frame_rate=frame_rate)

        self.known_faces = {}

        self.faceUtils = FaceUtils()
        self.camUtils = CamUtils(args)
        self.tmp_imgs = []
        self.tmp_moves = []
        self.result_imgs = queue.Queue()
        self.is_running = True
        print("Loading Control Done.")

    def __enter__(self):
        if self.args.rtsp != "":
            ret, frame = self.vdo.read()
            assert ret, "Error: RTSP error"
            self.im_width = frame.shape[1]  # frame.shape is (height, width, channels)
            self.im_height = frame.shape[0]
        elif self.args.cam != -1:
            ret, frame = self.vdo.read()
            assert ret, "Error: Camera error"
            self.im_width = frame.shape[1]  # frame.shape is (height, width, channels)
            self.im_height = frame.shape[0]
        else:
            assert os.path.isfile(self.video_path), "Path error"
            self.vdo.open(self.video_path)
            self.im_width = int(self.vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.im_height = int(self.vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
            assert self.vdo.isOpened()

        if self.args.save_path:
            os.makedirs(self.args.save_path, exist_ok=True)

            # path of saved video and results
            self.save_video_path = os.path.join(self.args.save_path,
                                                "results.avi")
            self.save_results_path = os.path.join(self.args.save_path,
                                                  "results.txt")

            # create video writer
            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
            fps = int(self.vdo.get(cv2.CAP_PROP_FPS))
            self.writer = cv2.VideoWriter(self.save_video_path, fourcc, fps,
                                          (self.im_width, self.im_height))

            # logging
            self.logger.info("Save results to {}".format(self.args.save_path))

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if exc_type:
            print(exc_type, exc_value, exc_traceback)

    def read_thread(self):
        while True:
            time_start = time.time()
            # _, ori_im = self.vdo.retrieve()
            ret, ori_im = self.vdo.read()
            if not ret:
                if self.args.rtsp != "":
                    print("Using rtsp " + str(self.args.rtsp))
                    self.vdo = cv2.VideoCapture(self.args.rtsp)
                elif self.args.cam != -1:
                    print("Using webcam " + str(self.args.cam))
                    self.vdo = cv2.VideoCapture(self.args.cam)
                else:
                    self.vdo = cv2.VideoCapture()
                continue

            # ori_im = cv2.resize(ori_im, (800, 600))
            self.tmp_imgs.append(copy.deepcopy(ori_im))

            # print("read time: {}".format(time.time() - time_start))

    def jpeg_thread(self):
        mjpeg_server = MjpegServer(port=8080)
        print('MJPEG server started... 8080')
        while self.is_running:
            ori_im = self.result_imgs.get()
            mjpeg_server.send_img(ori_im)
        mjpeg_server.shutdown()

    def move_thread(self):
        while True:
            if len(self.tmp_moves) == 0:
                continue
            center, xywh = copy.deepcopy(self.tmp_moves[-1])
            self.tmp_moves = []
            self.camUtils.move_cam(center, xywh)

    def start_thread(self):
        thread_read = threading.Thread(target=self.read_thread, args=())
        thread_read.daemon = True
        thread_read.start()

        thread_jpeg = threading.Thread(target=self.jpeg_thread, args=())
        thread_jpeg.daemon = True
        thread_jpeg.start()

        thread_move = threading.Thread(target=self.move_thread, args=())
        thread_move.daemon = True
        thread_move.start()

    def run(self):
        results = []
        idx_frame = 0
        idx_frame1 = 0
        face_names = {}

        self.start_thread()

        timer = Timer()

        while True:
            # _, ori_im = self.vdo.retrieve()
            if self.args.rtsp != "" or self.args.cam != -1:
                if len(self.tmp_imgs) == 0:
                    continue
                ori_im = copy.deepcopy(self.tmp_imgs[-1])
                self.tmp_imgs = []
            else:
                ret, ori_im = self.vdo.read()
                if not ret:
                    break  # end of video

            idx_frame += 1
            if idx_frame % self.args.frame_interval:
                continue
            idx_frame1 += 1

            start = time.time()

            im = cv2.cvtColor(ori_im, cv2.COLOR_BGR2RGB)
            center = (int(ori_im.shape[1] / 2), int(ori_im.shape[0] / 2))
            # cv2.circle(ori_im, center, 2, (255,0,0), 0)

            img, _, _, _ = letterbox(ori_im,
                                     height=self.height,
                                     width=self.width)
            # Normalize RGB
            img = img[:, :, ::-1].transpose(2, 0, 1)
            img = np.ascontiguousarray(img, dtype=np.float32)
            img /= 255.0

            # run tracking
            timer.tic()

            time_start = time.time()
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
            print("input time: {}".format(time.time() - time_start))

            time_start = time.time()
            online_targets = self.tracker.update(blob, ori_im)
            print("tracker time: {}".format(time.time() - time_start))

            online_tlwhs = []
            online_ids = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
            timer.toc()

            for tlwh, tid in zip(online_tlwhs, online_ids):
                face_name = face_names.get(tid)
                if face_name is None:
                    bb_xyxy = (tlwh[0], tlwh[1],
                               tlwh[0] + tlwh[2], tlwh[1] + tlwh[3])
                    face_name = self.faceUtils.detect_face(im, bb_xyxy)
                    face_names[tid] = face_name
                print("{} {}".format(tid, face_names.get(tid)))

                if (idx_frame1 % self.args.move_skip
                    ) == 0 and face_names.get(tid) == self.args.obj:
                    print("move")
                    # self.tmp_moves.append((center, xywh))
                    self.camUtils.move_cam(center, tlwh)

            # save results
            results.append((idx_frame - 1, online_tlwhs, online_ids))

            ori_im = vis.plot_tracking(ori_im,
                                       online_tlwhs,
                                       online_ids,
                                       frame_id=idx_frame,
                                       fps=1. / timer.average_time)
            end = time.time()

            self.result_imgs.put_nowait(ori_im)

            # logging
            self.logger.info("time: {:.03f}s, fps: {:.03f}, detection numbers: {}, tracking numbers: {}" \
                             .format(end - start, 1 / (end - start), len(online_tlwhs), len(online_ids)))
Example #17
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename=None,
             save_dir=None,
             show_image=False,
             frame_rate=10,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    person_mot = PersonMOT()

    results = []
    frame_id = 0
    rflag = False

    # for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        if i % 12 != 0:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # online_scores.append(t.score)
        timer.toc()

        # send the ATC frame info
        rflag = atcs.send_info(opt.device_id, opt.url, online_ids,
                               online_tlwhs, opt.input_stream)

        if rflag and i % opt.check_setp == 0:
            person_mot.handle_crime(opt.device_id, opt.url, opt.trigger,
                                    online_ids, online_tlwhs, img0)

        # whether to render the image
        if show_image or save_dir:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            results.append((frame_id + 1, online_tlwhs, online_ids))
            # with scores:
            # results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        # show the image
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)

        # save the image
        if save_dir:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1

    # save results
    if opt.save:
        write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #18
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             countFileName,
             save_dir=None,
             show_image=True,
             frame_rate=30):

    device = torch.device("cpu" if opt.cpu else "cuda")

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    ids = set()
    if opt.le:
        skip_frames = 30

    for path, img, img0 in tqdm(dataloader):

        if opt.le and frame_id % skip_frames != 0:
            frame_id += 1
            continue

        if frame_id % opt.skip != 0:
            frame_id += 1
            continue

        #logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        start = time.time()
        blob = torch.from_numpy(img).to(device).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                ids.add(tid)

        end = time.time()
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)

        ratio_x = float(dataloader.width) / img0.shape[1]
        ratio_y = float(dataloader.height) / img0.shape[0]
        if opt.AVA:
            ratio_y = 1080. / img0.shape[0]
            countFile = open(countFileName, 'a')
            txt = '{:.6f}'.format(end - start)

            for id, box in zip(online_ids, online_tlwhs):
                txt += ',{:.1f},{:.1f},{:.1f},{:.1f},-2,-2,-2,-2,{:d},-2,-2'.format(
                    box[0] * ratio_x, box[1] * ratio_y,
                    (box[0] + box[2]) * ratio_x, (box[1] + box[3]) * ratio_y,
                    id)
            txt += '\n'
            countFile.write(txt)
            countFile.close()

        if opt.video_path == 0:
            for id, box in zip(online_ids, online_tlwhs):
                cv2.rectangle(img0,
                              (int(box[0] * ratio_x), int(box[1] * ratio_y)),
                              (int((box[0] + box[2]) * ratio_x),
                               int((box[1] + box[3]) * ratio_y)), (0, 255, 0),
                              2)
            cv2.imshow('Live', img0)
            cv2.waitKey(1)

        frame_id += 1

    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #19
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True,
             gt_path=None):

    gt_results_dict = dict()
    if gt_path:
        if os.path.isfile(gt_path):
            with open(gt_path, 'r') as f:
                for line in f.readlines():
                    linelist = line.split(',')
                    if len(linelist) < 7:
                        continue
                    fid = int(linelist[0])
                    if fid < 1:
                        continue
                    gt_results_dict.setdefault(fid, list())

                    box_size = float(linelist[4]) * float(linelist[5])

                    score = 1

                    # optional size filters, e.g.:
                    #   if box_size > 7000: continue
                    #   if box_size <= 7000 or box_size >= 15000: continue
                    #   if box_size < 15000: continue

                    tlwh = tuple(map(float, linelist[2:6]))
                    target_id = linelist[1]  # kept as a string

                    gt_results_dict[fid].append((tlwh, target_id, score))

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0) in enumerate(dataloader):
        # optional frame skipping, e.g.: if i % 8 != 0: continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time,
                                          gt_box=gt_results_dict.get(frame_id + 1, []))  # .get avoids a KeyError when no ground truth was loaded
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #20
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    # check that the save directory exists and create it if it does not
    if save_dir:
        mkdir_if_missing(save_dir)

    # create the JDETracker, whose init (src/lib/tracker/multitracker.py) does the following:
    # 1) checks whether the program should run on CPU or GPU
    # 2) builds the model and puts it in evaluation mode
    # 3) sets up a Kalman filter: a tool for estimating the state of a linear dynamical system perturbed by noise, from linearly dependent measurements (observations)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # initialize the timer used to monitor processing time
    timer = Timer()
    # initialize the results array
    results = []
    # frame identifier
    frame_id = 0
    # iterate over the dataloader items, i.e. the dataset images loaded in main
    for path, img, img0 in dataloader:
        # log the frame rate every 20 processed frames
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()

        #questa funzione è specifica per le GPU NVIDIA, infatti usa CUDA, si può sostituire con CPU nel caso si voglia eseguire il programma con CPU
        #crea una matrice multidimensionale di almeno 3 elementi per rappresentare l'immagine img

        blob = torch.from_numpy(img).cuda().unsqueeze(0)

        # apply the update function of the JDETracker, which performs the following steps (src/lib/tracker/multitracker.py):
        # 1) receives blob and img0 as parameters
        # 2) extracts height and width from both and stores them in variables
        # 3) creates a variable c holding a float array whose two elements are half the height and half the width of img0
        # 4) creates a variable s holding max(float(inp_width) (blob) / float(inp_height) (blob) * height (img0), width (img0)) * 1.0
        # 5) network forward, get detections & embeddings: detects the objects and adds them to a pool of tracklets
        # 6) first association with embeddings: per the paper, the role of the embedding is to distinguish different kinds of objects
        # 7) second association with IoU
        # 8) initializes new STracks
        # 9) updates the state
        # 10) returns the active STracks
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # iterate over the STracks
        for t in online_targets:

            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
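Every eval_seq variant above repeats the same box filter: discard boxes with width/height ratio above 1.6 or area below opt.min_box_area. A minimal sketch of that filter as a standalone helper; the name keep_box and its signature are illustrative, not part of the original code:

def keep_box(tlwh, min_box_area, max_ratio=1.6):
    # tlwh = (top-left x, top-left y, width, height)
    w, h = tlwh[2], tlwh[3]
    # despite the name "vertical" in the examples, w / h > 1.6 actually
    # rejects boxes much wider than they are tall, which are unlikely
    # to be upright pedestrians
    too_wide = w / h > max_ratio
    return w * h > min_box_area and not too_wide

# usage inside the tracking loop:
#     if keep_box(t.tlwh, opt.min_box_area):
#         online_tlwhs.append(t.tlwh)
#         online_ids.append(t.track_id)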
Exemple #21
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    len_all = len(dataloader)
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        img_pil = Image.open(path).convert('RGB')
        normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        img_norm = transforms.Compose(
            [T.RandomResize([800], max_size=1333), normalize])
        img_norm = img_norm(img_pil)
        # run tracking
        timer.tic()
        # note: the tracker is fed the normalized PIL tensor (img_norm) rather
        # than the dataloader's img blob, so no separate blob is built here
        online_targets = tracker.update(img_norm.cuda().unsqueeze(0), img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
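Exemple #21 bypasses the dataloader's preprocessing and rebuilds the input tensor with torchvision. As a self-contained illustration of the same ImageNet normalization: note that T.RandomResize in the example looks like a custom, DETR-style transform, so torchvision's Resize is substituted here as an assumption, and the image path is illustrative:

from PIL import Image
import torch
from torchvision import transforms

# Sketch of the preprocessing in Exemple #21, with torchvision's Resize
# standing in for the custom T.RandomResize transform.
preprocess = transforms.Compose([
    transforms.Resize(800, max_size=1333),        # shorter side 800, longer side capped at 1333
    transforms.ToTensor(),                        # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406],   # ImageNet channel means
                         [0.229, 0.224, 0.225]),  # ImageNet channel stds
])

img_pil = Image.open('frame.jpg').convert('RGB')  # 'frame.jpg' is an illustrative path
blob = preprocess(img_pil).unsqueeze(0)           # add batch dimension: (1, 3, H, W)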
Exemple #22
class Pipeline:
    def __init__(self, args):

        self.args = args
        self.num_classes = 4

        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        assert self.device == 'cuda', 'CUDA is not available'
        images_folder = os.path.join(args.images_root, args.videoname)
        self.dataset = Vehicle(images_folder,
                               model=args.model_type,
                               image_size_effdet=args.image_size_effdet)
        self.loader = DataLoader(self.dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 sampler=SequentialSampler(self.dataset),
                                 num_workers=args.num_workers,
                                 pin_memory=False,
                                 drop_last=False)

        self.detector = Detector(args.weight, args.conf_thres, args.iou_thres,
                                 args.model_type, args.image_size_effdet)

        self.tracker = JDETracker(max_age=args.max_age,
                                  buffer_size=args.buffer_size,
                                  det_thresh=args.conf_thres,
                                  thresh1=args.tracker_thresh1,
                                  thresh2=args.tracker_thresh2,
                                  thresh3=args.tracker_thresh3)
        self.track_features_type = args.track_features_type

        self.save_detects = args.save_detects
        self.detect_outputs = args.detect_outputs
        self.detect_results = []

        self.track_outputs = args.track_outputs
        self.track_results = []

    def run(self):
        pbar = tqdm(self.loader)

        i = 0
        for imgs, imgs0, frame_ids in pbar:
            preds = self.detector(imgs, imgs0)

            i += 1
            if self.args.test_docker and i == 5:
                break

            for xi, det in enumerate(preds):
                if det is None:
                    continue
                det = det.cpu().numpy()

                img = imgs0[xi].numpy()
                features = compute_track_features(self.track_features_type,
                                                  det, img)
                online_targets = self.tracker.update(det, features=features)
                if len(online_targets) > 0:
                    tracks = [
                        np.r_[int(frame_ids[xi]), t.track_id, t.tlwh, t.score,
                              t.label] for t in online_targets
                    ]
                    tracks = np.array(tracks)
                    self.track_results.append(tracks)

                # concatenate the detection columns with the track features
                det = np.c_[det[:, :6], features]
                frames = np.ones(
                    (det.shape[0], 1), dtype=np.int64) * int(frame_ids[xi])
                res = np.c_[frames, det]
                self.detect_results.append(res)

    def save(self):
        # saving detections is optional, since the output file can be very large
        if self.save_detects:
            self.detect_results = np.concatenate(self.detect_results, axis=0)
            features = [
                f"f{i}" for i in range(self.detect_results[:, 7:].shape[1])
            ]
            columns = ['frame_id', 'x1', 'y1', 'x2', 'y2', 'score', 'label']
            columns.extend(features)
            df = pd.DataFrame(self.detect_results, columns=columns)
            df.to_csv(self.detect_outputs, index=False, sep=' ')

        self.track_results = np.concatenate(self.track_results, axis=0)
        columns = [
            'frame_id', 'track_id', 'x', 'y', 'w', 'h', 'score', 'label'
        ]
        df = pd.DataFrame(self.track_results, columns=columns)
        df.to_csv(self.track_outputs, index=False, sep=' ')
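Since Pipeline.save writes space-separated files with a header row, the outputs can be loaded straight back with pandas. A short usage sketch; the file name tracks.txt is illustrative:

import pandas as pd

# Read the tracks written by Pipeline.save (space-separated, with header).
tracks = pd.read_csv('tracks.txt', sep=' ')

# e.g. count detections per track, and distinct tracks per frame
per_track = tracks.groupby('track_id').size()
per_frame = tracks.groupby('frame_id')['track_id'].nunique()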
Exemple #23
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    '''
       Processes the given video sequence and produces the tracking output (writes the results to a video file).

       It uses the JDE model to obtain information about the online targets present.

       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.

       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.

       data_type : String
                    Type of dataset corresponding to the given video.

       result_filename : String
                         The name (path) of the file for storing results.

       save_dir : String
                  Path to the folder for storing the frames containing bounding box information (Result frames).

       show_image : bool
                    Option for showing individual frames at run-time.

       frame_rate : int
                    Frame-rate of the given video.

       Returns
       -------
       (Returns are not significant here)
       frame_id : int
                  Sequence number of the last processed frame.
       '''
    '''
    width = dataloader.vw
    height = dataloader.vh
    '''
    # hard-coded frame size; the commented-out block above would read it from the dataloader
    width = 640
    height = 480
    '''
    process = (
        ffmpeg
        #new added re
        #new added preset ultrafast (try different mode if not ok)
        .input('pipe:', format = 'rawvideo', pix_fmt = 'rgb24', s = '{}x{}'.format(width, height), re = None)
        #new added
        #.setpts('1.7*PTS')
        .output('../try.m3u8', format = 'hls', pix_fmt = 'yuv420p', vcodec = 'libx264', preset = "ultrafast", hls_time = 10, hls_list_size = 2, start_number = 0, hls_flags = 'delete_segments+append_list', hls_segment_filename = '../try_%05d.ts')
        .overwrite_output()
        .run_async(pipe_stdin = True)
    )
    '''
    track_id = 0
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    cv2.namedWindow('online_im')
    cv2.setMouseCallback('online_im', mouse_click)

    #ffmpeg process
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im, track_id = vis.plot_tracking(img0,
                                                    online_tlwhs,
                                                    online_ids,
                                                    frame_id=frame_id,
                                                    fps=1. /
                                                    timer.average_time,
                                                    single=single,
                                                    mouse_x=mouse_x,
                                                    mouse_y=mouse_y,
                                                    track_id=track_id)
        if show_image:
            pass
            #cv2.imshow('online_im', online_im)
            #cv2.waitKey(1)
            #plt.imshow(online_im)
            #plt.show()
        #online_im_rgb = cv2.cvtColor(online_im, cv2.COLOR_BGR2RGB)
        #write_frame(process, online_im_rgb)
        stream(online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # close process
    #close_process(process)
    terminate_stream()
    return frame_id, timer.average_time, timer.calls
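Exemple #23 registers a mouse_click callback on the window but never shows it, and its plot_tracking call reads mouse_x, mouse_y, and single, which must be set somewhere. A hypothetical sketch of such a callback, assuming those names are module-level globals updated on click; none of this is confirmed by the original snippet:

import cv2

# Hypothetical globals consumed by vis.plot_tracking in Exemple #23.
mouse_x, mouse_y = -1, -1
single = False  # when True, only the clicked track is highlighted

def mouse_click(event, x, y, flags, param):
    # OpenCV invokes this for every mouse event on the window
    # registered via cv2.setMouseCallback('online_im', mouse_click).
    global mouse_x, mouse_y, single
    if event == cv2.EVENT_LBUTTONDOWN:
        mouse_x, mouse_y = x, y
        single = True            # left click selects the track under the cursor
    elif event == cv2.EVENT_RBUTTONDOWN:
        single = False           # right click returns to showing all tracks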