Example #1
def run_mot_16(opt):
    list_vid = [
        'MOT16-02'
    ]  # other train sequences: 'MOT16-04','MOT16-05','MOT16-09','MOT16-10','MOT16-11','MOT16-13'
    for vid in list_vid:
        result_root = '../results/' + vid
        mkdir_if_missing(result_root)

        logger.info('Starting tracking...')
        out_size = (1920, 1080) if vid != 'MOT16-05' else (640, 480)
        dataloader = datasets.LoadImages(
            '../MOT16_Data/train/' + vid + '/img1',
            '../MOT16_Data/generate_detection/' + vid,
            out_size=out_size)
        result_filename = os.path.join(result_root, 'iou.txt')
        frame_rate = dataloader.frame_rate

        frame_dir = None if opt.output_format == 'text' else osp.join(
            result_root, 'frame')
        bbox_dir = None if opt.output_format == 'text' else osp.join(
            result_root, 'bbox_detection')
        eval_seq(opt,
                 dataloader,
                 'mot',
                 result_filename,
                 save_dir=frame_dir,
                 bbox_dir=bbox_dir,
                 show_image=False,
                 frame_rate=frame_rate)

        if opt.output_format == 'video':
            output_video_path = osp.join(result_root, 'result.mp4')
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
                osp.join(result_root, 'frame'), output_video_path)
            os.system(cmd_str)
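
A note on the pattern above (repeated in most of the examples below): the ffmpeg encode is launched through os.system with string interpolation. As a hedged aside, here is a minimal sketch of the same step via subprocess.run with an explicit argument list, which avoids shell-quoting problems when result_root contains spaces; the frame pattern and codec flags are reused from the command above, and frames_to_video is a hypothetical helper name:

import os.path as osp
import subprocess

def frames_to_video(result_root, output_name='result.mp4'):
    # Encode result_root/frame/%05d.jpg into an MP4, mirroring the
    # os.system ffmpeg call above but without going through a shell.
    output_video_path = osp.join(result_root, output_name)
    cmd = ['ffmpeg', '-f', 'image2',
           '-i', osp.join(result_root, 'frame', '%05d.jpg'),
           '-b:v', '5000k', '-c:v', 'mpeg4', output_video_path]
    subprocess.run(cmd, check=True)  # raises CalledProcessError on failure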
Example #2
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate,
             use_cuda=opt.gpus != [-1])

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'MOT16-03-results.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #3
def demo(opt, ROS_Img, frame_id):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    # dataloader = datasets.LoadVideo(input_file, opt.img_size)
    # print(init)
    img, img0 = preprocessing(ROS_Img)

    result_filename = os.path.join(result_root, 'results.txt')
    #frame_rate = dataloader.frame_rate
    frame_rate = 10

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    #frame_dir = "../demos/img_results"
    print(opt.output_root)
    eval_seq_single(
        opt,
        img,
        img0,
        tracker,
        results,
        frame_id,
        'mot',
        result_filename,
        save_dir=frame_dir,
        show_image=False,
        frame_rate=frame_rate,
    )
Example #4
def demo(opt):
    result_root = opt.output_root if opt.output_root != "" else "."
    mkdir_if_missing(result_root)

    logger.info("Starting tracking...")
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, "results.txt")
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == "text" else osp.join(
        result_root, "frame")
    eval_seq(
        opt,
        dataloader,
        "mot",
        result_filename,
        save_dir=frame_dir,
        show_image=False,
        frame_rate=frame_rate,
    )

    if opt.output_format == "video":
        output_video_path = osp.join(result_root, "result.mp4")
        cmd_str = "ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}".format(
            osp.join(result_root, "frame"), output_video_path)
        os.system(cmd_str)
Example #5
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    print('OPT: {}'.format(opt.input_video))
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    try:
        eval_seq(opt,
                 dataloader,
                 'mot',
                 result_filename,
                 save_dir=frame_dir,
                 show_image=True,
                 frame_rate=frame_rate)
    except Exception as e:
        logger.info(e)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #6
File: demo.py  Project: allenwu5/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')

    assert osp.exists(opt.input_video), f'{opt.input_video} does NOT exist !'
    file_name, file_ext = osp.splitext(opt.input_video)
    if file_ext in ['.mp4', '.avi']:  # splitext keeps the leading dot
        dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    else:
        dataloader = datasets.LoadImages(opt.input_video, opt.img_size)
        dataloader.frame_rate = int(round(opt.frame_rate))
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate,
             use_cuda=opt.gpus != [-1])

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'MOT16-03-results.mp4')
        frame_path = osp.join(result_root, 'frame')
        cmd_str = f'ffmpeg -framerate {opt.frame_rate} -f image2 -i {frame_path}/%05d.jpg -b 5000k -c:v mpeg4 {output_video_path}'
        os.system(cmd_str)
Example #7
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
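
write_results is called throughout these examples but its body is never shown. Below is a minimal sketch of what it plausibly does for data_type == 'mot' (one row per box in the MOT Challenge text layout: frame, id, x, y, w, h, score, then -1 placeholders); this is an assumption inferred from the (frame_id, tlwhs, ids) tuples built above, not the project's exact helper:

def write_results(filename, results, data_type):
    # Hypothetical reconstruction for data_type == 'mot' only.
    if data_type != 'mot':
        raise ValueError('unsupported data type: {}'.format(data_type))
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            for (x, y, w, h), track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                f.write('{},{},{:.2f},{:.2f},{:.2f},{:.2f},1,-1,-1,-1\n'.format(
                    frame_id, track_id, x, y, w, h))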
Example #8
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadImages(
        opt.input_path, opt.input_detection_path)  #, out_size = (640,480)
    result_filename = os.path.join(result_root, 'baseline.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    bbox_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'bbox_detection')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             bbox_dir=bbox_dir,
             show_image=False,
             frame_rate=frame_rate)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #9
File: demo.py  Project: grossular/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    file_name = osp.basename(opt.input_video).split('.')[0].replace(' ', '_')
    model_name = osp.basename(opt.load_model).split('.')[0]
    base_name = f'{file_name}_{model_name}_{opt.conf_thres}'

    logger.info('Starting tracking...')
    logger.info(f'Working on: {opt.input_video}')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, f'{base_name}_results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, f'{file_name}-frames')
    eval_seq(opt,
             dataloader,
             'kitti',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, f'{base_name}_result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, f'{file_name}-frames'), output_video_path)
        os.system(cmd_str)
Example #10
File: demo.py  Project: isangu/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    # dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    # result_filename = os.path.join(result_root, 'results.txt')
    # frame_rate = dataloader.frame_rate

    eval_seq_realtime(opt)
Example #11
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'bdd'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        dataloader = datasets.LoadImages(data_root, opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        gt_filename = os.path.join(result_root, '{}_gt.txt'.format(seq))
        frame_rate = 30
        # eval_det(opt, dataloader, data_type, result_filename,
        #          save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename, gt_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type, gt_filename)
        # accs.append(evaluator.eval_file(result_filename))
        for i in range(len(class_names_valid)):
            accs.append(evaluator.eval_file_sp(i, result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, names=class_names_valid, metrics=metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    print('mMOTA:', summary['mota'].mean())
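
The timing summary used by this and the other main variants is a call-weighted average: each sequence contributes average_time * calls seconds, so np.dot(timer_avgs, timer_calls) is the total wall time. A small numeric check with made-up numbers:

import numpy as np

# Two hypothetical sequences: 0.05 s/frame over 600 frames,
# 0.08 s/frame over 300 frames.
timer_avgs = np.asarray([0.05, 0.08])
timer_calls = np.asarray([600, 300])

all_time = np.dot(timer_avgs, timer_calls)   # 30 + 24 = 54 seconds
avg_time = all_time / np.sum(timer_calls)    # 54 / 900 = 0.06 s/frame
print('FPS: {:.2f}'.format(1.0 / avg_time))  # ~16.67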
Example #12
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    print(opt)
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
#        print(osp.join(data_root, seq, 'img1'))
        dataloader = datasets.LoadImages(osp.join(data_root, seq), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = None #open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = 15 #int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
Example #13
def eval_det(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        # debug harness: skip every frame except frame 302
        if frame_id < 302:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>20:
            #     break

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        dets = tracker.detect(blob, img0)
        # print(path, dets)
        tlbrs = []
        scores = []
        class_ids = []
        for det in dets:
            tlbrs.append(det[:4])
            scores.append(det[4])
            class_ids.append(int(det[5] - 1))
        # print(class_ids)
        if show_image or save_dir is not None:
            online_im = vis.plot_detections(img0,
                                            tlbrs,
                                            scores=None,
                                            ids=class_ids)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
Example #14
def eval_seq(opt,
             data_path,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    tracker = JDETracker(opt)
    # timer = Timer()
    results = []
    frame_id = 0
    frame_nums = 60  #len(os.listdir(data_path))//2
    #np_res = []
    for _ in range(frame_nums):
        frame_id += 1
        dets = np.loadtxt(os.path.join(data_path,
                                       str(frame_id) + '.txt'),
                          dtype=np.float32,
                          delimiter=',')

        online_targets = tracker.update(dets)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if tlwh[2] * tlwh[3] > opt.min_box_area and tlwh[2] / tlwh[3] < 1.6:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # np_res.append([frame_id,tid,tlwh[0],tlwh[1],tlwh[2],tlwh[3],1,0])

        ## save results
        results.append((frame_id, online_tlwhs, online_ids))

        if show_image or save_dir is not None:
            if save_dir:
                mkdir_if_missing(save_dir)
            img = cv2.imread(os.path.join(data_path, str(frame_id) + '.jpg'))
            online_im = vis.plot_tracking(img,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id)
        # if show_image:
        #     cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    # save results
    write_results(result_filename, results, data_type)
Example #15
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    tracklist = os.listdir(opt.data_root)
    for tra in tracklist:
        data_path = os.path.join(opt.data_root, tra)
        result_filename = os.path.join(result_root, tra + '.txt')
        frame_dir = osp.join(result_root, tra)
        eval_seq(opt,
                 data_path,
                 'mot',
                 result_filename,
                 save_dir=frame_dir,
                 show_image=False)
        BaseTrack.clear_id()
Example #16
def demo(opt):
    paths = pickle.load(open(opt.paths_pkl, 'rb'))
    gpu_list = opt.custom_gpus.split(',')
    if opt.range[1] == -1:
        opt.range[1] = len(paths)
    tasks = paths[opt.range[0]:opt.range[1]]
    to_del_list = []
    for idx, path in enumerate(tasks):
        video_name = osp.splitext(osp.split(path)[1])[0]
        result_root = opt.output_root if opt.output_root != '' else '.'
        result_filename = os.path.join(result_root, '%s.txt' % video_name)
        if os.path.exists(result_filename):
            to_del_list.append(idx)
    # keep only the tasks whose result file does not exist yet
    deleted_tasks = [
        tasks[idx] for idx in range(len(tasks)) if idx not in to_del_list
    ]
    opt.range[0] = 0
    opt.range[1] = len(deleted_tasks)
    if len(gpu_list) > 1:
        pickle.dump(deleted_tasks, open('cache.pkl', 'wb'))
        opt.paths_pkl = 'cache.pkl'
        multiproc(opt, gpu_list, len(deleted_tasks))
    else:
        for idx, path in enumerate(deleted_tasks):
            video_name = osp.splitext(osp.split(path)[1])[0]
            result_root = opt.output_root if opt.output_root != '' else '.'
            mkdir_if_missing(result_root)
            result_filename = os.path.join(result_root, '%s.txt' % video_name)
            if os.path.exists(result_filename):
                continue

            # logger.info('Starting tracking...')
            # dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
            dataloader = datasets.LoadVideo(path, opt.img_size)
            frame_rate = dataloader.frame_rate
            frame_dir = None if opt.output_format == 'text' else osp.join(
                result_root, 'frame')
            eval_seq(opt,
                     dataloader,
                     'mot',
                     result_filename,
                     save_dir=frame_dir,
                     show_image=False,
                     frame_rate=frame_rate,
                     tasks_num=len(deleted_tasks),
                     idx=idx)
Example #17
def recogniton():
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    print("start tracking")
    dataloader = datasets.LoadVideo(0, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else os.path.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=True,
             frame_rate=frame_rate)
Example #18
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'bdd'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        dataloader = datasets.LoadImages(data_root, opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        frame_rate = 30
        # eval_det(opt, dataloader, data_type, result_filename,
        #          save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))
Example #19
File: demo.py  Project: pxthanh98/demxe
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate)
Example #20
def eval_seq(opt,
             dataloader,
             data_type,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    writer = VideoWriter(save_dir, dataloader)
    for path, img, img0 in dataloader:

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            # cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
            writer.write(online_im)
        frame_id += 1
    print("***************************** DONE *****************************")
Example #21
def recogniton(opt):

    print("start tracking")
    if opt.input_stream == '':
        dataloader = datasets.LoadVideo(opt.input_camera, opt.img_size)
    else:
        dataloader = datasets.LoadVideo(opt.input_stream, opt.img_size)

    frame_rate = dataloader.frame_rate

    if opt.save:
        result_root = opt.output_root if opt.output_root != '' else '.'
        mkdir_if_missing(result_root)
        result_filename = os.path.join(result_root, 'results.txt')
        frame_dir = None if opt.output_format == 'text' else os.path.join(
            result_root, 'frame')
        eval_seq(opt, dataloader, 'mot', result_filename,
                 save_dir=frame_dir, show_image=opt.show, frame_rate=frame_rate,
                 use_cuda=opt.gpus != [-1])
    else:
        eval_seq(opt, dataloader, 'mot', show_image=opt.show,
                 frame_rate=frame_rate, use_cuda=opt.gpus != [-1])
Example #22
def demo(opt, polygon1, polygon2, prepath=None, cam_id=None):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    polygon, paths = load_zone_anno(opt.input_meta)
    if prepath is not None:
        paths = prepath
    polygon = np.int32(polygon1)
    #line1,line2=[polygon[4],polygon[3]],[polygon[1],polygon[2]]
    polygon2, _ = np.int32(polygon2), None
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    print(cam_id)
    frame_tracking_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame_tracking')
    bbox_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'bbox_detection')
    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame_dir')

    eval_seq(opt,
             dataloader,
             polygon,
             paths,
             'mot',
             result_filename,
             frame_dir=frame_dir,
             save_dir=frame_tracking_dir,
             bbox_dir=bbox_dir,
             show_image=False,
             frame_rate=frame_rate,
             polygon2=polygon2,
             line1=None,
             line2=None,
             cam_id=cam_id)
Example #23
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    # result_root = os.path.join(data_root, '..', 'results', exp_name)
    result_root = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name, seq) if save_images or save_videos else None
        video_dir = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name) if save_images or save_videos else None
        mkdir_if_missing(output_dir)
        mkdir_if_missing(video_dir)

        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        frame_rate = 15
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # # eval
        # logger.info('Evaluate seq: {}'.format(seq))
        # evaluator = Evaluator(data_root, seq, data_type)
        # accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            # output_video_path = osp.join(video_dir, '{}.mp4'.format(seq))
            # cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # # cmd_str = 'ffmpeg -f image2 -i {}/%d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # os.system(cmd_str)
            path = output_dir

            filelist_0 = os.listdir(path)  # all file names in that directory
            filelist = [str(i) for i in range(1, len(filelist_0) + 1)]

            avi_name = join(video_dir, seq + ".avi")  # export path

            item = path + '/1.jpg'
            ori_im = cv2.imread(item)
            video = cv2.VideoWriter(avi_name,
                                    cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                    15, (ori_im.shape[1], ori_im.shape[0]))
            for id_img in filelist:
                item = path + '/' + id_img + '.jpg'
                ori_im = cv2.imread(item)
                video.write(ori_im)
            video.release()
    
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
Example #24
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    # check that the save directory exists and create it if it does not
    if save_dir:
        mkdir_if_missing(save_dir)

    # create the JDETracker, which performs the following steps (see init in src/lib/tracker/multitracker.py):
    # 1) check whether the program should run on CPU or GPU
    # 2) build the model and put it in evaluation mode
    # 3) apply a Kalman filter: a tool for estimating the state of a linear dynamic system perturbed by noise, based on linearly dependent measurements (observations)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # initialize the timer that monitors processing time
    timer = Timer()
    # initialize the results list
    results = []
    # frame counter
    frame_id = 0
    # iterate over the dataloader items, i.e. the dataset images loaded in main
    for path, img, img0 in dataloader:
        # log the frame rate every 20 processed frames
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()

        # this call is specific to NVIDIA GPUs (it uses CUDA); replace it with a CPU tensor to run the program on CPU
        # it creates a multidimensional array of at least 3 dimensions to represent the image img

        blob = torch.from_numpy(img).cuda().unsqueeze(0)

        # call JDETracker.update, which performs the following steps (src/lib/tracker/multitracker.py):
        # 1) blob and img0 are passed in as parameters
        # 2) height and width are extracted from both and stored
        # 3) a variable c stores a float array holding half the height and width of img0
        # 4) a variable s stores max(float(inp_width) / float(inp_height) * height, width) * 1.0
        # 5) network forward, get detections & embeddings: detect objects and add them to a pool of tracklets
        # 6) first association with embeddings: per the paper, the embedding's role is to distinguish different kinds of objects
        # 7) second association with IoU
        # 8) initialize new STracks
        # 9) update the state
        # 10) return the currently active STracks
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # iterate over the tracked STracks
        for t in online_targets:

            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
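
The commentary in this example summarizes JDETracker.update at a high level. Purely as a schematic of the two-stage association it describes (appearance embeddings first, IoU on the leftovers), and not FairMOT's actual implementation, here is a self-contained sketch; associate, greedy_assign, and both distance functions are hypothetical names, and the greedy matcher stands in for the Hungarian assignment the real tracker uses:

import numpy as np

def cosine_distance(emb_a, emb_b):
    # Rows are embeddings; distance = 1 - cosine similarity.
    a = emb_a / np.linalg.norm(emb_a, axis=1, keepdims=True)
    b = emb_b / np.linalg.norm(emb_b, axis=1, keepdims=True)
    return 1.0 - a @ b.T

def iou_distance(boxes_a, boxes_b):
    # Boxes as [x1, y1, x2, y2]; distance = 1 - IoU.
    dist = np.ones((len(boxes_a), len(boxes_b)))
    for i, A in enumerate(boxes_a):
        for j, B in enumerate(boxes_b):
            xA, yA = max(A[0], B[0]), max(A[1], B[1])
            xB, yB = min(A[2], B[2]), min(A[3], B[3])
            inter = max(0.0, xB - xA) * max(0.0, yB - yA)
            union = ((A[2] - A[0]) * (A[3] - A[1]) +
                     (B[2] - B[0]) * (B[3] - B[1]) - inter)
            dist[i, j] = 1.0 - inter / max(union, 1e-6)
    return dist

def greedy_assign(cost, thresh):
    # Cheapest-pair-first matching; real trackers use Hungarian/lap instead.
    matches, used_r, used_c = [], set(), set()
    order = np.dstack(np.unravel_index(np.argsort(cost, axis=None),
                                       cost.shape))[0]
    for r, c in order:
        if cost[r, c] < thresh and r not in used_r and c not in used_c:
            matches.append((r, c))
            used_r.add(r)
            used_c.add(c)
    u_rows = [r for r in range(cost.shape[0]) if r not in used_r]
    u_cols = [c for c in range(cost.shape[1]) if c not in used_c]
    return matches, u_rows, u_cols

def associate(trk_boxes, trk_embs, det_boxes, det_embs,
              emb_thresh=0.4, iou_thresh=0.5):
    # Stage 1: appearance. Stage 2: spatial overlap on the leftovers.
    m1, u_trk, u_det = greedy_assign(cosine_distance(trk_embs, det_embs),
                                     emb_thresh)
    cost2 = iou_distance([trk_boxes[i] for i in u_trk],
                         [det_boxes[j] for j in u_det])
    m2, u_trk2, u_det2 = greedy_assign(cost2, iou_thresh)
    m2 = [(u_trk[r], u_det[c]) for r, c in m2]
    # Unmatched detections would seed new STracks; unmatched tracks are
    # marked lost and eventually removed.
    return m1 + m2, [u_trk[r] for r in u_trk2], [u_det[c] for c in u_det2]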
Example #25
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []

    # loop over every frame sequence of the MOT split passed as a parameter
    for seq in seqs:
        # decide the folder where the processed outputs will be saved
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        # message shown to the user to report execution progress
        logger.info('start seq: {}'.format(seq))
        # load the images contained in the dataset for the sequence passed via seq
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        # the text file with the tracking result is saved to result_filename
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # read the sequence metadata file for its settings
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # compute the frame rate here, i.e. how many frames per unit of time
        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        # run eval_seq
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
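
The frame rate above is recovered by slicing the raw seqinfo.ini text, which breaks if whitespace or key order changes. Here is a sketch of the same lookup with the standard-library configparser (MOT-style seqinfo.ini files keep frameRate under a [Sequence] section); read_frame_rate is a hypothetical helper name:

import configparser
import os

def read_frame_rate(data_root, seq):
    # Read frameRate from a MOT-style seqinfo.ini.
    parser = configparser.ConfigParser()
    parser.read(os.path.join(data_root, seq, 'seqinfo.ini'))
    return parser.getint('Sequence', 'frameRate')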
Example #26
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    out_queue = Deque(maxlen=6)  # rolling buffer of recent ROI features (Deque presumably aliases collections.deque)
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        time_clean()
        time_sync('tracker all')
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        with torch.no_grad():
            time_sync('forward_half and queue')
            out = tracker.model.backend.forward_half(
                blob, roi_num=tracker.model.backend.roi_top_k)
            out_ref = {}
            out_ref.update(out)
            out_ref['rois'], _ = tracker.model.backend.get_rois(
                out['rpn_map'], 300)
            refs = tracker.model.backend.forward_rois(out_ref)
            out_queue.append(refs)
            while len(out_queue) < out_queue.maxlen:
                out_queue.append(refs)
            time_sync('forward_half and queue')
            time_sync('forward_all')
            output, stuffs, _ = tracker.model.forward_half(out, out_queue)
            time_sync('forward_all')
        # info_debug(output)
        # jj = output['rois'][0] % out['rpn_map'].shape[-1]
        # ii = output['rois'][0] // out['rpn_map'].shape[-1]
        # u = output['rpn_map'][:, :, ii, jj]
        # print(output['hm'][:20])
        # input()

        # import pickle
        # print(u.shape)
        # print(u.flatten().sigmoid())
        # print((u.flatten().sigmoid() - output['hm'].flatten().sigmoid()).abs().max())
        # with open('two_none.pkll', 'wb') as fd:
        #     pickle.dump(output, fd)
        # input()
        time_sync('tracker update')
        online_targets = tracker.update(blob, img0, output)
        time_sync('tracker update')
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        time_sync('tracker all')
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    import pickle
    with open(result_filename + '.dets.pkl', 'wb') as fd:
        pickle.dump(tracker.raw_dets, fd)
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
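
time_clean and time_sync are project-specific profiling helpers that are not shown in this snippet. Below is a minimal sketch of how such a paired-call GPU timer could work, inferred (as an assumption) from the start/stop call pairs above; the CUDA synchronize is what makes asynchronous GPU work show up in the measurement:

import time
import torch

_t0, _totals = {}, {}

def time_clean():
    # Reset all accumulated timings.
    _t0.clear()
    _totals.clear()

def time_sync(tag):
    # First call with a tag starts the clock; the second call stops it
    # and accumulates the elapsed time under that tag.
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    now = time.perf_counter()
    if tag in _t0:
        _totals[tag] = _totals.get(tag, 0.0) + (now - _t0.pop(tag))
    else:
        _t0[tag] = now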
Example #27
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             conf_thres=0.3):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = GNNTracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0, p_img_path, p_img) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        if i == 0:
            p_boxes, init_img_path, p_img = dataloader.initialize(
                use_letter_box=opt.use_letter_box)
        else:
            p_boxes, p_img = prepare_prev_img(dataloader, online_targets, opt,
                                              p_img)

        if opt.use_roi_align:
            p_crops = p_boxes.clone()
            _, h, w = p_img.shape
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]
            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            # convert boxes from xyxy to normalized according to p_img dimensions
            p_crops[:, 0] = p_crops[:, 0] / w
            p_crops[:, 1] = p_crops[:, 1] / h
            p_crops[:, 2] = p_crops[:, 2] / w
            p_crops[:, 3] = p_crops[:, 3] / h
            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=p_img.unsqueeze(0).cuda(),
                conf_thres=conf_thres)
        else:
            p_crops = torchvision.ops.roi_align(input=p_img.unsqueeze(0),
                                                boxes=[p_boxes],
                                                output_size=opt.crop_size)
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]

            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=None,
                conf_thres=conf_thres)
        online_tlwhs = []
        online_ids = []
        online_confs = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            t_conf = t.score
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_confs.append(t_conf)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_confs))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_confs,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #28
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir='.',
             show_image=True,
             frame_rate=25):

    tracker = JDETracker(opt, frame_rate=frame_rate)
    p = path_root_index[5]
    if save_dir:
        save_dir = osp.join(save_dir, p)
        mkdir_if_missing(save_dir)
    image_path = getimage_path(path_root + p)
    timer = Timer()
    results = []
    frame_id = -1
    result_array_list = []
    result = []
    for path in image_path:

        # img=cv2.imread(path)

        img0 = cv2.imread(path)  # BGR
        # assert img0 is not None, 'Failed to load ' + img_path
        img_height = img0.shape[0]
        img_width = img0.shape[1]
        # print(img_height,img_width)
        # print(img0.shape)
        # Padded resize
        img, _, _, _ = letterbox(img0, height=608, width=1088)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        frame_id += 1

        # if frame_id % 20 == 0:
        #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id==2:
        #   break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_cref = []
        # result_array_list=[]
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            confidence = t.score

            area = tlwh[2] * tlwh[3]
            vertical = tlwh[2] / tlwh[3] > 1.6
            # low-confidence tracks must pass a larger minimum-area gate
            min_area = 2700 if confidence < 0.3 else 1000
            if min_area < area < 100000 and not vertical:
                res = [frame_id, tid] + list(tlwh) + [1, 0]
                online_tlwhs.append(tlwh)
                result_array_list.append(res)
                online_cref.append(confidence)
                online_ids.append(tid)
        # if frame_id==2:
        #   break
        timer.toc()
        # save results
        print(frame_id)
        # if result_array_list:

        online_tlwhs = np.array(online_tlwhs)
        online_ids = np.array(online_ids)
        online_cref = np.array(online_cref)
        # print(online_tlwhs)
        # print(online_tlwhs.shape)
        # print(online_ids.shape)
        # pick=non_max_suppression(online_tlwhs,0.7,online_cref)

        # online_tlwhsnms=online_tlwhs[pick]
        # online_idsnms=online_ids[pick]
        # online_crefnms=online_cref[pick]
        # result_array_list2=np.array(result_array_list).copy()[pick]
        # result+=list(result_array_list2)
        # print(result)

        # print(frame_id,online_idsnms)
        # result.append(online_tlwhsnms)
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_cref,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    res_array = np.array(result_array_list)
    # print(res_array)
    # print(res_array.shape)
    # fold boxes that start left of or above the frame back inside: shrink
    # w/h by the negative overhang, then clamp x/y to zero
    res_array[:, [4]] = np.where(res_array[:, [2]] < 0,
                                 res_array[:, [2]] + res_array[:, [4]],
                                 res_array[:, [4]])
    res_array[:, [5]] = np.where(res_array[:, [3]] < 0,
                                 res_array[:, [3]] + res_array[:, [5]],
                                 res_array[:, [5]])
    res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
    # print(res_array)
    res_array = np.round(res_array, 0)
    # res_array=cutmorecord(res_array,img_width,img_height)
    # print(res_array)
    np.savetxt("{}.txt".format(p),
               res_array,
               fmt='%d,%d,%d,%d,%d,%d,%d,%d',
               delimiter=',')

    # save results
    # write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
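
A small numeric check of the clipping arithmetic at the end of this example, using one made-up row in the [frame, id, x, y, w, h, 1, 0] layout: a box starting 5 px left of the frame loses that overhang from its width before x is clamped to zero.

import numpy as np

res_array = np.array([[1, 7, -5.0, 10.0, 20.0, 30.0, 1, 0]])
res_array[:, [4]] = np.where(res_array[:, [2]] < 0,
                             res_array[:, [2]] + res_array[:, [4]],
                             res_array[:, [4]])
res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
print(res_array[0, 2:6])  # [ 0. 10. 15. 30.]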
Example #29
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        # debug harness: process only frames 300-302
        if frame_id < 300:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        filename = path.split('/')[-1]
        # a new sequence starts at frame 0000001: reset the tracker and
        # offset ids by tid_max so track ids stay globally unique
        if '0000001' in path:
            tid_max = tid_temp + 1
            print(path, tid_max)
            tracker = JDETracker(opt, frame_rate=frame_rate)

        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id >20:
        #     break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update_sep3(
            blob,
            img0,
            conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = []  #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:  # and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            tid_temp = max(tid, tid_temp)
        timer.toc()
        # save results
        results.append((filename, online_tlwhs, online_ids, online_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #30
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    '''
    img:  Normalized RGB image
    img0: BGR image
    '''
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # print("\n==> blob.size", blob.size()) 1, 3, 608, 1088
        '''
        tracker update
        '''
        online_targets = tracker.update(blob, img0)

        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        ''' 
        print("==> [track.eval_seq] tracker's output-> online_targets:", online_targets)
        try:
            print("==> [track.eval_seq] len(online_tlwhs):", len(online_tlwhs))
            print("==> [track.eval_seq] online_tlwhs[0]:", online_tlwhs[0])
            print("==> [track.eval_seq] online_ids[0]:", online_ids[0])
        except:
            pass
        
        partial output:
        ==> [multi-tracker.update] len(output_stracks): 5
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-13), OT_2_(1-13), OT_3_(1-13), OT_20_(10-13), OT_7_(2-13)]
        ==> [track.eval_seq] len(online_tlwhs): 5
        ==> [track.eval_seq] online_tlwhs[0]: [     802.38      163.64      24.074      57.376]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-14), OT_2_(1-14), OT_3_(1-14), OT_20_(10-14), OT_7_(2-14), OT_23_(13-14), OT_13_(4-14)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     809.96      163.69      25.305      60.319]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-15), OT_2_(1-15), OT_3_(1-15), OT_20_(10-15), OT_7_(2-15), OT_23_(13-15), OT_19_(10-15)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     818.46       164.4      26.832      63.971]
        ==> [track.eval_seq] online_ids[0]: 1
        '''

        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls