Example 1
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    data_type = 'mot'
    result_root = '/home/liujierui/proj/deep_sort_pytorch-master/demo/A-track/ensemble_MOT17_0.75'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        
    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
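
Every example below ends with the same py-motmetrics summary step, so it is worth seeing in isolation. A minimal, self-contained sketch of that pipeline on synthetic boxes (the ids and boxes here are illustrative, not taken from the code above):

import motmetrics as mm
import numpy as np

# one accumulator per sequence; auto_id numbers the frames automatically
acc = mm.MOTAccumulator(auto_id=True)

# one frame: two ground-truth objects vs. two hypotheses, matched by IoU
gt_boxes = np.array([[10, 10, 20, 40], [60, 10, 20, 40]])    # x, y, w, h
hyp_boxes = np.array([[12, 11, 20, 40], [300, 10, 20, 40]])  # second one is a false positive
dists = mm.distances.iou_matrix(gt_boxes, hyp_boxes, max_iou=0.5)
acc.update([1, 2], [101, 102], dists)

mh = mm.metrics.create()
summary = mh.compute(acc, metrics=mm.metrics.motchallenge_metrics, name='toy')
print(mm.io.render_summary(summary,
                           formatters=mh.formatters,
                           namemap=mm.io.motchallenge_metric_names))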
Example 2
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'bdd'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        dataloader = datasets.LoadImages(data_root, opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        gt_filename = os.path.join(result_root, '{}_gt.txt'.format(seq))
        frame_rate = 30
        # eval_det(opt, dataloader, data_type, result_filename,
        #          save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename, gt_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type, gt_filename)
        # accs.append(evaluator.eval_file(result_filename))
        for i in range(len(class_names_valid)):
            accs.append(evaluator.eval_file_sp(i, result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, names=class_names_valid, metrics=metrics)  # per-class accumulators, so names are class names
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    print('mMOTA:', summary['mota'].mean())  # mean MOTA over classes (BDD-style mMOTA)
Example 3
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    print(opt)
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # print(osp.join(data_root, seq, 'img1'))
        dataloader = datasets.LoadImages(osp.join(data_root, seq), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = None  # open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = 15  # int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
Example 4
def main(data_root='', seqs=('', ), args=""):
    logger = get_logger()
    logger.setLevel(logging.INFO)
    data_type = 'mot'
    result_root = os.path.join(Path(data_root), "mot_results")
    #print("RESULT ROOT:",result_root)
    mkdir_if_missing(result_root)

    cfg = get_config()
    #cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)

    fcos = FCOSEvaluator("weights/FCOS_imprv_R_50_FPN_1x.pth",
                         config="./configs/fcos/fcos_imprv_R_50_FPN_1x.yaml")
    #print("fcos.config",fcos.config)
    fcos.prepare_detector()  #build_detector(cfg, use_cuda=use_cuda)

    # run tracking
    accs = []
    for seq in seqs:
        logger.info('start seq: {}'.format(seq))
        result_filename = "./tracking_output/results.txt"  #os.path.join(result_root, '{}.txt'.format(seq))
        video_path = data_root + "/" + seq + "/" + seq + ".mp4"
        print(video_path)
        with VideoTracker(cfg, args, video_path, fcos=fcos) as vdo_trk:
            vdo_trk.run()

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(summary,
                           os.path.join(result_root, 'summary_global.xlsx'))
Example 5
def measure2files(results_root, data_root, seqs):
    """

    @param results_root:        mot轨迹预测结果文件的根目录,文件中格式 <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
    @param data_root:           gt文件的路径,不包含后三级,因为在Evaluator初始化函数中已经写了全路径的拼接方式
    @param seqs:                gt路径的倒数第三级路径
    @return:                    存储seqs中,每一个路径下track结果的评价指标,以及全部文件夹汇总后的指标
    """

    data_type = 'mot'
    result_root = "/home/shuai.li/code/FairMOT/MOT15/images/results/temp/"
    exp_name = "test_evalMot15"

    accs = []
    # eval
    for seq in tqdm(seqs):

        result_filename = osp.join(results_root, seq) + '.txt'
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(
            data_root, seq,
            data_type)  # the constructor loads the ground-truth data from data_root and seq
        accs.append(evaluator.eval_file(
            result_filename))  # read the stored tracking results and evaluate them; one acc per sequence
        # if save_videos:
        #     output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
        #     cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
        #     os.system(cmd_str)

    # get summary
    metrics = mm.metrics.motchallenge_metrics  # the standard MOT challenge metrics
    mh = mm.metrics.create()  # metrics host; feed it accumulators to compute the metrics
    summary = Evaluator.get_summary(
        accs, seqs, metrics)  # compute the metrics from the accumulators, sequence names and metric list
    strsummary = mm.io.render_summary(  # format the metric table as a string for display
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names)
    print(strsummary)  # print to the console
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
Example 6
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []

    # iterate over every sequence of the MOT set passed in as a parameter
    for seq in seqs:
        # decide the folder where the processed outputs will be saved
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        # progress message to keep the user informed
        logger.info('start seq: {}'.format(seq))
        # load the images of the sequence given by seq
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        # the text file with the tracking result is saved as result_filename
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # read the sequence metadata file
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # extract the frame rate, i.e. how many frames per unit of time
        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        # run eval_seq
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
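
The frameRate lookup above slices the raw text of seqinfo.ini by character offsets, which silently breaks if the key order or spacing changes (Examples 9 and 10 use the same trick). Since the file is standard INI, a sketch of a sturdier reader, assuming the usual MOTChallenge [Sequence] section (the helper name is hypothetical, not part of the original code):

import configparser
import os

def read_seqinfo(seq_dir):
    """Parse a MOTChallenge seqinfo.ini and return (imDir, frameRate)."""
    parser = configparser.ConfigParser()
    parser.read(os.path.join(seq_dir, 'seqinfo.ini'))
    info = parser['Sequence']
    return info.get('imDir', 'img1'), info.getint('frameRate')

# usage, with the variables of the surrounding examples:
# img_dir, frame_rate = read_seqinfo(osp.join(data_root, seq))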
Example 7
def visualize_fp_fns_seq(result_dir,
                         dataroot,
                         seqs,
                         return_fp=True,
                         return_fn=True):
    from tracking_utils.evaluation import Evaluator
    accs = []
    fp_info = []
    tp_info = []
    for seq in seqs:
        img_dir = osp.join(dataroot, seq, 'img1')
        output_dir = osp.join(f"{result_dir.replace('results', 'outputs')}",
                              'analyze', seq)
        os.makedirs(output_dir, exist_ok=True)
        evaluator = Evaluator(data_root=dataroot,
                              seq_name=seq,
                              data_type='mot',
                              return_fp=return_fp,
                              return_fn=return_fn)

        if return_fp and return_fn:
            acc, fns, fps, matched, fp_confs = evaluator.eval_file(
                osp.join(result_dir, f"{seq}.txt"))
            accs.append(acc)

            # draw fp and fn
            for frame_id in fps.keys():

                fp = fps[frame_id]
                fp_conf = fp_confs[frame_id]
                fp[:, 2:] += fp[:, :2]  # convert (x, y, w, h) to (x1, y1, x2, y2)
                fn = fns[frame_id]
                fn[:, 2:] += fn[:, :2]
                match = matched[frame_id]
                match['gt'][:, 2:] += match['gt'][:, :2]
                match['trk'][:, 2:] += match['trk'][:, :2]

                im = cv2.imread(osp.join(img_dir, f"{frame_id:06d}.jpg"))
                for box, conf in zip(fp, fp_conf):
                    im = cv2.rectangle(im, tuple(box[0:2].astype(int)),
                                       tuple(box[2:4].astype(int)),
                                       (0, 0, 255), 2)
                    im = cv2.putText(im,
                                     f"c:{conf:.3f}",
                                     (int(box[0]), int(box[1] + 30)),
                                     cv2.FONT_HERSHEY_PLAIN,
                                     0.75, (0, 255, 255),
                                     thickness=1)
                    height = box[3] - box[1]
                    im = cv2.putText(im,
                                     f"h:{height:.3f}",
                                     (int(box[0]), int(box[1] + 60)),
                                     cv2.FONT_HERSHEY_PLAIN,
                                     0.75, (0, 255, 255),
                                     thickness=1)
                    fp_info.append([height, conf])

                for box in fn:
                    im = cv2.rectangle(im, tuple(box[0:2].astype(int)),
                                       tuple(box[2:4].astype(int)), (0, 0, 0),
                                       2)

                for gt, trk, conf in zip(match['gt'], match['trk'],
                                         match['trk_confs']):
                    im = cv2.rectangle(im, tuple(gt[0:2].astype(int)),
                                       tuple(gt[2:4].astype(int)), (0, 255, 0),
                                       2)
                    im = cv2.rectangle(im, tuple(trk[0:2].astype(int)),
                                       tuple(trk[2:4].astype(int)),
                                       (255, 255, 0), 2)
                    im = cv2.putText(im,
                                     f"c:{conf:.3f}",
                                     (int(trk[0]), int(trk[1] + 30)),
                                     cv2.FONT_HERSHEY_PLAIN,
                                     0.75, (0, 255, 255),
                                     thickness=1)
                    height = trk[3] - trk[1]
                    im = cv2.putText(im,
                                     f"h:{height:.3f}",
                                     (int(trk[0]), int(trk[1] + 60)),
                                     cv2.FONT_HERSHEY_PLAIN,
                                     0.75, (0, 255, 255),
                                     thickness=1)
                    tp_info.append([height, conf])
                cv2.imwrite(osp.join(output_dir, f"{frame_id:06d}.jpg"), im)
        else:
            accs.append(evaluator.eval_file(osp.join(result_dir,
                                                     f"{seq}.txt")))
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    fp_info = np.array(fp_info)
    tp_info = np.array(tp_info)
    import ipdb
    ipdb.set_trace()  # drop into the debugger to inspect fp_info / tp_info interactively
Example 8
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    # result_root = os.path.join(data_root, '..', 'results', exp_name)
    result_root = os.path.join('/home/liujierui/proj/FairMOT-master/outputs',
                               exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(
            '/home/liujierui/proj/FairMOT-master/outputs', exp_name,
            seq) if save_images or save_videos else None
        video_dir = os.path.join(
            '/home/liujierui/proj/FairMOT-master/outputs',
            exp_name) if save_images or save_videos else None
        mkdir_if_missing(output_dir)
        mkdir_if_missing(video_dir)

        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        frame_rate = 15
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            # output_video_path = osp.join(video_dir, '{}.mp4'.format(seq))
            # cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # # cmd_str = 'ffmpeg -f image2 -i {}/%d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # os.system(cmd_str)
            path = output_dir

            # path=join(self.video_path,str(id_video),'img1')
            filelist_0 = os.listdir(path)  # all file names in this directory
            filelist = [
                '{:06d}'.format(i) for i in range(1,
                                                  len(filelist_0) + 1)
            ]

            avi_name = join(video_dir, seq + ".avi")  # output path

            item = path + '/000001.jpg'
            ori_im = cv2.imread(item)
            video = cv2.VideoWriter(avi_name,
                                    cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                    15, (ori_im.shape[1], ori_im.shape[0]))
            for id_img in filelist:
                item = path + '/' + id_img + '.jpg'
                ori_im = cv2.imread(item)
                video.write(ori_im)
            video.release()

    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
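
A note on the hand-rolled writer above: stream-copying JPEG frames with ffmpeg's -c:v copy (as the commented-out lines did) wraps unmodified MJPEG data in an .mp4, which not every player accepts, presumably why this example writes an MJPG .avi via cv2.VideoWriter instead. A hedged alternative, assuming an ffmpeg build with libx264, would re-encode instead of copy:

# re-encode the numbered frames to H.264 (illustrative; output_dir, video_dir, seq as above)
cmd_str = ('ffmpeg -framerate 15 -i {}/%06d.jpg '
           '-c:v libx264 -pix_fmt yuv420p {}/{}.mp4').format(output_dir, video_dir, seq)
os.system(cmd_str)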
Example 9
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(opt.output_root, opt.exp_id)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    experiment = Experiment(api_key="SK59eWBf9ldDhEMbsQx7IW9G6",
                            project_name="fairmot",
                            workspace="noudvdgevel",
                            auto_param_logging=False,
                            auto_metric_logging=False,
                            auto_output_logging=False)  #Comet experiment.

    hyper_params = {
        "conf_thres": opt.conf_thres,
        "model": opt.load_model.split('/')[-1],
        "data": opt.data_cfg,
        "re_id_dim": opt.reid_dim,
        "architecture": opt.arch
    }
    experiment.log_parameters(hyper_params)
    experiment.set_name(opt.exp_id)

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(result_root, 'outputs',
                                  seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()

        imDir = str(meta_info[meta_info.find('imDir') +
                              6:meta_info.find('\nframeRate')])
        dataloader = datasets.LoadImages(osp.join(data_root, seq, imDir),
                                         opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))

        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))

        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
            experiment.log_asset(output_video_path,
                                 file_name="tracking_results.mp4",
                                 copy_to_tmp=False)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)

    summary_items = summary.to_numpy()
    for c, seq in enumerate(seqs):
        for cc, metric in enumerate(summary):  # iterating the DataFrame yields its metric column names
            experiment.log_metric(metric + "_" + seq, summary_items[c][cc])

    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
    experiment.log_asset(os.path.join(result_root,
                                      'summary_{}.xlsx'.format(exp_name)),
                         file_name="tracking_results.xlsx",
                         copy_to_tmp=False)
Example 10
def main(
    opt,
    data_root="/data/MOT16/train",
    det_root=None,
    seqs=("MOT16-05", ),
    exp_name="demo",
    save_images=False,
    save_videos=False,
    show_image=True,
):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, "..", "results", exp_name)
    mkdir_if_missing(result_root)
    data_type = "mot"

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = (os.path.join(data_root, "..", "outputs", exp_name, seq)
                      if save_images or save_videos else None)
        logger.info("start seq: {}".format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, "img1"),
                                         opt.img_size)
        result_filename = os.path.join(result_root, "{}.txt".format(seq))
        meta_info = open(os.path.join(data_root, seq, "seqinfo.ini")).read()
        frame_rate = int(meta_info[meta_info.find("frameRate") +
                                   10:meta_info.find("\nseqLength")])
        nf, ta, tc = eval_seq(
            opt,
            dataloader,
            data_type,
            result_filename,
            save_dir=output_dir,
            show_image=show_image,
            frame_rate=frame_rate,
        )
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info("Evaluate seq: {}".format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, "{}.mp4".format(seq))
            cmd_str = "ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}".format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info("Time elapsed: {:.2f} seconds, FPS: {:.2f}".format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, "summary_{}.xlsx".format(exp_name)))
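
Finally, the timing summary shared by most of these examples: eval_seq returns a per-sequence average frame time (ta) and timer call count (tc), so np.dot(timer_avgs, timer_calls) is the total wall time, and dividing by the total call count gives the overall seconds per frame. A small self-contained check with made-up numbers:

import numpy as np

timer_avgs = np.asarray([0.05, 0.08])  # average seconds per frame for two sequences
timer_calls = np.asarray([600, 400])   # frames timed in each sequence

all_time = np.dot(timer_avgs, timer_calls)   # 0.05*600 + 0.08*400 = 62.0 s
avg_time = all_time / np.sum(timer_calls)    # 62.0 / 1000 = 0.062 s per frame
print('FPS: {:.2f}'.format(1.0 / avg_time))  # ~16.13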