Example #1
File: demo.py Project: grossular/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)
    file_name = osp.basename(opt.input_video).split('.')[0].replace(' ', '_')
    model_name = osp.basename(opt.load_model).split('.')[0]
    base_name = f'{file_name}_{model_name}_{opt.conf_thres}'

    logger.info('Starting tracking...')
    logger.info(f'Working on: {opt.input_video}')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, f'{base_name}_results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, f'{file_name}-frames')
    eval_seq(opt,
             dataloader,
             'kitti',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, f'{base_name}_result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, f'{file_name}-frames'), output_video_path)
        os.system(cmd_str)
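
These demos shell out to ffmpeg via os.system, which ignores the exit code and breaks on paths containing spaces. As a hedged alternative sketch (encode_frames is a hypothetical helper, not part of FairMOT), the same encode step could go through subprocess.run with an argument list:

import os.path as osp
import subprocess

def encode_frames(frame_dir, output_video_path, fps=25):
    """Encode frame_dir/00000.jpg, 00001.jpg, ... into an MP4 (hypothetical helper)."""
    cmd = [
        'ffmpeg', '-y',
        '-framerate', str(fps),
        '-f', 'image2',
        '-i', osp.join(frame_dir, '%05d.jpg'),
        '-b:v', '5000k',
        '-c:v', 'mpeg4',
        output_video_path,
    ]
    # check=True raises CalledProcessError when ffmpeg fails, instead of
    # silently ignoring the exit code the way os.system does
    subprocess.run(cmd, check=True)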
Example #2
def write_results(filename, results, data_type):
    if data_type == "mot":
        save_format = "{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n"
    elif data_type == "kitti":
        save_format = "{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n"
    else:
        raise ValueError(data_type)

    with open(filename, "w") as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == "kitti":
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id,
                                          id=track_id,
                                          x1=x1,
                                          y1=y1,
                                          x2=x2,
                                          y2=y2,
                                          w=w,
                                          h=h)
                f.write(line)
    logger.info("save results to {}".format(filename))
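
write_results expects results to be a list of (frame_id, tlwhs, track_ids) tuples, as the eval_seq functions further down this page build it. A minimal usage sketch with invented boxes and ids:

# each entry: (frame number, list of (x, y, w, h) boxes, list of track ids)
results = [
    (1, [(100.0, 50.0, 40.0, 80.0)], [1]),
    (2, [(104.0, 52.0, 40.0, 80.0), (300.0, 60.0, 35.0, 75.0)], [1, 2]),
]
write_results('results.txt', results, 'mot')
# results.txt now holds lines such as:
# 1,1,100.0,50.0,40.0,80.0,1,-1,-1,-1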
Example #3
def demo(opt):
    result_root = opt.output_root if opt.output_root != "" else "."
    mkdir_if_missing(result_root)

    logger.info("Starting tracking...")
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, "results.txt")
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == "text" else osp.join(
        result_root, "frame")
    eval_seq(
        opt,
        dataloader,
        "mot",
        result_filename,
        save_dir=frame_dir,
        show_image=False,
        frame_rate=frame_rate,
    )

    if opt.output_format == "video":
        output_video_path = osp.join(result_root, "result.mp4")
        cmd_str = "ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}".format(
            osp.join(result_root, "frame"), output_video_path)
        os.system(cmd_str)
Example #4
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate,
             use_cuda=opt.gpus != [-1])

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'MOT16-03-results.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #5
def write_results(filename, results, data_type):

    # data_type identifies the input format, which can be MOT or KITTI;
    # depending on it, save_format holds the corresponding line template.
    # KITTI is a benchmark suite that evaluates machine-vision algorithms,
    # mainly for automated driving; the else branch raises an error when
    # the data type is neither MOT nor KITTI.

    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)

    # open the file for writing and call it f

    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id,
                                          id=track_id,
                                          x1=x1,
                                          y1=y1,
                                          x2=x2,
                                          y2=y2,
                                          w=w,
                                          h=h)
                f.write(line)
    logger.info('save results to {}'.format(filename))
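
To make the two templates concrete, here is a sketch that formats one invented box in both styles (values are made up for illustration):

mot_fmt = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
kitti_fmt = ('{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} '
             '-10 -10 -10 -1000 -1000 -1000 -10\n')

box = dict(frame=1, id=7, x1=100.0, y1=50.0, w=40.0, h=80.0, x2=140.0, y2=130.0)
print(mot_fmt.format(**box), end='')    # 1,7,100.0,50.0,40.0,80.0,1,-1,-1,-1
print(kitti_fmt.format(**box), end='')  # 1 7 pedestrian 0 0 -10 100.0 50.0 140.0 130.0 ...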
Example #6
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #7
def write_results(filename, results_dict: Dict, data_type: str):
    if not filename:
        return
    path = os.path.dirname(filename)
    if not os.path.exists(path):
        os.makedirs(path)

    if data_type in ('mot', 'mcmot', 'lab'):
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n'
    else:
        raise ValueError(data_type)

    with open(filename, 'w') as f:
        for frame_id, frame_data in results_dict.items():
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id in frame_data:
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id,
                                          id=track_id,
                                          x1=x1,
                                          y1=y1,
                                          x2=x2,
                                          y2=y2,
                                          w=w,
                                          h=h,
                                          score=1.0)
                f.write(line)
    logger.info('Save results to {}'.format(filename))
Example #8
def write_results_score(filename, results, data_type):
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},{s},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)

    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids, scores in results:
            if data_type == 'kitti':
                frame_id -= 1
            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id,
                                          id=track_id,
                                          x1=x1,
                                          y1=y1,
                                          x2=x2,
                                          y2=y2,
                                          w=w,
                                          h=h,
                                          s=score)
                f.write(line)
    logger.info('save results to {}'.format(filename))
Example #9
File: demo.py Project: allenwu5/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')

    assert osp.exists(opt.input_video), f'{opt.input_video} does not exist!'
    file_name, file_ext = osp.splitext(opt.input_video)
    if file_ext in ['.mp4', '.avi']:  # splitext keeps the dot, so compare against '.avi'
        dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    else:
        dataloader = datasets.LoadImages(opt.input_video, opt.img_size)
        dataloader.frame_rate = int(round(opt.frame_rate))
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate,
             use_cuda=opt.gpus != [-1])

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'MOT16-03-results.mp4')
        frame_path = osp.join(result_root, 'frame')
        cmd_str = f'ffmpeg -framerate {opt.frame_rate} -f image2 -i {frame_path}/%05d.jpg -b 5000k -c:v mpeg4 {output_video_path}'
        os.system(cmd_str)
Example #10
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    data_type = 'mot'
    result_root = '/home/liujierui/proj/deep_sort_pytorch-master/demo/A-track/ensemble_MOT17_0.75'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        
    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
Example #11
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    print('OPT: {}'.format(opt.input_video))
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    try:
        eval_seq(opt,
                 dataloader,
                 'mot',
                 result_filename,
                 save_dir=frame_dir,
                 show_image=True,
                 frame_rate=frame_rate)
    except Exception as e:
        logger.info(e)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #12
def run_mot_16(opt):
    list_vid = [
        'MOT16-02'
    ]  #,'MOT16-04','MOT16-05','MOT16-09','MOT16-10','MOT16-11','MOT16-13'
    for vid in list_vid:
        result_root = '../results/' + vid
        mkdir_if_missing(result_root)

        logger.info('Starting tracking...')
        out_size = (1920, 1080) if vid != 'MOT16-05' else (640, 480)
        dataloader = datasets.LoadImages(
            '../MOT16_Data/train/' + vid + '/img1',
            '../MOT16_Data/generate_detection/' + vid,
            out_size=out_size)
        result_filename = os.path.join(result_root, 'iou.txt')
        frame_rate = dataloader.frame_rate

        frame_dir = None if opt.output_format == 'text' else osp.join(
            result_root, 'frame')
        bbox_dir = None if opt.output_format == 'text' else osp.join(
            result_root, 'bbox_detection')
        eval_seq(opt,
                 dataloader,
                 'mot',
                 result_filename,
                 save_dir=frame_dir,
                 bbox_dir=bbox_dir,
                 show_image=False,
                 frame_rate=frame_rate)

        if opt.output_format == 'video':
            output_video_path = osp.join(result_root, 'result.mp4')
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
                osp.join(result_root, 'frame'), output_video_path)
            os.system(cmd_str)
Example #13
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadImages(
        opt.input_path, opt.input_detection_path)  #, out_size = (640,480)
    result_filename = os.path.join(result_root, 'baseline.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    bbox_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'bbox_detection')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             bbox_dir=bbox_dir,
             show_image=False,
             frame_rate=frame_rate)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
Example #14
def demo(opt, ROS_Img, frame_id):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    # dataloader = datasets.LoadVideo(input_file, opt.img_size)
    # print(init)
    img, img0 = preprocessing(ROS_Img)

    result_filename = os.path.join(result_root, 'results.txt')
    #frame_rate = dataloader.frame_rate
    frame_rate = 10

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    #frame_dir = "../demos/img_results"
    print(opt.output_root)
    # tracker, results and preprocessing are assumed to be module-level globals in this ROS variant
    eval_seq_single(
        opt,
        img,
        img0,
        tracker,
        results,
        frame_id,
        'mot',
        result_filename,
        save_dir=frame_dir,
        show_image=False,
        frame_rate=frame_rate,
    )
Example #15
def write_results(filename, results, data_type):
    if data_type == 'mot':
        # save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,0\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)

    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            # if data_type == 'kitti':
            #     frame_id -= 1
            # for tlwh, track_id in zip(tlwhs, track_ids):
            #     if track_id < 0:
            #         continue
            #     x1, y1, w, h = tlwh
            #     x2, y2 = x1 + w, y1 + h
            #     line = save_format.format(frame=frame_id, id=track_id, \
            #                             x1=int(round(x1)), y1=int(round(y1)), \
            #                             x2=int(round(x2)), y2=int(round(y2)), \
            #                             w=int(round(w)), h=int(round(h)))
            #     f.write(line)
            # frame 2 is written twice below: once relabeled as frame 1
            # (presumably to pad the missing first frame) and once as itself
            if frame_id == 2:
                for tlwh, track_id in zip(tlwhs, track_ids):
                    if track_id < 0:
                        continue
                    x1, y1, w, h = tlwh
                    x2, y2 = x1 + w, y1 + h
                    line = save_format.format(frame=1, id=track_id, \
                                            x1=int(round(x1)), y1=int(round(y1)), \
                                            x2=int(round(x2)), y2=int(round(y2)), \
                                            w=int(round(w)), h=int(round(h)))
                    f.write(line)
                for tlwh, track_id in zip(tlwhs, track_ids):
                    if track_id < 0:
                        continue
                    x1, y1, w, h = tlwh
                    x2, y2 = x1 + w, y1 + h
                    line = save_format.format(frame=frame_id, id=track_id, \
                                            x1=int(round(x1)), y1=int(round(y1)), \
                                            x2=int(round(x2)), y2=int(round(y2)), \
                                            w=int(round(w)), h=int(round(h)))
                    f.write(line)
            else:
                if data_type == 'kitti':
                    frame_id -= 1
                for tlwh, track_id in zip(tlwhs, track_ids):
                    if track_id < 0:
                        continue
                    x1, y1, w, h = tlwh
                    x2, y2 = x1 + w, y1 + h
                    line = save_format.format(frame=frame_id, id=track_id, \
                                            x1=int(round(x1)), y1=int(round(y1)), \
                                            x2=int(round(x2)), y2=int(round(y2)), \
                                            w=int(round(w)), h=int(round(h)))
                    f.write(line)
    logger.info('save results to {}'.format(filename))
Example #16
File: demo.py Project: isangu/FairMOT
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    # dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    # result_filename = os.path.join(result_root, 'results.txt')
    # frame_rate = dataloader.frame_rate

    eval_seq_realtime(opt)
Example #17
def eval_det(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id < 302:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>20:
            #     break

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        dets = tracker.detect(blob, img0)
        # print(path, dets)
        tlbrs = []
        scores = []
        class_ids = []
        for det in dets:
            tlbrs.append(det[:4])
            scores.append(det[4])
            class_ids.append(int(det[5] - 1))
        # print(class_ids)
        if show_image or save_dir is not None:
            online_im = vis.plot_detections(img0,
                                            tlbrs,
                                            scores=None,
                                            ids=class_ids)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
Example #18
File: track.py Project: hulkwork/FairMOT
    def eval(self, skip_frame=1, show_image=False):
        tracker = JDETracker(self.opt, frame_rate=self.frame_rate)
        timer = Timer()
        frame_id = 0

        for i, (path, img, img0) in enumerate(self.dataloader):
            if i % skip_frame != 0:
                continue
            if frame_id % 20 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # run tracking
            timer.tic()
            if self.use_cuda:
                blob = torch.from_numpy(img).cuda().unsqueeze(0)
            else:
                blob = torch.from_numpy(img).unsqueeze(0)
            online_targets = tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.opt.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            timer.toc()
            tmp_result = {
                "frame_id": frame_id + 1,
                "bounding_box": online_tlwhs,
                "ids": online_ids,
                "scores": online_scores
            }
            self.send_result(tmp_result, raw_img=img0)

            frame_id += 1
            if show_image:
                online_im = self.send_image(img0, online_tlwhs, online_ids,
                                            frame_id,
                                            1. / max(1e-5, timer.average_time))
                cv2.imshow('Result', online_im)
        if self.video_saver is not None:
            self.video_saver.release()
        return frame_id, timer.average_time, timer.calls
Example #19
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'bdd'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        dataloader = datasets.LoadImages(data_root, opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        frame_rate = 30
        # eval_det(opt, dataloader, data_type, result_filename,
        #          save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))
Example #20
File: demo.py Project: pxthanh98/demxe
def demo(opt):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame')
    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=frame_rate)
Example #21
def measure2files(results_root, data_root, seqs):
    """

    @param results_root:        mot轨迹预测结果文件的根目录,文件中格式 <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
    @param data_root:           gt文件的路径,不包含后三级,因为在Evaluator初始化函数中已经写了全路径的拼接方式
    @param seqs:                gt路径的倒数第三级路径
    @return:                    存储seqs中,每一个路径下track结果的评价指标,以及全部文件夹汇总后的指标
    """

    data_type = 'mot'
    result_root = "/home/shuai.li/code/FairMOT/MOT15/images/results/temp/"
    exp_name = "test_evalMot15"

    accs = []
    # eval
    for seq in tqdm(seqs):

        result_filename = osp.join(results_root, seq) + '.txt'
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(
            data_root, seq,
            data_type)  # the constructor loads the ground truth from data_root and seq
        accs.append(evaluator.eval_file(
            result_filename))  # read the stored tracking results back and evaluate them into an acc object
        # if save_videos:
        #     output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
        #     cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
        #     os.system(cmd_str)

    # get summary
    metrics = mm.metrics.motchallenge_metrics  # the 18 MOT challenge metrics
    mh = mm.metrics.create()  # create the metrics factory; pass it data later to get the metrics
    summary = Evaluator.get_summary(
        accs, seqs, metrics)  # compute the MOT challenge metrics from the acc list, sequence names and metric list
    strsummary = mm.io.render_summary(  # format the metrics as a string for console display
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names)
    print(strsummary)  # show it on the command line
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
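
A hedged usage sketch for measure2files, assuming an MOT15-style layout; the paths and sequence names below are placeholders, not the author's actual data:

# each entry in seqs must have a matching <seq>.txt under results_root
measure2files(
    results_root='/path/to/tracking/results',
    data_root='/path/to/MOT15/images/train',
    seqs=['ADL-Rundle-6', 'Venice-2'],
)
# prints the MOT challenge metric table and saves summary_test_evalMot15.xlsx
# under the result_root hard-coded inside the function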
Example #22
def demo(opt, polygon1, polygon2, prepath=None, cam_id=None):
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    polygon, paths = load_zone_anno(opt.input_meta)
    if prepath is not None:
        paths = prepath
    polygon = np.int32(polygon1)
    #line1,line2=[polygon[4],polygon[3]],[polygon[1],polygon[2]]
    polygon2, _ = np.int32(polygon2), None
    result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    print(cam_id)
    frame_tracking_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame_tracking')
    bbox_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'bbox_detection')
    frame_dir = None if opt.output_format == 'text' else osp.join(
        result_root, 'frame_dir')

    eval_seq(opt,
             dataloader,
             polygon,
             paths,
             'mot',
             result_filename,
             frame_dir=frame_dir,
             save_dir=frame_tracking_dir,
             bbox_dir=bbox_dir,
             show_image=False,
             frame_rate=frame_rate,
             polygon2=polygon2,
             line1=None,
             line2=None,
             cam_id=cam_id)
Example #23
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'bdd'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        # dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        dataloader = datasets.LoadImages(data_root, opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        gt_filename = os.path.join(result_root, '{}_gt.txt'.format(seq))
        frame_rate = 30
        # eval_det(opt, dataloader, data_type, result_filename,
        #          save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename, gt_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type, gt_filename)
        # accs.append(evaluator.eval_file(result_filename))
        for i in range(len(class_names_valid)):
            accs.append(evaluator.eval_file_sp(i, result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, names=class_names_valid, metrics=metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    print('mMOTA:', summary['mota'].mean())
Example #24
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    print(opt)
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
#        print(osp.join(data_root, seq, 'img1'))
        dataloader = datasets.LoadImages(osp.join(data_root, seq), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        meta_info = None #open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = 15 #int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
Example #25
File: track.py Project: isangu/FairMOT
def eval_seq_realtime(opt, save_dir=None, show_image=True, frame_rate=30):
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0

    cv2.namedWindow('online_im', cv2.WINDOW_FREERATIO)
    cv2.setWindowProperty('online_im', cv2.WND_PROP_AUTOSIZE,
                          cv2.WND_PROP_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    ret, im = cap.read()

    height = im.shape[0]
    width = im.shape[1]

    while True:
        ret, img0 = cap.read()

        # Padded resize
        img, _, _, _ = letterbox(img0, height=height, width=width)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        text_scale = max(1, online_im.shape[1] / 1600.)
        text_thickness = 1 if text_scale > 1.1 else 1
        line_thickness = max(1, int(online_im.shape[1] / 500.))
        cv2.putText(online_im,
                    'Press ESC to STOP', (300, int(15 * text_scale)),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 0, 255),
                    thickness=2)
        cv2.imshow('online_im', online_im)
        key = cv2.waitKey(1)
        if key == 27:
            break
        frame_id += 1
    cv2.destroyAllWindows()
Example #26
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             conf_thres=0.3):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = GNNTracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0, p_img_path, p_img) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        if i == 0:
            p_boxes, init_img_path, p_img = dataloader.initialize(
                use_letter_box=opt.use_letter_box)
        else:
            p_boxes, p_img = prepare_prev_img(dataloader, online_targets, opt,
                                              p_img)

        if opt.use_roi_align:
            p_crops = p_boxes.clone()
            _, h, w = p_img.shape
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]
            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            # convert boxes from xyxy to normalized according to p_img dimensions
            p_crops[:, 0] = p_crops[:, 0] / w
            p_crops[:, 1] = p_crops[:, 1] / h
            p_crops[:, 2] = p_crops[:, 2] / w
            p_crops[:, 3] = p_crops[:, 3] / h
            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=p_img.unsqueeze(0).cuda(),
                conf_thres=conf_thres)
        else:
            p_crops = torchvision.ops.roi_align(input=p_img.unsqueeze(0),
                                                boxes=[p_boxes],
                                                output_size=opt.crop_size)
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]

            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=None,
                conf_thres=conf_thres)
        online_tlwhs = []
        online_ids = []
        online_confs = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            t_conf = t.score
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_confs.append(t_conf)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_confs))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_confs,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #27
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
    out_queue = Deque(maxlen=6)
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        time_clean()
        time_sync('tracker all')
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        with torch.no_grad():
            time_sync('forward_half and queue')
            out = tracker.model.backend.forward_half(
                blob, roi_num=tracker.model.backend.roi_top_k)
            out_ref = {}
            out_ref.update(out)
            out_ref['rois'], _ = tracker.model.backend.get_rois(
                out['rpn_map'], 300)
            refs = tracker.model.backend.forward_rois(out_ref)
            out_queue.append(refs)
            while len(out_queue) < out_queue.maxlen:
                out_queue.append(refs)
            time_sync('forward_half and queue')
            time_sync('forward_all')
            output, stuffs, _ = tracker.model.forward_half(out, out_queue)
            time_sync('forward_all')
        # info_debug(output)
        # jj = output['rois'][0] % out['rpn_map'].shape[-1]
        # ii = output['rois'][0] // out['rpn_map'].shape[-1]
        # u = output['rpn_map'][:, :, ii, jj]
        # print(output['hm'][:20])
        # input()

        # import pickle
        # print(u.shape)
        # print(u.flatten().sigmoid())
        # print((u.flatten().sigmoid() - output['hm'].flatten().sigmoid()).abs().max())
        # with open('two_none.pkll', 'wb') as fd:
        #     pickle.dump(output, fd)
        # input()
        time_sync('tracker update')
        online_targets = tracker.update(blob, img0, output)
        time_sync('tracker update')
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        time_sync('tracker all')
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    import pickle
    with open(result_filename + '.dets.pkl', 'wb') as fd:
        pickle.dump(tracker.raw_dets, fd)
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #28
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []

    # iterate over all the frame sequences of the MOT dataset passed as a parameter
    for seq in seqs:
        # decide the folder where the processed outputs will be saved
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        # progress message shown to the user while the run advances
        logger.info('start seq: {}'.format(seq))
        # load the images of the dataset indicated by seq
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        # the text file with the processing result is saved as result_filename
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # read the sequence metadata file for its settings
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # extract the frame rate, i.e. how many frames per unit of time
        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        # run eval_seq
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
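
The frame rate above is recovered by slicing the raw seqinfo.ini text. Since that file is plain INI, a hedged and arguably more robust sketch (read_frame_rate is a hypothetical helper, not from the repo) would let configparser do the parsing:

import configparser
import os

def read_frame_rate(seq_dir):
    """Read frameRate from a MOTChallenge seqinfo.ini."""
    parser = configparser.ConfigParser()
    parser.read(os.path.join(seq_dir, 'seqinfo.ini'))
    # MOTChallenge stores the value in the [Sequence] section;
    # option names are case-insensitive for configparser
    return int(parser['Sequence']['frameRate'])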
Example #29
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    logger.setLevel(logging.INFO)
    # result_root = os.path.join(data_root, '..', 'results', exp_name)
    result_root = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name, seq) if save_images or save_videos else None
        video_dir = os.path.join('/home/liujierui/proj/FairMOT-master/outputs', exp_name) if save_images or save_videos else None
        mkdir_if_missing(output_dir)
        mkdir_if_missing(video_dir)
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        # frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
        frame_rate = 15
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # # eval
        # logger.info('Evaluate seq: {}'.format(seq))
        # evaluator = Evaluator(data_root, seq, data_type)
        # accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            # output_video_path = osp.join(video_dir, '{}.mp4'.format(seq))
            # cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # # cmd_str = 'ffmpeg -f image2 -i {}/%d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            # os.system(cmd_str)
            path = output_dir

            # path = join(self.video_path, str(id_video), 'img1')
            filelist_0 = os.listdir(path)  # all file names in this directory
            filelist = [str(i) for i in range(1, len(filelist_0) + 1)]

            avi_name = join(video_dir, seq + ".avi")  # output path

            item = path + '/1.jpg'
            ori_im = cv2.imread(item)
            video = cv2.VideoWriter(avi_name, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                    15, (ori_im.shape[1], ori_im.shape[0]))
            for id_img in filelist:
                item = path + '/' + id_img + '.jpg'
                ori_im = cv2.imread(item)
                video.write(ori_im)
            video.release()
    
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
Example #30
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    # make sure the save directory exists and create it if missing
    if save_dir:
        mkdir_if_missing(save_dir)

    # create the JDETracker, which performs the following steps (see __init__ in src/lib/tracker/multitracker.py):
    # 1) checks whether the program should run on CPU or GPU
    # 2) builds the model and puts it in eval mode
    # 3) sets up a Kalman filter: a tool for estimating the state of a linear dynamical
    #    system perturbed by noise, based on linearly dependent measurements (observations)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # initialize the timer that tracks processing time
    timer = Timer()
    # initialize the results array
    results = []
    # frame counter
    frame_id = 0
    # iterate over the dataloader items, i.e. the dataset images loaded in main
    for path, img, img0 in dataloader:
        # log the frame rate every 20 processed frames
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()

        # this call is NVIDIA-specific because it uses CUDA; drop .cuda() to run on the CPU
        # it builds a multi-dimensional tensor (with a batch dimension) representing the image img

        blob = torch.from_numpy(img).cuda().unsqueeze(0)

        # apply JDETracker.update, which performs the following steps (src/lib/tracker/multitracker.py):
        # 1) blob and img0 are passed in as parameters
        # 2) height and width are extracted from both and stored
        # 3) a variable c stores a float array holding the halved height and width of img0
        # 4) a variable s stores max(float(inp_width) / float(inp_height) * height, width) * 1.0
        # 5) network forward, get detections & embeddings: detects objects and adds them to a pool of tracklets
        # 6) first association via embedding: per the paper, the embedding distinguishes different objects
        # 7) second association via IoU
        # 8) new STracks are initialized
        # 9) the state is updated
        # 10) the active STracks are returned
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # iterate over the STracks
        for t in online_targets:

            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
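
The comment inside the loop notes that the hard-coded .cuda() call ties this function to NVIDIA GPUs. A hedged, device-agnostic sketch of that one line (to_blob is a hypothetical helper, mirroring the use_cuda flag other examples on this page use):

import numpy as np
import torch

def to_blob(img: np.ndarray, device: torch.device) -> torch.Tensor:
    """Wrap a preprocessed CHW float image into a 1xCxHxW batch on the given device."""
    return torch.from_numpy(img).to(device).unsqueeze(0)

# pick the device once, outside the frame loop
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
blob = to_blob(np.zeros((3, 608, 1088), dtype=np.float32), device)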