# Example 1
def demo(opt):
    """Track objects in a single input video and optionally render a video.

    Reads ``opt.input_video``, runs the tracker via ``eval_seq`` writing MOT
    results to ``results.txt`` under ``opt.output_root``, and, when
    ``opt.output_format == 'video'``, stitches the saved frames into
    ``result.mp4`` with ffmpeg.
    """
    if opt.output_root != '':
        result_root = opt.output_root
    else:
        result_root = '.'
    mkdir_if_missing(result_root)

    logger.info('Starting tracking...')
    dataloader = datasets.LoadVideo(opt.input_video, opt.img_size)
    # Seek 120 seconds (120 * 1000 ms) into the video before tracking.
    dataloader.cap.set(cv2.CAP_PROP_POS_MSEC, round(1000 * 120))
    result_filename = os.path.join(result_root, 'results.txt')

    # Annotated frames are only saved when a visual output was requested.
    if opt.output_format == 'text':
        frame_dir = None
    else:
        frame_dir = osp.join(result_root, 'frame')

    eval_seq(opt,
             dataloader,
             'mot',
             result_filename,
             save_dir=frame_dir,
             show_image=False,
             frame_rate=dataloader.frame_rate)

    if opt.output_format == 'video':
        output_video_path = osp.join(result_root, 'result.mp4')
        cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -b 5000k -c:v mpeg4 {}'.format(
            osp.join(result_root, 'frame'), output_video_path)
        os.system(cmd_str)
# Example 2
def demo(opt):
    """Run detection over every channel video found under a fixed root dir.

    NOTE(review): ``root_dir`` is hard-coded and machine-specific — presumably
    a local dataset path; confirm before reuse.
    """
    tracker = JDETracker(opt, frame_rate=30)  # What is JDE Tracker?

    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    logger.info('Starting detection...')
    root_dir = '/datanew/hwb/data/Football/SoftWare/0'
    channels = regular_videoName(root_dir)
    for channel, video_name in channels.items():
        print('Starting to detect {}/{}'.format(root_dir, video_name))
        input_video = os.path.join(root_dir, video_name)
        dataloader = datasets.LoadVideo(input_video, opt.img_size, gap=1000)
        # Jump 120 seconds into the stream before detecting.
        dataloader.cap.set(cv2.CAP_PROP_POS_MSEC, round(1000 * 120))
        result_filename = os.path.join(result_root, 'results.txt')
        # Detections are stored per-channel, keyed by the video-name prefix.
        frame_dir = os.path.join(root_dir, 'detection', video_name[0:4])
        os.makedirs(frame_dir, exist_ok=True)
        detect(opt,
               tracker,
               dataloader,
               dir_id=1,
               save_dir=frame_dir,
               show_image=True)
# Example 3
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             Input_tracker=None,
             frame_rate=30):
    """Run the tracker over one sequence and write MOT-format results.

    Args:
        opt: parsed options; ``opt.min_box_area`` filters out tiny boxes.
        dataloader: yields ``(path, img, img0)`` — network input and the
            original frame.
        data_type: result-format tag forwarded to ``write_results`` ('mot').
        result_filename: path of the output results text file.
        save_dir: when given, annotated frames are written there as
            ``%05d.jpg``.
        show_image: display each annotated frame with OpenCV.
        Input_tracker: reuse an existing tracker; a fresh ``JDETracker`` is
            built when None.
        frame_rate: frame rate used when constructing a new tracker.

    Returns:
        ``(frame_id, average_time, calls)`` — frames processed plus timing
        statistics from ``Timer``.
    """
    if save_dir:
        mkdir_if_missing(save_dir)

    # Fix: compare against None with ``is``, not ``==`` (PEP 8).
    if Input_tracker is None:
        tracker = JDETracker(opt,
                             frame_rate=frame_rate)  # What is JDE Tracker?
    else:
        tracker = Input_tracker

    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Discard implausibly wide boxes (w / h > 1.6) and tiny boxes.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            # Fix: guard against a zero average time on the first frames,
            # consistent with the FPS computation in the final log line.
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. /
                                          max(1e-5, timer.average_time))
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1

    logger.info('Processing frame {} ({:.2f} fps)'.format(
        frame_id, 1. / max(1e-5, timer.average_time)))
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
# Example 4
def detect(opt, tracker, dataloader, dir_id, save_dir=None, show_image=True):
    """Run detection over a dataloader, optionally saving annotated frames.

    Args:
        opt: parsed options (forwarded implicitly through ``tracker``).
        tracker: tracker exposing ``update_for_detection(blob, img0,
            save_dir, frame_id)`` returning ``(dets, id_feature)``.
        dataloader: yields ``(path, img, img0)`` tuples.
        dir_id: directory/channel identifier forwarded to
            ``vis.plot_detections``.
        save_dir: when given, per-frame JPEGs, a ``subimg`` directory and an
            ``out_put.avi`` are created there; when None nothing is written.
        show_image: display each annotated frame with OpenCV.

    Returns:
        ``(frame_id, average_time, calls)`` — frames processed and timing
        statistics.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    timer = Timer()
    results = []
    frame_id = 0
    out = None
    if save_dir is not None:
        # Fix: these paths dereferenced ``save_dir`` unconditionally and
        # crashed with TypeError when it was left at its None default.
        save_dir_subimgs = os.path.join(save_dir, 'subimg')
        os.makedirs(save_dir_subimgs, exist_ok=True)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(os.path.join(save_dir, 'out_put.avi'), fourcc,
                              20.0, (1920, 1080), True)

    try:
        for path, img, img0 in dataloader:

            if frame_id % 20 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))
            # run tracking
            timer.tic()
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
            dets, id_feature = tracker.update_for_detection(
                blob, img0, save_dir, frame_id)

            timer.toc()
            # save results
            results.append((frame_id + 1, dets, id_feature))

            if show_image or save_dir is not None:
                online_im = vis.plot_detections(img0, dets, save_dir, dir_id,
                                                frame_id)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(
                    os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)

            frame_id += 1
    finally:
        # Fix: the VideoWriter was previously never released (resource leak;
        # the .avi container could be left unfinalized).
        if out is not None:
            out.release()
    return frame_id, timer.average_time, timer.calls
# Example 5
def main(opt,
         data_root='/data/MOT16/train',
         det_root=None,
         seqs=('MOT16-05', ),
         exp_name='demo',
         save_images=False,
         save_videos=False,
         show_image=True):
    """Evaluate the tracker on a list of MOT sequences and summarize metrics.

    Runs ``eval_seq`` on every sequence under ``data_root``, evaluates each
    result file with ``Evaluator``, prints a motmetrics summary and saves it
    as ``summary_<exp_name>.xlsx`` under the results directory.

    Args:
        opt: parsed options (``opt.img_size`` is used by the image loader).
        data_root: directory containing the sequence folders.
        det_root: unused here; kept for interface compatibility.
        seqs: iterable of sequence names to evaluate.
        exp_name: experiment name used in output paths.
        save_images: save annotated frames per sequence.
        save_videos: additionally encode the saved frames into an mp4.
        show_image: display frames while tracking.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'

    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..', 'outputs', exp_name,
                                  seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'),
                                         opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Fix: close the seqinfo file deterministically (was a leaked handle
        # via open(...).read()).
        with open(os.path.join(data_root, seq, 'seqinfo.ini')) as meta_file:
            meta_info = meta_file.read()
        # Parse "frameRate=NN" out of the ini text; 10 == len('frameRate=').
        frame_rate = int(meta_info[meta_info.find('frameRate') +
                                   10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt,
                              dataloader,
                              data_type,
                              result_filename,
                              save_dir=output_dir,
                              show_image=show_image,
                              frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)

        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(
                output_dir, output_video_path)
            os.system(cmd_str)
    # Overall throughput: total time is the dot product of per-seq average
    # times and call counts.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
        all_time, 1.0 / avg_time))

    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(summary,
                                      formatters=mh.formatters,
                                      namemap=mm.io.motchallenge_metric_names)
    print(strsummary)
    Evaluator.save_summary(
        summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
# Example 6
def Short_track_eval(opt,
                     dataloader,
                     data_type,
                     result_filename,
                     target_frame,
                     reference_point,
                     save_dir=None,
                     show_image=True,
                     Input_tracker=None,
                     frame_rate=30):
    """Track a short clip, identify the target nearest ``reference_point``
    around ``target_frame``, and crop that target's box from every frame.

    Args:
        opt: parsed options; ``opt.min_box_area`` filters tiny boxes.
        dataloader: yields ``(path, img, img0)`` tuples.
        data_type: format tag forwarded to ``write_results``.
        result_filename: path of the output results file.
        target_frame: index (into the collected results) of the frame used to
            pick the target identity.
        reference_point: point passed to ``sort_by_point`` to select the
            target track.
        save_dir: directory where per-frame target crops are written as
            ``<frame>_<id>.jpg``.
        show_image: unused (visualization is disabled here); kept for
            interface compatibility with ``eval_seq``.
        Input_tracker: reuse an existing tracker; a fresh ``JDETracker`` is
            built when None.
        frame_rate: frame rate used when constructing a new tracker.

    Returns:
        ``(frame_id, average_time, calls)`` on success, or None when the
        target cannot be identified in any of the probed frames.
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    # Fix: compare against None with ``is``, not ``==`` (PEP 8).
    if Input_tracker is None:
        tracker = JDETracker(opt,
                             frame_rate=frame_rate)  # What is JDE Tracker?
    else:
        tracker = Input_tracker

    timer = Timer()
    results = []
    frame_id = 0
    img0_array = []

    for path, img, img0 in dataloader:
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_ReID_features = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            ReID_feature = t.curr_feat
            vertical = tlwh[2] / tlwh[3] > 1.6  # w / h > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_ReID_features.append(ReID_feature)
        timer.toc()
        # save results
        results.append(
            (frame_id + 1, online_tlwhs, online_ids, online_ReID_features))
        img0_array.append(img0)
        frame_id += 1

    # Probe the target frame itself, then two frames after/before, until the
    # reference point can be matched to a detection.
    # Fix: initialize target_id — it was unbound when every iteration hit
    # ``continue`` (e.g. empty clip), causing a NameError below.
    target_id = None
    for bias in [0, 2, -2]:
        idx = target_frame + bias
        # Fix: bounds-check the probed index; the original raised IndexError
        # when target_frame was within 2 frames of either clip boundary.
        if not 0 <= idx < len(results):
            continue
        if len(results[idx][1]) == 0:
            # The probed frame may contain no detections at all.
            target_id = None
            continue
        new_reference_point, target_id = sort_by_point(
            results[idx], reference_point)
        if target_id is not None:
            break
    # If none of the probed frames matched, this action cannot be
    # distinguished — give up.
    if target_id is None:
        return None

    # Crop the target's sub-image out of every frame.
    for r_index, result in enumerate(results):
        img0 = img0_array[r_index]
        I_h, I_w, _ = img0.shape
        bboxes = result[1]
        ids = result[2]
        for id_index, tid in enumerate(ids):  # ``tid``: avoid shadowing ``id``
            if tid != target_id:
                continue
            x1, y1, w, h = bboxes[id_index]
            # Clamp the tlwh box to the image bounds before cropping.
            intbox = tuple(
                map(int,
                    (max(0, x1), max(0, y1), min(x1 + w, I_w), min(
                        y1 + h, I_h))))
            sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]
            cv2.imwrite(
                os.path.join(save_dir, '{}_{}.jpg'.format(r_index, tid)),
                sub_img)

    logger.info('Processing frame {} ({:.2f} fps)'.format(
        frame_id, 1. / max(1e-5, timer.average_time)))

    # save results
    write_results(result_filename, results, data_type)

    return frame_id, timer.average_time, timer.calls