Example #1
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # skip boxes wider than tall (w/h > 1.6), likely false pedestrian detections
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
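
For reference, the write_results helper that these examples call typically serializes one row per box in MOTChallenge text format; a minimal sketch, assuming only the 'mot' data type needs handling:

def write_results(filename, results, data_type):
    # MOTChallenge text format: frame,id,x,y,w,h,conf,-1,-1,-1
    assert data_type == 'mot', 'this sketch handles the mot format only'
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    continue
                x1, y1, w, h = tlwh
                f.write('{},{},{},{},{},{},1,-1,-1,-1\n'.format(
                    frame_id, track_id, x1, y1, w, h))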
Example #2
def eval_det(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        # debug gate: skip everything except frame 302
        if frame_id < 302:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>20:
            #     break

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        dets = tracker.detect(blob, img0)
        # print(path, dets)
        tlbrs = []
        scores = []
        class_ids = []
        for det in dets:
            tlbrs.append(det[:4])
            scores.append(det[4])
            class_ids.append(int(det[5] - 1))
        # print(class_ids)
        if show_image or save_dir is not None:
            online_im = vis.plot_detections(img0,
                                            tlbrs,
                                            scores=None,
                                            ids=class_ids)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
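
The indexing above implies a per-row layout for dets roughly as follows (inferred from the code, not a documented spec):

# det[0:4] -> bounding box as (x1, y1, x2, y2)
# det[4]   -> detection confidence score
# det[5]   -> 1-based class label, hence the `- 1` to make it 0-based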
Example #3
File: track.py Project: hulkwork/FairMOT
    def eval(self, skip_frame=1, show_image=False):
        tracker = JDETracker(self.opt, frame_rate=self.frame_rate)
        timer = Timer()
        frame_id = 0

        for i, (path, img, img0) in enumerate(self.dataloader):
            if i % skip_frame != 0:
                continue
            if frame_id % 20 == 0:
                logger.info('Processing frame {} ({:.2f} fps)'.format(
                    frame_id, 1. / max(1e-5, timer.average_time)))

            # run tracking
            timer.tic()
            if self.use_cuda:
                blob = torch.from_numpy(img).cuda().unsqueeze(0)
            else:
                blob = torch.from_numpy(img).unsqueeze(0)
            online_targets = tracker.update(blob, img0)
            online_tlwhs = []
            online_ids = []
            online_scores = []
            for t in online_targets:
                tlwh = t.tlwh
                tid = t.track_id
                vertical = tlwh[2] / tlwh[3] > 1.6
                if tlwh[2] * tlwh[3] > self.opt.min_box_area and not vertical:
                    online_tlwhs.append(tlwh)
                    online_ids.append(tid)
                    online_scores.append(t.score)
            timer.toc()
            tmp_result = {
                "frame_id": frame_id + 1,
                "bounding_box": online_tlwhs,
                "ids": online_ids,
                "scores": online_scores
            }
            self.send_result(tmp_result, raw_img=img0)

            frame_id += 1
            if show_image:
                online_im = self.send_image(img0, online_tlwhs, online_ids,
                                            frame_id,
                                            1. / max(1e-5, timer.average_time))
                cv2.imshow('Result', online_im)
        if self.video_saver is not None:
            self.video_saver.release()
        return frame_id, timer.average_time, timer.calls
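
send_result and send_image are hooks specific to this project; a hypothetical JSON-serializing consumer for the per-frame dict could look like this (the numpy tlwh boxes must be converted first):

import json

import numpy as np

def send_result(result, raw_img=None):
    # hypothetical sink: serialize the per-frame dict to one JSON line
    payload = {
        'frame_id': result['frame_id'],
        'bounding_box': [np.asarray(b).tolist() for b in result['bounding_box']],
        'ids': [int(i) for i in result['ids']],
        'scores': [float(s) for s in result['scores']],
    }
    print(json.dumps(payload))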
Example #4
def eval_seq(opt,
             dataloader,
             data_type,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    writer = VideoWriter(save_dir, dataloader)
    for path, img, img0 in dataloader:

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            # cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
            writer.write(online_im)
        frame_id += 1
    print("***************************** DONE *****************************")
Example #5
def eval_seq(opt, save_dir='', frame_rate=30):

    dataloader = LoadImages(opt.multi_test_images_dataset)
    opt = opt.update_dataset_info_and_set_heads(opt, dataloader)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    fd = os.open(os.path.join(save_dir, 'args.txt'),
                 os.O_RDWR | os.O_CREAT)  # O_CREAT so a missing file is created
    for i, (path, img, img0) in enumerate(dataloader):

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
            centre = tlwh[:2] + tlwh[2:] / 2
            text = str(frame_id) + ' ' + str(tid) + ' ' + str(
                centre[1]) + ' ' + str(centre[0]) + '\n'
            os.write(fd, str.encode(text))
        timer.toc()
        # save results

        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
        frame_id += 1
    # save results
    os.close(fd)

    return frame_id, timer.average_time, timer.calls
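
The raw os.open/os.write calls above work, but a context-managed file object is the more idiomatic equivalent; a sketch of the same centre-point log (records stands in for the values gathered inside the loop and is hypothetical):

import os

# same log as above, written with a managed file object
with open(os.path.join(save_dir, 'args.txt'), 'w') as f:
    for frame_id, tid, centre in records:  # hypothetical: collected as in the loop above
        f.write('{} {} {} {}\n'.format(frame_id, tid, centre[1], centre[0]))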
Example #6
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True,
             gt_path=None):

    gt_results_dict = dict()
    if gt_path:
        if os.path.isfile(gt_path):
            with open(gt_path, 'r') as f:
                for line in f.readlines():
                    linelist = line.split(',')
                    if len(linelist) < 7:
                        continue
                    fid = int(linelist[0])
                    if fid < 1:
                        continue
                    gt_results_dict.setdefault(fid, list())

                    box_size = float(linelist[4]) * float(linelist[5])

                    score = 1

                    # if box_size > 7000:
                    # if box_size <= 7000 or box_size >= 15000:
                    # if box_size < 15000:
                    # continue

                    tlwh = tuple(map(float, linelist[2:6]))
                    target_id = linelist[1]

                    gt_results_dict[fid].append((tlwh, target_id, score))

    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    for i, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time,
                                          gt_box=gt_results_dict.get(
                                              frame_id + 1))  # avoids KeyError when no GT is loaded
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
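
The gt.txt parsing in this example assumes MOTChallenge-style ground-truth rows, whose columns are frame, id, bb_left, bb_top, bb_width, bb_height, conf, class, visibility; a worked line:

line = '1,1,912.0,484.0,97.0,109.0,1,1,1.0'        # sample MOTChallenge gt row
linelist = line.split(',')
fid = int(linelist[0])                              # frame -> 1
target_id = linelist[1]                             # track id -> '1'
tlwh = tuple(map(float, linelist[2:6]))             # (912.0, 484.0, 97.0, 109.0)
box_size = float(linelist[4]) * float(linelist[5])  # 97.0 * 109.0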
Example #7
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    '''
    img:  Normalized RGB image
    img0: BGR image
    '''
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # print("\n==> blob.size", blob.size()) 1, 3, 608, 1088
        '''
        tracker update
        '''
        online_targets = tracker.update(blob, img0)

        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        ''' 
        print("==> [track.eval_seq] tracker's output-> online_targets:", online_targets)
        try:
            print("==> [track.eval_seq] len(online_tlwhs):", len(online_tlwhs))
            print("==> [track.eval_seq] online_tlwhs[0]:", online_tlwhs[0])
            print("==> [track.eval_seq] online_ids[0]:", online_ids[0])
        except:
            pass
        
        partial output:
        ==> [multi-tracker.update] len(output_stracks): 5
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-13), OT_2_(1-13), OT_3_(1-13), OT_20_(10-13), OT_7_(2-13)]
        ==> [track.eval_seq] len(online_tlwhs): 5
        ==> [track.eval_seq] online_tlwhs[0]: [     802.38      163.64      24.074      57.376]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-14), OT_2_(1-14), OT_3_(1-14), OT_20_(10-14), OT_7_(2-14), OT_23_(13-14), OT_13_(4-14)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     809.96      163.69      25.305      60.319]
        ==> [track.eval_seq] online_ids[0]: 1
        
        ==> [multi-tracker.update] len(output_stracks): 7
        ==> [track.eval_seq] tracker's output-> online_targets: [OT_1_(1-15), OT_2_(1-15), OT_3_(1-15), OT_20_(10-15), OT_7_(2-15), OT_23_(13-15), OT_19_(10-15)]
        ==> [track.eval_seq] len(online_tlwhs): 7
        ==> [track.eval_seq] online_tlwhs[0]: [     818.46       164.4      26.832      63.971]
        ==> [track.eval_seq] online_ids[0]: 1
        '''

        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #8
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    len_all = len(dataloader)
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    out_queue = Deque(maxlen=5)  # Deque: presumably collections.deque, imported elsewhere
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        while len(out_queue) < out_queue.maxlen - 1:
            output = tracker.model.features(blob)[-1]
            out_queue.append(output)
        ref = torch.cat(list(out_queue), dim=0)
        output = tracker.model((blob, ref))[0]
        out_queue.append(output.pop('raw'))
        online_targets = tracker.update(blob, img0, output)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
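
Isolated from the tracker, the reference-feature buffer above is a simple warm-up pattern: pad a rolling window with the first frame's features, then append one entry per frame; a sketch, with extract_features standing in for tracker.model.features:

from collections import deque

import torch

def make_reference(blob, extract_features, window=5):
    # warm a rolling window with the first observation, then stack it
    queue = deque(maxlen=window)
    feats = extract_features(blob)          # hypothetical stand-in
    while len(queue) < queue.maxlen:
        queue.append(feats)
    return torch.cat(list(queue), dim=0)    # stacked reference features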
Example #9
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        # debug gate: process only frames 300-302
        if frame_id < 300:
            frame_id += 1
            continue
        elif frame_id > 302:
            break
        else:
            print(frame_id)
        filename = path.split('/')[-1]
        # a new sequence begins (its first image is numbered 0000001): reset the
        # tracker and offset later track ids so they stay globally unique
        if '0000001' in path:
            tid_max = tid_temp + 1
            print(path, tid_max)
            tracker = JDETracker(opt, frame_rate=frame_rate)

        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id >20:
        #     break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update_sep3(
            blob,
            img0,
            conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = []  #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:  # and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            tid_temp = max(tid, tid_temp)
        timer.toc()
        # save results
        results.append((filename, online_tlwhs, online_ids, online_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #10
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir='.',
             show_image=True,
             frame_rate=25):

    tracker = JDETracker(opt, frame_rate=frame_rate)
    p = path_root_index[5]  # path_root_index / path_root / getimage_path are module-level globals
    if save_dir:
        save_dir = osp.join(save_dir, p)
        mkdir_if_missing(save_dir)
    image_path = getimage_path(path_root + p)
    timer = Timer()
    results = []
    frame_id = -1
    result_array_list = []
    result = []
    for path in image_path:

        # img=cv2.imread(path)

        img0 = cv2.imread(path)  # BGR
        # assert img0 is not None, 'Failed to load ' + img_path
        img_height = img0.shape[0]
        img_width = img0.shape[1]
        # print(img_height,img_width)
        # print(img0.shape)
        # Padded resize
        img, _, _, _ = letterbox(img0, height=608, width=1088)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        frame_id += 1

        # if frame_id % 20 == 0:
        #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
        # if frame_id==2:
        #   break
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        online_cref = []
        # result_array_list=[]
        for t in online_targets:

            tlwh = t.tlwh

            tid = t.track_id
            confidence = t.score

            vertical = tlwh[2] / tlwh[3] > 1.6
            # low-confidence boxes must clear a stricter minimum area (2700 vs 1000)
            if confidence < 0.3:
                if 2700 < tlwh[2] * tlwh[3] < 100000 and not vertical:
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)

            else:  # confidence >= 0.3
                if 1000 < tlwh[2] * tlwh[3] < 100000 and not vertical:
                    # print(tlwh[2] * tlwh[3])
                    res = [frame_id, tid]
                    res += list(tlwh)
                    res += [1, 0]
                    online_tlwhs.append(tlwh)
                    result_array_list.append(res)
                    online_cref.append(confidence)
                    # print(confidence)
                    # result_array_list.append(tlwh)
                    online_ids.append(tid)
        # if frame_id==2:
        #   break
        timer.toc()
        # save results
        print(frame_id)
        # if result_array_list:

        online_tlwhs = np.array(online_tlwhs)
        online_ids = np.array(online_ids)
        online_cref = np.array(online_cref)
        # print(online_tlwhs)
        # print(online_tlwhs.shape)
        # print(online_ids.shape)
        # pick=non_max_suppression(online_tlwhs,0.7,online_cref)

        # online_tlwhsnms=online_tlwhs[pick]
        # online_idsnms=online_ids[pick]
        # online_crefnms=online_cref[pick]
        # result_array_list2=np.array(result_array_list).copy()[pick]
        # result+=list(result_array_list2)
        # print(result)

        # print(frame_id,online_idsnms)
        # result.append(online_tlwhsnms)
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_cref,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
    res_array = np.array(result_array_list)
    # print(res_array)
    # print(res_array.shape)
    # clip boxes that cross the left/top border: shrink w/h by the overhang,
    # then clamp x/y to 0
    tmp_data1 = np.where(res_array[:, [2]] < 0,
                         res_array[:, [2]] + res_array[:, [4]],
                         res_array[:, [4]])
    res_array[:, [4]] = tmp_data1
    tmp_data = np.where(res_array[:, [3]] < 0,
                        res_array[:, [3]] + res_array[:, [5]],
                        res_array[:, [5]])
    res_array[:, [5]] = tmp_data
    res_array[:, [2, 3]] = np.maximum(res_array[:, [2, 3]], 0)
    # print(res_array)
    res_array = np.round(res_array, 0)
    # res_array=cutmorecord(res_array,img_width,img_height)
    # print(res_array)
    np.savetxt("{}.txt".format(p),
               res_array,
               fmt='%d,%d,%d,%d,%d,%d,%d,%d',
               delimiter=',')

    # save results
    # write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
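
The letterbox call in this example follows the usual FairMOT/JDE preprocessing: resize while preserving aspect ratio, then pad to the target shape with gray borders. A sketch consistent with the img, ratio, dw, dh return signature used above:

import cv2

def letterbox(img, height=608, width=1088, color=(127.5, 127.5, 127.5)):
    shape = img.shape[:2]  # current (height, width)
    ratio = min(float(height) / shape[0], float(width) / shape[1])
    new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))  # (w, h)
    dw = (width - new_shape[0]) / 2    # width padding per side
    dh = (height - new_shape[1]) / 2   # height padding per side
    top, bottom = round(dh - 0.1), round(dh + 0.1)
    left, right = round(dw - 0.1), round(dw + 0.1)
    img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA)
    img = cv2.copyMakeBorder(img, top, bottom, left, right,
                             cv2.BORDER_CONSTANT, value=color)
    return img, ratio, dw, dh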
Example #11
def eval_seq(opt,
             dataloader,
             polygon,
             paths,
             data_type,
             result_filename,
             frame_dir=None,
             save_dir=None,
             bbox_dir=None,
             show_image=True,
             frame_rate=30):
    count = 0
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    if frame_dir:
        mkdir_if_missing(frame_dir)
    tracker = JDETracker(opt, polygon, paths, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 1

    f = open(opt.input_video.split('/')[-1][:-4] + '.txt', 'w')

    for path, img, img0 in dataloader:
        img0_clone = copy.copy(img0)
        if frame_id % 1 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(
            0) if opt.gpus[0] >= 0 else torch.from_numpy(img).cpu().unsqueeze(
                0)
        online_targets, detection_boxes, out_of_polygon_tracklet = tracker.update(
            blob, img0)
        if len(out_of_polygon_tracklet) > 0:
            for track in np.asarray(out_of_polygon_tracklet)[:, 2]:
                if track in ['person', 'bicycle', 'motorcycle']:
                    count += 1
            print('count : ' + str(count))
        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)

        #bbox detection plot
        box_tlbrs = []
        box_scores = []
        box_classes = []
        box_occlusions = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                box_tlbrs.append(tlbr)
                box_scores.append(box.score)
                box_classes.append(box.infer_type())
                box_occlusions.append(
                    'occ' if box.occlusion_status else 'non_occ')

        timer.toc()
        # save results
        for track in out_of_polygon_tracklet:
            frame_idx, id, classes, movement = track
            results.append((opt.input_video.split('/')[-1][:-4], frame_idx,
                            classes, movement))
            f.write(','.join([
                opt.input_video.split('/')[-1][:-4],
                str(frame_idx),
                str(classes),
                str(movement)
            ]) + '\n')
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time,
                                          out_track=out_of_polygon_tracklet)
            bbox_im = vis.plot_detections(img_bbox,
                                          box_tlbrs,
                                          scores=box_scores,
                                          box_occlusion=None,
                                          btypes=box_classes)
        if show_image:
            cv2.polylines(online_im, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.polylines(bbox_im, [np.asarray(polygon)], True, (0, 255, 255))
            cv2.polylines(img0_clone, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.imshow('online_im', online_im)
            cv2.imshow('bbox_im', bbox_im)
        if save_dir is not None:
            cv2.polylines(online_im, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.polylines(bbox_im, [np.asarray(polygon)], True, (0, 255, 255))
            cv2.polylines(img0_clone, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
            cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                        bbox_im)
            cv2.imwrite(os.path.join(frame_dir, '{:05d}.jpg'.format(frame_id)),
                        img0_clone)

        frame_id += 1
    # save results
    f.close()  # close the per-video tracklet log opened above

    return frame_id, timer.average_time, timer.calls
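
The numpy detour used for counting above can also be written directly over the (frame_idx, id, class, movement) tuples that the loop unpacks later; an equivalent sketch:

# count person/bicycle/motorcycle tracklets that left the polygon
for _, _, cls, _ in out_of_polygon_tracklet:
    if cls in ('person', 'bicycle', 'motorcycle'):
        count += 1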
Example #12
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    # check that the save directory exists and create it if it does not
    if save_dir:
        mkdir_if_missing(save_dir)

    # create the JDETracker, which performs the following steps (see the init
    # function in src/lib/tracker/multitracker.py):
    # 1) check whether the program should run on CPU or GPU
    # 2) create the model and put it in evaluation mode
    # 3) set up a Kalman filter: a tool for estimating the state of a linear
    #    dynamic system perturbed by noise, from linearly dependent measurements
    #    (observations)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    # initialize the timer that monitors processing time
    timer = Timer()
    # initialize the results array
    results = []
    # frame identifier
    frame_id = 0
    # iterate over the dataloader entries, i.e. the dataset images loaded in main
    for path, img, img0 in dataloader:
        # log the frame rate every 20 processed frames
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()

        # this call is CUDA-specific and thus requires an NVIDIA GPU; drop the
        # .cuda() to run on CPU instead
        # builds a multidimensional tensor (with a leading batch dimension) to
        # represent the image img

        blob = torch.from_numpy(img).cuda().unsqueeze(0)

        # apply JDETracker.update, which performs the following steps
        # (src/lib/tracker/multitracker.py):
        # 1) blob and img0 are passed in as parameters
        # 2) height and width are extracted from both and stored
        # 3) a variable c stores a float array whose two elements are half the
        #    height and width of img0 (the image centre)
        # 4) a variable s stores max(float(inp_width) / float(inp_height) * height, width) * 1.0
        # 5) network forward pass, get detections & embeddings: detect objects
        #    and add them to a pool of tracklets
        # 6) first association via embedding: per the paper, the embedding's role
        #    is to distinguish different objects
        # 7) second association via IoU
        # 8) initialize new STracks
        # 9) update the state
        # 10) return the currently active stracks
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        # iterate over the stracks
        for t in online_targets:

            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
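
For context, a typical call site for eval_seq in FairMOT-style projects might look as follows (a sketch: LoadImages, the paths, and opt are placeholders from the surrounding project):

# hypothetical call site; LoadImages comes from the project's dataset module
dataloader = LoadImages('/data/MOT17/train/MOT17-02/img1', (1088, 608))
n_frames, avg_time, n_calls = eval_seq(opt, dataloader, 'mot',
                                       result_filename='MOT17-02.txt',
                                       save_dir='outputs/MOT17-02',
                                       show_image=False,
                                       frame_rate=30)
print('{} frames, {:.2f} fps'.format(n_frames, 1.0 / max(1e-5, avg_time)))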
Example #13
File: track.py Project: isangu/FairMOT
def eval_seq_realtime(opt, save_dir=None, show_image=True, frame_rate=30):
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    frame_id = 0

    cv2.namedWindow('online_im', cv2.WINDOW_FREERATIO)
    cv2.setWindowProperty('online_im', cv2.WND_PROP_AUTOSIZE,
                          cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    ret, im = cap.read()
    assert ret, 'failed to read from the camera'

    height = im.shape[0]
    width = im.shape[1]

    while True:
        ret, img0 = cap.read()
        if not ret:
            break

        # Padded resize
        img, _, _, _ = letterbox(img0, height=height, width=width)

        # Normalize RGB
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        text_scale = max(1, online_im.shape[1] / 1600.)
        cv2.putText(online_im,
                    'Press ESC to STOP', (300, int(15 * text_scale)),
                    cv2.FONT_HERSHEY_PLAIN,
                    text_scale, (0, 0, 255),
                    thickness=2)
        cv2.imshow('online_im', online_im)
        key = cv2.waitKey(1)
        if key == 27:
            break
        frame_id += 1
    cap.release()
    cv2.destroyAllWindows()
Example #14
def main(opt):
    acl_resource = AclResource()
    acl_resource.init()

    mot_model = Model('../model/dlav0.om')

    # Create output dir if not exist; default outputs
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    video_name = os.path.basename(opt.input_video).replace(' ',
                                                           '_').split('.')[0]

    # setup dataloader, use LoadVideo or LoadImages
    dataloader = LoadVideo(opt.input_video, (1088, 608))
    # result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    # dir for output images; default: outputs/'VideoFileName'
    save_dir = os.path.join(result_root, video_name)
    if save_dir and os.path.exists(save_dir) and opt.rm_prev:
        shutil.rmtree(save_dir)
    mkdir_if_missing(save_dir)

    # initialize tracker
    tracker = JDETracker(opt, mot_model, frame_rate=frame_rate)
    timer = Timer()
    results = []

    # img:  c h w; 3 608 1088 (normalized network input)
    # img0: h w c; 608 1088 3 (original BGR frame)
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            print('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking, start tracking timer
        timer.tic()

        # list of Tracklet; see multitracker.STrack
        online_targets = tracker.update(np.array([img]), img0)

        # prepare for drawing, get all bbox and id
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # draw bbox and id
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
Example #15
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    if opt.gpus[0] >= 0:
        opt.device = 'gpu'
    else:
        opt.device = 'cpu'
    paddle.set_device(opt.device)
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    # model = torch.nn.DataParallel(model)
    # model = model.to(opt.device)
    model.eval()
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))
        # run detecting
        timer.tic()
        # blob = torch.from_numpy(img).cuda().unsqueeze(0)
        blob = paddle.to_tensor(img).unsqueeze(0)
        width = img0.shape[1]
        height = img0.shape[0]
        inp_height = blob.shape[2]
        inp_width = blob.shape[3]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {
            'c': c,
            's': s,
            'out_height': inp_height // opt.down_ratio,
            'out_width': inp_width // opt.down_ratio
        }
        # with torch.no_grad():
        with paddle.no_grad():
            output = model(blob)[-1]
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            reg = output['reg'] if opt.reg_offset else None
            dets, inds = mot_decode(hm, wh, reg=reg, ltrb=opt.ltrb, K=opt.K)

        dets = post_process(opt, dets, meta)
        dets = merge_outputs(opt, [dets])[1]

        dets = dets[dets[:, 4] > 0.1]
        dets[:, :4] = tlbr2tlwh(dets[:, :4])

        tlwhs = []
        scores = []
        for *tlwh, conf in dets:
            tlwhs.append(tlwh)
            scores.append(conf)
        timer.toc()
        # save results
        results.append((frame_id + 1, tlwhs, scores))
        frame_id += 1
    # save results
    write_results_score(result_filename, results)
    #write_results_score_hie(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
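
tlbr2tlwh above converts corner-format boxes to top-left-plus-size; one plausible vectorized implementation over an (N, 4) array:

import numpy as np

def tlbr2tlwh(tlbr):
    # (x1, y1, x2, y2) -> (x, y, w, h): subtract the top-left corner
    ret = np.asarray(tlbr, dtype=np.float32).copy()
    ret[:, 2:] -= ret[:, :2]
    return ret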
Example #16
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    seq = '.'.join(osp.basename(result_filename).split('.')[:-1])
    gt = TrackSet(
        '/mnt/lustre/share/fengweitao/myval/images/val/%s/gt/gt.txt' % seq)
    out_queue = Deque(maxlen=6)
    gt_queue = Deque(maxlen=6)
    gt_raw_q = Deque(maxlen=6)
    for i, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        time_clean()
        time_sync('tracker all')
        gt_row = gt[frame_id + 1]
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        with torch.no_grad():
            time_sync('forward_half and queue')
            out = tracker.model.backend.forward_half(
                blob, roi_num=tracker.model.backend.roi_top_k)
            out_ref = {}
            out_ref.update(out)
            out_ref['rois'], out_ref[
                'roi_scores'] = tracker.model.backend.get_rois(
                    out['rpn_map'], 300)
            gt_rois = get_gt_rois(out['rois'], gt_row,
                                  out['rpn_map'].shape[-2:], img0.shape[:2])
            gt_rois_ref = get_gt_rois(out_ref['rois'], gt_row,
                                      out['rpn_map'].shape[-2:],
                                      img0.shape[:2])
            refs = tracker.model.backend.forward_rois(out_ref)
            out_queue.append(refs)
            gt_queue.append(gt_rois_ref)
            gt_raw_q.append(gt_row)
            while len(out_queue) < out_queue.maxlen:
                out_queue.append(refs)
                gt_queue.append(gt_rois_ref)
                gt_raw_q.append(gt_row)
            time_sync('forward_half and queue')
            time_sync('forward_all')

            def wrap_gt(drow, img0):
                labels = [[
                    0,
                    int(d.uid), d.cx / img0.shape[1], d.cy / img0.shape[0],
                    d.w / img0.shape[1], d.h / img0.shape[0]
                ] for d in drow]
                labels = np.array(labels)
                # print(labels)
                input_gt = reldata.parse_input_data(blob, opt, labels)
                input_gt = to_device(input_gt, device=blob.device)
                return input_gt

            labels = wrap_gt(gt_row, img0)
            # print(labels)
            labels_ref = []
            for one in gt_raw_q:
                labels_ref_one = wrap_gt(one, img0)
                labels_ref.append(labels_ref_one)
            input_gt = {}
            for k in labels:
                if k in ['hm', 'reg_mask', 'ind', 'ids']:
                    input_gt[k] = labels[k], [one[k] for one in labels_ref]
                else:
                    input_gt[k] = labels[k]
            output, stuffs, _ = tracker.model.forward_half(out,
                                                           out_queue,
                                                           inputs=input_gt)
            gt_ref_rois = []
            for gt_key_i in range(len(gt_queue[0])):
                tmp_gt_rois = np.concatenate(
                    [one[gt_key_i] for one in gt_queue])
                gt_ref_rois.append(tmp_gt_rois)
            time_sync('forward_all')
        # info_debug(output)
        # jj = output['rois'][0] % out['rpn_map'].shape[-1]
        # ii = output['rois'][0] // out['rpn_map'].shape[-1]
        # u = output['rpn_map'][:, :, ii, jj]
        # print(output['hm'][:20])
        # input()

        # import pickle
        # print(u.shape)
        # print(u.flatten().sigmoid())
        # print((u.flatten().sigmoid() - output['hm'].flatten().sigmoid()).abs().max())
        # with open('two_none.pkll', 'wb') as fd:
        #     pickle.dump(output, fd)
        # input()
        time_sync('tracker update')
        online_targets = tracker.update(blob, img0, output)
        time_sync('tracker update')
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        time_sync('tracker all')
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        # if show_image or save_dir is not None:
        #     online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
        #                                   fps=1. / timer.average_time)
        # if show_image:
        #     cv2.imshow('online_im', online_im)
        # if save_dir is not None:
        #     cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
        if VIS_FLAG:
            import matplotlib.pyplot as plt
            fig = plt.figure(figsize=(16, 9))
            ax = fig.add_subplot(221)
            im1 = img0.copy()
            rpn_map = torch.sigmoid(
                out['rpn_map']).squeeze(0).squeeze(0).cpu().numpy()
            rpn_map = cv2.resize(rpn_map, im1.shape[:2][::-1], cv2.INTER_CUBIC)
            rpn_map = (rpn_map * 255).astype(np.uint8)
            print(rpn_map.max(), rpn_map.min(), rpn_map.shape)
            heat_map = cv2.applyColorMap(rpn_map, cv2.COLORMAP_JET)
            im1 = (im1.astype(np.int16) + heat_map.astype(np.int16)) // 2
            im1 = im1.astype(np.uint8)
            im2 = cv2.cvtColor(im1, cv2.COLOR_BGR2RGB)
            ax.imshow(im2)
            ind = 0
            ax = fig.add_subplot(223)
            affs = stuffs['hm'][0][ind][0]
            if affs.dim() == 3:
                affs = affs.squeeze(1)
            affs = affs.cpu().numpy()
            # sim_map = cv2.resize(affs, ())
            pos_inds = gt_rois[0] > 0
            pos_ref_inds = gt_ref_rois[0] > 0
            print(pos_inds.sum(), pos_ref_inds.sum())
            sim_map = affs[pos_inds][:, pos_ref_inds]
            import csv
            fscon = lambda x: str(float(x))
            fakemap = out['rpn_map'].flatten()[out['rois']
                                               [0].flatten()].sigmoid()
            with open(
                    '/home/toka/code/FairMOT/csv_files/%s_%06d.csv' %
                (seq, frame_id), 'w') as fd:
                rpmap = gt_ref_rois[1][pos_ref_inds]
                pmap = gt_rois[1]
                dw = csv.DictWriter(fd, ['id'] + list(map(str, rpmap)) +
                                    ['pos_sum', 'neg_sum', 'pred', 'ori'])
                dw.writeheader()
                dw.writerows([
                    dict([('id', str(iii))] +
                         [(str(k), fscon(v))
                          for k, v in zip(rpmap, affs[iin][pos_ref_inds])] +
                         [('pos_sum', fscon(affs[iin][pos_ref_inds].sum())),
                          ('neg_sum',
                           fscon(affs[iin].sum() -
                                 affs[iin][pos_ref_inds].sum())),
                          ('pred', fscon(out['hm'][iin])),
                          ('ori', fscon(fakemap[iin]))])
                    for iin, iii in enumerate(pmap)
                ])
            sim_map = cv2.resize(sim_map, (1800, 600))
            print(sim_map.shape, sim_map.max(axis=1))
            ax.imshow(sim_map)
            ax = fig.add_subplot(224)
            affs = stuffs['hm'][0][ind][1]
            if affs.dim() == 3:
                affs = affs.squeeze(1)
            affs = affs.cpu().numpy()
            # sim_map = cv2.resize(affs, ())
            sim_map = affs[pos_inds][:, pos_ref_inds]
            sim_map = cv2.resize(sim_map, (1800, 600))
            # print(sim_map.shape, sim_map.max(axis=1))
            ax.imshow(sim_map)
            fig.show()
            plt.show()
            if frame_id > 10:
                break
    # save results
    import pickle
    with open(result_filename + '.dets.pkl', 'wb') as fd:
        pickle.dump(tracker.raw_dets, fd)
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #17
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             bbox_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0

    for path, img0, detection in dataloader:
        if frame_id % 1 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        online_targets, detection_boxes = tracker.update(detection, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        #bbox detection plot
        box_tlbrs = []
        box_scores = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                box_tlbrs.append(tlbr)
                box_scores.append(box.score)

        timer.toc()
        # save results
        results.append(  # one frame index per box, not the scalar used elsewhere
            ([frame_id + 1] * len(online_tlwhs), online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            bbox_im = vis.plot_detections(img_bbox,
                                          box_tlbrs,
                                          scores=box_scores)
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.imshow('bbox_im', bbox_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
            cv2.imwrite(os.path.join(bbox_dir, '{:05d}.jpg'.format(frame_id)),
                        bbox_im)

        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #18
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             conf_thres=0.3):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = GNNTracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for i, (path, img, img0, p_img_path, p_img) in enumerate(dataloader):
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        if i == 0:
            p_boxes, init_img_path, p_img = dataloader.initialize(
                use_letter_box=opt.use_letter_box)
        else:
            p_boxes, p_img = prepare_prev_img(dataloader, online_targets, opt,
                                              p_img)

        if opt.use_roi_align:
            p_crops = p_boxes.clone()
            _, h, w = p_img.shape
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]
            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            # convert boxes from xyxy to normalized according to p_img dimensions
            p_crops[:, 0] = p_crops[:, 0] / w
            p_crops[:, 1] = p_crops[:, 1] / h
            p_crops[:, 2] = p_crops[:, 2] / w
            p_crops[:, 3] = p_crops[:, 3] / h
            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=p_img.unsqueeze(0).cuda(),
                conf_thres=conf_thres)
        else:
            p_crops = torchvision.ops.roi_align(input=p_img.unsqueeze(0),
                                                boxes=[p_boxes],
                                                output_size=opt.crop_size)
            p_crops = p_crops.cuda()
            p_crops_lengths = [len(p_crops)]

            edge_index = create_inference_time_graph(opt, p_boxes, p_crops,
                                                     p_img)

            online_targets = tracker.update(
                blob,
                img0,
                p_crops,
                p_crops_lengths,
                edge_index,
                gnn_output_layer=opt.inference_gnn_output_layer,
                p_imgs=None,
                conf_thres=conf_thres)
        online_tlwhs = []
        online_ids = []
        online_confs = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            t_conf = t.score
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_confs.append(t_conf)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_confs))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          scores=online_confs,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #19
File: track.py  Project: fzf404/AtcMOT
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename=None,
             save_dir=None,
             show_image=False,
             frame_rate=10,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    person_mot = PersonMOT()

    results = []
    frame_id = 0
    rflag = False

    # for path, img, img0 in dataloader:
    for i, (path, img, img0) in enumerate(dataloader):
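        # subsample the stream: only every 12th frame is run through the tracker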
        if i % 12 != 0:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                # online_scores.append(t.score)
        timer.toc()

        # send the ATC frame info
        rflag = atcs.send_info(opt.device_id, opt.url, online_ids,
                               online_tlwhs, opt.input_stream)

        if rflag and i % opt.check_setp == 0:
            person_mot.handle_crime(opt.device_id, opt.url, opt.trigger,
                                    online_ids, online_tlwhs, img0)

        # render the image if requested
        if show_image or save_dir:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
            results.append((frame_id + 1, online_tlwhs, online_ids))
            # scores
            # results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))

        # display the image
        if show_image:
            cv2.imshow('online_im', online_im)
            cv2.waitKey(1)

        # save the image
        if save_dir:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1

    # save results
    if opt.save:
        write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #20
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30,
             use_cuda=True):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    #for path, img, img0 in dataloader:
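    # short ring buffer (a deque with maxlen) holding the ROI features of the
    # most recent frames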
    out_queue = Deque(maxlen=6)
    for i, (path, img, img0) in enumerate(dataloader):
        #if i % 8 != 0:
        #continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        time_clean()
        time_sync('tracker all')
        if use_cuda:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).unsqueeze(0)
        with torch.no_grad():
            time_sync('forward_half and queue')
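            # first half-forward: run the backbone, pick the top ROIs from the
            # RPN map, and encode them as reference features for the queue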
            out = tracker.model.backend.forward_half(
                blob, roi_num=tracker.model.backend.roi_top_k)
            out_ref = {}
            out_ref.update(out)
            out_ref['rois'], _ = tracker.model.backend.get_rois(
                out['rpn_map'], 300)
            refs = tracker.model.backend.forward_rois(out_ref)
            out_queue.append(refs)
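            # pad the queue with the current features until it is full
            # (this only happens on the first few frames)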
            while len(out_queue) < out_queue.maxlen:
                out_queue.append(refs)
            time_sync('forward_half and queue')
            time_sync('forward_all')
            output, stuffs, _ = tracker.model.forward_half(out, out_queue)
            time_sync('forward_all')
        time_sync('tracker update')
        online_targets = tracker.update(blob, img0, output)
        time_sync('tracker update')
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        time_sync('tracker all')
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
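    # additionally dump the tracker's raw detections for offline analysis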
    import pickle
    with open(result_filename + '.dets.pkl', 'wb') as fd:
        pickle.dump(tracker.raw_dets, fd)
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #21
def eval_seq(opt,
             dataloader,
             polygon,
             paths,
             data_type,
             result_filename,
             frame_dir=None,
             save_dir=None,
             bbox_dir=None,
             show_image=True,
             frame_rate=30,
             polygon2=None,
             line1=None,
             line2=None,
             cam_id=None):
    if save_dir:
        mkdir_if_missing(save_dir)
    if bbox_dir:
        mkdir_if_missing(bbox_dir)
    if frame_dir:
        mkdir_if_missing(frame_dir)
    if cam_id is not None:
        f = open('/data/submission_output/cam_' + str(cam_id) + ".txt", "w")
    else:
        f = None
    tracker = JDETracker(opt,
                         polygon,
                         paths,
                         frame_rate=frame_rate,
                         polygon2=polygon2)
    timer = Timer()
    results = []
    frame_id = 1

    for path, img, img0 in dataloader:
        img0_clone = copy.copy(img0)
        # if frame_id % 1 == 0:
        #     logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking
        timer.tic()
        if opt.gpus[0] >= 0:
            blob = torch.from_numpy(img).cuda().unsqueeze(0)
        else:
            blob = torch.from_numpy(img).cpu().unsqueeze(0)
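        # the tracker returns the active tracks, the raw detection boxes, and the
        # tracklets that have left the counting polygon (frame, id, class, movement)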
        online_targets, detection_boxes, out_of_polygon_tracklet = tracker.update(
            blob, img0)
        if f is not None:
            # map the detected class name to the submission type id
            type_map = {
                'person': 1, 'motor': 1, 'motorcycle': 1, 'bicycle': 1,
                'tricycle': 1, 'car': 2, 'van': 2, 'bus': 3, 'truck': 4
            }
            for frame_ind, _, track_type, mov_id in out_of_polygon_tracklet:
                if mov_id != 'undetermine' and track_type in type_map:
                    f.write('cam_' + str(cam_id) + ',' + str(frame_ind) + ',' +
                            str(mov_id) + ',' + str(type_map[track_type]) +
                            '\n')

        online_tlwhs = []
        online_ids = []

        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            online_tlwhs.append(tlwh)
            online_ids.append(tid)

        #bbox detection plot
        box_tlbrs = []
        box_scores = []
        box_occlusions = []
        types = []
        img_bbox = img0.copy()
        for box in detection_boxes:
            tlbr = box.tlbr
            tlwh = box.tlwh

            box_tlbrs.append(tlbr)
            box_scores.append(box.score)
            box_occlusions.append('occ' if box.occlusion_status else 'non_occ')
            types.append(box.infer_type())

        timer.toc()
        # save results
        for track in out_of_polygon_tracklet:
            frame_idx, _, classes, movement = track
            results.append((opt.input_video.split('/')[-1][:-4], frame_idx,
                            classes, movement))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time,
                                          out_track=out_of_polygon_tracklet)
            bbox_im = vis.plot_detections(img_bbox,
                                          box_tlbrs,
                                          scores=box_scores,
                                          box_occlusion=None,
                                          types=types)
        if show_image:
            cv2.polylines(online_im, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.polylines(bbox_im, [np.asarray(polygon)], True, (0, 255, 255))
            cv2.polylines(img0_clone, [np.asarray(polygon)], True,
                          (0, 255, 255))
            cv2.imshow('online_im', online_im)
            cv2.imshow('bbox_im', bbox_im)
        # saving online_im / bbox_im / img0_clone to save_dir, bbox_dir and
        # frame_dir is disabled in this example

        frame_id += 1

    if f is not None:
        f.close()
    # note: `results` is collected but never written to disk in this example
    return frame_id, timer.average_time, timer.calls
Example #22
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    len_all = len(dataloader)
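    # evaluate only the second half of the sequence (presumably the first half
    # was used for training, as in the common MOT half-split protocol)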
    start_frame = int(len_all / 2)
    frame_id = int(len_all / 2)
    for i, (path, img, img0) in enumerate(dataloader):
        if i < start_frame:
            continue
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

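        # reload the frame with PIL and apply DETR-style resize plus ImageNet
        # normalization; note this transform pipeline is rebuilt on every frame
        # and could be hoisted out of the loop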
        img_pil = Image.open(path).convert('RGB')
        normalize = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
        img_norm = transforms.Compose(
            [T.RandomResize([800], max_size=1333), normalize])
        img_norm = img_norm(img_pil)
        # run tracking
        timer.tic()
        online_targets = tracker.update(img_norm.cuda().unsqueeze(0), img0)
        online_tlwhs = []
        online_ids = []
        #online_scores = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                #online_scores.append(t.score)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        #results.append((frame_id + 1, online_tlwhs, online_ids, online_scores))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0,
                                          online_tlwhs,
                                          online_ids,
                                          frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                        online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    #write_results_score(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
Example #23
def eval_seq(opt,
             dataloader,
             data_type,
             result_filename,
             gt_filename,
             save_dir=None,
             show_image=True,
             frame_rate=30):
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    gts = []
    frame_id = 0
    tid_max, tid_temp = 1, 1
    for path, img, img0 in dataloader:
        if frame_id % 100 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
            # if frame_id>400:
            #     break
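        # an image named 0000001 marks the start of a new sequence: carry the id
        # offset forward and start a fresh tracker so track ids stay unique
        # across sequences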
        if '0000001' in path:
            tid_max = tid_temp
            tracker = JDETracker(opt, frame_rate=frame_rate)
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        # online_targets = tracker.update(blob, img0)
        # use the class-separated tracker with per-class confidence thresholds
        online_targets = tracker.update_sep3(
            blob, img0,
            conf_thres=[0.4, 0.3, 0.4, 0.4, 0.4, 0.4, 0.4, 0.5, 0.5])
        # print(online_targets)
        online_tlwhs = []
        online_ids = []
        online_cids = [] #class id
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id + tid_max
            tcid = t.class_id
            # vertical = tlwh[2] / tlwh[3] > 1.6
            # if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
            if tlwh[2] * tlwh[3] > opt.min_box_area:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
                online_cids.append(tcid)
            tid_temp = max(tid, tid_temp)
        timer.toc()
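        # load the ground-truth boxes for this frame so they can be written in
        # the same result format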
        gt_tlwhs = []
        gt_ids = []
        gt_cids = []
        gt_path = path.replace('images', 'labels_with_ids').replace('jpg', 'txt')
        gt_targets = read_gt_txts(gt_path)
        for (tlwh, tid, tcid) in gt_targets:
            gt_tlwhs.append(tlwh)
            gt_ids.append(tid)
            gt_cids.append(tcid)
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids, online_cids))
        # save gts
        gts.append((frame_id + 1, gt_tlwhs, gt_ids, gt_cids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
        if show_image:
            cv2.imshow('online_im', online_im)
        if save_dir is not None:
            cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    # save gts
    write_results(gt_filename, gts, data_type)
    return frame_id, timer.average_time, timer.calls