Exemplo n.º 1
0
    def __next__(self):
        """Load the screenshot for the current action and return it as a tensor.

        Returns:
            tuple: ``(count, img, img0)`` where ``count`` is the index already
            advanced to the next item, ``img`` is the image as a contiguous
            float32 CHW array with channels reversed (BGR -> RGB) and scaled
            to [0, 1], and ``img0`` is the original HWC image.

        Raises:
            AssertionError: if the screenshot for the current action cannot
                be loaded.
            StopIteration: when the end of the dataset is reached.
        """
        # Read each item from the sub-data of the action datas according to
        # the current index.
        channel, action_time, img_point, video_parameter = read_subdata(
            self.action_datas[self.count], self.Videoparameters)

        # According to img_point, compute the target region and the reference
        # point of that region.
        Message = ScreenSHot(img_point,
                             action_time=action_time,
                             video_parameter=video_parameter,
                             setting_parameter=self.setting_parameter)
        # Fail loudly on load failure. An explicit raise (instead of a bare
        # `assert`) keeps the check active under `python -O` and avoids the
        # original NameError on `img0` when the failure branch was taken.
        if Message[0] is False:
            raise AssertionError(
                'Failed to load action {:d}'.format(self.count))
        img0 = Message[1]

        # Normalize: reverse channel order (BGR -> RGB), HWC -> CHW,
        # float32 in [0, 1].
        img = img0[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img, dtype=np.float32)
        img /= 255.0

        self.count += 1
        if self.count == len(self):
            raise StopIteration

        return self.count, img, img0
Exemplo n.º 2
0
    def update(self):
        """Convert the tracked images of one camera view into the other views.

        For each action index, consumes a tracking result from
        ``self.input_Q``, forwards the raw tracking results to
        ``self.tracking_Q``, converts the tracked pixel coordinates into
        world coordinates using the camera's calibration parameters, and
        launches multi-threaded sub-image generation for the other views.

        Raises:
            Exception: if the index read from ``self.input_Q`` does not match
                the expected ``action_index``.
        """
        self.logger.debug('The pid of Calibrate_transfer.update() : {}'.format(
            os.getpid()))
        self.logger.debug(
            'The thread of Calibrate_transfer.update() : {}'.format(
                currentThread()))
        update_timer = Timer()
        sub_img_generate_timer = Timer()
        for action_index in range(self.S_Coordinate_transfer, self.datalen):

            update_timer.tic()  # start timing this action
            self.logger.debug(
                'update() ======================================== action {}'.
                format(action_index))
            Flag, input_index, tracking_results = self.input_Q.get()
            if input_index != action_index:
                # BUG FIX: the message used to read "index does match" even
                # though it is logged precisely when the indices differ.
                self.logger.log(
                    31,
                    '---——————————————————————————————————index does not match')
                raise Exception(
                    'Calibrate_transfer.update action_index_update {} != input_index {} '
                    .format(action_index, input_index))

            if Flag == False:
                # Upstream failed for this action: propagate failure markers
                # downstream and skip it.
                self.tracking_Q.put((False, (action_index, [], [])))
                self.PreProcess_Q.put((False, (action_index, [])))
                continue

            frames_time, sub_imgs, ReID_feature_list, img_points = tracking_results
            # The results split into the tracking output itself and the
            # detections derived by coordinate-transforming every tracked
            # frame. Forward the tracking output to the queue first.
            self.tracking_Q.put(
                (True, (action_index, sub_imgs, ReID_feature_list)))

            channel, action_time, img_point, video_parameter = read_subdata(
                self.action_datas[action_index], self.Videoparameters)
            calibrateParameter = video_parameter['CalibrateParameter']

            # Convert the tracked pixel coordinates into world coordinates.
            # The first entry of the sequence is the ultimate target used for
            # calibration only, not for subsequent coordinate conversion, so
            # conversion starts from the second item.
            world_points = []
            start_time = frames_time[1]  # start of the tracked sequence, relative to kickoff time
            for p_index in range(1, len(img_points)):
                img_point = img_points[p_index]
                # The input is a continuous trajectory, but detection may
                # miss some frames, leaving an empty img_point (length 0).
                if len(img_point) == 0:
                    world_points.append(None)
                else:
                    world_point = transform_2d_to_3d(
                        img_point,
                        calibrateParameter.cameraMatrix,
                        calibrateParameter.distCoeffs,
                        calibrateParameter.rotation_vector,
                        calibrateParameter.translation_vector,
                        world_z=0)

                    world_point = np.reshape(world_point, [1, 3])
                    world_points.append(world_point)

            # Transform the world coordinates into the other views, then crop
            # sub-images there and run detection on them.
            sub_img_generate_timer.tic()
            self.sub_img_generate_multi_thread(channel, action_index,
                                               world_points, start_time)
            self.logger.log(
                22, 'Calibrate_transfer.update() action {} consums {}s'.format(
                    action_index, update_timer.toc()))
Exemplo n.º 3
0
    def update(self):
        """Run short-term tracking for every action in the dataset.

        For each action index: read the action's sub-data, crop the target
        region around the operator's clicked point from the matching video,
        run a short JDE tracking pass over that clip, and push the results
        into ``self.PostProcess_Q``. Actions for which no crop rectangle can
        be generated are pushed as failure markers and skipped.
        """
        self.logger.debug('The pid of FMLoader.update_() : {}'.format(
            os.getpid()))
        self.logger.debug('The thread of FMLoader.update() : {}'.format(
            currentThread()))
        self.logger.log(21, 'self.datalen  : {}'.format(self.datalen))
        self.update_timer = Timer()

        # keep looping the whole dataset
        for index in range(self.S_Short_track, self.datalen):

            self.update_timer.tic()
            self.logger.debug('update  <===========> action {} '.format(index))
            # show_memory_info('action _ {}, {}'.format(index, ' S_Short_track begin'))

            # result_root = make_dir(self.root_path, index, Secondary_directory='{}_short_tracking'.format(self.dir_name))
            '''read each item from  subdata of action datas according to the index '''
            channel, action_time, img_point, video_parameter = read_subdata(
                self.action_datas[index], self.Videoparameters)

            video = video_parameter['video']
            # action time need to add the delta time to calibrate the time between channels .
            video_time = action_time + video_parameter['delta_t']
            width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
            Message = GenerateRect(img_point,
                                   self.setting_parameter['Output_size'],
                                   self.setting_parameter['bias'], width,
                                   height)

            if Message[0] == True:
                # Get the target region and convert it to
                # [x_left, y_top, x_right, y_bottom].
                rect = Message[1]
                x_l = int(rect[0])
                y_l = int(rect[1])
                x_r = int(rect[2] + rect[0])
                y_r = int(rect[3] + rect[1])
                rect = [x_l, y_l, x_r, y_r]
                # Re-express the clicked point relative to the crop instead of
                # the original image.
                reference_point = (int(img_point[0] - x_l),
                                   int(img_point[1] - y_l))
                Left_Up_points = (rect[0], rect[1])  # upper-left corner of the crop, relative to the original image
                # sub_img = img[y_l:y_r, x_l:x_r]
            else:
                # No crop could be produced, so nothing goes downstream except
                # a failure marker.
                self.PostProcess_Q.put(
                    (False, None, None, None, None, None, None))
                continue

            # No need to detect frame by frame.
            # print('self.setting_parameter[\'Output_size\'], multiple=self.track_len', self.setting_parameter['Output_size'], self.track_len)
            self.logger.debug('Starting Building LoadShortCutVideo...')

            dataloader = LoadShortCutVideo(
                video,
                video_time,
                rect,
                self.setting_parameter['Output_size'],
                multiple=self.track_len)
            # show_memory_info('action _ {}, {}'.format(index, 'LoadShortCutVideo release ==========='))

            target_frame = dataloader.multiple * dataloader.frame_rate
            frame_rate = dataloader.frame_rate
            start_time = action_time - self.track_len  # start of the short clip to track, relative to kickoff time

            # Run the short-term tracking pass.
            self.logger.debug('Starting tracking...')
            tracker = JDETracker(self.tracker_opt,
                                 self.tracker_model)  # create a tracker
            results = Short_track(tracker, dataloader, self.tracker_opt)

            # Delete the tracker and reclaim the memory immediately.
            # del tracker
            # gc.collect()

            # Push the results into the queue.
            self.PostProcess_Q.put(
                (True, results, target_frame, start_time, frame_rate,
                 Left_Up_points, reference_point))
            # del results
            # show_memory_info('action _ {}, {}'.format(index, 'self.PostProcess_Q.put'))

            self.logger.log(
                21, 'FMLoader.update() action {} consums {}s'.format(
                    index, self.update_timer.toc()))
Exemplo n.º 4
0
    os.makedirs(vis_path, exist_ok=True)

    multi = 10
    C_T_output_queue = Queue(queueSize)
    transfer = Calibrate_transfer(opt,
                                  detector_opt,
                                  Tracker_output_queue,
                                  C_T_output_queue,
                                  vis=True,
                                  queueSize=1024)
    transfer.update_()
    transfer.detect_()
    transfer.postProcess_()

    for index in range(len(action_datas)):
        channel, action_time, img_point, video_parameter = read_subdata(
            action_datas[index], Videoparameters)

        Message = ScreenSHot(img_point,
                             action_time=action_time,
                             video_parameter=video_parameter,
                             setting_parameter=setting_parameter)

        if Message[0] == True:
            count = 1
            # 根据操作员点击的点,进行区域截图。
            img0, reference_point, sub_img_bias = Message[1], Message[
                2], Message[3]
            cv2.circle(img0,
                       (int(reference_point[0]), int(reference_point[1])),
                       radius=5,
                       color=(0, 255, 0),
Exemplo n.º 5
0
def Short_Tracking(opt, game_ID, tracker):
    """Run short-term tracking for every action of one game.

    Reads the operator-made JSON data for ``game_ID``; for each action,
    crops the region around the clicked point from the matching video and
    runs ``Short_track_eval`` on that short clip, writing per-action result
    files next to the result directory.

    Args:
        opt: tracking options forwarded to ``Short_track_eval``.
        game_ID: identifier of the game; selects the data directory.
        tracker: JDE-style tracker instance; its track lists are reset
            before each action.

    Note:
        Relies on the module-level ``args`` and ``logger``.
    """
    # Directory holding the data of this game.
    root_path = os.path.join(args.data_root, '{}'.format(game_ID))
    logger.info('目标文件夹是{}'.format(root_path))

    file_name = args.file_name

    # read data from json file that software operator made.
    (Videoparameters,
     setting_parameter,
     action_datas,
     channel_list,
     parameter) = read_data_from_json_file(root_path, file_name, args)

    for index in range(0, len(action_datas)):
        print(
            '<===============================================================> action {}'
            .format(index))

        # Reset the tracker's state between actions.
        tracker.tracked_stracks = []  # type: list[STrack]
        tracker.lost_stracks = []  # type: list[STrack]
        tracker.removed_stracks = []  # type: list[STrack]

        result_root = make_dir(
            root_path,
            index,
            Secondary_directory='{}_short_tracking'.format(game_ID))
        '''read each item from  subdata of action datas according to the index '''
        channel, action_time, img_point, video_parameter = read_subdata(
            action_datas[index], Videoparameters)

        video = video_parameter['video']
        # action time need to add the delta time to calibrate the time
        # between channels .
        video_time = action_time + video_parameter['delta_t']
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        Message = GenerateRect(img_point, setting_parameter['Output_size'],
                               setting_parameter['bias'], width, height)

        # Skip actions for which no crop rectangle could be generated.
        # (`!= True` is the exact negation of the original `== True` check.)
        if Message[0] != True:
            continue

        # Crop rectangle, converted to [x_left, y_top, x_right, y_bottom].
        rect = Message[1]
        x_l = int(rect[0])
        y_l = int(rect[1])
        x_r = int(rect[2] + rect[0])
        y_r = int(rect[3] + rect[1])
        rect = [x_l, y_l, x_r, y_r]

        logger.info('Starting tracking...')
        dataloader = datasets.LoadShortCutVideo(
            video, video_time, rect, setting_parameter['Output_size'])
        target_frame = dataloader.multiple * dataloader.frame_rate

        result_filename = os.path.join(result_root, '..',
                                       '{}.txt'.format(index))
        frame_rate = dataloader.frame_rate
        # NOTE(review): the reference point is hard-coded rather than derived
        # from the clicked point — confirm this is intentional.
        reference_point = (544, 408)
        Short_track_eval(opt,
                         dataloader,
                         'mot',
                         result_filename,
                         target_frame,
                         reference_point,
                         save_dir=result_root,
                         show_image=False,
                         Input_tracker=tracker,
                         frame_rate=frame_rate)