Example #1
0
    def __init__(self,
                 opt,
                 detector_opt,
                 Tracker_output_queue,
                 C_T_output_queue,
                 S_Coordinate_transfer,
                 S_Pose_Estimate,
                 vis=False,
                 save_results=False,
                 queueSize=1024):
        """Set up the coordinate-transfer stage of the pipeline.

        Args:
            opt: global run options (data_root, dir_name, file_name, ...).
            detector_opt: options used to build the JDE model that this
                stage uses purely as a detector.
            Tracker_output_queue: upstream queue this stage consumes from.
            C_T_output_queue: downstream queue this stage produces into.
            S_Coordinate_transfer: index before which coordinate-transfer
                results are already computed and cached.
            S_Pose_Estimate: index before which pose-estimation results
                are already computed and cached.
            vis: when truthy, save visualisation images under <root>/vis.
            save_results: when truthy, save intermediate results to disk.
            queueSize: capacity of each internal processing queue.
        """
        self.logger = Log(__name__, 'Calibrate_transfer').getlog()

        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        self.file_name = opt.file_name
        # The video has to be loaded twice anyway; reading it separately is fine.
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file(self.root_path, self.file_name, self.opt)

        self.datalen = len(self.action_datas)

        self.detector_opt = detector_opt  # options for the tracker used as a detector
        self.logger.info('Creating model...')
        self.detector_model = create_JDETracker_model(self.detector_opt)
        # The JDE tracker is employed here purely as a detector.
        self.detector = JDETracker(self.detector_opt, self.detector_model)

        self.input_Q = Tracker_output_queue  # overall input of tracking data
        self.PreProcess_Q = Queue(maxsize=queueSize)  # crops pre-processed before detection
        self.tracking_Q = Queue(maxsize=queueSize)
        self.detecions_Q = Queue(maxsize=queueSize)
        self.output_Q = C_T_output_queue

        self.vis = vis
        if self.vis:  # idiomatic truthiness instead of '== True' (PEP8 E712)
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.S_Coordinate_transfer = S_Coordinate_transfer
        self.S_Pose_Estimate = S_Pose_Estimate
        self.save_results = save_results
        if self.save_results:
            self.intermediate_results_dir = os.path.join(
                self.root_path, 'intermediate_results', 'Calibrate_transfer')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)
Example #2
0
    def __init__(self, opt, C_T_output_queue, queueSize=1024):
        """Store the run configuration and resolve data paths for this stage.

        Args:
            opt: global run options (data_root, dir_name, file_name, ...).
            C_T_output_queue: queue this stage writes its output into.
            queueSize: accepted for interface compatibility; not used here.
        """
        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        self.file_name = opt.file_name

        # The video has to be loaded twice anyway; reading it separately is fine.
        (self.Videoparameters,
         self.setting_parameter,
         self.action_datas,
         self.channel_list,
         self.parameter) = read_data_from_json_file(self.root_path,
                                                    self.file_name, self.opt)

        self.datalen = len(self.action_datas)

        self.output_Q = C_T_output_queue
Example #3
0
    def __init__(self, path, file_name, opt, frame_rate=25):
        """Initialise a per-action dataset view over the annotation file.

        Args:
            path: directory containing the JSON annotation file.
            file_name: name of the JSON annotation file.
            opt: options object; only ``opt.img_size`` (width, height) is read.
            frame_rate: nominal frame rate of the source video.
        """
        # Read data from the JSON file that the software operator made.
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file(path, file_name, opt)

        self.frame_rate = frame_rate

        # Read the configured frame size once; several attribute aliases are
        # kept because different callers read different names.
        w, h = opt.img_size[0], opt.img_size[1]
        self.vw = w
        self.vh = h
        self.width = w
        self.height = h
        self.w, self.h = w, h
        self.count = 0

        self.vn = len(self.action_datas)
        # BUGFIX: corrected the typo 'Lenth' in the user-facing message.
        print('Length of the video: {:d} frames'.format(self.vn))
Example #4
0
    from opt import opt
    from FairMot.lib.opts import opts
    from CalibrateTransfer.img_operation import ScreenSHot

    # Build the detector/tracker options for the JDE model.
    detector_opt = opts().init()

    queueSize = 1000
    Tracker_output_queue = Queue(queueSize)  # use the named size, not a second literal
    dir_name = opt.dir_name
    root_path = os.path.join(opt.data_root, '{}'.format(dir_name))
    file_name = opt.file_name
    Videoparameters, \
    setting_parameter, \
    action_datas, \
    channel_list, \
    parameter = read_data_from_json_file(root_path, file_name, opt)
    vis_path = os.path.join(root_path, 'vis')
    os.makedirs(vis_path, exist_ok=True)

    multi = 10
    C_T_output_queue = Queue(queueSize)
    # BUGFIX: Calibrate_transfer.__init__ also takes the required positional
    # parameters S_Coordinate_transfer and S_Pose_Estimate; the original call
    # omitted them and would raise TypeError. Start from index 0, meaning no
    # cached results are resumed.
    transfer = Calibrate_transfer(opt,
                                  detector_opt,
                                  Tracker_output_queue,
                                  C_T_output_queue,
                                  S_Coordinate_transfer=0,
                                  S_Pose_Estimate=0,
                                  vis=True,
                                  queueSize=1024)
    transfer.update_()
    transfer.detect_()
    transfer.postProcess_()
Example #5
0
    def __init__(self,
                 opt,
                 tracker_opt,
                 Tracker_output_queue,
                 S_Short_track,
                 S_Coordinate_transfer,
                 track_len=2,
                 vis=False,
                 save_results=False,
                 queueSize=1000,
                 sp=False):
        """Set up the short-tracking loader stage of the pipeline.

        Args:
            opt: global run options (data_root, dir_name, file_name, ...).
            tracker_opt: options used to build the JDE tracker model.
            Tracker_output_queue: queue this stage writes its results into.
            S_Short_track: index before which tracking results are already
                computed and cached.
            S_Coordinate_transfer: index before which coordinate-transfer
                results are already computed and cached.
            track_len: seconds to track before and after each action point.
            vis: when truthy, save visualisation images under <root>/vis.
            save_results: when truthy, save intermediate results to disk.
            queueSize: capacity of the internal post-processing queue.
            sp: accepted for interface compatibility; not used in this
                initializer.
        """
        self.logger = Log(__name__, 'FMLoader').getlog()

        self.opt = opt
        self.track_len = track_len  # seconds tracked before/after the action point

        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))

        # Resume indices: results before these indices were already computed
        # and saved, so they can be read back from the cache.
        self.S_Short_track = S_Short_track
        self.S_Coordinate_transfer = S_Coordinate_transfer

        self.file_name = opt.file_name
        # The video has to be loaded twice anyway; reading it separately is fine.
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file(self.root_path, self.file_name, self.opt)

        self.datalen = len(self.action_datas)
        self.logger.log(
            21, '___________________一共有 {} 条数据_______________'.format(
                self.datalen))

        # Whether to draw and save images for human inspection.
        self.vis = vis
        if self.vis:  # idiomatic truthiness instead of '== True' (PEP8 E712)
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.save_results = save_results
        if self.save_results:
            self.intermediate_results_dir = os.path.join(
                self.root_path, 'intermediate_results', 'FMLoader')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)

        self.tracker_opt = tracker_opt  # options for the tracker
        self.IoUthreshold = 0.5  # IoU threshold (consumers not visible in this block)

        self.logger.info('Creating model...')
        self.tracker_model = create_JDETracker_model(self.tracker_opt)

        # Queues used to hand work between pipeline stages.
        self.PostProcess_Q = Queue(maxsize=queueSize)
        self.Output_Q = Tracker_output_queue
Example #6
0
def Short_Tracking(opt, game_ID, tracker):
    """Run short-range tracking for every annotated action of one match.

    NOTE(review): the body reads the globals ``args`` and ``logger`` rather
    than the ``opt`` parameter for data_root/file_name — presumably ``args``
    mirrors ``opt``; confirm, or switch the body to use ``opt`` consistently.

    Args:
        opt: options object forwarded to Short_track_eval.
        game_ID: match identifier; selects the data directory and names the
            per-match output directory.
        tracker: JDE tracker instance whose state is reset before each action.
    """
    # Directory holding this match's data.
    root_path = os.path.join(args.data_root, '{}'.format(game_ID))
    logger.info('目标文件夹是{}'.format(root_path))

    file_name = args.file_name

    # read data from json file that software operator made.
    Videoparameters, \
    setting_parameter, \
    action_datas, \
    channel_list, \
    parameter = read_data_from_json_file(root_path, file_name, args)

    for index in range(0, len(action_datas)):
        # if index < 82 :
        # 	continue
        preNum = -1  # start by assuming the recognised number is -1 (unknown)
        print(
            '<===============================================================> action {}'
            .format(index))
        loop_start = time.time()  # calculate the time .

        # Reset the tracker state so each action is tracked from scratch.
        tracker.tracked_stracks = []  # type: list[STrack]
        tracker.lost_stracks = []  # type: list[STrack]
        tracker.removed_stracks = []  # type: list[STrack]

        result_root = make_dir(
            root_path,
            index,
            Secondary_directory='{}_short_tracking'.format(game_ID))
        '''read each item from  subdata of action datas according to the index '''
        channel, action_time, img_point, video_parameter = read_subdata(
            action_datas[index], Videoparameters)

        video = video_parameter['video']
        video_time = action_time + video_parameter[
            'delta_t']  # action time need to add the delta time to calibrate the time between channels .
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        Message = GenerateRect(img_point, setting_parameter['Output_size'],
                               setting_parameter['bias'], width, height)

        if Message[0] == True:
            # A valid crop rectangle around the annotated point was produced.
            rect = Message[1]
            x_l = int(rect[0])
            y_l = int(rect[1])
            x_r = int(rect[2] + rect[0])
            y_r = int(rect[3] + rect[1])
            rect = [x_l, y_l, x_r, y_r]
            # Annotated point re-expressed in crop-local coordinates.
            new_point = (int(img_point[0] - x_l), int(img_point[1] - y_l))
            # sub_img = img[y_l:y_r, x_l:x_r]
        else:
            # No usable rectangle for this action; skip it.
            continue

        logger.info('Starting tracking...')
        dataloader = datasets.LoadShortCutVideo(
            video, video_time, rect, setting_parameter['Output_size'])
        target_frame = dataloader.multiple * dataloader.frame_rate

        result_filename = os.path.join(result_root, '..',
                                       '{}.txt'.format(index))
        frame_rate = dataloader.frame_rate
        reference_point = (544, 408)
        Short_track_eval(opt,
                         dataloader,
                         'mot',
                         result_filename,
                         target_frame,
                         reference_point,
                         save_dir=result_root,
                         show_image=False,
                         Input_tracker=tracker,
                         frame_rate=frame_rate)