Example #1
    def _send_ack(self, msg: Message):
        """ Sends the ack message """
        Log.log(MsgType.INFO, "Adapter - Sending ACK", msg)
        self._adaptee.send(
            Message(msg_type=Message.MsgType.ack,
                    msg_id=msg.msg_id,
                    destination=msg.source,
                    source=msg.destination))
Example #2
    def send(self, msg: Message):
        """ Sends a message """
        if msg.destination == self._destination:
            self._validation[str(msg.msg_id)] = {
                "sent_at": time.time(),
                "source": msg.source
            }
            Log.log(MsgType.INFO, "Adapter - Send", msg)
            self._adaptee.send(msg)
        else:
            raise ValueError("Wrong destination")
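The adapter examples on this page (#1, #2, #4, #5, #6, and #7) all assume a `Message` class with a nested `MsgType` enum and simple routing fields. That class is not shown anywhere here, so the following is a minimal, hypothetical sketch inferred from how the fields are used; the enum member values and the uuid-based default id are guesses, not the original project's code:

import enum
import uuid


class Message:
    """ Hypothetical message type inferred from the adapter examples """

    class MsgType(enum.Enum):
        command = 0
        ack = 1
        no_ack = 2
        executed = 3
        failed = 4

    def __init__(self, msg_type, destination, source, msg_id=None, data=None):
        self.msg_type = msg_type
        # Reuse the sender's id for replies; generate a fresh one otherwise
        self.msg_id = msg_id if msg_id is not None else uuid.uuid4().hex
        self.destination = destination
        self.source = source
        self.data = data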
Example #3
import sys

import torch as th

# `Log` and the module-level `dtype` are assumed to come from the surrounding
# project.


class TorchReader():
    """
    Utility for reading in pre-processed OpenFOAM tensor files.
    """
    def __init__(self):
        self.lg = Log()

    def loadTensor(self, fileName):
        """
        Read in tensor
        """
        try:
            self.lg.log('Attempting to read file: '+str(fileName))
            self.lg.log('Parsing file...')
            t0 = th.load(fileName)
            self.lg.success('Data field file successfully read.')

        except OSError as err:
            # IOError is an alias of OSError in Python 3, so one handler covers both
            print("File read error: {0}".format(err))
            return None
        except Exception:
            print("Unexpected error: {0}".format(sys.exc_info()[0]))
            return None

        return t0

    def readScalarTh(self, timeStep, fieldName, dirPath='.'):
        data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath), str(timeStep), fieldName))
        try:
            # Drop the singleton channel dimension if present
            data = data0.squeeze(1)
        except (AttributeError, IndexError):
            # loadTensor returned None, or the tensor has no dim 1 to squeeze
            data = data0
        return data

    def readVectorTh(self, timeStep, fieldName, dirPath='.'):
        return self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath), str(timeStep), fieldName)).type(dtype)

    def readTensorTh(self, timeStep, fieldName, dirPath='.'):
        data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath), str(timeStep), fieldName)).type(dtype)
        # Reshape into an [nCells, 3, 3] tensor
        return data0.view(data0.size()[0], 3, -1)

    def readSymTensorTh(self, timeStep, fieldName, dirPath='.'):
        data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath), str(timeStep), fieldName)).type(dtype)
        # Reshape into an [nCells, 3, 3] tensor
        return data0.view(data0.size()[0], 3, -1)

    def readCellCenters(self, timeStep, dirPath='.'):
        return self.loadTensor('{}/{}/cellCenters-torch.th'.format(str(dirPath), str(timeStep))).type(dtype)
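A hedged usage sketch for TorchReader: the directory layout `<dirPath>/<timeStep>/<field>-torch.th` follows the format strings above, while the time step (100) and the field names ('k', 'U', 'S') are made up for illustration:

reader = TorchReader()
k = reader.readScalarTh(100, 'k', dirPath='./case')      # [nCells] scalar field
U = reader.readVectorTh(100, 'U', dirPath='./case')      # [nCells x 3] vector field
S = reader.readTensorTh(100, 'S', dirPath='./case')      # reshaped to [nCells, 3, 3]
centers = reader.readCellCenters(100, dirPath='./case')  # cell-center coordinates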
Example #4
    def _cleanup_unresponded_msgs(self):
        """ Cleans all messages that were not responded on time """
        msg_to_delete = []
        current_time = time.time()

        for msg_id in self._validation:
            elapsed_time = current_time - self._validation[msg_id]["sent_at"]

            if elapsed_time > self._timeout:
                msg = Message(msg_type=Message.MsgType.no_ack,
                              msg_id=msg_id,
                              destination=self._validation[msg_id]["source"],
                              source=self._destination)

                Log.log(MsgType.FAIL, "Adapter - NO-ACK", msg)
                msg_to_delete.append(msg_id)

        for msg_id in msg_to_delete:
            self._validation.pop(str(msg_id), None)
Example #5
    def _recv(self):
        """ Listen for messages and executes the callback function """
        while True:

            if self._adaptee.has_data():
                msg = self._adaptee.recv()

                if msg is None:
                    continue

                Log.log(MsgType.INFO, "Adapter - Reciving", msg)
                is_ack = msg.msg_type is Message.MsgType.ack

                if is_ack:
                    self._remove_ack_msg(msg)
                else:
                    # Send ACK
                    self._send_ack(msg)

                    # Execute command (TODO: the call below has to be async; see the sketch below)
                    self._callback_func(msg)

            self._cleanup_unresponded_msgs()
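The TODO in Example #5 notes that the callback should run asynchronously so a slow command handler cannot stall the receive loop. One possible approach (a sketch, not the project's actual solution) is to dispatch each callback onto a short-lived daemon thread:

import threading


def dispatch_async(callback, msg):
    """ Run the user callback on a daemon thread so _recv keeps polling """
    worker = threading.Thread(target=callback, args=(msg,))
    worker.daemon = True
    worker.start()

Inside `_recv`, the call `self._callback_func(msg)` would then become `dispatch_async(self._callback_func, msg)`.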
Example #6
    def _execute_commands(self):
        """ If the command destination matches with the unit, it is executed locally.
            Otherwise it is sent to another module using the adapter (sender)"""
        remove_cmd = []
        for cmd in self._command_queue:

            # Handle Command: this command must be processed by this unit
            if cmd.dest == self._unit_name:

                Log.log(MsgType.INFO, "CMD Executor - Executing CMD",
                        str(cmd.cmd_id))

                cmd.execute_command()
                cmd.status = Command.Status.executed

                # Report the execution result; the message type must be a Message.MsgType
                status = (Message.MsgType.executed
                          if cmd.status is not Command.Status.failed
                          else Message.MsgType.failed)
                data = CommandEncoding.command_encoder(cmd)
                msg = Message(msg_type=status,
                              msg_id=cmd.cmd_id,
                              destination=cmd.source,
                              source=cmd.dest,
                              data=data)

                self._sender(msg)

            # This command must be sent to other unit
            else:
                Log.log(MsgType.INFO,
                        "CMD Executor - Sending command to %s" % cmd.dest,
                        str(cmd.cmd_id))
                # Command encode
                cmd.status = Command.Status.in_progress
                data = CommandEncoding.command_encoder(cmd)
                msg = Message(msg_type=Message.MsgType.command,
                              msg_id=cmd.cmd_id,
                              destination=cmd.dest,
                              source=cmd.source,
                              data=data)
                Log.log(MsgType.INFO, "CMD Executor - Sending",
                        str(cmd.cmd_id))
                self._sender(msg)

                if self._validator is not None:
                    self._validator.add_command(cmd)

            # Command cleanup
            remove_cmd.append(cmd)

        for cmd in remove_cmd:
            # Deleting only the loop variable would be a no-op; remove the
            # command from the queue itself
            self._command_queue.remove(cmd)
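Example #6 also leans on a `Command` type that is not shown on this page. A minimal, hypothetical sketch of the interface it would need; the field and status names are inferred from the usage above, and the callable-based execution is a guess:

import enum
import uuid


class Command:
    """ Hypothetical command type inferred from the executor example """

    class Status(enum.Enum):
        pending = 0
        in_progress = 1
        executed = 2
        failed = 3

    def __init__(self, source, dest, func=None):
        self.cmd_id = uuid.uuid4().hex
        self.source = source  # unit that issued the command
        self.dest = dest      # unit that must execute it
        self.status = Command.Status.pending
        self._func = func     # callable to run when executed locally

    def execute_command(self):
        try:
            if self._func is not None:
                self._func()
            self.status = Command.Status.executed
        except Exception:
            self.status = Command.Status.failed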
Example #7
    def _remove_ack_msg(self, msg: Message):
        """ Removes the acknowledged message from the validation table """
        Log.log(MsgType.SUCCESS, "Adapter - ACK", msg)
        self._validation.pop(str(msg.msg_id), None)
Example #8
class FMLoader:
    '''
    Loads FairMOT's short-term tracking results
    '''
    def __init__(self,
                 opt,
                 tracker_opt,
                 Tracker_output_queue,
                 S_Short_track,
                 S_Coordinate_transfer,
                 track_len=2,
                 vis=False,
                 save_results=False,
                 queueSize=1000,
                 sp=False):

        self.logger = Log(__name__, 'FMLoader').getlog()

        self.opt = opt
        self.track_len = track_len  # Track track_len seconds before and after the action point

        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))

        # Read previously computed results from the cache.
        self.S_Short_track = S_Short_track  # An index: tracking results before it have already been computed and saved.
        self.S_Coordinate_transfer = S_Coordinate_transfer  # An index: coordinate-transfer results before it have already been computed and saved.

        # logger.info('The target directory is {}'.format(self.root_path))
        self.file_name = opt.file_name
        # The video has to be loaded twice anyway, so reading it separately is fine too
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file(self.root_path, self.file_name, self.opt)

        self.datalen = len(self.action_datas)
        self.logger.log(
            21, '___________________ {} data items in total _______________'.format(
                self.datalen))

        # Whether to draw the images so the results can be visualized for others
        self.vis = vis
        if self.vis:
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.save_results = save_results
        if self.save_results:
            self.intermediate_results_dir = os.path.join(
                self.root_path, 'intermediate_results', 'FMLoader')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)

        self.tracker_opt = tracker_opt  # Options used to configure the tracker.
        self.IoUthreshold = 0.5

        self.logger.info('Creating model...')
        self.tracker_model = create_JDETracker_model(self.tracker_opt)
        # self.tracker = JDETracker(self.tracker_opt ) # What is JDE Tracker?

        # initialize the queue used to store frames read from
        # the video file

        self.PostProcess_Q = Queue(maxsize=queueSize)
        self.Output_Q = Tracker_output_queue

    def Read_From_Cache(self):
        '''
        Load previously computed results from file
        '''
        from utils.index_operation import get_index

        self.logger.debug('The pid of FMLoader.Read_From_Cache() : {}'.format(
            os.getpid()))
        self.logger.debug(
            'The thread of FMLoader.Read_From_Cache() : {}'.format(
                currentThread()))

        cache_index = get_index(self.intermediate_results_dir)
        # Only the useful range needs to be read.
        action_index = self.S_Coordinate_transfer
        for action_index in range(self.S_Coordinate_transfer,
                                  self.S_Short_track):

            if action_index not in cache_index:
                # Nothing saved in the cache means this action itself was False
                self.Output_Q.put((False, action_index, []))
            else:
                # Read the computed results for this action from the cache directory.
                _, [
                    frames_time, sub_imgs, ReID_feature_list,
                    bottom_center_point_list
                ] = self.load_intermediate_results(action_index)
                self.Output_Q.put((True, action_index, [
                    frames_time, sub_imgs, ReID_feature_list,
                    bottom_center_point_list
                ]))

        self.logger.log(
            21, 'length of self.Output_Q = {}'.format(self.Output_Q.qsize()))
        self.logger.log(
            21,
            'FMLoader loaded actions up to {} from the cache file'.format(action_index))
        # show_memory_info('===========Read_From_Cache==============')

    def update_(self):

        self.t_update = Thread(target=self.update, args=())
        self.t_update.daemon = True
        self.t_update.start()

        return self

    def update(self):
        '''
        Run short-term tracking for every action and queue the raw results
        '''
        self.logger.debug('The pid of FMLoader.update_() : {}'.format(
            os.getpid()))
        self.logger.debug('The thread of FMLoader.update() : {}'.format(
            currentThread()))
        self.logger.log(21, 'self.datalen  : {}'.format(self.datalen))
        self.update_timer = Timer()

        # keep looping the whole dataset
        for index in range(self.S_Short_track, self.datalen):

            self.update_timer.tic()
            self.logger.debug('update  <===========> action {} '.format(index))
            # show_memory_info('action _ {}, {}'.format(index, ' S_Short_track begin'))

            # result_root = make_dir(self.root_path, index, Secondary_directory='{}_short_tracking'.format(self.dir_name))
            '''Read each item from the sub-data of action_datas according to the index'''
            channel, action_time, img_point, video_parameter = read_subdata(
                self.action_datas[index], self.Videoparameters)

            video = video_parameter['video']
            # The action time needs delta_t added to calibrate timing between channels.
            video_time = action_time + video_parameter['delta_t']
            width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
            height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
            Message = GenerateRect(img_point,
                                   self.setting_parameter['Output_size'],
                                   self.setting_parameter['bias'], width,
                                   height)

            if Message[0]:
                # Get the target region
                rect = Message[1]
                x_l = int(rect[0])
                y_l = int(rect[1])
                x_r = int(rect[2] + rect[0])
                y_r = int(rect[3] + rect[1])
                rect = [x_l, y_l, x_r, y_r]
                # Update the target point from its position in the original
                # image to its position relative to the crop
                reference_point = (int(img_point[0] - x_l),
                                   int(img_point[1] - y_l))
                Left_Up_points = (rect[0], rect[1])  # Top-left corner of the crop in the original image
                # sub_img = img[y_l:y_r, x_l:x_r]
            else:
                # No crop could be produced, so put a failure marker on the queue.
                self.PostProcess_Q.put(
                    (False, None, None, None, None, None, None))
                continue

            # There is no need to detect frame by frame.
            # print('self.setting_parameter[\'Output_size\'], multiple=self.track_len', self.setting_parameter['Output_size'], self.track_len)
            self.logger.debug('Starting Building LoadShortCutVideo...')

            dataloader = LoadShortCutVideo(
                video,
                video_time,
                rect,
                self.setting_parameter['Output_size'],
                multiple=self.track_len)
            # show_memory_info('action _ {}, {}'.format(index, 'LoadShortCutVideo release ==========='))

            target_frame = dataloader.multiple * dataloader.frame_rate
            frame_rate = dataloader.frame_rate
            start_time = action_time - self.track_len  # Start of the short clip to track, relative to kickoff.

            # Perform short-term tracking
            self.logger.debug('Starting tracking...')
            tracker = JDETracker(self.tracker_opt,
                                 self.tracker_model)  # Create a tracker
            results = Short_track(tracker, dataloader, self.tracker_opt)

            # Delete the tracker and immediately reclaim its memory
            # del tracker
            # gc.collect()

            # Put the results on the queue.
            self.PostProcess_Q.put(
                (True, results, target_frame, start_time, frame_rate,
                 Left_Up_points, reference_point))
            # del results
            # show_memory_info('action _ {}, {}'.format(index, 'self.PostProcess_Q.put'))

            self.logger.log(
                21, 'FMLoader.update() action {} consumes {}s'.format(
                    index, self.update_timer.toc()))

    def PostProcess_(self):
        self.t_PostProcess = Thread(target=self.PostProcess, args=())
        self.t_PostProcess.daemon = True
        self.t_PostProcess.start()

    def PostProcess(self):
        '''
        Post-process the tracking results
        '''
        self.PostProcess_timer = Timer()
        self.logger.debug('The pid of FMLoader.PostProcess : {}'.format(
            os.getpid()))
        self.logger.debug('The thread of FMLoader.PostProcess : {}'.format(
            currentThread()))

        for action_index in range(self.S_Short_track, self.datalen):
            self.PostProcess_timer.tic()

            # show_memory_info('action _ {}, {}'.format(action_index, 'Before get '))
            Flag, results, target_frame, start_time, frame_rate, Left_Up_points, reference_point = self.PostProcess_Q.get()
            self.logger.debug(
                'PostProcess <===========> action {} '.format(action_index))

            # Not even a crop was produced.
            if not Flag:
                self.Output_Q.put((False, action_index, []))
                continue

            # Extract every sub-box.
            sub_imgs = []
            ReID_feature_list = []
            frames_time = []
            bottom_center_point_list = []

            for bias in [0, -1, 1, -2, 2]:  # One of these frames should always have a detection?
                input_result = results[target_frame + bias]
                if len(input_result[1]) == 0:  # The target frame may contain no detections at all.
                    target_id = None
                    continue
                new_reference_point, target_id = sort_by_point(
                    results[target_frame + bias],
                    reference_point,
                    IoUthreshold=self.IoUthreshold)

                if target_id is not None:
                    # The target was detected, so break out of the loop
                    # Put the target frame's crop at the head of sub_imgs and ReID_feature_list
                    bboxes = input_result[1]
                    ids = input_result[2]
                    target_id_index = ids.index(target_id)
                    box = bboxes[target_id_index]
                    ReID_features = input_result[3]
                    ReID_feature = ReID_features[target_id_index]
                    img0 = input_result[4]
                    I_h, I_w, _ = img0.shape
                    x1, y1, w, h = box
                    intbox = tuple(
                        map(int, (max(0, x1), max(0, y1), min(
                            x1 + w, I_w), min(y1 + h, I_h))))
                    sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]
                    '''The first item in the list is the ultimate target; it is used for calibration, not for the later coordinate-transform computation'''
                    frames_time.append(None)
                    bottom_center_point_list.append(None)
                    # sub_imgs and ReID_feature_list are used directly in later computation, so they need not match frames_time in length
                    sub_imgs.append(sub_img)
                    ReID_feature_list.append(ReID_feature)

                    if self.vis:
                        vis_dir_ = os.path.join(self.vis_path,
                                                '{}'.format(action_index),
                                                'tracking')
                        makedir_v1(vis_dir_)

                        self.vis_target_frame(input_result, target_id,
                                              reference_point,
                                              new_reference_point, vis_dir_)

                    break
            # If neither the target frame nor its neighbors produced a detection, this action cannot be resolved. Give up.
            if target_id is None:
                # The target does not exist
                self.Output_Q.put((False, action_index, []))
                continue

            # Filter all results, keeping the ones whose ID matches the target person.
            for r_index, result in enumerate(results):

                frame_id = result[0]
                time = start_time + frame_id / frame_rate  # Time of this frame (relative to kickoff)
                bboxes = result[1]
                # ids = np.array(result[2])
                ids = result[2]
                ReID_features = result[3]
                img0 = result[4]
                I_h, I_w, _ = img0.shape

                # Find the bbox matching target_id; each frame holds at most one matching box.
                # Some frames may have none, so the time (or a relative frame number) must be recorded.
                if target_id not in ids:
                    # Record the time corresponding to each sub_img.
                    # To keep the timeline continuous, append even when there is no signal,
                    # so that the parallel lists stay the same length
                    frames_time.append(time)
                    bottom_center_point_list.append([])
                    continue
                else:
                    id_index = ids.index(target_id)
                    box = bboxes[id_index]
                    ReID_feature = ReID_features[id_index]

                    x1, y1, w, h = box
                    intbox = tuple(
                        map(int, (max(0, x1), max(0, y1), min(
                            x1 + w, I_w), min(y1 + h, I_h))))
                    # print(intbox)
                    sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]

                    # Restore the bottom-center coordinates from crop-relative back to original-image coordinates.
                    bottom_center_point = (x1 + 0.5 * w + Left_Up_points[0],
                                           y1 + h + Left_Up_points[1])

                    # Record the time corresponding to each bottom_center_point.
                    # frames_time and bottom_center_point_list must stay the same length
                    frames_time.append(time)
                    bottom_center_point_list.append(bottom_center_point)

                    # sub_imgs and ReID_feature_list are used directly in later computation, so they need not match frames_time in length
                    sub_imgs.append(sub_img)
                    ReID_feature_list.append(ReID_feature)

                    if self.vis:
                        img_vis = np.copy(img0)
                        cv2.rectangle(img_vis, (intbox[0], intbox[1]),
                                      (intbox[2], intbox[3]), (255, 255, 0),
                                      thickness=2)
                        cv2.imwrite(
                            os.path.join(vis_dir_, '{}.jpg'.format(r_index)),
                            img_vis)
                        # cv2.imwrite(os.path.join(vis_dir_,'{}.jpg'.format(r_index)),img_vis)

            self.Output_Q.put((True, action_index, [
                frames_time, sub_imgs, ReID_feature_list,
                bottom_center_point_list
            ]))
            if self.save_results:
                self.save_intermediate_results(action_index, frames_time,
                                               sub_imgs, ReID_feature_list,
                                               bottom_center_point_list)

            self.logger.log(
                21, 'FMLoader.PostProcess() action {} consumes {}s'.format(
                    action_index, self.PostProcess_timer.toc()))
            # show_memory_info('action _ {}, {}'.format(action_index, 'Before del results '))
            # print('action index {} sys.getrefcount(results)'.format(action_index),sys.getrefcount(results))
            # del results
            # show_memory_info('action _ {}, {}'.format(action_index, 'After del results '))

        # self.logger.log(21, '-----------------------------Finished FMLoader.PostProcess() datalen = {}-----------------------------'.format(self.datalen))

    def get__(self):
        for action_index in range(self.S_Short_track, self.datalen):
            results = self.Output_Q.get()
            self.logger.log(21,
                            'FMLoader.get__() action {} '.format(action_index))

    def save_intermediate_results(self, action_index, frames_time, sub_imgs,
                                  ReID_feature_list, bottom_center_point_list):
        '''Save the results of each computation.'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))
        os.makedirs(intermediate_results_path, exist_ok=True)
        # Save the ReID features
        ReID_feature_list = np.array(ReID_feature_list)
        np.save(
            os.path.join(intermediate_results_path,
                         '{}_ReID_feature_list.npy'.format(action_index)),
            ReID_feature_list)
        # Save the images
        for img_index in range(len(sub_imgs)):
            cv2.imwrite(
                os.path.join(intermediate_results_path,
                             '{}.jpg'.format(img_index)), sub_imgs[img_index])
        # Save frames_time and bottom_center_point_list
        with open(
                os.path.join(
                    intermediate_results_path,
                    '{}_frames_time_and_bottom_center_point_list.json'.format(
                        action_index)), 'w') as f:
            results = {
                'frames_time': frames_time,
                'bottom_center_point_list': bottom_center_point_list
            }
            json.dump(results, f)

    def load_intermediate_results(self, action_index):
        '''Load the intermediate results back in.'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))

        ReID_feature_list = np.load(
            os.path.join(intermediate_results_path,
                         '{}_ReID_feature_list.npy'.format(action_index)))
        ReID_feature_list = [_ for _ in ReID_feature_list]  # Convert to the format we need

        # Read the image file names in this directory.
        sub_imgs_names = [
            img_name for img_name in os.listdir(intermediate_results_path)
            if img_name.split('.')[-1] == 'jpg'
        ]
        # Sort the image names in ascending order
        sub_imgs_names = sorted(
            sub_imgs_names, key=lambda img_index: int(img_index.split('.')[0]))
        sub_imgs = []
        for img_name in sub_imgs_names:
            sub_img = cv2.imread(
                os.path.join(intermediate_results_path, img_name))
            sub_imgs.append(sub_img)
        with open(
                os.path.join(
                    intermediate_results_path,
                    '{}_frames_time_and_bottom_center_point_list.json'.format(
                        action_index)), 'r') as f:
            results = json.load(f)
            frames_time = results['frames_time']
            bottom_center_point_list = results['bottom_center_point_list']

        return action_index, [
            frames_time, sub_imgs, ReID_feature_list, bottom_center_point_list
        ]

    def read(self):
        # return next frame in the queue
        return self.Output_Q.get()

    def len(self):
        # return queue len
        return self.Output_Q.qsize()

    def vis_target_frame(self, input_result, target_id, reference_point,
                         new_reference_point, vis_dir_):
        '''
        Draw the tracking result on the target frame
        '''
        bboxes = input_result[1]
        ids = input_result[2]
        img0 = input_result[4]
        img_vis = np.copy(img0)
        id_index = ids.index(target_id)
        box = bboxes[id_index]
        x1, y1, w, h = box
        I_h, I_w, _ = img0.shape
        intbox = tuple(
            map(int,
                (max(0, x1), max(0, y1), min(x1 + w, I_w), min(y1 + h, I_h))))
        cv2.rectangle(img_vis, (intbox[0], intbox[1]), (intbox[2], intbox[3]),
                      (255, 255, 0),
                      thickness=2)  # Tracking box and transformed point in yellow
        cv2.circle(img_vis,
                   (int(new_reference_point[0]), int(new_reference_point[1])),
                   radius=5,
                   color=(25, 255, 0),
                   thickness=-1)
        cv2.circle(img_vis, (int(reference_point[0]), int(reference_point[1])),
                   radius=5,
                   color=(0, 255, 0),
                   thickness=-1)  # The original reference point

        cv2.imwrite(os.path.join(vis_dir_, 'target_id.jpg'), img_vis)
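A sketch of how this loader appears to be driven, judging from its constructor and thread helpers; the `opt` and `tracker_opt` objects and the cache indices come from the surrounding project and are only assumed here:

from queue import Queue

tracker_output_q = Queue(maxsize=1000)
loader = FMLoader(opt, tracker_opt, tracker_output_q,
                  S_Short_track=0, S_Coordinate_transfer=0,
                  track_len=2, save_results=True)
loader.Read_From_Cache()  # replay any cached actions into the output queue
loader.update_()          # start the short-term tracking thread
loader.PostProcess_()     # start the post-processing thread
success, action_index, payload = loader.read()  # consume one result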
Example #9
class FoamSVGD():
    """
    Additional Useful References:
        - Zhu, Yinhao, and Nicholas Zabaras. "Bayesian deep convolutional 
        encoder–decoder networks for surrogate modeling and uncertainty 
        quantification." Journal of Computational Physics 366 (2018): 415-447.

        - Liu, Qiang, and Dilin Wang. "Stein variational gradient descent:
        A general purpose Bayesian inference algorithm."
        Advances in Neural Information Processing Systems. 2016.
    
    """
    def __init__(self, n_samples, H):
        self.lg = Log()
        self.lg.info('Constructing neural network...')

        self.n_samples = n_samples  # Number of SVGD particles
        self.turb_nn = TurbNN(D_in=9, H=H,
                              D_out=9).double()  # Construct the neural network

        # Student's t-distribution: w ~ St(w | mu=0, lambda=shape/rate, nu=2*shape)
        # See PRML by Bishop Page 103
        self.prior_w_shape = 1.0
        self.prior_w_rate = 0.02
        #self.prior_w_rate = 0.5

        # noise variance: beta ~ Gamma(beta | shape, rate)
        self.prior_beta_shape = 100
        #self.prior_beta_rate = 2e-4
        #self.prior_beta_shape = 2.0
        self.prior_beta_rate = 4.0

        # Create n_samples SVGD particles
        # This is done by deep copying the invariant nn
        instances = []
        for i in range(n_samples):
            new_instance = copy.deepcopy(self.turb_nn)
            new_instance.reset_parameters(
                self.prior_w_shape,
                self.prior_w_rate)  # Reset parameters to spread particles out
            instances.append(new_instance)

        self.models = th.nn.ModuleList(instances)
        del instances

        # Now set up parameters for the noise hyper-prior
        beta_size = (self.n_samples, 1)
        log_beta = np.log(
            np.random.gamma(self.prior_beta_shape,
                            1. / self.prior_beta_rate,
                            size=beta_size))
        # Log of the additive output-wise noise (beta)
        for i in range(n_samples):
            self.models[i].log_beta = Parameter(
                th.Tensor(log_beta[i]).type(dtype))

        # Learning rate for the network weights
        self.lr = 1e-2
        # Learning rate for the output-wise noise
        self.lr_noise = 0.01

        # Construct individual optimizers and learning rate schedulers
        self.schedulers = []
        self.optimizers = []
        for i in range(n_samples):
            # Pre-pend output-wise noise to model parameter list
            parameters = [{
                'params': [self.models[i].log_beta],
                'lr': self.lr_noise
            }, {
                'params': [
                    p for n, p in self.models[i].named_parameters()
                    if n != 'log_beta'
                ]
            }]

            #parameters = [{'params': [p for n, p in self.models[i].named_parameters() if n!='log_beta']}]

            # ADAM optimizer (minor weight decay)
            optim = th.optim.Adam(parameters,
                                  lr=self.lr,
                                  betas=(0.9, 0.999),
                                  eps=1e-08,
                                  weight_decay=0.0)
            #optim = th.optim.Adam(parameters, lr=lr)

            # Decay the learning rate on plateau; these parameters can be adjusted depending on the data
            scheduler = ReduceLROnPlateau(optim,
                                          mode='min',
                                          factor=0.75,
                                          patience=5,
                                          verbose=True,
                                          threshold=0.01,
                                          threshold_mode='abs',
                                          cooldown=0,
                                          min_lr=0,
                                          eps=1e-07)

            self.optimizers.append(optim)
            self.schedulers.append(scheduler)

    def forward(self, input, t_data):
        """
        Computes the outputs of all `n_samples` NNs
        Args:
            input (Tensor): [nx5] tensor of input invariants
            t_data (Tensor): [nx10x3x3] tensor of linearly independent tensor basis functions
        Return: out (Tensor): [nx3x3] tensor of predicted scaled anisotropic terms
        """
        #out_size = (3, 3)
        out_size = (9,)  # Must be a tuple so it can be unpacked below
        output = Variable(
            th.Tensor(self.n_samples, input.size(0), *out_size).type(dtype))
        for i in range(self.n_samples):
            output[i] = self.models[i].forward(input)
            #g_pred = self.models[i].forward(input)
            #g_pred0 = th.unsqueeze(th.unsqueeze(g_pred, 2), 3)
            #output[i] = th.sum(g_pred0*t_data,1)
        return output

    def compute_loss(self, output, target, index=None):
        """
        Computes the joint log probability ignoring constant terms
        See Eq. 23 in paper
        Args:
            output: B x oC x oH x oW
            target: B x oC x oH x oW
            index (None or int): model index in 0, 1, ..., n_samples - 1.
        Returns:
            If index = None, return a list of joint probabilities, i.e. log
            unnormalized posterior. If index is assigned an int, return the
            joint probability for this model instance.
        """
        if index not in range(self.n_samples):
            raise ValueError(
                "model index should be in [0, ..., {}], but got {}".format(
                    self.n_samples - 1, index))
        else:
            # Log Gaussian likelihood
            # See Eq. 18-19 in paper
            log_likelihood = len(self.trainingLoader.dataset) / output.size(0) \
                                * (-0.5 * self.models[index].log_beta.exp()
                                * (target - output).pow(2).sum()
                                + 0.5 * target.numel()
                                * self.models[index].log_beta)

            #log_likelihood =    (-0.5 * self.models[index].log_beta.exp()
            #                    * (target - output).pow(2).sum()
            #                    + 0.5 * target.numel()
            #                    * self.models[index].log_beta)

            # Log Gaussian weight prior
            # See Eq. 17 in paper
            prior_ws = Variable(th.Tensor([0]).type(dtype))
            for param in self.models[index].parameters():
                prior_ws += th.log1p(0.5 / self.prior_w_rate *
                                     param.pow(2)).sum()
            prior_ws *= -(self.prior_w_shape + 0.5)

            # Log Gamma Output-wise noise prior
            # See Eq. 20 in paper
            prior_log_beta = ((self.prior_beta_shape-1.0) * self.models[index].log_beta \
                        - self.models[index].log_beta.exp() * self.prior_beta_rate).sum()

            return log_likelihood + prior_ws + prior_log_beta, \
                   log_likelihood.data.item()

    def compute_loss2(self, output, target, index=None):

        log_likelihood = (target - output).pow(2).sum()
        return log_likelihood, log_likelihood.data.item()

    def _squared_dist(self, X):
        """
        Computes the squared distance ||x1 - x2||^2 for a set of vectors.
        For two vectors, ||q - p||^2 = ||p||^2 + ||q||^2 - 2 p.q
        Args:
            X (Tensor): [sxp] Tensor we wish to compute the squared distances for.
                              In SVGD, s is the number of particles (invariant neural networks)
                              and p is the number of parameters (weights) assigned to each network
        Returns:
            (Tensor): [sxs] matrix of squared distances between each pair of vectors
        """
        # Compute the dot product between every pair of vectors [sxs]
        XXT = th.mm(X, X.t())
        # Get the ||p||^2 and ||q||^2 terms from the diagonal [s-vector]
        XTX = XXT.diag()
        # Return the squared distances; PyTorch broadcasts XTX as a row vector
        # and XTX.unsqueeze(1) as a column vector
        return -2.0 * XXT + XTX + XTX.unsqueeze(1)
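
    # Quick sanity check of the identity above (a sketch, assuming `th` is
    # torch): for X = th.randn(4, 6),
    #     XXT = th.mm(X, X.t())
    #     d2 = -2.0 * XXT + XXT.diag() + XXT.diag().unsqueeze(1)
    # matches th.cdist(X, X).pow(2) up to floating-point tolerance.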

    def _Kxx_dxKxx(self, X):
        """
        Computes covariance matrix K(X,X) and its gradient w.r.t. X
        for RBF kernel with design matrix X, as in the second term in Eq. (8)
        of SVGD paper
        Args:
            X (Tensor): [sxp] Tensor we wish to compute the covariance matrix for.
                              In SVGD, s is the number of particles (invariant neural networks)
                              and p is the number of parameters (weights) assigned to each network
        """
        squared_dist = self._squared_dist(X)

        triu_indices = squared_dist.triu(1).nonzero().transpose(0, 1)
        off_diag = squared_dist[triu_indices[0], triu_indices[1]]
        l_square = 0.5 * off_diag.median() / self.n_samples
        Kxx = th.exp(-0.5 / l_square * squared_dist)
        # Matrix form for the second term of optimal functional gradient in eqn (8) of SVGD paper
        # This line needs S x P memory
        dxKxx = (Kxx.sum(1).diag() - Kxx).matmul(X) / l_square

        return Kxx, dxKxx

    def train(self, n_epoch=250, gpu=True):
        """
        Training the neural network(s) using SVGD
        Args:
            n_epoch (int): Number of epochs to train for
            gpu (boolean): Whether or not to use a GPU (default is true)
        """
        if getattr(self, 'trainingLoader', None) is None:
            self.lg.error(
                'Training data loader not created! Stopping training')
            return

        if (gpu):
            self.lg.info('GPU network training enabled')
            #Transfer network and training data onto GPU
            for i in range(self.n_samples):
                self.models[i].cuda()
        else:
            self.lg.info('CPU network training enabled')

        # Unique indices of the symmetric deviatoric tensor
        #indx = Variable(th.LongTensor([0,1,2,4,5,8]), requires_grad=False)
        #if (gpu):
        #    indx = indx.cuda()

        #self.lg.warning('Starting NN training with an experiment size of '+str(self.n_data))
        # store the joint probabilities
        for epoch in range(n_epoch):

            if (epoch + 1) % 20 == 0:
                self.lg.info('Running test samples...')
                self.test(epoch, gpu=gpu)

            training_loss = 0.
            training_MNLL = 0.
            training_MSE = 0.
            # Mini-batch the training set
            for batch_idx, (x_data, y_data) in enumerate(self.trainingLoader):
                x_data = Variable(x_data)
                y_data = Variable(y_data, requires_grad=False)
                if (gpu):
                    x_data = x_data.cuda()
                    y_data = y_data.cuda()

                # all gradients of log joint probability:
                # n_samples x num_parameters
                grad_log_joints = []
                # all model parameters (particles): n_samples x num_parameters
                theta = []
                b_pred_tensor = Variable(
                    th.Tensor(self.n_samples, x_data.size(0),
                              y_data.shape[1]).type(dtype),
                    requires_grad=False)

                # Now iterate through each model
                for i in range(self.n_samples):
                    self.models[i].zero_grad()
                    # Predict mixing coefficients -> g_pred [Nx10]
                    g_pred = self.models[i].forward(x_data)
                    b_pred_tensor[i] = g_pred
                    #b_pred = g_pred

                    loss, log_likelihood = self.compute_loss(g_pred, y_data, i)
                    # backward to compute gradients of log joint probabilities
                    loss.backward()
                    # monitoring purpose
                    training_loss += loss.data.item()
                    training_MNLL += log_likelihood

                    # Extract parameters and their gradients out from models
                    vec_param, vec_grad_log_joint = nnUtils.parameters_to_vector(
                        self.models[i].parameters(), both=True)

                    grad_log_joints.append(vec_grad_log_joint.unsqueeze(0))
                    theta.append(vec_param.unsqueeze(0))

                # calculating the kernel matrix and its gradients
                theta = th.cat(theta)
                Kxx, dxKxx = self._Kxx_dxKxx(theta)
                grad_log_joints = th.cat(grad_log_joints)
                grad_logp = th.mm(Kxx, grad_log_joints)

                # Negate the gradients here
                grad_theta = -(grad_logp + dxKxx) / self.n_samples

                # update param gradients
                for i in range(self.n_samples):
                    nnUtils.vector_to_parameters(grad_theta[i],
                                                 self.models[i].parameters(),
                                                 grad=True)
                    self.optimizers[i].step()
                del grad_theta

                # Scaled MSE
                training_MSE += (
                    (y_data -
                     b_pred_tensor.mean(0))**2).sum(1).sum(0).data.item()
                # Mini-batch progress log
                if ((batch_idx + 1) % 500 == 0):
                    self.lg.log(
                        'Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tNoise: {:.3f}'
                        .format(
                            epoch, batch_idx * len(x_data),
                            len(self.trainingLoader.dataset),
                            100. * batch_idx * len(x_data) /
                            len(self.trainingLoader.dataset), loss.data.item(),
                            self.models[0].log_beta.data.item()))

            # Log training loss, MNLL, and mean mse
            ndata = len(self.trainingLoader.dataset)

            if (epoch + 1) % 1 == 0:
                self.lg.log(
                    "===> Epoch: {}, Current loss: {:.6f} Log Beta: {:.6f} Scaled-MSE: {:.6f}"
                    .format(epoch + 1, training_loss,
                            self.models[0].log_beta.data.item(),
                            training_MSE / (ndata)))
                self.lg.logLoss(epoch, training_loss/(ndata*self.n_samples), \
                    training_MNLL/(ndata*self.n_samples), training_MSE/ndata, self.extra)

            # Update learning rate if needed
            for i in range(self.n_samples):
                self.schedulers[i].step(abs(training_loss))

    def train2(self, x_dataTr, y_dataTr, nb=1, n_epoch=1, gpu=True):
        """
        Training the neural network(s) using SVGD on data arrays passed in directly
        Args:
            x_dataTr (ndarray): training inputs
            y_dataTr (ndarray): training targets
            nb (int): mini-batch size
            n_epoch (int): Number of epochs to train for
            gpu (boolean): Whether or not to use a GPU (default is true)
        """

        if (gpu):
            self.lg.info('GPU network training enabled')
            #Transfer network and training data onto GPU
            for i in range(self.n_samples):
                self.models[i].cuda()
        else:
            self.lg.info('CPU network training enabled')

        # Unique indices of the symmetric deviatoric tensor
        #indx = Variable(th.LongTensor([0,1,2,4,5,8]), requires_grad=False)
        #if (gpu):
        #    indx = indx.cuda()

        #self.lg.warning('Starting NN training with an experiment size of '+str(self.n_data))
        # store the joint probabilities
        x_dataTr = th.from_numpy(x_dataTr)
        y_dataTr = th.from_numpy(y_dataTr)
        self.trainingLoader = th.utils.data.DataLoader(FoamNetDataset2D(
            x_dataTr, y_dataTr),
                                                       batch_size=nb,
                                                       shuffle=True)
        for i in range(self.n_samples):
            self.models[i].train()

        for epoch in range(n_epoch):

            #if (epoch + 1) % 20 == 0:
            #    self.lg.info('Running test samples...')
            #    self.test(epoch, gpu=gpu)

            training_loss = 0.
            training_MNLL = 0.
            training_MSE = 0.
            for batch_idx, (x_data, y_data) in enumerate(self.trainingLoader):
                # Mini-batch the training set
                x_data = Variable(x_data)
                y_data = Variable(y_data, requires_grad=False)
                if (gpu):
                    x_data = x_data.cuda()
                    y_data = y_data.cuda()

                # all gradients of log joint probability:
                # n_samples x num_parameters
                grad_log_joints = []
                # all model parameters (particles): n_samples x num_parameters
                theta = []
                b_pred_tensor = Variable(
                    th.Tensor(self.n_samples, x_data.size(0),
                              y_data.shape[1]).type(dtype),
                    requires_grad=False)

                # Now iterate through each model
                for i in range(self.n_samples):
                    self.models[i].zero_grad()
                    # Predict mixing coefficients -> g_pred [Nx10]
                    g_pred = self.models[i].forward(x_data)
                    b_pred_tensor[i] = g_pred
                    #b_pred = g_pred

                    loss, log_likelihood = self.compute_loss(g_pred, y_data, i)
                    # backward to compute gradients of log joint probabilities
                    loss.backward()
                    # monitoring purpose
                    training_loss += loss.data.item()
                    training_MNLL += log_likelihood

                    # Extract parameters and their gradients out from models
                    vec_param, vec_grad_log_joint = nnUtils.parameters_to_vector(
                        self.models[i].parameters(), both=True)

                    grad_log_joints.append(vec_grad_log_joint.unsqueeze(0))
                    theta.append(vec_param.unsqueeze(0))

                # calculating the kernel matrix and its gradients
                theta = th.cat(theta)
                Kxx, dxKxx = self._Kxx_dxKxx(theta)
                grad_log_joints = th.cat(grad_log_joints)
                grad_logp = th.mm(Kxx, grad_log_joints)

                # Negate the gradients here
                grad_theta = -(grad_logp + dxKxx) / self.n_samples

                # update param gradients
                for i in range(self.n_samples):
                    nnUtils.vector_to_parameters(grad_theta[i],
                                                 self.models[i].parameters(),
                                                 grad=True)
                    self.optimizers[i].step()
                del grad_theta

                # Scaled MSE
                training_MSE += (
                    (y_data -
                     b_pred_tensor.mean(0))**2).sum(1).sum(0).data.item()

            # Update learning rate if needed
            for i in range(self.n_samples):
                self.schedulers[i].step(abs(training_loss))

            ndata = len(self.trainingLoader.dataset)
            if (epoch + 1) % 1 == 0:
                self.lg.log(
                    "===> Epoch: {}, Current loss: {:.6f} Log Beta: {:.6f} Scaled-MSE: {:.6f}"
                    .format(epoch + 1, training_loss,
                            self.models[0].log_beta.data.item(),
                            training_MSE / (ndata)))
                self.lg.logLoss(epoch, training_loss/(ndata*self.n_samples), \
                    training_MNLL/(ndata*self.n_samples), training_MSE/ndata, self.extra)
            if (training_MSE / ndata < 0.0001): break

    def test(self, epoch, gpu=True):
        """
        Tests the neural network(s) on validation/testing datasets
        Args:
            epoch (int): current epoch (just used for logging purposes)
            gpu (boolean): Whether or not to use a GPU (default is true)
        """
        if not hasattr(self, 'testingLoaders'):
            self.lg.error('Testing data loader not created! Stopping testing')
            return

        for i in range(self.n_samples):
            self.models[i].eval()

        # Mini-batch each testing set
        flow_mspe = np.zeros(len(self.testingLoaders))
        flow_mnll = np.zeros(len(self.testingLoaders))
        for n, testingLoader in enumerate(self.testingLoaders):
            net_mse = 0
            testing_MNLL = 0
            for batch_idx, (x_data, y_data) in enumerate(testingLoader):
                #self.lg.info("Testing batch " + str(batch_idx))
                # Make mini-batch data variables
                x_data = Variable(x_data)
                y_data = Variable(y_data, requires_grad=False)
                if (gpu):
                    x_data = x_data.cuda()
                    y_data = y_data.cuda()

                b_pred = Variable(
                    th.Tensor(self.n_samples, x_data.size(0),
                              y_data.shape[1]).type(dtype))

                # Now iterate through each SVGD particle (invariant nn)
                for i in range(self.n_samples):
                    self.models[i].zero_grad()
                    g_pred = self.models[i].forward(x_data)
                    b_pred[i] = g_pred
                    loss, log_likelihood = self.compute_loss(
                        b_pred[i], y_data, i)
                    testing_MNLL += log_likelihood

                mse = ((y_data - b_pred.mean(0))**2).sum()
                net_mse += mse.data.item()

            # Total amount of testing data
            ndata = len(self.testingLoaders[n].dataset)
            flow_mspe[n] = net_mse / (ndata)
            flow_mnll[n] = testing_MNLL / (ndata * self.n_samples)

        # Log testing results
        self.lg.log("===> Current Total Validation Loss: {}, Validation MSE: {}" \
                .format(flow_mnll.sum(0)/ndata,flow_mspe.sum(0)))
        self.lg.logTest(epoch, flow_mnll, flow_mspe, self.extra)
        return b_pred

    def predict2(self, trainS=True, gpu=True):
        """
        Computes predictive mean and variance on the training or testing dataset
        Args:
            trainS (boolean): Whether to predict on the training set (default) or the testing set
            gpu (boolean): Whether or not to use a GPU (default is true)
        """
        if not hasattr(self, 'testingLoaders'):
            self.lg.error('Testing data loader not created! Stopping testing')
            return

        # Switch models to eval state (shuts off dropout)
        for i in range(self.n_samples):
            self.models[i].eval()
        if (trainS):
            y_data = self.trainingLoader.dataset.target_tensor
            x_data = self.trainingLoader.dataset.x_tensor
            b_mean = th.Tensor(x_data.shape[0],
                               y_data.shape[1]).type(th.DoubleTensor)
            b_var = th.Tensor(x_data.shape[0],
                              y_data.shape[1]).type(th.DoubleTensor)

        else:
            x_data = self.testingLoaders[0].dataset.x_tensor
            y_data = self.testingLoaders[0].dataset.target_tensor
            b_mean = th.Tensor(x_data.shape[0],
                               y_data.shape[1]).type(th.DoubleTensor)
            b_var = th.Tensor(x_data.shape[0],
                              y_data.shape[1]).type(th.DoubleTensor)

        if (gpu):
            x_data = x_data.cuda()
            #y_data = y_data.cuda()

        predict = Variable(th.Tensor(self.n_samples, x_data.size(0),
                                     y_data.shape[1]).type(dtype),
                           requires_grad=False)

        for i in range(self.n_samples):
            predict[i] = self.models[i].forward(x_data)

        eyyt = (predict.data**2).mean(0)
        eyeyt = predict.mean(0).data**2
        beta_inv = th.cat([(-self.models[i].log_beta.data).exp().unsqueeze(0)
                           for i in range(self.n_samples)])
        y_noise_var = beta_inv.mean(0)
        var = eyyt.cpu() - eyeyt.cpu()
        var_noise = y_noise_var.cpu()

        meanPred = predict.mean(0)

        return meanPred.cpu(), var, var_noise

    def predict3(self, gpu=True):

        # Switch models to eval state (shuts off dropout)
        for i in range(self.n_samples):
            self.models[i].eval()

        if (gpu):
            self.lg.info('GPU network training enabled')
            #Transfer network and training data onto GPU
            for i in range(self.n_samples):
                self.models[i].cuda()
        else:
            self.lg.info('CPU network training enabled')

        # Note we store the mean and variance fields on the CPU since the fluid field may be very large.
        # Mini-batches are then brought onto the GPU for forward passes.
        x_data = self.trainingLoader.dataset.x_tensor
        b_mean = th.Tensor(x_data.size(0), 1).type(th.DoubleTensor)
        targ = th.Tensor(x_data.size(0), 1).type(th.DoubleTensor)
        b_var = th.Tensor(x_data.size(0), 1).type(th.DoubleTensor)
        b_index = 0
        # Mini-batch the pred
        for batch_idx, (x_data, t_data) in enumerate(self.trainingLoader):
            x_data = Variable(x_data, requires_grad=False)
            t_data = Variable(t_data, requires_grad=False)
            if (gpu):
                x_data = x_data.cuda()
                t_data = t_data.cuda()

            b_pred = Variable(
                th.Tensor(self.n_samples, x_data.size(0), 1).type(dtype))

            # Now iterate through each model
            for i in range(self.n_samples):
                self.models[i].zero_grad()
                g_pred = self.models[i].forward(x_data)
                b_pred[i] = g_pred

            # Compute expectation (Eq. 30 in paper)
            b_mean[b_index:b_index +
                   x_data.size(0)] = b_pred.mean(0).data.cpu()
            targ[b_index:b_index + x_data.size(0)] = t_data
            # Compute variance of each output  (Eq. 31 in paper)
            b_eyyt = (b_pred.data**2).mean(0)
            b_eyeyt = b_pred.mean(0).data**2
            beta_inv = th.cat([
                (-self.models[i].log_beta.data).exp().unsqueeze(0)
                for i in range(self.n_samples)
            ])
            y_noise_var = beta_inv.mean(0).unsqueeze(0).unsqueeze(
                -1).unsqueeze(-1)
            b_var[b_index:b_index + x_data.size(0)] = b_eyyt.cpu(
            ) - b_eyeyt.cpu() + y_noise_var.cpu()
            # Step index
            b_index += x_data.size(0)

        return b_mean, targ

    def predict(self, ransS, ransR, n_mb=200, gpu=True):
        """
        Method for obtaining mean and variance predictions for a given flow [deprecated]
        (Unused during training/testing but left in for reference)
        Args:
            ransS (Tensor): [nCellx3x3] Baseline RANS rate-of-strain tensor
            ransR (Tensor): [nCellx3x3] Baseline RANS rotation tensor
        """
        # Get invariants and tensor functions for the provided field
        fl = Invariant()
        x_data = fl.getInvariants(ransS, ransR)
        t_data = fl.getTensorFunctions(ransS, ransR)
        # Create data loader
        predictionLoader = th.utils.data.DataLoader(PredictDataset(
            x_data, t_data),
                                                    batch_size=n_mb,
                                                    shuffle=False)

        # Switch models to eval state (shuts off dropout)
        for i in range(self.n_samples):
            self.models[i].eval()

        if (gpu):
            self.lg.info('GPU network training enabled')
            #Transfer network and training data onto GPU
            for i in range(self.n_samples):
                self.models[i].cuda()
        else:
            self.lg.info('CPU network training enabled')

        # Note we store the mean and variance fields on the CPU since the fluid field may be very large.
        # Mini-batches are then brought onto the GPU for forward passes.
        out_size = (3, 3)
        b_mean = th.Tensor(x_data.size(0), *out_size).type(th.DoubleTensor)
        b_var = th.Tensor(x_data.size(0), *out_size).type(th.DoubleTensor)
        b_index = 0
        # Mini-batch the pred
        for batch_idx, (x_data, t_data) in enumerate(predictionLoader):
            x_data = Variable(x_data, requires_grad=False)
            t_data = Variable(t_data, requires_grad=False)
            if (gpu):
                x_data = x_data.cuda()
                t_data = t_data.cuda()

            b_pred = Variable(
                th.Tensor(self.n_samples, x_data.size(0),
                          *out_size).type(dtype))

            # Now iterate through each model
            for i in range(self.n_samples):
                self.models[i].zero_grad()
                g_pred = self.models[i].forward(x_data)
                g_pred0 = th.unsqueeze(th.unsqueeze(g_pred, 2), 3)
                b_pred[i] = th.sum(g_pred0[:, :, :, :] * t_data[:, :, :, :], 1)

            # Compute expectation (Eq. 30 in paper)
            b_mean[b_index:b_index +
                   x_data.size(0)] = b_pred.mean(0).data.cpu()
            # Compute variance of each output  (Eq. 31 in paper)
            b_eyyt = (b_pred.data**2).mean(0)
            b_eyeyt = b_pred.mean(0).data**2
            beta_inv = th.cat([
                (-self.models[i].log_beta.data).exp().unsqueeze(0)
                for i in range(self.n_samples)
            ])
            y_noise_var = beta_inv.mean(0).unsqueeze(0).unsqueeze(
                -1).unsqueeze(-1)
            b_var[b_index:b_index + x_data.size(0)] = b_eyyt.cpu(
            ) - b_eyeyt.cpu() + y_noise_var.cpu()
            # Step index
            b_index += x_data.size(0)

            if ((batch_idx + 1) % 50 == 0):
                self.lg.log(
                    'Mini-batching predictive field. [{}/{} ({:.0f}%)]'.format(
                        batch_idx * len(x_data), len(predictionLoader.dataset),
                        100. * batch_idx * len(x_data) /
                        len(predictionLoader.dataset)))

        return b_mean, b_var

    def getDataPoints(self,
                      dataManager,
                      XTrdirs,
                      YTrdirs,
                      Xdirs,
                      Ydirs,
                      stp=10,
                      n_mb=250):
        """ 
        Used for creating training and also validation datasets
        Args: dataManager: dataManager object for loading openFoam data
              n_data: total number of training/ validation data to use
              n_mb: size of minibatch
              n_valid: number of points to use as validation for current dataset
        """
        self.lg.info('Creating data-sets')
        self.n_mb = n_mb

        # Get the set of training points from the data manager
        #x0, t0, k0, y0 = dataManager.getDataPoints(self, n_data)
        x_train, y_train = dataManager.getDataPoints2D(XTrdirs[0], YTrdirs[0],
                                                       stp)
        x_test, y_test = dataManager.getDataPoints2D(Xdirs[0], Ydirs[0], stp)

        for i in range(1, len(XTrdirs)):
            x, y = dataManager.getDataPoints2D(XTrdirs[i], YTrdirs[i], stp)
            x_train = np.append(x_train, x, axis=0)
            y_train = np.append(y_train, y, axis=0)

        for i in range(1, len(Xdirs)):
            x, y = dataManager.getDataPoints2D(Xdirs[i], Ydirs[i], stp)
            x_test = np.append(x_test, x, axis=0)
            y_test = np.append(y_test, y, axis=0)

        x_train, x_test = dataManager.do_normalization(x_train, x_test, 'std')
        y_train, y_test = dataManager.do_normalization(y_train, y_test, 'std')

        x_train = th.from_numpy(x_train).double()
        x_test = th.from_numpy(x_test).double()
        y_train = th.from_numpy(y_train).double()
        y_test = th.from_numpy(y_test).double()

        # Create data sets
        self.trainingDataSet = FoamNetDataset2D(x_train, y_train)
        # Now create loaders (set mini-batch size; shuffling is disabled here)
        self.trainingLoader = th.utils.data.DataLoader(self.trainingDataSet,
                                                       batch_size=n_mb,
                                                       shuffle=False)
        self.testingLoaders = []
        # Create data sets
        self.testingDataSet = FoamNetDataset2D(x_test, y_test)
        # Now create loaders (set mini-batch size; shuffling is disabled here)
        self.testingLoaders.append(
            th.utils.data.DataLoader(self.testingDataSet,
                                     batch_size=n_mb,
                                     shuffle=False))

    def getTrainingPoints(self, dataManager, n_data=5000, n_mb=250, n_valid=0):
        """ 
        Used for creating training and also validation datasets
        Args: dataManager: dataManager object for loading openFoam data
              n_data: total number of training/ validation data to use
              n_mb: size of minibatch
              n_valid: number of points to use as validation for current dataset
        """
        self.lg.info('Creating training data-set')
        self.n_data = n_data
        self.n_mb = n_mb

        # Get the set of training points from the data manager
        x_train, y_train = dataManager.getDataPoints2D( './RR_data/data2d_lower2.bin', \
                           './RR_data/reac2d_lower2.bin')

        # Hold out the first n_valid points for validation so they are never
        # seen during training
        if (n_valid > 0):
            x_valid = x_train[0:n_valid]
            y_valid = y_train[0:n_valid]
            x_train = x_train[n_valid:]
            y_train = y_train[n_valid:]

        # Create data sets
        self.trainingDataSet = FoamNetDataset2D(x_train, y_train)
        # Now create loaders (set mini-batch size and also turn on shuffle)
        self.trainingLoader = th.utils.data.DataLoader(self.trainingDataSet,
                                                       batch_size=n_mb,
                                                       shuffle=True)

        # If we wish to have a validation dataset, create testing loaders
        if (n_valid > 0):
            self.testingDataSet = FoamNetDataset2D(x_valid, y_valid)
            # Check to see if testing loaders were already created
            try:
                self.testingLoaders
            except AttributeError:
                self.lg.info('Creating testing loader list...')
                self.testingLoaders = []
            self.testingLoaders.append(
                th.utils.data.DataLoader(self.testingDataSet,
                                         batch_size=n_mb,
                                         shuffle=True))

    def getTestingPoints(self, dataManager, n_data=500, n_mb=250):
        """ 
        Creates independent testing/validation datasets
        Args: dataManager: dataManager object for loading openFoam data
              n_data: total number of training/ validation data to use
              n_mb: size of minibatch
              n_valid: number of points to use as validation
        """
        self.lg.info('Creating testing data-set')
        # Check to see if testing loaders created
        try:
            self.testingLoaders
        except AttributeError:
            self.lg.info('Creating testing loader list...')
            self.testingLoaders = []

        # Get the set of testing points from the data manager
        # These points come from separate data files, so they are never used during training
        x_test,  y_test = dataManager.getDataPoints2DTest('./RR_data/data2d_lower2.bin', \
                                      './RR_data/reac2d_lower2.bin','./RR_data/data2d_base.bin', \
                                      './RR_data/reac2d_base.bin')
        # Create data sets
        self.testingDataSet = FoamNetDataset2D(x_test, y_test)
        # Now create test loaders
        self.testingLoaders.append(
            th.utils.data.DataLoader(self.testingDataSet,
                                     batch_size=n_mb,
                                     shuffle=True))

    def saveNeuralNet(self, filename):
        """
        Save the current neural network state
        Args:
            filename (string): name of the file to save the neural network in
        """
        self.lg.log(
            "Saving neural networks to: ./torchNets/{}".format(filename))
        if not os.path.exists("torchNets"):
            os.makedirs("torchNets")
        # Iterate through each model
        for i in range(self.n_samples):
            th.save(self.models[i].state_dict(),
                    "./torchNets/{}-{}.nn".format(filename, i))

    def loadNeuralNet(self, filename):
        """
        Load the current neural network state
        Args:
            filename (string): path prefix of the saved network files to load
        """
        for i in range(self.n_samples):
            self.models[i].load_state_dict(
                th.load("{}-{}.nn".format(filename, i),
                        map_location=lambda storage, loc: storage))
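
    # Note the path asymmetry between the two methods above: saveNeuralNet()
    # prefixes ./torchNets/ internally, while loadNeuralNet() takes the path
    # verbatim. A hypothetical round-trip therefore looks like:
    #
    #   foamNN.saveNeuralNet('foamNet')              # writes ./torchNets/foamNet-0.nn, ...
    #   foamNN.loadNeuralNet('./torchNets/foamNet')  # the prefix must be supplied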

    def getTurbNet(self):
        """
        Accessor to get the neural net object
        Returns:
            TurbNN (th.nn.Module): PyTorch neural network object
        """
        return self.turb_nn

    def btrace(self, a, gpu=True):
        """
        Return the batch trace of tensor a
        """
        # Multiply element-wise by the identity to keep only the diagonal;
        # a plain bmm with the identity would just return a unchanged
        if (gpu):
            eye = th.eye(3).unsqueeze(0).repeat(a.size()[0], 1, 1).cuda()
        else:
            eye = th.eye(3).unsqueeze(0).repeat(a.size()[0], 1, 1)
        return th.sum(th.sum(a * eye, 2), 1)

    def btraceVariable(self, a, gpu=True):
        """
        Return the batch trace of tensor a
        """
        if (gpu):
            eye = Variable(th.eye(3).unsqueeze(0).repeat(a.size()[0], 1, 1),
                           requires_grad=False).cuda()
        else:
            eye = Variable(th.eye(3).unsqueeze(0).repeat(a.size()[0], 1, 1),
                           requires_grad=False)
        return th.sum(th.sum(a * eye, 2), 1)
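
As a quick sanity check of the diagonal extraction above (a minimal CPU sketch, assuming th is PyTorch as in the rest of this example):

a = th.eye(3).unsqueeze(0).repeat(4, 1, 1) * 2.0  # four 2*I matrices, each with trace 6
eye = th.eye(3).unsqueeze(0).repeat(a.size()[0], 1, 1)
print(th.sum(th.sum(a * eye, 2), 1))  # tensor([6., 6., 6., 6.])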
Ejemplo n.º 10
0
class FoamReader():
    """
    Utility for reading in OpenFoam data files.
    Functions for reading in mesh data need to be updated if needed
    """
    def __init__(self):
        self.lg = Log()

    def readFieldData(self, fileName):
        """
        Reads in openFoam field (vector, or tensor)
        Args:
            fileName(string): File name
        Returns:
            data (FloatTensor): tensor of data read from file
        """
        #Attempt to read the text file and extract the data into a list
        try:
            self.lg.log('Attempting to read file: ' + str(fileName))
            rgx = re.compile('[%s]' % '(){}<>')
            rgx2 = re.compile(r'\((.*?)\)')  #regex to get the contents of parentheses
            file_object = open(str(fileName), "r").read().splitlines()

            #Find line where the internal field starts
            self.lg.log('Parsing file...')
            fStart = [
                file_object.index(i)
                for i in file_object if 'internalField' in i
            ][-1] + 1
            fEnd = [
                file_object.index(i) for i in file_object[fStart:] if ';' in i
            ][0]

            data_list = [[float(rgx.sub('', elem)) for elem in vector.split()]
                         for vector in file_object[fStart + 1:fEnd]
                         if not rgx2.search(vector) is None]
            #For scalar fields
            if (len(data_list) == 0):
                data_list = [
                    float(rgx.sub('', elem))
                    for elem in file_object[fStart + 1:fEnd]
                    if len(rgx.sub('', elem)) != 0
                ]
        except OSError as err:
            print("OS error: {0}".format(err))
            return
        except IOError as err:
            print("File read error: {0}".format(err))
            return
        except:
            print("Unexpected error:{0}".format(sys.exc_info()[0]))
            return

        self.lg.success('Data field file successfully read.')
        data = th.DoubleTensor(data_list)
        return data

    def readScalarData(self, timeStep, fileName, dirPath=''):
        return self.readFieldData(
            str(dirPath) + '/' + str(timeStep) + '/' + fileName).type(dtype)

    def readVectorData(self, timeStep, fileName, dirPath=''):
        return self.readFieldData(
            str(dirPath) + '/' + str(timeStep) + '/' + fileName).type(dtype)

    def readTensorData(self, timeStep, fileName, dirPath=''):
        data0 = self.readFieldData(
            str(dirPath) + '/' + str(timeStep) + '/' + fileName).type(dtype)
        #Reshape into [nCells,3,3] Tensor
        return data0.view(data0.size()[0], 3, -1)

    def readSymTensorData(self, timeStep, fileName, dirPath=''):
        data0 = self.readFieldData(
            str(dirPath) + '/' + str(timeStep) + '/' + fileName).type(dtype)
        #Reshape into [nCells,3,3] Tensor
        data = th.DoubleTensor(data0.size()[0], 3, 3)
        data[:, 0, :] = data0[:, 0:3]  #First Row is consistent
        data[:, 1, 0] = data0[:, 1]  #YX = XY
        data[:, 1, 1] = data0[:, 3]  #YY
        data[:, 1, 2] = data0[:, 4]  #YZ
        data[:, 2, 0] = data0[:, 2]  #ZX = XZ
        data[:, 2, 1] = data0[:, 4]  #ZY = YZ
        data[:, 2, 2] = data0[:, 5]

        return data

    def readCellCenters(self, timeStep, dirPath=''):
        """
        Reads in openFoam vector field for the specified timestep
        Args:
            timeStep (float): Time value to read in at
            dirPath (string): Path to the case directory
        Returns:
            data (DoubleTensor): array of data read from file
        """
        #Attempt to read the text file and extract the data into a list
        try:
            file_path = dirPath + "/" + str(timeStep) + "/cellCenters"
            self.lg.log('Reading mesh cell centers ' + file_path)

            rgx = re.compile(r'\((.*?)\)')  #regex to get the contents of parentheses
            file_object = open(file_path, "r").read().splitlines()
            #Find the line where the internal field starts
            fStart = [
                file_object.index(i)
                for i in file_object if 'internalField' in i
            ][-1] + 1
            fEnd = [
                file_object.index(i) for i in file_object[fStart:] if ';' in i
            ][0]

            cell_list0 = [
                rgx.search(center).group(1)
                for center in file_object[fStart + 1:fEnd]
                if not rgx.search(center) is None
            ]
            cell_list = [[float(elem) for elem in c0.split()]
                         for c0 in cell_list0]
        except OSError as err:
            print("OS error: {0}".format(err))
            return
        except IOError as err:
            print("File read error: {0}".format(err))
            return
        except:
            print("Unexpected error:{0}".format(sys.exc_info()[0]))
            return

        return th.DoubleTensor(cell_list)
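
A typical call sequence for the reader above (a minimal sketch; the case directory './case' and time step 100 are hypothetical, and dtype is assumed defined as in the surrounding examples):

reader = FoamReader()
U = reader.readVectorData(100, 'U', dirPath='./case')       # [nCells, 3]
R = reader.readSymTensorData(100, 'R', dirPath='./case')    # [nCells, 3, 3]
centers = reader.readCellCenters(100, dirPath='./case')     # [nCells, 3]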
Ejemplo n.º 11
0
class WeaponMetadata:
    def sql_values(self):
        # Fall back to a sentinel string for any attribute that was never set
        rt_tuple = (
            str(getattr(self, 'item_name', '#DataNotFound#')),
            str(getattr(self, 'buff_id', '#DataNotFound#')),
            str(getattr(self, 'meta_data_refreshed_time', '#DataNotFound#')),
            str(getattr(self, 'type', '#DataNotFound#')),
            str(getattr(self, 'exhibition_image', '#DataNotFound#')),
            str(getattr(self, 'steam_market_url', '#DataNotFound#')))

        return rt_tuple

    def __init__(self, buff_id, db):
        self.buff_id = buff_id
        self.logger = Log('Weapon-Metadata')

        query_result = db.query_weapon_metadata(buff_id)

        if not query_result[0]:

            url = api_concat(api_list.BUFF_SELL_ORDER_API, buff_id)
            request = requests.get(url, headers=HEADERS, cookies=COOKIES)
            request.encoding = 'utf-8'
            content = request.text

            if content:
                jsonified = json.loads(content)

                if jsonified['code'] == 'OK':

                    item = jsonified['data']['goods_infos'][str(buff_id)]

                    try:

                        self.item_name = item['name']
                        self.meta_data_refreshed_time = time.time()
                        self.steam_market_url = 'https://steamcommunity.com/market/listings/730/' + str(
                            item['market_hash_name']).replace(' ', '%20')
                        self.exhibition_image = item['icon_url']
                        self.type = item['tags']['category']['internal_name']

                    # If a single field is missing, leave the attribute unset;
                    # it will be stored in the database as a NULL value.
                    except KeyError:
                        # Log with buff_id, since item_name itself may be the missing field
                        self.logger.log(
                            'Item %s does not contain some value.' %
                            str(buff_id))

                else:
                    raise ValueError('[WeaponMetadata] Buff fetch error.')
            else:
                raise ValueError('Buff returned nothing.')
        else:
            self.item_name, self.meta_data_refreshed_time, self.steam_market_url, self.exhibition_image, self.type = \
                (query_result[1][0][0], query_result[1][0][2], query_result[1][0][5], query_result[1][0][4],
                 query_result[1][0][3])
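
Since sql_values() falls back to '#DataNotFound#' for any missing attribute, persisting a freshly fetched item reduces to a single parameterised insert. A hypothetical sketch (the buff_id value, the db handle's cursor attribute, and the table name are assumptions; the real schema lives elsewhere in the project):

meta = WeaponMetadata(buff_id=35239, db=db)   # hypothetical buff_id
db.cursor.execute(
    'INSERT INTO weapon_metadata VALUES (%s, %s, %s, %s, %s, %s)',
    meta.sql_values())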
Ejemplo n.º 12
0
class SVHN_Predict():
    def __init__(self,dir_root, ReIDCfg, Num_Pred_opt, vis, queueSize=1024):

        self.dir_root = dir_root
        self.dir_list = [d for d in os.listdir(self.dir_root) if os.path.isdir(os.path.join(self.dir_root, d))]
        self.dir_list = sorted(self.dir_list, key=lambda x: int(x))
        # logger.info('Target directory is {}'.format(self.root_path))
        self.datalen = len(self.dir_list)
        self.Start_Index = 0

        # Keep None when visualisation is disabled so later checks on
        # self.vis_path do not fail with AttributeError
        self.vis_path = vis if vis else None


        # Jersey-number rectifier: corrects results according to the four officials' report
        self.Number_Rectifier = Number_Rectifier

        self.batch_size = 60
        self.Num_Pred_opt = Num_Pred_opt  # Parameters for the number-recognition model.
        self.SVHN_predictor = load_in_Svhn_model(self.Num_Pred_opt)

        self.PreProcess_Q = Queue(maxsize=queueSize)  # Input images are pre-processed here before number recognition.
        self.SVHN_Q = Queue(maxsize=queueSize)

        self.transform = transforms.Compose([
            transforms.Resize([54, 54]),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        self.height_threshold = 21
        self.width_threshold = 12

        # 加载 ReID 模型
        self.ReIDCfg = ReIDCfg
        self.num_cls = 4 # Number of person categories on the pitch

        self.logger = Log(__name__, 'SVHN_Predict').getlog()

    def PreProcess_(self):
        self.t_PreProcess = Thread(target=self.PreProcess, args=())
        self.t_PreProcess.daemon = True
        self.t_PreProcess.start()

    def PreProcess(self):
        '''
        Pre-process the images that need jersey-number recognition.
        '''
        self.logger.debug('The pid of SVHN_Predict.PreProcess() : {}'.format(os.getpid()))
        self.logger.debug('The thread of SVHN_Predict.PreProcess() : {}'.format(currentThread()))
        PreProcess_timer = Timer()

        for dir_index in range(self.Start_Index, self.datalen):
            PreProcess_timer.tic()  # start timing
            # self.logger.debug('PreProcess() ======================================== action {}'.format(dir_index))

            this_dir = os.path.join(self.dir_root,self.dir_list[dir_index],'Target')
            imgs_name_list= os.listdir(this_dir)

            if len(imgs_name_list) <= 0:
                self.PreProcess_Q.put((False, (dir_index, [])))
                print('{} is empty'.format(this_dir))
                continue

            imgs_transfered_list = []
            original_imgs = []
            for img_name in imgs_name_list:
                this_img_path = os.path.join(this_dir,img_name)
                # cv2.imread returns None on failure, so guard against that first
                this_img = cv2.imread(this_img_path)
                if this_img is None or this_img.size == 0:
                    print('dir_index : {}, img_name : {} is empty'.format(dir_index, img_name))
                    continue

                height, width, _ = this_img.shape
                if height < self.height_threshold or width < self.width_threshold:
                    # The image is too small, so do not add it to the keypoint detection queue.
                    continue
                img_transfered = Image.fromarray(this_img)
                img_transfered = self.transform(img_transfered)
                imgs_transfered_list.append(img_transfered)
                original_imgs.append(this_img)
            # If none of the images qualify.
            if len(original_imgs) == 0:
                self.PreProcess_Q.put((False, (dir_index, [])))
            else:
                imgs_transfered_list = torch.stack(imgs_transfered_list, dim=0)
                self.PreProcess_Q.put((True, (dir_index, imgs_transfered_list, original_imgs)))

            # self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
            # self.logger.log(24, 'SVHN_Predict.PreProcess() action {} consums {}s'.format(dir_index, PreProcess_timer.toc()))

    def Predict_(self):
        self.t_Predict = Thread(target=self.Predict, args=())
        self.t_Predict.daemon = True
        self.t_Predict.start()

    def Predict(self):
        '''
        Run SVHN number prediction on the pre-processed images
        '''
        Predict_timer = Timer()
        self.logger.debug( 'The pid of SVHN_Predict.Predict() : {}'.format(os.getpid()))
        self.logger.debug( 'The thread of SVHN_Predict.Predict() : {}'.format(currentThread()))

        Number_TrackingID_dict = {}
        for dir_index in range(self.Start_Index, self.datalen):
            Predict_timer.tic() # start timing
            Predict_len = 0
            dir_name = self.dir_list[dir_index]
            PreProcess_Flag, PreResults = self.PreProcess_Q.get()
            # self.logger.debug('Predict() ======================================== action {}'.format(action_index))

            if PreProcess_Flag == False:
                # The input data is invalid
                preNum = -1

            else:
                # The input is valid; read the data
                _, rectangle_imgs,original_imgs = PreResults
                imgs_length = rectangle_imgs.size(0)
                leftover = 0
                if (imgs_length) % self.batch_size:
                    leftover = 1
                num_batches = imgs_length // self.batch_size + leftover

                if self.vis_path:
                    vis_dir = os.path.join(self.vis_path,'{}'.format(dir_name),'SVHN_Predict')
                    makedir_v1(vis_dir)
                    vis_dir_0 = os.path.join(self.vis_path, '{}'.format(dir_name), 'SVHN_Predict_Minus_one')
                    makedir_v1(vis_dir_0)

                NumsArray = []
                for j in range(num_batches):
                    input_imgs_j = rectangle_imgs[j*self.batch_size:min((j+1)*self.batch_size , imgs_length)]
                    length_logits_j, digits_logits_j = self.SVHN_predictor(input_imgs_j.cuda())

                    # torch.max(dim=1) returns (values, indices); we keep the indices
                    length_predictions_j = length_logits_j.max(1)[1].cpu().tolist()
                    digits_predictions_j = [digits_logits_j.max(1)[1].cpu().tolist() for digits_logits_j in digits_logits_j]

                    NumsArray_j = []
                    for Num_i in range(len(length_predictions_j)):
                        Number_len = length_predictions_j[Num_i]

                        if Number_len == 1:
                            Num = digits_predictions_j[0][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 2:
                            Num = digits_predictions_j[0][Num_i] * 10 + digits_predictions_j[1][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 0:
                            Num = -1
                            if self.vis_path:
                                cv2.imwrite(os.path.join(vis_dir_0, '{}_P{}.jpg'.format(self.batch_size*j + Num_i, Num)), original_imgs[self.batch_size*j + Num_i])
                            continue
                        else:
                            continue

                        if self.vis_path:
                            cv2.imwrite(os.path.join(vis_dir, '{}_P{}.jpg'.format(self.batch_size*j + Num_i, Num)), original_imgs[self.batch_size*j + Num_i])

                    NumsArray.extend(NumsArray_j)
                    Predict_len = len(NumsArray)

                if Predict_len > 1:
                    # NumsArray values range from 0 to 99;
                    # count how many times each number appears.
                    NumsArray = np.histogram(NumsArray, bins=100, range=(0, 100))[0]
                    preNum = np.argmax(NumsArray)
                    preNum_count = NumsArray[preNum]
                    if np.where(NumsArray == preNum_count)[0].size > 1:
                        # If more than one number has the maximum count, return -1;
                        # ties could instead be broken by classification scores.
                        preNum = -1
                else:
                    preNum = -1

            # Save the results
            # self.logger.log(24, 'SVHN_Predict.Predict action {} consums {}s'.format(action_index, Predict_timer.toc()))
            self.logger.log(24,'dir_name {} Predict_len = {} Predict num = {} ============='.format(dir_name, Predict_len, preNum))
            Number_TrackingID_dict[int(dir_name)] = int(preNum)

        if self.vis_path:
            with open(os.path.join(self.vis_path, 'Number_results.json'), 'w') as f:
                json.dump(Number_TrackingID_dict, f)


        self.logger.log(24, '-----------------------------Finished SVHN_Predict.Predict() datalen = {}-----------------------------'.format(self.datalen))
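
The histogram step in Predict() above is a plain majority vote over the per-crop predictions, with ties and empty inputs mapped to -1. A minimal standalone sketch of the same rule:

import numpy as np

def vote(nums):
    """Return the most frequent number in 0..99, or -1 on a tie or empty input."""
    if len(nums) <= 1:
        return -1
    counts = np.histogram(nums, bins=100, range=(0, 100))[0]
    best = int(np.argmax(counts))
    if np.where(counts == counts[best])[0].size > 1:
        return -1
    return best

print(vote([7, 7, 23, 7]))  # 7
print(vote([7, 23]))        # -1 (tie)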
Ejemplo n.º 13
0
class SVHN_Predict():
    def __init__(self,
                 opt,
                 ReIDCfg,
                 Num_Pred_opt,
                 Pose_output_queue,
                 S_Number_Predict,
                 vis=False,
                 save_results=False,
                 queueSize=1024):

        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        # logger.info('Target directory is {}'.format(self.root_path))
        self.file_name = opt.file_name

        self.file_save_name_before_Number_Rectify = 'Step1_'
        self.file_save_name_after_Number_Rectify = 'Step2_'

        # The video has to be loaded twice anyway, so reading it separately also works
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file_v2(self.root_path, self.file_name, self.opt)

        # Jersey-number rectifier: corrects results according to the four officials' report
        self.Number_Rectifier = Number_Rectifier

        self.datalen = len(self.action_datas)
        self.batch_size = 60

        self.Num_Pred_opt = Num_Pred_opt  # Parameters for the number-recognition model.
        self.SVHN_predictor = load_in_Svhn_model(self.Num_Pred_opt)

        self.input_Q = Pose_output_queue  # Input results from the skeleton keypoint detection stage
        self.PreProcess_Q = Queue(maxsize=queueSize)  # Input images are pre-processed here before number recognition.
        self.SVHN_Q = Queue(maxsize=queueSize)

        self.transform = transforms.Compose([
            transforms.Resize([54, 54]),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        self.vis = vis
        if self.vis == True:
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.S_Number_Predict = S_Number_Predict
        self.S_Final = max(S_Number_Predict - 1, 0)
        self.height_threshold = 21
        self.width_threshold = 12
        self.save_results = save_results
        if self.save_results == True:
            self.intermediate_results_dir = os.path.join(
                self.root_path, 'intermediate_results', 'SVHN_Predict')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)

        self.main_imgs_dir = os.path.join(self.root_path,
                                          'intermediate_results', 'main_imgs')
        self.FMLoader_dir = os.path.join(self.root_path,
                                         'intermediate_results', 'FMLoader')

        # 加载 ReID 模型
        self.ReIDCfg = ReIDCfg
        self.num_cls = 4  # Number of person categories on the pitch

        self.logger = Log(__name__, 'SVHN_Predict').getlog()

    def Read_From_Cache(self):
        '''
        Load the previously computed results from the cache files
        '''
        self.logger.debug(
            'The pid of SVHN_Predict.Read_From_Cache() : {}'.format(
                os.getpid()))
        self.logger.debug(
            'The thread of SVHN_Predict.Read_From_Cache() : {}'.format(
                currentThread()))
        self.load_intermediate_results(self.S_Final)
        self.logger.log(
            24, ' SVHN_Predict loads action {} from Cache file '.format(
                self.S_Final))

    def PreProcess_(self):
        self.t_PreProcess = Thread(target=self.PreProcess, args=())
        self.t_PreProcess.daemon = True
        self.t_PreProcess.start()

    def PreProcess(self):
        '''
        Pre-process the images that need jersey-number recognition.
        '''
        self.logger.debug('The pid of SVHN_Predict.PreProcess() : {}'.format(
            os.getpid()))
        self.logger.debug(
            'The thread of SVHN_Predict.PreProcess() : {}'.format(
                currentThread()))
        PreProcess_timer = Timer()
        for action_index in range(self.S_Number_Predict, self.datalen):
            PreProcess_timer.tic()  # start timing
            self.logger.debug(
                'PreProcess() ======================================== action {}'
                .format(action_index))
            Flag, input_results = self.input_Q.get()

            if Flag == False:
                # If Flag is False, the input is useless; discard it
                self.PreProcess_Q.put((False, (action_index, [])))
                continue
            # The input is valid; continue processing
            [input_index, sub_imgs_out, target_regions] = input_results

            if input_index != action_index:
                self.logger.log(
                    31,
                    '-------------------------------- index does not match')
                raise Exception(
                    'SVHN_Predict.PreProcess action_index_update {} != input_index {} '
                    .format(action_index, input_index))
            # Pre-process the data.
            rectangle_imgs, original_imgs = self.img_pre_for_SVHN(
                sub_imgs_out, target_regions)

            if type(rectangle_imgs) != torch.Tensor:
                self.PreProcess_Q.put((False, (action_index, [])))
            else:
                self.PreProcess_Q.put(
                    (True, (action_index, rectangle_imgs, original_imgs)))

            # self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
            self.logger.log(
                24, 'SVHN_Predict.PreProcess() action {} consums {}s'.format(
                    action_index, PreProcess_timer.toc()))

    def img_pre_for_SVHN(self, sub_imgs_out, target_regions):
        '''
        Concrete pre-processing steps for the images fed to SVHN
        '''
        rectangle_imgs = []
        original_imgs = []
        for target_index in range(len(target_regions)):
            sub_img = sub_imgs_out[target_index]
            [xmin, xmax, ymin, ymax] = target_regions[target_index]

            i_height, i_width, i_channel = sub_img.shape
            crop_img = sub_img[max(ymin, 0):min(i_height, ymax),
                               max(xmin, 0):min(xmax, i_width)]
            h_i, w_i, _ = crop_img.shape
            if h_i < self.height_threshold or w_i < self.width_threshold:
                # The back region is too small, so discard it as well.
                continue
            crop_image = Image.fromarray(crop_img)
            crop_image = self.transform(crop_image)
            rectangle_imgs.append(crop_image)
            original_imgs.append(sub_img)
        # If none of the crops qualify.
        if len(rectangle_imgs) == 0:
            return None, None

        rectangle_imgs = torch.stack(rectangle_imgs, dim=0)
        return rectangle_imgs, original_imgs

    def Predict_(self):
        self.t_Predict = Thread(target=self.Predict, args=())
        self.t_Predict.daemon = True
        self.t_Predict.start()

    def Predict(self):
        '''
        Run SVHN number prediction on the pre-processed images
        '''
        Predict_timer = Timer()
        self.logger.debug('The pid of SVHN_Predict.Predict() : {}'.format(
            os.getpid()))
        self.logger.debug('The thread of SVHN_Predict.Predict() : {}'.format(
            currentThread()))
        for action_index in range(self.S_Number_Predict, self.datalen):
            Predict_timer.tic()  # start timing
            PreProcess_Flag, PreResults = self.PreProcess_Q.get()
            self.logger.debug(
                'Predict() ======================================== action {}'.
                format(action_index))

            if PreProcess_Flag == False:
                # The input data is invalid
                preNum = -1
                self.action_datas[action_index]['predicted_nums'] = []

            else:
                # The input is valid; read the data
                _, rectangle_imgs, original_imgs = PreResults
                imgs_length = rectangle_imgs.size(0)
                leftover = 0
                if (imgs_length) % self.batch_size:
                    leftover = 1
                num_batches = imgs_length // self.batch_size + leftover

                if self.vis == True:
                    vis_dir = os.path.join(self.vis_path,
                                           '{}'.format(action_index),
                                           'SVHN_Predict')
                    makedir_v1(vis_dir)
                    vis_dir_0 = os.path.join(self.vis_path,
                                             '{}'.format(action_index),
                                             'SVHN_Predict_Minus_one')
                    makedir_v1(vis_dir_0)

                NumsArray = []
                for j in range(num_batches):
                    input_imgs_j = rectangle_imgs[j * self.batch_size:min(
                        (j + 1) * self.batch_size, imgs_length)]
                    length_logits_j, digits_logits_j = self.SVHN_predictor(
                        input_imgs_j.cuda())
                    # torch.max(dim=1) returns (values, indices); we keep the indices
                    length_predictions_j = length_logits_j.max(
                        1)[1].cpu().tolist()
                    digits_predictions_j = [
                        digits_logits_j.max(1)[1].cpu().tolist()
                        for digits_logits_j in digits_logits_j
                    ]

                    NumsArray_j = []
                    for Num_i in range(len(length_predictions_j)):
                        Number_len = length_predictions_j[Num_i]

                        if Number_len == 1:
                            Num = digits_predictions_j[0][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 2:
                            Num = digits_predictions_j[0][
                                Num_i] * 10 + digits_predictions_j[1][Num_i]
                            NumsArray_j.append(Num)
                        elif Number_len == 0:
                            Num = -1
                            if self.vis == True:
                                cv2.imwrite(
                                    os.path.join(
                                        vis_dir_0, '{}_P{}.jpg'.format(
                                            self.batch_size * j + Num_i, Num)),
                                    original_imgs[self.batch_size * j + Num_i])
                            continue
                        else:
                            continue

                        if self.vis == True:
                            cv2.imwrite(
                                os.path.join(
                                    vis_dir, '{}_P{}.jpg'.format(
                                        self.batch_size * j + Num_i, Num)),
                                original_imgs[self.batch_size * j + Num_i])

                    NumsArray.extend(NumsArray_j)

                # Save the per-crop predictions
                self.action_datas[action_index]['predicted_nums'] = NumsArray

                if len(NumsArray) > 1:
                    # NumsArray values range from 0 to 99;
                    # count how many times each number appears.
                    NumsArray = np.histogram(NumsArray,
                                             bins=100,
                                             range=(0, 100))[0]
                    preNum = np.argmax(NumsArray)
                    preNum_count = NumsArray[preNum]
                    if np.where(NumsArray == preNum_count)[0].size > 1:
                        # If more than one number has the maximum count, return -1;
                        # ties could instead be broken by classification scores.
                        preNum = -1
                else:
                    preNum = -1

            # Save the results
            True_num = self.action_datas[action_index]['num']
            self.action_datas[action_index]['num'] = '{}'.format(preNum)
            self.logger.log(
                24, 'SVHN_Predict.Predict action {} consums {}s'.format(
                    action_index, Predict_timer.toc()))
            self.logger.log(
                24,
                'action {} ====================== True num = {}, Predict num = {} ============='
                .format(action_index, True_num, preNum))

            if self.save_results == True:
                self.save_intermediate_results(action_index)

        self.logger.log(
            24,
            '-----------------------------Finished SVHN_Predict.Predict() datalen = {}-----------------------------'
            .format(self.datalen))

        # Finished all computation; save the final results (before number rectification)
        write_data_to_json_file(
            self.root_path,
            self.file_name,
            self.action_datas,
            self.parameter,
            file_save_name=self.file_save_name_before_Number_Rectify)

        # Rectify the final results according to the four officials' report
        self.action_datas = self.Number_Rectifier(
            os.path.join(
                self.root_path, self.file_save_name_before_Number_Rectify +
                self.file_name)).rectify()
        self.logger.log(
            24,
            'Successfully Rectify numbers according to four officials report')
        write_data_to_json_file(
            self.root_path,
            self.file_name,
            self.action_datas,
            self.parameter,
            file_save_name=self.file_save_name_after_Number_Rectify)

        # Then cluster the main images by their ReID features.
        self.cluster_main_imgs()

    def save_intermediate_results(self, action_index):
        '''Save the results of each computation step.'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))
        os.makedirs(intermediate_results_path, exist_ok=True)
        json_file = os.path.join(intermediate_results_path,
                                 '{}_action_data.json'.format(action_index))
        with open(json_file, 'w') as f:
            json.dump(self.action_datas, f)

    def load_intermediate_results(self, action_index):
        '''Read the intermediate results back from file'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))
        os.makedirs(intermediate_results_path, exist_ok=True)
        json_file = os.path.join(intermediate_results_path,
                                 '{}_action_data.json'.format(action_index))
        with open(json_file, 'r') as f:
            self.action_datas = json.load(f)

    def mk_cluster_dirs(self, save_dir, num_cls):
        '''
        save_dir : root directory for the clustering results
        num_cls : number of classes/clusters
        '''
        for i in range(num_cls):
            sub_dir = os.path.join(save_dir, str(i))
            if os.path.exists(sub_dir):
                shutil.rmtree(sub_dir)
            os.makedirs(sub_dir, exist_ok=True)

    def generate_main_imgs(self):
        '''Generate each action's main image on top of the tracking results.'''
        if os.path.exists(self.main_imgs_dir):
            shutil.rmtree(self.main_imgs_dir)
        os.makedirs(self.main_imgs_dir)

        FMLoader = self.FMLoader_dir
        if os.path.exists(FMLoader):
            print('{} exists'.format(FMLoader))
            action_indexes = os.listdir(FMLoader)
            action_indexes = sorted(action_indexes, key=lambda x: int(x))
            for action_index in action_indexes:
                action_dir = os.path.join(FMLoader, '{}'.format(action_index))
                if os.path.exists(action_dir):
                    target_read_path = os.path.join(action_dir, '0.jpg')
                    target_save_path = os.path.join(
                        self.main_imgs_dir, '{}.jpg'.format(action_index))
                    shutil.copy(target_read_path, target_save_path)
        self.logger.log(24, 'SVHN_Predict.generate_main_imgs() Finished')

    def cluster_main_imgs(self):
        '''
        :param ReID: ReID model
        :param ReIDCfg: ReID configuration
        :param main_img_dir: directory of the images to be clustered
        :param action_datas:
        :param save_dir:
        :param num_cls: number of classes wanted
        :return:
        '''
        # Timer
        cluster_main_imgs_timer = Timer()
        cluster_main_imgs_timer.tic()
        # Generate each action's main image on top of the tracking results
        self.generate_main_imgs()

        # Create the ReID model
        self.ReID = ReID_Model(self.ReIDCfg)
        self.ReID.cuda()

        # make directories to save the clustered imgs.
        action_datas = self.action_datas

        # Four target-person categories on the pitch, so create four sub-folders
        save_dir = self.main_imgs_dir
        self.mk_cluster_dirs(save_dir, self.num_cls)
        '''Preprocess the imgs before ReID'''
        if not os.path.exists(self.main_imgs_dir):
            raise ValueError("The main_img_dir is not exits")
        '''Pre-process the images before they are fed into the ReID network'''
        imgs_arrays_all, img_names_all = ReID_imgs_load_by_home_and_away(
            self.ReIDCfg, self.main_imgs_dir, self.action_datas)

        # Split into home and away teams
        cls_res_all = {
            'Home': 0,
            'Away': 2
        }  # Home team goes into folders 0 and 1, away team into folders 2 and 3
        for TeamIndex, TeamType in enumerate(['Home', 'Away']):

            imgs_arrays = imgs_arrays_all[TeamType]
            img_names = img_names_all[TeamType]
            cls_res = cls_res_all[TeamType]
            all_feats = []  # Stores the ReID features of each action's main image
            with torch.no_grad():

                for imgs_array in imgs_arrays:
                    imgs_array = imgs_array.to('cuda')
                    feats = self.ReID(imgs_array).cpu().numpy().tolist()
                    all_feats.extend(feats)

            length = len(all_feats)
            self.logger.log(
                24,
                ' ReID models: there are {} actions of TeamType {} to be dealt with.'
                .format(length, TeamType))
            '''Cluster the ReID features into two classes: goalkeepers and outfield players'''
            assignments, dataset = k_means(all_feats, 2)
            '''Sort the images into folders according to the cluster assignments'''
            for index, cls in enumerate(assignments):
                cls += cls_res  # index of the destination folder
                # Whether recognition succeeded is judged by the number detection.
                if action_datas[int(img_names[index])]['num'] is None or \
                        int(action_datas[int(img_names[index])]['num']) == -1:

                    shutil.copyfile(
                        os.path.join(self.main_imgs_dir,
                                     img_names[index] + '.jpg'),
                        os.path.join(save_dir, '{}'.format(cls),
                                     '{}_.jpg'.format(img_names[index])))
                else:
                    shutil.copyfile(
                        os.path.join(self.main_imgs_dir,
                                     img_names[index] + '.jpg'),
                        os.path.join(
                            save_dir, '{}'.format(cls), '{}_{}.jpg'.format(
                                img_names[index],
                                action_datas[int(img_names[index])]['num'])))

                action_datas[int(img_names[index])]['team'] = str(cls)

        self.action_datas = action_datas
        self.logger.log(
            24,
            'SVHN_Predict.cluster_main_imgs() Finished, consums {}s'.format(
                cluster_main_imgs_timer.toc()))
Ejemplo n.º 14
0
def main():
    args = parse_args()
    cfg = Config.fromfile(args.config)

    device = torch.device('cuda' if cfg.use_gpu else 'cpu')

    if cfg.use_gpu and not torch.cuda.is_available():
        sys.exit('Error: CUDA requested but not available')

    os.makedirs(cfg.checkpoint_dir, exist_ok=True)

    assert cfg.model.num_classes == len(cfg.data.classes)
    num_classes = cfg.model.num_classes

    model_cfg = cfg.model.copy()
    model_name = model_cfg.pop('name')
    net = getattr(models, model_name)
    net = net(**model_cfg)
    net = DataParallel(net)
    net = net.to(device)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    try:
        weight = torch.Tensor(cfg.data.weights)
    except KeyError:
        if cfg.loss in ('CrossEntropy', 'mIoU', 'Focal'):
            sys.exit(
                'Error: the selected loss function requires dataset weight values')

    optimizer_cfg = cfg.optimizer.copy()
    optimizer_name = optimizer_cfg.pop('name')
    if optimizer_name == 'Adam':
        optimizer_cfg.pop('momentum')
    optimizer = getattr(optim, optimizer_name)
    optimizer = optimizer(net.parameters(), **optimizer_cfg)

    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, **cfg.scheduler)

    resume = 0
    if cfg.checkpoint:

        def map_location(storage, _):
            return storage.cuda() if cfg.use_gpu else storage.cpu()

        # https://github.com/pytorch/pytorch/issues/7178
        chkpt = torch.load(cfg.checkpoint, map_location=map_location)
        net.load_state_dict(chkpt['state_dict'])

        if cfg.resume:
            optimizer.load_state_dict(chkpt['optimizer'])
            resume = chkpt['epoch']

    if cfg.loss == 'CrossEntropy':
        criterion = CrossEntropyLoss2d(weight=weight).to(device)
    elif cfg.loss == 'mIoU':
        criterion = mIoULoss2d(weight=weight).to(device)
    elif cfg.loss == 'Focal':
        criterion = FocalLoss2d(weight=weight).to(device)
    elif cfg.loss == 'Lovasz':
        criterion = LovaszLoss2d().to(device)
    elif cfg.loss == 'Dice':
        criterion = DiceLoss().to(device)
    elif cfg.loss == 'Mix':
        criterion = MixedLovaszCrossEntropyLoss(weight=weight).to(device)
    else:
        sys.exit('Error: Unknown cfg.loss value !')

    train_loader, val_loader = get_dataset_loaders(cfg)

    num_epochs = cfg.num_epochs
    if resume >= num_epochs:
        sys.exit('Error: Epoch {} already reached by the checkpoint provided'.
                 format(num_epochs))

    history = collections.defaultdict(list)
    log = Log(os.path.join(cfg.checkpoint_dir, 'log'))

    log.log('--- Hyper Parameters on this training: ---')
    log.log('Model:\t\t {}'.format(model_name))
    log.log('Backbone:\t {}'.format(cfg.model.backbone_name))
    log.log('Pretrained:\t {}'.format(cfg.model.pretrained))
    log.log('Loss function:\t {}'.format(cfg.loss))
    log.log('Batch Size:\t {}'.format(cfg.batch_size))
    log.log('optimizer:\t {}'.format(optimizer_name))
    log.log('Learning Rate:\t {}'.format(cfg.optimizer.lr))
    log.log('Momentum:\t {}'.format(cfg.optimizer.momentum))
    log.log('Weight Decay:\t {}'.format(cfg.optimizer.weight_decay))
    log.log('Step size:\t {}'.format(cfg.scheduler.step_size))
    log.log('Gamma:\t\t {}'.format(cfg.scheduler.gamma))
    log.log('Image Size:\t {}'.format(cfg.data.train.crop_size))
    log.log('Resize Scale:\t {}'.format(cfg.data.train.resize_scale))
    log.log('Flip Probability:\t {}'.format(cfg.data.train.flip_prob))
    log.log('Rotation Probability:\t {}'.format(cfg.data.train.rotation_prob))
    log.log('Rotation Degree:\t {}'.format(cfg.data.train.rotation_degree))
    log.log('Rotate Degree:\t {}'.format(cfg.data.train.rotate_degree))

    if 'weight' in locals():
        log.log('Weights:\t {}'.format(cfg.data.weights))
    log.log('------------------------------------------')

    for epoch in range(resume, num_epochs):
        log.log('Epoch: {}/{}'.format(epoch + 1, num_epochs))

        train_hist = train(train_loader, num_classes, device, net, optimizer,
                           criterion, exp_lr_scheduler)
        log.log(
            'Train    loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}'.
            format(
                train_hist['loss'],
                train_hist['miou'],
                cfg.data.classes[1],
                train_hist['fg_iou'],
                train_hist['mcc'],
            ))

        for k, v in train_hist.items():
            history['train ' + k].append(v)

        val_hist = validate(val_loader, num_classes, device, net, criterion)
        log.log(
            'Validate loss: {:.4f}, mIoU: {:.3f}, {} IoU: {:.3f}, MCC: {:.3f}'.
            format(val_hist['loss'], val_hist['miou'], cfg.data.classes[1],
                   val_hist['fg_iou'], val_hist['mcc']))

        for k, v in val_hist.items():
            history['val ' + k].append(v)

        visual = 'history-{:05d}-of-{:05d}.png'.format(epoch + 1, num_epochs)
        plot(os.path.join(cfg.checkpoint_dir, visual), history)

        checkpoint = 'checkpoint-{:05d}-of-{:05d}.pth'.format(
            epoch + 1, num_epochs)

        states = {
            'epoch': epoch + 1,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict()
        }

        torch.save(states, os.path.join(cfg.checkpoint_dir, checkpoint))
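
Config.fromfile() reads a Python configuration module. A minimal sketch of the fields this script consumes is below; all concrete values and the model name are illustrative assumptions, only the field names come from the accesses in main() above:

# config.py -- hypothetical training configuration
use_gpu = True
checkpoint_dir = './checkpoints'
checkpoint = None            # path to a .pth file to warm-start from
resume = False               # also restore the optimizer state and epoch counter
num_epochs = 100
batch_size = 8
loss = 'Lovasz'              # CrossEntropy | mIoU | Focal | Lovasz | Dice | Mix
model = dict(name='AlbuNet', backbone_name='resnet50',
             pretrained=True, num_classes=2)
optimizer = dict(name='Adam', lr=1e-4, momentum=0.9, weight_decay=1e-5)
scheduler = dict(step_size=30, gamma=0.1)
data = dict(
    classes=['background', 'foreground'],
    weights=[1.0, 4.0],
    train=dict(crop_size=512, resize_scale=(0.75, 1.25), flip_prob=0.5,
               rotation_prob=0.5, rotation_degree=90, rotate_degree=10))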
Ejemplo n.º 15
0
class TrainFlow(object):
    '''
    Trains the recursive encoder / attention-driven decoder
    Args:
        args (argparse): object with the program's arguments
        model (torch.nn): Glow-TM model
        train_loader (torch.dataloader): dataloader with training cases
        test_loader (torch.dataloader): dataloader with testing cases
        log (Log): class for logging console outputs
    '''
    def __init__(self, args, model, train_loader, test_loader, log=None):
        super().__init__()
        self.args = args
        self.trainingLoader = train_loader
        self.testingLoader = test_loader

        if (log is None):
            self.log = Log(self.args)
        else:
            self.log = log

        loss = TMGLowLoss(args, model).to(args.src_device)
        self.parallel_loss = DataParallelCriterion(loss, args.device_ids)

    def trainParallel(self, model, optimizer, tback=1, epoch=0, **kwargs):
        '''
        Trains the model for a single epoch
        Args:
            model (torch.nn.Module): PyTorch model to train
            optimizer (torch.optim): PyTorch optimizer to update the model's parameters
            tback (int): number of time-steps to back-propagate through in time
            epoch (int): current epoch
        Returns:
            total_loss (float): current loss
        '''
        model.train()
        # Total training loss
        total_loss = 0
        beta = self.args.beta

        print("Beta:", beta)
        optimizer.zero_grad()
        for mbIdx, (input0, target0,
                    lstm_seeds) in enumerate(self.trainingLoader):

            aKey = model.module.initLSTMStates(
                lstm_seeds,
                [target0.size(-2), target0.size(-1)])
            # aKey = model.module.initLSTMStates(torch.LongTensor(input0.size(0)).random_(0, int(1e8)), [target0.size(-2), target0.size(-1)])
            a0 = copy.deepcopy(aKey)

            loss = 0  # Time-series loss
            # Loop of time-steps
            tmax = target0.size(1)
            tback = 10  # NOTE: hard-coded here, overriding the tback argument

            input_next = input0[:, :tback].to(self.args.device)
            target_next = target0[:, :tback].to(self.args.device)

            target0_mean = torch.mean(target0, axis=1).to(self.args.device)
            target0_rms = torch.sqrt(
                torch.mean((target0.to(self.args.device) -
                            target0_mean.unsqueeze(1))**2,
                           dim=1)).to(self.args.device)

            # Splits time-series into smaller blocks to calculate back-prop through time
            for i in range(0, tmax // tback):

                input = input_next
                ytarget = target_next

                # Asynch load the next time-series
                if (i + 1 < tmax // tback):
                    input_next = input0[:, (i + 1) * tback:(i + 2) *
                                        tback].cuda(self.args.device,
                                                    non_blocking=True)
                    target_next = target0[:, (i + 1) * tback:(i + 2) *
                                          tback].cuda(self.args.device,
                                                      non_blocking=True)

                loss = 0
                gpu_loss = [0 for i in range(self.args.n_gpu)]
                modelPredictions = TMGLowPredictionItem()
                model.scatterModel()

                for tstep in range(tback):

                    # Model forward
                    outputs = model.sample(input[:, tstep], a0)

                    if (isinstance(outputs, list)):
                        yPred = [output[0] for output in outputs]
                        logp = [output[1] for output in outputs]
                        a0 = [output[2] for output in outputs]
                    else:
                        yPred, logp, a0 = outputs

                    modelPredictions.add(yPred, logp, ytarget[:, tstep])
                    # Recompile recurrent states onto the source device
                    if (self.args.n_gpu > 1):
                        a0 = model.gatherLSTMStates(a0)
                    else:
                        a0 = outputs[2]

                # Compute the reverse KL divergence loss
                outputs = modelPredictions.getOutputs()
                targets = modelPredictions.getTargets()
                loss0 = self.parallel_loss(outputs, targets, target0_mean,
                                           target0_rms)
                if (self.args.n_gpu > 1):
                    gpu_loss = [
                        gpu_loss[i] + loss0[i] for i in range(len(loss0))
                    ]
                else:
                    gpu_loss = [gpu_loss[0] + loss0]

                modelPredictions.clear()
                loss = self.parallel_loss.gather(
                    gpu_loss, output_device=self.args.src_device).mean()
                # Backwards!
                loss.backward()

                # print(getGpuMemoryMap())
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               self.args.max_grad_norm)
                optimizer.step()
                optimizer.zero_grad()
                # Average the LSTM states with the initial state to keep them from drifting
                for j in range(len(a0)):
                    a_out, c_out = a0[j]
                    a_key, c_key = aKey[j]
                    a0[j] = (0.5 * a_out.detach() + 0.5 * a_key,
                             0.5 * c_out.detach() + 0.5 * c_key)

                total_loss = total_loss + loss.detach()
                # Sync cuda processes here
                # Not sure if needed, but hopefully ensures the next data is loaded.
                torch.cuda.synchronize()
                torch.cuda.empty_cache()

            # Add loss of time-series to total loss
            # Mini-batch progress log
            if ((mbIdx + 1) % 5 == 0):
                self.log.log(
                    'Train Epoch: {}; Mini-batch: {}/{} ({:.0f}%); \t Current Loss: {:.6f}'
                    .format(epoch, mbIdx, len(self.trainingLoader),
                            100. * mbIdx / len(self.trainingLoader),
                            total_loss))

        return total_loss
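
    def _relax_state(self, state, key_state, w=0.5):
        '''
        Illustrative helper (a sketch, not called elsewhere) restating the
        state relaxation used at the end of each truncated-BPTT block above:
        the LSTM state is pulled halfway back toward its seeded initial
        value so it cannot drift freely over a long time-series.
        Args:
            state (tuple): current (h, c) LSTM state
            key_state (tuple): seeded initial (h, c) state
            w (float): weight on the current state
        '''
        h, c = state
        h0, c0 = key_state
        return (w * h.detach() + (1.0 - w) * h0,
                w * c.detach() + (1.0 - w) * c0)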

    def test(self, model, samples=1, epoch=0, plot=True):
        '''
        Tests the model
        Args:
            model (torch.nn.Module): PyTorch model to test
            samples (int): number of predictions to sample from the model
            epoch (int): current epoch
            plot (boolean): whether to plot two of the predictions
        Returns:
            mse (float): mean-squared-error between the predictive mean and targets
        '''
        model.eval()
        # Total test loss
        total_loss = 0
        out_std = model.module.out_std.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
        out_mu = model.module.out_mu.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
        for mbIdx, (input0, target0, u0) in enumerate(self.testingLoader):

            u0 = u0.to(self.args.device).unsqueeze(-1).unsqueeze(-1).unsqueeze(
                -1).unsqueeze(-1)
            u0 = torch.cat((u0 / 2.0, u0 / 2.0, u0**2), dim=2)

            u0in = torch.ones(input0[:, :, :1].size()).to(
                self.args.device) * u0[:, :, 0, :, :].unsqueeze(2)
            # input = torch.cat((input0.to(self.args.device), u0in), dim=2)
            input = input0.to(self.args.device)

            ytarget = out_std * target0.to(self.args.device) + out_mu

            dims = [samples] + list(ytarget.size())
            yPred = torch.zeros(dims).type(ytarget.type())

            # Max number of time steps
            tmax = 40
            # Loop through samples
            for i in range(samples):

                aKey = model.module.initLSTMStates(
                    torch.LongTensor(input.size(0)).random_(0, int(1e8)),
                    [ytarget.size(-2), ytarget.size(-1)])
                a0 = copy.deepcopy(aKey)

                # Loop of time-steps
                model.scatterModel()

                for tstep in range(0, tmax + 1):

                    # Model forward
                    outputs = model.sample(input[:, tstep], a0)
                    yPred0, logp, a0 = model.gather(outputs,
                                                    self.args.src_device)

                    out_std = model.module.out_std.unsqueeze(0).unsqueeze(
                        -1).unsqueeze(-1)
                    out_mu = model.module.out_mu.unsqueeze(0).unsqueeze(
                        -1).unsqueeze(-1)
                    yPredHat = out_std * yPred0 + out_mu
                    yPred[i, :, tstep] = yPredHat.detach()

                    # Average current LSTM states with initial state
                    if (tstep % 10 == 0):
                        for j in range(len(a0)):
                            a_out, c_out = a0[j]
                            a_key, c_key = aKey[j]
                            a0[j] = (0.5 * a_out.detach() + 0.5 * a_key,
                                     0.5 * c_out.detach() + 0.5 * c_key)

            if (plot and mbIdx == 0):
                self.log.log('Plotting predictions.')
                plotVelocityPred(self.args,
                                 input,
                                 yPred,
                                 ytarget,
                                 bidx=0,
                                 stride=4,
                                 epoch=epoch)
                plotVelocityPred(self.args,
                                 input,
                                 yPred,
                                 ytarget,
                                 bidx=1,
                                 stride=4,
                                 epoch=epoch)

            # Summation of the squared error between the mean of the samples and target
            total_loss = total_loss + (torch.pow(
                torch.mean(yPred[:, :, 1:tmax + 1], dim=0) -
                ytarget[:, 1:tmax + 1], 2)).sum().detach()

        # Return the mse
        return total_loss / (self.args.ntest * tmax * yPred.size(-2) *
                             yPred.size(-1))
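
A quick numeric sanity check of the returned quantity: the predictive mean over samples is compared against the target, and the summed squared error is divided by the number of evaluated values. A hedged toy example with made-up shapes, not the project's test harness:

import torch

samples, batch, tmax, ny, nx = 4, 2, 40, 8, 8
yPred = torch.randn(samples, batch, tmax + 1, ny, nx)
ytarget = torch.randn(batch, tmax + 1, ny, nx)
se = torch.pow(yPred[:, :, 1:].mean(dim=0) - ytarget[:, 1:], 2).sum()
mse = se / (batch * tmax * ny * nx)   # matches the normalization in test()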
Ejemplo n.º 16
0
    foamNN = FoamSVGD(nsamples, 64) # Number of SVGD particles
    # Load pre-trained neural networks
    #foamNN.loadNeuralNet('./torchNets/foamNet')

    # First set up validation dataset
    #foamNN.getTestingPoints(dataManager, n_data=500, n_mb=256)

    XTdirs = ['../../IgnDelay/xdataTrV']
    YTdirs = ['../../IgnDelay/ydataTrV']
    Xdirs  = ['../../IgnDelay/xdataTeV']
    Ydirs  = ['../../IgnDelay/ydataTeV']

    n_mb=32
    foamNN.getDataPoints(dataManager, XTdirs, YTdirs, Xdirs, Ydirs, stp=2, n_mb=n_mb)

    lg.log('Batch size is ' + str(n_mb))
    n = 1 # Number of training sets
    n_data = [1000 for i in range(n)] # Number of data per training set
    n_mb = [1024 for i in range(n)] # Mini-batch size
    n_epoch = [200 for i in range(n)] # Number of epochs per training set

    foamNN.extra = "-" + str(foamNN.prior_w_shape) + "-" + str(foamNN.prior_w_rate) + "-" + str(foamNN.prior_beta_shape) +  \
                    "-" + str(foamNN.prior_beta_rate) + "-lr-" + str(foamNN.lr) + "-" + str(foamNN.lr_noise) + "-bs-" +  str(foamNN.n_mb) + "-" + str(nsamples) + "-64neu-Vode"


    # Training loop
    for i in range(n):
        # Parse data and create data loaders
        #foamNN.getTrainingPoints(dataManager, n_data = n_data[i], n_mb = n_mb[i])

        lg.log('Training data-set number: '+str(i+1))
Ejemplo n.º 17
0
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 13, 2)

    if (args.epoch_start > 0):
        model.module.load_state_dict(model_state_dict)
        optimizer.load_state_dict(optimizer_state_dict)

    data_loader, training_loader, testing_loader = DataLoaderAuto.init_data_loaders(
        args, model, log)
    modelTrainer = TrainFlow(args,
                             model,
                             training_loader,
                             testing_loader,
                             log=log)

    # ========== Epoch loop ============
    log.log("Training network, let's rock.")
    for epoch in range(args.epoch_start + 1, args.epochs + 1):

        # Time-step size to take
        tstep = (int(epoch / 2) + 10)
        # tstep = (int(epoch/10)+2)
        log.log('tsteps: {}'.format(tstep))

        loss = modelTrainer.trainParallel(model, optimizer, epoch=epoch)
        log.log('Epoch {:d}: Sample Training Loss: {}'.format(epoch, loss))

        if scheduler is not None:
            scheduler.step()
            for param_group in optimizer.param_groups:
                log.log('Learning-rate: {:0.05f}'.format(param_group['lr']))
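
The tstep schedule above lengthens the training rollout as training progresses. A quick look at the values it produces (a sketch, not part of the original script):

for epoch in [1, 2, 10, 20, 40]:
    print(epoch, int(epoch / 2) + 10)   # -> 10, 11, 15, 20, 30
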
Ejemplo n.º 18
0
class DataManager():

    def __init__(self, directories, ransTimes, lesTimes):
        """
        Manages the training data, training data should be organized in the following structure:
        >trainingData
        **|- Flow name
        ****|- RANS
        ******|- Timestep
        ********|- S (symmetric tensor, see Eq. 12)
        ********|- R (rotational tensor, see Eq. 12 )
        ********|- k (RANS TKE)
        ****|- LES
        ******|- Timestep
        ********|- UPrime2Mean (Reynolds stress)
        Args:
            directories (string list): list of strings that contain the directory of training data
            ransTimes (int list): list of RANS training data
            lesTimes (int list): list of LES training data 
        """
        self.lg = Log()
        self.torchReader = TorchReader()
        self.foamReader = FoamReader()
        self.invar = Invariant()

        self.flowDict = []
        for i, dir0 in enumerate(directories):
            # Check to make sure there is a RANS and LES folder
            if(not os.path.isdir(dir0+"/RANS") or not os.path.isdir(dir0+"/LES")):
                self.lg.error('Cannot find RANS or LES folder in flow directory: '+dir0)
                self.lg.warning('Skipping flow')
                continue
            
            # Now check for RANS timestep folder
            if(not os.path.isdir(dir0+"/RANS/"+str(ransTimes[i]))):
                self.lg.error('Incorrect RANS timestep provided for: '+dir0)
                self.lg.warning('Skipping flow')
                continue

            # Now check for LES timestep folder
            if(not os.path.isdir(dir0+"/LES/"+str(lesTimes[i]))):
                self.lg.error('Incorrect LES timestep provided for: '+dir0)
                self.lg.warning('Skipping flow')
                continue
            # If prelim checks pass add the flow to dictionary
            mydict = {}
            mydict.update({'dir': dir0})
            mydict.update({'tRANS': ransTimes[i]})
            mydict.update({'tLES': lesTimes[i]})
            mydict.update({'idxmask': np.array([])})
            self.flowDict.append(mydict)
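
Hypothetical usage of the constructor above, assuming the trainingData layout from the docstring and that the utilities defined earlier (Log, TorchReader, FoamReader, Invariant) are importable; the directory names here are made up:

dirs = ['./trainingData/converging-channel', './trainingData/square-duct']
manager = DataManager(dirs, ransTimes=[60, 90], lesTimes=[1000, 1700])
# Flows failing the RANS/LES folder checks are skipped with a warning.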

    def getDataPoints(self, svgdNN, nData, partition=None, mask=False, gpu=True):
        """
        Reads in flow data for training
        Args:
            svgdNN (foamSVGD) : Neural network model. Not used when randomly sampling 
                                but can be used for other methods when selecting training points
            nData (int): Number of training data
            partition (IntList): List of flow partitions, default evenly distributions
            mask (boolean): True if 
        Returns:
            x_train (DoubleTensor): [nCellx5] Tensor containing the 5 invariant NN inputs
            t_train (DoubleTensor): [nCellx5] Tensor containing the 10 tensor basis
            k_train (DoubleTensor): [nCellx5] RANS TKE
            y_train (DoubleTensor): [nCellx5] Target anisotropic tensor outputs
        """
        x_train = th.Tensor(nData,5).type(dtype) # Invariant inputs
        t_train = th.Tensor(nData,10,3,3).type(dtype) # Tensor basis
        k_train = th.Tensor(nData).type(dtype) # RANS TKE
        y_train = th.Tensor(nData,9).type(dtype) # Target output
    
        # Partition training points between the provided flows
        if(not partition):
            self.lg.warning('No partition array provided...')
            self.lg.log('Evenly distributing points between provided flows')
            partition = []
            for fDict in self.flowDict[:-1]:
                partition.append(int(nData/len(self.flowDict)))
            partition.append(int(nData-sum(partition)))
        else:
            self.lg.log('Using user defined partition')

        indexes = []
        # Randomly select points from the training flows
        self.lg.info('Sampling points based on random selection')
        for i, fDict in enumerate(self.flowDict):
            # Read in RANS and LES data
            # Can read pre-processed data (e.g. readTensorTh in torchReader.py)
            s = self.torchReader.readTensorTh(fDict['tRANS'], 'S', dirPath=fDict['dir']+'/RANS')
            r = self.torchReader.readTensorTh(fDict['tRANS'], 'R', dirPath=fDict['dir']+'/RANS')
            k0 = self.torchReader.readScalarTh(fDict['tRANS'], 'k', dirPath=fDict['dir']+'/RANS')
            rs_avg = self.torchReader.readSymTensorTh(fDict['tLES'], 'UPrime2Mean', dirPath=fDict['dir']+'/LES')

            # Can read raw openFOAM data if one desires (e.g. readTensorData in foamReader.py)
            # s = self.foamReader.readTensorData(fDict['tRANS'], 'S', dirPath=fDict['dir']+'/RANS')
            # r = self.foamReader.readTensorData(fDict['tRANS'], 'R', dirPath=fDict['dir']+'/RANS')
            # k0 = self.foamReader.readScalarData(fDict['tRANS'], 'k', dirPath=fDict['dir']+'/RANS')
            # rs_avg = self.foamReader.readSymTensorData(fDict['tLES'], 'UPrime2Mean', dirPath=fDict['dir']+'/LES')

            k = k0.unsqueeze(0).unsqueeze(0).expand(3,3,k0.size()[0])
            k = k.permute(2, 0, 1)

            # Calculate the target scaled anisotropic tensor
            # Note: scale by RANS TKE so it is consistent during the forward simulation
            # Reynolds stress decomposition: R-S = (2/3)*k*I + k*b
            b_avg = rs_avg/k - (2.0/3.0)*th.eye(3).type(dtype)

            # Randomly select the data points
            indx0 = np.arange(s.size()[0]).astype(int)
            indx0 = np.delete(indx0, fDict['idxmask'])
            np.random.shuffle(indx0)
            meshInd = th.from_numpy(indx0).type(th.LongTensor)[:partition[i]]
            # If masking, add these indexes to the flow's mask (used for validation data)
            if(mask):
                indxs = meshInd.numpy()
                fDict['idxmask'] = np.append(fDict['idxmask'], indxs.astype(int))

            # Start end indexes for current flow
            start0 = sum(partition[:i])
            end0 = sum(partition[:i+1])
            x_train[start0:end0] = self.invar.getInvariants(th.index_select(s,0,meshInd), th.index_select(r,0,meshInd))
            t_train[start0:end0] = self.invar.getTensorFunctions(th.index_select(s,0,meshInd), th.index_select(r,0,meshInd))
            k_train[start0:end0] = th.index_select(k0, 0, meshInd)
            y_train[start0:end0] = th.index_select(b_avg.view(-1,9), 0, meshInd)

        return x_train, t_train, k_train, y_train
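
A small numeric check of the anisotropy target computed above, b = rs/k - (2/3)I, with toy values (th is torch, double precision standing in for dtype):

import torch as th

rs = th.eye(3).double() * 0.4       # toy Reynolds-stress tensor
k = th.full((3, 3), 0.6).double()   # TKE broadcast to 3x3, as in the code
b = rs / k - (2.0 / 3.0) * th.eye(3).double()
# Diagonal entries: 0.4/0.6 - 2/3 = 0, i.e. this toy flow is isotropic.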

    def getDataPoints2D(self,fnameX,fnameY,stp):
        #dataT = self.read_data(fnameX,56)
        #reacT = self.read_reaction(fnameY)
        dataT = np.loadtxt(fnameX)
        reacT = np.loadtxt(fnameY)
        #return dataT[0:-1:stp,2:55], reacT[0:-1:stp,2:54]
        return dataT, reacT

    def getDataPoints2DTest(self,fnameXT,fnameYT,fnameX,fnameY):
        # Training data (used only for the normalizing statistics)
        dataT = self.read_data(fnameXT,56)
        reacT = self.read_reaction(fnameYT)
        # Testing data
        dataTe = self.read_data(fnameX,56)
        reacTe = self.read_reaction(fnameY)

        nc=55
        _, datanormT = self.do_normalization(dataT[:,0:nc],dataTe[:,0:nc],'std')
        nc2 = 54
        _, RRnormT = self.do_normalization(reacT[:,0:nc2],reacTe[:,0:nc2],'std')
        return th.from_numpy(datanormT[0:-1:10,:]).double(), th.from_numpy(RRnormT[0:-1:10,53,None]).double()


    def do_normalization(self,dataTr,dataT,which):

        if(which=='std'):
            datanormTr = (dataTr-np.mean(dataTr,0))/(np.std(dataTr,0))
            datanormT = (dataT-np.mean(dataTr,0))/(np.std(dataTr,0))
        if(which=='minmax'):
            datanormTr = (dataTr-np.min(dataTr,0))/(np.max(dataTr,0)-np.min(dataTr,0))
            datanormT = (dataT-np.min(dataTr,0))/(np.max(dataTr,0)-np.min(dataTr,0))

        return datanormTr, datanormT

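The 'std' branch above normalizes both arrays with the training-set statistics. A self-contained equivalent in plain NumPy (a sketch, independent of the class):

import numpy as np

train = np.random.rand(100, 3)
test = np.random.rand(20, 3)
mu, sd = np.mean(train, 0), np.std(train, 0)
train_n, test_n = (train - mu) / sd, (test - mu) / sd   # mirrors which=='std'
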
    def read_data(self,fname,nc):
        data = np.fromfile(fname,dtype=np.single)
        data = np.reshape(data,(int(data.size/nc),nc))
        #HRR = data[:,0]
        data = np.delete(data,0,1)
        return data

    def read_reaction(self,fname):
        data = np.fromfile(fname,dtype=np.single)
        data = np.reshape(data,(int(data.size/56),56))
        HRR = data[:,0]
        data = np.delete(data,0,1)
        data[:,53]=HRR
        data = np.delete(data,54,1)
        return data
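
A hedged round-trip check of the binary layout read_data expects: a flat float32 file that reshapes to (N, nc), after which column 0 is dropped (the file path is illustrative):

import numpy as np

nc = 56
arr = np.arange(2 * nc, dtype=np.single).reshape(2, nc)
arr.tofile('/tmp/sample.bin')
data = np.fromfile('/tmp/sample.bin', dtype=np.single)
data = np.reshape(data, (int(data.size / nc), nc))
assert np.allclose(np.delete(data, 0, 1), arr[:, 1:])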
Ejemplo n.º 19
0
		'../training-data/tandem-cylinders']
    trainingDir = [os.path.join(os.getcwd(), dir0) for dir0 in trainingDir]
    ransTimes = [60, 90, 60, 60, 60]
    lesTimes = [200, 1000, 250, 1700, 170]
    dataManager = DataManager(trainingDir, ransTimes, lesTimes)
    
    foamNN = FoamSVGD(20) # Number of SVGD particles
    # Load pre-trained neural networks
    #foamNN.loadNeuralNet('./torchNets/foamNet')
    
    # First set up validation dataset
    foamNN.getTestingPoints(dataManager, n_data=500, n_mb=50)

    n = 10 # Number of training sets
    n_data = [10000 for i in range(n)] # Number of data per training set
    n_mb = [20 for i in range(n)] # Mini-batch size
    n_epoch = [10 for i in range(n)] # Number of epochs per training set

    # Training loop
    for i in range(n):
        # Parse data and create data loaders
        foamNN.getTrainingPoints(dataManager, n_data = n_data[i], n_mb = n_mb[i])

        lg.log('Training data-set number: '+str(i+1))
        foamNN.train(n_epoch[i], gpu=True)
        # Save neural networks
        foamNN.saveNeuralNet('foamNet')



Ejemplo n.º 20
0
class Calibrate_transfer():
    def __init__(self,
                 opt,
                 detector_opt,
                 Tracker_output_queue,
                 C_T_output_queue,
                 S_Coordinate_transfer,
                 S_Pose_Estimate,
                 vis=False,
                 save_results=False,
                 queueSize=1024):

        self.logger = Log(__name__, 'Calibrate_transfer').getlog()

        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        # logger.info('Target directory is {}'.format(self.root_path))
        self.file_name = opt.file_name
        # The video has to be loaded twice anyway; reading it separately also works
        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file(self.root_path, self.file_name, self.opt)

        self.datalen = len(self.action_datas)

        self.detector_opt = detector_opt  # Used to configure the tracker parameters.
        self.logger.info('Creating model...')
        self.detector_model = create_JDETracker_model(self.detector_opt)
        self.detector = JDETracker(
            self.detector_opt,
            self.detector_model)  # Use this tracker as a detector

        self.input_Q = Tracker_output_queue  # Overall input of tracking data
        self.PreProcess_Q = Queue(maxsize=queueSize)  # Preprocess the coordinate-transformed crops before detection
        self.tracking_Q = Queue(maxsize=queueSize)
        self.detecions_Q = Queue(maxsize=queueSize)
        self.output_Q = C_T_output_queue

        self.vis = vis
        if self.vis == True:
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.S_Coordinate_transfer = S_Coordinate_transfer
        self.S_Pose_Estimate = S_Pose_Estimate
        self.save_results = save_results
        if self.save_results == True:
            self.intermediate_results_dir = os.path.join(
                self.root_path, 'intermediate_results', 'Calibrate_transfer')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)

    def Read_From_Cache(self):
        '''
        Load previously computed results from the cache files
        '''
        from utils.index_operation import get_index

        self.logger.debug(
            'The pid of Calibrate_transfer.Read_From_Cache() : {}'.format(
                os.getpid()))
        self.logger.debug(
            'The thread of Calibrate_transfer.Read_From_Cache() : {}'.format(
                currentThread()))

        cache_index = get_index(self.intermediate_results_dir)
        # Only the useful range needs to be read.
        action_index = self.S_Pose_Estimate
        for action_index in range(self.S_Pose_Estimate,
                                  self.S_Coordinate_transfer):

            if action_index not in cache_index:
                # Not in the cache, meaning this action itself was False
                self.output_Q.put((False, (action_index, [], [], [], [])))

            else:
                # Load the cached results for this action from disk.
                _, sub_img_tracking, ReID_features_tracking, sub_imgs_detection, ReID_features_detection = self.load_intermediate_resutls(
                    action_index)
                self.output_Q.put(
                    (True,
                     (action_index, sub_img_tracking, ReID_features_tracking,
                      sub_imgs_detection, ReID_features_detection)))

        self.logger.log(
            22, ' Calibrate_transfer loads action {} from Cache file '.format(
                action_index))

    def update_(self):
        self.t_update = Thread(target=self.update, args=())
        self.t_update.daemon = True
        self.t_update.start()

    def update(self):
        '''
        Transform all crops from one camera view into the other views.
        '''
        self.logger.debug('The pid of Calibrate_transfer.update() : {}'.format(
            os.getpid()))
        self.logger.debug(
            'The thread of Calibrate_transfer.update() : {}'.format(
                currentThread()))
        update_timer = Timer()
        sub_img_generate_timer = Timer()
        for action_index in range(self.S_Coordinate_transfer, self.datalen):

            update_timer.tic()  # Start timing
            self.logger.debug(
                'update() ======================================== action {}'.
                format(action_index))
            Flag, input_index, tracking_results = self.input_Q.get()
            if input_index != action_index:
                self.logger.log(
                    31,
                    '----------------------------------- index does not match')
                raise Exception(
                    'Calibrate_transfer.update action_index_update {} != input_index {} '
                    .format(action_index, input_index))

            if Flag == False:
                # If Flag is False, just discard it
                self.tracking_Q.put((False, (action_index, [], [])))
                self.PreProcess_Q.put((False, (action_index, [])))
                continue

            frames_time, sub_imgs, ReID_feature_list, img_points = tracking_results
            # Split into tracking results and detection results obtained by
            # coordinate-transforming each tracked frame; store the tracking
            # results in the queue first.
            self.tracking_Q.put(
                (True, (action_index, sub_imgs, ReID_feature_list)))

            channel, action_time, img_point, video_parameter = read_subdata(
                self.action_datas[action_index], self.Videoparameters)
            calibrateParameter = video_parameter['CalibrateParameter']

            # Convert the pixel coordinates of the tracking results to world coordinates
            '''The first item in the queue is the reference target, used for calibration'''
            '''and not for the later coordinate transforms, so start from the second item'''
            world_points = []
            start_time = frames_time[1]  # Start time of the tracking sequence, relative to kickoff
            for p_index in range(1, len(img_points)):
                img_point = img_points[p_index]
                # The input is a continuous trajectory; due to detection misses, some frames have no img_points (length 0)
                if len(img_point) == 0:
                    world_points.append(None)
                else:
                    world_point = transform_2d_to_3d(
                        img_point,
                        calibrateParameter.cameraMatrix,
                        calibrateParameter.distCoeffs,
                        calibrateParameter.rotation_vector,
                        calibrateParameter.translation_vector,
                        world_z=0)

                    world_point = np.reshape(world_point, [1, 3])
                    world_points.append(world_point)

            # Transform the world coordinates into the other views, then crop + detect
            # print('len(world_points) : ', len(world_points))
            sub_img_generate_timer.tic()
            self.sub_img_generate_multi_thread(channel, action_index,
                                               world_points, start_time)
            # self.logger.info('Calibrate_transfer.sub_img_generate() action {} consums {}s'.format(action_index,sub_img_generate_timer.toc()))
            self.logger.log(
                22, 'Calibrate_transfer.update() action {} consums {}s'.format(
                    action_index, update_timer.toc()))

    def sub_img_generate(self, video_parameter, setting_parameter,
                         world_points, start_time):
        '''
        From world coordinates, generate the pixel coordinates in another view.
        '''
        results = []
        video = video_parameter['video']
        width = video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # Seek to the start of the tracking sequence, then read frame by frame
        time = start_time + video_parameter[
            'delta_t']  # the action time needs the per-channel delta added to align the channels
        video.set(cv2.CAP_PROP_POS_MSEC, round(1000 * time))

        for index in range(len(world_points)):
            _, img = video.read()
            world_point = world_points[index]

            if type(world_point) != np.ndarray:
                continue
            img_point_of_other = object_To_pixel(
                world_point, video_parameter['CalibrateParameter'])
            img_point_of_other = np.reshape(img_point_of_other, 2)

            Message = ScreenSHot_batch(img_point_of_other, img,
                                       setting_parameter, width, height)

            if Message[0] == True:
                # print('sub img of channel {} candidate {} exists'.format(other_channel,img_count_))
                image = Message[1]
                reference_point = Message[2]
                sub_imgs_bias = Message[3]
                results.append([index, image, reference_point, sub_imgs_bias])
            else:
                continue

        return results
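
transform_2d_to_3d and object_To_pixel are project helpers whose implementations are not shown in this excerpt. Below is a minimal sketch of what the pixel-to-world step is assumed to do: back-project an undistorted pixel onto the ground plane (world z = 0) by inverting the pinhole model with OpenCV. pixel_to_world_z0 is a hypothetical name, not the project's API:

import cv2
import numpy as np

def pixel_to_world_z0(img_point, camera_matrix, dist_coeffs, rvec, tvec):
    # Undistort and normalize the pixel to a camera-frame ray
    pt = cv2.undistortPoints(np.array([[img_point]], dtype=np.float64),
                             camera_matrix, dist_coeffs).reshape(2)
    ray_cam = np.array([pt[0], pt[1], 1.0])
    R, _ = cv2.Rodrigues(rvec)
    ray_world = R.T @ ray_cam            # ray direction in world coordinates
    cam_center = -R.T @ tvec.reshape(3)  # camera center in world coordinates
    s = -cam_center[2] / ray_world[2]    # scale factor to hit the z = 0 plane
    return cam_center + s * ray_world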

    def sub_img_generate_multi_thread(self, channel, action_index,
                                      world_points, start_time):
        '''
        From world coordinates, generate the crops in the other views (multi-threaded).
        '''
        results_all = []
        executor = ThreadPoolExecutor(max_workers=len(self.channel_list) - 1)
        task_list = []

        for other_channel in self.channel_list:
            # Same view, no need to generate crops again
            if other_channel == channel:
                continue
            video_parameter = self.Videoparameters[other_channel]
            setting_parameter = self.setting_parameter

            task = executor.submit(self.sub_img_generate, video_parameter,
                                   setting_parameter, world_points, start_time)
            task_list.append(task)
        for task in task_list:
            # result() blocks until the task completes
            results_all += task.result()

        if len(results_all) > 0:
            self.PreProcess_Q.put((True, (action_index, results_all)))
        else:
            self.PreProcess_Q.put((False, (action_index, results_all)))

    def detect_(self):
        self.t_detect = Thread(target=self.detect, args=())
        self.t_detect.daemon = True
        self.t_detect.start()

    def detect(self):
        '''
        Detect the people in each crop with the detector.
        '''
        detect_timer = Timer()
        self.logger.debug('The pid of Calibrate_transfer.detect() : {}'.format(
            os.getpid()))
        self.logger.debug(
            'The thread of Calibrate_transfer.detect() : {}'.format(
                currentThread()))

        for action_index in range(self.S_Coordinate_transfer, self.datalen):

            self.logger.debug(
                'Calibrate_transfer.Detection ------------action {} has been read '
                .format(action_index))
            Flag_PreProcess, (acton_index,
                              Preprocess_results) = self.PreProcess_Q.get()
            detect_timer.tic()
            results = []

            if Flag_PreProcess == False:
                self.detecions_Q.put((False, (acton_index, results)))
                continue
            # TODO: make this parallel
            for [index, img0, reference_point,
                 sub_img_bias] in Preprocess_results:
                # Img preprocess before detection
                img = img0[:, :, ::-1].transpose(2, 0, 1)
                img = np.ascontiguousarray(img, dtype=np.float32)
                img /= 255.0
                # timer.tic()
                blob = torch.from_numpy(img).cuda().unsqueeze(0)
                # detection using tracker kernel
                # dets = [xl, yl, w, h]
                [dets,
                 id_feature] = self.detector.update_for_detection(blob, img0)

                if dets.shape[0] == 0:
                    # No person detected in the crop, continue
                    continue
                results.append(
                    [img0, dets, id_feature, reference_point, sub_img_bias])

            if len(results) > 0:
                self.detecions_Q.put((True, (acton_index, results)))
            else:
                self.detecions_Q.put((False, (acton_index, results)))

            self.logger.log(
                22, 'Calibrate_transfer.detect() action {} consums {}s'.format(
                    action_index, detect_timer.toc()))

    def postProcess_(self):
        self.t_postProcess = Thread(target=self.postProcess, args=())
        self.t_postProcess.daemon = True
        self.t_postProcess.start()

    def postProcess(self):
        '''
        Post-process the detection results.
        '''
        postProcess_timer = Timer()
        self.logger.debug(
            'The pid of Calibrate_transfer.postProcess() : {}'.format(
                os.getpid()))
        self.logger.debug(
            'The thread of Calibrate_transfer.postProcess() : {}'.format(
                currentThread()))

        for action_index in range(self.S_Coordinate_transfer, self.datalen):

            self.logger.debug(
                'postProcess ------------action {} has been read '.format(
                    action_index))

            Flag_detect, (acton_index_detection,
                          results) = self.detecions_Q.get()
            Flag_tracking, (action_index_tracking, sub_imgs_tracking,
                            ReID_features_tracking) = self.tracking_Q.get()

            postProcess_timer.tic()

            if Flag_detect == False or Flag_tracking == False:
                self.output_Q.put((False, (action_index, [], [], [], [])))
                continue

            elif acton_index_detection != action_index or action_index_tracking != action_index:
                raise Exception(
                    'acton_index_detection {} != action_index_tracking {} '.
                    format(acton_index_detection, action_index_tracking))

            if self.vis == True:
                vis_dir_ = os.path.join(self.vis_path,
                                        '{}'.format(action_index),
                                        'Calibrate_transfer')
                makedir_v1(vis_dir_)

            # Extract each sub-box.
            sub_imgs_detection = []
            ReID_features_detection = []
            # Filter all results, keeping those whose ID matches the target person.
            for r_index, [
                    img0, dets, id_feature, reference_point, sub_img_bias
            ] in enumerate(results):

                I_h, I_w, _ = img0.shape
                new_reference_point, target_id = sort_by_point(
                    [acton_index_detection, dets, False],
                    reference_point,
                    input_index='{}_{}'.format(action_index, r_index))
                if target_id is None:
                    '''No suitable box was found when filtering by reference_point'''
                    if self.vis == True:
                        vis_img = np.copy(img0)
                        for cv2_index in range(int(dets.shape[0])):
                            box = dets[cv2_index].tolist()
                            x1, y1, w, h = box
                            c_intbox = tuple(
                                map(int,
                                    (max(0, x1), max(0, y1), min(
                                        x1 + w, I_w), min(y1 + h, I_h))))
                            cv2.rectangle(vis_img, (c_intbox[0], c_intbox[1]),
                                          (c_intbox[2], c_intbox[3]),
                                          (255, 0, 0),
                                          thickness=2)
                        cv2.circle(
                            vis_img,
                            (int(reference_point[0]), int(reference_point[1])),
                            radius=5,
                            color=(0, 0, 255),
                            thickness=-1)  # original point in red
                        cv2.imwrite(
                            os.path.join(vis_dir_, '{}.jpg'.format(r_index)),
                            vis_img)
                    # Skip this result whether or not it was visualized
                    continue

                # print('dets.shape, target_id : ',dets.shape, target_id)
                target_bbox = dets[target_id]
                # print('target_bbox.shape : ', target_bbox.shape)
                target_bbox = target_bbox.tolist()
                # print('target_bbox : ', target_bbox)
                x1, y1, w, h = target_bbox
                # Target region
                intbox = tuple(
                    map(int, (max(0, x1), max(0, y1), min(
                        x1 + w, I_w), min(y1 + h, I_h))))
                sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]

                # ids = np.arryy(result[2])
                target_feature = id_feature[target_id]
                sub_imgs_detection.append(sub_img)
                ReID_features_detection.append(target_feature)

                if self.vis == True:
                    vis_img = np.copy(img0)
                    for cv2_index in range(int(dets.shape[0])):
                        box = dets[cv2_index].tolist()
                        x1, y1, w, h = box
                        c_intbox = tuple(
                            map(int, (max(0, x1), max(0, y1), min(
                                x1 + w, I_w), min(y1 + h, I_h))))
                        cv2.rectangle(vis_img, (c_intbox[0], c_intbox[1]),
                                      (c_intbox[2], c_intbox[3]), (255, 0, 0),
                                      thickness=2)
                    cv2.circle(
                        vis_img,
                        (int(reference_point[0]), int(reference_point[1])),
                        radius=5,
                        color=(0, 0, 255),
                        thickness=-1)  # original point in red
                    cv2.circle(vis_img, (int(
                        new_reference_point[0]), int(new_reference_point[1])),
                               radius=5,
                               color=(0, 255, 255),
                               thickness=-1)
                    cv2.rectangle(vis_img, (intbox[0], intbox[1]),
                                  (intbox[2], intbox[3]), (0, 255, 255),
                                  thickness=2)
                    cv2.imwrite(
                        os.path.join(vis_dir_, '{}.jpg'.format(r_index)),
                        vis_img)

            # A ReID module could be added here to filter out low-quality sub_imgs
            sub_imgs = sub_imgs_detection + sub_imgs_tracking
            ReID_features = ReID_features_detection + ReID_features_tracking

            self.output_Q.put(
                (True,
                 (action_index, sub_imgs_tracking, ReID_features_tracking,
                  sub_imgs_detection, ReID_features_detection)))
            # Save intermediate results
            if self.save_results == True:
                self.save_intermediate_resutls(action_index, sub_imgs,
                                               ReID_features,
                                               sub_imgs_detection,
                                               sub_imgs_tracking,
                                               ReID_features_detection,
                                               ReID_features_tracking)

            self.logger.log(
                22, 'Calibrate_transfer.postProcess() action {} consums {}s'.
                format(action_index, postProcess_timer.toc()))
        # self.logger.log(22, '-----------------------------Finished Calibrate_transfer.postProcess() datalen = {}-----------------------------'.format(self.datalen))

    def save_intermediate_resutls(self, action_index, sub_imgs, ReID_features,
                                  sub_imgs_detection, sub_imgs_tracking,
                                  ReID_features_detection,
                                  ReID_features_tracking):
        '''Save the results of each computation.'''
        intermediate_resutls_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))
        os.makedirs(intermediate_resutls_path, exist_ok=True)
        ReID_features = np.array(ReID_features)
        np.save(
            os.path.join(intermediate_resutls_path,
                         '{}_ReID_features.npy'.format(action_index)),
            ReID_features)
        for img_index in range(len(sub_imgs)):
            cv2.imwrite(
                os.path.join(intermediate_resutls_path,
                             '{}.jpg'.format(img_index)), sub_imgs[img_index])

        # Save the tracking images and features
        intermediate_resutls_path_tracking = os.path.join(
            self.intermediate_results_dir, '{}/tracking'.format(action_index))
        os.makedirs(intermediate_resutls_path_tracking, exist_ok=True)
        ReID_features_tracking = np.array(ReID_features_tracking)
        np.save(
            os.path.join(intermediate_resutls_path_tracking,
                         '{}_ReID_features_tracking.npy'.format(action_index)),
            ReID_features_tracking)
        for img_index_tracking in range(len(sub_imgs_tracking)):
            cv2.imwrite(
                os.path.join(intermediate_resutls_path_tracking,
                             '{}.jpg'.format(img_index_tracking)),
                sub_imgs_tracking[img_index_tracking])

        # Save the detection images and features
        intermediate_resutls_path_detection = os.path.join(
            self.intermediate_results_dir, '{}/detection'.format(action_index))
        os.makedirs(intermediate_resutls_path_detection, exist_ok=True)
        ReID_features_detection = np.array(ReID_features_detection)
        np.save(
            os.path.join(
                intermediate_resutls_path_detection,
                '{}_ReID_features_detection.npy'.format(action_index)),
            ReID_features_detection)
        for img_index_detection in range(len(sub_imgs_detection)):
            cv2.imwrite(
                os.path.join(intermediate_resutls_path_detection,
                             '{}.jpg'.format(img_index_detection)),
                sub_imgs_detection[img_index_detection])

    def load_intermediate_resutls(self, action_index):
        '''Load the intermediate results'''
        intermediate_resutls_path = os.path.join(self.intermediate_results_dir,
                                                 '{}'.format(action_index))

        ReID_features = np.load(
            os.path.join(intermediate_resutls_path,
                         '{}_ReID_features.npy'.format(action_index)))
        ReID_features = [_ for _ in ReID_features]  # Convert to the format we need

        # Read the image file names in this directory.
        sub_imgs_names = [
            img_name for img_name in os.listdir(intermediate_resutls_path)
            if img_name.split('.')[-1] == 'jpg'
        ]
        # Sort the image names in ascending order
        sub_imgs_names = sorted(
            sub_imgs_names, key=lambda img_index: int(img_index.split('.')[0]))
        sub_imgs = []
        for img_name in sub_imgs_names:
            sub_img = cv2.imread(
                os.path.join(intermediate_resutls_path, img_name))
            sub_imgs.append(sub_img)

        # Load the tracking part
        intermediate_resutls_path_tracking = os.path.join(
            intermediate_resutls_path, 'tracking')
        ReID_features_tracking = np.load(
            os.path.join(intermediate_resutls_path_tracking,
                         '{}_ReID_features_tracking.npy'.format(action_index)))
        ReID_features_tracking = [_ for _ in ReID_features_tracking]  # Convert to the format we need

        # Read the image file names in this directory.
        sub_imgs_names_tracking = [
            img_name_tracking for img_name_tracking in os.listdir(
                intermediate_resutls_path_tracking)
            if img_name_tracking.split('.')[-1] == 'jpg'
        ]
        # Sort the image names in ascending order
        sub_imgs_names_tracking = sorted(
            sub_imgs_names_tracking,
            key=lambda img_index: int(img_index.split('.')[0]))
        sub_imgs_tracking = []
        for img_name_tracking in sub_imgs_names_tracking:
            sub_img_tracking = cv2.imread(
                os.path.join(intermediate_resutls_path_tracking,
                             img_name_tracking))
            sub_imgs_tracking.append(sub_img_tracking)

        # Load the coordinate-transfer (detection) part
        intermediate_resutls_path_detection = os.path.join(
            intermediate_resutls_path, 'detection')
        ReID_features_detection = np.load(
            os.path.join(
                intermediate_resutls_path_detection,
                '{}_ReID_features_detection.npy'.format(action_index)))
        ReID_features_detection = [_ for _ in ReID_features_detection]  # Convert to the format we need

        # Read the image file names in this directory.
        sub_imgs_names_detection = [
            img_name_detection for img_name_detection in os.listdir(
                intermediate_resutls_path_detection)
            if img_name_detection.split('.')[-1] == 'jpg'
        ]
        # Sort the image names in ascending order
        sub_imgs_names_detection = sorted(
            sub_imgs_names_detection,
            key=lambda img_index: int(img_index.split('.')[0]))
        sub_imgs_detection = []
        for img_name_detection in sub_imgs_names_detection:
            sub_img_detection = cv2.imread(
                os.path.join(intermediate_resutls_path_detection,
                             img_name_detection))
            sub_imgs_detection.append(sub_img_detection)

        return action_index, sub_imgs_tracking, ReID_features_tracking, sub_imgs_detection, ReID_features_detection
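
The cache loaders above depend on numeric filename sorting rather than lexicographic order. A quick check of the key function used:

names = ['10.jpg', '2.jpg', '1.jpg']
assert sorted(names, key=lambda n: int(n.split('.')[0])) == ['1.jpg', '2.jpg', '10.jpg']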
Ejemplo n.º 21
0
class TMGLowDataLoader(object):
    '''
    Parent class for TM-Glow dataloader creators.
    Note: these are not used as actual data loaders; they create data loaders.
    '''
    def __init__(self, training_dir='.', testing_dir='.', log=None):
        super().__init__()
        # Directory of data
        self.training_dir = training_dir
        self.testing_dir = testing_dir

        if(log is None):
            self.log = Log()
        else:
            self.log = log

        self.input_mean = None
        self.output_mean = None
        self.input_std = None
        self.output_std = None

    def readFluidData(self, input_file_name:str, target_file_name:str, fStride=1, cStride=1):

        coarse_file = os.path.join(self.training_dir, input_file_name)
        try:
            data_npz = np.load(coarse_file)
            self.log.log('Reading file {:s}.'.format(input_file_name), rec=False)
            # Remove z-velocity as it is not needed
            inputData = np.concatenate([data_npz['data'][::cStride,:2,:,:], data_npz['data'][::cStride,3:,:,:]], axis=1)
            # inputData.append(data_np)

            # inputTime.append(data_npz['times'])
        except FileNotFoundError:
            self.log.error("Uh-oh, seems a low-fidelity data file couldn't be found!")
            self.log.error('Check this file exists: {}'.format(coarse_file))
            inputData = None

        # Read in high-fidelity (target data)
        fine_file = os.path.join(self.training_dir, target_file_name)
        try:
            data_npz = np.load(fine_file)
            self.log.log('Reading file {:s}.'.format(target_file_name), rec=False)
            # Remove z-velocity as it is not needed
            targetData = np.concatenate([data_npz['data'][::fStride,:2,:,:], data_npz['data'][::fStride,3:,:,:]], axis=1)
            # targetData.append(data_np)

            # targetTime.append(data_npz['times'])
        except FileNotFoundError:
            self.log.error("Uh-oh, seems a high-fidelity data file couldn't be found!")
            self.log.error('Check this file exists: {}'.format(fine_file))
            targetData = None

        return inputData, targetData
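
A hedged check of the z-velocity removal above, assuming the channels are ordered (u, v, w, p) so that concatenating [:, :2] with [:, 3:] keeps (u, v, p):

import numpy as np

data = np.zeros((5, 4, 8, 8))
data[:, 2] = 1.0   # mark the w channel
kept = np.concatenate([data[:, :2], data[:, 3:]], axis=1)
assert kept.shape[1] == 3 and kept.max() == 0.0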

    def calcNormalizingParams(self, inputData, targetData):
        '''
        Calculates the hyper-parameters used for normalizing the
        training input/output data. Normalizes data to a standard unit Gaussian.
        Args:
            inputData (tensor): [b,t,c,d1,d2] tensor of low-fidelity inputs
            targetData (tensor): [b,t,c,d1*,d2*] tensor of high-fidelity targets
        '''
        self.log.warning('Calculating normalizing constants')
        self.input_mean = torch.zeros(3)
        self.output_mean = torch.zeros(3)

        self.input_mean[0] = torch.mean(inputData[:,:,0])
        self.input_mean[1] = torch.mean(inputData[:,:,1])
        self.input_mean[2] = torch.mean(inputData[:,:,2])

        self.output_mean[0] = torch.mean(targetData[:,:,0])
        self.output_mean[1] = torch.mean(targetData[:,:,1])
        self.output_mean[2] = torch.mean(targetData[:,:,2])

        self.input_std = torch.zeros(3)+1
        self.output_std = torch.zeros(3)+1

        self.input_std[0] = torch.std(inputData[:,:,0])
        self.input_std[1] = torch.std(inputData[:,:,1])
        self.input_std[2] = torch.std(inputData[:,:,2])

        self.output_std[0] = torch.std(targetData[:,:,0])
        self.output_std[1] = torch.std(targetData[:,:,1])
        self.output_std[2] = torch.std(targetData[:,:,2])

    def setNormalizingParams(self, model):
        '''
        Given a PyTorch model, this sets the normalizing parameters of
        the loader class using what is stored in the model. This is done
        to save normalizing constants between runs.
        Args:
            model: PyTorch model with normalizing constants as attributes
        '''
        self.input_mean = torch.zeros(3)
        self.output_mean = torch.zeros(3)
        self.input_mean = model.in_mu.cpu()
        self.output_mean = model.out_mu.cpu()

        self.input_std = torch.zeros(3)
        self.output_std = torch.zeros(3)
        self.input_std = model.in_std.cpu()
        self.output_std = model.out_std.cpu()

    def transferNormalizingParams(self, model):
        '''
        Given a PyTorch model, this takes the calculated normalizing
        parameters and assigns them to the registered parameters of
        the model. This is done to save normalizing constants between runs.
        Args:
            model: PyTorch model whose normalizing-constant params are to be set
        '''
        device = next(model.parameters()).device # Model's device
        model.in_mu = self.input_mean.to(device)
        model.out_mu = self.output_mean.to(device)

        model.in_std = self.input_std.to(device)
        model.out_std = self.output_std.to(device)

    def normalizeInputData(self, inputData):
        '''
        Normalize the input tensor on each channel (x-vel, y-vel, pressure) 
        '''
        # Normalize training data to unit Gaussian 
        inputData[:,:,0] = inputData[:,:,0] - self.input_mean[0]
        inputData[:,:,1] = inputData[:,:,1] - self.input_mean[1]
        inputData[:,:,2] = inputData[:,:,2] - self.input_mean[2]

        inputData[:,:,0] = inputData[:,:,0] / self.input_std[0]
        inputData[:,:,1] = inputData[:,:,1] / self.input_std[1]
        inputData[:,:,2] = inputData[:,:,2] / self.input_std[2]

        return inputData
    
    def normalizeTargetData(self, targetData):
        '''
        Normalize the target tensor on each channel (x-vel, y-vel, pressure)
        '''
        targetData[:,:,0] = targetData[:,:,0] - self.output_mean[0]
        targetData[:,:,1] = targetData[:,:,1] - self.output_mean[1]
        targetData[:,:,2] = targetData[:,:,2] - self.output_mean[2]

        targetData[:,:,0] = targetData[:,:,0] / self.output_std[0]
        targetData[:,:,1] = targetData[:,:,1] / self.output_std[1]
        targetData[:,:,2] = targetData[:,:,2] / self.output_std[2]

        return targetData
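
Hypothetical end-to-end usage of the loader utilities above (toy shapes; the Log utility defined earlier is assumed importable):

import torch

loader = TMGLowDataLoader()
x = torch.randn(4, 10, 3, 16, 16)   # [b, t, c, d1, d2] low-fidelity input
y = torch.randn(4, 10, 3, 32, 32)   # high-fidelity target
loader.calcNormalizingParams(x, y)
x_n = loader.normalizeInputData(x.clone())
print(x_n[:, :, 0].mean().item())   # approximately 0 after normalization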
Ejemplo n.º 22
0
class Alphapose():
    def __init__(self, opt, pose_opt, ReIDCfg, C_T_output_queue, Pose_output_queue,
                 S_Pose_Estimate, S_Number_Predict, vis=False, save_results=False,
                 queueSize=1024):

        self.opt = opt
        self.dir_name = opt.dir_name
        self.root_path = os.path.join(opt.data_root, '{}'.format(opt.dir_name))
        # logger.info('Target directory is {}'.format(self.root_path))
        self.file_name = opt.file_name

        self.Videoparameters, \
        self.setting_parameter, \
        self.action_datas, \
        self.channel_list, \
        self.parameter = read_data_from_json_file_v2(self.root_path, self.file_name, self.opt)  # Does the video need to be read again? Does it tie up resources?


        self.datalen = len(self.action_datas)

        # Load the pose estimator
        self.device = torch.device('cuda')
        self.batchSize = 4
        self.ReID_BatchSize = 50
        self.gpus = opt.gpus
        self.pose_model = build_poser(pose_opt,self.gpus)

        # Load the ReID model
        self.ReIDCfg = ReIDCfg
        self.ReID = ReID_Model(self.ReIDCfg)
        self.ReID.cuda()


        # ReID model parameters
        self.distance_threshold = 1
        self.height_threshold = 95
        self.width_threshold = 40

        self._input_size = pose_opt.DATA_PRESET.IMAGE_SIZE
        self._output_size = pose_opt.DATA_PRESET.HEATMAP_SIZE
        self._sigma = pose_opt.DATA_PRESET.SIGMA
        self.aspect_ratio = 0.45


        if pose_opt.DATA_PRESET.TYPE == 'simple':
            self.transformation = SimpleTransform(
                self, scale_factor=0,
                input_size=self._input_size,
                output_size=self._output_size,
                rot=0, sigma=self._sigma,
                train=False, add_dpg=False, gpu_device=self.device)


        self.input_Q = C_T_output_queue  # Overall input of tracking data
        self.Posing_Q = Queue(maxsize=queueSize)  # Preprocess the coordinate-transformed crops before keypoint detection
        self.PostProcess_Q = Queue(maxsize=queueSize)  # Keypoint results awaiting post-processing
        self.output_Q = Pose_output_queue

        self.vis = vis
        if self.vis == True:
            self.vis_path = os.path.join(self.root_path, 'vis')
            os.makedirs(self.vis_path, exist_ok=True)

        self.save_results = save_results
        self.S_Pose_Estimate = S_Pose_Estimate
        self.S_Number_Predict = S_Number_Predict

        if self.save_results == True:
            self.intermediate_results_dir = os.path.join(self.root_path, 'intermediate_results','Alphapose')
            os.makedirs(self.intermediate_results_dir, exist_ok=True)

        self.logger = Log(__name__, 'Alphapose' ).getlog()

    def Read_From_Cache(self):
        '''
        Load previously computed results from the cache files
        '''
        from utils.index_operation import get_index

        self.logger.debug('The pid of Alphapose.Read_From_Cache() : {}'.format(os.getpid()))
        self.logger.debug('The thread of Alphapose.Read_From_Cache() : {}'.format(currentThread()))

        cache_index = get_index(self.intermediate_results_dir)
        # Only the useful range needs to be read.
        action_index = self.S_Number_Predict
        for action_index in range(self.S_Number_Predict,self.S_Pose_Estimate):

            if action_index not in cache_index:
                # Not in the cache, meaning this action itself was False
                self.output_Q.put((False, [action_index]))

            else:
                # Load the cached results for this action from disk.
                _, sub_imgs_out, target_regions = self.load_intermediate_resutls(action_index)
                self.output_Q.put((True, [action_index,sub_imgs_out,target_regions]))

        self.logger.log(23, ' Alphapose loads action {} from Cache file '.format(action_index))


    def posing_preprocess_(self):
        self.t_posing_preprocess = Thread(target=self.posing_preprocess, args=())
        self.t_posing_preprocess.daemon = True
        self.t_posing_preprocess.start()

    def posing_preprocess(self):
        # Preprocessing
        self.logger.debug('The pid of Alphapose.posing_preprocess() : {}'.format(os.getpid()))
        self.logger.debug('The thread of Alphapose.posing_preprocess() : {}'.format(currentThread()))
        posing_preprocess_timer = Timer()
        for action_index in range(self.S_Pose_Estimate, self.datalen):

            self.logger.debug('alphapose.posing_preprocess() ======================================== action {}'.format(action_index))
            Flag_PreProcess, (input_index, sub_imgs_tracking, ReID_features_tracking,sub_imgs_detection,ReID_features_detection) = self.input_Q.get()
            if input_index != action_index:
                self.logger.log(31, '----------------------------------- index does not match')
                raise Exception('Alphapose.update action_index_update {} != input_index {} '.format(action_index, input_index))

            if Flag_PreProcess == False:
                self.Posing_Q.put((False,[]))
                continue
            else:
                # Start timing
                posing_preprocess_timer.tic()

                inps = []
                cropped_boxes = []

                # Use the ReID features to filter some crops out.

                sub_imgs = self.imgs_sorted_by_ReID(sub_imgs_tracking,sub_imgs_detection,action_index)

                if len(sub_imgs) == 0 :
                    # No images remain after filtering, skip.
                    self.Posing_Q.put((False, []))
                    continue

                for imgs_index in range(len(sub_imgs)):
                    orig_img = sub_imgs[imgs_index]
                    height, width, _ = orig_img.shape
                    box = [0, 0, width - 1, height - 1]
                    inp, cropped_box = self.transformation.test_transform(orig_img, box)
                    inps.append(inp)
                    cropped_boxes.append(cropped_box)

                inps_ = torch.stack(inps,dim=0)
                self.Posing_Q.put((True,(sub_imgs,inps_,cropped_boxes)))
                self.logger.log(23, 'alphapose.posing_preprocess() action {} consums {}s'.format(action_index,
                                                                                                 posing_preprocess_timer.toc()))

    def posing_detect_(self):
        self.t_posing_detect = Thread(target=self.posing_detect, args=())
        self.t_posing_detect.daemon = True
        self.t_posing_detect.start()

    def posing_detect(self):
        posing_detect_timer = Timer()

        for action_index in range(self.S_Pose_Estimate, self.datalen):
            self.logger.debug('posing_detect ------------action {} has been read '.format(action_index))
            Flag_Posing_detect, preprocess_results = self.Posing_Q.get()

            if Flag_Posing_detect == False:
                self.PostProcess_Q.put((False,[]))
                continue
            else:
                posing_detect_timer.tic()
                sub_imgs, inps_ , cropped_boxes = preprocess_results
                inps = inps_.to(self.device)
                inps_len = inps_.size(0)
                leftover = 0
                if (inps_len) % self.batchSize:
                    leftover = 1
                num_batches = inps_len // self.batchSize + leftover
                keypoints_all = []
                for j in range(num_batches):
                    inps_j = inps[j * self.batchSize : min((j + 1) * self.batchSize, inps_len)]
                    sub_cropped_boxes = cropped_boxes[j * self.batchSize : min((j + 1) * self.batchSize, inps_len)]
                    # self.logger.log(23, ' j : {}, inps_j.size() '.format(j, inps_j.size()))
                    hm_j = self.pose_model(inps_j)
                    keypoints_several = self.heats_to_maps(hm_j, sub_cropped_boxes)
                    keypoints_all.extend(keypoints_several)

                self.PostProcess_Q.put((True,(keypoints_all,sub_imgs)))
                self.logger.log(23, 'alphapose.posing_detect() action {} consums {}s'.format(action_index,
                                                                                                 posing_detect_timer.toc()))
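
The batching in posing_detect() is ceil division written with a leftover flag. A quick equivalence check:

n, bs = 10, 4
num_batches = n // bs + (1 if n % bs else 0)
assert num_batches == 3 == -(-n // bs)   # same as ceil(n / bs)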

    def posing_postprocess_(self):
        self.t_posing_postprocess = Thread(target=self.posing_postprocess, args=())
        self.t_posing_postprocess.daemon = True
        self.t_posing_postprocess.start()

    def posing_postprocess(self):
        '''Post-process the skeleton keypoint results and run an initial filtering pass with simple rules.'''
        pposing_postprocess_timer = Timer()
        for action_index in range(self.S_Pose_Estimate, self.datalen):
            self.logger.debug('posing_postprocess ------------action {} has been read '.format(action_index))

            Flag_posing_postprocess, posing_detect_resutls = self.PostProcess_Q.get()

            if Flag_posing_postprocess == False:
                self.output_Q.put((False,[action_index]))
                continue
            else:
                pposing_postprocess_timer.tic()
                keypoints_all,sub_imgs = posing_detect_resutls

                target_regions = []
                sub_imgs_out = []

                # Counters are used regardless of vis, so initialize them unconditionally
                Negative_num = 0
                small_size_num = 0
                small_target_num = 0
                Positive_num = 0

                if self.vis == True:
                    vis_dir_positive = os.path.join(self.vis_path, '{}'.format(action_index), 'Alphapose_positive')
                    makedir_v1(vis_dir_positive)
                    vis_dir_negative = os.path.join(self.vis_path, '{}'.format(action_index), 'Alphapose_negative')
                    makedir_v1(vis_dir_negative)
                    vis_dir_small_size = os.path.join(self.vis_path, '{}'.format(action_index), 'Alphapose_small_size')
                    makedir_v1(vis_dir_small_size)
                    vis_dir_small_target = os.path.join(self.vis_path, '{}'.format(action_index), 'Alphapose_small_target')
                    makedir_v1(vis_dir_small_target)
                for k_index in range(len(keypoints_all)):
                    # Process each keypoint set one by one
                    origin_img = sub_imgs[k_index]
                    height, width, _ = origin_img.shape

                    if height < self.height_threshold or width < self.width_threshold:
                        small_size_num += 1
                        if self.vis == True:
                            img_name = '{}.jpg'.format(k_index)
                            cv2.imwrite(os.path.join(vis_dir_small_size, img_name), origin_img)
                        continue
                    keypoints = keypoints_all[k_index]

                    # This criterion differs from the one in get_box.
                    # Used to judge whether the player is facing away.
                    l_x_max = max(keypoints[5 * 3], keypoints[11 * 3])
                    r_x_min = min(keypoints[6 * 3], keypoints[12 * 3])
                    t_y_max = max(keypoints[5 * 3 + 1], keypoints[6 * 3 + 1])
                    b_y_min = min(keypoints[11 * 3 + 1], keypoints[12 * 3 + 1])

                    if l_x_max < r_x_min and t_y_max < b_y_min:
                        'Preliminary check of whether the player is facing away'
                        [xmin_old, xmax_old], [xmin, xmax, ymin, ymax] = self.get_box(keypoints, height, width, ratio=0.1,
                                                                                 expand_w_min=10)
                        # Compute the upper-body length
                        body_length = ymax - ymin
                        if body_length < 20:  # 130 and 60 should come from opt
                            small_target_num += 1
                            if self.vis == True:
                                img_name = '{}.jpg'.format(k_index)
                                cv2.imwrite(os.path.join(vis_dir_small_target, img_name), origin_img)
                            continue

                        # Compute the shoulder width and hip width
                        Shoulder_width = keypoints[6 * 3] - keypoints[5 * 3]
                        Crotch_width = keypoints[12 * 3] - keypoints[11 * 3]

                        aspect_ratio = (max(Shoulder_width, Crotch_width)) / (body_length)  # Compute the ratio
                        if aspect_ratio >= self.aspect_ratio:
                            # 如果这个比例合适,则送入号码检测
                            sub_imgs_out.append(origin_img)
                            target_regions.append([xmin, xmax, ymin, ymax])
                            Positive_num += 1 # 复合条件的 +1
                            if self.vis == True:
                                img_name = '{}.jpg'.format(k_index)
                                vis_img = np.copy(origin_img)
                                cv2.rectangle(vis_img, (xmin_old, ymin), (xmax_old, ymax), color=(255, 0, 0), thickness=1)
                                cv2.rectangle(vis_img, (xmin, ymin), (xmax, ymax), color=(0, 255, 0), thickness=1)
                                cv2.imwrite(os.path.join(vis_dir_positive, img_name), vis_img)
                    else:
                        Negative_num += 1
                        if self.vis:
                            img_name = '{}.jpg'.format(k_index)
                            cv2.imwrite(os.path.join(vis_dir_negative, img_name), origin_img)

                self.output_Q.put((True, [action_index, sub_imgs_out, target_regions]))

                # Save the intermediate results
                if self.save_results:
                    self.save_intermediate_resutls(action_index, sub_imgs_out, target_regions)

                # Log the per-action breakdown and timing
                self.logger.log(23, 'alphapose.posing_postprocess() action {} consumes {}s; Positive / Negative / small_target / small_size / all = {}/{}/{}/{}/{}'.format(
                    action_index,
                    pposing_postprocess_timer.toc(),
                    Positive_num,
                    Negative_num,
                    small_target_num,
                    small_size_num,
                    len(keypoints_all)))

    def save_intermediate_resutls(self, action_index, sub_imgs_out, target_regions):
        '''Save the results of each run to disk.'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir, '{}'.format(action_index))
        os.makedirs(intermediate_results_path, exist_ok=True)
        # Save the images
        for img_index in range(len(sub_imgs_out)):
            cv2.imwrite(os.path.join(intermediate_results_path, '{}.jpg'.format(img_index)), sub_imgs_out[img_index])
        # Save target_regions
        with open(os.path.join(intermediate_results_path, '{}_target_regions.json'.format(action_index)), 'w') as f:
            results = {'target_regions': target_regions}
            json.dump(results, f)

    def load_intermediate_resutls(self, action_index):
        '''Read the intermediate results back from disk.'''
        intermediate_results_path = os.path.join(self.intermediate_results_dir, '{}'.format(action_index))
        # Collect the image file names in this directory.
        sub_imgs_names = [img_name for img_name in os.listdir(intermediate_results_path) if img_name.split('.')[-1] == 'jpg']
        # Sort the image names in ascending numeric order.
        sub_imgs_names = sorted(sub_imgs_names, key=lambda img_name: int(img_name.split('.')[0]))
        sub_imgs_out = []
        for img_name in sub_imgs_names:
            sub_img = cv2.imread(os.path.join(intermediate_results_path, img_name))
            sub_imgs_out.append(sub_img)

        # Load target_regions
        with open(os.path.join(intermediate_results_path, '{}_target_regions.json'.format(action_index)), 'r') as f:
            results = json.load(f)
            target_regions = results['target_regions']

        return action_index, sub_imgs_out, target_regions
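
    # Round-trip sketch (hypothetical index; assumes self.intermediate_results_dir
    # was configured at construction time):
    #   self.save_intermediate_resutls(0, sub_imgs_out, target_regions)
    #   _, imgs, regions = self.load_intermediate_resutls(0)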

    def heats_to_maps(self, hm_data, cropped_boxes):
        # Convert heatmaps into flat keypoint lists: [x0, y0, score0, x1, y1, score1, ...]
        pred = hm_data.cpu().data.numpy()
        assert pred.ndim == 4

        keypoints_all = []
        for hms_index in range(hm_data.size(0)):
            pose_coord, pose_score = heatmap_to_coord_simple(pred[hms_index], cropped_boxes[hms_index])
            keypoints_single = []
            for n in range(pose_score.shape[0]):
                keypoints_single.append(float(pose_coord[n, 0]))
                keypoints_single.append(float(pose_coord[n, 1]))
                keypoints_single.append(float(pose_score[n]))
            keypoints_all.append(keypoints_single)

        return keypoints_all
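
    # Shape note: hm_data is expected to be an [N, K, H, W] heatmap tensor
    # (the assert above enforces 4 dimensions) with one cropped box per person,
    # so each entry of keypoints_all has length 3 * K.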

    def get_box(self, keypoints, img_height, img_width, ratio=0.1, expand_w_min=10):
        '''This get_box extracts the region of the player's back.'''
        xmin = min(keypoints[5 * 3], keypoints[11 * 3])
        xmax = max(keypoints[6 * 3], keypoints[12 * 3])
        ymin = min(keypoints[5 * 3 + 1], keypoints[6 * 3 + 1])
        ymax = max(keypoints[11 * 3 + 1], keypoints[12 * 3 + 1])

        return [int(round(xmin)), int(round(xmax))], self.expand_bbox(xmin, xmax, ymin, ymax, img_width, img_height,
                                                                      ratio, expand_w_min)

    def expand_bbox(self, left, right, top, bottom, img_width, img_height, ratio=0.1, expand_w_min=10):
        '''
        Expand the box to the left and right by a given ratio; no vertical expansion.
        '''
        width = right - left
        height = bottom - top
        # Expand by ratio * width, but by at least expand_w_min pixels
        expand_w = max(ratio * width, expand_w_min)
        new_left = np.clip(left - expand_w, 0, img_width)
        new_right = np.clip(right + expand_w, 0, img_width)

        return [int(new_left), int(new_right), int(top), int(bottom)]
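
    # Worked example (hypothetical numbers): left=100, right=200, ratio=0.1,
    # expand_w_min=10 -> expand_w = max(0.1 * 100, 10) = 10, so the box becomes
    # [90, 210, top, bottom], clipped to the image width.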

    def imgs_sorted_by_ReID(self, imgs_tracking, imgs_detection, action_index):
        '''Use the ReID model to keep only images whose features match the target person.'''

        sub_imgs = []
        # Compare the tracking sequence against the target person; filtering it
        # also yields the mean ReID feature of the tracking sequence.
        if len(imgs_tracking) == 0:
            # An empty tracking sequence leaves nothing to process; return the empty list.
            return sub_imgs
        else:
            imgs_tracking_index, distmat_tracking, output_feature = imgs_sorted_by_ReID(self.ReID, self.ReIDCfg, imgs_tracking,
                                                                                        distance_threshold=self.distance_threshold,
                                                                                        feat_norm='yes',
                                                                                        version=0,
                                                                                        batch_size=self.ReID_BatchSize)
            for P_index in imgs_tracking_index:
                sub_imgs.append(imgs_tracking[P_index])
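            # The first pass (version=0) also returned the tracking sequence's
            # ReID features as output_feature; the second pass below feeds them
            # back in via input_features.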

            if len(imgs_detection) > 0:
                # Second pass: compare the coordinate-mapped (detection) sequence against the tracking sequence's mean ReID feature
                imgs_detection_index, distmat_detection, _ = imgs_sorted_by_ReID(self.ReID, self.ReIDCfg, imgs_detection,
                                                                                 distance_threshold=self.distance_threshold,
                                                                                 feat_norm='yes',
                                                                                 version=2,
                                                                                 input_features=output_feature,
                                                                                 batch_size=self.ReID_BatchSize)
                for P_index_detection in imgs_detection_index:
                    sub_imgs.append(imgs_detection[P_index_detection])

            if self.vis:
                # Save the tracking-sequence sub_imgs according to the ReID decision
                Positive_dir = os.path.join(self.vis_path, '{}/ReID'.format(action_index))
                makedir_v1(Positive_dir)
                Negative_dir = os.path.join(self.vis_path, '{}/ReID/Negative'.format(action_index))
                makedir_v1(Negative_dir)

                for P_index, _ in enumerate(imgs_tracking):
                    distance = distmat_tracking[0, P_index]
                    if P_index in imgs_tracking_index:
                        cv2.imwrite(os.path.join(Positive_dir, '{}_{:.3f}.jpg'.format(P_index, distance)), imgs_tracking[P_index])
                    else:
                        cv2.imwrite(os.path.join(Negative_dir, '{}_{:.3f}.jpg'.format(P_index, distance)), imgs_tracking[P_index])

                # Save the coordinate-mapped (detection) sub_imgs according to the ReID decision
                Positive_dir_detection = os.path.join(self.vis_path, '{}/ReID/detection'.format(action_index))
                makedir_v1(Positive_dir_detection)
                Negative_dir_detection = os.path.join(self.vis_path, '{}/ReID/detection/Negative'.format(action_index))
                makedir_v1(Negative_dir_detection)
                for P_index_detection, _ in enumerate(imgs_detection):
                    distance = distmat_detection[0, P_index_detection]
                    if P_index_detection in imgs_detection_index:
                        cv2.imwrite(os.path.join(Positive_dir_detection, '{}_{:.3f}.jpg'.format(P_index_detection, distance)),
                                    imgs_detection[P_index_detection])
                    else:
                        cv2.imwrite(os.path.join(Negative_dir_detection, '{}_{:.3f}.jpg'.format(P_index_detection, distance)),
                                    imgs_detection[P_index_detection])

            return sub_imgs

    @property
    def joint_pairs(self):
        """Joint pairs which defines the pairs of joint to be swapped
        when the image is flipped horizontally."""
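        # Indices follow the 17-keypoint COCO convention: 1/2 eyes, 3/4 ears,
        # 5/6 shoulders, 7/8 elbows, 9/10 wrists, 11/12 hips, 13/14 knees,
        # 15/16 ankles; each pair swaps a left joint with its right counterpart.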
        return [[1, 2], [3, 4], [5, 6], [7, 8],
                [9, 10], [11, 12], [13, 14], [15, 16]]