Example #1
    def add_plugin(cls, class_obj):
        """
        Add a plugin

        @param {object} class_obj - plugin class
        """
        # Get the plugin registry dictionary
        _plugins = RunTool.get_global_var(PIPELINE_PLUGINS_VAR_NAME)
        if _plugins is None:
            _plugins = {'processer': dict(), 'router': dict()}
            RunTool.set_global_var(PIPELINE_PLUGINS_VAR_NAME, _plugins)

        # Determine the plugin type
        _type_fun = getattr(class_obj, 'processer_name', None)
        _plugin_type = 'processer'
        if _type_fun is None or not callable(_type_fun):
            _type_fun = getattr(class_obj, 'router_name', None)
            _plugin_type = 'router'

        if _type_fun is None or not callable(_type_fun):
            # Not a standard plugin class
            return

        # Run the plugin's initialization
        class_obj.initialize()

        # Store the plugin in the registry under its name
        _plugins[_plugin_type][_type_fun()] = class_obj
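
A minimal registration sketch; it assumes add_plugin is exposed as a classmethod on the Pipeline class (suggested by Pipeline.get_plugin in the later examples) and MyProcesser is a hypothetical plugin class:

class MyProcesser:
    @classmethod
    def processer_name(cls):
        # Key under which the class is stored in _plugins['processer']
        return 'MyProcesser'

    @classmethod
    def initialize(cls):
        # One-off setup executed during registration
        pass

# Registers MyProcesser under _plugins['processer']['MyProcesser']
Pipeline.add_plugin(MyProcesser)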
Example #2
    def __bg_thread_fun(self):
        """
        Thread function that performs the background output

        """
        self._is_bg_thread_running = True
        while True:
            print('__bg_thread_fun')
            try:
                if self._bg_thread_stop:
                    # Stop flag received; finish the thread. Note: if wait_write_end_when_asyn is True, the buffer must be written out first
                    if not self._wait_write_end_when_asyn or self._buffer.empty():
                        break
                # Try to fetch the next object to write from the queue
                if self._buffer.empty():
                    # No data; go round the loop again
                    RunTool.sleep(0.1)
                else:
                    data = self._buffer.get(False)
                    self.__write(data)
            except Exception:
                # Do not exit the loop when an exception occurs during processing
                RunTool.sleep(0.1)
                continue
        # The thread dies naturally here
        self._is_bg_thread_running = False
Example #3
    def debug_print(*args, **kwargs):
        """
        Print debug information; multiple objects can be passed in and printed

        """
        DEBUG_TOOLS_SWITCH_ON = RunTool.get_global_var('DEBUG_TOOLS_SWITCH_ON')
        if DEBUG_TOOLS_SWITCH_ON is None:
            DEBUG_TOOLS_SWITCH_ON = False

        DEBUG_TOOLS_LOGGER = RunTool.get_global_var('DEBUG_TOOLS_LOGGER')

        if not DEBUG_TOOLS_SWITCH_ON:
            # Debugging is not switched on
            return

        # Output the debug message; assemble the full text first
        _print_info = u'[%s][%s][line:%s]DEBUG INFO:\n%s' % (
            os.path.split(
                os.path.realpath(
                    sys._getframe().f_back.f_code.co_filename)
            )[1],
            sys._getframe().f_back.f_code.co_name,
            sys._getframe().f_back.f_lineno,
            '\n'.join(DebugTool.__get_print_str_seq(args, kwargs))
        )
        DEBUG_TOOLS_LOGGER.debug(_print_info)
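
A minimal usage sketch, assuming the switch and logger are configured through the RunTool global variables read above, and that a standard logging.Logger is acceptable as DEBUG_TOOLS_LOGGER (an assumption):

import logging

logging.basicConfig(level=logging.DEBUG)
RunTool.set_global_var('DEBUG_TOOLS_SWITCH_ON', True)
RunTool.set_global_var('DEBUG_TOOLS_LOGGER', logging.getLogger('debug_tools'))

# Logs the caller's file name, function name and line number, followed by the passed objects
DebugTool.debug_print('step reached', {'key': 'value'})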
Example #4
def func_case1_no_para_with_cresult():
    RunTool.get_global_logger().log(simple_log.INFO,
                                    'running func_case1_no_para_with_cresult')
    time.sleep(0.001)
    _ret = CResult()
    _ret.self_para = [3, 'str']
    return _ret
Example #5
    def pause(self, run_id: str = None):
        """
        Pause pipeline execution

        @param {str} run_id=None - run ID of the pipeline to pause
            Note: if not passed in, the ID of the last executed pipeline run is used
        """
        if not self.is_asyn:
            # Not supported in non-asynchronous mode
            raise RuntimeError("Pipeline is not asynchronous!")

        _run_id, _run_cache = self._get_run_cache(run_id)
        if _run_cache is None:
            raise RuntimeError("Run id not exists!")

        if _run_cache['status'] != 'R':
            _msg = 'Pipeline [%s] not running!' % self.name
            self.log_error('Error: %s' % _msg)
            raise RuntimeError(_msg)

        # Simply set the pipeline status to 'P'
        self._set_status('P', run_id=_run_id)
        if self.pipeline[_run_cache['node_id']].get('is_sub_pipeline', False):
            # A sub-pipeline is running; send the pause instruction to it as well
            try:
                self.running_sub_pipeline[_run_id].pause(run_id=_run_id)
            except Exception:
                pass

        while _run_cache['thread_running']:
            # Wait for the running thread to finish
            RunTool.sleep(0.01)

        # Write a log entry
        self.log_info('Pipeline [%s] pause!' % self.name)
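
A minimal usage sketch, assuming the pipeline was created in asynchronous mode; the constructor keyword and the start call shown here are assumptions, not part of this snippet:

_pipeline = Pipeline('demo', '{...}', is_asyn=True)  # is_asyn keyword is an assumption
_run_id = _pipeline.start(input_data={})             # hypothetical call that starts an asynchronous run
_pipeline.pause(run_id=_run_id)                      # sets status to 'P' and waits for the run thread to stop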
Example #6
def test_JadeTypeDetect():
    """
    Test the jade type detection processer
    """
    _execute_path = RunTool.get_global_var('EXECUTE_PATH')
    _pipeline = RunTool.get_global_var('EMPTY_PIPELINE')
    _processer_class: PipelineProcesser = Pipeline.get_plugin('processer', 'JadeTypeDetect')
    _filelist = FileTool.get_filelist(
        os.path.join(_execute_path, os.path.pardir, 'test_data/test_pic/'),
        is_fullname=True
    )
    for _file in _filelist:
        # Iterate over the files and run the processer
        with open(_file, 'rb') as _fid:
            _file_bytes = _fid.read()
            _input = {
                'type': '',
                'sub_type': '',
                # Image.open needs a path or file object, not raw bytes (assumes the io module is imported)
                'image': Image.open(io.BytesIO(_file_bytes)),
                'score': 0.0
            }
            _output = _processer_class.execute(_input, {}, _pipeline)

            # Output the image and the corresponding text
            _image = _output['image']
            _print_str = 'type: %s\nsub_type: %s\nscore: %s' % (
                _output['type'], _output['sub_type'], str(_output['score']))
            _draw = ImageDraw.Draw(_image)  # draw text onto the PIL image
            # Arg 1: font file path, arg 2: font size; on Windows "simhei.ttf" is normally stored under C:\Windows\Fonts
            _font = ImageFont.truetype("simhei.ttf", 20, encoding="utf-8")
            _draw.text((0, 0), _print_str, (255, 0, 0), font=_font)

            plt.figure(_file)
            plt.imshow(_image)
            plt.show()
Example #7
    def stop_stream(self, stream_tag='default', is_wait=True):
        """
        Stop stream processing for the specified tag

        @param {string} stream_tag='default' - tag of the stream to stop
        @param {bool} is_wait=True - whether to wait for the stream to close before returning

        @throws {AttributeError} - when keep_wait_data is False the stream closes automatically, so calling this method raises this exception directly
        @throws {KeyError} - raised when the given stream tag does not exist

        """
        self._stream_list_lock.acquire()
        try:
            if not self._keep_wait_data:
                # Streams close automatically; manual closing does not apply
                raise AttributeError(u'Streams are closed automatically; manual close is not allowed')

            if stream_tag not in self._stream_list.keys():
                # The stream tag does not exist
                raise KeyError(u'Stream tag does not exist')

            # Set the stop flag
            self._stream_list_tag[stream_tag] = (
                True, self._stream_list_tag[stream_tag][1])
        finally:
            self._stream_list_lock.release()

        # Optionally wait until the stream has closed before returning
        if is_wait:
            while True:
                if stream_tag not in self._stream_list.keys():
                    break
                RunTool.sleep(0.01)
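
A minimal usage sketch, assuming an instance of the stream-handling class (named stream_handler here purely for illustration) that was created with keep_wait_data=True:

try:
    # Blocks until the 'default' stream has actually been removed from the stream list
    stream_handler.stop_stream(stream_tag='default', is_wait=True)
except KeyError:
    # The tag was never registered or has already been closed
    pass
except AttributeError:
    # keep_wait_data is False, so streams close automatically and manual stop is rejected
    pass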
Example #8
def setUpModule():
    # print("test module start >>>>>>>>>>>>>>")
    _i18n_obj = SimpleI18N(lang='zh_cn',
                           trans_file_path=_I18N_File_DIR,
                           trans_file_prefix='message',
                           auto_loads=True)
    set_global_i18n(_i18n_obj)
    RunTool.set_global_var('CONSOLE_GLOBAL_PARA', dict())
Example #9
def set_global_i18n(i18n_obj):
    """
    Set the shared SimpleI18N instance object

    @param {object} i18n_obj - SimpleI18N instance object

    """
    RunTool.set_global_var('SIMPLE_I18N_GLOBAL_OBJECT', i18n_obj)
Example #10
def index():
    """
    Home page handler function
    """
    global RANDOM_STR
    RunTool.sleep(1)  # block for 1 second
    print('resp: ' + RANDOM_STR)
    return RANDOM_STR
Example #11
def func_case1_call_2(a, b, c, **kwargs):
    RunTool.get_global_logger().log(
        simple_log.INFO, 'running func_case1_call_2 : a=%s, b=%s, c=%s: %s' %
        (str(a), str(b), str(c), str(kwargs)))
    time.sleep(0.001)
    # Call 4
    func_case1_call_4('4a', '4b', '4c', k1=kwargs['k1'])
    return
Example #12
    def start_server(self, is_wait=False):
        """
        Start the service
        Note: the service must be in the stopped state before it can be started

        @param {bool} is_wait=False - whether to wait for the service startup to complete before returning

        @returns {CResult} - start result, result.code: '00000' - success, '21401' - service is not in the stopped state and cannot be started, other - exception

        """
        _result = CResult(code='00000')  # success
        with ExceptionTool.ignored_cresult(
                _result,
                logger=self._logger,
                self_log_msg='[%s-STARTING][NAME:%s]%s: ' % (
                    self._server_log_prefix, self._server_name,
                    _('start service error')),
                force_log_level=logging.ERROR):
            # Acquire the lock first to get the most accurate service status
            self.__server_run_status_lock.acquire()
            try:
                if self.__server_run_status != EnumServerRunStatus.Stop:
                    # Not in the stopped state; cannot start
                    _temp_result = CResult(code='21401')  # start failed - service already started
                    self._logger.log(
                        self._log_level, '[%s-STARTING][NAME:%s]%s' %
                        (self._server_log_prefix, self._server_name,
                         _temp_result.msg))
                    return _temp_result

                # Start the service via a thread so the caller does not have to wait
                self.__server_begin_time = datetime.datetime.now()
                self._logger.log(
                    self._log_level, '[%s-STARTING][NAME:%s]%s' %
                    (self._server_log_prefix, self._server_name,
                     _('service starting')))
                self._server_status_change(EnumServerRunStatus.WaitStart,
                                           _result)
                _server_thread = threading.Thread(
                    target=self.__start_server_thread_fun,
                    args=(1, ),
                    name='Thread-Server-Main')
                _server_thread.daemon = True
                _server_thread.start()
            finally:
                # Release the lock
                self.__server_run_status_lock.release()

        # If waiting was requested, loop until the service leaves the WaitStart state
        while is_wait and self.__server_run_status == EnumServerRunStatus.WaitStart:
            RunTool.sleep(0.01)

        # In wait mode, check once more; if the service is not running normally, return the last start result
        if is_wait:
            if self.__server_run_status != EnumServerRunStatus.Running:
                _result = self.__last_start_result

        return _result
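
A minimal usage sketch, assuming a concrete server subclass (named DemoServer here purely for illustration) that inherits this start_server implementation:

_server = DemoServer()                        # hypothetical subclass of the server base class
_result = _server.start_server(is_wait=True)
if _result.code == '00000':
    print('service is running')
elif _result.code == '21401':
    print('service was already started')
else:
    print('start failed: %s' % _result.msg)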
Example #13
    def mask_processer_initialize(cls, graph_var_name: str,
                                  processer_name: str):
        """
        Common initialization function for the object-detection mask processer

        @param {str} graph_var_name - global variable name of the object-detection frozen graph
        @param {str} processer_name - processer name
        """
        _graph = RunTool.get_global_var(graph_var_name)
        if _graph is None:
            _graph = dict()
            RunTool.set_global_var(graph_var_name, _graph)
        else:
            # Model already loaded; nothing more to do
            return

        _execute_path = RunTool.get_global_var('EXECUTE_PATH')
        if _execute_path is None:
            _execute_path = os.getcwd()
        _config = RunTool.get_global_var(
            'PIPELINE_PROCESSER_PARA')[processer_name]

        _pb_file = os.path.join(_execute_path, _config['frozen_graph'])
        _pb_labelmap = os.path.join(_execute_path, _config['labelmap'])

        # Basic detection parameters
        _graph['min_score'] = _config.get('min_score', 0.8)
        _graph['labelmap'], _graph['other_id'] = Tools.load_labelmap(
            _pb_labelmap, encoding=_config.get('encoding', 'utf-8'))

        _mask_graph = tf.Graph()
        with _mask_graph.as_default():
            _od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(_pb_file, 'rb') as _fid:
                _serialized_graph = _fid.read()
                _od_graph_def.ParseFromString(_serialized_graph)
                tf.import_graph_def(_od_graph_def, name='')

        _ops = _mask_graph.get_operations()
        _all_tensor_names = {
            output.name
            for op in _ops for output in op.outputs
        }
        _tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
        ]:
            _tensor_name = key + ':0'
            if _tensor_name in _all_tensor_names:
                _tensor_dict[key] = _mask_graph.get_tensor_by_name(
                    _tensor_name)

        _graph['session'] = tf.Session(graph=_mask_graph)
        _graph['tensor_dict'] = _tensor_dict
        _graph['image_tensor'] = _mask_graph.get_tensor_by_name(
            'image_tensor:0')
Example #14
    def __del__(self):
        """
        Destructor: remove this object's entries from the global variables when the object is deleted
        """
        _ROBOT_SELF_ROUTERS = RunTool.get_global_var('ROBOT_SELF_ROUTERS')
        _ROBOT_SELF_ROUTERS.pop(self.robot_id, None)

        _ROBOT_INFOS = RunTool.get_global_var('ROBOT_INFOS')
        _ROBOT_INFOS.pop(self.robot_id, None)
Example #15
    def detect_processer_initialize(cls, graph_var_name: str,
                                    processer_name: str):
        """
        Common initialization function for the object-detection processer

        @param {str} graph_var_name - global variable name of the object-detection frozen graph
        @param {str} processer_name - processer name
        """
        _graph = RunTool.get_global_var(graph_var_name)
        if _graph is None:
            _graph = dict()
            RunTool.set_global_var(graph_var_name, _graph)
        else:
            # Model already loaded; nothing more to do
            return

        _execute_path = RunTool.get_global_var('EXECUTE_PATH')
        if _execute_path is None:
            _execute_path = os.getcwd()
        _config = RunTool.get_global_var(
            'PIPELINE_PROCESSER_PARA')[processer_name]

        _pb_file = os.path.join(_execute_path, _config['frozen_graph'])
        _pb_labelmap = os.path.join(_execute_path, _config['labelmap'])

        # Basic detection parameters
        _graph['min_score'] = _config.get('min_score', 0.8)
        _graph['labelmap'], _graph['other_id'] = Tools.load_labelmap(
            _pb_labelmap, encoding=_config.get('encoding', 'utf-8'))

        _detection_graph = tf.Graph()
        with _detection_graph.as_default():
            _od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(_pb_file, 'rb') as _fid:
                _serialized_graph = _fid.read()
                _od_graph_def.ParseFromString(_serialized_graph)
                tf.import_graph_def(_od_graph_def, name='')

            _graph['session'] = tf.Session(graph=_detection_graph)

        # Input tensor is the image
        _graph['image_tensor'] = _detection_graph.get_tensor_by_name(
            'image_tensor:0')
        # Output tensors are the detection boxes, scores, and classes
        # Each box represents a part of the image where a particular object was detected
        _graph['detection_boxes'] = _detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represents level of confidence for each of the objects.
        # The score is shown on the result image, together with the class label.
        _graph['detection_scores'] = _detection_graph.get_tensor_by_name(
            'detection_scores:0')
        _graph['detection_classes'] = _detection_graph.get_tensor_by_name(
            'detection_classes:0')
        # Number of objects detected
        _graph['num_detections'] = _detection_graph.get_tensor_by_name(
            'num_detections:0')
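
A minimal inference sketch, assuming _graph is the dictionary populated by this initializer and image is an HxWx3 numpy array (TF1-style session API, matching the code above):

import numpy as np

# The model expects a batch dimension: shape (1, height, width, 3)
_expanded = np.expand_dims(image, axis=0)

(_boxes, _scores, _classes, _num) = _graph['session'].run(
    [_graph['detection_boxes'], _graph['detection_scores'],
     _graph['detection_classes'], _graph['num_detections']],
    feed_dict={_graph['image_tensor']: _expanded}
)

# Keep only detections above the configured confidence threshold
_keep = _scores[0] >= _graph['min_score']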
Example #16
def start_server(**kwargs):
    """
    Start the chat server application
    """
    SERVER_CONFIG = RunTool.get_global_var('SERVER_CONFIG')
    _loader = QAServerLoader(SERVER_CONFIG, app=app)
    RunTool.set_global_var('QA_LOADER', _loader)

    # Start the service
    _loader.start_restful_server()
Example #17
def func_case1_call_1(a, b, **kwargs):
    RunTool.get_global_logger().log(
        simple_log.INFO, 'running func_case1_call_1 : a=%s, b=%s : %s' %
        (str(a), str(b), str(kwargs)))
    time.sleep(0.001)
    # Call 2
    func_case1_call_2('2a', '2b', '2c', k1=kwargs['k1'])
    # Call 3
    func_case1_call_3(k1=kwargs['k1'])
    return
Example #18
    def _accept_one(self, server_opts, net_info):
        """
        Listen for and accept one connection request, then return
        Provides the method that listens and returns an accepted connection; note that this function must catch and handle its own exceptions

        @param {object} server_opts - network service startup parameters
        @param {object} net_info - network connection info object, the result obtained in _start_server_without_accept

        @returns {CResult} - connection result:
            result.code : '00000' - success, '20407' - timed out waiting for a client connection request
            result.net_info : client connection info object, passed on to the thread that handles this single connection

        """
        # Subclasses must implement this capability
        _result = CResult('00000')
        _result.net_info = None
        with ExceptionTool.ignored_cresult(
            _result,
            logger=self._logger,
            expect=(BlockingIOError),
            expect_no_log=True,  # do not log timeouts
            error_map={BlockingIOError: ('20407', None)},
            self_log_msg='[LIS][NAME:%s]%s error: ' % (
                self._server_name, _('accept client connect')),
            force_log_level=None
        ):
            # _sys_str = platform.system()
            _csocket, _addr = net_info.csocket.accept()  # accept a client connection, returning the client socket and address
            _csocket.setblocking(False)   # set the socket to non-blocking right after it is created
            _result.net_info = NullObj()
            _result.net_info.csocket = _csocket
            _result.net_info.raddr = _addr
            _result.net_info.laddr = _csocket.getsockname()
            _result.net_info.send_timeout = server_opts.send_timeout
            _result.net_info.recv_timeout = server_opts.recv_timeout

            # Handle data in non-blocking mode; timeouts are implemented by the caller
            """
            if (_sys_str == 'Windows'):
                _csocket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, server_opts.recv_timeout)
                _csocket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDTIMEO, server_opts.send_timeout)
            else:
                # on Linux the timeout is set differently and needs re-testing
                _csocket.settimeout(server_opts.recv_timeout / 1000)
            """
            self._logger.log(
                self._log_level,
                '[LIS][NAME:%s]%s: %s - %s' % (
                    self._server_name, _('accept one client connection'), str(_addr), str(_csocket)
                )
            )
        if not _result.is_success():
            # An error occurred; sleep for a short while
            RunTool.sleep(0.01)
        return _result
Example #19
    def set_global_time_wait(cls, robot_info: dict, action_name: str,
                             run_id: str, interval: float, **kwargs):
        """
        Set the global unified wait interval

        @param {dict} robot_info - common parameter, robot info passed in by default when called
        @param {str} action_name - common parameter, action name passed in by default when called
        @param {str} run_id - run id
        @param {float} interval - wait interval to set
        """
        RunTool.set_global_var('COMMON_ACTION_TIME_WAIT', interval)
Example #20
    def chrome_para_set_find_step_tag(cls, robot_info: dict, action_name: str,
                                      run_id: str, value: float,
                                      **kwargs) -> float:
        """
        Set parameter value - control lookup step tag

        @param {dict} robot_info - common parameter, robot info passed in by default when called
        @param {str} action_name - common parameter, action name passed in by default when called
        @param {str} run_id - run id
        @param {float} value - parameter value to set
        """
        RunTool.set_global_var('CHROME_ACTION_PARA_FIND_STEPS_TAG', value)
Example #21
    def chrome_para_set_wait_less_timeout(cls, robot_info: dict,
                                          action_name: str, run_id: str,
                                          value: float, **kwargs) -> float:
        """
        Set parameter value - minimum wait timeout

        @param {dict} robot_info - common parameter, robot info passed in by default when called
        @param {str} action_name - common parameter, action name passed in by default when called
        @param {str} run_id - run id
        @param {float} value - parameter value to set, in seconds
        """
        RunTool.set_global_var('CHROME_ACTION_PARA_WAIT_LESS_TIMEOUT', value)
Example #22
    def recv_data(cls, net_info, recv_para={}):
        """
        Read data from the specified network connection

        @param {object} net_info - network info object to read from (for example a socket object)
        @param {dict} recv_para - parameters for reading data, including:
            recv_len {int} - length of data to read, required
            overtime {int} - read timeout in milliseconds, optional

        @returns {CResult} - data read result:
            result.code : '00000' - success, '20403' - read timed out, other - read failure
            result.data : the data object that was read (concrete type and definition are up to the implementing class)
            result.recv_time : datetime, the time receiving actually started
            result.overtime : int, timeout in milliseconds; available when the result is a timeout

        """
        # Subclasses must implement this capability
        if not isinstance(recv_para, dict):
            recv_para = {}
        _result = CResult('00000')
        _result.data = b''
        _result.recv_time = datetime.datetime.now()
        _overtime = 10000
        if 'overtime' in recv_para.keys():
            # Passed in by the caller; this timeout takes precedence
            _overtime = recv_para['overtime']
        elif hasattr(net_info, 'recv_timeout'):
            # Otherwise use the timeout configured on net_info
            _overtime = net_info.recv_timeout
        _result.overtime = _overtime

        with ExceptionTool.ignored_cresult(
            _result
        ):
            _rest_bytes = recv_para['recv_len']
            while _rest_bytes > 0:
                # Check whether the read has timed out
                if (datetime.datetime.now() - _result.recv_time).total_seconds() * 1000 > _overtime:
                    # Timed out
                    _result.change_code(code='20403')
                    break

                _buffer = b''
                with ExceptionTool.ignored(expect=(BlockingIOError)):
                    # Receive data
                    _buffer = net_info.csocket.recv(_rest_bytes)
                    if len(_buffer) > 0:
                        _result.data = _result.data + _buffer
                        _rest_bytes = _rest_bytes - len(_buffer)
                    else:
                        # Sleep briefly
                        RunTool.sleep(0.001)
        return _result
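
A minimal usage sketch, assuming net_info is the connection object produced by _accept_one in Example #18 (it has a csocket attribute and recv_timeout) and that the caller expects a 4-byte header; NetServiceClass is a hypothetical name for the implementing class:

# Read a fixed-length 4-byte header from the accepted connection
_recv_result = NetServiceClass.recv_data(_accept_result.net_info, recv_para={'recv_len': 4, 'overtime': 5000})
if _recv_result.code == '00000':
    _head_bytes = _recv_result.data          # exactly 4 bytes on success
elif _recv_result.code == '20403':
    print('receive timed out after %d ms' % _recv_result.overtime)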
Example #23
    def tap_continuity(self,
                       pos_seed: list,
                       times: float,
                       thread_count: int = 2,
                       random_sleep: bool = False,
                       sleep_min: float = 0.0,
                       sleep_max: float = 0.5):
        """
        Tap continuously at random positions within the given range

        @param {list} pos_seed - list of allowed tap positions [(x,y), ...], picked at random
        @param {float} times - how long to keep tapping, in seconds
        @param {int} thread_count=2 - number of tap threads; more threads means a higher tap frequency
        @param {bool} random_sleep=False - whether each thread sleeps for a random interval between taps
        @param {float} sleep_min=0.0 - minimum sleep per tap, in seconds
        @param {float} sleep_max=0.5 - maximum sleep per tap, in seconds
        """
        # Prepare parameters
        _cmd_mode = 'shell input tap'
        _seed_len = len(pos_seed)

        # Define the tap thread function
        def tap_thread_fun():
            while True:
                # Loop forever; the thread does not stop by itself
                _pos = pos_seed[random.randint(0, _seed_len - 1)]
                _cmd = '%s %d %d' % (_cmd_mode, _pos[0], _pos[1])
                # Do not check the result
                self.adb_run_inner(_cmd, ignore_error=True)
                # Sleep if requested
                if random_sleep:
                    time.sleep(random.uniform(sleep_min, sleep_max))

        # Start the tap threads
        _start = datetime.datetime.now()
        _thread_list = list()
        for i in range(thread_count):
            _running_thread = threading.Thread(target=tap_thread_fun,
                                               name='Thread-Tap-Running %s' %
                                               str(i))
            _running_thread.daemon = True
            _running_thread.start()

            # Add to the list so the threads can be stopped later
            _thread_list.append(_running_thread)

        # Monitor the elapsed time
        while (datetime.datetime.now() - _start).total_seconds() < times:
            time.sleep(0.01)

        # Stop the threads
        for _thread in _thread_list:
            RunTool.stop_thread(_thread)
Example #24
    def get_token_auth(cls) -> HTTPTokenAuth:
        """
        Get the HTTPTokenAuth object

        @returns {HTTPTokenAuth} - the single shared HTTPTokenAuth object
        """
        _auth = RunTool.get_global_var('HTTP_TOKEN_AUTH')
        if _auth is None:
            _auth = HTTPTokenAuth(scheme='JWT')
            RunTool.set_global_var('HTTP_TOKEN_AUTH', _auth)

        return _auth
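
A minimal usage sketch, assuming HTTPTokenAuth here is Flask-HTTPAuth's class (suggested by the scheme='JWT' argument, but an assumption), that app is a Flask application, and that verify_jwt_token is a hypothetical helper that validates the token:

_auth = SomeClass.get_token_auth()   # class name assumed; returns the shared HTTPTokenAuth

@_auth.verify_token
def _verify_token(token):
    # Return the authenticated user (or None) for the presented JWT (hypothetical helper)
    return verify_jwt_token(token)

@app.route('/api/protected')
@_auth.login_required
def protected():
    return 'ok'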
Example #25
    def __get_map_error_code(self):
        """
        Get the global error-code mapping table

        """
        _map_error_code = RunTool.get_global_var('HIVENET_ERROR_CODE_MAP')
        if _map_error_code is None:
            _map_file = os.path.realpath(os.path.abspath(os.path.dirname(__file__) + '/') +
                                         '/hivenet_error_code/map_error_code.json')
            _map_error_code = {}
            with open(_map_file, 'rt', encoding='utf-8') as f:
                _map_error_code = json.load(f)
            RunTool.set_global_var('HIVENET_ERROR_CODE_MAP', _map_error_code)
        return _map_error_code
Example #26
    def stop_stream_force(self, is_wait=True):
        """
        Force-close all streams currently being processed

        @param {bool} is_wait=True - whether to wait for all streams to close before returning

        """
        self._force_stop_tag = True
        if is_wait:
            # Check whether all streams have stopped
            while True:
                if len(self._stream_list_tag.keys()) == 0:
                    break
                RunTool.sleep(0.01)
Example #27
def init_pipeline_plugins():
    """
    Load the pipeline plugins
    """
    # Load the configuration
    _execute_path = os.path.realpath(os.path.join(
        os.path.dirname(__file__), os.path.pardir, 'search_by_image'
    ))
    RunTool.set_global_var('EXECUTE_PATH', _execute_path)

    _config = os.path.join(_execute_path, 'conf/server_jade.xml')
    _config_xml = SimpleXml(_config, encoding='utf-8')
    _server_config = _config_xml.to_dict()['server']

    RunTool.set_global_var(
        'PIPELINE_PROCESSER_PARA', _server_config['pipeline']['processer_para']
    )
    RunTool.set_global_var('PIPELINE_ROUTER_PARA', _server_config['pipeline']['router_para'])
    _plugins_path_list = _server_config['pipeline']['plugins_path'].split(',')
    for _plugins_path in _plugins_path_list:
        Pipeline.load_plugins_by_path(
            os.path.join(_execute_path, _plugins_path.strip())
        )

    _logger: Logger = None
    if 'logger' in _server_config.keys():
        _logger_config = _server_config['logger']
        if len(_logger_config['conf_file_name']) > 0 and _logger_config['conf_file_name'][0] == '.':
            # Relative path
            _logger_config['conf_file_name'] = os.path.join(
                _execute_path, _logger_config['conf_file_name']
            )
        if len(_logger_config['logfile_path']) > 0 and _logger_config['logfile_path'][0] == '.':
            # Relative path
            _logger_config['logfile_path'] = os.path.join(
                _execute_path, _logger_config['logfile_path']
            )
        _logger = Logger.create_logger_by_dict(_logger_config)

    # Create an empty pipeline for testing
    _empty_pipeline = Pipeline('empty', '{}', logger=_logger)
    RunTool.set_global_var('EMPTY_PIPELINE', _empty_pipeline)

    # Create the test pipeline
    _jade_pipeline = Pipeline(
        'jade_search',
        _server_config['pipeline']['pipeline_config']['JadeSearch'],
        logger=_logger
    )
    RunTool.set_global_var('JADE_PIPELINE', _jade_pipeline)
Example #28
    def inception_v4_processer_initialize(cls, graph_var_name: str,
                                          processer_name: str):
        """
        Common initialization function for the inception_v4 image classification model

        @param {str} graph_var_name - global variable name of the frozen graph
        @param {str} processer_name - processer name
        """
        _graph = RunTool.get_global_var(graph_var_name)
        if _graph is None:
            _graph = dict()
            RunTool.set_global_var(graph_var_name, _graph)
        else:
            # Model already loaded; nothing more to do
            return

        _execute_path = RunTool.get_global_var('EXECUTE_PATH')
        if _execute_path is None:
            _execute_path = os.getcwd()
        _config = RunTool.get_global_var(
            'PIPELINE_PROCESSER_PARA')[processer_name]

        _pb_file = os.path.join(_execute_path, _config['frozen_graph'])
        _pb_labelmap = os.path.join(_execute_path, _config['labelmap'])

        # Basic classification parameters
        _graph['min_score'] = _config.get('min_score', 0.8)
        _graph['image_size'] = _config.get('image_size', 299)
        _graph['labelmap'] = Tools.load_label(_pb_labelmap,
                                              encoding=_config.get(
                                                  'encoding', 'utf-8'))

        _detection_graph = tf.Graph()
        with _detection_graph.as_default():
            _od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(_pb_file, 'rb') as _fid:
                _serialized_graph = _fid.read()
                _od_graph_def.ParseFromString(_serialized_graph)
                tf.import_graph_def(_od_graph_def, name='')

            _graph['session'] = tf.Session(graph=_detection_graph)

        # Image classification output tensor
        _graph['softmax_tensor'] = _detection_graph.get_tensor_by_name(
            'InceptionV4/Logits/Predictions:0')

        # Feature vector output tensor
        _graph['vertor_tensor'] = _detection_graph.get_tensor_by_name(
            'InceptionV4/Logits/AvgPool_1a/AvgPool:0')
Example #29
    def _running_thread_fun(self, run_id: str):
        """
        Thread function that runs the pipeline

        @param {str} run_id - run id
        """
        _run_id, _run_cache = self._get_run_cache(run_id)
        if _run_cache is None:
            _msg = '[Pipeline:%s] run_id [%s] not exists!' % (self.name,
                                                              run_id)
            self.log_error('Error: %s' % _msg)
            raise RuntimeError(_msg)

        _run_cache['thread_running'] = True
        try:
            while _run_cache['status'] == 'R':
                if _run_cache['node_status'] == 'R':
                    # The current node is still executing and has not returned a result yet
                    break

                # Execute the current node
                _next_id = self._run_node(_run_id, _run_cache['node_id'])
                if _next_id is None:
                    # Already the last node
                    break
                else:
                    # Check whether to run step by step
                    if _run_cache['is_step_by_step']:
                        # After each step, set the status to paused
                        self._set_status('P', _run_id)

                    if _next_id == '':
                        # Asynchronous mode; exit the thread directly
                        break
                    else:
                        # Set the context and execute the next node
                        _run_cache['node_id'] = _next_id
                        _run_cache['node_status'] = 'I'
                        _run_cache['node_status_msg'] = ''
                        RunTool.sleep(0.0001)
        except:
            # If an exception occurred in the thread, end the run
            _run_cache['node_status'] = 'E'
            self._set_status('E', _run_id)
            _run_cache['output'] = None
            raise
        finally:
            _run_cache['thread_running'] = False
Example #30
def client_simple_call_para_server_tsl(a,
                                       b,
                                       *args,
                                       c=10,
                                       d={'d1': 'd1value'},
                                       **kwargs):
    """
    Test a simple call with one-way TLS authentication (the client verifies the server certificate)
    """
    # Convert parameters
    _para_values = RunTool.get_current_function_parameter_values(
        is_simple_mode=True)
    _para_obj = SimpleGRpcTools.parameters_to_json(_para_values)
    _req_obj = SimpleGRpcTools.generate_request_obj(
        service_name='service_simple_call_para',
        para_json=_para_obj.para_json,
        has_para_bytes=_para_obj.has_para_bytes,
        para_bytes=_para_obj.para_bytes)
    # Send the request
    with open(_TEMP_DIR + '/../../simple_grpc/server.crt', 'rb') as f:
        _root_certificates = f.read()
    _connect_para = SimpleGRpcTools.generate_connect_para(
        ip='localhost',
        port=50053,
        is_use_ssl=True,
        root_certificates=_root_certificates)
    _cresult = SimpleGRpcTools.grpc_call(_connect_para, _req_obj)
    _cresult.return_obj = SimpleGRpcTools.json_to_object_by_para_mapping(
        _cresult.return_json, 'client_simple_call_para')
    return _cresult