Exemplo n.º 1
0
    def run_deploy() -> None:
        """
        Deploy the scheduled tasks listed in ENABLE_DEPLOY['tasks'].

        Builds a list of task descriptors ({"name", "api"}) from the
        permission flags and hands them to GeventSchedule.
        @return: None
        """
        try:
            # Task descriptors consumed by GeventSchedule.
            dockers = []

            # Load per-task permission flags from the central config.
            tasks = ENABLE_DEPLOY['tasks']
            for docker_name, permission in tasks.items():
                if permission:
                    dockers.append({
                        "name": docker_name,
                        # getattr() performs the same attribute lookup as the
                        # original eval(f"_cd.startup_{docker_name}") without
                        # arbitrary code execution.
                        "api": getattr(_cd, f"startup_{docker_name}")
                    })
            # The collector must always be loaded: the loop above added it
            # when its flag was truthy, so append it here when it was falsy.
            if not tasks['collector']:
                dockers.append({
                    "name": 'collector',
                    "api": _cd.startup_collector
                })
            # Launch the scheduler over the assembled task list.
            GeventSchedule(dockers=dockers)
        except KeyError:
            logger.critical('config中枢层配置被篡改,ENABLE_DEPLOY 配置中无”tasks“键值对')
            exit()
        # AttributeError added: getattr raises it where eval raised NameError.
        except (NameError, AttributeError):
            logger.critical('eval()或exec()语法异常,检测变量名是否不一致。')
Exemplo n.º 2
0
 def sync_launch_interval() -> dict:
     """
     Hot-read LAUNCH_INTERVAL and return the mapping with sanitized values.

     Mutates the configured mapping in place: coerces non-int intervals to
     int (falling back to 60s when coercion fails) and clamps anything
     below 60s up to 60s.
     @return: the corrected {task_name: interval_seconds} mapping
     @raise Exception: when an interval is empty or <= 1 (fatal misconfig)
     """
     # Read the config mapping (shared module-level object; edited in place).
     launch_interval = LAUNCH_INTERVAL
     for task_name, task_interval in launch_interval.items():
         # Missing value or a value at/below 1 second is a fatal misconfig.
         if (not task_interval) or (task_interval <= 1):
             logger.critical(
                 f"<launch_interval>--{task_name}设置出现致命错误,即将熔断线程。间隔为空或小于1")
             raise Exception(f"launch_interval[{task_name!r}] is empty or <= 1")
         # Non-integer (e.g. float) intervals are coerced to int.
         if not isinstance(task_interval, int):
             logger.warning(
                 f"<launch_interval>--{task_name}任务间隔应为整型int,参数已拟合")
             # ValueError added: int() raises it for non-numeric strings,
             # TypeError for non-convertible types; either way fall back 60s.
             try:
                 launch_interval.update({task_name: int(task_interval)})
             except (TypeError, ValueError):
                 launch_interval.update({task_name: 60})
         # Over-frequent schedules are clamped to at most one run per 60s.
         # (Deliberately tests the pre-coercion value, as the original did.)
         if task_interval < 60:
             logger.warning(
                 f"<launch_interval>--{task_name}任务频次过高,应不少于60/次,参数已拟合")
             launch_interval.update({task_name: 60})
     # No break exists, so the original `for...else` was equivalent to this.
     return launch_interval
Exemplo n.º 3
0
 def offload_task(self):
     """Move every (subscribe, hash-key) pair from the Redis hashes in
     ``self.keys`` onto the work queue."""
     for hash_key in self.keys:
         try:
             # hgetall returns the full field->value mapping; only the
             # field names (subscribe links) are needed downstream.
             for subscribe in self.rc.hgetall(hash_key):
                 self.work_q.put_nowait([subscribe, hash_key])
         except redis_error.ResponseError:
             logger.critical("Link pool is broken down.")
Exemplo n.º 4
0
 def _del_subs(self, key_: str, subs: str, err_) -> None:
     """Detach one subscription field from a Redis hash and echo why.

     @param key_: redis hash name the subscription lives under
     @param subs: subscription link (hash field) to delete
     @param err_: reason tag or exception included in the echo line
     """
     # Build the echo line up front; formatting the arguments is side-effect
     # free, so this does not change observable behavior.
     detach_message = f"detach -> {subs} {err_}"
     try:
         self.rc.hdel(key_, subs)
         terminal_echo(detach_message, 3)
     except redis_error.ConnectionError:
         logger.critical(
             "<SubscribeCleaner> The local network communication is abnormal."
         )
Exemplo n.º 5
0
    def run_deploy() -> None:
        """
        Deploy the timed jobs described by ENABLE_DEPLOY['tasks'].

        Routes the "collector" task onto CollectorScheduler and every other
        permitted task onto TasksScheduler, using intervals sanitized by
        _cd.sync_launch_interval(), then starts both schedulers.
        @return: None
        """
        # Mapping from config task name to its startup callable; "collector"
        # is routed to its own scheduler below instead of this table.
        task2function = {
            'ddt_decouple': _cd.startup_ddt_decouple,
            'ddt_overdue': _cd.startup_ddt_overdue,
        }
        # Pre-initialized so the KeyError handler's f-string is always bound,
        # even when ENABLE_DEPLOY itself lacks the 'tasks' key.
        tasks = {}
        try:
            # Moved inside the try: the handler below was written for exactly
            # this lookup, but the original performed it before the try and
            # the KeyError escaped uncaught.
            tasks = ENABLE_DEPLOY['tasks']
            # Initialize the two schedulers.
            docker_of_based_scheduler = TasksScheduler()
            docker_of_collector_scheduler = CollectorScheduler()
            # Sanitize configured intervals (coercion / clamping).
            interval = _cd.sync_launch_interval()
            # Register the jobs.
            for docker_name, permission in tasks.items():
                logger.info(
                    f"[Job] {docker_name} -- interval: {interval[docker_name]}s -- run: {permission}"
                )
                # The collector is mapped onto its dedicated scheduler
                # (comment out this branch to restore the old single-scheduler
                # strategy, per the original note).
                if docker_name == "collector":
                    docker_of_collector_scheduler.mapping_config({
                        'interval': interval[docker_name],
                        'permission': permission,
                    })
                    continue
                if permission:
                    docker_of_based_scheduler.add_job({
                        "name": docker_name,
                        "api": task2function[docker_name],
                        'interval': interval[docker_name],
                        'permission': True
                    })
            # Start the timers; running the collector requires at least one
            # other deployed task.
            docker_of_collector_scheduler.deploy_jobs()
            docker_of_based_scheduler.deploy_jobs()
        except ConnectionError:
            logger.warning(
                "<RedisIO> Network communication failure, please check the network connection."
            )
        except KeyError:
            logger.critical(f'config中枢层配置被篡改,ENABLE_DEPLOY 配置中无对应键值对{tasks}')
            sys.exit()
        except NameError:
            logger.critical('eval()或exec()语法异常,检测变量名是否不一致。')
Exemplo n.º 6
0
 def _sync_launch_interval() -> dict:
     """Hot-read LAUNCH_INTERVAL, sanitize each task interval in place, and
     return the mapping.

     @return: the corrected {task_name: interval_seconds} mapping
     @raise Exception: when an interval is empty or <= 1
     """
     launch_interval = LAUNCH_INTERVAL
     for task_name, seconds in launch_interval.items():
         # Empty or non-positive (<= 1) intervals are fatal.
         if not seconds or seconds <= 1:
             logger.critical(
                 f"<launch_interval>--{task_name}设置出现致命错误,即将熔断线程。间隔为空或小于1")
             raise Exception
         # Non-int values (e.g. floats) are rounded to the nearest int.
         if not isinstance(seconds, int):
             logger.warning(
                 f"<launch_interval>--{task_name}任务间隔应为整型int,参数已拟合")
             launch_interval[task_name] = round(seconds)
         # Over-frequent schedules are clamped to one run per 60 seconds
         # (tested against the pre-round value, as in the original).
         if seconds < 60:
             logger.warning(
                 f"<launch_interval>--{task_name}任务频次过高,应不少于60/次,参数已拟合")
             launch_interval[task_name] = 60
     # The loop has no break, so the original `for...else` always returned.
     return launch_interval
Exemplo n.º 7
0
    def get(self, key_name, pop_=0) -> str or bool:
        """
        Dispense one subscription link from the redis hash *key_name*.

        Every call force-closes the connection (self.kill()) on exit.
        @param pop_: index popped from the hash's item list (0 = first stored)
        @param key_name: task type, used to locate the redis hash name
        @return: a usable subscription link, or False when none is available
        """
        try:
            while True:
                # Pull every subscribe under key_name (hash name) in one shot.
                target_raw: dict = self.db.hgetall(key_name)
                try:
                    # Pop the entry farthest from the current time (earliest
                    # stored); entries are NOT sorted by end_life.
                    self.subscribe, end_life = list(
                        target_raw.items()).pop(pop_)

                    # end_life: requests_time (collection moment) + vip_crontab
                    # (airport membership / trial duration)

                    # This module was originally designed for airports with
                    # vip_crontab = 1 day; multi-day/month trials were added
                    # to the task queue later, but this dispatch logic was
                    # never updated and is kept as-is for future versions.

                    # If a later version supports dynamic end_life updates
                    # (i.e. real-time feedback of membership-rule changes by
                    # airport owners), sorting will be added here.

                    # Stale link -> loop next -> finally: db-del stale subscribe
                    if self.is_stale(end_life, beyond=3):
                        continue
                    # Usable link -> break off -> dispense -> finally: db-del subscribe
                    else:
                        return self.subscribe
                # IndexError means the redis queue is exhausted: no link left
                # to dispense, so stop the loop.
                except IndexError:
                    logger.critical("{}.get() IndexError".format(
                        self.__class__.__name__))
                    return False
                # Detach the association on every path, including IndexError.
                # NOTE(review): on a first-iteration IndexError self.subscribe
                # has not been set by this method — presumably initialized in
                # __init__; confirm before relying on it.
                finally:
                    from src.BusinessCentralLayer.middleware.subscribe_io import detach
                    detach(self.subscribe, beat_sync=True)
        finally:
            # Force-close the redis connection.
            self.kill()
Exemplo n.º 8
0
    def control_driver(self, sub_info: List[str]):
        """
        Inspect one subscription and detach it when it matches the kill
        target or decodes to too few nodes; retry transient decode failures.

        @param sub_info: [subs, key_secret_class]
        @return: None
        """
        try:
            # Detach the cluster explicitly targeted via self.kill_.
            if self.kill_ and self.kill_ in sub_info[0]:
                self._del_subs(sub_info[-1], sub_info[0], "target")

            else:
                # Parse the subscription into node info.
                node_info: dict = subs2node(sub_info[0], False)
                # Print debug information.
                if self.debug:
                    print(
                        f"check -- {node_info['subs']} -- {node_info['node'].__len__()}"
                    )
                # Too few nodes (<= 4): decouple the subscription.
                if node_info['node'].__len__() <= 4:
                    self._del_subs(sub_info[-1], sub_info[0], "decouple")

        # BUG FIX: `except UnicodeDecodeError or TypeError` evaluated the
        # `or` first and only ever caught UnicodeDecodeError; a tuple catches
        # both types as intended.
        except (UnicodeDecodeError, TypeError) as e:
            logger.debug(
                f"Retry put the subscribe({sub_info}) to work queue -- {e}")

            # Retry each link up to 3 times, then mark it as timed out.
            self.temp_cache[sub_info[0]] = self.temp_cache.get(sub_info[0], 0) + 1
            if self.temp_cache[sub_info[0]] <= 3:
                self.work_q.put_nowait(sub_info)
            else:
                self._del_subs(sub_info[-1], sub_info[0], e)

        except SystemExit:
            logger.critical("请关闭系统代理后再执行订阅清洗操作")
        except Exception as e:
            logger.warning(f"{sub_info} -- {e}")
            # BUG FIX: _del_subs requires an err_ argument; the original
            # two-argument call raised TypeError inside this handler.
            self._del_subs(sub_info[-1], sub_info[0], e)