def startup() -> None:
    """Launch the enabled system processes and block until they exit.

    Spawns one child process for the scheduled deployment task (when
    ``ENABLE_DEPLOY['global']`` is set) and one for the Flask API server
    (when ``ENABLE_SERVER`` is set), starts them all, then joins.
    On Ctrl-C every child process is terminated.
    """
    process_list = []
    try:
        # Scheduled <single-process multi-threaded> deployment task
        if ENABLE_DEPLOY['global']:
            process_list.append(
                multiprocessing.Process(target=_SystemEngine.run_deploy, name='deploymentTimingTask'))
        # Flask API server
        if ENABLE_SERVER:
            process_list.append(
                multiprocessing.Process(target=_SystemEngine.run_server, name='deploymentFlaskAPI'))
        # Start every child process
        for process_ in process_list:
            logger.success(f'<SystemProcess> Startup -- {process_.name}')
            process_.start()
        # Block until all children exit
        for process_ in process_list:
            process_.join()
    # BUG FIX: the original `except TypeError or AttributeError` evaluates
    # `TypeError or AttributeError` to just `TypeError` (the `or`
    # short-circuits on the first truthy class), so AttributeError was never
    # caught here. A tuple catches both as intended.
    except (TypeError, AttributeError) as e:
        logger.exception(e)
        send_email(f"[程序异常终止]{str(e)}", to_='self')
    except KeyboardInterrupt:
        # FIXME ensure termination only when no inter-process communication is in flight
        logger.debug('<SystemProcess> Received keyboard interrupt signal')
        for process_ in process_list:
            process_.terminate()
    finally:
        logger.success('<SystemProcess> End the V2RayCloudSpider')
def _deploy_jobs(self):
    """Register each docker's API callable with the blocking scheduler and run it.

    @return:
    """
    try:
        for docker in self.dockers:
            job_id = docker['name']
            seconds = self.interval_[job_id]
            # Register the job with a jittered interval trigger
            self.scheduler_.add_job(
                func=docker['api'],
                trigger=IntervalTrigger(seconds=seconds),
                id=job_id,
                jitter=5
            )
            # Log what was scheduled
            logger.info(
                f'<BlockingScheduler> Add job -- <{job_id}>'
                f' IntervalTrigger: {seconds}s'
            )
        # Start the scheduler (blocks until interrupted)
        self.scheduler_.start()
    except KeyboardInterrupt as err:
        logger.stop('Forced stop ||{}'.format(err))
    except Exception as err:
        logger.exception(f'<BlockingScheduler>||{err}')
def pop_subs_to_admin(class_: str):
    """Pop one quality-checked subscription link of the given type for an admin.

    @param class_: subscribe type, e.g. `ssr` or `v2ray`
    @return: {'msg': 'success', 'subscribe': ..., 'subsType': ...} on success,
        or {'msg': 'failed', 'info': ...} when the pool is empty or an error occurs
    """
    logger.debug("<SuperAdmin> -- 获取订阅")
    from BusinessLogicLayer.cluster.sailor import manage_task
    try:
        # Remaining links of this subscription type
        remain_subs: list = RedisClient().sync_remain_subs(
            REDIS_SECRET_KEY.format(class_))
        while True:
            # Guard clause: no usable links left -> report failure
            # (idiom fix: truthiness test instead of `.__len__() == 0`)
            if not remain_subs:
                logger.error(f'<SuperAdmin> -- 无可用<{class_}>订阅')
                return {'msg': 'failed', 'info': f"无可用<{class_}>订阅"}
            # Pop the most recently added (subscribe, end_life) pair
            subs, end_life = remain_subs.pop()
            # The popped link is NOT removed from the pool directly. It is
            # marked and pushed onto the apollo buffer queue, which the ddt
            # refresh workflow clears together with expired links. The buffer
            # keeps the beat in sync and shields Redis from hot/mis-operations.
            # Rough quality check: discard subscriptions resolving to <= 3 nodes
            # (idiom fix: `len(...)` instead of `.__len__()`)
            if len(subs2node(subs=subs, cache_path=False, timeout=2)['node']) <= 3:
                logger.debug(f"<check> BadLink -- {subs}")
                continue
            # Beat-synchronized rollback of the pool: generate/sync exactly
            # one atomic task on a worker thread
            threading.Thread(target=manage_task, kwargs={
                "class_": class_,
                "only_sync": True
            }).start()
            logger.success('管理员模式--链接分发成功')
            # Detach immediately: remove every subscription of the same account.
            # beat_sync=True refreshes now, False defers (beat sync).
            threading.Thread(target=detach, kwargs={
                "subscribe": subs,
                'beat_sync': True
            }).start()
            return {
                'msg': 'success',
                'subscribe': subs,
                'subsType': class_
            }
    except Exception as e:
        logger.exception(e)
        return {'msg': 'failed', 'info': str(e)}
def to_sqlite3(docker: dict):
    """Flatten a docker mapping into value tuples and hand it to the transfer station.

    @param docker: {uuid1: {key1: value1, key2: value2, ...},
                    uuid2: {key1: value1, key2: value2, ...}, ...}  len >= 1;
        each inner dict's values become one tuple row.
    @return:
    """
    try:
        # Idiom fix: truthiness test replaces `docker.keys().__len__() >= 1`.
        # Convert {uuid: {...}} into a list of value tuples for storage.
        if docker:
            docker = [tuple(data.values()) for data in docker.values()]
        # logger.success(f'>> STORING -> Sqlite3')
    except Exception as e:
        logger.exception(e)
    finally:
        # NOTE(review): this runs even when the conversion was skipped (empty
        # dict) or failed, so `docker` may still be the raw mapping here —
        # presumably FlowTransferStation tolerates that; confirm.
        FlowTransferStation(docker=docker).add()
def run(self):
    """Drive one sign-up crawl: register, wait for the dashboard, pull links.

    Opens the registration page with a fresh webdriver, signs up, then —
    depending on `self.hyper_params` — extracts the v2ray and/or ssr
    subscription links via `load_any_subscribe`. The driver is always quit.
    """
    logger.info("DO -- <{}>:beat_sync:{}".format(self.__class__.__name__, self.beat_sync))
    # Fresh webdriver configured by the subclass/options hook
    api = self.set_spider_option()
    api.get(self.register_url)
    try:
        self.sign_up(api)
        # Wait (up to 20s) for the post-signup dashboard card to render
        self.wait(api, 20, "//div[@class='card-body']")
        # get v2ray link
        if self.hyper_params['v2ray']:
            self.load_any_subscribe(
                api,
                "//div[@class='buttons']//a[contains(@class,'v2ray')]",
                'data-clipboard-text',
                'v2ray'
            )
        # get ssr link
        if self.hyper_params['ssr']:
            self.load_any_subscribe(
                api,
                """//a[@onclick="importSublink('ssr')"]/..//a[contains(@class,'copy')]""",
                'data-clipboard-text',
                'ssr'
            )
        # Other link types are recognized but not implemented yet:
        # if self.hyper_params['trojan']: ...
        # if self.hyper_params['kit']: ...
        # if self.hyper_params['qtl']: ...
    except TimeoutException:
        # Page element never appeared within the wait budget
        logger.error(f'>>> TimeoutException <{self.__class__.__name__}> -- {self.register_url}')
    except WebDriverException as e:
        logger.exception(f">>> Exception <{self.__class__.__name__}> -- {e}")
    finally:
        # Middleware.hera.put_nowait("push")
        # Always release the browser session
        api.quit()
def __init__(self):
    """Choose the disaster-tolerance redis target and open a client to it.

    Uses the configured DDT slave when available; otherwise falls back to
    the master host with the backup database (`db + 1`).
    """
    super(RedisDataDisasterTolerance, self).__init__()
    from BusinessCentralLayer.setting import REDIS_SLAVER_DDT
    if not REDIS_SLAVER_DDT.get('host'):
        logger.warning('未设置数据容灾服务器,该职能将由Master执行')
        # BUG FIX: the original did `redis_virtual = REDIS_MASTER` and then
        # `redis_virtual.update({'db': ...+1})`, which mutated the shared
        # REDIS_MASTER dict in place — every instantiation incremented the
        # global master `db`. Copy before redirecting to the backup db.
        redis_virtual = dict(REDIS_MASTER)
        redis_virtual['db'] = redis_virtual['db'] + 1
        logger.debug("备份重定向 --> {}".format(redis_virtual))
    else:
        redis_virtual = REDIS_SLAVER_DDT
    # Buffer holding the key/value pairs staged for migration
    self.docker = {}
    try:
        self.acm = RedisClient(host=redis_virtual['host'], port=redis_virtual['port'],
                               password=redis_virtual['password'])
        logger.info("DDT: Master({}) -> Slaver({})".format(REDIS_MASTER['host'], redis_virtual['host']))
    except redis.exceptions.ConnectionError as e:
        logger.exception(e)
    finally:
        # Remembered for diagnostics in later operations
        self.redis_virtual = redis_virtual
def run(self, class_: str) -> None:
    """
    Data disaster tolerance or one class_
    @param class_: subscribe type `ssr` or `v2ray` or `trojan` ...
    @return:
    """
    key_name = REDIS_SECRET_KEY.format(class_)
    # Drop entries past the expiry threshold before copying
    self.refresh(key_name, cross_threshold=6)

    # Copy the whole hash from master into the local buffer ... -> self
    self.docker.update(self.db.hgetall(key_name))
    # logger.info("{} {}".format(key_name, subscribe))

    # Push the buffered mapping to the slave  acm <- ...
    try:
        self.acm.get_driver().hset(key_name, mapping=self.docker)
    except redis.exceptions.DataError:
        logger.warning(f'({class_}):缓存可能被击穿或缓存为空,请系统管理员及时维护链接池!')
    except redis.exceptions.ConnectionError:
        logger.error(f"redis-slave {self.redis_virtual} 可能宕机")
    except Exception as e:
        logger.exception(e)