Example #1
class SubscribesCleaner(lsu):
    """解耦清洗插件:国内IP调用很可能出现性能滑坡"""
    def __init__(self, debug=False):
        super(SubscribesCleaner, self).__init__()
        self.debug = debug
        self.keys = [REDIS_SECRET_KEY.format(s) for s in CRAWLER_SEQUENCE]
        self.rc = RedisClient().get_driver()

    def offload_task(self):
        for key_ in self.keys:
            for sub, _ in self.rc.hgetall(key_).items():
                self.work_q.put_nowait([sub, key_])

    def control_driver(self, sub_info):
        try:
            node_info: dict = subs2node(sub_info[0], False)
            if self.debug:
                print(node_info['subs'], len(node_info['node']))

            if len(node_info['node']) <= 3:
                self.rc.hdel(sub_info[-1], sub_info[0])
                logger.debug(f'>> Detach -> {sub_info[0]}')
        except (UnicodeDecodeError, TypeError) as e:
            logger.debug(
                f"Retry put the subscribe({sub_info}) to work queue -- {e}")
            self.work_q.put_nowait(sub_info)
        except Exception as e:
            logger.warning(f"{sub_info} -- {e}")
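The cleaner relies on a producer/consumer split inherited from lsu: offload_task fills work_q, control_driver drains it. The lsu base class is not among these examples, so the following is a minimal, self-contained sketch of that pattern; everything except the two method names is an assumption.

import queue

class MiniCleaner:
    """Toy stand-in for the lsu work-queue base class (hypothetical)."""

    def __init__(self):
        self.work_q = queue.Queue()

    def offload_task(self):
        # Producer: enqueue [subscribe, redis_key] pairs, as above.
        self.work_q.put_nowait(["https://example.com/sub/1", "secret:v2ray"])

    def control_driver(self, sub_info):
        # Consumer: the real code parses the link with subs2node().
        print(f"checking {sub_info[0]} from {sub_info[1]}")

    def run(self):
        self.offload_task()
        while not self.work_q.empty():
            self.control_driver(self.work_q.get_nowait())

MiniCleaner().run()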
Example #2
def apis_get_subs_num():
    from BusinessCentralLayer.middleware.redis_io import RedisClient
    rc = RedisClient()
    response = {
        "v2ray": rc.__len__(REDIS_SECRET_KEY.format('v2ray')),
        "ssr": rc.__len__(REDIS_SECRET_KEY.format('ssr'))
    }
    return response
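Calling rc.__len__(...) with an argument only works because RedisClient defines __len__ as an ordinary method; with the standard redis-py client the same count is a plain HLEN call. A sketch, assuming the hash layout used throughout these examples:

import redis

def subs_count(r: redis.Redis, key: str) -> int:
    # HLEN returns the number of fields in a hash, i.e. the number
    # of subscriptions stored under this key.
    return r.hlen(key)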
Example #3
    def startup_ddt_overdue(self, task_name: str = None):
        if task_name is None:
            for task_name in self.deploy_cluster:
                RedisClient().refresh(
                    key_name=REDIS_SECRET_KEY.format(task_name),
                    cross_threshold=3)
        else:
            RedisClient().refresh(key_name=REDIS_SECRET_KEY.format(task_name),
                                  cross_threshold=3)
Example #4
    @staticmethod
    def to_redis():
        r = RedisClient().get_driver()
        for key_cluster, cache in Middleware.cache_redis_queue.items():
            key_name = REDIS_SECRET_KEY.format(key_cluster)
            if cache:
                r.hset(key_name, mapping=cache)
        # logger.success(f">> PUSH -> Redis")

        for k in Middleware.cache_redis_queue.keys():
            Middleware.cache_redis_queue[k] = {}
Example #5
def flexible_distribute(subscribe, class_, end_life: str, driver_name=None):
    """Fan the captured subscription out to every storage backend.

    @param subscribe: subscription link to persist
    @param class_: task type, e.g. 'v2ray' or 'ssr'
    @param end_life: expiry timestamp of the link
    @param driver_name: name of the crawler that produced the link
    @return:
    """
    from datetime import datetime
    # data --> Database(Mysql)

    # data --> Database(MongoDB)

    # data --> Redis
    threading.Thread(target=RedisClient().add, args=(REDIS_SECRET_KEY.format(class_), subscribe, end_life)).start()

    # data --> csv
    with open(SERVER_PATH_DATABASE_FETCH, 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        # columns: storage time, driver name, subscribe, type
        now_ = str(datetime.now(TIME_ZONE_CN)).split('.')[0]
        writer.writerow([f'{now_}', f"{driver_name}", f'{subscribe}', class_])

    # data --> <Nginx> if linux or <Cache>
    try:
        with open(NGINX_SUBSCRIBE.format(class_), 'w', encoding='utf-8') as f:
            f.write(subscribe)
    except FileNotFoundError as e:
        print(e)
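flexible_distribute pushes the Redis write onto a background thread but appends to the CSV inline; if several threads call it at once, rows can interleave. A minimal guard, assuming a module-level lock is acceptable (the lock name is illustrative):

import csv
import threading

_CSV_LOCK = threading.Lock()  # hypothetical module-level guard

def append_row(path: str, row: list) -> None:
    # Serialize concurrent appends so csv rows never interleave.
    with _CSV_LOCK:
        with open(path, 'a', encoding='utf-8', newline='') as f:
            csv.writer(f).writerow(row)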
Example #6
def pop_subs_to_admin(class_: str):
    """

    @param class_:
    @return:
    """
    logger.debug("<SuperAdmin> -- 获取订阅")
    from BusinessLogicLayer.cluster.sailor import manage_task

    try:
        # Fetch the remaining links of this subscription type
        remain_subs: list = RedisClient().sync_remain_subs(
            REDIS_SECRET_KEY.format(class_))
        while True:
            # Return an error message when no links are available
            if len(remain_subs) == 0:
                logger.error(f'<SuperAdmin> -- no available <{class_}> subscription')
                return {'msg': 'failed', 'info': f"no available <{class_}> subscription"}
            else:
                # Pop the (most recently added) subscribe/end_life pair from the pool
                subs, end_life = remain_subs.pop()

                # Push the s-e pair into the buffer queue; the ddt refresh workflow will
                # delete it together with the expired links.
                # Buffering keeps operations beat-synced and stops hot or accidental
                # operations from hammering Redis.

                # That is, when the admin fetches a link through this interface, the
                # returned link is not removed from the pool right away; instead it is
                # marked and appended to the apollo buffer queue.
                # Every element in the apollo queue is a cache pending deletion; once ddt
                # fires, all buffered caches are cleared in one pass.

                # Rough quality check of the subscription
                if len(subs2node(subs=subs, cache_path=False,
                                 timeout=2)['node']) <= 3:
                    logger.debug(f"<check> BadLink -- {subs}")
                    continue

                # The beat-sync thread lock issues a pool rollback; only one atomic task is generated/synced
                threading.Thread(target=manage_task,
                                 kwargs={
                                     "class_": class_,
                                     "only_sync": True
                                 }).start()
                logger.success('Admin mode -- link dispatched')

                # Detach immediately: remove every subscription that belongs to the same account
                # beat_sync=True refreshes immediately; False defers (beat sync)
                threading.Thread(target=detach,
                                 kwargs={
                                     "subscribe": subs,
                                     'beat_sync': True
                                 }).start()

                return {
                    'msg': 'success',
                    'subscribe': subs,
                    'subsType': class_
                }
    except Exception as e:
        logger.exception(e)
        return {'msg': 'failed', 'info': str(e)}
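The comments above describe a mark-then-sweep scheme: links handed out are only buffered, and the ddt refresh removes every buffered entry in one pass. A toy model of that idea (all names illustrative, not from the project):

pool = {"subs-a": "2099-01-01", "subs-b": "2099-01-01"}
apollo = []  # buffer of links pending deletion

def hand_out(sub: str) -> str:
    apollo.append(sub)  # mark only; the pool stays intact
    return sub

def ddt_sweep() -> None:
    while apollo:
        pool.pop(apollo.pop(), None)  # one-shot cleanup

hand_out("subs-a")
ddt_sweep()
print(pool)  # {'subs-b': '2099-01-01'}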
Example #7
def detach(subscribe, beat_sync=False):
    """

    @param subscribe:
    @param beat_sync: 是否立即删除, True:立即删除,False:节拍同步,随ddt删除
    @return:
    """
    from faker import Faker
    from urllib.parse import urlparse

    # Extract the token from the subscription
    token = urlparse(subscribe).path

    r = RedisClient().get_driver()

    # Iterate over all task types
    for task in CRAWLER_SEQUENCE:
        # Iterate over the link pool of this type
        for sub in r.hgetall(REDIS_SECRET_KEY.format(task)).items():
            # Match the user token
            if token == urlparse(sub[0]).path:
                # With beat sync, remove the subscription immediately
                if beat_sync:
                    r.hdel(REDIS_SECRET_KEY.format(task), sub[0])
                    logger.debug(f'>> Detach -> {sub[0]}')
                # Otherwise mark the expiry time as already past; the link is removed
                # by the next ddt task on any node
                else:
                    r.hset(REDIS_SECRET_KEY.format(task), sub[0],
                           str(Faker().past_datetime()))
                break
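Writing str(Faker().past_datetime()) as the value makes the link look already expired, so the next ddt refresh sweeps it. A sketch of the expiry test such a refresh pass could apply, assuming the 'YYYY-MM-DD HH:MM:SS' format that both Faker and flexible_distribute produce here:

from datetime import datetime

def is_expired(end_life: str) -> bool:
    # A link is expired once its stored end_life lies in the past.
    return datetime.strptime(end_life, "%Y-%m-%d %H:%M:%S") <= datetime.now()

print(is_expired("2020-01-01 00:00:00"))  # True: marked links get swept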
Example #8
def detach(subscribe, at_once=False):
    """

    @param subscribe:
    @param at_once: 是否立即删除, True:立即删除,False:节拍同步,随ddt删除
    @return:
    """
    from faker import Faker
    from urllib.parse import urlparse
    from config import CRAWLER_SEQUENCE

    token = urlparse(subscribe).path

    r = RedisClient().get_driver()

    for task in CRAWLER_SEQUENCE:
        for sub in r.hgetall(REDIS_SECRET_KEY.format(task)).items():
            if token == urlparse(sub[0]).path:
                if at_once:
                    r.hdel(REDIS_SECRET_KEY.format(task), sub[0])
                else:
                    r.hset(REDIS_SECRET_KEY.format(task), sub[0],
                           str(Faker().past_datetime()))
                logger.debug(f'>> Detach -> {sub[0]}')
                break
Example #9
def to_admin(class_):
    # Fetch a link
    if class_ in CRAWLER_SEQUENCE:
        try:
            logger.debug("管理员模式--点取链接")

            key_name = REDIS_SECRET_KEY.format(class_)

            me = list(RedisClient().get_driver().hgetall(key_name).items())

            if len(me) >= 1:
                # Take the (latest) link from the pool without deleting it
                subs, end_life = me.pop()

                # Push the s-e pair into the buffer queue; the ddt refresh workflow will
                # delete it together with the expired links.
                # Buffering keeps operations beat-synced and stops hot or accidental
                # operations from hammering Redis.

                # That is, when the admin fetches a link with this privilege, the link is
                # not removed from the pool directly; it joins the buffer queue instead.
                # When ddt fires, the refresh mechanism deletes every expired link in the
                # pool in one pass; the elements in the apollo queue are flagged as stale,
                # and refresh pops them, matches them against the pool, and deletes any
                # match as well.

                if subs:
                    # from BusinessLogicLayer.dog import subs2node
                    # node_info: dict = subs2node(os.path.join(SERVER_DIR_DATABASE_CACHE, 'subs2node.txt'), subs)
                    # cache = {'subscribe': node_info.get('subs'), 'nodeInfo': node_info.get('node')}
                    logger.success('Admin mode -- link dispatched')
                    threading.Thread(target=step_admin_element,
                                     kwargs={
                                         "class_": class_
                                     }).start()

                    # at_once=True refreshes immediately; False defers (beat sync)
                    logger.info('>> Try to detach subs')
                    threading.Thread(target=detach,
                                     kwargs={
                                         "subscribe": subs,
                                         'at_once': True
                                     }).start()

                    return {
                        'msg': 'success',
                        'subscribe': subs,
                        'subsType': class_
                    }
                else:
                    return {'msg': 'failed'}
            # else:
            #     logger.error('Link pool is empty; urgently replenishing it')
            #     threading.Thread(target=step_admin_element, kwargs={"class_": class_}).start()
            #     return to_admin(class_)
        except Exception as e:
            logger.exception(e)
            return {'msg': 'failed'}
Example #10
def markup_admin_element(class_: str) -> str:
    """

    @param class_: seq str
    @return:
    """
    key_name = REDIS_SECRET_KEY.format(class_)

    r = RedisClient().get_driver()

    me = list(r.hgetall(key_name).items())

    if len(me) >= 1:
        # Take the (latest) link from the pool without deleting it
        subs, end_life = me.pop()
        flag = str(datetime.now(TIME_ZONE_CN)).split('.')[0]
        # Push the s-e pair into the buffer queue; the ddt refresh workflow will delete
        # it together with the expired links.
        # Buffering keeps operations beat-synced and stops hot or accidental operations
        # from hammering Redis.

        r.hset(key_name, key=subs, value=flag)
        # That is, when the admin fetches a link with this privilege, the link is not
        # removed from the pool directly; it joins the buffer queue instead.
        # When ddt fires, the refresh mechanism deletes every expired link in the pool
        # in one pass; the elements in the apollo queue are flagged as stale, and
        # refresh pops them, matches them against the pool, and deletes any match too.
        return subs
    else:
        return ''
Example #11
def detach(subscribe, at_once=False):
    """Detach every subscription that belongs to the same account.

    @param subscribe: subscription link whose account should be detached
    @param at_once: whether to delete immediately. True: delete at once;
        False: beat sync, removed along with the next ddt run
    @return:
    """
    from faker import Faker
    from urllib.parse import urlparse

    detach_subs = [
        [REDIS_SECRET_KEY.format(ft[2]), ft[1], str(Faker().past_datetime())]
        for ft in FlowTransferStation().fetch_all()
        if urlparse(subscribe).path in ft[1]
    ]
    r = RedisClient().get_driver()
    for sub in detach_subs:
        if at_once:
            r.hdel(sub[0], sub[1])
        else:
            r.hset(sub[0], sub[1], sub[-1])
    logger.debug(f'>> Detach -> {detach_subs}')
Example #12
        logger_local.info(usr_c)
        sp = SubscribeRequester()
        try:
            if "[1]V2Ray订阅链接" in usr_c:
                resp = sp.run(mode="v2ray")
            elif "[2]SSR订阅链接" in usr_c:
                resp = sp.run(mode="ssr")
            elif "[3]Trojan订阅连接" in usr_c:
                resp = sp.run(mode="trojan")
            elif "[4]查询可用链接" in usr_c:
                resp = sp.find_aviLink()
            elif "[5]返回" in usr_c:
                resp = True
            else:
                resp = False
        except TypeError:
            resp = True
        # return outside the handler: a bare `finally: return resp` would swallow
        # unexpected exceptions and NameError on an unbound resp
        return resp


# --------------------------------
# Permission verification
# --------------------------------
if ThreadPoolExecutor(max_workers=1).submit(NetChainReview().run).result():
    rc = RedisClient()
else:
    logger_local.warning("Network error")
    easygui.msgbox("Network error", title=TITLE)
    exit()
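The if/elif ladder above matches fixed menu labels; a table-driven dispatch is a compact alternative when the menu grows. A sketch with hypothetical labels and handlers, not the project's real menu:

HANDLERS = {
    "[1]V2Ray": lambda sp: sp.run(mode="v2ray"),
    "[2]SSR": lambda sp: sp.run(mode="ssr"),
    "[5]Back": lambda sp: True,
}

def dispatch(usr_c: str, sp) -> bool:
    # First label contained in the user's choice wins; False otherwise.
    for label, handler in HANDLERS.items():
        if label in usr_c:
            return handler(sp)
    return False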
Example #13
    def __init__(self, debug=False, kill_target: str = None):
        super(SubscribesCleaner, self).__init__()
        self.debug = debug
        self.keys = [REDIS_SECRET_KEY.format(s) for s in CRAWLER_SEQUENCE]
        self.rc = RedisClient().get_driver()
        self.kill_ = kill_target
Example #14
class SubscribesCleaner(lsu):
    """解耦清洗插件:国内IP调用很可能出现性能滑坡"""
    def __init__(self, debug=False, kill_target: str = None):
        super(SubscribesCleaner, self).__init__()
        self.debug = debug
        self.keys = [REDIS_SECRET_KEY.format(s) for s in CRAWLER_SEQUENCE]
        self.rc = RedisClient().get_driver()
        self.kill_ = kill_target

    def offload_task(self):
        for key_ in self.keys:
            for sub, _ in self.rc.hgetall(key_).items():
                self.work_q.put_nowait([sub, key_])

    def killer(self):
        """
        @todo redis批量移除或移动hash
        @return:
        """
        if self.apollo:
            for kill_ in self.apollo:
                self.rc.hdel(kill_[0], kill_[-1])
                logger.debug(f'>> Detach -> {kill_[-1]}')

    def control_driver(self, sub_info: List[str]):
        """

        @param sub_info: [subs,key_secret_class]
        @return:
        """
        try:
            # Detach the specified cluster
            if self.kill_ and self.kill_ in sub_info[0]:
                self.apollo.append([sub_info[-1], sub_info[0]])
            else:
                # Parse the subscription
                node_info: dict = subs2node(sub_info[0], False)
                # Print debug info
                if self.debug:
                    print(
                        f"check -- {node_info['subs']} -- {len(node_info['node'])}"
                    )
                # Detach the subscription when it carries too few nodes
                if len(node_info['node']) <= 3:
                    self.apollo.append([sub_info[-1], sub_info[0]])
        except (UnicodeDecodeError, TypeError) as e:
            logger.debug(
                f"Retry put the subscribe({sub_info}) to work queue -- {e}")

            # Retry each link up to 3 times, then flag it as timed out
            if self.temp_cache.get(sub_info[0]):
                self.temp_cache[sub_info[0]] += 1
            else:
                self.temp_cache[sub_info[0]] = 1
            if self.temp_cache[sub_info[0]] <= 3:
                self.work_q.put_nowait(sub_info)
            else:
                self.apollo.append([sub_info[-1], sub_info[0]])

        except Exception as e:
            logger.warning(f"{sub_info} -- {e}")
Example #15
def apis_get_subs_num() -> dict:
    return RedisClient().subs_info()
Example #16
def _sync_actions(
    class_: str,
    mode_sync: str = None,
    only_sync=False,
    beat_sync=True,
):
    """

    @param class_:
    @param mode_sync:  是否同步消息队列。False:同步本机任务队列,True:同步Redis订阅任务
    @param only_sync:
    @param beat_sync:
    @return:
    """
    logger.info(
        f"<TaskManager> Sync{mode_sync.title()} || 正在同步<{class_}>任务队列...")

    # TODO make the sync behaviour atomic
    rc = RedisClient()

    # Copy the queue with copy(); otherwise pop() would mutate the actions list itself
    task_list: list = actions.__all__.copy()
    random.shuffle(task_list)

    # Generate tasks locally and push them onto the message queue
    if mode_sync == 'upload':

        # Keep instantiating crawler tasks
        while True:
            if len(task_list) == 0:
                logger.success("<TaskManager> EmptyList -- local task list is empty or fully generated")
                break
            else:
                slave_ = task_list.pop()

                # Convert the task invocation into an exec-able snippet
                expr = f'from BusinessLogicLayer.cluster.slavers.actions import {slave_}\n' \
                       f'{slave_}(beat_sync={beat_sync}).run()'

                # Push the snippet onto the message queue
                rc.sync_message_queue(mode='upload', message=expr)

                # Beat-sync thread lock
                if only_sync:
                    logger.warning(
                        "<TaskManager> OnlySync -- beat-sync thread lock triggered; upload only one atomic task")
                    break

        logger.info(
            f"<TaskManager> local tasks ({len(actions.__all__)}) synced to the message queue; "
            f"the remaining work completes once the cluster picks up the subscription")

    # Sync tasks from the distributed message queue
    elif mode_sync == 'download':
        while True:

            # Check the sync state.
            # Guard against overload: stop syncing when the local task buffer is about
            # to exceed its capacity. _state is one of continue/offload/stop
            _state = _is_overflow(task_name=class_, rc=rc)
            if _state != 'continue':
                return _state

            # Fetch an atomic task; it should already be wrapped as an exec-able snippet
            # todo wrap the enqueue operation inside redis to get a sane loop-exit condition
            atomic = rc.sync_message_queue(mode='download')

            # If the atom is valid, sync the data
            if atomic:
                # Push the snippet onto the local Poseidon message queue
                Middleware.poseidon.put_nowait(atomic)
                logger.info(f'<TaskManager> offload atomic<{class_}>')

                # Beat-sync thread lock
                if only_sync:
                    logger.warning(
                        f"<TaskManager> OnlySync -- <{class_}> beat-sync thread lock triggered; download only one atomic task"
                    )
                    return 'offload'

            # Otherwise log a warning and leave the sync early
            else:
                logger.warning(f"<TaskManager> SyncFinish -- <{class_}> has no task to sync")
                break

    elif mode_sync == 'force_run':
        for slave_ in task_list:

            # force_run: suited to single-machine deployment or single-step debugging
            _state = _is_overflow(task_name=class_, rc=rc)
            # Guard against overflow: even in force_run mode the number of executed
            # tasks must not exceed the queue capacity
            if _state == 'stop':
                return 'stop'

            # Convert the task invocation into an exec-able snippet
            expr = f'from BusinessLogicLayer.cluster.slavers.actions import {slave_}\n' \
                   f'{slave_}(beat_sync={beat_sync}).run()'

            # Push the snippet onto the local Poseidon message queue
            Middleware.poseidon.put_nowait(expr)

            # force_run is still constrained by the beat-sync thread lock.
            # This serves the host's subscription top-up operation, which has higher
            # priority and force-stops the sync regardless of remaining queue capacity.
            if only_sync:
                logger.warning(
                    f"<TaskManager> OnlySync -- <{class_}> beat-sync thread lock triggered; download only one atomic task")
                return 'stop'
        else:
            # for-else: runs only when the loop finishes without an early return
            logger.success(f"<TaskManager> ForceCollect"
                           f" -- local preset tasks ({len(actions.__all__)}) loaded into the pending queue")
            return 'offload'
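Each atomic task is serialized as a two-line Python snippet and later exec'd by whatever drains Middleware.poseidon. The consumer is not among these examples, so the loop below is an assumption-level sketch of that side of the contract:

import queue

def consume_poseidon(poseidon: "queue.Queue") -> None:
    # Drain the local task queue and run each serialized snippet.
    # Every item has the shape produced above:
    #   from BusinessLogicLayer.cluster.slavers.actions import <slave>
    #   <slave>(beat_sync=...).run()
    while not poseidon.empty():
        expr = poseidon.get_nowait()
        exec(expr)  # trusted, self-generated source only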