Esempio n. 1
0
def validate_support_config(cluster_name):
    """
    Validate the provided non-encrypted helm chart values files for the
    support chart of a specific cluster by rendering the chart with
    `helm template`.

    Exits the process (status 1) with helm's output if rendering fails.
    """
    _prepare_helm_charts_dependencies_and_schemas()

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    if cluster.support:
        print_colour(
            f"Validating non-encrypted support values files for {cluster_name}..."
        )

        cmd = [
            "helm",
            "template",
            str(helm_charts_dir.joinpath("support")),
        ]

        for values_file in cluster.support["helm_chart_values_files"]:
            cmd.append(f"--values={config_file_path.parent.joinpath(values_file)}")

        # Render once with *all* values files applied (matching
        # validate_hub_config). Rendering inside the loop also ran helm
        # against incomplete, cumulative subsets of the values files, which
        # could fail spuriously when files only validate in combination.
        try:
            subprocess.check_output(cmd, text=True)
        except subprocess.CalledProcessError as e:
            print(e.stdout)
            sys.exit(1)
    else:
        print_colour(f"No support defined for {cluster_name}. Nothing to validate!")
Esempio n. 2
0
async def deal_comments_by_id(spider, uid):
    """
    Interactive command loop for acting on a single comment (by id):
    vote up, reset to neutral, or (not yet implemented) reply.

    :param spider: data-extraction client exposing ``endorse_comment``
    :param uid: id of the comment being acted on
    :return: None
    """
    while True:
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        com2_cmd = input(help_comments2()).lower()
        com2_cmd = com2_cmd.split(':')
        if not com2_cmd[0]:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(com2_cmd[0])
        if com2_cmd[0] == 'back':
            break
        elif com2_cmd[0] == 'up':
            # Second argument False appears to mean "vote up" — TODO confirm
            # against endorse_comment's signature.
            result = await spider.endorse_comment(uid, False)
            print_vote_comments(result, 'up')
        elif com2_cmd[0] == 'neutral':
            result = await spider.endorse_comment(uid, True)
            print_colour(result)
            print_vote_comments(result, 'neutral')
        elif com2_cmd[0] == 'reply' and len(com2_cmd) == 2:
            # TODO: replying to a comment is not implemented yet; `data` is
            # the intended request payload and is currently unused.
            data = {
                'content': com2_cmd[1],
                'replyToId': uid,
            }
            print_colour('功能还在开发中...', 'red')
            continue
        else:
            print_colour('输入有误!', 'red')
            continue
    pass
Esempio n. 3
0
    def deploy_support(self, cert_manager_version):
        """
        Provision cert-manager (CRDs plus helm release) and then install the
        support chart for this cluster, decrypting any encrypted values
        files for the duration of the helm run.
        """
        print_colour("Provisioning cert-manager...")
        crds_manifest = (
            "https://github.com/cert-manager/cert-manager/releases/download/"
            f"{cert_manager_version}/cert-manager.crds.yaml"
        )
        subprocess.check_call(["kubectl", "apply", "-f", crds_manifest])
        subprocess.check_call([
            "helm",
            "upgrade",
            "cert-manager",  # given release name (aka. installation name)
            "cert-manager",  # helm chart to install
            "--repo=https://charts.jetstack.io",
            "--install",
            "--create-namespace",
            "--namespace=cert-manager",
            f"--version={cert_manager_version}",
        ])
        print_colour("Done!")

        print_colour("Provisioning support charts...")

        support_dir = Path(__file__).parent.parent.joinpath(
            "helm-charts", "support")
        subprocess.check_call(["helm", "dep", "up", support_dir])

        # The chart's own encrypted secret values come first, followed by the
        # cluster's values files (which may also be encrypted).
        values_file_paths = [
            support_dir.joinpath("enc-support.secret.values.yaml")
        ]
        values_file_paths += [
            self.config_path.joinpath(p)
            for p in self.support["helm_chart_values_files"]
        ]

        with get_decrypted_files(values_file_paths) as values_files:
            helm_cmd = [
                "helm",
                "upgrade",
                "--install",
                "--create-namespace",
                "--namespace=support",
                "--wait",
                "support",
                str(support_dir),
            ]
            helm_cmd.extend(f"--values={vf}" for vf in values_files)

            print_colour(f"Running {' '.join([str(c) for c in helm_cmd])}")
            subprocess.check_call(helm_cmd)

        print_colour("Done!")
Esempio n. 4
0
async def main():
    """
    Entry point: validate settings, log in, show the logo and run the
    interactive client, always closing the client session on the way out.
    """
    client = None
    try:
        check_setting()
        client = await login(USER, PASSWORD)
        print_logo()
        await run(client)
    finally:
        print_colour('欢迎再次使用')
        # Give pending callbacks a chance to run before closing.
        await asyncio.sleep(0)
        # Only close when login succeeded; previously a failure before/inside
        # login() raised UnboundLocalError on `client` here, masking the
        # original error.
        if client is not None:
            await client.close()
def print_logo():
    """Clear the terminal and print the ASCII-art logo in ultramarine."""
    # Relies on the POSIX `clear` command; this is a no-op / prints garbage
    # on platforms without it (e.g. plain Windows cmd).
    os.system("clear")
    logo = '''
                                                                                             ;$$;
                                                                                        #############
                                                                                   #############;#####o
                                                          ##                 o#########################
                                                          #####         $###############################
                                                          ##  ###$ ######!    ##########################
                               ##                        ###    $###          ################### ######
                               ###                      ###                   ##o#######################
                              ######                  ;###                    #### #####################
                              ##  ###             ######                       ######&&################
                              ##    ###      ######                            ## ############ #######
                             o##      ########                                  ## ##################
                             ##o                ###                             #### #######o#######
                             ##               ######                             ###########&#####
                             ##                ####                               #############!
                            ###                                                     #########
                   #####&   ##                                                      o####
                 ######     ##                                                   ####*
                      ##   !##                                               #####
                       ##  ##*                                            ####; ##
                        #####                                          #####o   #####
                         ####                                        ### ###   $###o
                          ###                                            ## ####! $###
                          ##                                            #####
                          ##                                            ##
                         ;##                                           ###                           ;
                         ##$                                           ##
                    #######                                            ##
                #####   &##                                            ##
              ###       ###                                           ###
             ###      ###                                             ##
             ##     ;##                                               ##
             ##    ###                                                ##
              ### ###                                                 ##
                ####                                                  ##
                 ###                                                  ##
                 ##;                                                  ##
                 ##$                                                 ##&
                  ##                                                 ##
                  ##;                                               ##
                   ##                                              ##;
                    ###                                          ###         ##$
                      ###                                      ###           ##
       ######################                              #####&&&&&&&&&&&&###
     ###        $#####$     ############&$o$&################################
     #                               $&########&o
    '''
    print_colour(logo, 'ultramarine')
Esempio n. 6
0
def deploy(cluster_name, hub_name, config_path, dask_gateway_version):
    """
    Deploy one hub (when hub_name is given) or all hubs in a given cluster.

    :param cluster_name: name of the cluster whose config file to load
    :param hub_name: optional name of a single hub to deploy
    :param config_path: path to the (encrypted) global deployer config
    :param dask_gateway_version: version passed through to Hub.deploy
    :raises ValueError: if hub_name does not match any hub in the cluster
    """
    validate_cluster_config(cluster_name)
    validate_hub_config(cluster_name, hub_name)
    assert_single_auth_method_enabled(cluster_name, hub_name)

    with get_decrypted_file(config_path) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Most of our hubs use Auth0 for Authentication. This lets us programmatically
    # determine what auth provider each hub uses - GitHub, Google, etc. Without
    # this, we'd have to manually generate credentials for each hub - and we
    # don't want to do that. Auth0 domains are tied to a account, and
    # this is our auth0 domain for the paid account that 2i2c has.
    auth0 = config["auth0"]

    k = KeyProvider(auth0["domain"], auth0["client_id"],
                    auth0["client_secret"])

    # Each hub needs a unique proxy.secretToken. However, we don't want
    # to manually generate & save it. We also don't want it to change with
    # each deploy - that causes a pod restart with downtime. So instead,
    # we generate it based on a single secret key (`PROXY_SECRET_KEY`)
    # combined with the name of each hub. This way, we get unique,
    # cryptographically secure proxy.secretTokens without having to
    # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`.
    # However, if `PROXY_SECRET_KEY` leaks, that means all the hub's
    # proxy.secretTokens have leaked. So let's be careful with that!
    SECRET_KEY = bytes.fromhex(config["secret_key"])

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    with cluster.auth():
        hubs = cluster.hubs
        if hub_name:
            hub = next((hub for hub in hubs if hub.spec["name"] == hub_name),
                       None)
            # Previously a non-existent hub_name slipped through as None and
            # crashed below with an opaque AttributeError on hub.spec.
            if hub is None:
                raise ValueError(
                    f"Hub {hub_name} not found in cluster {cluster_name}"
                )
            print_colour(f"Deploying hub {hub.spec['name']}...")
            hub.deploy(k, SECRET_KEY, dask_gateway_version)
        else:
            for i, hub in enumerate(hubs):
                print_colour(
                    f"{i+1} / {len(hubs)}: Deploying hub {hub.spec['name']}..."
                )
                hub.deploy(k, SECRET_KEY, dask_gateway_version)
Esempio n. 7
0
def assert_single_auth_method_enabled(cluster_name, hub_name):
    """
    For each hub of a specific cluster, it asserts that only a single auth
    method is enabled. An error is raised when an authenticator
    other than Auth0 is enabled and `auth0` is not explicitly disabled.

    :param cluster_name: name of the cluster whose config file to load
    :param hub_name: optional name restricting validation to a single hub
    :raises ValueError: on a hub with a non-auth0 authenticator class while
        auth0 is still enabled in its cluster config
    """
    _prepare_helm_charts_dependencies_and_schemas()

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    # Validate a single hub when hub_name is given, otherwise every hub.
    hubs = []
    if hub_name:
        hubs = [h for h in cluster.hubs if h.spec["name"] == hub_name]
    else:
        hubs = cluster.hubs

    for i, hub in enumerate(hubs):
        print_colour(
            f"{i+1} / {len(hubs)}: Validating authenticator config for {hub.spec['name']}..."
        )

        # Default assumption: auth0, unless a values file overrides the
        # authenticator class below.
        authenticator_class = "auth0"
        for values_file_name in hub.spec["helm_chart_values_files"]:
            # Encrypted values files ("secret" in the name) are skipped here.
            if "secret" not in os.path.basename(values_file_name):
                values_file = config_file_path.parent.joinpath(
                    values_file_name)
                # Load the hub extra config from its specific values files.
                # NOTE(review): yaml.load is handed a Path, not an open
                # stream — presumably ruamel.yaml, which accepts paths; confirm.
                config = yaml.load(values_file)
                # Check if there's config that specifies an authenticator class
                try:
                    # Non-basehub charts nest basehub config one level deeper.
                    if hub.spec["helm_chart"] != "basehub":
                        authenticator_class = config["basehub"]["jupyterhub"][
                            "hub"]["config"]["JupyterHub"][
                                "authenticator_class"]
                    else:
                        authenticator_class = config["jupyterhub"]["hub"][
                            "config"]["JupyterHub"]["authenticator_class"]
                except KeyError:
                    # This values file carries no authenticator override.
                    pass

        # If the authenticator class is other than auth0, then raise an error
        # if auth0 is not explicitly disabled from the cluster config
        if authenticator_class != "auth0" and hub.spec["auth0"].get(
                "enabled", True):
            raise ValueError(
                f"Please disable auth0 for {hub.spec['name']} hub before using another authenticator class!"
            )
def print_question(question: dict):
    """
    Print a question's title and its detail text (HTML converted to
    plain text), framed by purple separator lines.

    :param question: dict with at least 'title' and 'detail' keys
    :return: None
    """
    detail_text = html2text.html2text(question['detail'])
    separator = '*' * 50
    print_colour(separator, 'purple')
    print_colour(f"标题:{question['title']}")
    print_colour('问题详情:')
    print_colour(detail_text)
    print_colour(separator, 'purple')
Esempio n. 9
0
async def login(user, password):
    """
    Log in to Zhihu, preferring a cached cookie file when one exists.

    :param user: account name
    :param password: account password
    :return: an authenticated ZhihuClient
    """
    client = ZhihuClient(user, password)
    # Reuse cached cookies when a cookie file is already present.
    load_cookies = os.path.exists(client.cookie_file)
    # Without cookies we need real credentials configured.
    if not load_cookies and (not USER or not PASSWORD):
        print_colour('请正确配置USER, PASSWORD', 'red')
        sys.exit()
    await client.login(load_cookies=load_cookies)
    return client
def print_save(article: dict):
    """
    Save an article to a local HTML file and print the destination path.

    :param article: dict with 'id', 'content' and nested 'question'->'title'
    :return: None
    """
    uid = article.get('id')
    title = article.get('question').get('title')
    content = article.get('content')
    save_dir = SAVE_DIR or '/tmp/zhihu_save'
    # Previously open() raised FileNotFoundError when the target directory
    # did not exist yet (e.g. on first use of the default /tmp/zhihu_save).
    os.makedirs(save_dir, exist_ok=True)
    # NOTE(review): a title containing '/' would still produce a bad path —
    # consider sanitizing the filename.
    file = f'{save_dir}/{title}_{uid}.html'
    with open(file, 'w') as f:
        head = '<head> <meta charset="utf-8"><meta http-equiv="Content-Type"' \
               ' content="text/html; charset=utf-8" /> </head>'
        f.write(head)
        f.write(content)
    print_colour(f'保存成功!-->{file}')
Esempio n. 11
0
def validate_hub_config(cluster_name, hub_name):
    """
    Validates the provided non-encrypted helm chart values files for each hub of
    a specific cluster.

    :param cluster_name: name of the cluster whose config file to load
    :param hub_name: optional name restricting validation to a single hub
    """
    _prepare_helm_charts_dependencies_and_schemas()

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    # Validate a single hub when hub_name is given, otherwise every hub.
    hubs = []
    if hub_name:
        hubs = [h for h in cluster.hubs if h.spec["name"] == hub_name]
    else:
        hubs = cluster.hubs

    for i, hub in enumerate(hubs):
        print_colour(
            f"{i+1} / {len(hubs)}: Validating non-encrypted hub values files for {hub.spec['name']}..."
        )

        cmd = [
            "helm",
            "template",
            str(helm_charts_dir.joinpath(hub.spec["helm_chart"])),
        ]
        # Encrypted values files ("secret" in the name) are skipped here.
        for values_file in hub.spec["helm_chart_values_files"]:
            if "secret" not in os.path.basename(values_file):
                cmd.append(
                    f"--values={config_file_path.parent.joinpath(values_file)}"
                )
        # Workaround the current requirement for dask-gateway 0.9.0 to have a
        # JupyterHub api-token specified, for updates if this workaround can be
        # removed, see https://github.com/dask/dask-gateway/issues/473.
        if hub.spec["helm_chart"] in ("daskhub", "binderhub"):
            cmd.append(
                "--set=dask-gateway.gateway.auth.jupyterhub.apiToken=dummy")
        # Render the chart; on failure, surface helm's output and abort.
        try:
            subprocess.check_output(cmd, text=True)
        except subprocess.CalledProcessError as e:
            print(e.stdout)
            sys.exit(1)
Esempio n. 12
0
async def run(client):
    """
    Top-level interactive loop: greet the user and dispatch main-menu
    commands until exit.

    :param client: authenticated ZhihuClient
    :return: None
    """
    spider = DataExtractor(client)
    output = await spider.get_self_info()
    print_colour(f'hello {output["name"]} 欢迎使用terminal-zhihu!', 'ultramarine')
    flag = True
    while flag:
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        cmd = input(help_main()).lower()
        if not cmd:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(cmd)
        if cmd == 'remd':
            # Browse recommended articles.
            await deal_remd(spider)
        elif cmd == 'aten':
            # TODO: following-feed view is not implemented yet.
            print_colour('功能还在开发中...', 'red')
            continue
        else:
            print_colour('输入有误!', 'red')
            continue
Esempio n. 13
0
async def deal_article(spider, article):
    """
    Interactive command loop for a single article: vote/thank, browse its
    comments, save it locally, or jump to its question.

    :param spider: data-extraction client
    :param article: article dict (with 'id', 'type' and nested 'question')
    :return: None
    """
    while True:
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        arl_cmd = input(help_article()).lower()
        if not arl_cmd:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(arl_cmd)
        if arl_cmd == 'back':
            break

        elif arl_cmd in ('up', 'down', 'neutral', 'thank', 'unthank'):

            # Map the command to the matching spider method name and call it.
            uid = article.get('id')
            func = get_com_func(arl_cmd)
            result = await getattr(spider, func)(uid)
            print_vote_thank(result, arl_cmd)
            continue
        elif arl_cmd == 'comment':
            # Show this article's comments, then drop into the comment loop.
            typ = article['type']
            uid = article.get('id')
            result, paging = await spider.get_comments(uid, typ)
            print_comments(result)
            await deal_comments(spider, result, paging)
            continue
        elif arl_cmd == 'save':
            print_save(article)
            continue
        elif arl_cmd == 'enshrine':
            # TODO: bookmarking an answer is not implemented yet.
            print_colour('功能还在开发中...', 'red')
            continue
        elif arl_cmd == 'question':
            await deal_question(spider,
                                article.get('question').get('id'),
                                article.get('id'))
            continue
        else:
            print_colour('输入有误!', 'red')
            continue
def print_vote_comments(output: dict, typ: str):
    """
    Print the outcome of voting on a comment ('up' or 'neutral'),
    or the API error when one is reported.

    :param output: API response; may carry an 'error' message
    :param typ: which vote action was performed
    :return: None
    """
    error = output.get('error')
    if error:
        print_colour(error, 'red')
        return
    if typ == 'up':
        print_colour(f'点赞评论成功!被赞总数{output["vote_count"]}')
    elif typ == 'neutral':
        print_colour(f'保持中立!被赞总数{output["vote_count"]}')
Esempio n. 15
0
async def deal_remd(spider):
    """
    Interactive loop over recommended articles: fetch/refresh the list,
    read an article, or jump into an article's question.

    :param spider: data-extraction client
    :return: None
    """
    # is_print marks that a fresh batch must be fetched and printed.
    is_print = True
    while True:
        if is_print:
            recommend_articles = await spider.get_recommend_article()
            ids = [d.get('id') for d in recommend_articles]
            print_recommend_article(recommend_articles)
            is_print = False
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        remd_cmd = input(help_recommend()).lower()
        # Commands may carry an argument after ':', e.g. "read:<id>".
        remd_cmd = remd_cmd.split(':')
        if not remd_cmd:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(remd_cmd[0])
        if remd_cmd[0] == 'f':
            # Fetch a fresh batch of recommendations on the next pass.
            is_print = True
            continue
        elif remd_cmd[0] == 'r':
            # Re-print the current batch without refetching.
            print_recommend_article(recommend_articles)
            continue
        elif remd_cmd[0] == 'read':
            if len(remd_cmd) != 2:
                print_colour('输入有误!', 'red')
                continue
            if remd_cmd[1] not in ids:
                print_colour('输入id有误!', 'red')
                continue
            output = [d for d in recommend_articles
                      if d['id'] == remd_cmd[1]][0]
            print_article_content(output)
            await deal_article(spider, output)
            continue
        elif remd_cmd[0] == 'question':
            question_ids = [
                d.get('question').get('id') for d in recommend_articles
            ]
            if len(remd_cmd) != 2:
                print_colour('输入有误!', 'red')
                continue
            if remd_cmd[1] not in question_ids:
                print_colour('输入id有误!', 'red')
                continue
            # Map the chosen question id back to its article id.
            assert len(ids) == len(question_ids)
            id_map = dict(zip(question_ids, ids))
            uid = id_map[remd_cmd[1]]
            await deal_question(spider, remd_cmd[1], uid)
            continue
        elif remd_cmd[0] == 'back':
            break
        else:
            print_colour('输入有误!', 'red')
            continue
Esempio n. 16
0
async def deal_question(spider, question_id, uid):
    """
    Interactive loop over a question's answers: read one, show question
    details, or page back and forth through the answer list.

    :param spider: data-extraction client
    :param question_id: id of the question being browsed
    :param uid: article/answer id used when fetching question details
    :return: None
    """
    # is_print marks that the first page must be fetched and printed.
    is_print = True
    while True:
        if is_print:
            question_articles, paging = await spider.get_article_by_question(
                question_id)
            ids = [d.get('id') for d in question_articles]
            print_recommend_article(question_articles)
            is_print = False
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        ques_cmd = input(help_question()).lower()
        # Commands may carry an argument after ':', e.g. "read:<id>".
        ques_cmd = ques_cmd.split(':')
        if not ques_cmd:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(ques_cmd[0])
        if ques_cmd[0] == 'read':
            if len(ques_cmd) != 2:
                print_colour('输入有误!', 'red')
                continue
            if ques_cmd[1] not in ids:
                print_colour('输入id有误!', 'red')
                continue
            output = [d for d in question_articles
                      if d['id'] == ques_cmd[1]][0]
            print_article_content(output)
            await deal_article(spider, output)
            continue
        elif ques_cmd[0] == 'qsdl':
            # Show the question's own details.
            question_detail = await spider.get_question_details(
                question_id, uid)
            print_question(question_detail)
        elif ques_cmd[0] == 'n':
            # Next page of answers.
            if paging.get('is_end'):
                print_colour('已是最后一页!', 'red')
                continue
            url = paging['next']
            question_articles, paging = await spider.get_article_by_question_url(
                url)
            ids = [d.get('id') for d in question_articles]
            print_recommend_article(question_articles)
            continue
        elif ques_cmd[0] == 'p':
            # Previous page of answers.
            if paging.get('is_start'):
                print_colour('已是第一页!', 'red')
                continue
            url = paging['previous']
            question_articles, paging = await spider.get_article_by_question_url(
                url)
            ids = [d.get('id') for d in question_articles]
            print_recommend_article(question_articles)
        elif ques_cmd[0] == 'r':
            # Re-print the current page without refetching.
            print_recommend_article(question_articles)
            continue
        elif ques_cmd[0] == 'back':
            break
        else:
            print_colour('输入有误!', 'red')
            continue
Esempio n. 17
0
async def deal_comments(spider, result, paging):
    """
    Interactive loop over an article's comments: page back and forth,
    or select one comment (by id) for voting/replying.

    :param spider: data-extraction client
    :param result: current page of comment dicts
    :param paging: paging info with 'next'/'previous' urls and flags
    :return: None
    """
    while True:
        # Collect ids for this page, including ids of child comments,
        # so the 'com:<id>' command can validate its argument.
        comment_ids = []
        for d in result:
            comment_ids.append(d['id'])
            for clild in d.get('child_comments'):
                comment_ids.append(clild['id'])
        comment_ids = list(set(comment_ids))
        # Blank yellow line acts as a visual prompt separator.
        print_colour('', 'yellow')
        comm_cmd = input(help_comments()).lower()
        comm_cmd = comm_cmd.split(':')
        if not comm_cmd:
            print_colour('输入有误!', 'red')
            continue
        # NOTE(review): `exit` is presumably a project helper that quits only
        # on a sentinel command, shadowing builtin exit() — confirm.
        exit(comm_cmd[0])
        if comm_cmd[0] == 'back':
            break
        elif comm_cmd[0] == 'n':
            # Next page of comments.
            if paging.get('is_end'):
                print_colour('已是最后一页!', 'red')
                continue
            url = paging['next']
            result, paging = await spider.get_comments_by_url(url)
            print_comments(result)
            continue
        elif comm_cmd[0] == 'p':
            # Previous page of comments.
            if paging.get('is_start'):
                print_colour('已是第一页!', 'red')
                continue
            url = paging['previous']
            result, paging = await spider.get_comments_by_url(url)
            print_comments(result)
            continue
        elif comm_cmd[0] == 'com':
            if len(comm_cmd) != 2:
                print_colour('输入有误!', 'red')
                continue
            if comm_cmd[1] not in comment_ids:
                print_colour('输入id有误!', 'red')
                continue
            await deal_comments_by_id(spider, comm_cmd[1])
            continue
        else:
            print_colour('输入有误!', 'red')
            continue
def print_comments(output: list):
    """
    Print comments with vote counts, reply targets and nested child
    comments (children are indented by 9 spaces).

    :param output: list of comment dicts from the API
    :return: None
    """
    for d in output:
        author = d.get('author').get('name')
        reply_to_author = d.get('reply_to_author').get('name')
        content = d.get('content')
        vote_count = d.get('vote_count')
        comment_id = d.get('id')
        child_comments = d.get('child_comments')
        print_colour(f'comment_id:{comment_id}', 'purple')
        if d.get('featured'):
            print_colour('热评🔥', end='')
        if reply_to_author:
            print_colour(f'{author}->{reply_to_author}', end='')
        else:
            print_colour(f'{author}', end='')
        print_colour(f'(赞:{vote_count}):{content}')
        if child_comments:
            for child in child_comments:
                author = child.get('author').get('name')
                reply_to_author = child.get('reply_to_author').get('name')
                content = child.get('content')
                vote_count = child.get('vote_count')
                comment_id = child.get('id')
                print_colour(f'         comment_id:{comment_id}', 'purple')
                # Bug fix: this previously checked the *parent* comment's
                # 'featured' flag (d) instead of the child's, tagging every
                # child of a featured comment as featured.
                if child.get('featured'):
                    print_colour('         热评🔥', end='')
                if reply_to_author:
                    print_colour(f'         {author}->{reply_to_author}',
                                 end='')
                else:
                    print_colour(f'         {author}', end='')
                print_colour(f'         (赞:{vote_count}):{content}')
                print_colour(
                    '         *********************************************************',
                    'blue')
        print_colour(
            '==========================================================',
            'blue')
Esempio n. 19
0
    def deploy(self, auth_provider, secret_key, dask_gateway_version):
        """
        Deploy this hub: apply optional domain overrides, install daskhub
        CRDs when needed, then `helm upgrade --install` the hub chart with
        generated and per-hub values files.

        :param auth_provider: provider used by get_generated_config to
            produce auth credentials
        :param secret_key: key material used by get_generated_config
        :param dask_gateway_version: version tag for dask-gateway CRD manifests
        """
        # Support overriding domain configuration in the loaded cluster.yaml via
        # a cluster.yaml specified enc-<something>.secret.yaml file that only
        # includes the domain configuration of a typical cluster.yaml file.
        #
        # Check if this hub has an override file. If yes, apply override.
        #
        # FIXME: This could be generalized so that the cluster.yaml would allow
        #        any of this configuration to be specified in a secret file instead of a
        #        publicly readable file. We should not keep adding specific config overrides
        #        if such need occur but instead make cluster.yaml be able to link to
        #        additional secret configuration.
        if "domain_override_file" in self.spec.keys():
            domain_override_file = self.spec["domain_override_file"]

            with get_decrypted_file(
                self.cluster.config_path.joinpath(domain_override_file)
            ) as decrypted_path:
                with open(decrypted_path) as f:
                    domain_override_config = yaml.load(f)

            self.spec["domain"] = domain_override_config["domain"]

        generated_values = self.get_generated_config(auth_provider, secret_key)

        if self.spec["helm_chart"] == "daskhub":
            # Install CRDs for daskhub before deployment
            manifest_urls = [
                f"https://raw.githubusercontent.com/dask/dask-gateway/{dask_gateway_version}/resources/helm/dask-gateway/crds/daskclusters.yaml",
                f"https://raw.githubusercontent.com/dask/dask-gateway/{dask_gateway_version}/resources/helm/dask-gateway/crds/traefik.yaml",
            ]

            for manifest_url in manifest_urls:
                subprocess.check_call(["kubectl", "apply", "-f", manifest_url])

        # Write the generated values to a temp file and decrypt any encrypted
        # values files for the duration of the helm run.
        with tempfile.NamedTemporaryFile(
            mode="w"
        ) as generated_values_file, get_decrypted_files(
            self.cluster.config_path.joinpath(p)
            for p in self.spec["helm_chart_values_files"]
        ) as values_files:
            # YAML is a superset of JSON, so helm can read this directly.
            json.dump(generated_values, generated_values_file)
            generated_values_file.flush()

            cmd = [
                "helm",
                "upgrade",
                "--install",
                "--create-namespace",
                "--wait",
                f"--namespace={self.spec['name']}",
                self.spec["name"],
                helm_charts_dir.joinpath(self.spec["helm_chart"]),
                # Ordering matters here - config explicitly mentioned in cli should take
                # priority over our generated values. Based on how helm does overrides, this means
                # we should put the config from cluster.yaml last.
                f"--values={generated_values_file.name}",
            ]

            # Add on the values files
            for values_file in values_files:
                cmd.append(f"--values={values_file}")

            # join method will fail on the PosixPath element if not transformed
            # into a string first
            print_colour(f"Running {' '.join([str(c) for c in cmd])}")
            subprocess.check_call(cmd)
Esempio n. 20
0
    def deploy(self, auth_provider, secret_key, skip_hub_health_test=False):
        """
        Deploy this hub: refresh chart dependencies, merge in any secret
        cluster config, `helm upgrade --install` the hub chart, and finally
        run a pytest-based health check against the live hub.

        :param auth_provider: provider used by get_generated_config to
            produce auth credentials
        :param secret_key: key material used by get_generated_config
        :param skip_hub_health_test: when True, skip the post-deploy
            health check
        """
        # Ensure helm charts are up to date
        os.chdir("helm-charts")
        subprocess.check_call(["helm", "dep", "up", "basehub"])
        if self.spec["template"] == "daskhub":
            subprocess.check_call(["helm", "dep", "up", "daskhub"])
        os.chdir("..")

        # Check if this cluster has any secret config. If yes, read it in.
        secret_config_path = (
            Path(os.getcwd())
            / "secrets/config/hubs"
            / f'{self.cluster.spec["name"]}.cluster.yaml'
        )

        secret_hub_config = {}
        if os.path.exists(secret_config_path):
            with decrypt_file(secret_config_path) as decrypted_file_path:
                with open(decrypted_file_path) as f:
                    secret_config = yaml.load(f)

            if secret_config.get("hubs", {}):
                hubs = secret_config["hubs"]
                current_hub = next(
                    (hub for hub in hubs if hub["name"] == self.spec["name"]), {}
                )
                # Support domain name overrides
                if "domain" in current_hub:
                    self.spec["domain"] = current_hub["domain"]
                secret_hub_config = current_hub.get("config", {})

        generated_values = self.get_generated_config(auth_provider, secret_key)

        # Write the three layers of values (hub config, generated, secret) to
        # temp files for the duration of the helm run.
        with tempfile.NamedTemporaryFile(
            mode="w"
        ) as values_file, tempfile.NamedTemporaryFile(
            mode="w"
        ) as generated_values_file, tempfile.NamedTemporaryFile(
            mode="w"
        ) as secret_values_file:
            # YAML is a superset of JSON, so helm can read these directly.
            json.dump(self.spec["config"], values_file)
            json.dump(generated_values, generated_values_file)
            json.dump(secret_hub_config, secret_values_file)
            values_file.flush()
            generated_values_file.flush()
            secret_values_file.flush()

            cmd = [
                "helm",
                "upgrade",
                "--install",
                "--create-namespace",
                "--wait",
                f"--namespace={self.spec['name']}",
                self.spec["name"],
                os.path.join("helm-charts", self.spec["template"]),
                # Ordering matters here - config explicitly mentioned in cluster config should take
                # priority over our generated values. Based on how helm does overrides, this means
                # we should put the config from config/hubs last.
                f"--values={generated_values_file.name}",
                f"--values={values_file.name}",
                f"--values={secret_values_file.name}",
            ]

            print_colour(f"Running {' '.join(cmd)}")
            # Can't test without deploying, since our service token isn't set by default
            subprocess.check_call(cmd)

            if not skip_hub_health_test:

                # FIXME: Clean this up
                # Non-basehub templates nest the basehub config one level deeper.
                if self.spec["template"] != "basehub":
                    service_api_token = generated_values["basehub"]["jupyterhub"][
                        "hub"
                    ]["services"]["hub-health"]["apiToken"]
                else:
                    service_api_token = generated_values["jupyterhub"]["hub"][
                        "services"
                    ]["hub-health"]["apiToken"]

                hub_url = f'https://{self.spec["domain"]}'

                # On failure, pytest prints out params to the test that failed.
                # This can contain sensitive info - so we hide stderr
                # FIXME: Don't use pytest - just call a function instead
                print_colour("Running hub health check...")
                # Show errors locally but redirect on CI
                gh_ci = os.environ.get("CI", "false")
                pytest_args = [
                    "-q",
                    "deployer/tests",
                    "--hub-url",
                    hub_url,
                    "--api-token",
                    service_api_token,
                    "--hub-type",
                    self.spec["template"],
                ]
                if gh_ci == "true":
                    print_colour("Testing on CI, not printing output")
                    with open(os.devnull, "w") as dn, redirect_stderr(
                        dn
                    ), redirect_stdout(dn):
                        exit_code = pytest.main(pytest_args)
                else:
                    print_colour("Testing locally, do not redirect output")
                    exit_code = pytest.main(pytest_args)
                if exit_code != 0:
                    print("Health check failed!", file=sys.stderr)
                    sys.exit(exit_code)
                else:
                    print_colour("Health check succeeded!")
def print_recommend_article(output: list):
    """Print a short summary card for each recommended article.

    :param output: list of article dicts as returned by the recommendation API
    :return: None
    """
    for article in output:
        question = article['question']
        print_colour('=' * 60, 'white')
        print_colour(f'article_id:{article["id"]}', 'purple')
        print_colour(f'question_id:{question["id"]}', 'purple')
        # Title and author name share one line: suppress the newline after
        # the title, then append "(author)".
        print_colour(question['title'], 'purple', end='')
        print_colour(f"({article['author']['name']})", 'purple')
        print_colour(article['excerpt'])
        stats = (
            f"*赞同数{article.get('voteup_count')} 感谢数{article.get('thanks_count', 0)} "
            f"评论数{article.get('comment_count')} 浏览数{article.get('visited_count')}*"
        )
        print_colour(stats, 'purple')
def print_article_content(output: dict):
    """Render an article's HTML body as plain text and print it with metadata.

    :param output: article dict containing 'content', 'id' and a nested
        'question' dict with 'id' and 'title'
    :return: None
    """
    question = output['question']
    # Convert the HTML body into readable plain text before printing.
    body = html2text.html2text(output['content'])
    print_colour(body)
    separator = '-----------------------------------------------------'
    print_colour(separator, 'purple')
    print_colour(f'|article_id:{output["id"]}', 'purple')
    print_colour(f'|question_id:{question["id"]}', 'purple')
    print_colour(f'|title:{question["title"]}', 'purple')
    print_colour(separator, 'purple')
Esempio n. 23
0
def deploy_grafana_dashboards(cluster_name):
    """
    Deploy grafana dashboards to a cluster that provide useful metrics
    for operating a JupyterHub

    Grafana dashboards and deployment mechanism in question are maintained in
    this repo: https://github.com/jupyterhub/grafana-dashboards

    Args:
        cluster_name (str): The name of the cluster whose grafana instance the
            dashboards should be deployed to

    Raises:
        ValueError: if the decrypted secret file does not define `grafana_token`
    """
    validate_cluster_config(cluster_name)
    validate_support_config(cluster_name)

    config_file_path = find_absolute_path_to_cluster_file(cluster_name)
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f), config_file_path.parent)

    # If grafana support chart is not deployed, then there's nothing to do
    if not cluster.support:
        print_colour(
            "Support chart has not been deployed. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_token_file = (
        config_file_path.parent).joinpath("enc-grafana-token.secret.yaml")

    # Read the cluster specific secret grafana token file
    with get_decrypted_file(grafana_token_file) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Check GRAFANA_TOKEN exists in the secret config file before continuing
    if "grafana_token" not in config.keys():
        raise ValueError(
            f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}"
        )

    # FIXME: We assume grafana_url and uses_tls config will be defined in the first
    #        file listed under support.helm_chart_values_files.
    # Guard against a missing or empty list, which previously raised an
    # unhelpful IndexError on the [0] index below.
    values_files = cluster.support.get("helm_chart_values_files", [])
    if not values_files:
        print_colour(
            "No helm_chart_values_files defined under support. Skipping Grafana dashboards deployment..."
        )
        return
    support_values_file = values_files[0]
    with open(config_file_path.parent.joinpath(support_values_file)) as f:
        support_values_config = yaml.load(f)

    # Get the url where grafana is running from the support values file
    grafana_url = (support_values_config.get("grafana",
                                             {}).get("ingress",
                                                     {}).get("hosts", {}))
    uses_tls = (support_values_config.get("grafana",
                                          {}).get("ingress",
                                                  {}).get("tls", {}))

    if not grafana_url:
        print_colour(
            "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_url = (f"https://{grafana_url[0]}"
                   if uses_tls else f"http://{grafana_url[0]}")

    # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana
    print_colour("Cloning jupyterhub/grafana-dashboards...")

    dashboards_dir = "grafana_dashboards"

    subprocess.check_call([
        "git",
        "clone",
        "https://github.com/jupyterhub/grafana-dashboards",
        dashboards_dir,
    ])

    # We need the existing env too for the deployer to be able to find jsonnet and grafonnet
    deploy_env = os.environ.copy()
    deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]})

    try:
        print_colour(f"Deploying grafana dashboards to {cluster_name}...")
        subprocess.check_call(["./deploy.py", grafana_url],
                              env=deploy_env,
                              cwd=dashboards_dir)

        print_colour(f"Done! Dashboards deployed to {grafana_url}.")
    finally:
        # Delete the directory where we cloned the repo.
        # The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here.
        # Might be because opening more than once of a temp file is tried
        # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile)
        shutil.rmtree(dashboards_dir)
Esempio n. 24
0
def generate_support_matrix_jobs(
    cluster_file,
    cluster_config,
    cluster_info,
    added_or_modified_files,
    upgrade_support_on_this_cluster=False,
    upgrade_support_on_all_clusters=False,
):
    """Generate a list of dictionaries describing which clusters need to undergo a helm
    upgrade of their support chart based on whether their associated support chart
    values files have been modified. To be parsed to GitHub Actions in order to generate
    jobs in a matrix.

    Args:
        cluster_file (path obj): The absolute path to the cluster.yaml file of a given
            cluster
        cluster_config (dict): The cluster-wide config for a given cluster in
            dictionary format
        cluster_info (dict): A template dictionary for defining matrix jobs prepopulated
            with some info. "cluster_name": The name of the given cluster; "provider":
            the cloud provider the given cluster runs on; "reason_for_redeploy":
            what has changed in the repository to prompt the support chart for this
            cluster to be redeployed. NOTE: this dict is mutated in place (the
            "reason_for_redeploy" key is renamed).
        added_or_modified_files (set): A set of all added or modified files
            provided in a GitHub Pull Request
        upgrade_support_on_this_cluster (bool, optional): If True, generates jobs to
            update the support chart on the given cluster. This is triggered when the
            cluster.yaml file itself is modified. Defaults to False.
        upgrade_support_on_all_clusters (bool, optional): If True, generates jobs to
            update the support chart on all clusters. This is triggered when common
            config has been modified in the support helm chart. Defaults to False.

    Returns:
        list[dict]: A list of dictionaries, one per required job. Each contains:
            the name of a cluster, the cloud provider that cluster runs on, a
            Boolean indicating if the support chart should be upgraded, and a
            reason why the support chart needs upgrading.
    """
    # Rename dictionary key
    cluster_info["reason_for_support_redeploy"] = cluster_info.pop(
        "reason_for_redeploy")

    matrix_jobs = []

    # Guard clause: nothing to do when the cluster declares no support chart.
    support_config = cluster_config.get("support", {})
    if not support_config:
        print_colour(
            f"No support defined for cluster: {cluster_info['cluster_name']}")
        return matrix_jobs

    if upgrade_support_on_all_clusters or upgrade_support_on_this_cluster:
        # A forced upgrade was requested, so emit a job unconditionally.
        job = cluster_info.copy()
        job["upgrade_support"] = True

        if upgrade_support_on_all_clusters:
            job["reason_for_support_redeploy"] = (
                "Support helm chart has been modified")

        matrix_jobs.append(job)
        return matrix_jobs

    # Otherwise, upgrade only when one of this cluster's support values
    # files appears among the files touched by the PR.
    values_file_paths = [
        cluster_file.parent.joinpath(name)
        for name in support_config.get("helm_chart_values_files", {})
    ]
    touched = added_or_modified_files.intersection(values_file_paths)

    if touched:
        job = cluster_info.copy()
        job["upgrade_support"] = True
        job["reason_for_support_redeploy"] = (
            "Following helm chart values files were modified: "
            + ", ".join(path.name for path in touched))
        matrix_jobs.append(job)

    return matrix_jobs
def print_vote_thank(output: dict, typ: str):
    """Print the outcome of a vote/thank action.

    :param output: API response dict; may contain 'error', 'thanks_count',
        'voteup_count'
    :param typ: one of 'thank', 'unthank', 'up', 'down', 'neutral'
    :return: None
    """
    if output.get('error'):
        print_colour(output.get('error'), 'red')
        return

    # Map the action type to its message prefix; anything unrecognised
    # falls back to the "neutral" message, matching the original elif chain.
    prefixes = {
        'thank': '感谢成功!',
        'unthank': '取消感谢!',
        'up': '赞同成功!',
        'down': '反对成功!',
    }
    if typ in ('thank', 'unthank'):
        print_colour(f'{prefixes[typ]}感谢总数{output["thanks_count"]}')
    else:
        prefix = prefixes.get(typ, '保持中立!')
        print_colour(f'{prefix}赞同总数{output["voteup_count"]}')
Esempio n. 26
0
    def deploy_support(self):
        """Install/upgrade cert-manager and the support chart on this cluster.

        Adds and refreshes the jetstack helm repo, installs cert-manager at a
        pinned version, then installs the local support chart with the
        decrypted secrets file and the cluster's support config as values.
        """
        cert_manager_url = "https://charts.jetstack.io"
        cert_manager_version = "v1.3.1"

        def _helm(*args):
            # Thin wrapper so each helm invocation reads as a single call.
            subprocess.check_call(["helm", *args])

        print_colour("Adding cert-manager chart repo...")
        _helm("repo", "add", "jetstack", cert_manager_url)

        print_colour("Updating cert-manager chart repo...")
        _helm("repo", "update")

        print_colour("Provisioning cert-manager...")
        _helm(
            "upgrade",
            "--install",
            "--create-namespace",
            "--namespace=cert-manager",
            "cert-manager",
            "jetstack/cert-manager",
            f"--version={cert_manager_version}",
            "--set=installCRDs=true",
        )
        print_colour("Done!")

        print_colour("Provisioning support charts...")
        _helm("dep", "up", "support")

        support_dir = Path(__file__).parent.parent / "support"
        support_secrets_file = support_dir / "secrets.yaml"

        # Write the cluster's support config to a temp values file and pass
        # it to helm alongside the decrypted secrets file.
        with tempfile.NamedTemporaryFile(mode="w") as values_file, decrypt_file(
            support_secrets_file
        ) as secret_file:
            yaml.dump(self.support.get("config", {}), values_file)
            values_file.flush()
            _helm(
                "upgrade",
                "--install",
                "--create-namespace",
                "--namespace=support",
                "support",
                str(support_dir),
                f"--values={secret_file}",
                f"--values={values_file.name}",
                "--wait",
            )
        print_colour("Done!")
Esempio n. 27
0
    def deploy_support(self):
        """Install/upgrade cert-manager and this cluster's support chart.

        Adds and refreshes the jetstack helm repo, installs cert-manager at a
        pinned version, then installs the helm-charts/support chart with the
        decrypted secrets file plus every values file the cluster declares.
        """
        cert_manager_url = "https://charts.jetstack.io"
        cert_manager_version = "v1.3.1"

        print_colour("Adding cert-manager chart repo...")
        subprocess.check_call(
            ["helm", "repo", "add", "jetstack", cert_manager_url])

        print_colour("Updating cert-manager chart repo...")
        subprocess.check_call(["helm", "repo", "update"])

        print_colour("Provisioning cert-manager...")
        subprocess.check_call([
            "helm",
            "upgrade",
            "--install",
            "--create-namespace",
            "--namespace=cert-manager",
            "cert-manager",
            "jetstack/cert-manager",
            f"--version={cert_manager_version}",
            "--set=installCRDs=true",
        ])
        print_colour("Done!")

        print_colour("Provisioning support charts...")

        support_dir = (Path(__file__).parent.parent).joinpath(
            "helm-charts", "support")
        subprocess.check_call(["helm", "dep", "up", support_dir])

        support_secrets_file = support_dir.joinpath("enc-support.secret.yaml")
        # TODO: Update this with statement to handle any number of context managers
        #       containing decrypted support values files. Not critical right now as
        #       no individual cluster has specific support secrets, but it's possible
        #       to support that if we want to in the future.
        with get_decrypted_file(support_secrets_file) as secret_file:
            cmd = [
                "helm",
                "upgrade",
                "--install",
                "--create-namespace",
                "--namespace=support",
                "--wait",
                "support",
                str(support_dir),
                f"--values={secret_file}",
            ]
            # Append every cluster-declared values file after the secrets.
            cmd.extend(
                f"--values={self.config_path.joinpath(values_file)}"
                for values_file in self.support["helm_chart_values_files"])

            print_colour(f"Running {' '.join([str(c) for c in cmd])}")
            subprocess.check_call(cmd)

        print_colour("Done!")
Esempio n. 28
0
    async def login(self, load_cookies: bool = False) -> None:
        """
        Log in to zhihu.com.

        When load_cookies is True, first tries to reuse cached cookies and
        returns early on success. Otherwise (or when the cached login fails)
        performs a full username/password sign-in: fetches an xsrf token and
        a captcha, signs the request, and POSTs the encrypted form data.

        :param load_cookies: whether to try logging in with cached cookies
            from self.cookie_file first
        :return: None; terminates the process via sys.exit() on failure
        """
        if load_cookies:
            self.cookie_jar.load(self.cookie_file)
            self.logger.debug(f'加载cookies从:{self.cookie_file}')
            is_succ = await self.check_login()
            if is_succ:
                print_colour('登录成功!', colour='green')
                return
            else:
                # Cached cookies are stale: clear them and fall through to a
                # fresh credential-based login.
                print_colour('通过缓存登录失败尝试重新登录', 'red')
                self.cookie_jar.clear()
                os.remove(self.cookie_file)

        login_data = {
            'client_id': 'c3cef7c66a1843f8b3a9e6a1e3160e20',
            'grant_type': 'password',
            'source': 'com.zhihu.web',
            'username': self.user,
            'password': self.password,
            'lang': 'en',  # en: 4-character captcha, cn: Chinese captcha
            'ref_source': 'other_https://www.zhihu.com/signin?next=%2F',
            'utm_source': ''
        }
        # The signature is computed over the timestamp and login_data, so the
        # timestamp is taken immediately before signing.
        xsrf = await self._get_xsrf()
        captcha = await self._get_captcha()
        timestamp = int(time.time() * 1000)
        login_data.update({
            'captcha':
            captcha,
            'timestamp':
            timestamp,
            'signature':
            self._get_signature(timestamp, login_data)
        })
        headers = {
            'accept-encoding':
            'gzip, deflate, br',
            'Host':
            'www.zhihu.com',
            'Referer':
            'https://www.zhihu.com/',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
            '(KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586',
            'content-type':
            'application/x-www-form-urlencoded',
            'x-zse-83':
            '3_2.0',
            'x-xsrftoken':
            xsrf
        }
        # NOTE(review): self._encrypt presumably obfuscates the form body the
        # way zhihu's web client does — confirm against its implementation.
        data = self._encrypt(login_data)
        url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
        async with self.post(url, data=data, headers=headers) as r:
            resp = await r.text()
            # The API signals failure by including an 'error' key in the body.
            if 'error' in resp:
                print_colour(json.loads(resp)['error'], 'red')
                self.logger.debug(f"登录失败:{json.loads(resp)['error']}")
                sys.exit()
            self.logger.debug(resp)
            # Double-check the session is actually authenticated.
            is_succ = await self.check_login()
            if is_succ:
                print_colour('登录成功!', colour='green')
            else:
                print_colour('登录失败!', colour='red')
                sys.exit()
Esempio n. 29
0
def main():
    """
    Register every cluster's prometheus instance as a Grafana datasource on a
    central cluster's Grafana, skipping clusters that are already registered
    and logging (but not aborting on) clusters that fail.
    """
    argparser = argparse.ArgumentParser(
        description="""A command line tool to update Grafana
        datasources.
        """)

    argparser.add_argument(
        "cluster_name",
        type=str,
        nargs="?",
        help="The name of the cluster where the Grafana lives",
        default="2i2c",
    )

    args = argparser.parse_args()
    central_cluster = args.cluster_name
    grafana_host = get_central_grafana_url(central_cluster)
    datasource_endpoint = f"https://{grafana_host}/api/datasources"

    # Get a list of the clusters that already have their prometheus instances used as datasources
    datasources = get_clusters_used_as_datasources(central_cluster,
                                                   datasource_endpoint)

    # Get a list of filepaths to all cluster.yaml files in the repo
    cluster_files = get_all_cluster_yaml_files()

    print("Searching for clusters that aren't Grafana datasources...")
    # Count how many clusters we can't add as datasources for logging
    exceptions = 0
    for cluster_file in cluster_files:
        # Read in the cluster.yaml file
        with open(cluster_file) as f:
            cluster_config = yaml.load(f)

        # Get the cluster's name
        cluster_name = cluster_config.get("name", {})
        if cluster_name and cluster_name not in datasources:
            print(
                f"Found {cluster_name} cluster. Checking if it can be added..."
            )
            # Build the datasource details for the instances that aren't configured as datasources
            try:
                datasource_details = build_datasource_details(cluster_name)
                req_body = json.dumps(datasource_details)

                # Tell Grafana to create and register a datasource for this cluster
                headers = build_request_headers(central_cluster)
                response = requests.post(datasource_endpoint,
                                         data=req_body,
                                         headers=headers)
                if response.status_code != 200:
                    # Typo fix: "occured" -> "occurred"
                    print(
                        f"An error occurred when creating the datasource. \nError was {response.text}."
                    )
                    response.raise_for_status()
                print_colour(
                    f"Successfully created a new datasource for {cluster_name}!"
                )
            except Exception as e:
                # Best-effort: log the failure and keep processing clusters.
                print_colour(
                    f"An error occurred for {cluster_name}.\nError was: {e}.\nSkipping...",
                    "yellow",
                )
                exceptions += 1

    # Only print the red failure summary when something actually failed;
    # previously "Failed to add 0 clusters..." was printed unconditionally.
    if exceptions:
        print_colour(
            f"Failed to add {exceptions} clusters as datasources. See errors above!",
            "red")
    print_colour(
        f"Successfully retrieved {len(datasources)} existing datasources! {datasources}"
    )
Esempio n. 30
0
def deploy_jupyterhub_grafana(cluster_name):
    """
    Deploy grafana dashboards for operating a hub

    Args:
        cluster_name (str): The name of the cluster whose grafana instance the
            dashboards should be deployed to

    Raises:
        ValueError: if the decrypted secret config lacks `grafana_token`
    """

    # Validate our config with JSON Schema first before continuing
    validate(cluster_name)

    config_file_path = (Path(os.getcwd()) / "config/hubs" /
                        f"{cluster_name}.cluster.yaml")
    with open(config_file_path) as f:
        cluster = Cluster(yaml.load(f))

    # If grafana support chart is not deployed, then there's nothing to do
    if not cluster.support:
        print_colour(
            "Support chart has not been deployed. Skipping Grafana dashboards deployment..."
        )
        return

    secret_config_file = (Path(os.getcwd()) / "secrets/config/hubs" /
                          f"{cluster_name}.cluster.yaml")

    # Read and set GRAFANA_TOKEN from the cluster specific secret config file
    with decrypt_file(secret_config_file) as decrypted_file_path:
        with open(decrypted_file_path) as f:
            config = yaml.load(f)

    # Fail early with a clear message instead of a bare KeyError further down
    if "grafana_token" not in config:
        raise ValueError(
            f"`grafana_token` not provided in secret file! Please add it and try again: {secret_config_file}"
        )

    # Get the url where grafana is running from the cluster config
    grafana_url = (cluster.support.get("config",
                                       {}).get("grafana",
                                               {}).get("ingress",
                                                       {}).get("hosts", {}))
    uses_tls = (cluster.support.get("config",
                                    {}).get("grafana",
                                            {}).get("ingress",
                                                    {}).get("tls", {}))

    if not grafana_url:
        print_colour(
            "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..."
        )
        return

    grafana_url = ("https://" + grafana_url[0] if uses_tls else "http://" +
                   grafana_url[0])

    # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana
    print_colour("Cloning jupyterhub/grafana-dashboards...")

    dashboards_dir = "grafana_dashboards"

    subprocess.check_call([
        "git",
        "clone",
        "https://github.com/jupyterhub/grafana-dashboards",
        dashboards_dir,
    ])

    # We need the existing env too for the deployer to be able to find jsonnet and grafonnet
    deploy_env = os.environ.copy()
    deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]})

    try:
        print_colour(f"Deploying grafana dashboards to {cluster_name}...")
        subprocess.check_call(["./deploy.py", grafana_url],
                              env=deploy_env,
                              cwd=dashboards_dir)

        # Typo fix in the user-facing message: "Dasboards" -> "Dashboards"
        print_colour(f"Done! Dashboards deployed to {grafana_url}.")
    finally:
        # Delete the directory where we cloned the repo.
        # The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here.
        # Might be because opening more than once of a temp file is tried
        # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile)
        shutil.rmtree(dashboards_dir)